Posted to commits@storm.apache.org by bo...@apache.org on 2016/01/11 21:56:57 UTC

[01/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatibility

Repository: storm
Updated Branches:
  refs/heads/master f61416254 -> a4f9f8bc5
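
Note: the commit subject above mentions keeping some form of backwards compatibility while moving classes from backtype.storm to org.apache.storm, but this partial diff only shows the old generated sources being deleted, so the compatibility mechanism itself is not visible here. As a rough, hypothetical illustration of one generic way such compatibility can be provided (a deprecated shim class left in the legacy package that simply delegates to the relocated class), and not necessarily the approach this commit actually takes, a sketch might look like the following; the Widget class and the example packages are invented purely for illustration:

    // File: org/apache/storm/example/Widget.java
    // Relocated class under the new package (name invented for this sketch).
    package org.apache.storm.example;

    public class Widget {
        private final String id;

        public Widget(String id) {
            this.id = id;
        }

        public String getId() {
            return id;
        }
    }

    // File: backtype/storm/example/Widget.java
    // Legacy shim kept so existing imports keep compiling.
    package backtype.storm.example;

    /**
     * @deprecated moved to {@link org.apache.storm.example.Widget}; this shim
     * only forwards to the relocated class.
     */
    @Deprecated
    public class Widget extends org.apache.storm.example.Widget {
        public Widget(String id) {
            super(id); // delegate straight to the relocated implementation
        }
    }

Under that kind of scheme, old code importing backtype.storm.example.Widget would keep compiling and running while new code targets org.apache.storm.example directly. Generated Thrift classes such as ComponentPageInfo below are deleted from backtype.storm.generated in this diff and are presumably regenerated under org.apache.storm.generated elsewhere in the change.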


http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/generated/ComponentPageInfo.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/generated/ComponentPageInfo.java b/storm-core/src/jvm/backtype/storm/generated/ComponentPageInfo.java
deleted file mode 100644
index 6152d02..0000000
--- a/storm-core/src/jvm/backtype/storm/generated/ComponentPageInfo.java
+++ /dev/null
@@ -1,2194 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Autogenerated by Thrift Compiler (0.9.3)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- *  @generated
- */
-package backtype.storm.generated;
-
-import org.apache.thrift.scheme.IScheme;
-import org.apache.thrift.scheme.SchemeFactory;
-import org.apache.thrift.scheme.StandardScheme;
-
-import org.apache.thrift.scheme.TupleScheme;
-import org.apache.thrift.protocol.TTupleProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.TException;
-import org.apache.thrift.async.AsyncMethodCallback;
-import org.apache.thrift.server.AbstractNonblockingServer.*;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.EnumMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.EnumSet;
-import java.util.Collections;
-import java.util.BitSet;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import javax.annotation.Generated;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
-public class ComponentPageInfo implements org.apache.thrift.TBase<ComponentPageInfo, ComponentPageInfo._Fields>, java.io.Serializable, Cloneable, Comparable<ComponentPageInfo> {
-  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ComponentPageInfo");
-
-  private static final org.apache.thrift.protocol.TField COMPONENT_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("component_id", org.apache.thrift.protocol.TType.STRING, (short)1);
-  private static final org.apache.thrift.protocol.TField COMPONENT_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("component_type", org.apache.thrift.protocol.TType.I32, (short)2);
-  private static final org.apache.thrift.protocol.TField TOPOLOGY_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("topology_id", org.apache.thrift.protocol.TType.STRING, (short)3);
-  private static final org.apache.thrift.protocol.TField TOPOLOGY_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("topology_name", org.apache.thrift.protocol.TType.STRING, (short)4);
-  private static final org.apache.thrift.protocol.TField NUM_EXECUTORS_FIELD_DESC = new org.apache.thrift.protocol.TField("num_executors", org.apache.thrift.protocol.TType.I32, (short)5);
-  private static final org.apache.thrift.protocol.TField NUM_TASKS_FIELD_DESC = new org.apache.thrift.protocol.TField("num_tasks", org.apache.thrift.protocol.TType.I32, (short)6);
-  private static final org.apache.thrift.protocol.TField WINDOW_TO_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("window_to_stats", org.apache.thrift.protocol.TType.MAP, (short)7);
-  private static final org.apache.thrift.protocol.TField GSID_TO_INPUT_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("gsid_to_input_stats", org.apache.thrift.protocol.TType.MAP, (short)8);
-  private static final org.apache.thrift.protocol.TField SID_TO_OUTPUT_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("sid_to_output_stats", org.apache.thrift.protocol.TType.MAP, (short)9);
-  private static final org.apache.thrift.protocol.TField EXEC_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("exec_stats", org.apache.thrift.protocol.TType.LIST, (short)10);
-  private static final org.apache.thrift.protocol.TField ERRORS_FIELD_DESC = new org.apache.thrift.protocol.TField("errors", org.apache.thrift.protocol.TType.LIST, (short)11);
-  private static final org.apache.thrift.protocol.TField EVENTLOG_HOST_FIELD_DESC = new org.apache.thrift.protocol.TField("eventlog_host", org.apache.thrift.protocol.TType.STRING, (short)12);
-  private static final org.apache.thrift.protocol.TField EVENTLOG_PORT_FIELD_DESC = new org.apache.thrift.protocol.TField("eventlog_port", org.apache.thrift.protocol.TType.I32, (short)13);
-  private static final org.apache.thrift.protocol.TField DEBUG_OPTIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("debug_options", org.apache.thrift.protocol.TType.STRUCT, (short)14);
-  private static final org.apache.thrift.protocol.TField TOPOLOGY_STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("topology_status", org.apache.thrift.protocol.TType.STRING, (short)15);
-
-  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
-  static {
-    schemes.put(StandardScheme.class, new ComponentPageInfoStandardSchemeFactory());
-    schemes.put(TupleScheme.class, new ComponentPageInfoTupleSchemeFactory());
-  }
-
-  private String component_id; // required
-  private ComponentType component_type; // required
-  private String topology_id; // optional
-  private String topology_name; // optional
-  private int num_executors; // optional
-  private int num_tasks; // optional
-  private Map<String,ComponentAggregateStats> window_to_stats; // optional
-  private Map<GlobalStreamId,ComponentAggregateStats> gsid_to_input_stats; // optional
-  private Map<String,ComponentAggregateStats> sid_to_output_stats; // optional
-  private List<ExecutorAggregateStats> exec_stats; // optional
-  private List<ErrorInfo> errors; // optional
-  private String eventlog_host; // optional
-  private int eventlog_port; // optional
-  private DebugOptions debug_options; // optional
-  private String topology_status; // optional
-
-  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    COMPONENT_ID((short)1, "component_id"),
-    /**
-     * 
-     * @see ComponentType
-     */
-    COMPONENT_TYPE((short)2, "component_type"),
-    TOPOLOGY_ID((short)3, "topology_id"),
-    TOPOLOGY_NAME((short)4, "topology_name"),
-    NUM_EXECUTORS((short)5, "num_executors"),
-    NUM_TASKS((short)6, "num_tasks"),
-    WINDOW_TO_STATS((short)7, "window_to_stats"),
-    GSID_TO_INPUT_STATS((short)8, "gsid_to_input_stats"),
-    SID_TO_OUTPUT_STATS((short)9, "sid_to_output_stats"),
-    EXEC_STATS((short)10, "exec_stats"),
-    ERRORS((short)11, "errors"),
-    EVENTLOG_HOST((short)12, "eventlog_host"),
-    EVENTLOG_PORT((short)13, "eventlog_port"),
-    DEBUG_OPTIONS((short)14, "debug_options"),
-    TOPOLOGY_STATUS((short)15, "topology_status");
-
-    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
-
-    static {
-      for (_Fields field : EnumSet.allOf(_Fields.class)) {
-        byName.put(field.getFieldName(), field);
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, or null if its not found.
-     */
-    public static _Fields findByThriftId(int fieldId) {
-      switch(fieldId) {
-        case 1: // COMPONENT_ID
-          return COMPONENT_ID;
-        case 2: // COMPONENT_TYPE
-          return COMPONENT_TYPE;
-        case 3: // TOPOLOGY_ID
-          return TOPOLOGY_ID;
-        case 4: // TOPOLOGY_NAME
-          return TOPOLOGY_NAME;
-        case 5: // NUM_EXECUTORS
-          return NUM_EXECUTORS;
-        case 6: // NUM_TASKS
-          return NUM_TASKS;
-        case 7: // WINDOW_TO_STATS
-          return WINDOW_TO_STATS;
-        case 8: // GSID_TO_INPUT_STATS
-          return GSID_TO_INPUT_STATS;
-        case 9: // SID_TO_OUTPUT_STATS
-          return SID_TO_OUTPUT_STATS;
-        case 10: // EXEC_STATS
-          return EXEC_STATS;
-        case 11: // ERRORS
-          return ERRORS;
-        case 12: // EVENTLOG_HOST
-          return EVENTLOG_HOST;
-        case 13: // EVENTLOG_PORT
-          return EVENTLOG_PORT;
-        case 14: // DEBUG_OPTIONS
-          return DEBUG_OPTIONS;
-        case 15: // TOPOLOGY_STATUS
-          return TOPOLOGY_STATUS;
-        default:
-          return null;
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, throwing an exception
-     * if it is not found.
-     */
-    public static _Fields findByThriftIdOrThrow(int fieldId) {
-      _Fields fields = findByThriftId(fieldId);
-      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-      return fields;
-    }
-
-    /**
-     * Find the _Fields constant that matches name, or null if its not found.
-     */
-    public static _Fields findByName(String name) {
-      return byName.get(name);
-    }
-
-    private final short _thriftId;
-    private final String _fieldName;
-
-    _Fields(short thriftId, String fieldName) {
-      _thriftId = thriftId;
-      _fieldName = fieldName;
-    }
-
-    public short getThriftFieldId() {
-      return _thriftId;
-    }
-
-    public String getFieldName() {
-      return _fieldName;
-    }
-  }
-
-  // isset id assignments
-  private static final int __NUM_EXECUTORS_ISSET_ID = 0;
-  private static final int __NUM_TASKS_ISSET_ID = 1;
-  private static final int __EVENTLOG_PORT_ISSET_ID = 2;
-  private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = {_Fields.TOPOLOGY_ID,_Fields.TOPOLOGY_NAME,_Fields.NUM_EXECUTORS,_Fields.NUM_TASKS,_Fields.WINDOW_TO_STATS,_Fields.GSID_TO_INPUT_STATS,_Fields.SID_TO_OUTPUT_STATS,_Fields.EXEC_STATS,_Fields.ERRORS,_Fields.EVENTLOG_HOST,_Fields.EVENTLOG_PORT,_Fields.DEBUG_OPTIONS,_Fields.TOPOLOGY_STATUS};
-  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
-  static {
-    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.COMPONENT_ID, new org.apache.thrift.meta_data.FieldMetaData("component_id", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.COMPONENT_TYPE, new org.apache.thrift.meta_data.FieldMetaData("component_type", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ComponentType.class)));
-    tmpMap.put(_Fields.TOPOLOGY_ID, new org.apache.thrift.meta_data.FieldMetaData("topology_id", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.TOPOLOGY_NAME, new org.apache.thrift.meta_data.FieldMetaData("topology_name", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.NUM_EXECUTORS, new org.apache.thrift.meta_data.FieldMetaData("num_executors", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
-    tmpMap.put(_Fields.NUM_TASKS, new org.apache.thrift.meta_data.FieldMetaData("num_tasks", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
-    tmpMap.put(_Fields.WINDOW_TO_STATS, new org.apache.thrift.meta_data.FieldMetaData("window_to_stats", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
-            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
-            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ComponentAggregateStats.class))));
-    tmpMap.put(_Fields.GSID_TO_INPUT_STATS, new org.apache.thrift.meta_data.FieldMetaData("gsid_to_input_stats", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
-            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GlobalStreamId.class), 
-            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ComponentAggregateStats.class))));
-    tmpMap.put(_Fields.SID_TO_OUTPUT_STATS, new org.apache.thrift.meta_data.FieldMetaData("sid_to_output_stats", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
-            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
-            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ComponentAggregateStats.class))));
-    tmpMap.put(_Fields.EXEC_STATS, new org.apache.thrift.meta_data.FieldMetaData("exec_stats", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
-            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ExecutorAggregateStats.class))));
-    tmpMap.put(_Fields.ERRORS, new org.apache.thrift.meta_data.FieldMetaData("errors", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
-            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ErrorInfo.class))));
-    tmpMap.put(_Fields.EVENTLOG_HOST, new org.apache.thrift.meta_data.FieldMetaData("eventlog_host", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.EVENTLOG_PORT, new org.apache.thrift.meta_data.FieldMetaData("eventlog_port", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
-    tmpMap.put(_Fields.DEBUG_OPTIONS, new org.apache.thrift.meta_data.FieldMetaData("debug_options", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, DebugOptions.class)));
-    tmpMap.put(_Fields.TOPOLOGY_STATUS, new org.apache.thrift.meta_data.FieldMetaData("topology_status", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    metaDataMap = Collections.unmodifiableMap(tmpMap);
-    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ComponentPageInfo.class, metaDataMap);
-  }
-
-  public ComponentPageInfo() {
-  }
-
-  public ComponentPageInfo(
-    String component_id,
-    ComponentType component_type)
-  {
-    this();
-    this.component_id = component_id;
-    this.component_type = component_type;
-  }
-
-  /**
-   * Performs a deep copy on <i>other</i>.
-   */
-  public ComponentPageInfo(ComponentPageInfo other) {
-    __isset_bitfield = other.__isset_bitfield;
-    if (other.is_set_component_id()) {
-      this.component_id = other.component_id;
-    }
-    if (other.is_set_component_type()) {
-      this.component_type = other.component_type;
-    }
-    if (other.is_set_topology_id()) {
-      this.topology_id = other.topology_id;
-    }
-    if (other.is_set_topology_name()) {
-      this.topology_name = other.topology_name;
-    }
-    this.num_executors = other.num_executors;
-    this.num_tasks = other.num_tasks;
-    if (other.is_set_window_to_stats()) {
-      Map<String,ComponentAggregateStats> __this__window_to_stats = new HashMap<String,ComponentAggregateStats>(other.window_to_stats.size());
-      for (Map.Entry<String, ComponentAggregateStats> other_element : other.window_to_stats.entrySet()) {
-
-        String other_element_key = other_element.getKey();
-        ComponentAggregateStats other_element_value = other_element.getValue();
-
-        String __this__window_to_stats_copy_key = other_element_key;
-
-        ComponentAggregateStats __this__window_to_stats_copy_value = new ComponentAggregateStats(other_element_value);
-
-        __this__window_to_stats.put(__this__window_to_stats_copy_key, __this__window_to_stats_copy_value);
-      }
-      this.window_to_stats = __this__window_to_stats;
-    }
-    if (other.is_set_gsid_to_input_stats()) {
-      Map<GlobalStreamId,ComponentAggregateStats> __this__gsid_to_input_stats = new HashMap<GlobalStreamId,ComponentAggregateStats>(other.gsid_to_input_stats.size());
-      for (Map.Entry<GlobalStreamId, ComponentAggregateStats> other_element : other.gsid_to_input_stats.entrySet()) {
-
-        GlobalStreamId other_element_key = other_element.getKey();
-        ComponentAggregateStats other_element_value = other_element.getValue();
-
-        GlobalStreamId __this__gsid_to_input_stats_copy_key = new GlobalStreamId(other_element_key);
-
-        ComponentAggregateStats __this__gsid_to_input_stats_copy_value = new ComponentAggregateStats(other_element_value);
-
-        __this__gsid_to_input_stats.put(__this__gsid_to_input_stats_copy_key, __this__gsid_to_input_stats_copy_value);
-      }
-      this.gsid_to_input_stats = __this__gsid_to_input_stats;
-    }
-    if (other.is_set_sid_to_output_stats()) {
-      Map<String,ComponentAggregateStats> __this__sid_to_output_stats = new HashMap<String,ComponentAggregateStats>(other.sid_to_output_stats.size());
-      for (Map.Entry<String, ComponentAggregateStats> other_element : other.sid_to_output_stats.entrySet()) {
-
-        String other_element_key = other_element.getKey();
-        ComponentAggregateStats other_element_value = other_element.getValue();
-
-        String __this__sid_to_output_stats_copy_key = other_element_key;
-
-        ComponentAggregateStats __this__sid_to_output_stats_copy_value = new ComponentAggregateStats(other_element_value);
-
-        __this__sid_to_output_stats.put(__this__sid_to_output_stats_copy_key, __this__sid_to_output_stats_copy_value);
-      }
-      this.sid_to_output_stats = __this__sid_to_output_stats;
-    }
-    if (other.is_set_exec_stats()) {
-      List<ExecutorAggregateStats> __this__exec_stats = new ArrayList<ExecutorAggregateStats>(other.exec_stats.size());
-      for (ExecutorAggregateStats other_element : other.exec_stats) {
-        __this__exec_stats.add(new ExecutorAggregateStats(other_element));
-      }
-      this.exec_stats = __this__exec_stats;
-    }
-    if (other.is_set_errors()) {
-      List<ErrorInfo> __this__errors = new ArrayList<ErrorInfo>(other.errors.size());
-      for (ErrorInfo other_element : other.errors) {
-        __this__errors.add(new ErrorInfo(other_element));
-      }
-      this.errors = __this__errors;
-    }
-    if (other.is_set_eventlog_host()) {
-      this.eventlog_host = other.eventlog_host;
-    }
-    this.eventlog_port = other.eventlog_port;
-    if (other.is_set_debug_options()) {
-      this.debug_options = new DebugOptions(other.debug_options);
-    }
-    if (other.is_set_topology_status()) {
-      this.topology_status = other.topology_status;
-    }
-  }
-
-  public ComponentPageInfo deepCopy() {
-    return new ComponentPageInfo(this);
-  }
-
-  @Override
-  public void clear() {
-    this.component_id = null;
-    this.component_type = null;
-    this.topology_id = null;
-    this.topology_name = null;
-    set_num_executors_isSet(false);
-    this.num_executors = 0;
-    set_num_tasks_isSet(false);
-    this.num_tasks = 0;
-    this.window_to_stats = null;
-    this.gsid_to_input_stats = null;
-    this.sid_to_output_stats = null;
-    this.exec_stats = null;
-    this.errors = null;
-    this.eventlog_host = null;
-    set_eventlog_port_isSet(false);
-    this.eventlog_port = 0;
-    this.debug_options = null;
-    this.topology_status = null;
-  }
-
-  public String get_component_id() {
-    return this.component_id;
-  }
-
-  public void set_component_id(String component_id) {
-    this.component_id = component_id;
-  }
-
-  public void unset_component_id() {
-    this.component_id = null;
-  }
-
-  /** Returns true if field component_id is set (has been assigned a value) and false otherwise */
-  public boolean is_set_component_id() {
-    return this.component_id != null;
-  }
-
-  public void set_component_id_isSet(boolean value) {
-    if (!value) {
-      this.component_id = null;
-    }
-  }
-
-  /**
-   * 
-   * @see ComponentType
-   */
-  public ComponentType get_component_type() {
-    return this.component_type;
-  }
-
-  /**
-   * 
-   * @see ComponentType
-   */
-  public void set_component_type(ComponentType component_type) {
-    this.component_type = component_type;
-  }
-
-  public void unset_component_type() {
-    this.component_type = null;
-  }
-
-  /** Returns true if field component_type is set (has been assigned a value) and false otherwise */
-  public boolean is_set_component_type() {
-    return this.component_type != null;
-  }
-
-  public void set_component_type_isSet(boolean value) {
-    if (!value) {
-      this.component_type = null;
-    }
-  }
-
-  public String get_topology_id() {
-    return this.topology_id;
-  }
-
-  public void set_topology_id(String topology_id) {
-    this.topology_id = topology_id;
-  }
-
-  public void unset_topology_id() {
-    this.topology_id = null;
-  }
-
-  /** Returns true if field topology_id is set (has been assigned a value) and false otherwise */
-  public boolean is_set_topology_id() {
-    return this.topology_id != null;
-  }
-
-  public void set_topology_id_isSet(boolean value) {
-    if (!value) {
-      this.topology_id = null;
-    }
-  }
-
-  public String get_topology_name() {
-    return this.topology_name;
-  }
-
-  public void set_topology_name(String topology_name) {
-    this.topology_name = topology_name;
-  }
-
-  public void unset_topology_name() {
-    this.topology_name = null;
-  }
-
-  /** Returns true if field topology_name is set (has been assigned a value) and false otherwise */
-  public boolean is_set_topology_name() {
-    return this.topology_name != null;
-  }
-
-  public void set_topology_name_isSet(boolean value) {
-    if (!value) {
-      this.topology_name = null;
-    }
-  }
-
-  public int get_num_executors() {
-    return this.num_executors;
-  }
-
-  public void set_num_executors(int num_executors) {
-    this.num_executors = num_executors;
-    set_num_executors_isSet(true);
-  }
-
-  public void unset_num_executors() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUM_EXECUTORS_ISSET_ID);
-  }
-
-  /** Returns true if field num_executors is set (has been assigned a value) and false otherwise */
-  public boolean is_set_num_executors() {
-    return EncodingUtils.testBit(__isset_bitfield, __NUM_EXECUTORS_ISSET_ID);
-  }
-
-  public void set_num_executors_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUM_EXECUTORS_ISSET_ID, value);
-  }
-
-  public int get_num_tasks() {
-    return this.num_tasks;
-  }
-
-  public void set_num_tasks(int num_tasks) {
-    this.num_tasks = num_tasks;
-    set_num_tasks_isSet(true);
-  }
-
-  public void unset_num_tasks() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUM_TASKS_ISSET_ID);
-  }
-
-  /** Returns true if field num_tasks is set (has been assigned a value) and false otherwise */
-  public boolean is_set_num_tasks() {
-    return EncodingUtils.testBit(__isset_bitfield, __NUM_TASKS_ISSET_ID);
-  }
-
-  public void set_num_tasks_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUM_TASKS_ISSET_ID, value);
-  }
-
-  public int get_window_to_stats_size() {
-    return (this.window_to_stats == null) ? 0 : this.window_to_stats.size();
-  }
-
-  public void put_to_window_to_stats(String key, ComponentAggregateStats val) {
-    if (this.window_to_stats == null) {
-      this.window_to_stats = new HashMap<String,ComponentAggregateStats>();
-    }
-    this.window_to_stats.put(key, val);
-  }
-
-  public Map<String,ComponentAggregateStats> get_window_to_stats() {
-    return this.window_to_stats;
-  }
-
-  public void set_window_to_stats(Map<String,ComponentAggregateStats> window_to_stats) {
-    this.window_to_stats = window_to_stats;
-  }
-
-  public void unset_window_to_stats() {
-    this.window_to_stats = null;
-  }
-
-  /** Returns true if field window_to_stats is set (has been assigned a value) and false otherwise */
-  public boolean is_set_window_to_stats() {
-    return this.window_to_stats != null;
-  }
-
-  public void set_window_to_stats_isSet(boolean value) {
-    if (!value) {
-      this.window_to_stats = null;
-    }
-  }
-
-  public int get_gsid_to_input_stats_size() {
-    return (this.gsid_to_input_stats == null) ? 0 : this.gsid_to_input_stats.size();
-  }
-
-  public void put_to_gsid_to_input_stats(GlobalStreamId key, ComponentAggregateStats val) {
-    if (this.gsid_to_input_stats == null) {
-      this.gsid_to_input_stats = new HashMap<GlobalStreamId,ComponentAggregateStats>();
-    }
-    this.gsid_to_input_stats.put(key, val);
-  }
-
-  public Map<GlobalStreamId,ComponentAggregateStats> get_gsid_to_input_stats() {
-    return this.gsid_to_input_stats;
-  }
-
-  public void set_gsid_to_input_stats(Map<GlobalStreamId,ComponentAggregateStats> gsid_to_input_stats) {
-    this.gsid_to_input_stats = gsid_to_input_stats;
-  }
-
-  public void unset_gsid_to_input_stats() {
-    this.gsid_to_input_stats = null;
-  }
-
-  /** Returns true if field gsid_to_input_stats is set (has been assigned a value) and false otherwise */
-  public boolean is_set_gsid_to_input_stats() {
-    return this.gsid_to_input_stats != null;
-  }
-
-  public void set_gsid_to_input_stats_isSet(boolean value) {
-    if (!value) {
-      this.gsid_to_input_stats = null;
-    }
-  }
-
-  public int get_sid_to_output_stats_size() {
-    return (this.sid_to_output_stats == null) ? 0 : this.sid_to_output_stats.size();
-  }
-
-  public void put_to_sid_to_output_stats(String key, ComponentAggregateStats val) {
-    if (this.sid_to_output_stats == null) {
-      this.sid_to_output_stats = new HashMap<String,ComponentAggregateStats>();
-    }
-    this.sid_to_output_stats.put(key, val);
-  }
-
-  public Map<String,ComponentAggregateStats> get_sid_to_output_stats() {
-    return this.sid_to_output_stats;
-  }
-
-  public void set_sid_to_output_stats(Map<String,ComponentAggregateStats> sid_to_output_stats) {
-    this.sid_to_output_stats = sid_to_output_stats;
-  }
-
-  public void unset_sid_to_output_stats() {
-    this.sid_to_output_stats = null;
-  }
-
-  /** Returns true if field sid_to_output_stats is set (has been assigned a value) and false otherwise */
-  public boolean is_set_sid_to_output_stats() {
-    return this.sid_to_output_stats != null;
-  }
-
-  public void set_sid_to_output_stats_isSet(boolean value) {
-    if (!value) {
-      this.sid_to_output_stats = null;
-    }
-  }
-
-  public int get_exec_stats_size() {
-    return (this.exec_stats == null) ? 0 : this.exec_stats.size();
-  }
-
-  public java.util.Iterator<ExecutorAggregateStats> get_exec_stats_iterator() {
-    return (this.exec_stats == null) ? null : this.exec_stats.iterator();
-  }
-
-  public void add_to_exec_stats(ExecutorAggregateStats elem) {
-    if (this.exec_stats == null) {
-      this.exec_stats = new ArrayList<ExecutorAggregateStats>();
-    }
-    this.exec_stats.add(elem);
-  }
-
-  public List<ExecutorAggregateStats> get_exec_stats() {
-    return this.exec_stats;
-  }
-
-  public void set_exec_stats(List<ExecutorAggregateStats> exec_stats) {
-    this.exec_stats = exec_stats;
-  }
-
-  public void unset_exec_stats() {
-    this.exec_stats = null;
-  }
-
-  /** Returns true if field exec_stats is set (has been assigned a value) and false otherwise */
-  public boolean is_set_exec_stats() {
-    return this.exec_stats != null;
-  }
-
-  public void set_exec_stats_isSet(boolean value) {
-    if (!value) {
-      this.exec_stats = null;
-    }
-  }
-
-  public int get_errors_size() {
-    return (this.errors == null) ? 0 : this.errors.size();
-  }
-
-  public java.util.Iterator<ErrorInfo> get_errors_iterator() {
-    return (this.errors == null) ? null : this.errors.iterator();
-  }
-
-  public void add_to_errors(ErrorInfo elem) {
-    if (this.errors == null) {
-      this.errors = new ArrayList<ErrorInfo>();
-    }
-    this.errors.add(elem);
-  }
-
-  public List<ErrorInfo> get_errors() {
-    return this.errors;
-  }
-
-  public void set_errors(List<ErrorInfo> errors) {
-    this.errors = errors;
-  }
-
-  public void unset_errors() {
-    this.errors = null;
-  }
-
-  /** Returns true if field errors is set (has been assigned a value) and false otherwise */
-  public boolean is_set_errors() {
-    return this.errors != null;
-  }
-
-  public void set_errors_isSet(boolean value) {
-    if (!value) {
-      this.errors = null;
-    }
-  }
-
-  public String get_eventlog_host() {
-    return this.eventlog_host;
-  }
-
-  public void set_eventlog_host(String eventlog_host) {
-    this.eventlog_host = eventlog_host;
-  }
-
-  public void unset_eventlog_host() {
-    this.eventlog_host = null;
-  }
-
-  /** Returns true if field eventlog_host is set (has been assigned a value) and false otherwise */
-  public boolean is_set_eventlog_host() {
-    return this.eventlog_host != null;
-  }
-
-  public void set_eventlog_host_isSet(boolean value) {
-    if (!value) {
-      this.eventlog_host = null;
-    }
-  }
-
-  public int get_eventlog_port() {
-    return this.eventlog_port;
-  }
-
-  public void set_eventlog_port(int eventlog_port) {
-    this.eventlog_port = eventlog_port;
-    set_eventlog_port_isSet(true);
-  }
-
-  public void unset_eventlog_port() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __EVENTLOG_PORT_ISSET_ID);
-  }
-
-  /** Returns true if field eventlog_port is set (has been assigned a value) and false otherwise */
-  public boolean is_set_eventlog_port() {
-    return EncodingUtils.testBit(__isset_bitfield, __EVENTLOG_PORT_ISSET_ID);
-  }
-
-  public void set_eventlog_port_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __EVENTLOG_PORT_ISSET_ID, value);
-  }
-
-  public DebugOptions get_debug_options() {
-    return this.debug_options;
-  }
-
-  public void set_debug_options(DebugOptions debug_options) {
-    this.debug_options = debug_options;
-  }
-
-  public void unset_debug_options() {
-    this.debug_options = null;
-  }
-
-  /** Returns true if field debug_options is set (has been assigned a value) and false otherwise */
-  public boolean is_set_debug_options() {
-    return this.debug_options != null;
-  }
-
-  public void set_debug_options_isSet(boolean value) {
-    if (!value) {
-      this.debug_options = null;
-    }
-  }
-
-  public String get_topology_status() {
-    return this.topology_status;
-  }
-
-  public void set_topology_status(String topology_status) {
-    this.topology_status = topology_status;
-  }
-
-  public void unset_topology_status() {
-    this.topology_status = null;
-  }
-
-  /** Returns true if field topology_status is set (has been assigned a value) and false otherwise */
-  public boolean is_set_topology_status() {
-    return this.topology_status != null;
-  }
-
-  public void set_topology_status_isSet(boolean value) {
-    if (!value) {
-      this.topology_status = null;
-    }
-  }
-
-  public void setFieldValue(_Fields field, Object value) {
-    switch (field) {
-    case COMPONENT_ID:
-      if (value == null) {
-        unset_component_id();
-      } else {
-        set_component_id((String)value);
-      }
-      break;
-
-    case COMPONENT_TYPE:
-      if (value == null) {
-        unset_component_type();
-      } else {
-        set_component_type((ComponentType)value);
-      }
-      break;
-
-    case TOPOLOGY_ID:
-      if (value == null) {
-        unset_topology_id();
-      } else {
-        set_topology_id((String)value);
-      }
-      break;
-
-    case TOPOLOGY_NAME:
-      if (value == null) {
-        unset_topology_name();
-      } else {
-        set_topology_name((String)value);
-      }
-      break;
-
-    case NUM_EXECUTORS:
-      if (value == null) {
-        unset_num_executors();
-      } else {
-        set_num_executors((Integer)value);
-      }
-      break;
-
-    case NUM_TASKS:
-      if (value == null) {
-        unset_num_tasks();
-      } else {
-        set_num_tasks((Integer)value);
-      }
-      break;
-
-    case WINDOW_TO_STATS:
-      if (value == null) {
-        unset_window_to_stats();
-      } else {
-        set_window_to_stats((Map<String,ComponentAggregateStats>)value);
-      }
-      break;
-
-    case GSID_TO_INPUT_STATS:
-      if (value == null) {
-        unset_gsid_to_input_stats();
-      } else {
-        set_gsid_to_input_stats((Map<GlobalStreamId,ComponentAggregateStats>)value);
-      }
-      break;
-
-    case SID_TO_OUTPUT_STATS:
-      if (value == null) {
-        unset_sid_to_output_stats();
-      } else {
-        set_sid_to_output_stats((Map<String,ComponentAggregateStats>)value);
-      }
-      break;
-
-    case EXEC_STATS:
-      if (value == null) {
-        unset_exec_stats();
-      } else {
-        set_exec_stats((List<ExecutorAggregateStats>)value);
-      }
-      break;
-
-    case ERRORS:
-      if (value == null) {
-        unset_errors();
-      } else {
-        set_errors((List<ErrorInfo>)value);
-      }
-      break;
-
-    case EVENTLOG_HOST:
-      if (value == null) {
-        unset_eventlog_host();
-      } else {
-        set_eventlog_host((String)value);
-      }
-      break;
-
-    case EVENTLOG_PORT:
-      if (value == null) {
-        unset_eventlog_port();
-      } else {
-        set_eventlog_port((Integer)value);
-      }
-      break;
-
-    case DEBUG_OPTIONS:
-      if (value == null) {
-        unset_debug_options();
-      } else {
-        set_debug_options((DebugOptions)value);
-      }
-      break;
-
-    case TOPOLOGY_STATUS:
-      if (value == null) {
-        unset_topology_status();
-      } else {
-        set_topology_status((String)value);
-      }
-      break;
-
-    }
-  }
-
-  public Object getFieldValue(_Fields field) {
-    switch (field) {
-    case COMPONENT_ID:
-      return get_component_id();
-
-    case COMPONENT_TYPE:
-      return get_component_type();
-
-    case TOPOLOGY_ID:
-      return get_topology_id();
-
-    case TOPOLOGY_NAME:
-      return get_topology_name();
-
-    case NUM_EXECUTORS:
-      return get_num_executors();
-
-    case NUM_TASKS:
-      return get_num_tasks();
-
-    case WINDOW_TO_STATS:
-      return get_window_to_stats();
-
-    case GSID_TO_INPUT_STATS:
-      return get_gsid_to_input_stats();
-
-    case SID_TO_OUTPUT_STATS:
-      return get_sid_to_output_stats();
-
-    case EXEC_STATS:
-      return get_exec_stats();
-
-    case ERRORS:
-      return get_errors();
-
-    case EVENTLOG_HOST:
-      return get_eventlog_host();
-
-    case EVENTLOG_PORT:
-      return get_eventlog_port();
-
-    case DEBUG_OPTIONS:
-      return get_debug_options();
-
-    case TOPOLOGY_STATUS:
-      return get_topology_status();
-
-    }
-    throw new IllegalStateException();
-  }
-
-  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
-  public boolean isSet(_Fields field) {
-    if (field == null) {
-      throw new IllegalArgumentException();
-    }
-
-    switch (field) {
-    case COMPONENT_ID:
-      return is_set_component_id();
-    case COMPONENT_TYPE:
-      return is_set_component_type();
-    case TOPOLOGY_ID:
-      return is_set_topology_id();
-    case TOPOLOGY_NAME:
-      return is_set_topology_name();
-    case NUM_EXECUTORS:
-      return is_set_num_executors();
-    case NUM_TASKS:
-      return is_set_num_tasks();
-    case WINDOW_TO_STATS:
-      return is_set_window_to_stats();
-    case GSID_TO_INPUT_STATS:
-      return is_set_gsid_to_input_stats();
-    case SID_TO_OUTPUT_STATS:
-      return is_set_sid_to_output_stats();
-    case EXEC_STATS:
-      return is_set_exec_stats();
-    case ERRORS:
-      return is_set_errors();
-    case EVENTLOG_HOST:
-      return is_set_eventlog_host();
-    case EVENTLOG_PORT:
-      return is_set_eventlog_port();
-    case DEBUG_OPTIONS:
-      return is_set_debug_options();
-    case TOPOLOGY_STATUS:
-      return is_set_topology_status();
-    }
-    throw new IllegalStateException();
-  }
-
-  @Override
-  public boolean equals(Object that) {
-    if (that == null)
-      return false;
-    if (that instanceof ComponentPageInfo)
-      return this.equals((ComponentPageInfo)that);
-    return false;
-  }
-
-  public boolean equals(ComponentPageInfo that) {
-    if (that == null)
-      return false;
-
-    boolean this_present_component_id = true && this.is_set_component_id();
-    boolean that_present_component_id = true && that.is_set_component_id();
-    if (this_present_component_id || that_present_component_id) {
-      if (!(this_present_component_id && that_present_component_id))
-        return false;
-      if (!this.component_id.equals(that.component_id))
-        return false;
-    }
-
-    boolean this_present_component_type = true && this.is_set_component_type();
-    boolean that_present_component_type = true && that.is_set_component_type();
-    if (this_present_component_type || that_present_component_type) {
-      if (!(this_present_component_type && that_present_component_type))
-        return false;
-      if (!this.component_type.equals(that.component_type))
-        return false;
-    }
-
-    boolean this_present_topology_id = true && this.is_set_topology_id();
-    boolean that_present_topology_id = true && that.is_set_topology_id();
-    if (this_present_topology_id || that_present_topology_id) {
-      if (!(this_present_topology_id && that_present_topology_id))
-        return false;
-      if (!this.topology_id.equals(that.topology_id))
-        return false;
-    }
-
-    boolean this_present_topology_name = true && this.is_set_topology_name();
-    boolean that_present_topology_name = true && that.is_set_topology_name();
-    if (this_present_topology_name || that_present_topology_name) {
-      if (!(this_present_topology_name && that_present_topology_name))
-        return false;
-      if (!this.topology_name.equals(that.topology_name))
-        return false;
-    }
-
-    boolean this_present_num_executors = true && this.is_set_num_executors();
-    boolean that_present_num_executors = true && that.is_set_num_executors();
-    if (this_present_num_executors || that_present_num_executors) {
-      if (!(this_present_num_executors && that_present_num_executors))
-        return false;
-      if (this.num_executors != that.num_executors)
-        return false;
-    }
-
-    boolean this_present_num_tasks = true && this.is_set_num_tasks();
-    boolean that_present_num_tasks = true && that.is_set_num_tasks();
-    if (this_present_num_tasks || that_present_num_tasks) {
-      if (!(this_present_num_tasks && that_present_num_tasks))
-        return false;
-      if (this.num_tasks != that.num_tasks)
-        return false;
-    }
-
-    boolean this_present_window_to_stats = true && this.is_set_window_to_stats();
-    boolean that_present_window_to_stats = true && that.is_set_window_to_stats();
-    if (this_present_window_to_stats || that_present_window_to_stats) {
-      if (!(this_present_window_to_stats && that_present_window_to_stats))
-        return false;
-      if (!this.window_to_stats.equals(that.window_to_stats))
-        return false;
-    }
-
-    boolean this_present_gsid_to_input_stats = true && this.is_set_gsid_to_input_stats();
-    boolean that_present_gsid_to_input_stats = true && that.is_set_gsid_to_input_stats();
-    if (this_present_gsid_to_input_stats || that_present_gsid_to_input_stats) {
-      if (!(this_present_gsid_to_input_stats && that_present_gsid_to_input_stats))
-        return false;
-      if (!this.gsid_to_input_stats.equals(that.gsid_to_input_stats))
-        return false;
-    }
-
-    boolean this_present_sid_to_output_stats = true && this.is_set_sid_to_output_stats();
-    boolean that_present_sid_to_output_stats = true && that.is_set_sid_to_output_stats();
-    if (this_present_sid_to_output_stats || that_present_sid_to_output_stats) {
-      if (!(this_present_sid_to_output_stats && that_present_sid_to_output_stats))
-        return false;
-      if (!this.sid_to_output_stats.equals(that.sid_to_output_stats))
-        return false;
-    }
-
-    boolean this_present_exec_stats = true && this.is_set_exec_stats();
-    boolean that_present_exec_stats = true && that.is_set_exec_stats();
-    if (this_present_exec_stats || that_present_exec_stats) {
-      if (!(this_present_exec_stats && that_present_exec_stats))
-        return false;
-      if (!this.exec_stats.equals(that.exec_stats))
-        return false;
-    }
-
-    boolean this_present_errors = true && this.is_set_errors();
-    boolean that_present_errors = true && that.is_set_errors();
-    if (this_present_errors || that_present_errors) {
-      if (!(this_present_errors && that_present_errors))
-        return false;
-      if (!this.errors.equals(that.errors))
-        return false;
-    }
-
-    boolean this_present_eventlog_host = true && this.is_set_eventlog_host();
-    boolean that_present_eventlog_host = true && that.is_set_eventlog_host();
-    if (this_present_eventlog_host || that_present_eventlog_host) {
-      if (!(this_present_eventlog_host && that_present_eventlog_host))
-        return false;
-      if (!this.eventlog_host.equals(that.eventlog_host))
-        return false;
-    }
-
-    boolean this_present_eventlog_port = true && this.is_set_eventlog_port();
-    boolean that_present_eventlog_port = true && that.is_set_eventlog_port();
-    if (this_present_eventlog_port || that_present_eventlog_port) {
-      if (!(this_present_eventlog_port && that_present_eventlog_port))
-        return false;
-      if (this.eventlog_port != that.eventlog_port)
-        return false;
-    }
-
-    boolean this_present_debug_options = true && this.is_set_debug_options();
-    boolean that_present_debug_options = true && that.is_set_debug_options();
-    if (this_present_debug_options || that_present_debug_options) {
-      if (!(this_present_debug_options && that_present_debug_options))
-        return false;
-      if (!this.debug_options.equals(that.debug_options))
-        return false;
-    }
-
-    boolean this_present_topology_status = true && this.is_set_topology_status();
-    boolean that_present_topology_status = true && that.is_set_topology_status();
-    if (this_present_topology_status || that_present_topology_status) {
-      if (!(this_present_topology_status && that_present_topology_status))
-        return false;
-      if (!this.topology_status.equals(that.topology_status))
-        return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    List<Object> list = new ArrayList<Object>();
-
-    boolean present_component_id = true && (is_set_component_id());
-    list.add(present_component_id);
-    if (present_component_id)
-      list.add(component_id);
-
-    boolean present_component_type = true && (is_set_component_type());
-    list.add(present_component_type);
-    if (present_component_type)
-      list.add(component_type.getValue());
-
-    boolean present_topology_id = true && (is_set_topology_id());
-    list.add(present_topology_id);
-    if (present_topology_id)
-      list.add(topology_id);
-
-    boolean present_topology_name = true && (is_set_topology_name());
-    list.add(present_topology_name);
-    if (present_topology_name)
-      list.add(topology_name);
-
-    boolean present_num_executors = true && (is_set_num_executors());
-    list.add(present_num_executors);
-    if (present_num_executors)
-      list.add(num_executors);
-
-    boolean present_num_tasks = true && (is_set_num_tasks());
-    list.add(present_num_tasks);
-    if (present_num_tasks)
-      list.add(num_tasks);
-
-    boolean present_window_to_stats = true && (is_set_window_to_stats());
-    list.add(present_window_to_stats);
-    if (present_window_to_stats)
-      list.add(window_to_stats);
-
-    boolean present_gsid_to_input_stats = true && (is_set_gsid_to_input_stats());
-    list.add(present_gsid_to_input_stats);
-    if (present_gsid_to_input_stats)
-      list.add(gsid_to_input_stats);
-
-    boolean present_sid_to_output_stats = true && (is_set_sid_to_output_stats());
-    list.add(present_sid_to_output_stats);
-    if (present_sid_to_output_stats)
-      list.add(sid_to_output_stats);
-
-    boolean present_exec_stats = true && (is_set_exec_stats());
-    list.add(present_exec_stats);
-    if (present_exec_stats)
-      list.add(exec_stats);
-
-    boolean present_errors = true && (is_set_errors());
-    list.add(present_errors);
-    if (present_errors)
-      list.add(errors);
-
-    boolean present_eventlog_host = true && (is_set_eventlog_host());
-    list.add(present_eventlog_host);
-    if (present_eventlog_host)
-      list.add(eventlog_host);
-
-    boolean present_eventlog_port = true && (is_set_eventlog_port());
-    list.add(present_eventlog_port);
-    if (present_eventlog_port)
-      list.add(eventlog_port);
-
-    boolean present_debug_options = true && (is_set_debug_options());
-    list.add(present_debug_options);
-    if (present_debug_options)
-      list.add(debug_options);
-
-    boolean present_topology_status = true && (is_set_topology_status());
-    list.add(present_topology_status);
-    if (present_topology_status)
-      list.add(topology_status);
-
-    return list.hashCode();
-  }
-
-  @Override
-  public int compareTo(ComponentPageInfo other) {
-    if (!getClass().equals(other.getClass())) {
-      return getClass().getName().compareTo(other.getClass().getName());
-    }
-
-    int lastComparison = 0;
-
-    lastComparison = Boolean.valueOf(is_set_component_id()).compareTo(other.is_set_component_id());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_component_id()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.component_id, other.component_id);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_component_type()).compareTo(other.is_set_component_type());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_component_type()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.component_type, other.component_type);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_topology_id()).compareTo(other.is_set_topology_id());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_topology_id()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.topology_id, other.topology_id);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_topology_name()).compareTo(other.is_set_topology_name());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_topology_name()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.topology_name, other.topology_name);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_num_executors()).compareTo(other.is_set_num_executors());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_num_executors()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.num_executors, other.num_executors);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_num_tasks()).compareTo(other.is_set_num_tasks());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_num_tasks()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.num_tasks, other.num_tasks);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_window_to_stats()).compareTo(other.is_set_window_to_stats());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_window_to_stats()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.window_to_stats, other.window_to_stats);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_gsid_to_input_stats()).compareTo(other.is_set_gsid_to_input_stats());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_gsid_to_input_stats()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.gsid_to_input_stats, other.gsid_to_input_stats);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_sid_to_output_stats()).compareTo(other.is_set_sid_to_output_stats());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_sid_to_output_stats()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sid_to_output_stats, other.sid_to_output_stats);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_exec_stats()).compareTo(other.is_set_exec_stats());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_exec_stats()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.exec_stats, other.exec_stats);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_errors()).compareTo(other.is_set_errors());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_errors()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.errors, other.errors);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_eventlog_host()).compareTo(other.is_set_eventlog_host());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_eventlog_host()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.eventlog_host, other.eventlog_host);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_eventlog_port()).compareTo(other.is_set_eventlog_port());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_eventlog_port()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.eventlog_port, other.eventlog_port);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_debug_options()).compareTo(other.is_set_debug_options());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_debug_options()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.debug_options, other.debug_options);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_topology_status()).compareTo(other.is_set_topology_status());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_topology_status()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.topology_status, other.topology_status);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    return 0;
-  }
-
-  public _Fields fieldForId(int fieldId) {
-    return _Fields.findByThriftId(fieldId);
-  }
-
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
-  }
-
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("ComponentPageInfo(");
-    boolean first = true;
-
-    sb.append("component_id:");
-    if (this.component_id == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.component_id);
-    }
-    first = false;
-    if (!first) sb.append(", ");
-    sb.append("component_type:");
-    if (this.component_type == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.component_type);
-    }
-    first = false;
-    if (is_set_topology_id()) {
-      if (!first) sb.append(", ");
-      sb.append("topology_id:");
-      if (this.topology_id == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.topology_id);
-      }
-      first = false;
-    }
-    if (is_set_topology_name()) {
-      if (!first) sb.append(", ");
-      sb.append("topology_name:");
-      if (this.topology_name == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.topology_name);
-      }
-      first = false;
-    }
-    if (is_set_num_executors()) {
-      if (!first) sb.append(", ");
-      sb.append("num_executors:");
-      sb.append(this.num_executors);
-      first = false;
-    }
-    if (is_set_num_tasks()) {
-      if (!first) sb.append(", ");
-      sb.append("num_tasks:");
-      sb.append(this.num_tasks);
-      first = false;
-    }
-    if (is_set_window_to_stats()) {
-      if (!first) sb.append(", ");
-      sb.append("window_to_stats:");
-      if (this.window_to_stats == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.window_to_stats);
-      }
-      first = false;
-    }
-    if (is_set_gsid_to_input_stats()) {
-      if (!first) sb.append(", ");
-      sb.append("gsid_to_input_stats:");
-      if (this.gsid_to_input_stats == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.gsid_to_input_stats);
-      }
-      first = false;
-    }
-    if (is_set_sid_to_output_stats()) {
-      if (!first) sb.append(", ");
-      sb.append("sid_to_output_stats:");
-      if (this.sid_to_output_stats == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.sid_to_output_stats);
-      }
-      first = false;
-    }
-    if (is_set_exec_stats()) {
-      if (!first) sb.append(", ");
-      sb.append("exec_stats:");
-      if (this.exec_stats == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.exec_stats);
-      }
-      first = false;
-    }
-    if (is_set_errors()) {
-      if (!first) sb.append(", ");
-      sb.append("errors:");
-      if (this.errors == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.errors);
-      }
-      first = false;
-    }
-    if (is_set_eventlog_host()) {
-      if (!first) sb.append(", ");
-      sb.append("eventlog_host:");
-      if (this.eventlog_host == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.eventlog_host);
-      }
-      first = false;
-    }
-    if (is_set_eventlog_port()) {
-      if (!first) sb.append(", ");
-      sb.append("eventlog_port:");
-      sb.append(this.eventlog_port);
-      first = false;
-    }
-    if (is_set_debug_options()) {
-      if (!first) sb.append(", ");
-      sb.append("debug_options:");
-      if (this.debug_options == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.debug_options);
-      }
-      first = false;
-    }
-    if (is_set_topology_status()) {
-      if (!first) sb.append(", ");
-      sb.append("topology_status:");
-      if (this.topology_status == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.topology_status);
-      }
-      first = false;
-    }
-    sb.append(")");
-    return sb.toString();
-  }
-
-  public void validate() throws org.apache.thrift.TException {
-    // check for required fields
-    if (!is_set_component_id()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'component_id' is unset! Struct:" + toString());
-    }
-
-    if (!is_set_component_type()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'component_type' is unset! Struct:" + toString());
-    }
-
-    // check for sub-struct validity
-    if (debug_options != null) {
-      debug_options.validate();
-    }
-  }
-
-  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
-    try {
-      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
-    try {
-      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
-      __isset_bitfield = 0;
-      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private static class ComponentPageInfoStandardSchemeFactory implements SchemeFactory {
-    public ComponentPageInfoStandardScheme getScheme() {
-      return new ComponentPageInfoStandardScheme();
-    }
-  }
-
-  private static class ComponentPageInfoStandardScheme extends StandardScheme<ComponentPageInfo> {
-
-    public void read(org.apache.thrift.protocol.TProtocol iprot, ComponentPageInfo struct) throws org.apache.thrift.TException {
-      org.apache.thrift.protocol.TField schemeField;
-      iprot.readStructBegin();
-      while (true)
-      {
-        schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
-          break;
-        }
-        switch (schemeField.id) {
-          case 1: // COMPONENT_ID
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.component_id = iprot.readString();
-              struct.set_component_id_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 2: // COMPONENT_TYPE
-            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
-              struct.component_type = backtype.storm.generated.ComponentType.findByValue(iprot.readI32());
-              struct.set_component_type_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 3: // TOPOLOGY_ID
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.topology_id = iprot.readString();
-              struct.set_topology_id_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 4: // TOPOLOGY_NAME
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.topology_name = iprot.readString();
-              struct.set_topology_name_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 5: // NUM_EXECUTORS
-            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
-              struct.num_executors = iprot.readI32();
-              struct.set_num_executors_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 6: // NUM_TASKS
-            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
-              struct.num_tasks = iprot.readI32();
-              struct.set_num_tasks_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 7: // WINDOW_TO_STATS
-            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
-              {
-                org.apache.thrift.protocol.TMap _map422 = iprot.readMapBegin();
-                struct.window_to_stats = new HashMap<String,ComponentAggregateStats>(2*_map422.size);
-                String _key423;
-                ComponentAggregateStats _val424;
-                for (int _i425 = 0; _i425 < _map422.size; ++_i425)
-                {
-                  _key423 = iprot.readString();
-                  _val424 = new ComponentAggregateStats();
-                  _val424.read(iprot);
-                  struct.window_to_stats.put(_key423, _val424);
-                }
-                iprot.readMapEnd();
-              }
-              struct.set_window_to_stats_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 8: // GSID_TO_INPUT_STATS
-            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
-              {
-                org.apache.thrift.protocol.TMap _map426 = iprot.readMapBegin();
-                struct.gsid_to_input_stats = new HashMap<GlobalStreamId,ComponentAggregateStats>(2*_map426.size);
-                GlobalStreamId _key427;
-                ComponentAggregateStats _val428;
-                for (int _i429 = 0; _i429 < _map426.size; ++_i429)
-                {
-                  _key427 = new GlobalStreamId();
-                  _key427.read(iprot);
-                  _val428 = new ComponentAggregateStats();
-                  _val428.read(iprot);
-                  struct.gsid_to_input_stats.put(_key427, _val428);
-                }
-                iprot.readMapEnd();
-              }
-              struct.set_gsid_to_input_stats_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 9: // SID_TO_OUTPUT_STATS
-            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
-              {
-                org.apache.thrift.protocol.TMap _map430 = iprot.readMapBegin();
-                struct.sid_to_output_stats = new HashMap<String,ComponentAggregateStats>(2*_map430.size);
-                String _key431;
-                ComponentAggregateStats _val432;
-                for (int _i433 = 0; _i433 < _map430.size; ++_i433)
-                {
-                  _key431 = iprot.readString();
-                  _val432 = new ComponentAggregateStats();
-                  _val432.read(iprot);
-                  struct.sid_to_output_stats.put(_key431, _val432);
-                }
-                iprot.readMapEnd();
-              }
-              struct.set_sid_to_output_stats_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 10: // EXEC_STATS
-            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
-              {
-                org.apache.thrift.protocol.TList _list434 = iprot.readListBegin();
-                struct.exec_stats = new ArrayList<ExecutorAggregateStats>(_list434.size);
-                ExecutorAggregateStats _elem435;
-                for (int _i436 = 0; _i436 < _list434.size; ++_i436)
-                {
-                  _elem435 = new ExecutorAggregateStats();
-                  _elem435.read(iprot);
-                  struct.exec_stats.add(_elem435);
-                }
-                iprot.readListEnd();
-              }
-              struct.set_exec_stats_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 11: // ERRORS
-            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
-              {
-                org.apache.thrift.protocol.TList _list437 = iprot.readListBegin();
-                struct.errors = new ArrayList<ErrorInfo>(_list437.size);
-                ErrorInfo _elem438;
-                for (int _i439 = 0; _i439 < _list437.size; ++_i439)
-                {
-                  _elem438 = new ErrorInfo();
-                  _elem438.read(iprot);
-                  struct.errors.add(_elem438);
-                }
-                iprot.readListEnd();
-              }
-              struct.set_errors_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 12: // EVENTLOG_HOST
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.eventlog_host = iprot.readString();
-              struct.set_eventlog_host_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 13: // EVENTLOG_PORT
-            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
-              struct.eventlog_port = iprot.readI32();
-              struct.set_eventlog_port_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 14: // DEBUG_OPTIONS
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-              struct.debug_options = new DebugOptions();
-              struct.debug_options.read(iprot);
-              struct.set_debug_options_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 15: // TOPOLOGY_STATUS
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.topology_status = iprot.readString();
-              struct.set_topology_status_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          default:
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-        }
-        iprot.readFieldEnd();
-      }
-      iprot.readStructEnd();
-      struct.validate();
-    }
-
-    public void write(org.apache.thrift.protocol.TProtocol oprot, ComponentPageInfo struct) throws org.apache.thrift.TException {
-      struct.validate();
-
-      oprot.writeStructBegin(STRUCT_DESC);
-      if (struct.component_id != null) {
-        oprot.writeFieldBegin(COMPONENT_ID_FIELD_DESC);
-        oprot.writeString(struct.component_id);
-        oprot.writeFieldEnd();
-      }
-      if (struct.component_type != null) {
-        oprot.writeFieldBegin(COMPONENT_TYPE_FIELD_DESC);
-        oprot.writeI32(struct.component_type.getValue());
-        oprot.writeFieldEnd();
-      }
-      if (struct.topology_id != null) {
-        if (struct.is_set_topology_id()) {
-          oprot.writeFieldBegin(TOPOLOGY_ID_FIELD_DESC);
-          oprot.writeString(struct.topology_id);
-          oprot.writeFieldEnd();
-        }
-      }
-      if (struct.topology_name != null) {
-        if (struct.is_set_topology_name()) {
-          oprot.writeFieldBegin(TOPOLOGY_NAME_FIELD_DESC);
-          oprot.writeString(struct.topology_name);
-          oprot.writeFieldEnd();
-        }
-      }
-      if (struct.is_set_num_executors()) {
-        oprot.writeFieldBegin(NUM_EXECUTORS_FIELD_DESC);
-        oprot.writeI32(struct.num_executors);
-        oprot.writeFieldEnd();
-      }
-      if (struct.is_set_num_tasks()) {
-        oprot.writeFieldBegin(NUM_TASKS_FIELD_DESC);
-        oprot.writeI32(struct.num_tasks);
-        oprot.writeFieldEnd();
-      }
-      if (struct.window_to_stats != null) {
-        if (struct.is_set_window_to_stats()) {
-          oprot.writeFieldBegin(WINDOW_TO_STATS_FIELD_DESC);
-          {
-            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.window_to_stats.size()));
-            for (Map.Entry<String, ComponentAggregateStats> _iter440 : struct.window_to_stats.entrySet())
-            {
-              oprot.writeString(_iter440.getKey());
-              _iter440.getValue().write(oprot);
-            }
-            oprot.writeMapEnd();
-          }
-          oprot.writeFieldEnd();
-        }
-      }
-      if (struct.gsid_to_input_stats != null) {
-        if (struct.is_set_gsid_to_input_stats()) {
-          oprot.writeFieldBegin(GSID_TO_INPUT_STATS_FIELD_DESC);
-          {
-            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.STRUCT, struct.gsid_to_input_stats.size()));
-            for (Map.Entry<GlobalStreamId, ComponentAggregateStats> _iter441 : struct.gsid_to_input_stats.entrySet())
-            {
-              _iter441.getKey().write(oprot);
-              _iter441.getValue().write(oprot);
-            }
-            oprot.writeMapEnd();
-          }
-          oprot.writeFieldEnd();
-        }
-      }
-      if (struct.sid_to_output_stats != null) {
-        if (struct.is_set_sid_to_output_stats()) {
-          oprot.writeFieldBegin(SID_TO_OUTPUT_STATS_FIELD_DESC);
-          {
-            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.sid_to_output_stats.size()));
-            for (Map.Entry<String, ComponentAggregateStats> _iter442 : struct.sid_to_output_stats.entrySet())
-            {
-              oprot.writeString(_iter442.getKey());
-              _iter442.getValue().write(oprot);
-            }
-            oprot.writeMapEnd();
-          }
-          oprot.writeFieldEnd();
-        }
-      }
-      if (struct.exec_stats != null) {
-        if (struct.is_set_exec_stats()) {
-          oprot.writeFieldBegin(EXEC_STATS_FIELD_DESC);
-          {
-            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.exec_stats.size()));
-            for (ExecutorAggregateStats _iter443 : struct.exec_stats)
-            {
-              _iter443.write(oprot);
-            }
-            oprot.writeListEnd();
-          }
-          oprot.writeFieldEnd();
-        }
-      }
-      if (struct.errors != null) {
-        if (struct.is_set_errors()) {
-          oprot.writeFieldBegin(ERRORS_FIELD_DESC);
-          {
-            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.errors.size()));
-            for (ErrorInfo _iter444 : struct.errors)
-            {
-              _iter444.write(oprot);
-            }
-            oprot.writeListEnd();
-          }
-          oprot.writeFieldEnd();
-        }
-      }
-      if (struct.eventlog_host != null) {
-        if (struct.is_set_eventlog_host()) {
-          oprot.writeFieldBegin(EVENTLOG_HOST_FIELD_DESC);
-          oprot.writeString(struct.eventlog_host);
-          oprot.writeFieldEnd();
-        }
-      }
-      if (struct.is_set_eventlog_port()) {
-        oprot.writeFieldBegin(EVENTLOG_PORT_FIELD_DESC);
-        oprot.writeI32(struct.eventlog_port);
-        oprot.writeFieldEnd();
-      }
-      if (struct.debug_options != null) {
-        if (struct.is_set_debug_options()) {
-          oprot.writeFieldBegin(DEBUG_OPTIONS_FIELD_DESC);
-          struct.debug_options.write(oprot);
-          oprot.writeFieldEnd();
-        }
-      }
-      if (struct.topology_status != null) {
-        if (struct.is_set_topology_status()) {
-          oprot.writeFieldBegin(TOPOLOGY_STATUS_FIELD_DESC);
-          oprot.writeString(struct.topology_status);
-          oprot.writeFieldEnd();
-        }
-      }
-      oprot.writeFieldStop();
-      oprot.writeStructEnd();
-    }
-
-  }
-
-  private static class ComponentPageInfoTupleSchemeFactory implements SchemeFactory {
-    public ComponentPageInfoTupleScheme getScheme() {
-      return new ComponentPageInfoTupleScheme();
-    }
-  }
-
-  private static class ComponentPageInfoTupleScheme extends TupleScheme<ComponentPageInfo> {
-
-    @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, ComponentPageInfo struct) throws org.apache.thrift.TException {
-      TTupleProtocol oprot = (TTupleProtocol) prot;
-      oprot.writeString(struct.component_id);
-      oprot.writeI32(struct.component_type.getValue());
-      BitSet optionals = new BitSet();
-      if (struct.is_set_topology_id()) {
-        optionals.set(0);
-      }
-      if (struct.is_set_topology_name()) {
-        optionals.set(1);
-      }
-      if (struct.is_set_num_executors()) {
-        optionals.set(2);
-      }
-      if (struct.is_set_num_tasks()) {
-        optionals.set(3);
-      }
-      if (struct.is_set_window_to_stats()) {
-        optionals.set(4);
-      }
-      if (struct.is_set_gsid_to_input_stats()) {
-        optionals.set(5);
-      }
-      if (struct.is_set_sid_to_output_stats()) {
-        optionals.set(6);
-      }
-      if (struct.is_set_exec_stats()) {
-        optionals.set(7);
-      }
-      if (struct.is_set_errors()) {
-        optionals.set(8);
-      }
-      if (struct.is_set_eventlog_host()) {
-        optionals.set(9);
-      }
-      if (struct.is_set_eventlog_port()) {
-        optionals.set(10);
-      }
-      if (struct.is_set_debug_options()) {
-        optionals.set(11);
-      }
-      if (struct.is_set_topology_status()) {
-        optionals.set(12);
-      }
-      oprot.writeBitSet(optionals, 13);
-      if (struct.is_set_topology_id()) {
-        oprot.writeString(struct.topology_id);
-      }
-      if (struct.is_set_topology_name()) {
-        oprot.writeString(struct.topology_name);
-      }
-      if (struct.is_set_num_executors()) {
-        oprot.writeI32(struct.num_executors);
-      }
-      if (struct.is_set_num_tasks()) {
-        oprot.writeI32(struct.num_tasks);
-      }
-      if (struct.is_set_window_to_stats()) {
-        {
-          oprot.writeI32(struct.window_to_stats.size());
-          for (Map.Entry<String, ComponentAggregateStats> _iter445 : struct.window_to_stats.entrySet())
-          {
-            oprot.writeString(_iter445.getKey());
-            _iter445.getValue().write(oprot);
-          }
-        }
-      }
-      if (struct.is_set_gsid_to_input_stats()) {
-        {
-          oprot.writeI32(struct.gsid_to_input_stats.size());
-          for (Map.Entry<GlobalStreamId, ComponentAggregateStats> _iter446 : struct.gsid_to_input_stats.entrySet())
-          {
-            _iter446.getKey().write(oprot);
-            _iter446.getValue().write(oprot);
-          }
-        }
-      }
-      if (struct.is_set_sid_to_output_stats()) {
-        {
-          oprot.writeI32(struct.sid_to_output_stats.size());
-          for (Map.Entry<String, ComponentAggregateStats> _iter447 : struct.sid_to_output_stats.entrySet())
-          {
-            oprot.writeString(_iter447.getKey());
-            _iter447.getValue().write(oprot);
-          }
-        }
-      }
-      if (struct.is_set_exec_stats()) {
-        {
-          oprot.writeI32(struct.exec_stats.size());
-          for (ExecutorAggregateStats _iter448 : struct.exec_stats)
-          {
-            _iter448.write(oprot);
-          }
-        }
-      }
-      if (struct.is_set_errors()) {
-        {
-          oprot.writeI32(struct.errors.size());
-          for (ErrorInfo _iter449 : struct.errors)
-          {
-            _iter449.write(oprot);
-          }
-        }
-      }
-      if (struct.is_set_eventlog_host()) {
-        oprot.writeString(struct.eventlog_host);
-      }
-      if (struct.is_set_eventlog_port()) {
-        oprot.writeI32(struct.eventlog_port);
-      }
-      if (struct.is_set_debug_options()) {
-        struct.debug_options.write(oprot);
-      }
-      if (struct.is_set_topology_status()) {
-        oprot.writeString(struct.topology_status);
-      }
-    }
-
-    @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, ComponentPageInfo struct) throws org.apache.thrift.TException {
-      TTupleProtocol iprot = (TTupleProtocol) prot;
-      struct.component_id = iprot.readString();
-      struct.set_component_id_isSet(true);
-      struct.component_type = backtype.storm.generated.ComponentType.findByValue(iprot.readI32());
-      struct.set_component_type_isSet(true);
-      BitSet incoming = iprot.readBitSet(13);
-      if (incoming.get(0)) {
-        struct.topology_id = iprot.readString();
-        struct.set_topology_id_isSet(true);
-      }
-      if (incoming.get(1)) {
-        struct.topology_name = iprot.readString();
-        struct.set_topology_name_isSet(true);
-      }
-      if (incoming.get(2)) {
-        struct.num_executors = iprot.readI32();
-        struct.set_num_executors_isSet(true);
-      }
-      if (incoming.get(3)) {
-        struct.num_tasks = iprot.readI32();
-        struct.set_num_tasks_isSet(true);
-      }
-      if (incoming.get(4)) {
-        {
-          org.apache.thrift.protocol.TMap _map450 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.window_to_stats = new HashMap<String,ComponentAggregateStats>(2*_map450.size);
-          String _key451;
-          ComponentAggregateStats _val452;
-          for (int _i453 = 0; _i453 < _map450.size; ++_i453)
-          {
-            _key451 = iprot.readString();
-            _val452 = new ComponentAggregateStats();
-            _val452.read(iprot);
-            struct.window_to_stats.put(_key451, _val452);
-          }
-        }
-        struct.set_window_to_stats_isSet(true);
-      }
-      if (incoming.get(5)) {
-        {
-          org.apache.thrift.protocol.TMap _map454 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.gsid_to_input_stats = new HashMap<GlobalStreamId,ComponentAggregateStats>(2*_map454.size);
-          GlobalStreamId _key455;
-          ComponentAggregateStats _val456;
-          for (int _i457 = 0; _i457 < _map454.size; ++_i457)
-          {
-            _key455 = new GlobalStreamId();
-            _key455.read(iprot);
-            _val456 = new ComponentAggregateStats();
-            _val456.read(iprot);
-            struct.gsid_to_input_stats.put(_key455, _val456);
-          }
-        }
-        struct.set_gsid_to_input_stats_isSet(true);
-      }
-      if (incoming.get(6)) {
-        {
-          org.apache.thrift.protocol.TMap _map458 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.sid_to_output_stats = new HashMap<String,ComponentAggregateStats>(2*_map458.size);
-          String _key459;
-          ComponentAggregateStats _val460;
-          for (int _i461 = 0; _i461 < _map458.size; ++_i461)
-          {
-            _key459 = iprot.readString();
-            _val460 = new ComponentAggregateStats();
-            _val460.read(iprot);
-            struct.sid_to_output_stats.put(_key459, _val460);
-          }
-        }
-        struct.set_sid_to_output_stats_isSet(true);
-      }
-      if (incoming.get(7)) {
-        {
-          org.apache.thrift.protocol.TList _list462 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.exec_stats = new ArrayList<ExecutorAggregateStats>(_list462.size);
-          ExecutorAggregateStats _elem463;
-          for (int _i464 = 0; _i464 < _list462.size; ++_i464)
-          {
-            _elem463 = new ExecutorAggregateStats();
-            _elem463.read(iprot);
-            struct.exec_stats.add(_elem463);
-          }
-        }
-        struct.set_exec_stats_isSet(true);
-      }
-      if (incoming.get(8)) {
-        {
-          org.apache.thrift.protocol.TList _list465 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.errors = new ArrayList<ErrorInfo>(_list465.size);
-          ErrorInfo _elem466;
-          for (int _i467 = 0; _i467 < _list465.size; ++_i467)
-          {
-            _elem466 = new ErrorInfo();
-            _elem466.read(iprot);
-            struct.errors.add(_elem466);
-          }
-        }
-        struct.set_errors_isSet(true);
-      }
-      if (incoming.get(9)) {
-        struct.eventlog_host = iprot.readString();
-        struct.set_eventlog_host_isSet(true);
-      }
-      if (incoming.get(10)) {
-        struct.eventlog_port = iprot.readI32();
-        struct.set_eventlog_port_isSet(true);
-      }
-      if (incoming.get(11)) {
-        struct.debug_options = new DebugOptions();
-        struct.debug_options.read(iprot);
-        struct.set_debug_options_isSet(true);
-      }
-      if (incoming.get(12)) {
-        struct.topology_status = iprot.readString();
-        struct.set_topology_status_isSet(true);
-      }
-    }
-  }
-
-}
-
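
A minimal sketch (assuming Thrift 0.9.3's TSerializer/TDeserializer utilities and purely hypothetical field values) of how a generated struct like the deleted ComponentPageInfo above is typically round-tripped; only component_id and component_type are required:

    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.TSerializer;
    import org.apache.thrift.protocol.TCompactProtocol;
    import backtype.storm.generated.ComponentPageInfo;
    import backtype.storm.generated.ComponentType;

    public class ComponentPageInfoRoundTrip {
        public static void main(String[] args) throws org.apache.thrift.TException {
            ComponentPageInfo info = new ComponentPageInfo();
            info.set_component_id("word-count-bolt");        // required field (hypothetical value)
            info.set_component_type(ComponentType.BOLT);     // required field
            info.set_topology_id("wordcount-1-1452549417");  // optional field (hypothetical value)

            // TCompactProtocol is not a tuple protocol, so the standard scheme above is used.
            TSerializer ser = new TSerializer(new TCompactProtocol.Factory());
            byte[] bytes = ser.serialize(info);

            ComponentPageInfo copy = new ComponentPageInfo();
            new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, bytes);
        }
    }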


[11/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/Config.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/Config.java b/storm-core/src/jvm/backtype/storm/Config.java
deleted file mode 100644
index 8c597e0..0000000
--- a/storm-core/src/jvm/backtype/storm/Config.java
+++ /dev/null
@@ -1,2335 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm;
-
-import backtype.storm.scheduler.resource.strategies.eviction.IEvictionStrategy;
-import backtype.storm.scheduler.resource.strategies.priority.ISchedulingPriorityStrategy;
-import backtype.storm.scheduler.resource.strategies.scheduling.IStrategy;
-import backtype.storm.serialization.IKryoDecorator;
-import backtype.storm.serialization.IKryoFactory;
-import backtype.storm.validation.ConfigValidationAnnotations.*;
-import backtype.storm.validation.ConfigValidation.*;
-import com.esotericsoftware.kryo.Serializer;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Topology configs are specified as a plain old map. This class provides a
- * convenient way to create a topology config map by providing setter methods for
- * all the configs that can be set. It also makes it easier to do things like add
- * serializations.
- *
- * This class also provides constants for all the configurations possible on
- * a Storm cluster and Storm topology. Each constant is paired with an annotation
- * that defines the validity criterion of the corresponding field. Default
- * values for these configs can be found in defaults.yaml.
- *
- * Note that you may put other configurations in any of the configs. Storm
- * will ignore anything it doesn't recognize, but your topologies are free to make
- * use of them by reading them in the prepare method of Bolts or the open method of
- * Spouts.
- */
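
A minimal sketch of the usage described above (the custom key and value, the MyDataType class, and the topology variable are hypothetical; the setters and constants are the ones defined in this class):

    // When building and submitting the topology (checked exceptions omitted in this sketch):
    Config conf = new Config();
    conf.setNumWorkers(2);
    conf.setDebug(true);                           // same effect as conf.put(Config.TOPOLOGY_DEBUG, true)
    conf.registerSerialization(MyDataType.class);  // "add serializations", as mentioned above
    conf.put("my.custom.setting", 42);             // unrecognized keys are passed through untouched
    StormSubmitter.submitTopology("my-topology", conf, topology);

    // Later, in a bolt's prepare (or a spout's open), the same map comes back:
    @Override
    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
        Number custom = (Number) stormConf.get("my.custom.setting");
    }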
-public class Config extends HashMap<String, Object> {
-
-    //DO NOT CHANGE UNLESS WE ADD IN STATE NOT STORED IN THE PARENT CLASS
-    private static final long serialVersionUID = -1550278723792864455L;
-
-    /**
-     * This is part of a temporary workaround to a ZK bug, it is the 'scheme:acl' for
-     * the user Nimbus and Supervisors use to authenticate with ZK.
-     */
-    @isString
-    public static final String STORM_ZOOKEEPER_SUPERACL = "storm.zookeeper.superACL";
-
-    /**
-     * The transporter for communication among Storm tasks
-     */
-    @isString
-    public static final String STORM_MESSAGING_TRANSPORT = "storm.messaging.transport";
-
-    /**
-     * Netty based messaging: The buffer size for send/recv buffer
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String STORM_MESSAGING_NETTY_BUFFER_SIZE = "storm.messaging.netty.buffer_size";
-
-    /**
-     * Netty based messaging: Sets the backlog value to specify when the channel binds to a local address
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String STORM_MESSAGING_NETTY_SOCKET_BACKLOG = "storm.messaging.netty.socket.backlog";
-
-    /**
-     * Netty based messaging: The max # of retries that a peer will perform when a remote is not accessible.
-     * @deprecated Since netty clients should never stop reconnecting, this setting no longer makes sense.
-     */
-    @Deprecated
-    @isInteger
-    public static final String STORM_MESSAGING_NETTY_MAX_RETRIES = "storm.messaging.netty.max_retries";
-
-    /**
-     * Netty based messaging: The min # of milliseconds that a peer will wait.
-     */
-    @isInteger
-    @isPositiveNumber(includeZero = true)
-    public static final String STORM_MESSAGING_NETTY_MIN_SLEEP_MS = "storm.messaging.netty.min_wait_ms";
-
-    /**
-     * Netty based messaging: The max # of milliseconds that a peer will wait.
-     */
-    @isInteger
-    @isPositiveNumber(includeZero = true)
-    public static final String STORM_MESSAGING_NETTY_MAX_SLEEP_MS = "storm.messaging.netty.max_wait_ms";
-
-    /**
-     * Netty based messaging: The # of worker threads for the server.
-     */
-    @isInteger
-    @isPositiveNumber(includeZero = true)
-    public static final String STORM_MESSAGING_NETTY_SERVER_WORKER_THREADS = "storm.messaging.netty.server_worker_threads";
-
-    /**
-     * Netty based messaging: The # of worker threads for the client.
-     */
-    @isInteger
-    public static final String STORM_MESSAGING_NETTY_CLIENT_WORKER_THREADS = "storm.messaging.netty.client_worker_threads";
-
-    /**
-     * If the Netty messaging layer is busy, the Netty client will try to batch messages as much as possible, up to STORM_NETTY_MESSAGE_BATCH_SIZE bytes
-     */
-    @isInteger
-    public static final String STORM_NETTY_MESSAGE_BATCH_SIZE = "storm.messaging.netty.transfer.batch.size";
-
-    /**
-     * The interval at which we check whether the Netty channel is writable and try to write pending messages
-     */
-    @isInteger
-    public static final String STORM_NETTY_FLUSH_CHECK_INTERVAL_MS = "storm.messaging.netty.flush.check.interval.ms";
-
-    /**
-     * Netty based messaging: Is authentication required for Netty messaging from client worker process to server worker process.
-     */
-    @isBoolean
-    public static final String STORM_MESSAGING_NETTY_AUTHENTICATION = "storm.messaging.netty.authentication";
-
-    /**
-     * The delegate for serializing metadata, should be used for serialized objects stored in zookeeper and on disk.
-     * This is NOT used for compressing serialized tuples sent between topologies.
-     */
-    @isString
-    public static final String STORM_META_SERIALIZATION_DELEGATE = "storm.meta.serialization.delegate";
-
-    /**
-     * A list of hosts of ZooKeeper servers used to manage the cluster.
-     */
-    @isStringList
-    public static final String STORM_ZOOKEEPER_SERVERS = "storm.zookeeper.servers";
-
-    /**
-     * The port Storm will use to connect to each of the ZooKeeper servers.
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String STORM_ZOOKEEPER_PORT = "storm.zookeeper.port";
-
-    /**
-     * A list of hosts of Exhibitor servers used to discover/maintain connection to ZooKeeper cluster.
-     * Any configured ZooKeeper servers will be used for the curator/exhibitor backup connection string.
-     */
-    @isStringList
-    public static final String STORM_EXHIBITOR_SERVERS = "storm.exhibitor.servers";
-
-    /**
-     * The port Storm will use to connect to each of the exhibitor servers.
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String STORM_EXHIBITOR_PORT = "storm.exhibitor.port";
-
-    /**
-     * A directory on the local filesystem used by Storm for any local
-     * filesystem usage it needs. The directory must exist and the Storm daemons must
-     * have permission to read/write from this location.
-     */
-    @isString
-    public static final String STORM_LOCAL_DIR = "storm.local.dir";
-
-    /**
-     * A directory that holds configuration files for log4j2.
-     * It can be either a relative or an absolute directory.
-     * If relative, it is relative to the storm's home directory.
-     */
-    @isString
-    public static final String STORM_LOG4J2_CONF_DIR = "storm.log4j2.conf.dir";
-
-    /**
-     * A global task scheduler used to assign topologies' tasks to supervisors' workers.
-     *
-     * If this is not set, a default system scheduler will be used.
-     */
-    @isString
-    public static final String STORM_SCHEDULER = "storm.scheduler";
-
-    /**
-     * Whether we want to display all the resource capacity and scheduled usage on the UI page.
-     * We suggest setting this variable if you are using any kind of resource-related scheduler.
-     *
-     * If this is not set, we will not display resource capacity and usage on the UI.
-     */
-    @isBoolean
-    public static final String SCHEDULER_DISPLAY_RESOURCE = "scheduler.display.resource";
-
-    /**
-     * The mode this Storm cluster is running in. Either "distributed" or "local".
-     */
-    @isString
-    public static final String STORM_CLUSTER_MODE = "storm.cluster.mode";
-
-    /**
-     * Which network topography detection class should be used.
-     * Given a list of supervisor hostnames (or IP addresses), this class would return a list of
-     * rack names that correspond to the supervisors. This information is stored in Cluster.java, and
-     * is used in the resource aware scheduler.
-     */
-    @NotNull
-    @isImplementationOfClass(implementsClass = backtype.storm.networktopography.DNSToSwitchMapping.class)
-    public static final String STORM_NETWORK_TOPOGRAPHY_PLUGIN = "storm.network.topography.plugin";
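
A minimal sketch of such a plugin, assuming backtype.storm.networktopography.DNSToSwitchMapping exposes a resolve(List<String>) method returning a host-to-rack map, as the description above suggests (the class name and the subnet-based rack rule are hypothetical):

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import backtype.storm.networktopography.DNSToSwitchMapping;

    public class SubnetRackMapping implements DNSToSwitchMapping {
        @Override
        public Map<String, String> resolve(List<String> names) {
            Map<String, String> rackByHost = new HashMap<String, String>();
            for (String host : names) {
                // Purely illustrative rule: "10.20.30.40" ends up in rack "10.20.30".
                int lastDot = host.lastIndexOf('.');
                rackByHost.put(host, lastDot > 0 ? host.substring(0, lastDot) : "default-rack");
            }
            return rackByHost;
        }
    }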
-
-    /**
-     * The hostname the supervisors/workers should report to nimbus. If unset, Storm will
-     * get the hostname to report by calling <code>InetAddress.getLocalHost().getCanonicalHostName()</code>.
-     *
-     * You should set this config when you don't have DNS that supervisors/workers
-     * can use to find each other based on the hostname obtained from calls to
-     * <code>InetAddress.getLocalHost().getCanonicalHostName()</code>.
-     */
-    @isString
-    public static final String STORM_LOCAL_HOSTNAME = "storm.local.hostname";
-
-    /**
-     * The plugin that will convert a principal to a local user.
-     */
-    @isString
-    public static final String STORM_PRINCIPAL_TO_LOCAL_PLUGIN = "storm.principal.tolocal";
-
-    /**
-     * The plugin that will provide user groups service
-     */
-    @isString
-    public static final String STORM_GROUP_MAPPING_SERVICE_PROVIDER_PLUGIN = "storm.group.mapping.service";
-
-    /**
-     * The maximum number of seconds the group mapping service will cache user groups
-     */
-    @isInteger
-    public static final String STORM_GROUP_MAPPING_SERVICE_CACHE_DURATION_SECS = "storm.group.mapping.service.cache.duration.secs";
-
-    /**
-     * Initialization parameters for the group mapping service plugin.
-     * Provides a way for a @link{STORM_GROUP_MAPPING_SERVICE_PROVIDER_PLUGIN}
-     * implementation to access optional settings.
-     */
-    @isType(type=Map.class)
-    public static final String STORM_GROUP_MAPPING_SERVICE_PARAMS = "storm.group.mapping.service.params";
-
-    /**
-     * The default transport plug-in for Thrift client/server communication
-     */
-    @isString
-    public static final String STORM_THRIFT_TRANSPORT_PLUGIN = "storm.thrift.transport";
-
-    /**
-     * The serializer class for ListDelegate (tuple payload).
-     * The default serializer will be ListDelegateSerializer
-     */
-    @isString
-    public static final String TOPOLOGY_TUPLE_SERIALIZER = "topology.tuple.serializer";
-
-    /**
-     * Disable load aware grouping support.
-     */
-    @isBoolean
-    @NotNull
-    public static final String TOPOLOGY_DISABLE_LOADAWARE_MESSAGING = "topology.disable.loadaware.messaging";
-
-    /**
-     * Try to serialize all tuples, even for local transfers.  This should only be used
-     * for testing, as a sanity check that all of your tuples are set up properly.
-     */
-    @isBoolean
-    public static final String TOPOLOGY_TESTING_ALWAYS_TRY_SERIALIZE = "topology.testing.always.try.serialize";
-
-    /**
-     * Whether or not to use ZeroMQ for messaging in local mode. If this is set
-     * to false, then Storm will use a pure-Java messaging system. The purpose
-     * of this flag is to make it easy to run Storm in local mode by eliminating
-     * the need for native dependencies, which can be difficult to install.
-     *
-     * Defaults to false.
-     */
-    @isBoolean
-    public static final String STORM_LOCAL_MODE_ZMQ = "storm.local.mode.zmq";
-
-    /**
-     * The root location at which Storm stores data in ZooKeeper.
-     */
-    @isString
-    public static final String STORM_ZOOKEEPER_ROOT = "storm.zookeeper.root";
-
-    /**
-     * The session timeout for clients to ZooKeeper.
-     */
-    @isInteger
-    public static final String STORM_ZOOKEEPER_SESSION_TIMEOUT = "storm.zookeeper.session.timeout";
-
-    /**
-     * The connection timeout for clients to ZooKeeper.
-     */
-    @isInteger
-    public static final String STORM_ZOOKEEPER_CONNECTION_TIMEOUT = "storm.zookeeper.connection.timeout";
-
-    /**
-     * The number of times to retry a Zookeeper operation.
-     */
-    @isInteger
-    public static final String STORM_ZOOKEEPER_RETRY_TIMES="storm.zookeeper.retry.times";
-
-    /**
-     * The interval between retries of a Zookeeper operation.
-     */
-    @isInteger
-    public static final String STORM_ZOOKEEPER_RETRY_INTERVAL="storm.zookeeper.retry.interval";
-
-    /**
-     * The ceiling of the interval between retries of a Zookeeper operation.
-     */
-    @isInteger
-    public static final String STORM_ZOOKEEPER_RETRY_INTERVAL_CEILING="storm.zookeeper.retry.intervalceiling.millis";
-
-    /**
-     * The cluster Zookeeper authentication scheme to use, e.g. "digest". Defaults to no authentication.
-     */
-    @isString
-    public static final String STORM_ZOOKEEPER_AUTH_SCHEME="storm.zookeeper.auth.scheme";
-
-    /**
-     * A string representing the payload for cluster Zookeeper authentication.
-     * It gets serialized using UTF-8 encoding during authentication.
-     * Note that if this is set to something with a secret (as when using
-     * digest authentication) then it should only be set in the
-     * storm-cluster-auth.yaml file.
-     * The storm-cluster-auth.yaml file should then be protected with
-     * appropriate permissions that deny access from workers.
-     */
-    @isString
-    public static final String STORM_ZOOKEEPER_AUTH_PAYLOAD="storm.zookeeper.auth.payload";
-
-    /**
-     * The topology Zookeeper authentication scheme to use, e.g. "digest". Defaults to no authentication.
-     */
-    @isString
-    public static final String STORM_ZOOKEEPER_TOPOLOGY_AUTH_SCHEME="storm.zookeeper.topology.auth.scheme";
-
-    /**
-     * A string representing the payload for topology Zookeeper authentication. It gets serialized using UTF-8 encoding during authentication.
-     */
-    @isString
-    public static final String STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD="storm.zookeeper.topology.auth.payload";
-
-    /**
-     * The URI path to use when polling the Exhibitor cluster.
-     */
-    @isString
-    public static final String STORM_EXHIBITOR_URIPATH="storm.exhibitor.poll.uripath";
-
-    /**
-     * How often to poll Exhibitor cluster in millis.
-     */
-    @isInteger
-    public static final String STORM_EXHIBITOR_POLL="storm.exhibitor.poll.millis";
-
-    /**
-     * The number of times to retry an Exhibitor operation.
-     */
-    @isInteger
-    public static final String STORM_EXHIBITOR_RETRY_TIMES="storm.exhibitor.retry.times";
-
-    /**
-     * The interval between retries of an Exhibitor operation.
-     */
-    @isInteger
-    public static final String STORM_EXHIBITOR_RETRY_INTERVAL="storm.exhibitor.retry.interval";
-
-    /**
-     * The ceiling of the interval between retries of an Exhibitor operation.
-     */
-    @isInteger
-    public static final String STORM_EXHIBITOR_RETRY_INTERVAL_CEILING="storm.exhibitor.retry.intervalceiling.millis";
-
-    /**
-     * The id assigned to a running topology. The id is the storm name with a unique nonce appended.
-     */
-    @isString
-    public static final String STORM_ID = "storm.id";
-
-    /**
-     * The workers-artifacts directory (where we place all workers' logs) can be either absolute or relative.
-     * By default, ${storm.log.dir}/workers-artifacts is where worker logs go.
-     * If the setting is a relative directory, it is relative to storm.log.dir.
-     */
-    @isString
-    public static final String STORM_WORKERS_ARTIFACTS_DIR = "storm.workers.artifacts.dir";
-
-    /**
-     * The directory where storm's health scripts go.
-     */
-    @isString
-    public static final String STORM_HEALTH_CHECK_DIR = "storm.health.check.dir";
-
-    /**
-     * The time to allow any given healthcheck script to run before it
-     * is marked failed due to timeout
-     */
-    @isNumber
-    public static final String STORM_HEALTH_CHECK_TIMEOUT_MS = "storm.health.check.timeout.ms";
-
-    /**
-     * The number of times to retry a Nimbus operation.
-     */
-    @isNumber
-    public static final String STORM_NIMBUS_RETRY_TIMES="storm.nimbus.retry.times";
-
-    /**
-     * The starting interval between exponential backoff retries of a Nimbus operation.
-     */
-    @isNumber
-    public static final String STORM_NIMBUS_RETRY_INTERVAL="storm.nimbus.retry.interval.millis";
-
-    /**
-     * The ceiling of the interval between retries when a client connects to Nimbus.
-     */
-    @isNumber
-    public static final String STORM_NIMBUS_RETRY_INTERVAL_CEILING="storm.nimbus.retry.intervalceiling.millis";
-
-    /**
-     * The ClusterState factory that the worker will use to create a ClusterState
-     * to store state in. Defaults to ZooKeeper.
-     */
-    @isString
-    public static final String STORM_CLUSTER_STATE_STORE = "storm.cluster.state.store";
-
-    /**
-     * The Nimbus transport plug-in for Thrift client/server communication
-     */
-    @isString
-    public static final String NIMBUS_THRIFT_TRANSPORT_PLUGIN = "nimbus.thrift.transport";
-
-    /**
-     * The host that the master server is running on. Kept only for backward compatibility;
-     * its usage is deprecated in favor of the nimbus.seeds config.
-     */
-    @Deprecated
-    @isString
-    public static final String NIMBUS_HOST = "nimbus.host";
-
-    /**
-     * List of seed nimbus hosts to use for leader nimbus discovery.
-     */
-    @isStringList
-    public static final String NIMBUS_SEEDS = "nimbus.seeds";
-
-    /**
-     * Which port the Thrift interface of Nimbus should run on. Clients should
-     * connect to this port to upload jars and submit topologies.
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String NIMBUS_THRIFT_PORT = "nimbus.thrift.port";
-
-    /**
-     * The number of threads that should be used by the nimbus thrift server.
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String NIMBUS_THRIFT_THREADS = "nimbus.thrift.threads";
-
-    /**
-     * A list of users that are cluster admins and can run any command.  To use this set
-     * nimbus.authorizer to backtype.storm.security.auth.authorizer.SimpleACLAuthorizer
-     */
-    @isStringList
-    public static final String NIMBUS_ADMINS = "nimbus.admins";
-
-    /**
-     * A list of users that are the only ones allowed to run user operations on the storm cluster.
-     * To use this set nimbus.authorizer to backtype.storm.security.auth.authorizer.SimpleACLAuthorizer
-     */
-    @isStringList
-    public static final String NIMBUS_USERS = "nimbus.users";
-
-    /**
-     * A list of groups; users belonging to these groups are the only ones allowed to run user operations on the storm cluster.
-     * To use this set nimbus.authorizer to backtype.storm.security.auth.authorizer.SimpleACLAuthorizer
-     */
-    @isStringList
-    public static final String NIMBUS_GROUPS = "nimbus.groups";
-
-    /**
-     * A list of users that run the supervisors and should be authorized to interact with
-     * nimbus as a supervisor would.  To use this set
-     * nimbus.authorizer to backtype.storm.security.auth.authorizer.SimpleACLAuthorizer
-     */
-    @isStringList
-    public static final String NIMBUS_SUPERVISOR_USERS = "nimbus.supervisor.users";
-
-    /**
-     * This is the user that the Nimbus daemon process is running as. May be used when security
-     * is enabled to authorize actions in the cluster.
-     */
-    @isString
-    public static final String NIMBUS_DAEMON_USER = "nimbus.daemon.user";
-
-    /**
-     * The maximum buffer size thrift should use when reading messages.
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String NIMBUS_THRIFT_MAX_BUFFER_SIZE = "nimbus.thrift.max_buffer_size";
-
-    /**
-     * This parameter is used by the storm-deploy project to configure the
-     * jvm options for the nimbus daemon.
-     */
-    @isString
-    public static final String NIMBUS_CHILDOPTS = "nimbus.childopts";
-
-
-    /**
-     * How long a task can go without heartbeating before nimbus considers the
-     * task dead and reassigns it to another location.
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String NIMBUS_TASK_TIMEOUT_SECS = "nimbus.task.timeout.secs";
-
-
-    /**
-     * How often nimbus should wake up to check heartbeats and do reassignments. Note
-     * that if a machine ever goes down Nimbus will immediately wake up and take action.
-     * This parameter is for checking for failures when there's no explicit event like that
-     * occurring.
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String NIMBUS_MONITOR_FREQ_SECS = "nimbus.monitor.freq.secs";
-
-    /**
-     * How often nimbus should wake the cleanup thread to clean the inbox.
-     * @see #NIMBUS_INBOX_JAR_EXPIRATION_SECS
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String NIMBUS_CLEANUP_INBOX_FREQ_SECS = "nimbus.cleanup.inbox.freq.secs";
-
-    /**
-     * The length of time a jar file lives in the inbox before being deleted by the cleanup thread.
-     *
-     * Probably keep this value greater than or equal to NIMBUS_CLEANUP_INBOX_FREQ_SECS.
-     * Note that the time it takes to delete an inbox jar file is going to be somewhat more than
-     * NIMBUS_INBOX_JAR_EXPIRATION_SECS (depending on what NIMBUS_CLEANUP_INBOX_FREQ_SECS
-     * is set to).
-     * @see #NIMBUS_CLEANUP_INBOX_FREQ_SECS
-     */
-    @isInteger
-    public static final String NIMBUS_INBOX_JAR_EXPIRATION_SECS = "nimbus.inbox.jar.expiration.secs";
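
As a hypothetical worked example of the interaction described above: with nimbus.cleanup.inbox.freq.secs = 600 and nimbus.inbox.jar.expiration.secs = 3600, a jar uploaded to the inbox becomes eligible for deletion after 3600 seconds but is only removed on the next cleanup pass, i.e. somewhere between 3600 and roughly 4200 seconds after upload.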
-
-    /**
-     * How long a supervisor can go without heartbeating before nimbus considers it dead
-     * and stops assigning new work to it.
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String NIMBUS_SUPERVISOR_TIMEOUT_SECS = "nimbus.supervisor.timeout.secs";
-
-    /**
-     * A special timeout used when a task is initially launched. During launch, this is the timeout
-     * used until the first heartbeat, overriding nimbus.task.timeout.secs.
-     *
-     * <p>A separate timeout exists for launch because there can be quite a bit of overhead
-     * to launching new JVM's and configuring them.</p>
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String NIMBUS_TASK_LAUNCH_SECS = "nimbus.task.launch.secs";
-
-    /**
-     * During upload/download with the master, how long an upload or download connection is idle
-     * before nimbus considers it dead and drops the connection.
-     */
-    @isInteger
-    public static final String NIMBUS_FILE_COPY_EXPIRATION_SECS = "nimbus.file.copy.expiration.secs";
-
-    /**
-     * A custom class implementing ITopologyValidator that is run whenever a
-     * topology is submitted. Can be used to provide business-specific logic for
-     * whether topologies are allowed to run or not.
-     */
-    @isString
-    public static final String NIMBUS_TOPOLOGY_VALIDATOR = "nimbus.topology.validator";
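
A minimal sketch of such a validator, assuming the two-method prepare/validate shape of backtype.storm.nimbus.ITopologyValidator in this code base (the class name, package, and the 50-bolt limit are hypothetical):

    import java.util.Map;
    import backtype.storm.generated.InvalidTopologyException;
    import backtype.storm.generated.StormTopology;
    import backtype.storm.nimbus.ITopologyValidator;

    public class MaxBoltCountValidator implements ITopologyValidator {
        @Override
        public void prepare(Map stormConf) {
            // Nothing to initialize in this sketch.
        }

        @Override
        public void validate(String topologyName, Map topologyConf, StormTopology topology)
                throws InvalidTopologyException {
            // Hypothetical business rule: refuse topologies that declare more than 50 bolts.
            if (topology.get_bolts_size() > 50) {
                throw new InvalidTopologyException(topologyName + " declares too many bolts");
            }
        }
    }

Such a class would be enabled by pointing nimbus.topology.validator at its fully qualified name in the cluster configuration.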
-
-    /**
-     * Class name for authorization plugin for Nimbus
-     */
-    @isString
-    public static final String NIMBUS_AUTHORIZER = "nimbus.authorizer";
-
-    /**
-     * The class name of the authorization plugin Nimbus uses for impersonation requests.
-     */
-    @isString
-    public static final String NIMBUS_IMPERSONATION_AUTHORIZER = "nimbus.impersonation.authorizer";
-
-    /**
-     * Impersonation user ACL config entries.
-     */
-    @isMapEntryCustom(keyValidatorClasses = {StringValidator.class}, valueValidatorClasses = {ImpersonationAclUserEntryValidator.class})
-    public static final String NIMBUS_IMPERSONATION_ACL = "nimbus.impersonation.acl";
-
-    /**
-     * How often nimbus should wake up to renew credentials if needed.
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String NIMBUS_CREDENTIAL_RENEW_FREQ_SECS = "nimbus.credential.renewers.freq.secs";
-
-    /**
-     * A list of credential renewers that nimbus should load.
-     */
-    @isStringList
-    public static final String NIMBUS_CREDENTIAL_RENEWERS = "nimbus.credential.renewers.classes";
-
-    /**
-     * A list of plugins that nimbus should load during topology submission to populate
-     * credentials on the user's behalf.
-     */
-    @isStringList
-    public static final String NIMBUS_AUTO_CRED_PLUGINS = "nimbus.autocredential.plugins.classes";
-
-    /**
-     * FQCN of a class that implements {@code ISubmitterHook}. @see ISubmitterHook for details.
-     */
-
-    @isString
-    public static final String STORM_TOPOLOGY_SUBMISSION_NOTIFIER_PLUGIN = "storm.topology.submission.notifier.plugin.class";
-
-    /**
-     * FQCN of a class that implements {@code ITopologyActionNotifierPlugin}. @see backtype.storm.nimbus.ITopologyActionNotifierPlugin for details.
-     */
-    public static final String NIMBUS_TOPOLOGY_ACTION_NOTIFIER_PLUGIN = "nimbus.topology.action.notifier.plugin.class";
-    public static final Object NIMBUS_TOPOLOGY_ACTION_NOTIFIER_PLUGIN_SCHEMA = String.class;
-
-    /**
-     * Storm UI binds to this host/interface.
-     */
-    @isString
-    public static final String UI_HOST = "ui.host";
-
-    /**
-     * Storm UI binds to this port.
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String UI_PORT = "ui.port";
-
-    /**
-     * Storm UI project bug tracker link for reporting issues.
-     */
-    @isString
-    public static final String UI_PROJECT_BUGTRACKER_URL = "ui.project.bugtracker.url";
-
-    /**
-     * Storm UI Central Logging URL.
-     */
-    @isString
-    public static final String UI_CENTRAL_LOGGING_URL = "ui.central.logging.url";
-
-    /**
-     * HTTP UI port for log viewer
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String LOGVIEWER_PORT = "logviewer.port";
-
-    /**
-     * Childopts for log viewer java process.
-     */
-    @isString
-    public static final String LOGVIEWER_CHILDOPTS = "logviewer.childopts";
-
-    /**
-     * How often to clean up old log files
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String LOGVIEWER_CLEANUP_INTERVAL_SECS = "logviewer.cleanup.interval.secs";
-
-    /**
-     * How many minutes must pass since a log was last modified before it is considered for clean-up
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String LOGVIEWER_CLEANUP_AGE_MINS = "logviewer.cleanup.age.mins";
-
-    /**
-     * The maximum total size, in MB, that all worker log files can take up
-     */
-    @isPositiveNumber
-    public static final String LOGVIEWER_MAX_SUM_WORKER_LOGS_SIZE_MB = "logviewer.max.sum.worker.logs.size.mb";
-
-    /**
-     * The maximum size, in MB, that a single worker's log files can take up
-     */
-    @isPositiveNumber
-    public static final String LOGVIEWER_MAX_PER_WORKER_LOGS_SIZE_MB = "logviewer.max.per.worker.logs.size.mb";
-
-    /**
-     * Storm Logviewer HTTPS port
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String LOGVIEWER_HTTPS_PORT = "logviewer.https.port";
-
-    /**
-     * Path to the keystore containing the certs used by Storm Logviewer for HTTPS communications
-     */
-    @isString
-    public static final String LOGVIEWER_HTTPS_KEYSTORE_PATH = "logviewer.https.keystore.path";
-
-    /**
-     * Password for the keystore for HTTPS for Storm Logviewer
-     */
-    @isString
-    public static final String LOGVIEWER_HTTPS_KEYSTORE_PASSWORD = "logviewer.https.keystore.password";
-
-    /**
-     * Type of the keystore for HTTPS for Storm Logviewer.
-     * see http://docs.oracle.com/javase/8/docs/api/java/security/KeyStore.html for more details.
-     */
-    @isString
-    public static final String LOGVIEWER_HTTPS_KEYSTORE_TYPE = "logviewer.https.keystore.type";
-
-    /**
-     * Password to the private key in the keystore for setting up HTTPS (SSL).
-     */
-    @isString
-    public static final String LOGVIEWER_HTTPS_KEY_PASSWORD = "logviewer.https.key.password";
-
-    /**
-     * Path to the truststore containing the certs used by Storm Logviewer for HTTPS communications
-     */
-    @isString
-    public static final String LOGVIEWER_HTTPS_TRUSTSTORE_PATH = "logviewer.https.truststore.path";
-
-    /**
-     * Password for the truststore for HTTPS for Storm Logviewer
-     */
-    @isString
-    public static final String LOGVIEWER_HTTPS_TRUSTSTORE_PASSWORD = "logviewer.https.truststore.password";
-
-    /**
-     * Type of the truststore for HTTPS for Storm Logviewer.
-     * see http://docs.oracle.com/javase/8/docs/api/java/security/KeyStore.html for more details.
-     */
-    @isString
-    public static final String LOGVIEWER_HTTPS_TRUSTSTORE_TYPE = "logviewer.https.truststore.type";
-
-    /**
-     * Whether the Storm Logviewer HTTPS (SSL) endpoint should request (want) client certificate authentication.
-     */
-    @isBoolean
-    public static final String LOGVIEWER_HTTPS_WANT_CLIENT_AUTH = "logviewer.https.want.client.auth";
-
-    /**
-     * Whether the Storm Logviewer HTTPS server should require an SSL client certificate.
-     */
-    @isBoolean
-    public static final String LOGVIEWER_HTTPS_NEED_CLIENT_AUTH = "logviewer.https.need.client.auth";
-
-    /**
-     * A list of users allowed to view logs via the Log Viewer
-     */
-    @isStringList
-    public static final String LOGS_USERS = "logs.users";
-
-    /**
-     * A list of groups allowed to view logs via the Log Viewer
-     */
-    @isStringList
-    public static final String LOGS_GROUPS = "logs.groups";
-
-    /**
-     * Appender name used by log viewer to determine log directory.
-     */
-    @isString
-    public static final String LOGVIEWER_APPENDER_NAME = "logviewer.appender.name";
-
-    /**
-     * Childopts for Storm UI Java process.
-     */
-    @isString
-    public static final String UI_CHILDOPTS = "ui.childopts";
-
-    /**
-     * A class implementing javax.servlet.Filter for authenticating/filtering UI requests
-     */
-    @isString
-    public static final String UI_FILTER = "ui.filter";
-
-    /**
-     * Initialization parameters for the javax.servlet.Filter
-     */
-    @isMapEntryType(keyType = String.class, valueType = String.class)
-    public static final String UI_FILTER_PARAMS = "ui.filter.params";
-
-    /**
-     * The size of the header buffer for the UI in bytes
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String UI_HEADER_BUFFER_BYTES = "ui.header.buffer.bytes";
-
-    /**
-     * This port is used by the Storm UI for receiving HTTPS (SSL) requests from clients.
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String UI_HTTPS_PORT = "ui.https.port";
-
-    /**
-     * Path to the keystore used by Storm UI for setting up HTTPS (SSL).
-     */
-    @isString
-    public static final String UI_HTTPS_KEYSTORE_PATH = "ui.https.keystore.path";
-
-    /**
-     * Password to the keystore used by Storm UI for setting up HTTPS (SSL).
-     */
-    @isString
-    public static final String UI_HTTPS_KEYSTORE_PASSWORD = "ui.https.keystore.password";
-
-    /**
-     * Type of keystore used by Storm UI for setting up HTTPS (SSL).
-     * see http://docs.oracle.com/javase/7/docs/api/java/security/KeyStore.html for more details.
-     */
-    @isString
-    public static final String UI_HTTPS_KEYSTORE_TYPE = "ui.https.keystore.type";
-
-    /**
-     * Password to the private key in the keystore for setting up HTTPS (SSL).
-     */
-    @isString
-    public static final String UI_HTTPS_KEY_PASSWORD = "ui.https.key.password";
-
-    /**
-     * Path to the truststore used by Storm UI setting up HTTPS (SSL).
-     */
-    @isString
-    public static final String UI_HTTPS_TRUSTSTORE_PATH = "ui.https.truststore.path";
-
-    /**
-     * Password to the truststore used by Storm UI setting up HTTPS (SSL).
-     */
-    @isString
-    public static final String UI_HTTPS_TRUSTSTORE_PASSWORD = "ui.https.truststore.password";
-
-    /**
-     * Type of truststore used by Storm UI for setting up HTTPS (SSL).
-     * see http://docs.oracle.com/javase/7/docs/api/java/security/KeyStore.html for more details.
-     */
-    @isString
-    public static final String UI_HTTPS_TRUSTSTORE_TYPE = "ui.https.truststore.type";
-
-    /**
-     * Whether the Storm UI HTTPS server should request an SSL client certificate (client authentication is optional).
-     */
-    @isBoolean
-    public static final String UI_HTTPS_WANT_CLIENT_AUTH = "ui.https.want.client.auth";
-
-    /**
-     * Whether the Storm UI HTTPS server should require an SSL client certificate.
-     */
-    @isBoolean
-    public static final String UI_HTTPS_NEED_CLIENT_AUTH = "ui.https.need.client.auth";
-
-    /**
-     * The host that Pacemaker is running on.
-     */
-    @isString
-    public static final String PACEMAKER_HOST = "pacemaker.host";
-
-    /**
-     * The port Pacemaker should run on. Clients should
-     * connect to this port to submit or read heartbeats.
-     */
-    @isNumber
-    @isPositiveNumber
-    public static final String PACEMAKER_PORT = "pacemaker.port";
-
-    /**
-     * The maximum number of threads that should be used by the Pacemaker.
-     * When Pacemaker gets loaded it will spawn new threads, up to 
-     * this many total, to handle the load.
-     */
-    @isNumber
-    @isPositiveNumber
-    public static final String PACEMAKER_MAX_THREADS = "pacemaker.max.threads";
-
-    /**
-     * This parameter is used by the storm-deploy project to configure the
-     * jvm options for the pacemaker daemon.
-     */
-    @isString
-    public static final String PACEMAKER_CHILDOPTS = "pacemaker.childopts";
-
-    /**
-     * This should be one of "DIGEST", "KERBEROS", or "NONE"
-     * Determines the mode of authentication the pacemaker server and client use.
-     * The client must either match the server, or be NONE. In the case of NONE,
-     * no authentication is performed for the client, and if the server is running with
-     * DIGEST or KERBEROS, the client can only write to the server (no reads).
-     * This is intended to provide a primitive form of access-control.
-     */
-    @CustomValidator(validatorClass=PacemakerAuthTypeValidator.class)
-    public static final String PACEMAKER_AUTH_METHOD = "pacemaker.auth.method";
-    
-    /**
-     * List of DRPC servers so that the DRPCSpout knows who to talk to.
-     */
-    @isStringList
-    public static final String DRPC_SERVERS = "drpc.servers";
-
-    /**
-     * This port is used by Storm DRPC for receiving HTTP DRPC requests from clients.
-     */
-    @isInteger
-    public static final String DRPC_HTTP_PORT = "drpc.http.port";
-
-    /**
-     * This port is used by Storm DRPC for receiving HTTPS (SSL) DRPC requests from clients.
-     */
-    @isInteger
-    public static final String DRPC_HTTPS_PORT = "drpc.https.port";
-
-    /**
-     * Path to the keystore used by Storm DRPC for setting up HTTPS (SSL).
-     */
-    @isString
-    public static final String DRPC_HTTPS_KEYSTORE_PATH = "drpc.https.keystore.path";
-
-    /**
-     * Password to the keystore used by Storm DRPC for setting up HTTPS (SSL).
-     */
-    @isString
-    public static final String DRPC_HTTPS_KEYSTORE_PASSWORD = "drpc.https.keystore.password";
-
-    /**
-     * Type of keystore used by Storm DRPC for setting up HTTPS (SSL).
-     * see http://docs.oracle.com/javase/7/docs/api/java/security/KeyStore.html for more details.
-     */
-    @isString
-    public static final String DRPC_HTTPS_KEYSTORE_TYPE = "drpc.https.keystore.type";
-
-    /**
-     * Password to the private key in the keystore for setting up HTTPS (SSL).
-     */
-    @isString
-    public static final String DRPC_HTTPS_KEY_PASSWORD = "drpc.https.key.password";
-
-    /**
-     * Path to the truststore used by Storm DRPC setting up HTTPS (SSL).
-     */
-    @isString
-    public static final String DRPC_HTTPS_TRUSTSTORE_PATH = "drpc.https.truststore.path";
-
-    /**
-     * Password to the truststore used by Storm DRPC setting up HTTPS (SSL).
-     */
-    @isString
-    public static final String DRPC_HTTPS_TRUSTSTORE_PASSWORD = "drpc.https.truststore.password";
-
-    /**
-     * Type of truststore used by Storm DRPC for setting up HTTPS (SSL).
-     * see http://docs.oracle.com/javase/7/docs/api/java/security/KeyStore.html for more details.
-     */
-    @isString
-    public static final String DRPC_HTTPS_TRUSTSTORE_TYPE = "drpc.https.truststore.type";
-
-    /**
-     * Whether the Storm DRPC HTTPS server should request an SSL client certificate (client authentication is optional).
-     */
-    @isBoolean
-    public static final String DRPC_HTTPS_WANT_CLIENT_AUTH = "drpc.https.want.client.auth";
-
-    /**
-     * Whether the Storm DRPC HTTPS server should require an SSL client certificate.
-     */
-    @isBoolean
-    public static final String DRPC_HTTPS_NEED_CLIENT_AUTH = "drpc.https.need.client.auth";
-
-    /**
-     * The DRPC transport plug-in for Thrift client/server communication
-     */
-    @isString
-    public static final String DRPC_THRIFT_TRANSPORT_PLUGIN = "drpc.thrift.transport";
-
-    /**
-     * This port is used by Storm DRPC for receiving DRPC requests from clients.
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String DRPC_PORT = "drpc.port";
-
-    /**
-     * Class name for authorization plugin for DRPC client
-     */
-    @isString
-    public static final String DRPC_AUTHORIZER = "drpc.authorizer";
-
-    /**
-     * The Access Control List for the DRPC Authorizer.
-     * @see backtype.storm.security.auth.authorizer.DRPCSimpleACLAuthorizer
-     */
-    @isType(type=Map.class)
-    public static final String DRPC_AUTHORIZER_ACL = "drpc.authorizer.acl";
-
-    /**
-     * File name of the DRPC Authorizer ACL.
-     * @see backtype.storm.security.auth.authorizer.DRPCSimpleACLAuthorizer
-     */
-    @isString
-    public static final String DRPC_AUTHORIZER_ACL_FILENAME = "drpc.authorizer.acl.filename";
-
-    /**
-     * Whether the DRPCSimpleAclAuthorizer should deny requests for operations
-     * involving functions that have no explicit ACL entry. When set to false
-     * (the default) DRPC functions that have no entry in the ACL will be
-     * permitted, which is appropriate for a development environment. When set
-     * to true, explicit ACL entries are required for every DRPC function, and
-     * any request for a function that lacks an ACL entry will be denied.
-     * @see backtype.storm.security.auth.authorizer.DRPCSimpleACLAuthorizer
-     */
-    @isBoolean
-    public static final String DRPC_AUTHORIZER_ACL_STRICT = "drpc.authorizer.acl.strict";
-
-    /**
-     * DRPC thrift server worker threads
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String DRPC_WORKER_THREADS = "drpc.worker.threads";
-
-    /**
-     * The maximum buffer size thrift should use when reading messages for DRPC.
-     */
-    @isNumber
-    @isPositiveNumber
-    public static final String DRPC_MAX_BUFFER_SIZE = "drpc.max_buffer_size";
-
-    /**
-     * DRPC thrift server queue size
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String DRPC_QUEUE_SIZE = "drpc.queue.size";
-
-    /**
-     * The DRPC invocations transport plug-in for Thrift client/server communication
-     */
-    @isString
-    public static final String DRPC_INVOCATIONS_THRIFT_TRANSPORT_PLUGIN = "drpc.invocations.thrift.transport";
-
-    /**
-     * This port on Storm DRPC is used by DRPC topologies to receive function invocations and send results back.
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String DRPC_INVOCATIONS_PORT = "drpc.invocations.port";
-
-    /**
-     * DRPC invocations thrift server worker threads
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String DRPC_INVOCATIONS_THREADS = "drpc.invocations.threads";
-
-    /**
-     * The timeout on DRPC requests within the DRPC server. Defaults to 10 minutes. Note that requests can also
-     * timeout based on the socket timeout on the DRPC client, and separately based on the topology message
-     * timeout for the topology implementing the DRPC function.
-     */
-
-    @isInteger
-    @isPositiveNumber
-    @NotNull
-    public static final String DRPC_REQUEST_TIMEOUT_SECS  = "drpc.request.timeout.secs";
-
-    /**
-     * Childopts for Storm DRPC Java process.
-     */
-    @isString
-    public static final String DRPC_CHILDOPTS = "drpc.childopts";
-
-    /**
-     * Class name of the HTTP credentials plugin for the UI.
-     */
-    @isString
-    public static final String UI_HTTP_CREDS_PLUGIN = "ui.http.creds.plugin";
-
-    /**
-     * Class name of the HTTP credentials plugin for DRPC.
-     */
-    @isString
-    public static final String DRPC_HTTP_CREDS_PLUGIN = "drpc.http.creds.plugin";
-
-    /**
-     * the metadata configured on the supervisor
-     */
-    @isType(type=Map.class)
-    public static final String SUPERVISOR_SCHEDULER_META = "supervisor.scheduler.meta";
-
-    /**
-     * A list of ports that can run workers on this supervisor. Each worker uses one port, and
-     * the supervisor will only run one worker per port. Use this configuration to tune
-     * how many workers run on each machine.
-     */
-    @isNoDuplicateInList
-    @NotNull
-    @isListEntryCustom(entryValidatorClasses={IntegerValidator.class,PositiveNumberValidator.class})
-    public static final String SUPERVISOR_SLOTS_PORTS = "supervisor.slots.ports";
-
-    /**
-     * What blobstore implementation the supervisor should use.
-     */
-    @isString
-    public static final String SUPERVISOR_BLOBSTORE = "supervisor.blobstore.class";
-
-    /**
-     * The distributed cache target size in MB. This is a soft limit to the size of the distributed
-     * cache contents.
-     */
-    @isPositiveNumber
-    @isInteger
-    public static final String SUPERVISOR_LOCALIZER_CACHE_TARGET_SIZE_MB = "supervisor.localizer.cache.target.size.mb";
-
-    /**
-     * The distributed cache cleanup interval. Controls how often it scans to attempt to cleanup
-     * anything over the cache target size.
-     */
-    @isPositiveNumber
-    @isInteger
-    public static final String SUPERVISOR_LOCALIZER_CACHE_CLEANUP_INTERVAL_MS = "supervisor.localizer.cleanup.interval.ms";
-
-    /**
-     * What blobstore implementation the storm client should use.
-     */
-    @isString
-    public static final String CLIENT_BLOBSTORE = "client.blobstore.class";
-
-    /**
-     * What blobstore download parallelism the supervisor should use.
-     */
-    @isPositiveNumber
-    @isInteger
-    public static final String SUPERVISOR_BLOBSTORE_DOWNLOAD_THREAD_COUNT = "supervisor.blobstore.download.thread.count";
-
-    /**
-     * Maximum number of retries a supervisor is allowed to make for downloading a blob.
-     */
-    @isPositiveNumber
-    @isInteger
-    public static final String SUPERVISOR_BLOBSTORE_DOWNLOAD_MAX_RETRIES = "supervisor.blobstore.download.max_retries";
-
-    /**
-     * The blobstore superuser has read/write/admin permissions on all blobs. This is the user
-     * running the blobstore daemon.
-     */
-    @isString
-    public static final String BLOBSTORE_SUPERUSER = "blobstore.superuser";
-
-    /**
-     * What directory to use for the blobstore. The directory is expected to be an
-     * absolute path when using the HDFS blobstore; for LocalFsBlobStore it can be either
-     * absolute or relative.
-     */
-    @isString
-    public static final String BLOBSTORE_DIR = "blobstore.dir";
-
-    /**
-     * What buffer size to use for the blobstore uploads.
-     */
-    @isPositiveNumber
-    @isInteger
-    public static final String STORM_BLOBSTORE_INPUTSTREAM_BUFFER_SIZE_BYTES = "storm.blobstore.inputstream.buffer.size.bytes";
-
-    /**
-     * Enable the blobstore cleaner. Certain blobstores may only want to run the cleaner
-     * on one daemon. Currently Nimbus handles setting this.
-     */
-    @isBoolean
-    public static final String BLOBSTORE_CLEANUP_ENABLE = "blobstore.cleanup.enable";
-
-    /**
-     * principal for nimbus/supervisor to use to access secure hdfs for the blobstore.
-     */
-    @isString
-    public static final String BLOBSTORE_HDFS_PRINCIPAL = "blobstore.hdfs.principal";
-
-    /**
-     * keytab for nimbus/supervisor to use to access secure hdfs for the blobstore.
-     */
-    @isString
-    public static final String BLOBSTORE_HDFS_KEYTAB = "blobstore.hdfs.keytab";
-
-    /**
-     *  Set replication factor for a blob in HDFS Blobstore Implementation
-     */
-    @isPositiveNumber
-    @isInteger
-    public static final String STORM_BLOBSTORE_REPLICATION_FACTOR = "storm.blobstore.replication.factor";
-
-    /**
-     * What blobstore implementation nimbus should use.
-     */
-    @isString
-    public static final String NIMBUS_BLOBSTORE = "nimbus.blobstore.class";
-
-    /**
-     * During operations with the blob store, via master, how long a connection
-     * is idle before nimbus considers it dead and drops the session and any
-     * associated connections.
-     */
-    @isPositiveNumber
-    @isInteger
-    public static final String NIMBUS_BLOBSTORE_EXPIRATION_SECS = "nimbus.blobstore.expiration.secs";
-
-    /**
-     * A map of blobstore keys to per-blob settings controlling how the worker sees each blob in its
-     * launch directory: an optional local file name ("localname") and an optional "uncompress" flag.
-     * If "localname" is not specified, the blobstore key itself is used as the file name. Each topology
-     * can have a different map of blobs.  Example: topology.blobstore.map: {"blobstorekey" :
-     * {"localname": "myblob", "uncompress": false}, "blobstorearchivekey" :
-     * {"localname": "myarchive", "uncompress": true}}
-     */
-    @CustomValidator(validatorClass = MapOfStringToMapOfStringToObjectValidator.class)
-    public static final String TOPOLOGY_BLOBSTORE_MAP = "topology.blobstore.map";
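
As a rough illustration of the map shape described in the javadoc above, a submitter could also build this
config programmatically; the blob key and local name below are purely hypothetical:

    // Sketch: wiring topology.blobstore.map from Java instead of storm.yaml.
    Map<String, Map<String, Object>> blobs = new HashMap<>();
    Map<String, Object> dict = new HashMap<>();
    dict.put("localname", "myblob");    // file name the worker will see in its launch directory
    dict.put("uncompress", false);      // leave the blob as-is
    blobs.put("blobstorekey", dict);
    Config conf = new Config();
    conf.put(Config.TOPOLOGY_BLOBSTORE_MAP, blobs);
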
-
-    /**
-     * A number representing the maximum number of workers any single topology can acquire.
-     */
-    @isInteger
-    @isPositiveNumber(includeZero = true)
-    public static final String NIMBUS_SLOTS_PER_TOPOLOGY = "nimbus.slots.perTopology";
-
-    /**
-     * A class implementing javax.servlet.Filter for DRPC HTTP requests
-     */
-    @isString
-    public static final String DRPC_HTTP_FILTER = "drpc.http.filter";
-
-    /**
-     * Initialization parameters for the javax.servlet.Filter of the DRPC HTTP
-     * service
-     */
-    @isMapEntryType(keyType = String.class, valueType = String.class)
-    public static final String DRPC_HTTP_FILTER_PARAMS = "drpc.http.filter.params";
-
-    /**
-     * A number representing the maximum number of executors any single topology can acquire.
-     */
-    @isInteger
-    @isPositiveNumber(includeZero = true)
-    public static final String NIMBUS_EXECUTORS_PER_TOPOLOGY = "nimbus.executors.perTopology";
-
-    /**
-     * This parameter is used by the storm-deploy project to configure the
-     * jvm options for the supervisor daemon.
-     */
-    @isString
-    public static final String SUPERVISOR_CHILDOPTS = "supervisor.childopts";
-
-    /**
-     * How long a worker can go without heartbeating before the supervisor tries to
-     * restart the worker process.
-     */
-    @isInteger
-    @isPositiveNumber
-    @NotNull
-    public static final String SUPERVISOR_WORKER_TIMEOUT_SECS = "supervisor.worker.timeout.secs";
-
-    /**
-     * How many seconds to sleep for before shutting down threads on worker
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String SUPERVISOR_WORKER_SHUTDOWN_SLEEP_SECS = "supervisor.worker.shutdown.sleep.secs";
-
-    /**
-     * How long a worker can go without heartbeating during the initial launch before
-     * the supervisor tries to restart the worker process. This value overrides
-     * supervisor.worker.timeout.secs during launch because there is additional
-     * overhead to starting and configuring the JVM on launch.
-     */
-    @isInteger
-    @isPositiveNumber
-    @NotNull
-    public static final String SUPERVISOR_WORKER_START_TIMEOUT_SECS = "supervisor.worker.start.timeout.secs";
-
-    /**
-     * Whether or not the supervisor should launch workers assigned to it. Defaults
-     * to true -- and you should probably never change this value. This configuration
-     * is used in the Storm unit tests.
-     */
-    @isBoolean
-    public static final String SUPERVISOR_ENABLE = "supervisor.enable";
-
-    /**
-     * how often the supervisor sends a heartbeat to the master.
-     */
-    @isInteger
-    public static final String SUPERVISOR_HEARTBEAT_FREQUENCY_SECS = "supervisor.heartbeat.frequency.secs";
-
-
-    /**
-     * How often the supervisor checks the worker heartbeats to see if any of them
-     * need to be restarted.
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String SUPERVISOR_MONITOR_FREQUENCY_SECS = "supervisor.monitor.frequency.secs";
-
-    /**
-     * Should the supervisor try to run the worker as the launching user or not.  Defaults to false.
-     */
-    @isBoolean
-    public static final String SUPERVISOR_RUN_WORKER_AS_USER = "supervisor.run.worker.as.user";
-
-    /**
-     * Full path to the worker-launcher executable that will be used to launch workers when
-     * SUPERVISOR_RUN_WORKER_AS_USER is set to true.
-     */
-    @isString
-    public static final String SUPERVISOR_WORKER_LAUNCHER = "supervisor.worker.launcher";
-
-    /**
-     * The total amount of memory (in MiB) a supervisor is allowed to give to its workers.
-     *  A default value will be set for this config if user does not override
-     */
-    @isPositiveNumber
-    public static final String SUPERVISOR_MEMORY_CAPACITY_MB = "supervisor.memory.capacity.mb";
-
-    /**
-     * The total amount of CPU resources a supervisor is allowed to give to its workers.
-     * By convention 1 cpu core should be about 100, but this can be adjusted if needed.
-     * Using 100 makes it simple to set the desired value to the capacity measurement
-     * for single threaded bolts.  A default value will be set for this config if the user does not override it.
-     */
-    @isPositiveNumber
-    public static final String SUPERVISOR_CPU_CAPACITY = "supervisor.cpu.capacity";
-
-    /**
-     * The jvm opts provided to workers launched by this supervisor.
-     * All "%ID%", "%WORKER-ID%", "%TOPOLOGY-ID%",
-     * "%WORKER-PORT%" and "%HEAP-MEM%" substrings are replaced with:
-     * %ID%          -> port (for backward compatibility),
-     * %WORKER-ID%   -> worker-id,
-     * %TOPOLOGY-ID%    -> topology-id,
-     * %WORKER-PORT% -> port.
-     * %HEAP-MEM% -> mem-onheap.
-     */
-    @isStringOrStringList
-    public static final String WORKER_CHILDOPTS = "worker.childopts";
-
-    /**
-     * The default heap memory size in MB per worker, used in the jvm -Xmx opts for launching the worker
-      */
-    @isInteger
-    @isPositiveNumber
-    public static final String WORKER_HEAP_MEMORY_MB = "worker.heap.memory.mb";
-
-    /**
-     * The jvm profiler opts provided to workers launched by this supervisor.
-     */
-    @isStringOrStringList
-    public static final String WORKER_PROFILER_CHILDOPTS = "worker.profiler.childopts";
-
-    /**
-     * This configuration enables or disables component page profiling and debugging for workers.
-     */
-    @isBoolean
-    public static final String WORKER_PROFILER_ENABLED = "worker.profiler.enabled";
-
-    /**
-     * The command the supervisor launches with the worker arguments
-     * pid, action and [target_directory],
-     * where action is one of: start profile, stop profile, jstack, heapdump, or kill against the given pid.
-     *
-     */
-    @isString
-    public static final String WORKER_PROFILER_COMMAND = "worker.profiler.command";
-
-    /**
-     * The jvm opts provided to workers launched by this supervisor for GC. All "%ID%" substrings are replaced
-     * with an identifier for this worker.  Because the JVM complains about multiple GC opts the topology
-     * can override this default value by setting topology.worker.gc.childopts.
-     */
-    @isStringOrStringList
-    public static final String WORKER_GC_CHILDOPTS = "worker.gc.childopts";
-
-    /**
-     * How often this worker should heartbeat to the supervisor.
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String WORKER_HEARTBEAT_FREQUENCY_SECS = "worker.heartbeat.frequency.secs";
-
-    /**
-     * How often a task should heartbeat its status to the master.
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String TASK_HEARTBEAT_FREQUENCY_SECS = "task.heartbeat.frequency.secs";
-
-    /**
-     * How often a task should sync its connections with other tasks (if a task is
-     * reassigned, the other tasks sending messages to it need to refresh their connections).
-     * In general though, when a reassignment happens other tasks will be notified
-     * almost immediately. This configuration is here just in case that notification doesn't
-     * come through.
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String TASK_REFRESH_POLL_SECS = "task.refresh.poll.secs";
-
-    /**
-     * How often a worker should check dynamic log level timeouts for expiration.
-     * For expired logger settings, the clean up polling task will reset the log levels
-     * to the original levels (detected at startup), and will clean up the timeout map
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String WORKER_LOG_LEVEL_RESET_POLL_SECS = "worker.log.level.reset.poll.secs";
-
-    /**
-     * How often a task should sync credentials, worst case.
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String TASK_CREDENTIALS_POLL_SECS = "task.credentials.poll.secs";
-
-    /**
-     * Whether to enable backpressure for a certain topology
-     */
-    @isBoolean
-    public static final String TOPOLOGY_BACKPRESSURE_ENABLE = "topology.backpressure.enable";
-
-    /**
-     * This signifies the tuple congestion in a disruptor queue.
-     * When the used ratio of a disruptor queue is higher than the high watermark,
-     * the backpressure scheme, if enabled, should slow down the tuple sending speed of
-     * the spouts until reaching the low watermark.
-     */
-    @isPositiveNumber
-    public static final String BACKPRESSURE_DISRUPTOR_HIGH_WATERMARK="backpressure.disruptor.high.watermark";
-
-    /**
-     * This signifies a state that a disruptor queue has left the congestion.
-     * If the used ratio of a disruptor queue is lower than the low watermark,
-     * it will unset the backpressure flag.
-     */
-    @isPositiveNumber
-    public static final String BACKPRESSURE_DISRUPTOR_LOW_WATERMARK="backpressure.disruptor.low.watermark";
-
-    /**
-     * A list of users that are allowed to interact with the topology.  To use this set
-     * nimbus.authorizer to backtype.storm.security.auth.authorizer.SimpleACLAuthorizer
-     */
-    @isStringList
-    public static final String TOPOLOGY_USERS = "topology.users";
-
-    /**
-     * A list of groups that are allowed to interact with the topology.  To use this set
-     * nimbus.authorizer to backtype.storm.security.auth.authorizer.SimpleACLAuthorizer
-     */
-    @isStringList
-    public static final String TOPOLOGY_GROUPS = "topology.groups";
-
-    /**
-     * True if Storm should timeout messages or not. Defaults to true. This is meant to be used
-     * in unit tests to prevent tuples from being accidentally timed out during the test.
-     */
-    @isBoolean
-    public static final String TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS = "topology.enable.message.timeouts";
-
-    /**
-     * When set to true, Storm will log every message that's emitted.
-     */
-    @isBoolean
-    public static final String TOPOLOGY_DEBUG = "topology.debug";
-
-    /**
-     * The serializer for communication between shell components and non-JVM
-     * processes
-     */
-    @isString
-    public static final String TOPOLOGY_MULTILANG_SERIALIZER = "topology.multilang.serializer";
-
-    /**
-     * How many processes should be spawned around the cluster to execute this
-     * topology. Each process will execute some number of tasks as threads within
-     * them. This parameter should be used in conjunction with the parallelism hints
-     * on each component in the topology to tune the performance of a topology.
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String TOPOLOGY_WORKERS = "topology.workers";
-
-    /**
-     * How many instances to create for a spout/bolt. A task runs on a thread with zero or more
-     * other tasks for the same spout/bolt. The number of tasks for a spout/bolt is always
-     * the same throughout the lifetime of a topology, but the number of executors (threads) for
-     * a spout/bolt can change over time. This allows a topology to scale to more or less resources
-     * without redeploying the topology or violating the constraints of Storm (such as a fields grouping
-     * guaranteeing that the same value goes to the same task).
-     */
-    @isInteger
-    @isPositiveNumber(includeZero = true)
-    public static final String TOPOLOGY_TASKS = "topology.tasks";
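
To make the executor/task distinction above concrete, here is a small sketch (the component name and bolt
class are made up) that fixes the task count higher than the initial executor count so the topology can
later be rebalanced to more threads:

    // Hypothetical: 2 executors (threads) initially, 8 tasks, so up to 8 threads after a rebalance.
    TopologyBuilder builder = new TopologyBuilder();
    builder.setBolt("split", new SplitSentenceBolt(), 2).setNumTasks(8);
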
-
-    /**
-     * The maximum amount of memory an instance of a spout/bolt will take on heap. This enables the scheduler
-     * to allocate slots on machines with enough available memory. A default value will be set for this config if user does not override
-     */
-    @isPositiveNumber(includeZero = true)
-    public static final String TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB = "topology.component.resources.onheap.memory.mb";
-
-    /**
-     * The maximum amount of memory an instance of a spout/bolt will take off heap. This enables the scheduler
-     * to allocate slots on machines with enough available memory.  A default value will be set for this config if user does not override
-     */
-    @isPositiveNumber(includeZero = true)
-    public static final String TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB = "topology.component.resources.offheap.memory.mb";
-
-    /**
-     * This config indicates the percentage of a cpu core that an instance (executor) of a component will use.
-     * Assuming a core is valued at 100, a value of 10 indicates 10% of the core.
-     * The P in PCORE represents the term "physical".  A default value will be set for this config if user does not override
-     */
-    @isPositiveNumber(includeZero = true)
-    public static final String TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT = "topology.component.cpu.pcore.percent";
-
-    /**
-     * A per topology config that specifies the maximum amount of memory a worker can use for that specific topology
-     */
-    @isPositiveNumber
-    public static final String TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB = "topology.worker.max.heap.size.mb";
-
-    /**
-     * The strategy to use when scheduling a topology with Resource Aware Scheduler
-     */
-    @NotNull
-    @isImplementationOfClass(implementsClass = IStrategy.class)
-    public static final String TOPOLOGY_SCHEDULER_STRATEGY = "topology.scheduler.strategy";
-
-    /**
-     * How many executors to spawn for ackers.
-     *
-     * <p>By not setting this variable or setting it as null, Storm will set the number of acker executors
-     * to be equal to the number of workers configured for this topology. If this variable is set to 0,
-     * then Storm will immediately ack tuples as soon as they come off the spout, effectively disabling reliability.</p>
-     */
-    @isInteger
-    @isPositiveNumber(includeZero = true)
-    public static final String TOPOLOGY_ACKER_EXECUTORS = "topology.acker.executors";
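
A minimal sketch of the zero-acker case mentioned above, using the setNumAckers helper defined later in
this class:

    Config conf = new Config();
    conf.setNumAckers(0);   // tuples are acked as soon as the spout emits them; reliability is effectively off
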
-
-    /**
-     * How many executors to spawn for event logger.
-     *
-     * <p>By not setting this variable or setting it as null, Storm will set the number of eventlogger executors
-     * to be equal to the number of workers configured for this topology. If this variable is set to 0,
-     * event logging will be disabled.</p>
-     */
-    @isInteger
-    @isPositiveNumber(includeZero = true)
-    public static final String TOPOLOGY_EVENTLOGGER_EXECUTORS = "topology.eventlogger.executors";
-
-    /**
-     * The maximum amount of time given to the topology to fully process a message
-     * emitted by a spout. If the message is not acked within this time frame, Storm
-     * will fail the message on the spout. Some spouts implementations will then replay
-     * the message at a later time.
-     */
-    @isInteger
-    @isPositiveNumber
-    @NotNull
-    public static final String TOPOLOGY_MESSAGE_TIMEOUT_SECS = "topology.message.timeout.secs";
-
-    /**
-     * A list of serialization registrations for Kryo ( https://github.com/EsotericSoftware/kryo ),
-     * the underlying serialization framework for Storm. A serialization can either
-     * be the name of a class (in which case Kryo will automatically create a serializer for the class
-     * that saves all the object's fields), or an implementation of com.esotericsoftware.kryo.Serializer.
-     *
-     * See Kryo's documentation for more information about writing custom serializers.
-     */
-    @isKryoReg
-    public static final String TOPOLOGY_KRYO_REGISTER = "topology.kryo.register";
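
In practice these registrations are usually added through the registerSerialization helpers defined further
down in this class rather than by editing the list directly; the event and serializer classes named here are
hypothetical:

    Config conf = new Config();
    conf.registerSerialization(MyEvent.class);                               // Kryo generates a field serializer
    conf.registerSerialization(MyOtherEvent.class, MyEventSerializer.class); // explicit Serializer implementation
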
-
-    /**
-     * A list of classes that customize storm's kryo instance during start-up.
-     * Each listed class name must implement IKryoDecorator. During start-up the
-     * listed class is instantiated with 0 arguments, then its 'decorate' method
-     * is called with storm's kryo instance as the only argument.
-     */
-    @isStringList
-    public static final String TOPOLOGY_KRYO_DECORATORS = "topology.kryo.decorators";
-
-    /**
-     * Class that specifies how to create a Kryo instance for serialization. Storm will then apply
-     * topology.kryo.register and topology.kryo.decorators on top of this. The default implementation
-     * implements topology.fall.back.on.java.serialization and turns references off.
-     */
-    @isString
-    public static final String TOPOLOGY_KRYO_FACTORY = "topology.kryo.factory";
-
-    /**
-     * Whether or not Storm should skip the loading of kryo registrations for which it
-     * does not know the class or have the serializer implementation. Otherwise, the task will
-     * fail to load and will throw an error at runtime. The use case of this is if you want to
-     * declare your serializations on the storm.yaml files on the cluster rather than every single
-     * time you submit a topology. Different applications may use different serializations and so
-     * a single application may not have the code for the other serializers used by other apps.
-     * By setting this config to true, Storm will ignore that it doesn't have those other serializations
-     * rather than throw an error.
-     */
-    @isBoolean
-    public static final String TOPOLOGY_SKIP_MISSING_KRYO_REGISTRATIONS= "topology.skip.missing.kryo.registrations";
-
-    /**
-     * A list of classes implementing IMetricsConsumer (See storm.yaml.example for exact config format).
-     * Each listed class will be routed all the metrics data generated by the storm metrics API.
-     * Each listed class maps 1:1 to a system bolt named __metrics_ClassName#N, and its parallelism is configurable.
-     */
-
-    @isListEntryCustom(entryValidatorClasses={MetricRegistryValidator.class})
-    public static final String TOPOLOGY_METRICS_CONSUMER_REGISTER = "topology.metrics.consumer.register";
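
The registerMetricsConsumer helpers near the end of this class populate this list; for example, routing all
metrics to the bundled LoggingMetricsConsumer (assuming it is on the worker classpath) with a parallelism
hint of 2:

    Config conf = new Config();
    conf.registerMetricsConsumer(backtype.storm.metric.LoggingMetricsConsumer.class, 2);
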
-
-    /**
-     * A map of metric name to class name implementing IMetric that will be created once per worker JVM
-     */
-    @isMapEntryType(keyType = String.class, valueType = String.class)
-    public static final String TOPOLOGY_WORKER_METRICS = "topology.worker.metrics";
-
-    /**
-     * A map of metric name to class name implementing IMetric that will be created once per worker JVM
-     */
-    @isMapEntryType(keyType = String.class, valueType = String.class)
-    public static final String WORKER_METRICS = "worker.metrics";
-
-    /**
-     * The maximum parallelism allowed for a component in this topology. This configuration is
-     * typically used in testing to limit the number of threads spawned in local mode.
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String TOPOLOGY_MAX_TASK_PARALLELISM="topology.max.task.parallelism";
-
-    /**
-     * The maximum number of tuples that can be pending on a spout task at any given time.
-     * This config applies to individual tasks, not to spouts or topologies as a whole.
-     *
-     * A pending tuple is one that has been emitted from a spout but has not been acked or failed yet.
-     * Note that this config parameter has no effect for unreliable spouts that don't tag
-     * their tuples with a message id.
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String TOPOLOGY_MAX_SPOUT_PENDING="topology.max.spout.pending";
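
A sketch of capping the number of pending tuples per spout task via the setter pattern this class provides
(the value is illustrative, not a recommendation):

    Config conf = new Config();
    conf.setMaxSpoutPending(1000);  // at most 1000 un-acked tuples outstanding per spout task
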
-
-    /**
-     * A class that implements a strategy for what to do when a spout needs to wait. Waiting is
-     * triggered in one of two conditions:
-     *
-     * 1. nextTuple emits no tuples
-     * 2. The spout has hit maxSpoutPending and can't emit any more tuples
-     */
-    @isString
-    public static final String TOPOLOGY_SPOUT_WAIT_STRATEGY="topology.spout.wait.strategy";
-
-    /**
-     * The amount of milliseconds the SleepEmptyEmitStrategy should sleep for.
-     */
-    @isInteger
-    @isPositiveNumber(includeZero = true)
-    public static final String TOPOLOGY_SLEEP_SPOUT_WAIT_STRATEGY_TIME_MS="topology.sleep.spout.wait.strategy.time.ms";
-
-    /**
-     * The maximum amount of time a component gives a source of state to synchronize before it requests
-     * synchronization again.
-     */
-    @isInteger
-    @isPositiveNumber
-    @NotNull
-    public static final String TOPOLOGY_STATE_SYNCHRONIZATION_TIMEOUT_SECS="topology.state.synchronization.timeout.secs";
-
-    /**
-     * The percentage of tuples to sample to produce stats for a task.
-     */
-    @isPositiveNumber
-    public static final String TOPOLOGY_STATS_SAMPLE_RATE="topology.stats.sample.rate";
-
-    /**
-     * The time period that builtin metrics data is bucketed into.
-     */
-    @isInteger
-    public static final String TOPOLOGY_BUILTIN_METRICS_BUCKET_SIZE_SECS="topology.builtin.metrics.bucket.size.secs";
-
-    /**
-     * Whether or not to use Java serialization in a topology.
-     */
-    @isBoolean
-    public static final String TOPOLOGY_FALL_BACK_ON_JAVA_SERIALIZATION="topology.fall.back.on.java.serialization";
-
-    /**
-     * Topology-specific options for the worker child process. This is used in addition to WORKER_CHILDOPTS.
-     */
-    @isStringOrStringList
-    public static final String TOPOLOGY_WORKER_CHILDOPTS="topology.worker.childopts";
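
Because this is a per-topology setting, it is normally supplied through the submitted Config rather than
storm.yaml; the heap size below is only an example:

    Config conf = new Config();
    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-Xmx2g");  // appended to worker.childopts for this topology's workers
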
-
-    /**
-     * Topology-specific options GC for the worker child process. This overrides WORKER_GC_CHILDOPTS.
-     */
-    @isStringOrStringList
-    public static final String TOPOLOGY_WORKER_GC_CHILDOPTS="topology.worker.gc.childopts";
-
-    /**
-     * Topology-specific options for the logwriter process of a worker.
-     */
-    @isStringOrStringList
-    public static final String TOPOLOGY_WORKER_LOGWRITER_CHILDOPTS="topology.worker.logwriter.childopts";
-
-    /**
-     * Topology-specific classpath for the worker child process. This is combined to the usual classpath.
-     */
-    @isStringOrStringList
-    public static final String TOPOLOGY_CLASSPATH="topology.classpath";
-
-    /**
-     * Topology-specific environment variables for the worker child process.
-     * This is added to the existing environment (that of the supervisor)
-     */
-    @isMapEntryType(keyType = String.class, valueType = String.class)
-    public static final String TOPOLOGY_ENVIRONMENT="topology.environment";
-
-    /*
-     * Topology-specific option to disable/enable bolt's outgoing overflow buffer.
-     * Enabling this option ensures that the bolt can always clear the incoming messages,
-     * preventing live-lock for the topology with cyclic flow.
-     * The overflow buffer can fill up, gradually degrading performance and
-     * eventually running the worker out of memory.
-     */
-    @isBoolean
-    public static final String TOPOLOGY_BOLTS_OUTGOING_OVERFLOW_BUFFER_ENABLE="topology.bolts.outgoing.overflow.buffer.enable";
-
-    /*
-     * Bolt-specific configuration for windowed bolts to specify the window length as a count of number of tuples
-     * in the window.
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String TOPOLOGY_BOLTS_WINDOW_LENGTH_COUNT = "topology.bolts.window.length.count";
-
-    /*
-     * Bolt-specific configuration for windowed bolts to specify the window length in time duration.
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String TOPOLOGY_BOLTS_WINDOW_LENGTH_DURATION_MS = "topology.bolts.window.length.duration.ms";
-
-    /*
-     * Bolt-specific configuration for windowed bolts to specify the sliding interval as a count of number of tuples.
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String TOPOLOGY_BOLTS_SLIDING_INTERVAL_COUNT = "topology.bolts.window.sliding.interval.count";
-
-    /*
-     * Bolt-specific configuration for windowed bolts to specify the sliding interval in time duration.
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String TOPOLOGY_BOLTS_SLIDING_INTERVAL_DURATION_MS = "topology.bolts.window.sliding.interval.duration.ms";
-
-    /*
-     * Bolt-specific configuration for windowed bolts to specify the name of the field in the tuple that holds
-     * the timestamp (e.g. the ts when the tuple was actually generated). If this config is specified and the
-     * field is not present in the incoming tuple, a java.lang.IllegalArgumentException will be thrown.
-     */
-    @isString
-    public static final String TOPOLOGY_BOLTS_TUPLE_TIMESTAMP_FIELD_NAME = "topology.bolts.tuple.timestamp.field.name";
-
-    /*
-     * Bolt-specific configuration for windowed bolts to specify the maximum time lag of the tuple timestamp
-     * in milliseconds. It means that the tuple timestamps cannot be out of order by more than this amount.
-     * This config will be effective only if the TOPOLOGY_BOLTS_TUPLE_TIMESTAMP_FIELD_NAME is also specified.
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String TOPOLOGY_BOLTS_TUPLE_TIMESTAMP_MAX_LAG_MS = "topology.bolts.tuple.timestamp.max.lag.ms";
-
-    /*
-     * Bolt-specific configuration for windowed bolts to specify the time interval for generating
-     * watermark events. Watermark event tracks the progress of time when tuple timestamp is used.
-     * This config is effective only if TOPOLOGY_BOLTS_TUPLE_TIMESTAMP_FIELD_NAME is also specified.
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String TOPOLOGY_BOLTS_WATERMARK_EVENT_INTERVAL_MS = "topology.bolts.watermark.event.interval.ms";
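
A windowed bolt implementation typically fills these keys in through its component configuration, but they
can also be set directly; a sketch with illustrative values (a 10 second window sliding every 2 seconds,
driven by a tuple field named "ts"):

    Map<String, Object> componentConf = new HashMap<>();
    componentConf.put("topology.bolts.window.length.duration.ms", 10000);
    componentConf.put("topology.bolts.window.sliding.interval.duration.ms", 2000);
    componentConf.put("topology.bolts.tuple.timestamp.field.name", "ts");
    componentConf.put("topology.bolts.tuple.timestamp.max.lag.ms", 500);   // tolerate up to 500 ms of out-of-order timestamps
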
-
-    /**
-     * This config is available for TransactionalSpouts, and contains the id (a String) for
-     * the transactional topology. This id is used to store the state of the transactional
-     * topology in Zookeeper.
-     */
-    @isString
-    public static final String TOPOLOGY_TRANSACTIONAL_ID="topology.transactional.id";
-
-    /**
-     * A list of task hooks that are automatically added to every spout and bolt in the topology. An example
-     * of when you'd do this is to add a hook that integrates with your internal
-     * monitoring system. These hooks are instantiated using the zero-arg constructor.
-     */
-    @isStringList
-    public static final String TOPOLOGY_AUTO_TASK_HOOKS="topology.auto.task.hooks";
-
-    /**
-     * The size of the Disruptor receive queue for each executor. Must be a power of 2.
-     */
-    @isPowerOf2
-    public static final String TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE="topology.executor.receive.buffer.size";
-
-    /**
-     * The size of the Disruptor send queue for each executor. Must be a power of 2.
-     */
-    @isPowerOf2
-    public static final String TOPOLOGY_EXECUTOR_SEND_BUFFER_SIZE="topology.executor.send.buffer.size";
-
-    /**
-     * The size of the Disruptor transfer queue for each worker.
-     */
-    @isInteger
-    @isPowerOf2
-    public static final String TOPOLOGY_TRANSFER_BUFFER_SIZE="topology.transfer.buffer.size";
-
-    /**
-     * How often a tick tuple from the "__system" component and "__tick" stream should be sent
-     * to tasks. Meant to be used as a component-specific configuration.
-     */
-    @isInteger
-    public static final String TOPOLOGY_TICK_TUPLE_FREQ_SECS="topology.tick.tuple.freq.secs";
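
Since this is meant as a component-specific configuration, the usual pattern is to return it from a bolt's
getComponentConfiguration; a minimal sketch (the 60 second interval is just an example):

    @Override
    public Map<String, Object> getComponentConfiguration() {
        Map<String, Object> conf = new HashMap<>();
        conf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 60);  // deliver a tick tuple to this bolt every 60 seconds
        return conf;
    }
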
-
-   /**
-    * @deprecated this is no longer supported
-    * Configure the wait strategy used for internal queuing. Can be used to tradeoff latency
-    * vs. throughput
-    */
-    @Deprecated
-    @isString
-    public static final String TOPOLOGY_DISRUPTOR_WAIT_STRATEGY="topology.disruptor.wait.strategy";
-
-    /**
-     * The size of the shared thread pool for worker tasks to make use of. The thread pool can be accessed
-     * via the TopologyContext.
-     */
-    @isInteger
-    public static final String TOPOLOGY_WORKER_SHARED_THREAD_POOL_SIZE="topology.worker.shared.thread.pool.size";
-
-    /**
-     * The interval in seconds to use for determining whether to throttle error reported to Zookeeper. For example,
-     * an interval of 10 seconds with topology.max.error.report.per.interval set to 5 will only allow 5 errors to be
-     * reported to Zookeeper per task for every 10 second interval of time.
-     */
-    @isInteger
-    public static final String TOPOLOGY_ERROR_THROTTLE_INTERVAL_SECS="topology.error.throttle.interval.secs";
-
-    /**
-     * See doc for TOPOLOGY_ERROR_THROTTLE_INTERVAL_SECS
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String TOPOLOGY_MAX_ERROR_REPORT_PER_INTERVAL="topology.max.error.report.per.interval";
-
-    /**
-     * How often a batch can be emitted in a Trident topology.
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String TOPOLOGY_TRIDENT_BATCH_EMIT_INTERVAL_MILLIS="topology.trident.batch.emit.interval.millis";
-
-    /**
-     * Name of the topology. This config is automatically set by Storm when the topology is submitted.
-     */
-    @isString
-    public final static String TOPOLOGY_NAME="topology.name";
-
-    /**
-     * The principal who submitted a topology
-     */
-    @isString
-    public final static String TOPOLOGY_SUBMITTER_PRINCIPAL = "topology.submitter.principal";
-
-    /**
-     * The local user name of the user who submitted a topology.
-     */
-    @isString
-    public static final String TOPOLOGY_SUBMITTER_USER = "topology.submitter.user";
-
-    /**
-     * Array of components that scheduler should try to place on separate hosts.
-     */
-    @isStringList
-    public static final String TOPOLOGY_SPREAD_COMPONENTS = "topology.spread.components";
-
-    /**
-     * A list of IAutoCredentials that the topology should load and use.
-     */
-    @isStringList
-    public static final String TOPOLOGY_AUTO_CREDENTIALS = "topology.auto-credentials";
-
-    /**
-     * Max pending tuples in one ShellBolt
-     */
-    @NotNull
-    @isInteger
-    @isPositiveNumber
-    public static final String TOPOLOGY_SHELLBOLT_MAX_PENDING="topology.shellbolt.max.pending";
-
-    /**
-     * How long a subprocess can go without heartbeating before the ShellSpout/ShellBolt tries to
-     * suicide itself.
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String TOPOLOGY_SUBPROCESS_TIMEOUT_SECS = "topology.subprocess.timeout.secs";
-
-    /**
-     * Topology central logging sensitivity to determine who has access to logs in central logging system.
-     * The possible values are:
-     *   S0 - Public (open to all users on grid)
-     *   S1 - Restricted
-     *   S2 - Confidential
-     *   S3 - Secret (default)
-     */
-    @isString(acceptedValues = {"S0", "S1", "S2", "S3"})
-    public static final String TOPOLOGY_LOGGING_SENSITIVITY="topology.logging.sensitivity";
-
-    /**
-     * Sets the priority for a topology
-     */
-    @isInteger
-    @isPositiveNumber(includeZero = true)
-    public static final String TOPOLOGY_PRIORITY = "topology.priority";
-
-    /**
-     * The root directory in ZooKeeper for metadata about TransactionalSpouts.
-     */
-    @isString
-    public static final String TRANSACTIONAL_ZOOKEEPER_ROOT="transactional.zookeeper.root";
-
-    /**
-     * The list of zookeeper servers in which to keep the transactional state. If null (which is default),
-     * will use storm.zookeeper.servers
-     */
-    @isStringList
-    public static final String TRANSACTIONAL_ZOOKEEPER_SERVERS="transactional.zookeeper.servers";
-
-    /**
-     * The port to use to connect to the transactional zookeeper servers. If null (which is default),
-     * will use storm.zookeeper.port
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String TRANSACTIONAL_ZOOKEEPER_PORT="transactional.zookeeper.port";
-
-    /**
-     * The user as which the nimbus client should be acquired to perform the operation.
-     */
-    @isString
-    public static final String STORM_DO_AS_USER="storm.doAsUser";
-
-    /**
-     * The number of threads that should be used by the zeromq context in each worker process.
-     */
-    @Deprecated
-    @isInteger
-    public static final String ZMQ_THREADS = "zmq.threads";
-
-    /**
-     * How long a connection should retry sending messages to a target host when
-     * the connection is closed. This is an advanced configuration and can almost
-     * certainly be ignored.
-     */
-    @Deprecated
-    @isInteger
-    public static final String ZMQ_LINGER_MILLIS = "zmq.linger.millis";
-
-    /**
-     * The high water mark for the ZeroMQ push sockets used for networking. Use this config to prevent buffer explosion
-     * on the networking layer.
-     */
-    @Deprecated
-    @isInteger
-    public static final String ZMQ_HWM = "zmq.hwm";
-
-    /**
-     * This value is passed to spawned JVMs (e.g., Nimbus, Supervisor, and Workers)
-     * for the java.library.path value. java.library.path tells the JVM where
-     * to look for native libraries. It is necessary to set this config correctly since
-     * Storm uses the ZeroMQ and JZMQ native libs.
-     */
-    @isString
-    public static final String JAVA_LIBRARY_PATH = "java.library.path";
-
-    /**
-     * The path to use as the zookeeper dir when running a zookeeper server via
-     * "storm dev-zookeeper". This zookeeper instance is only intended for development;
-     * it is not a production grade zookeeper setup.
-     */
-    @isString
-    public static final String DEV_ZOOKEEPER_PATH = "dev.zookeeper.path";
-
-    /**
-     * A map from topology name to the number of machines that should be dedicated for that topology. Set storm.scheduler
-     * to backtype.storm.scheduler.IsolationScheduler to make use of the isolation scheduler.
-     */
-    @isMapEntryType(keyType = String.class, valueType = Number.class)
-    public static final String ISOLATION_SCHEDULER_MACHINES = "isolation.scheduler.machines";
-
-    /**
-     * A map from the user name to the number of machines that user is allowed to use. Set storm.scheduler
-     * to backtype.storm.scheduler.multitenant.MultitenantScheduler
-     */
-    @isMapEntryType(keyType = String.class, valueType = Number.class)
-    public static final String MULTITENANT_SCHEDULER_USER_POOLS = "multitenant.scheduler.user.pools";
-
-    /**
-     * A map of users to another map of the resource guarantees of the user. Used by Resource Aware Scheduler to ensure
-     * per user resource guarantees.
-     */
-    @isMapEntryCustom(keyValidatorClasses = {StringValidator.class}, valueValidatorClasses = {UserResourcePoolEntryValidator.class})
-    public static final String RESOURCE_AWARE_SCHEDULER_USER_POOLS = "resource.aware.scheduler.user.pools";
-
-    /**
-     * The class that specifies the eviction strategy to use in ResourceAwareScheduler
-     */
-    @NotNull
-    @isImplementationOfClass(implementsClass = IEvictionStrategy.class)
-    public static final String RESOURCE_AWARE_SCHEDULER_EVICTION_STRATEGY = "resource.aware.scheduler.eviction.strategy";
-
-    /**
-     * the class that specifies the scheduling priority strategy to use in ResourceAwareScheduler
-     */
-    @NotNull
-    @isImplementationOfClass(implementsClass = ISchedulingPriorityStrategy.class)
-    public static final String RESOURCE_AWARE_SCHEDULER_PRIORITY_STRATEGY = "resource.aware.scheduler.priority.strategy";
-
-    /**
-     * The number of machines that should be used by this topology to isolate it from all others. Set storm.scheduler
-     * to backtype.storm.scheduler.multitenant.MultitenantScheduler
-     */
-    @isInteger
-    @isPositiveNumber
-    public static final String TOPOLOGY_ISOLATED_MACHINES = "topology.isolate.machines";
-
-    /**
-     * Configure timeout milliseconds used for disruptor queue wait strategy. Can be used to tradeoff latency
-     * vs. CPU usage
-     */
-    @isInteger
-    @NotNull
-    public static final String TOPOLOGY_DISRUPTOR_WAIT_TIMEOUT_MILLIS="topology.disruptor.wait.timeout.millis";
-
-    /**
-     * The number of tuples to batch before sending to the next thread.  This number is just an initial suggestion and
-     * the code may adjust it as your topology runs.
-     */
-    @isInteger
-    @isPositiveNumber
-    @NotNull
-    public static final String TOPOLOGY_DISRUPTOR_BATCH_SIZE="topology.disruptor.batch.size";
-
-    /**
-     * The maximum age in milliseconds a batch can be before being sent to the next thread.  This number is just an
-     * initial suggestion and the code may adjust it as your topology runs.
-     */
-    @isInteger
-    @isPositiveNumber
-    @NotNull
-    public static final String TOPOLOGY_DISRUPTOR_BATCH_TIMEOUT_MILLIS="topology.disruptor.batch.timeout.millis";
-
-    /**
-     * Minimum number of nimbus hosts where the code must be replicated before leader nimbus
-     * is allowed to perform topology activation tasks like setting up heartbeats/assignments
-     * and marking the topology as active. The default is 0.
-     */
-    @isNumber
-    public static final String TOPOLOGY_MIN_REPLICATION_COUNT = "topology.min.replication.count";
-
-    /**
-     * Maximum wait time for the nimbus host replication to achieve the topology.min.replication.count.
-     * Once this time has elapsed nimbus will go ahead and perform topology activation tasks even
-     * if the required topology.min.replication.count is not achieved. The default is 0 seconds; a value of
-     * -1 indicates to wait forever.
-     */
-    @isNumber
-    public static final String TOPOLOGY_MAX_REPLICATION_WAIT_TIME_SEC = "topology.max.replication.wait.time.sec";
-
-    /**
-     * How often nimbus's background thread to sync code for missing topologies should run.
-     */
-    @isInteger
-    public static final String NIMBUS_CODE_SYNC_FREQ_SECS = "nimbus.code.sync.freq.secs";
-
-    /**
-     * An implementation of {@link backtype.storm.daemon.JarTransformer} that can be used to transform a jar
-     * file before storm jar runs with it. Use with extreme caution.
-     * If you want to enable a transition between backtype.storm and org.apache.storm to run older topologies
-     * you can set this to org.apache.storm.hack.StormShadeTransformer.  But this is likely to be deprecated in
-     * future releases.
-     */
-    @isString
-    public static final Object CLIENT_JAR_TRANSFORMER = "client.jartransformer.class";
-
-    public static void setClasspath(Map conf, String cp) {
-        conf.put(Config.TOPOLOGY_CLASSPATH, cp);
-    }
-
-    public void setClasspath(String cp) {
-        setClasspath(this, cp);
-    }
-
-    public static void setEnvironment(Map conf, Map env) {
-        conf.put(Config.TOPOLOGY_ENVIRONMENT, env);
-    }
-
-    public void setEnvironment(Map env) {
-        setEnvironment(this, env);
-    }
-
-    public static void setDebug(Map conf, boolean isOn) {
-        conf.put(Config.TOPOLOGY_DEBUG, isOn);
-    }
-
-    public void setDebug(boolean isOn) {
-        setDebug(this, isOn);
-    }
-
-    public static void setNumWorkers(Map conf, int workers) {
-        conf.put(Config.TOPOLOGY_WORKERS, workers);
-    }
-
-    public void setNumWorkers(int workers) {
-        setNumWorkers(this, workers);
-    }
-
-    public static void setNumAckers(Map conf, int numExecutors) {
-        conf.put(Config.TOPOLOGY_ACKER_EXECUTORS, numExecutors);
-    }
-
-    public void setNumAckers(int numExecutors) {
-        setNumAckers(this, numExecutors);
-    }
-
-    public static void setNumEventLoggers(Map conf, int numExecutors) {
-        conf.put(Config.TOPOLOGY_EVENTLOGGER_EXECUTORS, numExecutors);
-    }
-
-    public void setNumEventLoggers(int numExecutors) {
-        setNumEventLoggers(this, numExecutors);
-    }
-
-
-    public static void setMessageTimeoutSecs(Map conf, int secs) {
-        conf.put(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS, secs);
-    }
-
-    public void setMessageTimeoutSecs(int secs) {
-        setMessageTimeoutSecs(this, secs);
-    }
-
-    public static void registerSerialization(Map conf, Class klass) {
-        getRegisteredSerializations(conf).add(klass.getName());
-    }
-
-    public void registerSerialization(Class klass) {
-        registerSerialization(this, klass);
-    }
-
-    public static void registerSerialization(Map conf, Class klass, Class<? extends Serializer> serializerClass) {
-        Map<String, String> register = new HashMap<String, String>();
-        register.put(klass.getName(), serializerClass.getName());
-        getRegisteredSerializations(conf).add(register);
-    }
-
-    public void registerSerialization(Class klass, Class<? extends Serializer> serializerClass) {
-        registerSerialization(this, klass, serializerClass);
-    }
-
-    public static void registerMetricsConsumer(Map conf, Class klass, Object argument, long parallelismHint) {
-        HashMap m = new HashMap();
-        m.put("class", klass.getCanonicalName());
-        m.put("parallelism.hint", parallelismHint);
-        m.put("argument", argument);
-
-        List l = (List)conf.get(TOPOLOGY_METRICS_CONSUMER_REGISTER);
-        if (l == null) { l = new ArrayList(); }
-        l.add(m);
-        conf.put(TOPOLOGY_METRICS_CONSUMER_REGISTER, l);
-    }
-
-    public void registerMetricsConsumer(Class klass, Object argument, long parallelismHint) {
-        registerMetricsConsumer(this, klass, argument, parallelismHint);
-    }
-
-    public static void registerMetricsConsumer(Map conf, Class klass, long parallelismHint) {
-        registerMetricsConsumer(conf, klass, null, parallelismHint);
-    }
-
-    public void registerMetricsConsumer(Class klass, long parallelismHint) {
-        registerMetricsConsumer(this, klass, parallelismHint);
-    }
-
-    public static void registerMetricsConsumer(Map conf, Class klass) {
-        registerMetricsConsumer(conf, klass, null, 1L);
-    }
-
-    public void registerMetricsConsumer(Class klass) {
-        registerMetricsConsumer(this, klass);
-    }
-
-    public static void registerDecorator(Map conf, Class<? extends IKryoDecorator> klass) {
-        getRegisteredDecorators(conf).add(klass.getName());
-    }
-
-    public void registerDecorator(Class<? extends IKryoDecorator> klass) {
-        registerDecorator(this, klass);
-    }
-
-    public static void setKryoFactory(Map conf, Class<? extends IKryoFactory> klass) {
-        conf.put(Config.TOPOLOGY_KRYO_FACTORY, klass.getName());
-    }
-
-    public void setKryoFactory(Class<? extends IKryoFactory> klass) {
-        setKryoFactory(this, klass);
-    }
-
-    public static void setSkipMissingKryoRegistrations(Map conf, boolean skip) {
-        conf.put(Config.TOPOLOGY_SKIP_MISSING_KRYO_REGISTRATIONS, skip);
-    }
-
-    public void setSkipMissingKryoRegistrations(boolean skip) {
-        setSkipMissingKryoRegistrations(this, skip);
-    }
-
-    public static void setMaxTaskParallelism(Map conf, int max) {
-        conf.put(Config.TOPOLOGY_MAX_TASK_PARALLELISM, max);
-    }
-
-    public void setMaxTaskParallelism(int max) {
-        setMaxTaskParallelism(this, max);
-    }
-
-    public static void setMaxSpoutPending(Map conf, int max) {
-        conf.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, max);
-    }
-
-  

<TRUNCATED>
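
The setter and register helpers shown in this deleted fragment (setNumWorkers, setDebug, setMessageTimeoutSecs, setMaxSpoutPending, and the replication keys documented above) are intended to carry over to the org.apache.storm.Config replacement under STORM-1202. A minimal usage sketch, assuming the new class keeps the same helpers; the worker counts, timeouts, and replication values below are illustrative only:

    import org.apache.storm.Config;

    public class ConfigSketch {
        public static Config buildConf() {
            Config conf = new Config();
            conf.setDebug(false);            // topology.debug
            conf.setNumWorkers(4);           // topology.workers
            conf.setNumAckers(2);            // topology.acker.executors
            conf.setMessageTimeoutSecs(60);  // topology.message.timeout.secs
            conf.setMaxSpoutPending(1000);   // topology.max.spout.pending
            // Require 2 replicas of the topology code and wait at most 60s for them
            // before activation (see the replication javadoc earlier in this fragment).
            conf.put(Config.TOPOLOGY_MIN_REPLICATION_COUNT, 2);
            conf.put(Config.TOPOLOGY_MAX_REPLICATION_WAIT_TIME_SEC, 60);
            return conf;
        }
    }

The registerSerialization and registerMetricsConsumer helpers above follow the same pattern: a static variant that mutates a plain conf Map, plus an instance variant that delegates to it.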

[13/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/ui/core.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/ui/core.clj b/storm-core/src/clj/org/apache/storm/ui/core.clj
new file mode 100644
index 0000000..b309b2c
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/ui/core.clj
@@ -0,0 +1,1273 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+
+(ns org.apache.storm.ui.core
+  (:use compojure.core)
+  (:use [clojure.java.shell :only [sh]])
+  (:use ring.middleware.reload
+        ring.middleware.multipart-params)
+  (:use [ring.middleware.json :only [wrap-json-params]])
+  (:use [hiccup core page-helpers])
+  (:use [org.apache.storm config util log stats zookeeper converter])
+  (:use [org.apache.storm.ui helpers])
+  (:use [org.apache.storm.daemon [common :only [ACKER-COMPONENT-ID ACKER-INIT-STREAM-ID ACKER-ACK-STREAM-ID
+                                              ACKER-FAIL-STREAM-ID mk-authorization-handler
+                                              start-metrics-reporters]]])
+  (:import [org.apache.storm.utils Utils]
+           [org.apache.storm.generated NimbusSummary])
+  (:use [clojure.string :only [blank? lower-case trim split]])
+  (:import [org.apache.storm.generated ExecutorSpecificStats
+            ExecutorStats ExecutorSummary ExecutorInfo TopologyInfo SpoutStats BoltStats
+            ErrorInfo ClusterSummary SupervisorSummary TopologySummary
+            Nimbus$Client StormTopology GlobalStreamId RebalanceOptions
+            KillOptions GetInfoOptions NumErrorsChoice DebugOptions TopologyPageInfo
+            TopologyStats CommonAggregateStats ComponentAggregateStats
+            ComponentType BoltAggregateStats SpoutAggregateStats
+            ExecutorAggregateStats SpecificAggregateStats ComponentPageInfo
+            LogConfig LogLevel LogLevelAction])
+  (:import [org.apache.storm.security.auth AuthUtils ReqContext])
+  (:import [org.apache.storm.generated AuthorizationException ProfileRequest ProfileAction NodeInfo])
+  (:import [org.apache.storm.security.auth AuthUtils])
+  (:import [org.apache.storm.utils VersionInfo])
+  (:import [org.apache.storm Config])
+  (:import [java.io File])
+  (:require [compojure.route :as route]
+            [compojure.handler :as handler]
+            [ring.util.response :as resp]
+            [org.apache.storm [thrift :as thrift]])
+  (:require [metrics.meters :refer [defmeter mark!]])
+  (:import [org.apache.commons.lang StringEscapeUtils])
+  (:import [org.apache.logging.log4j Level])
+  (:gen-class))
+
+(def ^:dynamic *STORM-CONF* (read-storm-config))
+(def ^:dynamic *UI-ACL-HANDLER* (mk-authorization-handler (*STORM-CONF* NIMBUS-AUTHORIZER) *STORM-CONF*))
+(def ^:dynamic *UI-IMPERSONATION-HANDLER* (mk-authorization-handler (*STORM-CONF* NIMBUS-IMPERSONATION-AUTHORIZER) *STORM-CONF*))
+(def http-creds-handler (AuthUtils/GetUiHttpCredentialsPlugin *STORM-CONF*))
+(def STORM-VERSION (VersionInfo/getVersion))
+
+(defmeter ui:num-cluster-configuration-http-requests)
+(defmeter ui:num-cluster-summary-http-requests)
+(defmeter ui:num-nimbus-summary-http-requests)
+(defmeter ui:num-supervisor-summary-http-requests)
+(defmeter ui:num-all-topologies-summary-http-requests)
+(defmeter ui:num-topology-page-http-requests)
+(defmeter ui:num-build-visualization-http-requests)
+(defmeter ui:num-mk-visualization-data-http-requests)
+(defmeter ui:num-component-page-http-requests)
+(defmeter ui:num-log-config-http-requests)
+(defmeter ui:num-activate-topology-http-requests)
+(defmeter ui:num-deactivate-topology-http-requests)
+(defmeter ui:num-debug-topology-http-requests)
+(defmeter ui:num-component-op-response-http-requests)
+(defmeter ui:num-topology-op-response-http-requests)
+(defmeter ui:num-topology-op-response-http-requests)
+(defmeter ui:num-topology-op-response-http-requests)
+(defmeter ui:num-main-page-http-requests)
+
+(defn assert-authorized-user
+  ([op]
+    (assert-authorized-user op nil))
+  ([op topology-conf]
+    (let [context (ReqContext/context)]
+      (if (.isImpersonating context)
+        (if *UI-IMPERSONATION-HANDLER*
+            (if-not (.permit *UI-IMPERSONATION-HANDLER* context op topology-conf)
+              (let [principal (.principal context)
+                    real-principal (.realPrincipal context)
+                    user (if principal (.getName principal) "unknown")
+                    real-user (if real-principal (.getName real-principal) "unknown")
+                    remote-address (.remoteAddress context)]
+                (throw (AuthorizationException.
+                         (str "user '" real-user "' is not authorized to impersonate user '" user "' from host '" remote-address "'. Please
+                         see SECURITY.MD to learn how to configure impersonation ACL.")))))
+          (log-warn " principal " (.realPrincipal context) " is trying to impersonate " (.principal context) " but "
+            NIMBUS-IMPERSONATION-AUTHORIZER " has no authorizer configured. This is a potential security hole.
+            Please see SECURITY.MD to learn how to configure an impersonation authorizer.")))
+
+      (if *UI-ACL-HANDLER*
+       (if-not (.permit *UI-ACL-HANDLER* context op topology-conf)
+         (let [principal (.principal context)
+               user (if principal (.getName principal) "unknown")]
+           (throw (AuthorizationException.
+                   (str "UI request '" op "' for '" user "' user is not authorized")))))))))
+
+
+(defn assert-authorized-profiler-action
+  [op]
+  (if-not (*STORM-CONF* WORKER-PROFILER-ENABLED)
+    (throw (AuthorizationException.
+             (str "UI request for profiler action '" op "' is disabled.")))))
+
+
+(defn executor-summary-type
+  [topology ^ExecutorSummary s]
+  (component-type topology (.get_component_id s)))
+
+(defn is-ack-stream
+  [stream]
+  (let [acker-streams
+        [ACKER-INIT-STREAM-ID
+         ACKER-ACK-STREAM-ID
+         ACKER-FAIL-STREAM-ID]]
+    (every? #(not= %1 stream) acker-streams)))
+
+(defn spout-summary?
+  [topology s]
+  (= :spout (executor-summary-type topology s)))
+
+(defn bolt-summary?
+  [topology s]
+  (= :bolt (executor-summary-type topology s)))
+
+(defn group-by-comp
+  [summs]
+  (let [ret (group-by #(.get_component_id ^ExecutorSummary %) summs)]
+    (into (sorted-map) ret )))
+
+(defn logviewer-link [host fname secure?]
+  (if (and secure? (*STORM-CONF* LOGVIEWER-HTTPS-PORT))
+    (url-format "https://%s:%s/log?file=%s"
+      host
+      (*STORM-CONF* LOGVIEWER-HTTPS-PORT)
+      fname)
+    (url-format "http://%s:%s/log?file=%s"
+      host
+      (*STORM-CONF* LOGVIEWER-PORT)
+      fname)))
+
+(defn event-log-link
+  [topology-id component-id host port secure?]
+  (logviewer-link host (event-logs-filename topology-id port) secure?))
+
+(defn worker-log-link [host port topology-id secure?]
+  (let [fname (logs-filename topology-id port)]
+    (logviewer-link host fname secure?)))
+
+(defn nimbus-log-link [host port]
+  (url-format "http://%s:%s/daemonlog?file=nimbus.log" host (*STORM-CONF* LOGVIEWER-PORT) port))
+
+(defn get-error-time
+  [error]
+  (if error
+    (time-delta (.get_error_time_secs ^ErrorInfo error))))
+
+(defn get-error-data
+  [error]
+  (if error
+    (error-subset (.get_error ^ErrorInfo error))
+    ""))
+
+(defn get-error-port
+  [error]
+  (if error
+    (.get_port ^ErrorInfo error)
+    ""))
+
+(defn get-error-host
+  [error]
+  (if error
+    (.get_host ^ErrorInfo error)
+    ""))
+
+(defn get-error-time
+  [error]
+  (if error
+    (.get_error_time_secs ^ErrorInfo error)
+    ""))
+
+(defn worker-dump-link [host port topology-id]
+  (url-format "http://%s:%s/dumps/%s/%s"
+              (url-encode host)
+              (*STORM-CONF* LOGVIEWER-PORT)
+              (url-encode topology-id)
+              (str (url-encode host) ":" (url-encode port))))
+
+(defn stats-times
+  [stats-map]
+  (sort-by #(Integer/parseInt %)
+           (-> stats-map
+               clojurify-structure
+               (dissoc ":all-time")
+               keys)))
+
+(defn window-hint
+  [window]
+  (if (= window ":all-time")
+    "All time"
+    (pretty-uptime-sec window)))
+
+(defn sanitize-stream-name
+  [name]
+  (let [sym-regex #"(?![A-Za-z_\-:\.])."]
+    (str
+     (if (re-find #"^[A-Za-z]" name)
+       (clojure.string/replace name sym-regex "_")
+       (clojure.string/replace (str \s name) sym-regex "_"))
+     (hash name))))
+
+(defn sanitize-transferred
+  [transferred]
+  (into {}
+        (for [[time, stream-map] transferred]
+          [time, (into {}
+                       (for [[stream, trans] stream-map]
+                         [(sanitize-stream-name stream), trans]))])))
+
+(defn visualization-data
+  [spout-bolt spout-comp-summs bolt-comp-summs window storm-id]
+  (let [components (for [[id spec] spout-bolt]
+            [id
+             (let [inputs (.get_inputs (.get_common spec))
+                   bolt-summs (get bolt-comp-summs id)
+                   spout-summs (get spout-comp-summs id)
+                   bolt-cap (if bolt-summs
+                              (compute-bolt-capacity bolt-summs)
+                              0)]
+               {:type (if bolt-summs "bolt" "spout")
+                :capacity bolt-cap
+                :latency (if bolt-summs
+                           (get-in
+                             (bolt-streams-stats bolt-summs true)
+                             [:process-latencies window])
+                           (get-in
+                             (spout-streams-stats spout-summs true)
+                             [:complete-latencies window]))
+                :transferred (or
+                               (get-in
+                                 (spout-streams-stats spout-summs true)
+                                 [:transferred window])
+                               (get-in
+                                 (bolt-streams-stats bolt-summs true)
+                                 [:transferred window]))
+                :stats (let [mapfn (fn [dat]
+                                     (map (fn [^ExecutorSummary summ]
+                                            {:host (.get_host summ)
+                                             :port (.get_port summ)
+                                             :uptime_secs (.get_uptime_secs summ)
+                                             :transferred (if-let [stats (.get_stats summ)]
+                                                            (sanitize-transferred (.get_transferred stats)))})
+                                          dat))]
+                         (if bolt-summs
+                           (mapfn bolt-summs)
+                           (mapfn spout-summs)))
+                :link (url-format "/component.html?id=%s&topology_id=%s" id storm-id)
+                :inputs (for [[global-stream-id group] inputs]
+                          {:component (.get_componentId global-stream-id)
+                           :stream (.get_streamId global-stream-id)
+                           :sani-stream (sanitize-stream-name (.get_streamId global-stream-id))
+                           :grouping (clojure.core/name (thrift/grouping-type group))})})])]
+    (into {} (doall components))))
+
+(defn stream-boxes [datmap]
+  (let [filter-fn (mk-include-sys-fn true)
+        streams
+        (vec (doall (distinct
+                     (apply concat
+                            (for [[k v] datmap]
+                              (for [m (get v :inputs)]
+                                {:stream (get m :stream)
+                                 :sani-stream (get m :sani-stream)
+                                 :checked (is-ack-stream (get m :stream))}))))))]
+    (map (fn [row]
+           {:row row}) (partition 4 4 nil streams))))
+
+(defn- get-topology-info
+  ([^Nimbus$Client nimbus id]
+    (.getTopologyInfo nimbus id))
+  ([^Nimbus$Client nimbus id options]
+    (.getTopologyInfoWithOpts nimbus id options)))
+
+(defn mk-visualization-data
+  [id window include-sys?]
+  (thrift/with-configured-nimbus-connection
+    nimbus
+    (let [window (if window window ":all-time")
+          topology (.getTopology ^Nimbus$Client nimbus id)
+          spouts (.get_spouts topology)
+          bolts (.get_bolts topology)
+          summ (->> (doto
+                      (GetInfoOptions.)
+                      (.set_num_err_choice NumErrorsChoice/NONE))
+                    (.getTopologyInfoWithOpts ^Nimbus$Client nimbus id))
+          execs (.get_executors summ)
+          spout-summs (filter (partial spout-summary? topology) execs)
+          bolt-summs (filter (partial bolt-summary? topology) execs)
+          spout-comp-summs (group-by-comp spout-summs)
+          bolt-comp-summs (group-by-comp bolt-summs)
+          bolt-comp-summs (filter-key (mk-include-sys-fn include-sys?)
+                                      bolt-comp-summs)]
+      (visualization-data
+       (merge (hashmap-to-persistent spouts)
+              (hashmap-to-persistent bolts))
+       spout-comp-summs bolt-comp-summs window id))))
+
+(defn validate-tplg-submit-params [params]
+  (let [tplg-jar-file (params :topologyJar)
+        tplg-config (if (not-nil? (params :topologyConfig)) (from-json (params :topologyConfig)))]
+    (cond
+     (nil? tplg-jar-file) {:valid false :error "missing topology jar file"}
+     (nil? tplg-config) {:valid false :error "missing topology config"}
+     (nil? (tplg-config "topologyMainClass")) {:valid false :error "topologyMainClass missing in topologyConfig"}
+     :else {:valid true})))
+
+(defn run-tplg-submit-cmd [tplg-jar-file tplg-config user]
+  (let [tplg-main-class (if (not-nil? tplg-config) (trim (tplg-config "topologyMainClass")))
+        tplg-main-class-args (if (not-nil? tplg-config) (tplg-config "topologyMainClassArgs"))
+        storm-home (System/getProperty "storm.home")
+        storm-conf-dir (str storm-home file-path-separator "conf")
+        storm-log-dir (if (not-nil? (*STORM-CONF* "storm.log.dir")) (*STORM-CONF* "storm.log.dir")
+                          (str storm-home file-path-separator "logs"))
+        storm-libs (str storm-home file-path-separator "lib" file-path-separator "*")
+        java-cmd (str (System/getProperty "java.home") file-path-separator "bin" file-path-separator "java")
+        storm-cmd (str storm-home file-path-separator "bin" file-path-separator "storm")
+        tplg-cmd-response (apply sh
+                            (flatten
+                              [storm-cmd "jar" tplg-jar-file tplg-main-class
+                                (if (not-nil? tplg-main-class-args) tplg-main-class-args [])
+                                (if (not= user "unknown") (str "-c storm.doAsUser=" user) [])]))]
+    (log-message "tplg-cmd-response " tplg-cmd-response)
+    (cond
+     (= (tplg-cmd-response :exit) 0) {"status" "success"}
+     (and (not= (tplg-cmd-response :exit) 0)
+          (not-nil? (re-find #"already exists on cluster" (tplg-cmd-response :err)))) {"status" "failed" "error" "Topology with the same name exists in cluster"}
+     (not= (tplg-cmd-response :exit) 0) {"status" "failed" "error" (clojure.string/trim-newline (tplg-cmd-response :err))}
+     :else {"status" "success" "response" "topology deployed"})))
+
+(defn cluster-configuration []
+  (thrift/with-configured-nimbus-connection nimbus
+    (.getNimbusConf ^Nimbus$Client nimbus)))
+
+(defn topology-history-info
+  ([user]
+    (thrift/with-configured-nimbus-connection nimbus
+      (topology-history-info (.getTopologyHistory ^Nimbus$Client nimbus user) user)))
+  ([history user]
+    {"topo-history"
+     (into [] (.get_topo_ids history))}))
+
+(defn cluster-summary
+  ([user]
+     (thrift/with-configured-nimbus-connection nimbus
+        (cluster-summary (.getClusterInfo ^Nimbus$Client nimbus) user)))
+  ([^ClusterSummary summ user]
+     (let [sups (.get_supervisors summ)
+           used-slots (reduce + (map #(.get_num_used_workers ^SupervisorSummary %) sups))
+           total-slots (reduce + (map #(.get_num_workers ^SupervisorSummary %) sups))
+           free-slots (- total-slots used-slots)
+           topologies (.get_topologies_size summ)
+           total-tasks (->> (.get_topologies summ)
+                            (map #(.get_num_tasks ^TopologySummary %))
+                            (reduce +))
+           total-executors (->> (.get_topologies summ)
+                                (map #(.get_num_executors ^TopologySummary %))
+                                (reduce +))]
+       {"user" user
+        "stormVersion" STORM-VERSION
+        "supervisors" (count sups)
+        "topologies" topologies
+        "slotsTotal" total-slots
+        "slotsUsed"  used-slots
+        "slotsFree" free-slots
+        "executorsTotal" total-executors
+        "tasksTotal" total-tasks })))
+
+(defn convert-to-nimbus-summary[nimbus-seed]
+  (let [[host port] (.split nimbus-seed ":")]
+    {
+      "host" host
+      "port" port
+      "nimbusLogLink" (nimbus-log-link host port)
+      "status" "Offline"
+      "version" "Not applicable"
+      "nimbusUpTime" "Not applicable"
+      "nimbusUptimeSeconds" "Not applicable"}
+    ))
+
+(defn nimbus-summary
+  ([]
+    (thrift/with-configured-nimbus-connection nimbus
+      (nimbus-summary
+        (.get_nimbuses (.getClusterInfo ^Nimbus$Client nimbus)))))
+  ([nimbuses]
+    (let [nimbus-seeds (set (map #(str %1 ":" (*STORM-CONF* NIMBUS-THRIFT-PORT)) (set (*STORM-CONF* NIMBUS-SEEDS))))
+          alive-nimbuses (set (map #(str (.get_host %1) ":" (.get_port %1)) nimbuses))
+          offline-nimbuses (clojure.set/difference nimbus-seeds alive-nimbuses)
+          offline-nimbuses-summary (map #(convert-to-nimbus-summary %1) offline-nimbuses)]
+      {"nimbuses"
+       (concat offline-nimbuses-summary
+       (for [^NimbusSummary n nimbuses
+             :let [uptime (.get_uptime_secs n)]]
+         {
+          "host" (.get_host n)
+          "port" (.get_port n)
+          "nimbusLogLink" (nimbus-log-link (.get_host n) (.get_port n))
+          "status" (if (.is_isLeader n) "Leader" "Not a Leader")
+          "version" (.get_version n)
+          "nimbusUpTime" (pretty-uptime-sec uptime)
+          "nimbusUpTimeSeconds" uptime}))})))
+
+(defn supervisor-summary
+  ([]
+   (thrift/with-configured-nimbus-connection nimbus
+                (supervisor-summary
+                  (.get_supervisors (.getClusterInfo ^Nimbus$Client nimbus)))))
+  ([summs]
+   {"supervisors"
+    (for [^SupervisorSummary s summs]
+      {"id" (.get_supervisor_id s)
+       "host" (.get_host s)
+       "uptime" (pretty-uptime-sec (.get_uptime_secs s))
+       "uptimeSeconds" (.get_uptime_secs s)
+       "slotsTotal" (.get_num_workers s)
+       "slotsUsed" (.get_num_used_workers s)
+       "totalMem" (get (.get_total_resources s) Config/SUPERVISOR_MEMORY_CAPACITY_MB)
+       "totalCpu" (get (.get_total_resources s) Config/SUPERVISOR_CPU_CAPACITY)
+       "usedMem" (.get_used_mem s)
+       "usedCpu" (.get_used_cpu s)
+       "version" (.get_version s)})
+    "schedulerDisplayResource" (*STORM-CONF* Config/SCHEDULER_DISPLAY_RESOURCE)}))
+
+(defn all-topologies-summary
+  ([]
+   (thrift/with-configured-nimbus-connection
+     nimbus
+     (all-topologies-summary
+       (.get_topologies (.getClusterInfo ^Nimbus$Client nimbus)))))
+  ([summs]
+   {"topologies"
+    (for [^TopologySummary t summs]
+      {
+       "id" (.get_id t)
+       "encodedId" (url-encode (.get_id t))
+       "owner" (.get_owner t)
+       "name" (.get_name t)
+       "status" (.get_status t)
+       "uptime" (pretty-uptime-sec (.get_uptime_secs t))
+       "uptimeSeconds" (.get_uptime_secs t)
+       "tasksTotal" (.get_num_tasks t)
+       "workersTotal" (.get_num_workers t)
+       "executorsTotal" (.get_num_executors t)
+       "replicationCount" (.get_replication_count t)
+       "schedulerInfo" (.get_sched_status t)
+       "requestedMemOnHeap" (.get_requested_memonheap t)
+       "requestedMemOffHeap" (.get_requested_memoffheap t)
+       "requestedTotalMem" (+ (.get_requested_memonheap t) (.get_requested_memoffheap t))
+       "requestedCpu" (.get_requested_cpu t)
+       "assignedMemOnHeap" (.get_assigned_memonheap t)
+       "assignedMemOffHeap" (.get_assigned_memoffheap t)
+       "assignedTotalMem" (+ (.get_assigned_memonheap t) (.get_assigned_memoffheap t))
+       "assignedCpu" (.get_assigned_cpu t)})
+    "schedulerDisplayResource" (*STORM-CONF* Config/SCHEDULER_DISPLAY_RESOURCE)}))
+
+(defn topology-stats [window stats]
+  (let [times (stats-times (:emitted stats))
+        display-map (into {} (for [t times] [t pretty-uptime-sec]))
+        display-map (assoc display-map ":all-time" (fn [_] "All time"))]
+    (for [w (concat times [":all-time"])
+          :let [disp ((display-map w) w)]]
+      {"windowPretty" disp
+       "window" w
+       "emitted" (get-in stats [:emitted w])
+       "transferred" (get-in stats [:transferred w])
+       "completeLatency" (float-str (get-in stats [:complete-latencies w]))
+       "acked" (get-in stats [:acked w])
+       "failed" (get-in stats [:failed w])})))
+
+(defn build-visualization [id window include-sys?]
+  (thrift/with-configured-nimbus-connection nimbus
+    (let [window (if window window ":all-time")
+          topology-info (->> (doto
+                               (GetInfoOptions.)
+                               (.set_num_err_choice NumErrorsChoice/ONE))
+                             (.getTopologyInfoWithOpts ^Nimbus$Client nimbus
+                                                       id))
+          storm-topology (.getTopology ^Nimbus$Client nimbus id)
+          spout-executor-summaries (filter (partial spout-summary? storm-topology) (.get_executors topology-info))
+          bolt-executor-summaries (filter (partial bolt-summary? storm-topology) (.get_executors topology-info))
+          spout-comp-id->executor-summaries (group-by-comp spout-executor-summaries)
+          bolt-comp-id->executor-summaries (group-by-comp bolt-executor-summaries)
+          bolt-comp-id->executor-summaries (filter-key (mk-include-sys-fn include-sys?) bolt-comp-id->executor-summaries)
+          id->spout-spec (.get_spouts storm-topology)
+          id->bolt (.get_bolts storm-topology)
+          visualizer-data (visualization-data (merge (hashmap-to-persistent id->spout-spec)
+                                                     (hashmap-to-persistent id->bolt))
+                                              spout-comp-id->executor-summaries
+                                              bolt-comp-id->executor-summaries
+                                              window
+                                              id)]
+       {"visualizationTable" (stream-boxes visualizer-data)})))
+
+(defn- get-error-json
+  [topo-id error-info secure?]
+  (let [host (get-error-host error-info)
+        port (get-error-port error-info)]
+    {"lastError" (get-error-data error-info)
+     "errorTime" (get-error-time error-info)
+     "errorHost" host
+     "errorPort" port
+     "errorLapsedSecs" (get-error-time error-info)
+     "errorWorkerLogLink" (worker-log-link host port topo-id secure?)}))
+
+(defn- common-agg-stats-json
+  "Returns a JSON representation of a common aggregated statistics."
+  [^CommonAggregateStats common-stats]
+  {"executors" (.get_num_executors common-stats)
+   "tasks" (.get_num_tasks common-stats)
+   "emitted" (.get_emitted common-stats)
+   "transferred" (.get_transferred common-stats)
+   "acked" (.get_acked common-stats)
+   "failed" (.get_failed common-stats)})
+
+(defmulti comp-agg-stats-json
+  "Returns a JSON representation of aggregated statistics."
+  (fn [_ _ [id ^ComponentAggregateStats s]] (.get_type s)))
+
+(defmethod comp-agg-stats-json ComponentType/SPOUT
+  [topo-id secure? [id ^ComponentAggregateStats s]]
+  (let [^SpoutAggregateStats ss (.. s get_specific_stats get_spout)
+        cs (.get_common_stats s)]
+    (merge
+      (common-agg-stats-json cs)
+      (get-error-json topo-id (.get_last_error s) secure?)
+      {"spoutId" id
+       "encodedSpoutId" (url-encode id)
+       "completeLatency" (float-str (.get_complete_latency_ms ss))})))
+
+(defmethod comp-agg-stats-json ComponentType/BOLT
+  [topo-id secure? [id ^ComponentAggregateStats s]]
+  (let [^BoltAggregateStats ss (.. s get_specific_stats get_bolt)
+        cs (.get_common_stats s)]
+    (merge
+      (common-agg-stats-json cs)
+      (get-error-json topo-id (.get_last_error s) secure?)
+      {"boltId" id
+       "encodedBoltId" (url-encode id)
+       "capacity" (float-str (.get_capacity ss))
+       "executeLatency" (float-str (.get_execute_latency_ms ss))
+       "executed" (.get_executed ss)
+       "processLatency" (float-str (.get_process_latency_ms ss))})))
+
+(defn- unpack-topology-page-info
+  "Unpacks the serialized object to data structures"
+  [^TopologyPageInfo topo-info window secure?]
+  (let [id (.get_id topo-info)
+        ^TopologyStats topo-stats (.get_topology_stats topo-info)
+        stat->window->number
+          {:emitted (.get_window_to_emitted topo-stats)
+           :transferred (.get_window_to_transferred topo-stats)
+           :complete-latencies (.get_window_to_complete_latencies_ms topo-stats)
+           :acked (.get_window_to_acked topo-stats)
+           :failed (.get_window_to_failed topo-stats)}
+        topo-stats (topology-stats window stat->window->number)
+        [debugEnabled
+         samplingPct] (if-let [debug-opts (.get_debug_options topo-info)]
+                        [(.is_enable debug-opts)
+                         (.get_samplingpct debug-opts)])
+        uptime (.get_uptime_secs topo-info)]
+    {"id" id
+     "encodedId" (url-encode id)
+     "owner" (.get_owner topo-info)
+     "name" (.get_name topo-info)
+     "status" (.get_status topo-info)
+     "uptime" (pretty-uptime-sec uptime)
+     "uptimeSeconds" uptime
+     "tasksTotal" (.get_num_tasks topo-info)
+     "workersTotal" (.get_num_workers topo-info)
+     "executorsTotal" (.get_num_executors topo-info)
+     "schedulerInfo" (.get_sched_status topo-info)
+     "requestedMemOnHeap" (.get_requested_memonheap topo-info)
+     "requestedMemOffHeap" (.get_requested_memoffheap topo-info)
+     "requestedCpu" (.get_requested_cpu topo-info)
+     "assignedMemOnHeap" (.get_assigned_memonheap topo-info)
+     "assignedMemOffHeap" (.get_assigned_memoffheap topo-info)
+     "assignedTotalMem" (+ (.get_assigned_memonheap topo-info) (.get_assigned_memoffheap topo-info))
+     "assignedCpu" (.get_assigned_cpu topo-info)
+     "topologyStats" topo-stats
+     "spouts" (map (partial comp-agg-stats-json id secure?)
+                   (.get_id_to_spout_agg_stats topo-info))
+     "bolts" (map (partial comp-agg-stats-json id secure?)
+                  (.get_id_to_bolt_agg_stats topo-info))
+     "configuration" (.get_topology_conf topo-info)
+     "debug" (or debugEnabled false)
+     "samplingPct" (or samplingPct 10)
+     "replicationCount" (.get_replication_count topo-info)}))
+
+(defn exec-host-port
+  [executors]
+  (for [^ExecutorSummary e executors]
+    {"host" (.get_host e)
+     "port" (.get_port e)}))
+
+(defn worker-host-port
+  "Get the set of all worker host/ports"
+  [id]
+  (thrift/with-configured-nimbus-connection nimbus
+    (distinct (exec-host-port (.get_executors (get-topology-info nimbus id))))))
+
+(defn topology-page [id window include-sys? user secure?]
+  (thrift/with-configured-nimbus-connection nimbus
+    (let [window (if window window ":all-time")
+          window-hint (window-hint window)
+          topo-page-info (.getTopologyPageInfo ^Nimbus$Client nimbus
+                                               id
+                                               window
+                                               include-sys?)
+          topology-conf (from-json (.get_topology_conf topo-page-info))
+          msg-timeout (topology-conf TOPOLOGY-MESSAGE-TIMEOUT-SECS)]
+      (merge
+       (unpack-topology-page-info topo-page-info window secure?)
+       {"user" user
+        "window" window
+        "windowHint" window-hint
+        "msgTimeout" msg-timeout
+        "configuration" topology-conf
+        "visualizationTable" []
+        "schedulerDisplayResource" (*STORM-CONF* Config/SCHEDULER_DISPLAY_RESOURCE)}))))
+
+(defn component-errors
+  [errors-list topology-id secure?]
+  (let [errors (->> errors-list
+                    (sort-by #(.get_error_time_secs ^ErrorInfo %))
+                    reverse)]
+    {"componentErrors"
+     (for [^ErrorInfo e errors]
+       {"time" (* 1000 (long (.get_error_time_secs e)))
+        "errorHost" (.get_host e)
+        "errorPort"  (.get_port e)
+        "errorWorkerLogLink"  (worker-log-link (.get_host e)
+                                               (.get_port e)
+                                               topology-id
+                                               secure?)
+        "errorLapsedSecs" (get-error-time e)
+        "error" (.get_error e)})}))
+
+(defmulti unpack-comp-agg-stat
+  (fn [[_ ^ComponentAggregateStats s]] (.get_type s)))
+
+(defmethod unpack-comp-agg-stat ComponentType/BOLT
+  [[window ^ComponentAggregateStats s]]
+  (let [^CommonAggregateStats comm-s (.get_common_stats s)
+        ^SpecificAggregateStats spec-s (.get_specific_stats s)
+        ^BoltAggregateStats bolt-s (.get_bolt spec-s)]
+    {"window" window
+     "windowPretty" (window-hint window)
+     "emitted" (.get_emitted comm-s)
+     "transferred" (.get_transferred comm-s)
+     "acked" (.get_acked comm-s)
+     "failed" (.get_failed comm-s)
+     "executeLatency" (float-str (.get_execute_latency_ms bolt-s))
+     "processLatency"  (float-str (.get_process_latency_ms bolt-s))
+     "executed" (.get_executed bolt-s)
+     "capacity" (float-str (.get_capacity bolt-s))}))
+
+(defmethod unpack-comp-agg-stat ComponentType/SPOUT
+  [[window ^ComponentAggregateStats s]]
+  (let [^CommonAggregateStats comm-s (.get_common_stats s)
+        ^SpecificAggregateStats spec-s (.get_specific_stats s)
+        ^SpoutAggregateStats spout-s (.get_spout spec-s)]
+    {"window" window
+     "windowPretty" (window-hint window)
+     "emitted" (.get_emitted comm-s)
+     "transferred" (.get_transferred comm-s)
+     "acked" (.get_acked comm-s)
+     "failed" (.get_failed comm-s)
+     "completeLatency" (float-str (.get_complete_latency_ms spout-s))}))
+
+(defn- unpack-bolt-input-stat
+  [[^GlobalStreamId s ^ComponentAggregateStats stats]]
+  (let [^SpecificAggregateStats sas (.get_specific_stats stats)
+        ^BoltAggregateStats bas (.get_bolt sas)
+        ^CommonAggregateStats cas (.get_common_stats stats)
+        comp-id (.get_componentId s)]
+    {"component" comp-id
+     "encodedComponentId" (url-encode comp-id)
+     "stream" (.get_streamId s)
+     "executeLatency" (float-str (.get_execute_latency_ms bas))
+     "processLatency" (float-str (.get_process_latency_ms bas))
+     "executed" (nil-to-zero (.get_executed bas))
+     "acked" (nil-to-zero (.get_acked cas))
+     "failed" (nil-to-zero (.get_failed cas))}))
+
+(defmulti unpack-comp-output-stat
+  (fn [[_ ^ComponentAggregateStats s]] (.get_type s)))
+
+(defmethod unpack-comp-output-stat ComponentType/BOLT
+  [[stream-id ^ComponentAggregateStats stats]]
+  (let [^CommonAggregateStats cas (.get_common_stats stats)]
+    {"stream" stream-id
+     "emitted" (nil-to-zero (.get_emitted cas))
+     "transferred" (nil-to-zero (.get_transferred cas))}))
+
+(defmethod unpack-comp-output-stat ComponentType/SPOUT
+  [[stream-id ^ComponentAggregateStats stats]]
+  (let [^CommonAggregateStats cas (.get_common_stats stats)
+        ^SpecificAggregateStats spec-s (.get_specific_stats stats)
+        ^SpoutAggregateStats spout-s (.get_spout spec-s)]
+    {"stream" stream-id
+     "emitted" (nil-to-zero (.get_emitted cas))
+     "transferred" (nil-to-zero (.get_transferred cas))
+     "completeLatency" (float-str (.get_complete_latency_ms spout-s))
+     "acked" (nil-to-zero (.get_acked cas))
+     "failed" (nil-to-zero (.get_failed cas))}))
+
+(defmulti unpack-comp-exec-stat
+  (fn [_ _ ^ComponentAggregateStats cas] (.get_type (.get_stats ^ExecutorAggregateStats cas))))
+
+(defmethod unpack-comp-exec-stat ComponentType/BOLT
+  [topology-id secure? ^ExecutorAggregateStats eas]
+  (let [^ExecutorSummary summ (.get_exec_summary eas)
+        ^ExecutorInfo info (.get_executor_info summ)
+        ^ComponentAggregateStats stats (.get_stats eas)
+        ^SpecificAggregateStats ss (.get_specific_stats stats)
+        ^BoltAggregateStats bas (.get_bolt ss)
+        ^CommonAggregateStats cas (.get_common_stats stats)
+        host (.get_host summ)
+        port (.get_port summ)
+        exec-id (pretty-executor-info info)
+        uptime (.get_uptime_secs summ)]
+    {"id" exec-id
+     "encodedId" (url-encode exec-id)
+     "uptime" (pretty-uptime-sec uptime)
+     "uptimeSeconds" uptime
+     "host" host
+     "port" port
+     "emitted" (nil-to-zero (.get_emitted cas))
+     "transferred" (nil-to-zero (.get_transferred cas))
+     "capacity" (float-str (nil-to-zero (.get_capacity bas)))
+     "executeLatency" (float-str (.get_execute_latency_ms bas))
+     "executed" (nil-to-zero (.get_executed bas))
+     "processLatency" (float-str (.get_process_latency_ms bas))
+     "acked" (nil-to-zero (.get_acked cas))
+     "failed" (nil-to-zero (.get_failed cas))
+     "workerLogLink" (worker-log-link host port topology-id secure?)}))
+
+(defmethod unpack-comp-exec-stat ComponentType/SPOUT
+  [topology-id secure? ^ExecutorAggregateStats eas]
+  (let [^ExecutorSummary summ (.get_exec_summary eas)
+        ^ExecutorInfo info (.get_executor_info summ)
+        ^ComponentAggregateStats stats (.get_stats eas)
+        ^SpecificAggregateStats ss (.get_specific_stats stats)
+        ^SpoutAggregateStats sas (.get_spout ss)
+        ^CommonAggregateStats cas (.get_common_stats stats)
+        host (.get_host summ)
+        port (.get_port summ)
+        exec-id (pretty-executor-info info)
+        uptime (.get_uptime_secs summ)]
+    {"id" exec-id
+     "encodedId" (url-encode exec-id)
+     "uptime" (pretty-uptime-sec uptime)
+     "uptimeSeconds" uptime
+     "host" host
+     "port" port
+     "emitted" (nil-to-zero (.get_emitted cas))
+     "transferred" (nil-to-zero (.get_transferred cas))
+     "completeLatency" (float-str (.get_complete_latency_ms sas))
+     "acked" (nil-to-zero (.get_acked cas))
+     "failed" (nil-to-zero (.get_failed cas))
+     "workerLogLink" (worker-log-link host port topology-id secure?)}))
+
+(defmulti unpack-component-page-info
+  "Unpacks component-specific info to clojure data structures"
+  (fn [^ComponentPageInfo info & _]
+    (.get_component_type info)))
+
+(defmethod unpack-component-page-info ComponentType/BOLT
+  [^ComponentPageInfo info topology-id window include-sys? secure?]
+  (merge
+    {"boltStats" (map unpack-comp-agg-stat (.get_window_to_stats info))
+     "inputStats" (map unpack-bolt-input-stat (.get_gsid_to_input_stats info))
+     "outputStats" (map unpack-comp-output-stat (.get_sid_to_output_stats info))
+     "executorStats" (map (partial unpack-comp-exec-stat topology-id secure?)
+                          (.get_exec_stats info))}
+    (-> info .get_errors (component-errors topology-id secure?))))
+
+(defmethod unpack-component-page-info ComponentType/SPOUT
+  [^ComponentPageInfo info topology-id window include-sys? secure?]
+  (merge
+    {"spoutSummary" (map unpack-comp-agg-stat (.get_window_to_stats info))
+     "outputStats" (map unpack-comp-output-stat (.get_sid_to_output_stats info))
+     "executorStats" (map (partial unpack-comp-exec-stat topology-id secure?)
+                          (.get_exec_stats info))}
+    (-> info .get_errors (component-errors topology-id secure?))))
+
+(defn get-active-profile-actions
+  [nimbus topology-id component]
+  (let [profile-actions  (.getComponentPendingProfileActions nimbus
+                                               topology-id
+                                               component
+                                 ProfileAction/JPROFILE_STOP)
+        latest-profile-actions (map clojurify-profile-request profile-actions)
+        active-actions (map (fn [profile-action]
+                              {"host" (:host profile-action)
+                               "port" (str (:port profile-action))
+                               "dumplink" (worker-dump-link (:host profile-action) (str (:port profile-action)) topology-id)
+                               "timestamp" (str (- (:timestamp profile-action) (System/currentTimeMillis)))})
+                            latest-profile-actions)]
+    (log-message "Latest-active actions are: " (pr active-actions))
+    active-actions))
+
+(defn component-page
+  [topology-id component window include-sys? user secure?]
+  (thrift/with-configured-nimbus-connection nimbus
+    (let [window (or window ":all-time")
+          window-hint (window-hint window)
+          comp-page-info (.getComponentPageInfo ^Nimbus$Client nimbus
+                                                topology-id
+                                                component
+                                                window
+                                                include-sys?)
+          topology-conf (from-json (.getTopologyConf ^Nimbus$Client nimbus
+                                                     topology-id))
+          msg-timeout (topology-conf TOPOLOGY-MESSAGE-TIMEOUT-SECS)
+          [debugEnabled
+           samplingPct] (if-let [debug-opts (.get_debug_options comp-page-info)]
+                          [(.is_enable debug-opts)
+                           (.get_samplingpct debug-opts)])]
+      (assoc
+       (unpack-component-page-info comp-page-info
+                                   topology-id
+                                   window
+                                   include-sys?
+                                   secure?)
+       "user" user
+       "id" component
+       "encodedId" (url-encode component)
+       "name" (.get_topology_name comp-page-info)
+       "executors" (.get_num_executors comp-page-info)
+       "tasks" (.get_num_tasks comp-page-info)
+       "topologyId" topology-id
+       "topologyStatus" (.get_topology_status comp-page-info)
+       "encodedTopologyId" (url-encode topology-id)
+       "window" window
+       "componentType" (-> comp-page-info .get_component_type str lower-case)
+       "windowHint" window-hint
+       "debug" (or debugEnabled false)
+       "samplingPct" (or samplingPct 10)
+       "eventLogLink" (event-log-link topology-id
+                                      component
+                                      (.get_eventlog_host comp-page-info)
+                                      (.get_eventlog_port comp-page-info)
+                                      secure?)
+       "profileActionEnabled" (*STORM-CONF* WORKER-PROFILER-ENABLED)
+       "profilerActive" (if (*STORM-CONF* WORKER-PROFILER-ENABLED)
+                          (get-active-profile-actions nimbus topology-id component)
+                          [])))))
+    
+(defn- level-to-dict [level]
+  (if level
+    (let [timeout (.get_reset_log_level_timeout_secs level)
+          timeout-epoch (.get_reset_log_level_timeout_epoch level)
+          target-level (.get_target_log_level level)
+          reset-level (.get_reset_log_level level)]
+          {"target_level" (.toString (Level/toLevel target-level))
+           "reset_level" (.toString (Level/toLevel reset-level))
+           "timeout" timeout
+           "timeout_epoch" timeout-epoch})))
+
+(defn log-config [topology-id]
+  (thrift/with-configured-nimbus-connection
+    nimbus
+    (let [log-config (.getLogConfig ^Nimbus$Client nimbus topology-id)
+          named-logger-levels (into {}
+                                (for [[key val] (.get_named_logger_level log-config)]
+                                  [(str key) (level-to-dict val)]))]
+      {"namedLoggerLevels" named-logger-levels})))
+
+(defn topology-config [topology-id]
+  (thrift/with-configured-nimbus-connection nimbus
+    (from-json (.getTopologyConf ^Nimbus$Client nimbus topology-id))))
+
+(defn topology-op-response [topology-id op]
+  {"topologyOperation" op,
+   "topologyId" topology-id,
+   "status" "success"
+   })
+
+(defn component-op-response [topology-id component-id op]
+  {"topologyOperation" op,
+   "topologyId" topology-id,
+   "componentId" component-id,
+   "status" "success"
+   })
+
+(defn check-include-sys?
+  [sys?]
+  (if (or (nil? sys?) (= "false" sys?)) false true))
+
+(def http-creds-handler (AuthUtils/GetUiHttpCredentialsPlugin *STORM-CONF*))
+
+(defn populate-context!
+  "Populate the Storm RequestContext from an servlet-request. This should be called in each handler"
+  [servlet-request]
+    (when http-creds-handler
+      (.populateContext http-creds-handler (ReqContext/context) servlet-request)))
+
+(defn get-user-name
+  [servlet-request]
+  (.getUserName http-creds-handler servlet-request))
+
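The defroutes block that follows wires these handlers to the UI's REST API under /api/v1 (cluster, nimbus, supervisor, and topology summaries, plus topology operations such as activate, rebalance, and kill). A minimal client sketch for the GET /api/v1/cluster/summary route defined below; the UI host name and port 8080 are assumptions, and authentication/HTTPS are ignored:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class ClusterSummaryClient {
        public static void main(String[] args) throws Exception {
            // ui-host:8080 is a placeholder for wherever the Storm UI is running.
            URL url = new URL("http://ui-host:8080/api/v1/cluster/summary");
            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            conn.setRequestMethod("GET");
            StringBuilder body = new StringBuilder();
            try (BufferedReader in = new BufferedReader(
                    new InputStreamReader(conn.getInputStream(), "UTF-8"))) {
                for (String line; (line = in.readLine()) != null; ) {
                    body.append(line);
                }
            } finally {
                conn.disconnect();
            }
            // JSON built by cluster-summary above: slotsTotal, slotsUsed, executorsTotal, ...
            System.out.println(body);
        }
    }

The :callback parameter threaded into json-response in each route presumably selects a JSONP-style response; appending ?callback=fn to the URL above would exercise that path.
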
+(defroutes main-routes
+  (GET "/api/v1/cluster/configuration" [& m]
+    (mark! ui:num-cluster-configuration-http-requests)
+    (json-response (cluster-configuration)
+                   (:callback m) :serialize-fn identity))
+  (GET "/api/v1/cluster/summary" [:as {:keys [cookies servlet-request]} & m]
+    (mark! ui:num-cluster-summary-http-requests)
+    (populate-context! servlet-request)
+    (assert-authorized-user "getClusterInfo")
+    (let [user (get-user-name servlet-request)]
+      (json-response (assoc (cluster-summary user)
+                          "bugtracker-url" (*STORM-CONF* UI-PROJECT-BUGTRACKER-URL)
+                          "central-log-url" (*STORM-CONF* UI-CENTRAL-LOGGING-URL)) (:callback m))))
+  (GET "/api/v1/nimbus/summary" [:as {:keys [cookies servlet-request]} & m]
+    (mark! ui:num-nimbus-summary-http-requests)
+    (populate-context! servlet-request)
+    (assert-authorized-user "getClusterInfo")
+    (json-response (nimbus-summary) (:callback m)))
+  (GET "/api/v1/history/summary" [:as {:keys [cookies servlet-request]} & m]
+    (let [user (.getUserName http-creds-handler servlet-request)]
+      (json-response (topology-history-info user) (:callback m))))
+  (GET "/api/v1/supervisor/summary" [:as {:keys [cookies servlet-request]} & m]
+    (mark! ui:num-supervisor-summary-http-requests)
+    (populate-context! servlet-request)
+    (assert-authorized-user "getClusterInfo")
+    (json-response (assoc (supervisor-summary)
+                     "logviewerPort" (*STORM-CONF* LOGVIEWER-PORT)) (:callback m)))
+  (GET "/api/v1/topology/summary" [:as {:keys [cookies servlet-request]} & m]
+    (mark! ui:num-all-topologies-summary-http-requests)
+    (populate-context! servlet-request)
+    (assert-authorized-user "getClusterInfo")
+    (json-response (all-topologies-summary) (:callback m)))
+  (GET  "/api/v1/topology-workers/:id" [:as {:keys [cookies servlet-request]} id & m]
+    (let [id (url-decode id)]
+      (json-response {"hostPortList" (worker-host-port id)
+                      "logviewerPort" (*STORM-CONF* LOGVIEWER-PORT)} (:callback m))))
+  (GET "/api/v1/topology/:id" [:as {:keys [cookies servlet-request scheme]} id & m]
+    (mark! ui:num-topology-page-http-requests)
+    (populate-context! servlet-request)
+    (assert-authorized-user "getTopology" (topology-config id))
+    (let [user (get-user-name servlet-request)]
+      (json-response (topology-page id (:window m) (check-include-sys? (:sys m)) user (= scheme :https)) (:callback m))))
+  (GET "/api/v1/topology/:id/visualization-init" [:as {:keys [cookies servlet-request]} id & m]
+    (mark! ui:num-build-visualization-http-requests)
+    (populate-context! servlet-request)
+    (assert-authorized-user "getTopology" (topology-config id))
+    (json-response (build-visualization id (:window m) (check-include-sys? (:sys m))) (:callback m)))
+  (GET "/api/v1/topology/:id/visualization" [:as {:keys [cookies servlet-request]} id & m]
+    (mark! ui:num-mk-visualization-data-http-requests)
+    (populate-context! servlet-request)
+    (assert-authorized-user "getTopology" (topology-config id))
+    (json-response (mk-visualization-data id (:window m) (check-include-sys? (:sys m))) (:callback m)))
+  (GET "/api/v1/topology/:id/component/:component" [:as {:keys [cookies servlet-request scheme]} id component & m]
+    (mark! ui:num-component-page-http-requests)
+    (populate-context! servlet-request)
+    (assert-authorized-user "getTopology" (topology-config id))
+    (let [user (get-user-name servlet-request)]
+      (json-response
+          (component-page id component (:window m) (check-include-sys? (:sys m)) user (= scheme :https))
+          (:callback m))))
+  (GET "/api/v1/topology/:id/logconfig" [:as {:keys [cookies servlet-request]} id & m]
+    (mark! ui:num-log-config-http-requests)
+    (populate-context! servlet-request)
+    (assert-authorized-user "getTopology" (topology-config id))
+       (json-response (log-config id) (:callback m)))
+  (POST "/api/v1/topology/:id/activate" [:as {:keys [cookies servlet-request]} id & m]
+    (mark! ui:num-activate-topology-http-requests)
+    (populate-context! servlet-request)
+    (assert-authorized-user "activate" (topology-config id))
+    (thrift/with-configured-nimbus-connection nimbus
+       (let [tplg (->> (doto
+                        (GetInfoOptions.)
+                        (.set_num_err_choice NumErrorsChoice/NONE))
+                      (.getTopologyInfoWithOpts ^Nimbus$Client nimbus id))
+            name (.get_name tplg)]
+        (.activate nimbus name)
+        (log-message "Activating topology '" name "'")))
+    (json-response (topology-op-response id "activate") (m "callback")))
+  (POST "/api/v1/topology/:id/deactivate" [:as {:keys [cookies servlet-request]} id & m]
+    (mark! ui:num-deactivate-topology-http-requests)
+    (populate-context! servlet-request)
+    (assert-authorized-user "deactivate" (topology-config id))
+    (thrift/with-configured-nimbus-connection nimbus
+        (let [tplg (->> (doto
+                        (GetInfoOptions.)
+                        (.set_num_err_choice NumErrorsChoice/NONE))
+                      (.getTopologyInfoWithOpts ^Nimbus$Client nimbus id))
+            name (.get_name tplg)]
+        (.deactivate nimbus name)
+        (log-message "Deactivating topology '" name "'")))
+    (json-response (topology-op-response id "deactivate") (m "callback")))
+  (POST "/api/v1/topology/:id/debug/:action/:spct" [:as {:keys [cookies servlet-request]} id action spct & m]
+    (mark! ui:num-debug-topology-http-requests)
+    (populate-context! servlet-request)
+    (assert-authorized-user "debug" (topology-config id))
+    (thrift/with-configured-nimbus-connection nimbus
+        (let [tplg (->> (doto
+                        (GetInfoOptions.)
+                        (.set_num_err_choice NumErrorsChoice/NONE))
+                   (.getTopologyInfoWithOpts ^Nimbus$Client nimbus id))
+            name (.get_name tplg)
+            enable? (= "enable" action)]
+        (.debug nimbus name "" enable? (Integer/parseInt spct))
+        (log-message "Debug topology [" name "] action [" action "] sampling pct [" spct "]")))
+     (json-response (topology-op-response id (str "debug/" action)) (m "callback")))
+  (POST "/api/v1/topology/:id/component/:component/debug/:action/:spct" [:as {:keys [cookies servlet-request]} id component action spct & m]
+    (mark! ui:num-component-op-response-http-requests)
+    (populate-context! servlet-request)
+    (assert-authorized-user "debug" (topology-config id))
+    (thrift/with-configured-nimbus-connection nimbus
+      (let [tplg (->> (doto
+                        (GetInfoOptions.)
+                        (.set_num_err_choice NumErrorsChoice/NONE))
+                   (.getTopologyInfoWithOpts ^Nimbus$Client nimbus id))
+            name (.get_name tplg)
+            enable? (= "enable" action)]
+        (.debug nimbus name component enable? (Integer/parseInt spct))
+        (log-message "Debug topology [" name "] component [" component "] action [" action "] sampling pct [" spct "]")))
+    (json-response (component-op-response id component (str "/debug/" action)) (m "callback")))
+  (POST "/api/v1/topology/:id/rebalance/:wait-time" [:as {:keys [cookies servlet-request]} id wait-time & m]
+    (mark! ui:num-topology-op-response-http-requests)
+    (populate-context! servlet-request)
+    (assert-authorized-user "rebalance" (topology-config id))
+    (thrift/with-configured-nimbus-connection nimbus
+      (let [tplg (->> (doto
+                        (GetInfoOptions.)
+                        (.set_num_err_choice NumErrorsChoice/NONE))
+                      (.getTopologyInfoWithOpts ^Nimbus$Client nimbus id))
+            name (.get_name tplg)
+            rebalance-options (m "rebalanceOptions")
+            options (RebalanceOptions.)]
+        (.set_wait_secs options (Integer/parseInt wait-time))
+        (if (and (not-nil? rebalance-options) (contains? rebalance-options "numWorkers"))
+          (.set_num_workers options (Integer/parseInt (.toString (rebalance-options "numWorkers")))))
+        (if (and (not-nil? rebalance-options) (contains? rebalance-options "executors"))
+          (doseq [keyval (rebalance-options "executors")]
+            (.put_to_num_executors options (key keyval) (Integer/parseInt (.toString (val keyval))))))
+        (.rebalance nimbus name options)
+        (log-message "Rebalancing topology '" name "' with wait time: " wait-time " secs")))
+    (json-response (topology-op-response id "rebalance") (m "callback")))
+  (POST "/api/v1/topology/:id/kill/:wait-time" [:as {:keys [cookies servlet-request]} id wait-time & m]
+    (mark! ui:num-topology-op-response-http-requests)
+    (populate-context! servlet-request)
+    (assert-authorized-user "killTopology" (topology-config id))
+    (thrift/with-configured-nimbus-connection nimbus
+      (let [tplg (->> (doto
+                        (GetInfoOptions.)
+                        (.set_num_err_choice NumErrorsChoice/NONE))
+                      (.getTopologyInfoWithOpts ^Nimbus$Client nimbus id))
+            name (.get_name tplg)
+            options (KillOptions.)]
+        (.set_wait_secs options (Integer/parseInt wait-time))
+        (.killTopologyWithOpts nimbus name options)
+        (log-message "Killing topology '" name "' with wait time: " wait-time " secs")))
+    (json-response (topology-op-response id "kill") (m "callback")))
+  (POST "/api/v1/topology/:id/logconfig" [:as {:keys [cookies servlet-request]} id namedLoggerLevels & m]
+    (mark! ui:num-topology-op-response-http-requests)
+    (populate-context! servlet-request)
+    (assert-authorized-user "setLogConfig" (topology-config id))
+    (thrift/with-configured-nimbus-connection
+      nimbus
+      (let [new-log-config (LogConfig.)]
+        (doseq [[key level] namedLoggerLevels]
+            (let [logger-name (str key)
+                  target-level (.get level "target_level")
+                  timeout (or (.get level "timeout") 0)
+                  named-logger-level (LogLevel.)]
+              ;; if target-level is nil, do not set it, user wants to clear
+              (log-message "The target level for " logger-name " is " target-level)
+              (if (nil? target-level)
+                (do
+                  (.set_action named-logger-level LogLevelAction/REMOVE)
+                  (.unset_target_log_level named-logger-level))
+                (do
+                  (.set_action named-logger-level LogLevelAction/UPDATE)
+                  ;; the toLevel here ensures the string we get is valid
+                  (.set_target_log_level named-logger-level (.name (Level/toLevel target-level)))
+                  (.set_reset_log_level_timeout_secs named-logger-level timeout)))
+              (log-message "Adding this " logger-name " " named-logger-level " to " new-log-config)
+              (.put_to_named_logger_level new-log-config logger-name named-logger-level)))
+        (log-message "Setting topology " id " log config " new-log-config)
+        (.setLogConfig nimbus id new-log-config)
+        (json-response (log-config id) (m "callback")))))
+
+  (GET "/api/v1/topology/:id/profiling/start/:host-port/:timeout"
+       [:as {:keys [servlet-request]} id host-port timeout & m]
+       (thrift/with-configured-nimbus-connection nimbus
+         (assert-authorized-user "setWorkerProfiler" (topology-config id))
+         (assert-authorized-profiler-action "start")
+         (let [[host, port] (split host-port #":")
+               nodeinfo (NodeInfo. host (set [(Long. port)]))
+               timestamp (+ (System/currentTimeMillis) (* 60000 (Long. timeout)))
+               request (ProfileRequest. nodeinfo
+                                        ProfileAction/JPROFILE_STOP)]
+           (.set_time_stamp request timestamp)
+           (.setWorkerProfiler nimbus id request)
+           (json-response {"status" "ok"
+                           "id" host-port
+                           "timeout" timeout
+                           "dumplink" (worker-dump-link
+                                       host
+                                       port
+                                       id)}
+                          (m "callback")))))
+
+  (GET "/api/v1/topology/:id/profiling/stop/:host-port"
+       [:as {:keys [servlet-request]} id host-port & m]
+       (thrift/with-configured-nimbus-connection nimbus
+         (assert-authorized-user "setWorkerProfiler" (topology-config id))
+         (assert-authorized-profiler-action "stop")
+         (let [[host, port] (split host-port #":")
+               nodeinfo (NodeInfo. host (set [(Long. port)]))
+               timestamp 0
+               request (ProfileRequest. nodeinfo
+                                        ProfileAction/JPROFILE_STOP)]
+           (.set_time_stamp request timestamp)
+           (.setWorkerProfiler nimbus id request)
+           (json-response {"status" "ok"
+                           "id" host-port}
+                          (m "callback")))))
+  
+  (GET "/api/v1/topology/:id/profiling/dumpprofile/:host-port"
+       [:as {:keys [servlet-request]} id host-port & m]
+       (thrift/with-configured-nimbus-connection nimbus
+         (assert-authorized-user "setWorkerProfiler" (topology-config id))
+         (assert-authorized-profiler-action "dumpprofile")
+         (let [[host, port] (split host-port #":")
+               nodeinfo (NodeInfo. host (set [(Long. port)]))
+               timestamp (System/currentTimeMillis)
+               request (ProfileRequest. nodeinfo
+                                        ProfileAction/JPROFILE_DUMP)]
+           (.set_time_stamp request timestamp)
+           (.setWorkerProfiler nimbus id request)
+           (json-response {"status" "ok"
+                           "id" host-port}
+                          (m "callback")))))
+
+  (GET "/api/v1/topology/:id/profiling/dumpjstack/:host-port"
+       [:as {:keys [servlet-request]} id host-port & m]
+       (thrift/with-configured-nimbus-connection nimbus
+         (assert-authorized-user "setWorkerProfiler" (topology-config id))
+         (assert-authorized-profiler-action "dumpjstack")
+         (let [[host, port] (split host-port #":")
+               nodeinfo (NodeInfo. host (set [(Long. port)]))
+               timestamp (System/currentTimeMillis)
+               request (ProfileRequest. nodeinfo
+                                        ProfileAction/JSTACK_DUMP)]
+           (.set_time_stamp request timestamp)
+           (.setWorkerProfiler nimbus id request)
+           (json-response {"status" "ok"
+                           "id" host-port}
+                          (m "callback")))))
+
+  (GET "/api/v1/topology/:id/profiling/restartworker/:host-port"
+       [:as {:keys [servlet-request]} id host-port & m]
+       (thrift/with-configured-nimbus-connection nimbus
+         (assert-authorized-user "setWorkerProfiler" (topology-config id))
+         (assert-authorized-profiler-action "restartworker")
+         (let [[host, port] (split host-port #":")
+               nodeinfo (NodeInfo. host (set [(Long. port)]))
+               timestamp (System/currentTimeMillis)
+               request (ProfileRequest. nodeinfo
+                                        ProfileAction/JVM_RESTART)]
+           (.set_time_stamp request timestamp)
+           (.setWorkerProfiler nimbus id request)
+           (json-response {"status" "ok"
+                           "id" host-port}
+                          (m "callback")))))
+       
+  (GET "/api/v1/topology/:id/profiling/dumpheap/:host-port"
+       [:as {:keys [servlet-request]} id host-port & m]
+       (thrift/with-configured-nimbus-connection nimbus
+         (assert-authorized-user "setWorkerProfiler" (topology-config id))
+         (assert-authorized-profiler-action "dumpheap")
+         (let [[host, port] (split host-port #":")
+               nodeinfo (NodeInfo. host (set [(Long. port)]))
+               timestamp (System/currentTimeMillis)
+               request (ProfileRequest. nodeinfo
+                                        ProfileAction/JMAP_DUMP)]
+           (.set_time_stamp request timestamp)
+           (.setWorkerProfiler nimbus id request)
+           (json-response {"status" "ok"
+                           "id" host-port}
+                          (m "callback")))))
+  
+  (GET "/" [:as {cookies :cookies}]
+    (mark! ui:num-main-page-http-requests)
+    (resp/redirect "/index.html"))
+  (route/resources "/")
+  (route/not-found "Page not found"))
+
+(defn catch-errors
+  [handler]
+  (fn [request]
+    (try
+      (handler request)
+      (catch Exception ex
+        (json-response (exception->json ex) ((:query-params request) "callback") :status 500)))))
+
+(def app
+  (handler/site (-> main-routes
+                    (wrap-json-params)
+                    (wrap-multipart-params)
+                    (wrap-reload '[org.apache.storm.ui.core])
+                    requests-middleware
+                    catch-errors)))
+
+(defn start-server!
+  []
+  (try
+    (let [conf *STORM-CONF*
+          header-buffer-size (int (.get conf UI-HEADER-BUFFER-BYTES))
+          filters-confs [{:filter-class (conf UI-FILTER)
+                          :filter-params (conf UI-FILTER-PARAMS)}]
+          https-port (if (not-nil? (conf UI-HTTPS-PORT)) (conf UI-HTTPS-PORT) 0)
+          https-ks-path (conf UI-HTTPS-KEYSTORE-PATH)
+          https-ks-password (conf UI-HTTPS-KEYSTORE-PASSWORD)
+          https-ks-type (conf UI-HTTPS-KEYSTORE-TYPE)
+          https-key-password (conf UI-HTTPS-KEY-PASSWORD)
+          https-ts-path (conf UI-HTTPS-TRUSTSTORE-PATH)
+          https-ts-password (conf UI-HTTPS-TRUSTSTORE-PASSWORD)
+          https-ts-type (conf UI-HTTPS-TRUSTSTORE-TYPE)
+          https-want-client-auth (conf UI-HTTPS-WANT-CLIENT-AUTH)
+          https-need-client-auth (conf UI-HTTPS-NEED-CLIENT-AUTH)]
+      (start-metrics-reporters)
+      (storm-run-jetty {:port (conf UI-PORT)
+                        :host (conf UI-HOST)
+                        :https-port https-port
+                        :configurator (fn [server]
+                                        (config-ssl server
+                                                    https-port
+                                                    https-ks-path
+                                                    https-ks-password
+                                                    https-ks-type
+                                                    https-key-password
+                                                    https-ts-path
+                                                    https-ts-password
+                                                    https-ts-type
+                                                    https-need-client-auth
+                                                    https-want-client-auth)
+                                        (doseq [connector (.getConnectors server)]
+                                          (.setRequestHeaderSize connector header-buffer-size))
+                                        (config-filter server app filters-confs))}))
+   (catch Exception ex
+     (log-error ex))))
+
+(defn -main
+  []
+  (log-message "Starting ui server for storm version '" STORM-VERSION "'")
+  (start-server!))
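
The new routes above expose topology operations (rebalance, kill, log config) and worker profiling over plain HTTP. As a rough illustration of how a client could drive one of them, here is a minimal Java sketch that posts to the new kill route; the UI address, topology id and wait time are placeholders, and it assumes no UI filter is configured that would reject an unauthenticated request:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class KillTopologyExample {
        public static void main(String[] args) throws Exception {
            // Placeholder UI address and topology id; adjust for a real cluster.
            String uiBase = "http://localhost:8080";
            String topologyId = "wordcount-1-1452549200";
            int waitSecs = 30;

            // Matches the new route: POST /api/v1/topology/:id/kill/:wait-time
            URL url = new URL(uiBase + "/api/v1/topology/" + topologyId + "/kill/" + waitSecs);
            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            conn.setRequestMethod("POST");

            System.out.println("HTTP " + conn.getResponseCode());
            try (BufferedReader r = new BufferedReader(new InputStreamReader(conn.getInputStream(), "UTF-8"))) {
                String line;
                while ((line = r.readLine()) != null) {
                    System.out.println(line); // JSON body produced by topology-op-response
                }
            }
        }
    }

The same pattern applies to the rebalance and profiling routes; only the path changes.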

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/ui/helpers.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/ui/helpers.clj b/storm-core/src/clj/org/apache/storm/ui/helpers.clj
new file mode 100644
index 0000000..7ded154
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/ui/helpers.clj
@@ -0,0 +1,240 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.ui.helpers
+  (:use compojure.core)
+  (:use [hiccup core page-helpers])
+  (:use [clojure
+         [string :only [blank? join]]
+         [walk :only [keywordize-keys]]])
+  (:use [org.apache.storm config log])
+  (:use [org.apache.storm.util :only [clojurify-structure uuid defnk to-json url-encode not-nil?]])
+  (:use [clj-time coerce format])
+  (:import [org.apache.storm.generated ExecutorInfo ExecutorSummary])
+  (:import [org.apache.storm.logging.filters AccessLoggingFilter])
+  (:import [java.util EnumSet])
+  (:import [org.eclipse.jetty.server Server]
+           [org.eclipse.jetty.server.nio SelectChannelConnector]
+           [org.eclipse.jetty.server.ssl SslSocketConnector]
+           [org.eclipse.jetty.servlet ServletHolder FilterMapping]
+	   [org.eclipse.jetty.util.ssl SslContextFactory]
+           [org.eclipse.jetty.server DispatcherType]
+           [org.eclipse.jetty.servlets CrossOriginFilter])
+  (:require [ring.util servlet])
+  (:require [compojure.route :as route]
+            [compojure.handler :as handler])
+  (:require [metrics.meters :refer [defmeter mark!]]))
+
+(defmeter num-web-requests)
+(defn requests-middleware
+  "Coda Hale metric for counting the number of web requests."
+  [handler]
+  (fn [req]
+    (mark! num-web-requests)
+    (handler req)))
+
+(defn split-divide [val divider]
+  [(Integer. (int (/ val divider))) (mod val divider)]
+  )
+
+(def PRETTY-SEC-DIVIDERS
+     [["s" 60]
+      ["m" 60]
+      ["h" 24]
+      ["d" nil]])
+
+(def PRETTY-MS-DIVIDERS
+     (cons ["ms" 1000]
+           PRETTY-SEC-DIVIDERS))
+
+(defn pretty-uptime-str* [val dividers]
+  (let [val (if (string? val) (Integer/parseInt val) val)
+        vals (reduce (fn [[state val] [_ divider]]
+                       (if (pos? val)
+                         (let [[divided mod] (if divider
+                                               (split-divide val divider)
+                                               [nil val])]
+                           [(concat state [mod])
+                            divided]
+                           )
+                         [state val]
+                         ))
+                     [[] val]
+                     dividers)
+        strs (->>
+              (first vals)
+              (map
+               (fn [[suffix _] val]
+                 (str val suffix))
+               dividers
+               ))]
+    (join " " (reverse strs))
+    ))
+
+(defn pretty-uptime-sec [secs]
+  (pretty-uptime-str* secs PRETTY-SEC-DIVIDERS))
+
+(defn pretty-uptime-ms [ms]
+  (pretty-uptime-str* ms PRETTY-MS-DIVIDERS))
+
+
+(defelem table [headers-map data]
+  [:table
+   [:thead
+    [:tr
+     (for [h headers-map]
+       [:th (if (:text h) [:span (:attr h) (:text h)] h)])
+     ]]
+   [:tbody
+    (for [row data]
+      [:tr
+       (for [col row]
+         [:td col]
+         )]
+      )]
+   ])
+
+(defn url-format [fmt & args]
+  (String/format fmt
+    (to-array (map #(url-encode (str %)) args))))
+
+(defn pretty-executor-info [^ExecutorInfo e]
+  (str "[" (.get_task_start e) "-" (.get_task_end e) "]"))
+
+(defn unauthorized-user-json
+  [user]
+  {"error" "No Authorization"
+   "errorMessage" (str "User " user " is not authorized.")})
+
+(defn unauthorized-user-html [user]
+  [[:h2 "User '" (escape-html user) "' is not authorized."]])
+
+(defn- mk-ssl-connector [port ks-path ks-password ks-type key-password
+                         ts-path ts-password ts-type need-client-auth want-client-auth]
+  (let [sslContextFactory (doto (SslContextFactory.)
+                            (.setExcludeCipherSuites (into-array String ["SSL_RSA_WITH_RC4_128_MD5" "SSL_RSA_WITH_RC4_128_SHA"]))
+                            (.setExcludeProtocols (into-array String ["SSLv3"]))
+                            (.setAllowRenegotiate false)
+                            (.setKeyStorePath ks-path)
+                            (.setKeyStoreType ks-type)
+                            (.setKeyStorePassword ks-password)
+                            (.setKeyManagerPassword key-password))]
+    (if (and (not-nil? ts-path) (not-nil? ts-password) (not-nil? ts-type))
+      (do
+        (.setTrustStore sslContextFactory ts-path)
+        (.setTrustStoreType sslContextFactory ts-type)
+        (.setTrustStorePassword sslContextFactory ts-password)))
+    (cond
+      need-client-auth (.setNeedClientAuth sslContextFactory true)
+      want-client-auth (.setWantClientAuth sslContextFactory true))
+    (doto (SslSocketConnector. sslContextFactory)
+      (.setPort port))))
+
+
+(defn config-ssl [server port ks-path ks-password ks-type key-password
+                  ts-path ts-password ts-type need-client-auth want-client-auth]
+  (when (> port 0)
+    (.addConnector server (mk-ssl-connector port ks-path ks-password ks-type key-password
+                                            ts-path ts-password ts-type need-client-auth want-client-auth))))
+
+(defn cors-filter-handler
+  []
+  (doto (org.eclipse.jetty.servlet.FilterHolder. (CrossOriginFilter.))
+    (.setInitParameter CrossOriginFilter/ALLOWED_ORIGINS_PARAM "*")
+    (.setInitParameter CrossOriginFilter/ALLOWED_METHODS_PARAM "GET, POST, PUT")
+    (.setInitParameter CrossOriginFilter/ALLOWED_HEADERS_PARAM "X-Requested-With, X-Requested-By, Access-Control-Allow-Origin, Content-Type, Content-Length, Accept, Origin")
+    (.setInitParameter CrossOriginFilter/ACCESS_CONTROL_ALLOW_ORIGIN_HEADER "*")
+    ))
+
+(defn mk-access-logging-filter-handler []
+  (org.eclipse.jetty.servlet.FilterHolder. (AccessLoggingFilter.)))
+
+(defn config-filter [server handler filters-confs]
+  (if filters-confs
+    (let [servlet-holder (ServletHolder.
+                           (ring.util.servlet/servlet handler))
+          context (doto (org.eclipse.jetty.servlet.ServletContextHandler. server "/")
+                    (.addServlet servlet-holder "/"))]
+      (.addFilter context (cors-filter-handler) "/*" (EnumSet/allOf DispatcherType))
+      (doseq [{:keys [filter-name filter-class filter-params]} filters-confs]
+        (if filter-class
+          (let [filter-holder (doto (org.eclipse.jetty.servlet.FilterHolder.)
+                                (.setClassName filter-class)
+                                (.setName (or filter-name filter-class))
+                                (.setInitParameters (or filter-params {})))]
+            (.addFilter context filter-holder "/*" FilterMapping/ALL))))
+      (.addFilter context (mk-access-logging-filter-handler) "/*" (EnumSet/allOf DispatcherType))
+      (.setHandler server context))))
+
+(defn ring-response-from-exception [ex]
+  {:headers {}
+   :status 400
+   :body (.getMessage ex)})
+
+(defn- remove-non-ssl-connectors [server]
+  (doseq [c (.getConnectors server)]
+    (when-not (or (nil? c) (instance? SslSocketConnector c))
+      (.removeConnector server c)
+      ))
+  server)
+
+;; Modified from ring.adapter.jetty 1.3.0
+(defn- jetty-create-server
+  "Construct a Jetty Server instance."
+  [options]
+  (let [connector (doto (SelectChannelConnector.)
+                    (.setPort (options :port 80))
+                    (.setHost (options :host))
+                    (.setMaxIdleTime (options :max-idle-time 200000)))
+        server    (doto (Server.)
+                    (.addConnector connector)
+                    (.setSendDateHeader true))
+        https-port (options :https-port)]
+    (if (and (not-nil? https-port) (> https-port 0)) (remove-non-ssl-connectors server))
+    server))
+
+(defn storm-run-jetty
+  "Modified version of run-jetty
+  Assumes configurator sets handler."
+  [config]
+  {:pre [(:configurator config)]}
+  (let [#^Server s (jetty-create-server (dissoc config :configurator))
+        configurator (:configurator config)]
+    (configurator s)
+    (.start s)))
+
+(defn wrap-json-in-callback [callback response]
+  (str callback "(" response ");"))
+
+(defnk json-response
+  [data callback :serialize-fn to-json :status 200 :headers {}]
+  {:status status
+   :headers (merge {"Cache-Control" "no-cache, no-store"
+                    "Access-Control-Allow-Origin" "*"
+                    "Access-Control-Allow-Headers" "Content-Type, Access-Control-Allow-Headers, Access-Controler-Allow-Origin, X-Requested-By, X-Csrf-Token, Authorization, X-Requested-With"}
+              (if (not-nil? callback) {"Content-Type" "application/javascript;charset=utf-8"}
+                {"Content-Type" "application/json;charset=utf-8"})
+              headers)
+   :body (if (not-nil? callback)
+           (wrap-json-in-callback callback (serialize-fn data))
+           (serialize-fn data))})
+
+(defn exception->json
+  [ex]
+  {"error" "Internal Server Error"
+   "errorMessage"
+   (let [sw (java.io.StringWriter.)]
+     (.printStackTrace ex (java.io.PrintWriter. sw))
+     (.toString sw))})
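
For context on the helpers above: pretty-uptime-str* walks a value through the divider chain (ms -> s -> m -> h -> d for PRETTY-MS-DIVIDERS), keeping each remainder and printing it with its suffix, most significant unit first. A rough Java rendering of the same idea, offered only to illustrate the algorithm (class and method names are invented for this sketch):

    public class PrettyUptime {
        // Suffix/divider pairs mirroring PRETTY-SEC-DIVIDERS; a null divider ends the chain.
        private static final String[] SUFFIXES = {"s", "m", "h", "d"};
        private static final Integer[] DIVIDERS = {60, 60, 24, null};

        public static String prettyUptimeSec(long secs) {
            StringBuilder out = new StringBuilder();
            long val = secs;
            for (int i = 0; i < SUFFIXES.length && val > 0; i++) {
                long part;
                if (DIVIDERS[i] != null) {
                    part = val % DIVIDERS[i];
                    val = val / DIVIDERS[i];
                } else {
                    part = val;
                    val = 0;
                }
                // Prepend so the most significant unit ends up first, as in the Clojure version.
                out.insert(0, part + SUFFIXES[i] + (out.length() > 0 ? " " : ""));
            }
            return out.toString();
        }

        public static void main(String[] args) {
            System.out.println(prettyUptimeSec(3661));   // 1h 1m 1s
            System.out.println(prettyUptimeSec(90061));  // 1d 1h 1m 1s
        }
    }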


[47/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/BlobStoreAPIWordCountTopology.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/BlobStoreAPIWordCountTopology.java b/examples/storm-starter/src/jvm/storm/starter/BlobStoreAPIWordCountTopology.java
deleted file mode 100644
index 250c418..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/BlobStoreAPIWordCountTopology.java
+++ /dev/null
@@ -1,304 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter;
-
-import backtype.storm.Config;
-import backtype.storm.StormSubmitter;
-import backtype.storm.blobstore.AtomicOutputStream;
-import backtype.storm.blobstore.ClientBlobStore;
-import backtype.storm.blobstore.InputStreamWithMeta;
-import backtype.storm.blobstore.NimbusBlobStore;
-
-import backtype.storm.generated.AccessControl;
-import backtype.storm.generated.AccessControlType;
-import backtype.storm.generated.AlreadyAliveException;
-import backtype.storm.generated.AuthorizationException;
-import backtype.storm.generated.InvalidTopologyException;
-import backtype.storm.generated.KeyAlreadyExistsException;
-import backtype.storm.generated.KeyNotFoundException;
-import backtype.storm.generated.SettableBlobMeta;
-import backtype.storm.spout.SpoutOutputCollector;
-import backtype.storm.task.ShellBolt;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.BasicOutputCollector;
-import backtype.storm.topology.IRichBolt;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.topology.base.BaseBasicBolt;
-import backtype.storm.topology.base.BaseRichSpout;
-import backtype.storm.blobstore.BlobStoreAclHandler;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
-import backtype.storm.utils.Utils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileReader;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.StringTokenizer;
-
-public class BlobStoreAPIWordCountTopology {
-    private static ClientBlobStore store; // Client API to invoke blob store API functionality
-    private static String key = "key";
-    private static String fileName = "blacklist.txt";
-    private static final Logger LOG = LoggerFactory.getLogger(BlobStoreAPIWordCountTopology.class);
-
-    public static void prepare() {
-        Config conf = new Config();
-        conf.putAll(Utils.readStormConfig());
-        store = Utils.getClientBlobStore(conf);
-    }
-
-    // Spout implementation
-    public static class RandomSentenceSpout extends BaseRichSpout {
-        SpoutOutputCollector _collector;
-
-        @Override
-        public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
-            _collector = collector;
-        }
-
-        @Override
-        public void nextTuple() {
-            Utils.sleep(100);
-            _collector.emit(new Values(getRandomSentence()));
-        }
-
-        @Override
-        public void ack(Object id) {
-        }
-
-        @Override
-        public void fail(Object id) {
-        }
-
-        @Override
-        public void declareOutputFields(OutputFieldsDeclarer declarer) {
-            declarer.declare(new Fields("sentence"));
-        }
-
-    }
-
-    // Bolt implementation
-    public static class SplitSentence extends ShellBolt implements IRichBolt {
-
-        public SplitSentence() {
-            super("python", "splitsentence.py");
-        }
-
-        @Override
-        public void declareOutputFields(OutputFieldsDeclarer declarer) {
-            declarer.declare(new Fields("word"));
-        }
-
-        @Override
-        public Map<String, Object> getComponentConfiguration() {
-            return null;
-        }
-    }
-
-    public static class FilterWords extends BaseBasicBolt {
-        boolean poll = false;
-        long pollTime;
-        Set<String> wordSet;
-        @Override
-        public void execute(Tuple tuple, BasicOutputCollector collector) {
-            String word = tuple.getString(0);
-            // Poll every 5 seconds to update the wordSet, which is
-            // used in the FilterWords bolt to filter the words
-            try {
-                if (!poll) {
-                    wordSet = parseFile(fileName);
-                    pollTime = System.currentTimeMillis();
-                    poll = true;
-                } else {
-                    if ((System.currentTimeMillis() - pollTime) > 5000) {
-                        wordSet = parseFile(fileName);
-                        pollTime = System.currentTimeMillis();
-                    }
-                }
-            } catch (IOException exp) {
-                throw new RuntimeException(exp);
-            }
-            if (wordSet !=null && !wordSet.contains(word)) {
-                collector.emit(new Values(word));
-            }
-        }
-
-        @Override
-        public void declareOutputFields(OutputFieldsDeclarer declarer) {
-            declarer.declare(new Fields("word"));
-        }
-    }
-
-    public void buildAndLaunchWordCountTopology(String[] args) {
-        TopologyBuilder builder = new TopologyBuilder();
-        builder.setSpout("spout", new RandomSentenceSpout(), 5);
-        builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
-        builder.setBolt("filter", new FilterWords(), 6).shuffleGrouping("split");
-
-        Config conf = new Config();
-        conf.setDebug(true);
-        try {
-            conf.setNumWorkers(3);
-            StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
-        } catch (InvalidTopologyException | AuthorizationException | AlreadyAliveException exp) {
-            throw new RuntimeException(exp);
-        }
-    }
-
-    // Equivalent create command on command line
-    // storm blobstore create --file blacklist.txt --acl o::rwa key
-    private static void createBlobWithContent(String blobKey, ClientBlobStore clientBlobStore, File file)
-            throws AuthorizationException, KeyAlreadyExistsException, IOException,KeyNotFoundException {
-        String stringBlobACL = "o::rwa";
-        AccessControl blobACL = BlobStoreAclHandler.parseAccessControl(stringBlobACL);
-        List<AccessControl> acls = new LinkedList<AccessControl>();
-        acls.add(blobACL); // more ACLs can be added here
-        SettableBlobMeta settableBlobMeta = new SettableBlobMeta(acls);
-        AtomicOutputStream blobStream = clientBlobStore.createBlob(blobKey,settableBlobMeta);
-        blobStream.write(readFile(file).toString().getBytes());
-        blobStream.close();
-    }
-
-    // Equivalent update command on command line
-    // storm blobstore update --file blacklist.txt key
-    private static void updateBlobWithContent(String blobKey, ClientBlobStore clientBlobStore, File file)
-            throws KeyNotFoundException, AuthorizationException, IOException {
-        AtomicOutputStream blobOutputStream = clientBlobStore.updateBlob(blobKey);
-        blobOutputStream.write(readFile(file).toString().getBytes());
-        blobOutputStream.close();
-    }
-
-    private static String getRandomSentence() {
-        String[] sentences = new String[]{ "the cow jumped over the moon", "an apple a day keeps the doctor away",
-                "four score and seven years ago", "snow white and the seven dwarfs", "i am at two with nature" };
-        String sentence = sentences[new Random().nextInt(sentences.length)];
-        return sentence;
-    }
-
-    private static Set<String> getRandomWordSet() {
-        Set<String> randomWordSet = new HashSet<>();
-        Random random = new Random();
-        String[] words = new String[]{ "cow", "jumped", "over", "the", "moon", "apple", "day", "doctor", "away",
-                "four", "seven", "ago", "snow", "white", "seven", "dwarfs", "nature", "two" };
-        // Choosing at most 5 words to update the blacklist file for filtering
-        for (int i=0; i<5; i++) {
-            randomWordSet.add(words[random.nextInt(words.length)]);
-        }
-        return randomWordSet;
-    }
-
-    private static Set<String> parseFile(String fileName) throws IOException {
-        File file = new File(fileName);
-        Set<String> wordSet = new HashSet<>();
-        if (!file.exists()) {
-            return wordSet;
-        }
-        StringTokenizer tokens = new StringTokenizer(readFile(file).toString(), "\r\n");
-        while (tokens.hasMoreElements()) {
-            wordSet.add(tokens.nextToken());
-        }
-        LOG.debug("parseFile {}", wordSet);
-        return wordSet;
-    }
-
-    private static StringBuilder readFile(File file) throws IOException {
-        String line;
-        StringBuilder fileContent = new StringBuilder();
-        // Do not use canonical file name here as we are using
-        // symbolic links to read file data and performing atomic move
-        // while updating files
-        BufferedReader br = new BufferedReader(new FileReader(file));
-        while ((line = br.readLine()) != null) {
-            fileContent.append(line);
-            fileContent.append(System.lineSeparator());
-        }
-        return fileContent;
-    }
-
-    // Creating a blacklist file to read from the disk
-    public static File createFile(String fileName) throws IOException {
-        File file = null;
-        file = new File(fileName);
-        if (!file.exists()) {
-            file.createNewFile();
-        }
-        writeToFile(file, getRandomWordSet());
-        return file;
-    }
-
-    // Updating a blacklist file periodically with random words
-    public static File updateFile(File file) throws IOException {
-        writeToFile(file, getRandomWordSet());
-        return file;
-    }
-
-    // Writing random words to be blacklisted
-    public static void writeToFile(File file, Set<String> content) throws IOException{
-        FileWriter fw = new FileWriter(file, false);
-        BufferedWriter bw = new BufferedWriter(fw);
-        Iterator<String> iter = content.iterator();
-        while(iter.hasNext()) {
-            bw.write(iter.next());
-            bw.write(System.lineSeparator());
-        }
-        bw.close();
-    }
-
-    public static void main(String[] args) {
-        prepare();
-        BlobStoreAPIWordCountTopology wc = new BlobStoreAPIWordCountTopology();
-        try {
-            File file = createFile(fileName);
-            // Creating blob again before launching topology
-            createBlobWithContent(key, store, file);
-
-            // Blobstore launch command with topology blobstore map
-            // Here we are giving it a local name so that we can read from the file
-            // bin/storm jar examples/storm-starter/storm-starter-topologies-0.11.0-SNAPSHOT.jar
-            // storm.starter.BlobStoreAPIWordCountTopology bl -c
-            // topology.blobstore.map='{"key":{"localname":"blacklist.txt", "uncompress":"false"}}'
-            wc.buildAndLaunchWordCountTopology(args);
-
-            // Updating file few times every 5 seconds
-            for(int i=0; i<10; i++) {
-                updateBlobWithContent(key, store, updateFile(file));
-                Utils.sleep(5000);
-            }
-        } catch (KeyAlreadyExistsException kae) {
-            LOG.info("Key already exists {}", kae);
-        } catch (AuthorizationException | KeyNotFoundException | IOException exp) {
-            throw new RuntimeException(exp);
-        }
-    }
-}
-
-
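
The deleted starter example above exercised the client blob store API from the old backtype.storm packages: create a blob with an "o::rwa" ACL, then update its contents while a topology reads it. Under the org.apache.storm namespace this commit migrates to, the same create/update pattern would look roughly like the following sketch (the package names follow the commit's stated migration, and the key and contents are placeholders):

    import java.nio.charset.StandardCharsets;
    import java.util.Collections;

    import org.apache.storm.Config;
    import org.apache.storm.blobstore.AtomicOutputStream;
    import org.apache.storm.blobstore.BlobStoreAclHandler;
    import org.apache.storm.blobstore.ClientBlobStore;
    import org.apache.storm.generated.AccessControl;
    import org.apache.storm.generated.SettableBlobMeta;
    import org.apache.storm.utils.Utils;

    public class BlobStoreSketch {
        public static void main(String[] args) throws Exception {
            Config conf = new Config();
            conf.putAll(Utils.readStormConfig());
            ClientBlobStore store = Utils.getClientBlobStore(conf);

            // Same ACL string the deleted example used: the owner gets read/write/admin.
            AccessControl acl = BlobStoreAclHandler.parseAccessControl("o::rwa");
            SettableBlobMeta meta = new SettableBlobMeta(Collections.singletonList(acl));

            // Create, and later update, a blob under a placeholder key.
            try (AtomicOutputStream out = store.createBlob("blacklist-key", meta)) {
                out.write("cow\nmoon\n".getBytes(StandardCharsets.UTF_8));
            }
            try (AtomicOutputStream out = store.updateBlob("blacklist-key")) {
                out.write("apple\nseven\n".getBytes(StandardCharsets.UTF_8));
            }
        }
    }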

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/ExclamationTopology.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/ExclamationTopology.java b/examples/storm-starter/src/jvm/storm/starter/ExclamationTopology.java
deleted file mode 100644
index d7b1b3e..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/ExclamationTopology.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter;
-
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.StormSubmitter;
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.testing.TestWordSpout;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.topology.base.BaseRichBolt;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
-import backtype.storm.utils.Utils;
-
-import java.util.Map;
-
-/**
- * This is a basic example of a Storm topology.
- */
-public class ExclamationTopology {
-
-  public static class ExclamationBolt extends BaseRichBolt {
-    OutputCollector _collector;
-
-    @Override
-    public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
-      _collector = collector;
-    }
-
-    @Override
-    public void execute(Tuple tuple) {
-      _collector.emit(tuple, new Values(tuple.getString(0) + "!!!"));
-      _collector.ack(tuple);
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-      declarer.declare(new Fields("word"));
-    }
-
-
-  }
-
-  public static void main(String[] args) throws Exception {
-    TopologyBuilder builder = new TopologyBuilder();
-
-    builder.setSpout("word", new TestWordSpout(), 10);
-    builder.setBolt("exclaim1", new ExclamationBolt(), 3).shuffleGrouping("word");
-    builder.setBolt("exclaim2", new ExclamationBolt(), 2).shuffleGrouping("exclaim1");
-
-    Config conf = new Config();
-    conf.setDebug(true);
-
-    if (args != null && args.length > 0) {
-      conf.setNumWorkers(3);
-
-      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
-    }
-    else {
-
-      LocalCluster cluster = new LocalCluster();
-      cluster.submitTopology("test", conf, builder.createTopology());
-      Utils.sleep(10000);
-      cluster.killTopology("test");
-      cluster.shutdown();
-    }
-  }
-}
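
The starter topologies removed in this part of the commit all share the same launch pattern: submit to a LocalCluster when no arguments are given, otherwise submit to a real cluster with StormSubmitter. Under the org.apache.storm packages this commit migrates to, only the imports change; a minimal sketch (package names assumed from the migration, the bolts omitted for brevity):

    import org.apache.storm.Config;
    import org.apache.storm.LocalCluster;
    import org.apache.storm.StormSubmitter;
    import org.apache.storm.testing.TestWordSpout;
    import org.apache.storm.topology.TopologyBuilder;
    import org.apache.storm.utils.Utils;

    public class MigratedExclamationSketch {
        public static void main(String[] args) throws Exception {
            TopologyBuilder builder = new TopologyBuilder();
            builder.setSpout("word", new TestWordSpout(), 10);
            // The exclamation bolts are unchanged apart from their imports and are omitted here.

            Config conf = new Config();
            conf.setDebug(true);

            if (args != null && args.length > 0) {
                // Remote submit path, as in the deleted example.
                conf.setNumWorkers(3);
                StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
            } else {
                // Local test path.
                LocalCluster cluster = new LocalCluster();
                cluster.submitTopology("test", conf, builder.createTopology());
                Utils.sleep(10000);
                cluster.killTopology("test");
                cluster.shutdown();
            }
        }
    }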

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/FastWordCountTopology.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/FastWordCountTopology.java b/examples/storm-starter/src/jvm/storm/starter/FastWordCountTopology.java
deleted file mode 100644
index 8f78abd..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/FastWordCountTopology.java
+++ /dev/null
@@ -1,198 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter;
-
-import backtype.storm.Config;
-import backtype.storm.StormSubmitter;
-import backtype.storm.generated.*;
-import backtype.storm.spout.SpoutOutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.BasicOutputCollector;
-import backtype.storm.topology.IRichBolt;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.topology.base.BaseBasicBolt;
-import backtype.storm.topology.base.BaseRichSpout;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
-import backtype.storm.utils.NimbusClient;
-import backtype.storm.utils.Utils;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Random;
-import java.util.concurrent.ThreadLocalRandom;
-
-/**
- * WordCount but the spout does not stop, and the bolts are implemented in
- * java.  This can show how fast the word count can run.
- */
-public class FastWordCountTopology {
-  public static class FastRandomSentenceSpout extends BaseRichSpout {
-    SpoutOutputCollector _collector;
-    Random _rand;
-    private static final String[] CHOICES = {
-        "marry had a little lamb whos fleese was white as snow",
-        "and every where that marry went the lamb was sure to go",
-        "one two three four five six seven eight nine ten",
-        "this is a test of the emergency broadcast system this is only a test",
-        "peter piper picked a peck of pickeled peppers"
-    };
-
-    @Override
-    public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
-      _collector = collector;
-      _rand = ThreadLocalRandom.current();
-    }
-
-    @Override
-    public void nextTuple() {
-      String sentence = CHOICES[_rand.nextInt(CHOICES.length)];
-      _collector.emit(new Values(sentence), sentence);
-    }
-
-    @Override
-    public void ack(Object id) {
-        //Ignored
-    }
-
-    @Override
-    public void fail(Object id) {
-      _collector.emit(new Values(id), id);
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-      declarer.declare(new Fields("sentence"));
-    }
-  }
-
-  public static class SplitSentence extends BaseBasicBolt {
-    @Override
-    public void execute(Tuple tuple, BasicOutputCollector collector) {
-      String sentence = tuple.getString(0);
-      for (String word: sentence.split("\\s+")) {
-          collector.emit(new Values(word, 1));
-      }
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-      declarer.declare(new Fields("word", "count"));
-    }
-  }
-
-  public static class WordCount extends BaseBasicBolt {
-    Map<String, Integer> counts = new HashMap<String, Integer>();
-
-    @Override
-    public void execute(Tuple tuple, BasicOutputCollector collector) {
-      String word = tuple.getString(0);
-      Integer count = counts.get(word);
-      if (count == null)
-        count = 0;
-      count++;
-      counts.put(word, count);
-      collector.emit(new Values(word, count));
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-      declarer.declare(new Fields("word", "count"));
-    }
-  }
-
-  public static void printMetrics(Nimbus.Client client, String name) throws Exception {
-    ClusterSummary summary = client.getClusterInfo();
-    String id = null;
-    for (TopologySummary ts: summary.get_topologies()) {
-      if (name.equals(ts.get_name())) {
-        id = ts.get_id();
-      }
-    }
-    if (id == null) {
-      throw new Exception("Could not find a topology named "+name);
-    }
-    TopologyInfo info = client.getTopologyInfo(id);
-    int uptime = info.get_uptime_secs();
-    long acked = 0;
-    long failed = 0;
-    double weightedAvgTotal = 0.0;
-    for (ExecutorSummary exec: info.get_executors()) {
-      if ("spout".equals(exec.get_component_id())) {
-        SpoutStats stats = exec.get_stats().get_specific().get_spout();
-        Map<String, Long> failedMap = stats.get_failed().get(":all-time");
-        Map<String, Long> ackedMap = stats.get_acked().get(":all-time");
-        Map<String, Double> avgLatMap = stats.get_complete_ms_avg().get(":all-time");
-        for (String key: ackedMap.keySet()) {
-          if (failedMap != null) {
-              Long tmp = failedMap.get(key);
-              if (tmp != null) {
-                  failed += tmp;
-              }
-          }
-          long ackVal = ackedMap.get(key);
-          double latVal = avgLatMap.get(key) * ackVal;
-          acked += ackVal;
-          weightedAvgTotal += latVal;
-        }
-      }
-    }
-    double avgLatency = weightedAvgTotal/acked;
-    System.out.println("uptime: "+uptime+" acked: "+acked+" avgLatency: "+avgLatency+" acked/sec: "+(((double)acked)/uptime+" failed: "+failed));
-  } 
-
-  public static void kill(Nimbus.Client client, String name) throws Exception {
-    KillOptions opts = new KillOptions();
-    opts.set_wait_secs(0);
-    client.killTopologyWithOpts(name, opts);
-  } 
-
-  public static void main(String[] args) throws Exception {
-
-    TopologyBuilder builder = new TopologyBuilder();
-
-    builder.setSpout("spout", new FastRandomSentenceSpout(), 4);
-
-    builder.setBolt("split", new SplitSentence(), 4).shuffleGrouping("spout");
-    builder.setBolt("count", new WordCount(), 4).fieldsGrouping("split", new Fields("word"));
-
-    Config conf = new Config();
-    conf.registerMetricsConsumer(backtype.storm.metric.LoggingMetricsConsumer.class);
-
-    String name = "wc-test";
-    if (args != null && args.length > 0) {
-        name = args[0];
-    }
-
-    conf.setNumWorkers(1);
-    StormSubmitter.submitTopologyWithProgressBar(name, conf, builder.createTopology());
-
-    Map clusterConf = Utils.readStormConfig();
-    clusterConf.putAll(Utils.readCommandLineOpts());
-    Nimbus.Client client = NimbusClient.getConfiguredClient(clusterConf).getClient();
-
-    //Sleep for 5 mins
-    for (int i = 0; i < 10; i++) {
-        Thread.sleep(30 * 1000);
-        printMetrics(client, name);
-    }
-    kill(client, name);
-  }
-}
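
printMetrics in the deleted example derives an overall complete latency by weighting each stream's average latency by its acked count before dividing by the total acked. That arithmetic is easy to check in isolation; a small sketch with made-up numbers:

    import java.util.HashMap;
    import java.util.Map;

    public class WeightedLatencySketch {
        public static void main(String[] args) {
            // Hypothetical per-stream acked counts and average complete latencies (ms).
            Map<String, Long> acked = new HashMap<>();
            Map<String, Double> avgLatency = new HashMap<>();
            acked.put("default", 10000L);
            avgLatency.put("default", 12.0);
            acked.put("updates", 2000L);
            avgLatency.put("updates", 40.0);

            long totalAcked = 0;
            double weightedTotal = 0.0;
            for (String stream : acked.keySet()) {
                long a = acked.get(stream);
                totalAcked += a;
                weightedTotal += avgLatency.get(stream) * a; // same weighting as printMetrics
            }
            // (10000*12 + 2000*40) / 12000 = 200000 / 12000 ~= 16.67 ms
            System.out.println("overall avg latency: " + (weightedTotal / totalAcked) + " ms");
        }
    }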

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/InOrderDeliveryTest.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/InOrderDeliveryTest.java b/examples/storm-starter/src/jvm/storm/starter/InOrderDeliveryTest.java
deleted file mode 100644
index 5df0688..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/InOrderDeliveryTest.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter;
-
-import backtype.storm.Config;
-import backtype.storm.StormSubmitter;
-import backtype.storm.generated.*;
-import backtype.storm.spout.SpoutOutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.BasicOutputCollector;
-import backtype.storm.topology.FailedException;
-import backtype.storm.topology.IRichBolt;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.topology.base.BaseBasicBolt;
-import backtype.storm.topology.base.BaseRichSpout;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
-import backtype.storm.utils.NimbusClient;
-import backtype.storm.utils.Utils;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Random;
-
-public class InOrderDeliveryTest {
-  public static class InOrderSpout extends BaseRichSpout {
-    SpoutOutputCollector _collector;
-    int _base = 0;
-    int _i = 0;
-
-    @Override
-    public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
-      _collector = collector;
-      _base = context.getThisTaskIndex();
-    }
-
-    @Override
-    public void nextTuple() {
-      Values v = new Values(_base, _i);
-      _collector.emit(v, "ACK");
-      _i++;
-    }
-
-    @Override
-    public void ack(Object id) {
-      //Ignored
-    }
-
-    @Override
-    public void fail(Object id) {
-      //Ignored
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-      declarer.declare(new Fields("c1", "c2"));
-    }
-  }
-
-  public static class Check extends BaseBasicBolt {
-    Map<Integer, Integer> expected = new HashMap<Integer, Integer>();
-
-    @Override
-    public void execute(Tuple tuple, BasicOutputCollector collector) {
-      Integer c1 = tuple.getInteger(0);
-      Integer c2 = tuple.getInteger(1);
-      Integer exp = expected.get(c1);
-      if (exp == null) exp = 0;
-      if (c2.intValue() != exp.intValue()) {
-          System.out.println(c1+" "+c2+" != "+exp);
-          throw new FailedException(c1+" "+c2+" != "+exp);
-      }
-      exp = c2 + 1;
-      expected.put(c1, exp);
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-      //Empty
-    }
-  }
-
-  public static void printMetrics(Nimbus.Client client, String name) throws Exception {
-    ClusterSummary summary = client.getClusterInfo();
-    String id = null;
-    for (TopologySummary ts: summary.get_topologies()) {
-      if (name.equals(ts.get_name())) {
-        id = ts.get_id();
-      }
-    }
-    if (id == null) {
-      throw new Exception("Could not find a topology named "+name);
-    }
-    TopologyInfo info = client.getTopologyInfo(id);
-    int uptime = info.get_uptime_secs();
-    long acked = 0;
-    long failed = 0;
-    double weightedAvgTotal = 0.0;
-    for (ExecutorSummary exec: info.get_executors()) {
-      if ("spout".equals(exec.get_component_id())) {
-        SpoutStats stats = exec.get_stats().get_specific().get_spout();
-        Map<String, Long> failedMap = stats.get_failed().get(":all-time");
-        Map<String, Long> ackedMap = stats.get_acked().get(":all-time");
-        Map<String, Double> avgLatMap = stats.get_complete_ms_avg().get(":all-time");
-        for (String key: ackedMap.keySet()) {
-          if (failedMap != null) {
-              Long tmp = failedMap.get(key);
-              if (tmp != null) {
-                  failed += tmp;
-              }
-          }
-          long ackVal = ackedMap.get(key);
-          double latVal = avgLatMap.get(key) * ackVal;
-          acked += ackVal;
-          weightedAvgTotal += latVal;
-        }
-      }
-    }
-    double avgLatency = weightedAvgTotal/acked;
-    System.out.println("uptime: "+uptime+" acked: "+acked+" avgLatency: "+avgLatency+" acked/sec: "+(((double)acked)/uptime+" failed: "+failed));
-  } 
-
-  public static void kill(Nimbus.Client client, String name) throws Exception {
-    KillOptions opts = new KillOptions();
-    opts.set_wait_secs(0);
-    client.killTopologyWithOpts(name, opts);
-  } 
-
-  public static void main(String[] args) throws Exception {
-
-    TopologyBuilder builder = new TopologyBuilder();
-
-    builder.setSpout("spout", new InOrderSpout(), 8);
-    builder.setBolt("count", new Check(), 8).fieldsGrouping("spout", new Fields("c1"));
-
-    Config conf = new Config();
-    conf.registerMetricsConsumer(backtype.storm.metric.LoggingMetricsConsumer.class);
-
-    String name = "in-order-test";
-    if (args != null && args.length > 0) {
-        name = args[0];
-    }
-
-    conf.setNumWorkers(1);
-    StormSubmitter.submitTopologyWithProgressBar(name, conf, builder.createTopology());
-
-    Map clusterConf = Utils.readStormConfig();
-    clusterConf.putAll(Utils.readCommandLineOpts());
-    Nimbus.Client client = NimbusClient.getConfiguredClient(clusterConf).getClient();
-
-    //Sleep for 25 mins (50 iterations of 30 seconds)
-    for (int i = 0; i < 50; i++) {
-        Thread.sleep(30 * 1000);
-        printMetrics(client, name);
-    }
-    kill(client, name);
-  }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/ManualDRPC.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/ManualDRPC.java b/examples/storm-starter/src/jvm/storm/starter/ManualDRPC.java
deleted file mode 100644
index fe0bae2..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/ManualDRPC.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter;
-
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.LocalDRPC;
-import backtype.storm.drpc.DRPCSpout;
-import backtype.storm.drpc.ReturnResults;
-import backtype.storm.topology.BasicOutputCollector;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.topology.base.BaseBasicBolt;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
-
-
-public class ManualDRPC {
-  public static class ExclamationBolt extends BaseBasicBolt {
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-      declarer.declare(new Fields("result", "return-info"));
-    }
-
-    @Override
-    public void execute(Tuple tuple, BasicOutputCollector collector) {
-      String arg = tuple.getString(0);
-      Object retInfo = tuple.getValue(1);
-      collector.emit(new Values(arg + "!!!", retInfo));
-    }
-
-  }
-
-  public static void main(String[] args) {
-    TopologyBuilder builder = new TopologyBuilder();
-    LocalDRPC drpc = new LocalDRPC();
-
-    DRPCSpout spout = new DRPCSpout("exclamation", drpc);
-    builder.setSpout("drpc", spout);
-    builder.setBolt("exclaim", new ExclamationBolt(), 3).shuffleGrouping("drpc");
-    builder.setBolt("return", new ReturnResults(), 3).shuffleGrouping("exclaim");
-
-    LocalCluster cluster = new LocalCluster();
-    Config conf = new Config();
-    cluster.submitTopology("exclaim", conf, builder.createTopology());
-
-    System.out.println(drpc.execute("exclamation", "aaa"));
-    System.out.println(drpc.execute("exclamation", "bbb"));
-
-  }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/MultipleLoggerTopology.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/MultipleLoggerTopology.java b/examples/storm-starter/src/jvm/storm/starter/MultipleLoggerTopology.java
deleted file mode 100644
index 4285ff9..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/MultipleLoggerTopology.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter;
-
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.StormSubmitter;
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.testing.TestWordSpout;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.topology.base.BaseRichBolt;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
-import backtype.storm.utils.Utils;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-import java.util.Map;
-
-/**
- * This is a basic example of a Storm topology.
- */
-public class MultipleLoggerTopology {
-  public static class ExclamationLoggingBolt extends BaseRichBolt {
-    OutputCollector _collector;
-    Logger _rootLogger = LoggerFactory.getLogger (Logger.ROOT_LOGGER_NAME);
-    // ensure the loggers are configured in the worker.xml before
-    // trying to use them here
-    Logger _logger = LoggerFactory.getLogger ("com.myapp");
-    Logger _subLogger = LoggerFactory.getLogger ("com.myapp.sub");
-
-    @Override
-    public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
-      _collector = collector;
-    }
-
-    @Override
-    public void execute(Tuple tuple) {
-      _rootLogger.debug ("root: This is a DEBUG message");
-      _rootLogger.info ("root: This is an INFO message");
-      _rootLogger.warn ("root: This is a WARN message");
-      _rootLogger.error ("root: This is an ERROR message");
-
-      _logger.debug ("myapp: This is a DEBUG message");
-      _logger.info ("myapp: This is an INFO message");
-      _logger.warn ("myapp: This is a WARN message");
-      _logger.error ("myapp: This is an ERROR message");
-
-      _subLogger.debug ("myapp.sub: This is a DEBUG message");
-      _subLogger.info ("myapp.sub: This is an INFO message");
-      _subLogger.warn ("myapp.sub: This is a WARN message");
-      _subLogger.error ("myapp.sub: This is an ERROR message");
-
-      _collector.emit(tuple, new Values(tuple.getString(0) + "!!!"));
-      _collector.ack(tuple);
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-      declarer.declare(new Fields("word"));
-    }
-  }
-
-  public static void main(String[] args) throws Exception {
-    TopologyBuilder builder = new TopologyBuilder();
-
-    builder.setSpout("word", new TestWordSpout(), 10);
-    builder.setBolt("exclaim1", new ExclamationLoggingBolt(), 3).shuffleGrouping("word");
-    builder.setBolt("exclaim2", new ExclamationLoggingBolt(), 2).shuffleGrouping("exclaim1");
-
-    Config conf = new Config();
-    conf.setDebug(true);
-
-    if (args != null && args.length > 0) {
-      conf.setNumWorkers(2);
-      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
-    } else {
-      LocalCluster cluster = new LocalCluster();
-      cluster.submitTopology("test", conf, builder.createTopology());
-      Utils.sleep(10000);
-      cluster.killTopology("test");
-      cluster.shutdown();
-    }
-  }
-}
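
The named loggers this deleted example writes to ("com.myapp", "com.myapp.sub") are the kind of loggers the new POST /api/v1/topology/:id/logconfig route (added in ui/core.clj earlier in this commit) can adjust at runtime: the handler reads a map of logger name to a {"target_level", "timeout"} entry, and a null target_level clears the override. A small sketch of assembling such a request body by hand (no JSON library assumed; the levels and timeout are placeholders):

    public class LogConfigBodySketch {
        public static void main(String[] args) {
            // Raise com.myapp to DEBUG for 300 seconds and clear any override on com.myapp.sub.
            String body = "{\"namedLoggerLevels\": {"
                    + "\"com.myapp\": {\"target_level\": \"DEBUG\", \"timeout\": 300},"
                    + "\"com.myapp.sub\": {\"target_level\": null, \"timeout\": 0}"
                    + "}}";
            // POSTed with Content-Type: application/json, this is merged into the route's
            // params by wrap-json-params and handed to the handler as namedLoggerLevels.
            System.out.println(body);
        }
    }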

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/PrintSampleStream.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/PrintSampleStream.java b/examples/storm-starter/src/jvm/storm/starter/PrintSampleStream.java
deleted file mode 100644
index 021cc17..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/PrintSampleStream.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package storm.starter;
-
-import java.util.Arrays;
-
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.utils.Utils;
-
-import storm.starter.bolt.PrinterBolt;
-import storm.starter.spout.TwitterSampleSpout;
-
-public class PrintSampleStream {        
-    public static void main(String[] args) {
-        String consumerKey = args[0]; 
-        String consumerSecret = args[1]; 
-        String accessToken = args[2]; 
-        String accessTokenSecret = args[3];
-        String[] arguments = args.clone();
-        String[] keyWords = Arrays.copyOfRange(arguments, 4, arguments.length);
-        
-        TopologyBuilder builder = new TopologyBuilder();
-        
-        builder.setSpout("twitter", new TwitterSampleSpout(consumerKey, consumerSecret,
-                                accessToken, accessTokenSecret, keyWords));
-        builder.setBolt("print", new PrinterBolt())
-                .shuffleGrouping("twitter");
-                
-                
-        Config conf = new Config();
-        
-        
-        LocalCluster cluster = new LocalCluster();
-        
-        cluster.submitTopology("test", conf, builder.createTopology());
-        
-        Utils.sleep(10000);
-        cluster.shutdown();
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/ReachTopology.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/ReachTopology.java b/examples/storm-starter/src/jvm/storm/starter/ReachTopology.java
deleted file mode 100644
index 73ed45a..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/ReachTopology.java
+++ /dev/null
@@ -1,196 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter;
-
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.LocalDRPC;
-import backtype.storm.StormSubmitter;
-import backtype.storm.coordination.BatchOutputCollector;
-import backtype.storm.drpc.LinearDRPCTopologyBuilder;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.BasicOutputCollector;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseBasicBolt;
-import backtype.storm.topology.base.BaseBatchBolt;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
-
-import java.util.*;
-
-/**
- * This is a good example of doing complex Distributed RPC on top of Storm. This program creates a topology that can
- * compute the reach for any URL on Twitter in realtime by parallelizing the whole computation.
- * <p/>
- * Reach is the number of unique people exposed to a URL on Twitter. To compute reach, you have to get all the people
- * who tweeted the URL, get all the followers of all those people, unique that set of followers, and then count the
- * unique set. It's an intense computation that can involve thousands of database calls and tens of millions of follower
- * records.
- * <p/>
- * This Storm topology does every piece of that computation in parallel, turning what would be a computation that takes
- * minutes on a single machine into one that takes just a couple seconds.
- * <p/>
- * For the purposes of demonstration, this topology replaces the use of actual DBs with in-memory hashmaps.
- *
- * @see <a href="http://storm.apache.org/documentation/Distributed-RPC.html">Distributed RPC</a>
- */
-public class ReachTopology {
-  public static Map<String, List<String>> TWEETERS_DB = new HashMap<String, List<String>>() {{
-    put("foo.com/blog/1", Arrays.asList("sally", "bob", "tim", "george", "nathan"));
-    put("engineering.twitter.com/blog/5", Arrays.asList("adam", "david", "sally", "nathan"));
-    put("tech.backtype.com/blog/123", Arrays.asList("tim", "mike", "john"));
-  }};
-
-  public static Map<String, List<String>> FOLLOWERS_DB = new HashMap<String, List<String>>() {{
-    put("sally", Arrays.asList("bob", "tim", "alice", "adam", "jim", "chris", "jai"));
-    put("bob", Arrays.asList("sally", "nathan", "jim", "mary", "david", "vivian"));
-    put("tim", Arrays.asList("alex"));
-    put("nathan", Arrays.asList("sally", "bob", "adam", "harry", "chris", "vivian", "emily", "jordan"));
-    put("adam", Arrays.asList("david", "carissa"));
-    put("mike", Arrays.asList("john", "bob"));
-    put("john", Arrays.asList("alice", "nathan", "jim", "mike", "bob"));
-  }};
-
-  public static class GetTweeters extends BaseBasicBolt {
-    @Override
-    public void execute(Tuple tuple, BasicOutputCollector collector) {
-      Object id = tuple.getValue(0);
-      String url = tuple.getString(1);
-      List<String> tweeters = TWEETERS_DB.get(url);
-      if (tweeters != null) {
-        for (String tweeter : tweeters) {
-          collector.emit(new Values(id, tweeter));
-        }
-      }
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-      declarer.declare(new Fields("id", "tweeter"));
-    }
-  }
-
-  public static class GetFollowers extends BaseBasicBolt {
-    @Override
-    public void execute(Tuple tuple, BasicOutputCollector collector) {
-      Object id = tuple.getValue(0);
-      String tweeter = tuple.getString(1);
-      List<String> followers = FOLLOWERS_DB.get(tweeter);
-      if (followers != null) {
-        for (String follower : followers) {
-          collector.emit(new Values(id, follower));
-        }
-      }
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-      declarer.declare(new Fields("id", "follower"));
-    }
-  }
-
-  public static class PartialUniquer extends BaseBatchBolt {
-    BatchOutputCollector _collector;
-    Object _id;
-    Set<String> _followers = new HashSet<String>();
-
-    @Override
-    public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, Object id) {
-      _collector = collector;
-      _id = id;
-    }
-
-    @Override
-    public void execute(Tuple tuple) {
-      _followers.add(tuple.getString(1));
-    }
-
-    @Override
-    public void finishBatch() {
-      _collector.emit(new Values(_id, _followers.size()));
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-      declarer.declare(new Fields("id", "partial-count"));
-    }
-  }
-
-  public static class CountAggregator extends BaseBatchBolt {
-    BatchOutputCollector _collector;
-    Object _id;
-    int _count = 0;
-
-    @Override
-    public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, Object id) {
-      _collector = collector;
-      _id = id;
-    }
-
-    @Override
-    public void execute(Tuple tuple) {
-      _count += tuple.getInteger(1);
-    }
-
-    @Override
-    public void finishBatch() {
-      _collector.emit(new Values(_id, _count));
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-      declarer.declare(new Fields("id", "reach"));
-    }
-  }
-
-  public static LinearDRPCTopologyBuilder construct() {
-    LinearDRPCTopologyBuilder builder = new LinearDRPCTopologyBuilder("reach");
-    builder.addBolt(new GetTweeters(), 4);
-    builder.addBolt(new GetFollowers(), 12).shuffleGrouping();
-    builder.addBolt(new PartialUniquer(), 6).fieldsGrouping(new Fields("id", "follower"));
-    builder.addBolt(new CountAggregator(), 3).fieldsGrouping(new Fields("id"));
-    return builder;
-  }
-
-  public static void main(String[] args) throws Exception {
-    LinearDRPCTopologyBuilder builder = construct();
-
-
-    Config conf = new Config();
-
-    if (args == null || args.length == 0) {
-      conf.setMaxTaskParallelism(3);
-      LocalDRPC drpc = new LocalDRPC();
-      LocalCluster cluster = new LocalCluster();
-      cluster.submitTopology("reach-drpc", conf, builder.createLocalTopology(drpc));
-
-      String[] urlsToTry = new String[]{ "foo.com/blog/1", "engineering.twitter.com/blog/5", "notaurl.com" };
-      for (String url : urlsToTry) {
-        System.out.println("Reach of " + url + ": " + drpc.execute("reach", url));
-      }
-
-      cluster.shutdown();
-      drpc.shutdown();
-    }
-    else {
-      conf.setNumWorkers(6);
-      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createRemoteTopology());
-    }
-  }
-}

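For reference, the reach computation that ReachTopology parallelizes can be sketched sequentially in a few lines. This is only an illustration built on in-memory maps in the spirit of TWEETERS_DB and FOLLOWERS_DB above; the class, map contents, and method names are invented for the sketch and are not part of the commit.

    import java.util.*;

    public class ReachSketch {
      // Illustrative in-memory stand-ins for the tweeter and follower databases.
      static Map<String, List<String>> tweeters = new HashMap<>();
      static Map<String, List<String>> followers = new HashMap<>();

      // Reach of a URL: the number of distinct followers across everyone who tweeted it.
      static int reach(String url) {
        Set<String> unique = new HashSet<>();
        for (String tweeter : tweeters.getOrDefault(url, Collections.emptyList())) {
          unique.addAll(followers.getOrDefault(tweeter, Collections.emptyList()));
        }
        return unique.size();
      }

      public static void main(String[] args) {
        tweeters.put("foo.com/blog/1", Arrays.asList("sally", "bob"));
        followers.put("sally", Arrays.asList("tim", "alice"));
        followers.put("bob", Arrays.asList("alice", "jim"));
        System.out.println(reach("foo.com/blog/1")); // 3 distinct followers
      }
    }

The topology splits this same loop across GetTweeters, GetFollowers, PartialUniquer, and CountAggregator so each stage runs in parallel, with the fieldsGrouping on ("id", "follower") doing the per-request de-duplication.
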
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/ResourceAwareExampleTopology.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/ResourceAwareExampleTopology.java b/examples/storm-starter/src/jvm/storm/starter/ResourceAwareExampleTopology.java
deleted file mode 100644
index 0fb3724..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/ResourceAwareExampleTopology.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter;
-
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.StormSubmitter;
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.testing.TestWordSpout;
-import backtype.storm.topology.BoltDeclarer;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.SpoutDeclarer;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.topology.base.BaseRichBolt;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
-import backtype.storm.utils.Utils;
-
-import java.util.Map;
-
-public class ResourceAwareExampleTopology {
-  public static class ExclamationBolt extends BaseRichBolt {
-    OutputCollector _collector;
-
-    @Override
-    public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
-      _collector = collector;
-    }
-
-    @Override
-    public void execute(Tuple tuple) {
-      _collector.emit(tuple, new Values(tuple.getString(0) + "!!!"));
-      _collector.ack(tuple);
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-      declarer.declare(new Fields("word"));
-    }
-  }
-
-  public static void main(String[] args) throws Exception {
-    TopologyBuilder builder = new TopologyBuilder();
-
-    SpoutDeclarer spout =  builder.setSpout("word", new TestWordSpout(), 10);
-    //set cpu requirement
-    spout.setCPULoad(20);
-    //set onheap and offheap memory requirement
-    spout.setMemoryLoad(64, 16);
-
-    BoltDeclarer bolt1 = builder.setBolt("exclaim1", new ExclamationBolt(), 3).shuffleGrouping("word");
-    //sets cpu requirement.  Not necessary to set both CPU and memory.
-    //For requirements not set, a default value will be used
-    bolt1.setCPULoad(15);
-
-    BoltDeclarer bolt2 = builder.setBolt("exclaim2", new ExclamationBolt(), 2).shuffleGrouping("exclaim1");
-    bolt2.setMemoryLoad(100);
-
-    Config conf = new Config();
-    conf.setDebug(true);
-
-    /**
-     * Use to limit the maximum amount of memory (in MB) allocated to one worker process.
-     * Can be used to spread executors to multiple workers
-     */
-    conf.setTopologyWorkerMaxHeapSize(1024.0);
-
-    //topology priority: 0 is the highest priority, and the importance decreases as the priority number increases.
-    //Recommended range of 0-29 but no hard limit set.
-    conf.setTopologyPriority(29);
-
-    // Set strategy to schedule topology. If not specified, default to backtype.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy
-    conf.setTopologyStrategy(backtype.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy.class);
-
-    if (args != null && args.length > 0) {
-      conf.setNumWorkers(3);
-
-      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
-    }
-    else {
-
-      LocalCluster cluster = new LocalCluster();
-      cluster.submitTopology("test", conf, builder.createTopology());
-      Utils.sleep(10000);
-      cluster.killTopology("test");
-      cluster.shutdown();
-    }
-  }
-}

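One way to see why conf.setTopologyWorkerMaxHeapSize limits how executors are packed: if each executor declares an on-heap requirement, a worker can only host executors whose requirements fit under the cap. The first-fit packing below is only a sketch of that constraint and is not the algorithm used by DefaultResourceAwareStrategy; the numbers loosely mirror the 64 MB and 100 MB on-heap settings above.

    import java.util.ArrayList;
    import java.util.List;

    public class HeapCapSketch {
      // First-fit packing of executor on-heap requirements (MB) under a per-worker heap cap.
      static List<List<Double>> pack(double[] onHeapMb, double workerMaxHeapMb) {
        List<List<Double>> workers = new ArrayList<>();
        List<Double> used = new ArrayList<>();
        for (double need : onHeapMb) {
          int slot = -1;
          for (int i = 0; i < workers.size(); i++) {
            if (used.get(i) + need <= workerMaxHeapMb) { slot = i; break; }
          }
          if (slot < 0) { workers.add(new ArrayList<>()); used.add(0.0); slot = workers.size() - 1; }
          workers.get(slot).add(need);
          used.set(slot, used.get(slot) + need);
        }
        return workers;
      }

      public static void main(String[] args) {
        // Under a generous 1024 MB cap everything fits in one worker; a tight cap forces more workers.
        System.out.println(pack(new double[]{64, 64, 100, 100, 100}, 1024.0).size()); // 1
        System.out.println(pack(new double[]{64, 64, 100, 100, 100}, 200.0).size());  // 3
      }
    }
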
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/RollingTopWords.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/RollingTopWords.java b/examples/storm-starter/src/jvm/storm/starter/RollingTopWords.java
deleted file mode 100644
index 762c22a..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/RollingTopWords.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter;
-
-import backtype.storm.Config;
-import backtype.storm.testing.TestWordSpout;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.tuple.Fields;
-import org.apache.log4j.Logger;
-import storm.starter.bolt.IntermediateRankingsBolt;
-import storm.starter.bolt.RollingCountBolt;
-import storm.starter.bolt.TotalRankingsBolt;
-import storm.starter.util.StormRunner;
-
-/**
- * This topology does a continuous computation of the top N words that the topology has seen in terms of cardinality.
- * The top N computation is done in a completely scalable way, and a similar approach could be used to compute things
- * like trending topics or trending images on Twitter.
- */
-public class RollingTopWords {
-
-  private static final Logger LOG = Logger.getLogger(RollingTopWords.class);
-  private static final int DEFAULT_RUNTIME_IN_SECONDS = 60;
-  private static final int TOP_N = 5;
-
-  private final TopologyBuilder builder;
-  private final String topologyName;
-  private final Config topologyConfig;
-  private final int runtimeInSeconds;
-
-  public RollingTopWords(String topologyName) throws InterruptedException {
-    builder = new TopologyBuilder();
-    this.topologyName = topologyName;
-    topologyConfig = createTopologyConfiguration();
-    runtimeInSeconds = DEFAULT_RUNTIME_IN_SECONDS;
-
-    wireTopology();
-  }
-
-  private static Config createTopologyConfiguration() {
-    Config conf = new Config();
-    conf.setDebug(true);
-    return conf;
-  }
-
-  private void wireTopology() throws InterruptedException {
-    String spoutId = "wordGenerator";
-    String counterId = "counter";
-    String intermediateRankerId = "intermediateRanker";
-    String totalRankerId = "finalRanker";
-    builder.setSpout(spoutId, new TestWordSpout(), 5);
-    builder.setBolt(counterId, new RollingCountBolt(9, 3), 4).fieldsGrouping(spoutId, new Fields("word"));
-    builder.setBolt(intermediateRankerId, new IntermediateRankingsBolt(TOP_N), 4).fieldsGrouping(counterId, new Fields(
-        "obj"));
-    builder.setBolt(totalRankerId, new TotalRankingsBolt(TOP_N)).globalGrouping(intermediateRankerId);
-  }
-
-  public void runLocally() throws InterruptedException {
-    StormRunner.runTopologyLocally(builder.createTopology(), topologyName, topologyConfig, runtimeInSeconds);
-  }
-
-  public void runRemotely() throws Exception {
-    StormRunner.runTopologyRemotely(builder.createTopology(), topologyName, topologyConfig);
-  }
-
-  /**
-   * Submits (runs) the topology.
-   *
-   * Usage: "RollingTopWords [topology-name] [local|remote]"
-   *
-   * By default, the topology is run locally under the name "slidingWindowCounts".
-   *
-   * Examples:
-   *
-   * ```
-   *
-   * # Runs in local mode (LocalCluster), with topology name "slidingWindowCounts"
-   * $ storm jar storm-starter-jar-with-dependencies.jar storm.starter.RollingTopWords
-   *
-   * # Runs in local mode (LocalCluster), with topology name "foobar"
-   * $ storm jar storm-starter-jar-with-dependencies.jar storm.starter.RollingTopWords foobar
-   *
-   * # Runs in local mode (LocalCluster), with topology name "foobar"
-   * $ storm jar storm-starter-jar-with-dependencies.jar storm.starter.RollingTopWords foobar local
-   *
-   * # Runs in remote/cluster mode, with topology name "production-topology"
-   * $ storm jar storm-starter-jar-with-dependencies.jar storm.starter.RollingTopWords production-topology remote
-   * ```
-   *
-   * @param args First positional argument (optional) is topology name, second positional argument (optional) defines
-   *             whether to run the topology locally ("local") or remotely, i.e. on a real cluster ("remote").
-   * @throws Exception
-   */
-  public static void main(String[] args) throws Exception {
-    String topologyName = "slidingWindowCounts";
-    if (args.length >= 1) {
-      topologyName = args[0];
-    }
-    boolean runLocally = true;
-    if (args.length >= 2 && args[1].equalsIgnoreCase("remote")) {
-      runLocally = false;
-    }
-
-    LOG.info("Topology name: " + topologyName);
-    RollingTopWords rtw = new RollingTopWords(topologyName);
-    if (runLocally) {
-      LOG.info("Running in local mode");
-      rtw.runLocally();
-    }
-    else {
-      LOG.info("Running in remote (cluster) mode");
-      rtw.runRemotely();
-    }
-  }
-}

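The continuous top-N above depends on RollingCountBolt(9, 3) maintaining per-word counts over a 9-second window that is emitted every 3 seconds. A minimal sketch of such a bucketed sliding counter follows; the bucket-array approach, class, and method names are invented here for illustration, so the real bolt's internals may differ.

    import java.util.HashMap;
    import java.util.Map;

    public class SlidingCounterSketch {
      private final int numBuckets;
      private final Map<String, long[]> counts = new HashMap<>();
      private int head = 0; // bucket currently being filled

      SlidingCounterSketch(int numBuckets) { this.numBuckets = numBuckets; }

      void incr(String word) {
        counts.computeIfAbsent(word, w -> new long[numBuckets])[head]++;
      }

      // Called every emit interval: return totals over the whole window, then reuse the oldest bucket.
      Map<String, Long> getAndAdvance() {
        Map<String, Long> totals = new HashMap<>();
        for (Map.Entry<String, long[]> e : counts.entrySet()) {
          long sum = 0;
          for (long c : e.getValue()) sum += c;
          totals.put(e.getKey(), sum);
        }
        head = (head + 1) % numBuckets;
        for (long[] buckets : counts.values()) buckets[head] = 0; // drop counts that slid out of the window
        return totals;
      }

      public static void main(String[] args) {
        SlidingCounterSketch c = new SlidingCounterSketch(3); // e.g. 9s window / 3s emit = 3 buckets
        c.incr("storm"); c.incr("storm"); c.incr("word");
        System.out.println(c.getAndAdvance()); // storm=2, word=1 (a HashMap, so print order may vary)
      }
    }
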
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/SingleJoinExample.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/SingleJoinExample.java b/examples/storm-starter/src/jvm/storm/starter/SingleJoinExample.java
deleted file mode 100644
index cb1d98c..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/SingleJoinExample.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter;
-
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.testing.FeederSpout;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
-import backtype.storm.utils.Utils;
-import storm.starter.bolt.SingleJoinBolt;
-
-public class SingleJoinExample {
-  public static void main(String[] args) {
-    FeederSpout genderSpout = new FeederSpout(new Fields("id", "gender"));
-    FeederSpout ageSpout = new FeederSpout(new Fields("id", "age"));
-
-    TopologyBuilder builder = new TopologyBuilder();
-    builder.setSpout("gender", genderSpout);
-    builder.setSpout("age", ageSpout);
-    builder.setBolt("join", new SingleJoinBolt(new Fields("gender", "age"))).fieldsGrouping("gender", new Fields("id"))
-        .fieldsGrouping("age", new Fields("id"));
-
-    Config conf = new Config();
-    conf.setDebug(true);
-
-    LocalCluster cluster = new LocalCluster();
-    cluster.submitTopology("join-example", conf, builder.createTopology());
-
-    for (int i = 0; i < 10; i++) {
-      String gender;
-      if (i % 2 == 0) {
-        gender = "male";
-      }
-      else {
-        gender = "female";
-      }
-      genderSpout.feed(new Values(i, gender));
-    }
-
-    for (int i = 9; i >= 0; i--) {
-      ageSpout.feed(new Values(i, i + 20));
-    }
-
-    Utils.sleep(2000);
-    cluster.shutdown();
-  }
-}

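The join works because both streams are fieldsGrouped on "id", so the gender tuple and the age tuple for a given id always reach the same SingleJoinBolt task, which can buffer whichever side arrives first. A minimal sketch of that buffering idea is below (illustrative only; the committed bolt also has to expire entries whose other side never arrives, which this sketch omits).

    import java.util.HashMap;
    import java.util.Map;

    public class IdJoinSketch {
      // Pending tuples from whichever side arrived first, keyed by the join field ("id").
      private final Map<Integer, String> pendingGender = new HashMap<>();
      private final Map<Integer, Integer> pendingAge = new HashMap<>();

      void onGender(int id, String gender) {
        Integer age = pendingAge.remove(id);
        if (age != null) emit(id, gender, age); else pendingGender.put(id, gender);
      }

      void onAge(int id, int age) {
        String gender = pendingGender.remove(id);
        if (gender != null) emit(id, gender, age); else pendingAge.put(id, age);
      }

      void emit(int id, String gender, int age) {
        System.out.println("joined: id=" + id + " gender=" + gender + " age=" + age);
      }

      public static void main(String[] args) {
        IdJoinSketch join = new IdJoinSketch();
        join.onGender(0, "male");   // buffered, age not seen yet
        join.onAge(0, 20);          // joined: id=0 gender=male age=20
        join.onAge(1, 21);          // buffered
        join.onGender(1, "female"); // joined: id=1 gender=female age=21
      }
    }
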
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/SkewedRollingTopWords.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/SkewedRollingTopWords.java b/examples/storm-starter/src/jvm/storm/starter/SkewedRollingTopWords.java
deleted file mode 100644
index 443c051..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/SkewedRollingTopWords.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter;
-
-import backtype.storm.Config;
-import backtype.storm.testing.TestWordSpout;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.tuple.Fields;
-import org.apache.log4j.Logger;
-import storm.starter.bolt.IntermediateRankingsBolt;
-import storm.starter.bolt.RollingCountBolt;
-import storm.starter.bolt.RollingCountAggBolt;
-import storm.starter.bolt.TotalRankingsBolt;
-import storm.starter.util.StormRunner;
-
-/**
- * This topology does a continuous computation of the top N words that the topology has seen in terms of cardinality.
- * The top N computation is done in a completely scalable way, and a similar approach could be used to compute things
- * like trending topics or trending images on Twitter. It takes an approach that assumes that some words will be much
- * more common than other words, and uses partialKeyGrouping to better balance the skewed load.
- */
-public class SkewedRollingTopWords {
-  private static final Logger LOG = Logger.getLogger(SkewedRollingTopWords.class);
-  private static final int DEFAULT_RUNTIME_IN_SECONDS = 60;
-  private static final int TOP_N = 5;
-
-  private final TopologyBuilder builder;
-  private final String topologyName;
-  private final Config topologyConfig;
-  private final int runtimeInSeconds;
-
-  public SkewedRollingTopWords(String topologyName) throws InterruptedException {
-    builder = new TopologyBuilder();
-    this.topologyName = topologyName;
-    topologyConfig = createTopologyConfiguration();
-    runtimeInSeconds = DEFAULT_RUNTIME_IN_SECONDS;
-
-    wireTopology();
-  }
-
-  private static Config createTopologyConfiguration() {
-    Config conf = new Config();
-    conf.setDebug(true);
-    return conf;
-  }
-
-  private void wireTopology() throws InterruptedException {
-    String spoutId = "wordGenerator";
-    String counterId = "counter";
-    String aggId = "aggregator";
-    String intermediateRankerId = "intermediateRanker";
-    String totalRankerId = "finalRanker";
-    builder.setSpout(spoutId, new TestWordSpout(), 5);
-    builder.setBolt(counterId, new RollingCountBolt(9, 3), 4).partialKeyGrouping(spoutId, new Fields("word"));
-    builder.setBolt(aggId, new RollingCountAggBolt(), 4).fieldsGrouping(counterId, new Fields("obj"));
-    builder.setBolt(intermediateRankerId, new IntermediateRankingsBolt(TOP_N), 4).fieldsGrouping(aggId, new Fields("obj"));
-    builder.setBolt(totalRankerId, new TotalRankingsBolt(TOP_N)).globalGrouping(intermediateRankerId);
-  }
-
-  public void runLocally() throws InterruptedException {
-    StormRunner.runTopologyLocally(builder.createTopology(), topologyName, topologyConfig, runtimeInSeconds);
-  }
-
-  public void runRemotely() throws Exception {
-    StormRunner.runTopologyRemotely(builder.createTopology(), topologyName, topologyConfig);
-  }
-
-  /**
-   * Submits (runs) the topology.
-   *
-   * Usage: "RollingTopWords [topology-name] [local|remote]"
-   *
-   * By default, the topology is run locally under the name "slidingWindowCounts".
-   *
-   * Examples:
-   *
-   * ```
-   *
-   * # Runs in local mode (LocalCluster), with topology name "slidingWindowCounts"
-   * $ storm jar storm-starter-jar-with-dependencies.jar storm.starter.RollingTopWords
-   *
-   * # Runs in local mode (LocalCluster), with topology name "foobar"
-   * $ storm jar storm-starter-jar-with-dependencies.jar storm.starter.RollingTopWords foobar
-   *
-   * # Runs in local mode (LocalCluster), with topology name "foobar"
-   * $ storm jar storm-starter-jar-with-dependencies.jar storm.starter.RollingTopWords foobar local
-   *
-   * # Runs in remote/cluster mode, with topology name "production-topology"
-   * $ storm jar storm-starter-jar-with-dependencies.jar storm.starter.RollingTopWords production-topology remote
-   * ```
-   *
-   * @param args First positional argument (optional) is topology name, second positional argument (optional) defines
-   *             whether to run the topology locally ("local") or remotely, i.e. on a real cluster ("remote").
-   * @throws Exception
-   */
-  public static void main(String[] args) throws Exception {
-    String topologyName = "slidingWindowCounts";
-    if (args.length >= 1) {
-      topologyName = args[0];
-    }
-    boolean runLocally = true;
-    if (args.length >= 2 && args[1].equalsIgnoreCase("remote")) {
-      runLocally = false;
-    }
-
-    LOG.info("Topology name: " + topologyName);
-    SkewedRollingTopWords rtw = new SkewedRollingTopWords(topologyName);
-    if (runLocally) {
-      LOG.info("Running in local mode");
-      rtw.runLocally();
-    }
-    else {
-      LOG.info("Running in remote (cluster) mode");
-      rtw.runRemotely();
-    }
-  }
-}

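partialKeyGrouping handles the skew by letting each key go to one of two candidate downstream tasks, choosing the less loaded one, and RollingCountAggBolt then re-adds the partial counts per word. The sketch below shows the two-choices routing idea; the candidate selection and load tracking here are simplified stand-ins, not the grouping's actual implementation.

    import java.util.Arrays;

    public class TwoChoicesSketch {
      private final long[] sent; // tuples routed to each downstream task so far

      TwoChoicesSketch(int numTasks) { sent = new long[numTasks]; }

      // Pick between two hash-derived candidate tasks for this key, preferring the less loaded one.
      int route(String key) {
        int first = Math.floorMod(key.hashCode(), sent.length);
        int second = Math.floorMod((key + "salt").hashCode(), sent.length); // second, independent candidate
        int chosen = sent[first] <= sent[second] ? first : second;
        sent[chosen]++;
        return chosen;
      }

      public static void main(String[] args) {
        TwoChoicesSketch g = new TwoChoicesSketch(4);
        // A heavily skewed key stream splits between the key's two candidate tasks
        // (or lands on one task if the candidates happen to collide).
        for (int i = 0; i < 8; i++) g.route("the");
        System.out.println(Arrays.toString(g.sent));
      }
    }
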
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/SlidingTupleTsTopology.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/SlidingTupleTsTopology.java b/examples/storm-starter/src/jvm/storm/starter/SlidingTupleTsTopology.java
deleted file mode 100644
index 598335d..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/SlidingTupleTsTopology.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter;
-
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.StormSubmitter;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.topology.base.BaseWindowedBolt;
-import backtype.storm.utils.Utils;
-import storm.starter.bolt.PrinterBolt;
-import storm.starter.bolt.SlidingWindowSumBolt;
-import storm.starter.spout.RandomIntegerSpout;
-
-import java.util.concurrent.TimeUnit;
-
-import static backtype.storm.topology.base.BaseWindowedBolt.Duration;
-
-/**
- * Windowing based on tuple timestamp (i.e. the time when the tuple is generated
- * rather than when it is processed).
- */
-public class SlidingTupleTsTopology {
-    public static void main(String[] args) throws Exception {
-        TopologyBuilder builder = new TopologyBuilder();
-        BaseWindowedBolt bolt = new SlidingWindowSumBolt()
-                .withWindow(new Duration(5, TimeUnit.SECONDS), new Duration(3, TimeUnit.SECONDS))
-                .withTimestampField("ts")
-                .withLag(new Duration(5, TimeUnit.SECONDS));
-        builder.setSpout("integer", new RandomIntegerSpout(), 1);
-        builder.setBolt("slidingsum", bolt, 1).shuffleGrouping("integer");
-        builder.setBolt("printer", new PrinterBolt(), 1).shuffleGrouping("slidingsum");
-        Config conf = new Config();
-        conf.setDebug(true);
-
-        if (args != null && args.length > 0) {
-            conf.setNumWorkers(1);
-            StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
-        } else {
-            LocalCluster cluster = new LocalCluster();
-            cluster.submitTopology("test", conf, builder.createTopology());
-            Utils.sleep(40000);
-            cluster.killTopology("test");
-            cluster.shutdown();
-        }
-    }
-}

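With withTimestampField("ts") and withLag(...), window boundaries follow the timestamps carried in the tuples, and the lag bounds how late an out-of-order tuple may be before a window is treated as complete. The watermark check below is a simplified sketch of that idea, not Storm's actual window manager.

    public class TupleTsSketch {
      private long maxTsSeen = Long.MIN_VALUE;
      private final long lagMs;

      TupleTsSketch(long lagMs) { this.lagMs = lagMs; }

      void onTuple(long tupleTsMs) {
        maxTsSeen = Math.max(maxTsSeen, tupleTsMs);
      }

      // A window ending at windowEndMs can only be evaluated once the watermark
      // (max timestamp seen minus the allowed lag) has passed the window end.
      boolean windowComplete(long windowEndMs) {
        return maxTsSeen - lagMs >= windowEndMs;
      }

      public static void main(String[] args) {
        TupleTsSketch w = new TupleTsSketch(5_000); // 5 second lag, as in the topology above
        w.onTuple(10_000);
        System.out.println(w.windowComplete(7_000)); // false: tuples with ts <= 7000 may still arrive
        w.onTuple(13_000);
        System.out.println(w.windowComplete(7_000)); // true: watermark 13000 - 5000 = 8000 has passed 7000
      }
    }
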
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/SlidingWindowTopology.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/SlidingWindowTopology.java b/examples/storm-starter/src/jvm/storm/starter/SlidingWindowTopology.java
deleted file mode 100644
index 5031f8d..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/SlidingWindowTopology.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter;
-
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.StormSubmitter;
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.topology.base.BaseWindowedBolt;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
-import backtype.storm.utils.Utils;
-import backtype.storm.windowing.TupleWindow;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import storm.starter.bolt.PrinterBolt;
-import storm.starter.bolt.SlidingWindowSumBolt;
-import storm.starter.spout.RandomIntegerSpout;
-
-import java.util.List;
-import java.util.Map;
-
-import static backtype.storm.topology.base.BaseWindowedBolt.Count;
-
-/**
- * A sample topology that demonstrates the usage of {@link backtype.storm.topology.IWindowedBolt}
- * to calculate a sliding window sum.
- */
-public class SlidingWindowTopology {
-
-    private static final Logger LOG = LoggerFactory.getLogger(SlidingWindowTopology.class);
-
-    /*
-     * Computes tumbling window average
-     */
-    private static class TumblingWindowAvgBolt extends BaseWindowedBolt {
-        private OutputCollector collector;
-
-        @Override
-        public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
-            this.collector = collector;
-        }
-
-        @Override
-        public void execute(TupleWindow inputWindow) {
-            int sum = 0;
-            List<Tuple> tuplesInWindow = inputWindow.get();
-            LOG.debug("Events in current window: " + tuplesInWindow.size());
-            if (tuplesInWindow.size() > 0) {
-                /*
-                * Since this is a tumbling window calculation,
-                * we use all the tuples in the window to compute the avg.
-                */
-                for (Tuple tuple : tuplesInWindow) {
-                    sum += (int) tuple.getValue(0);
-                }
-                collector.emit(new Values(sum / tuplesInWindow.size()));
-            }
-        }
-
-        @Override
-        public void declareOutputFields(OutputFieldsDeclarer declarer) {
-            declarer.declare(new Fields("avg"));
-        }
-    }
-
-
-    public static void main(String[] args) throws Exception {
-        TopologyBuilder builder = new TopologyBuilder();
-        builder.setSpout("integer", new RandomIntegerSpout(), 1);
-        builder.setBolt("slidingsum", new SlidingWindowSumBolt().withWindow(new Count(30), new Count(10)), 1)
-                .shuffleGrouping("integer");
-        builder.setBolt("tumblingavg", new TumblingWindowAvgBolt().withTumblingWindow(new Count(3)), 1)
-                .shuffleGrouping("slidingsum");
-        builder.setBolt("printer", new PrinterBolt(), 1).shuffleGrouping("tumblingavg");
-        Config conf = new Config();
-        conf.setDebug(true);
-        if (args != null && args.length > 0) {
-            conf.setNumWorkers(1);
-            StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
-        } else {
-            LocalCluster cluster = new LocalCluster();
-            cluster.submitTopology("test", conf, builder.createTopology());
-            Utils.sleep(40000);
-            cluster.killTopology("test");
-            cluster.shutdown();
-        }
-    }
-}

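In the topology above, withWindow(new Count(30), new Count(10)) invokes the sum bolt every 10 tuples over the most recent 30, while withTumblingWindow(new Count(3)) produces non-overlapping windows of 3. The sketch below only illustrates which elements each trigger would see, using smaller numbers; the windowed-bolt framework handles this bookkeeping in the committed code.

    import java.util.ArrayList;
    import java.util.List;

    public class CountWindowSketch {
      // Which elements the bolt would see on each trigger of a count-based sliding window.
      static List<List<Integer>> sliding(List<Integer> input, int windowLength, int slideEvery) {
        List<List<Integer>> triggers = new ArrayList<>();
        for (int end = slideEvery; end <= input.size(); end += slideEvery) {
          triggers.add(input.subList(Math.max(0, end - windowLength), end));
        }
        return triggers;
      }

      public static void main(String[] args) {
        List<Integer> input = new ArrayList<>();
        for (int i = 1; i <= 9; i++) input.add(i);

        // Sliding window of 6, advancing every 3: successive triggers overlap.
        System.out.println(sliding(input, 6, 3)); // [[1, 2, 3], [1, 2, 3, 4, 5, 6], [4, 5, 6, 7, 8, 9]]

        // Tumbling window of 3: window length equals the slide, so triggers never overlap.
        System.out.println(sliding(input, 3, 3)); // [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
      }
    }
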

[16/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/daemon/worker.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/daemon/worker.clj b/storm-core/src/clj/org/apache/storm/daemon/worker.clj
new file mode 100644
index 0000000..9607d77
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/daemon/worker.clj
@@ -0,0 +1,763 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.daemon.worker
+  (:use [org.apache.storm.daemon common])
+  (:use [org.apache.storm config log util timer local-state])
+  (:require [clj-time.core :as time])
+  (:require [clj-time.coerce :as coerce])
+  (:require [org.apache.storm.daemon [executor :as executor]])
+  (:require [org.apache.storm [disruptor :as disruptor] [cluster :as cluster]])
+  (:require [clojure.set :as set])
+  (:require [org.apache.storm.messaging.loader :as msg-loader])
+  (:import [java.util.concurrent Executors]
+           [org.apache.storm.hooks IWorkerHook BaseWorkerHook])
+  (:import [java.util ArrayList HashMap])
+  (:import [org.apache.storm.utils Utils TransferDrainer ThriftTopologyUtils WorkerBackpressureThread DisruptorQueue])
+  (:import [org.apache.storm.grouping LoadMapping])
+  (:import [org.apache.storm.messaging TransportFactory])
+  (:import [org.apache.storm.messaging TaskMessage IContext IConnection ConnectionWithStatus ConnectionWithStatus$Status])
+  (:import [org.apache.storm.daemon Shutdownable])
+  (:import [org.apache.storm.serialization KryoTupleSerializer])
+  (:import [org.apache.storm.generated StormTopology])
+  (:import [org.apache.storm.tuple AddressedTuple Fields])
+  (:import [org.apache.storm.task WorkerTopologyContext])
+  (:import [org.apache.storm Constants])
+  (:import [org.apache.storm.security.auth AuthUtils])
+  (:import [org.apache.storm.cluster ClusterStateContext DaemonType])
+  (:import [javax.security.auth Subject])
+  (:import [java.security PrivilegedExceptionAction])
+  (:import [org.apache.logging.log4j LogManager])
+  (:import [org.apache.logging.log4j Level])
+  (:import [org.apache.logging.log4j.core.config LoggerConfig])
+  (:import [org.apache.storm.generated LogConfig LogLevelAction])
+  (:gen-class))
+
+(defmulti mk-suicide-fn cluster-mode)
+
+(defn read-worker-executors [storm-conf storm-cluster-state storm-id assignment-id port assignment-versions]
+  (log-message "Reading Assignments.")
+  (let [assignment (:executor->node+port (.assignment-info storm-cluster-state storm-id nil))]
+    (doall
+     (concat
+      [Constants/SYSTEM_EXECUTOR_ID]
+      (mapcat (fn [[executor loc]]
+                (if (= loc [assignment-id port])
+                  [executor]
+                  ))
+              assignment)))))
+
+(defnk do-executor-heartbeats [worker :executors nil]
+  ;; stats is how we know what executors are assigned to this worker 
+  (let [stats (if-not executors
+                  (into {} (map (fn [e] {e nil}) (:executors worker)))
+                  (->> executors
+                    (map (fn [e] {(executor/get-executor-id e) (executor/render-stats e)}))
+                    (apply merge)))
+        zk-hb {:storm-id (:storm-id worker)
+               :executor-stats stats
+               :uptime ((:uptime worker))
+               :time-secs (current-time-secs)
+               }]
+    ;; do the zookeeper heartbeat
+    (.worker-heartbeat! (:storm-cluster-state worker) (:storm-id worker) (:assignment-id worker) (:port worker) zk-hb)
+    ))
+
+(defn do-heartbeat [worker]
+  (let [conf (:conf worker)
+        state (worker-state conf (:worker-id worker))]
+    ;; do the local-file-system heartbeat.
+    (ls-worker-heartbeat! state (current-time-secs) (:storm-id worker) (:executors worker) (:port worker))
+    (.cleanup state 60) ; this is just in case supervisor is down so that disk doesn't fill up.
+                         ; it shouldn't take supervisor 120 seconds between listing dir and reading it
+
+    ))
+
+(defn worker-outbound-tasks
+  "Returns seq of task-ids that receive messages from this worker"
+  [worker]
+  (let [context (worker-context worker)
+        components (mapcat
+                     (fn [task-id]
+                       (->> (.getComponentId context (int task-id))
+                            (.getTargets context)
+                            vals
+                            (map keys)
+                            (apply concat)))
+                     (:task-ids worker))]
+    (-> worker
+        :task->component
+        reverse-map
+        (select-keys components)
+        vals
+        flatten
+        set )))
+
+(defn get-dest
+  [^AddressedTuple addressed-tuple]
+  "get the destination for an AddressedTuple"
+  (.getDest addressed-tuple))
+
+(defn mk-transfer-local-fn [worker]
+  (let [short-executor-receive-queue-map (:short-executor-receive-queue-map worker)
+        task->short-executor (:task->short-executor worker)
+        task-getter (comp #(get task->short-executor %) get-dest)]
+    (fn [tuple-batch]
+      (let [grouped (fast-group-by task-getter tuple-batch)]
+        (fast-map-iter [[short-executor pairs] grouped]
+          (let [q (short-executor-receive-queue-map short-executor)]
+            (if q
+              (disruptor/publish q pairs)
+              (log-warn "Received invalid messages for unknown tasks. Dropping... ")
+              )))))))
+
+(defn- assert-can-serialize [^KryoTupleSerializer serializer tuple-batch]
+  "Check that all of the tuples can be serialized by serializing them."
+  (fast-list-iter [[task tuple :as pair] tuple-batch]
+    (.serialize serializer tuple)))
+
+(defn- mk-backpressure-handler [executors]
+  "make a handler that checks and updates worker's backpressure flag"
+  (disruptor/worker-backpressure-handler
+    (fn [worker]
+      (let [storm-id (:storm-id worker)
+            assignment-id (:assignment-id worker)
+            port (:port worker)
+            storm-cluster-state (:storm-cluster-state worker)
+            prev-backpressure-flag @(:backpressure worker)]
+        (when executors
+          (reset! (:backpressure worker)
+                  (or @(:transfer-backpressure worker)
+                      (reduce #(or %1 %2) (map #(.get-backpressure-flag %1) executors)))))
+        ;; update the worker's backpressure flag to zookeeper only when it has changed
+        (log-debug "BP " @(:backpressure worker) " WAS " prev-backpressure-flag)
+        (when (not= prev-backpressure-flag @(:backpressure worker))
+          (.worker-backpressure! storm-cluster-state storm-id assignment-id port @(:backpressure worker)))
+        ))))
+
+(defn- mk-disruptor-backpressure-handler [worker]
+  "make a handler for the worker's send disruptor queue to
+  check highWaterMark and lowWaterMark for backpressure"
+  (disruptor/disruptor-backpressure-handler
+    (fn []
+      (reset! (:transfer-backpressure worker) true)
+      (WorkerBackpressureThread/notifyBackpressureChecker (:backpressure-trigger worker)))
+    (fn []
+      (reset! (:transfer-backpressure worker) false)
+      (WorkerBackpressureThread/notifyBackpressureChecker (:backpressure-trigger worker)))))
+
+(defn mk-transfer-fn [worker]
+  (let [local-tasks (-> worker :task-ids set)
+        local-transfer (:transfer-local-fn worker)
+        ^DisruptorQueue transfer-queue (:transfer-queue worker)
+        task->node+port (:cached-task->node+port worker)
+        try-serialize-local ((:storm-conf worker) TOPOLOGY-TESTING-ALWAYS-TRY-SERIALIZE)
+
+        transfer-fn
+          (fn [^KryoTupleSerializer serializer tuple-batch]
+            (let [^ArrayList local (ArrayList.)
+                  ^HashMap remoteMap (HashMap.)]
+              (fast-list-iter [^AddressedTuple addressed-tuple tuple-batch]
+                (let [task (.getDest addressed-tuple)
+                      tuple (.getTuple addressed-tuple)]
+                  (if (local-tasks task)
+                    (.add local addressed-tuple)
+
+                    ;;Using java objects directly to avoid performance issues in java code
+                    (do
+                      (when (not (.get remoteMap task))
+                        (.put remoteMap task (ArrayList.)))
+                      (let [^ArrayList remote (.get remoteMap task)]
+                        (if (not-nil? task)
+                          (.add remote (TaskMessage. task ^bytes (.serialize serializer tuple)))
+                          (log-warn "Can't transfer tuple - task value is nil. tuple type: " (pr-str (type tuple)) " and information: " (pr-str tuple)))
+                       )))))
+
+              (when (not (.isEmpty local)) (local-transfer local))
+              (when (not (.isEmpty remoteMap)) (disruptor/publish transfer-queue remoteMap))))]
+    (if try-serialize-local
+      (do
+        (log-warn "WILL TRY TO SERIALIZE ALL TUPLES (Turn off " TOPOLOGY-TESTING-ALWAYS-TRY-SERIALIZE " for production)")
+        (fn [^KryoTupleSerializer serializer tuple-batch]
+          (assert-can-serialize serializer tuple-batch)
+          (transfer-fn serializer tuple-batch)))
+      transfer-fn)))
+
+(defn- mk-receive-queue-map [storm-conf executors]
+  (->> executors
+       ;; TODO: this depends on the type of executor
+       (map (fn [e] [e (disruptor/disruptor-queue (str "receive-queue" e)
+                                                  (storm-conf TOPOLOGY-EXECUTOR-RECEIVE-BUFFER-SIZE)
+                                                  (storm-conf TOPOLOGY-DISRUPTOR-WAIT-TIMEOUT-MILLIS)
+                                                  :batch-size (storm-conf TOPOLOGY-DISRUPTOR-BATCH-SIZE)
+                                                  :batch-timeout (storm-conf TOPOLOGY-DISRUPTOR-BATCH-TIMEOUT-MILLIS))]))
+       (into {})
+       ))
+
+(defn- stream->fields [^StormTopology topology component]
+  (->> (ThriftTopologyUtils/getComponentCommon topology component)
+       .get_streams
+       (map (fn [[s info]] [s (Fields. (.get_output_fields info))]))
+       (into {})
+       (HashMap.)))
+
+(defn component->stream->fields [^StormTopology topology]
+  (->> (ThriftTopologyUtils/getComponentIds topology)
+       (map (fn [c] [c (stream->fields topology c)]))
+       (into {})
+       (HashMap.)))
+
+(defn- mk-default-resources [worker]
+  (let [conf (:conf worker)
+        thread-pool-size (int (conf TOPOLOGY-WORKER-SHARED-THREAD-POOL-SIZE))]
+    {WorkerTopologyContext/SHARED_EXECUTOR (Executors/newFixedThreadPool thread-pool-size)}
+    ))
+
+(defn- mk-user-resources [worker]
+  ;;TODO: need to invoke a hook provided by the topology, giving it a chance to create user resources.
+  ;; this would be part of the initialization hook
+  ;; need to separate workertopologycontext into WorkerContext and WorkerUserContext.
+  ;; actually just do it via interfaces. just need to make sure to hide setResource from tasks
+  {})
+
+(defn mk-halting-timer [timer-name]
+  (mk-timer :kill-fn (fn [t]
+                       (log-error t "Error when processing event")
+                       (exit-process! 20 "Error when processing an event")
+                       )
+            :timer-name timer-name))
+
+(defn worker-data [conf mq-context storm-id assignment-id port worker-id storm-conf cluster-state storm-cluster-state]
+  (let [assignment-versions (atom {})
+        executors (set (read-worker-executors storm-conf storm-cluster-state storm-id assignment-id port assignment-versions))
+        transfer-queue (disruptor/disruptor-queue "worker-transfer-queue" (storm-conf TOPOLOGY-TRANSFER-BUFFER-SIZE)
+                                                  (storm-conf TOPOLOGY-DISRUPTOR-WAIT-TIMEOUT-MILLIS)
+                                                  :batch-size (storm-conf TOPOLOGY-DISRUPTOR-BATCH-SIZE)
+                                                  :batch-timeout (storm-conf TOPOLOGY-DISRUPTOR-BATCH-TIMEOUT-MILLIS))
+        executor-receive-queue-map (mk-receive-queue-map storm-conf executors)
+
+        receive-queue-map (->> executor-receive-queue-map
+                               (mapcat (fn [[e queue]] (for [t (executor-id->tasks e)] [t queue])))
+                               (into {}))
+
+        topology (read-supervisor-topology conf storm-id)
+        mq-context  (if mq-context
+                      mq-context
+                      (TransportFactory/makeContext storm-conf))]
+
+    (recursive-map
+      :conf conf
+      :mq-context mq-context
+      :receiver (.bind ^IContext mq-context storm-id port)
+      :storm-id storm-id
+      :assignment-id assignment-id
+      :port port
+      :worker-id worker-id
+      :cluster-state cluster-state
+      :storm-cluster-state storm-cluster-state
+      ;; when the worker boots up, it will start to set up initial connections to
+      ;; other workers. When all connections are ready, we will enable this flag
+      ;; and the spouts and bolts will be activated.
+      :worker-active-flag (atom false)
+      :storm-active-atom (atom false)
+      :storm-component->debug-atom (atom {})
+      :executors executors
+      :task-ids (->> receive-queue-map keys (map int) sort)
+      :storm-conf storm-conf
+      :topology topology
+      :system-topology (system-topology! storm-conf topology)
+      :heartbeat-timer (mk-halting-timer "heartbeat-timer")
+      :refresh-load-timer (mk-halting-timer "refresh-load-timer")
+      :refresh-connections-timer (mk-halting-timer "refresh-connections-timer")
+      :refresh-credentials-timer (mk-halting-timer "refresh-credentials-timer")
+      :reset-log-levels-timer (mk-halting-timer "reset-log-levels-timer")
+      :refresh-active-timer (mk-halting-timer "refresh-active-timer")
+      :executor-heartbeat-timer (mk-halting-timer "executor-heartbeat-timer")
+      :user-timer (mk-halting-timer "user-timer")
+      :task->component (HashMap. (storm-task-info topology storm-conf)) ; for optimized access when used in tasks later on
+      :component->stream->fields (component->stream->fields (:system-topology <>))
+      :component->sorted-tasks (->> (:task->component <>) reverse-map (map-val sort))
+      :endpoint-socket-lock (mk-rw-lock)
+      :cached-node+port->socket (atom {})
+      :cached-task->node+port (atom {})
+      :transfer-queue transfer-queue
+      :executor-receive-queue-map executor-receive-queue-map
+      :short-executor-receive-queue-map (map-key first executor-receive-queue-map)
+      :task->short-executor (->> executors
+                                 (mapcat (fn [e] (for [t (executor-id->tasks e)] [t (first e)])))
+                                 (into {})
+                                 (HashMap.))
+      :suicide-fn (mk-suicide-fn conf)
+      :uptime (uptime-computer)
+      :default-shared-resources (mk-default-resources <>)
+      :user-shared-resources (mk-user-resources <>)
+      :transfer-local-fn (mk-transfer-local-fn <>)
+      :transfer-fn (mk-transfer-fn <>)
+      :load-mapping (LoadMapping.)
+      :assignment-versions assignment-versions
+      :backpressure (atom false) ;; whether this worker is going slow
+      :transfer-backpressure (atom false) ;; if the transfer queue is backed-up
+      :backpressure-trigger (atom false) ;; a trigger for synchronization with executors
+      :throttle-on (atom false) ;; whether throttle is activated for spouts
+      )))
+
+(defn- endpoint->string [[node port]]
+  (str port "/" node))
+
+(defn string->endpoint [^String s]
+  (let [[port-str node] (.split s "/" 2)]
+    [node (Integer/valueOf port-str)]
+    ))
+
+(def LOAD-REFRESH-INTERVAL-MS 5000)
+
+(defn mk-refresh-load [worker]
+  (let [local-tasks (set (:task-ids worker))
+        remote-tasks (set/difference (worker-outbound-tasks worker) local-tasks)
+        short-executor-receive-queue-map (:short-executor-receive-queue-map worker)
+        next-update (atom 0)]
+    (fn this
+      ([]
+        (let [^LoadMapping load-mapping (:load-mapping worker)
+              local-pop (map-val (fn [queue]
+                                   (let [q-metrics (.getMetrics queue)]
+                                     (/ (double (.population q-metrics)) (.capacity q-metrics))))
+                                 short-executor-receive-queue-map)
+              remote-load (reduce merge (for [[np conn] @(:cached-node+port->socket worker)] (into {} (.getLoad conn remote-tasks))))
+              now (System/currentTimeMillis)]
+          (.setLocal load-mapping local-pop)
+          (.setRemote load-mapping remote-load)
+          (when (> now @next-update)
+            (.sendLoadMetrics (:receiver worker) local-pop)
+            (reset! next-update (+ LOAD-REFRESH-INTERVAL-MS now))))))))
+
+(defn mk-refresh-connections [worker]
+  (let [outbound-tasks (worker-outbound-tasks worker)
+        conf (:conf worker)
+        storm-cluster-state (:storm-cluster-state worker)
+        storm-id (:storm-id worker)]
+    (fn this
+      ([]
+        (this (fn [& ignored] (schedule (:refresh-connections-timer worker) 0 this))))
+      ([callback]
+         (let [version (.assignment-version storm-cluster-state storm-id callback)
+               assignment (if (= version (:version (get @(:assignment-versions worker) storm-id)))
+                            (:data (get @(:assignment-versions worker) storm-id))
+                            (let [new-assignment (.assignment-info-with-version storm-cluster-state storm-id callback)]
+                              (swap! (:assignment-versions worker) assoc storm-id new-assignment)
+                              (:data new-assignment)))
+              my-assignment (-> assignment
+                                :executor->node+port
+                                to-task->node+port
+                                (select-keys outbound-tasks)
+                                (#(map-val endpoint->string %)))
+              ;; we dont need a connection for the local tasks anymore
+              needed-assignment (->> my-assignment
+                                      (filter-key (complement (-> worker :task-ids set))))
+              needed-connections (-> needed-assignment vals set)
+              needed-tasks (-> needed-assignment keys)
+
+              current-connections (set (keys @(:cached-node+port->socket worker)))
+              new-connections (set/difference needed-connections current-connections)
+              remove-connections (set/difference current-connections needed-connections)]
+              (swap! (:cached-node+port->socket worker)
+                     #(HashMap. (merge (into {} %1) %2))
+                     (into {}
+                       (dofor [endpoint-str new-connections
+                               :let [[node port] (string->endpoint endpoint-str)]]
+                         [endpoint-str
+                          (.connect
+                           ^IContext (:mq-context worker)
+                           storm-id
+                           ((:node->host assignment) node)
+                           port)
+                          ]
+                         )))
+              (write-locked (:endpoint-socket-lock worker)
+                (reset! (:cached-task->node+port worker)
+                        (HashMap. my-assignment)))
+              (doseq [endpoint remove-connections]
+                (.close (get @(:cached-node+port->socket worker) endpoint)))
+              (apply swap!
+                     (:cached-node+port->socket worker)
+                     #(HashMap. (apply dissoc (into {} %1) %&))
+                     remove-connections)
+
+           )))))
+
+(defn refresh-storm-active
+  ([worker]
+    (refresh-storm-active worker (fn [& ignored] (schedule (:refresh-active-timer worker) 0 (partial refresh-storm-active worker)))))
+  ([worker callback]
+    (let [base (.storm-base (:storm-cluster-state worker) (:storm-id worker) callback)]
+      (reset!
+        (:storm-active-atom worker)
+        (and (= :active (-> base :status :type)) @(:worker-active-flag worker)))
+      (reset! (:storm-component->debug-atom worker) (-> base :component->debug))
+      (log-debug "Event debug options " @(:storm-component->debug-atom worker)))))
+
+;; TODO: consider having a max batch size besides what disruptor does automagically to prevent latency issues
+(defn mk-transfer-tuples-handler [worker]
+  (let [^DisruptorQueue transfer-queue (:transfer-queue worker)
+        drainer (TransferDrainer.)
+        node+port->socket (:cached-node+port->socket worker)
+        task->node+port (:cached-task->node+port worker)
+        endpoint-socket-lock (:endpoint-socket-lock worker)
+        ]
+    (disruptor/clojure-handler
+      (fn [packets _ batch-end?]
+        (.add drainer packets)
+
+        (when batch-end?
+          (read-locked endpoint-socket-lock
+             (let [node+port->socket @node+port->socket
+                   task->node+port @task->node+port]
+               (.send drainer task->node+port node+port->socket)))
+          (.clear drainer))))))
+
+;; Check whether this messaging connection is ready to send data
+(defn is-connection-ready [^IConnection connection]
+  (if (instance?  ConnectionWithStatus connection)
+    (let [^ConnectionWithStatus connection connection
+          status (.status connection)]
+      (= status ConnectionWithStatus$Status/Ready))
+    true))
+
+;; returns true if all of this worker's messaging connections are ready
+(defn all-connections-ready [worker]
+    (let [connections (vals @(:cached-node+port->socket worker))]
+      (every? is-connection-ready connections)))
+
+;; we will wait for all connections to be ready and then activate the spouts/bolts
+;; when the worker boots up
+(defn activate-worker-when-all-connections-ready
+  [worker]
+  (let [timer (:refresh-active-timer worker)
+        delay-secs 0
+        recur-secs 1]
+    (schedule timer
+      delay-secs
+      (fn this []
+        (if (all-connections-ready worker)
+          (do
+            (log-message "All connections are ready for worker " (:assignment-id worker) ":" (:port worker)
+              " with id "(:worker-id worker))
+            (reset! (:worker-active-flag worker) true))
+          (schedule timer recur-secs this :check-active false)
+            )))))
+
+(defn register-callbacks [worker]
+  (log-message "Registering IConnectionCallbacks for " (:assignment-id worker) ":" (:port worker))
+  (msg-loader/register-callback (:transfer-local-fn worker)
+                                (:receiver worker)
+                                (:storm-conf worker)
+                                (worker-context worker)))
+
+(defn- close-resources [worker]
+  (let [dr (:default-shared-resources worker)]
+    (log-message "Shutting down default resources")
+    (.shutdownNow (get dr WorkerTopologyContext/SHARED_EXECUTOR))
+    (log-message "Shut down default resources")))
+
+(defn- get-logger-levels []
+  (into {}
+    (let [logger-config (.getConfiguration (LogManager/getContext false))]
+      (for [[logger-name logger] (.getLoggers logger-config)]
+        {logger-name (.getLevel logger)}))))
+
+(defn set-logger-level [logger-context logger-name new-level]
+  (let [config (.getConfiguration logger-context)
+        logger-config (.getLoggerConfig config logger-name)]
+    (if (not (= (.getName logger-config) logger-name))
+      ;; create a new config. Make it additive (true) so that it
+      ;; inherits its parent's appenders
+      (let [new-logger-config (LoggerConfig. logger-name new-level true)]
+        (log-message "Adding config for: " new-logger-config " with level: " new-level)
+        (.addLogger config logger-name new-logger-config))
+      (do
+        (log-message "Setting " logger-config " log level to: " new-level)
+        (.setLevel logger-config new-level)))))
+
+;; called on a timer to reset log levels whose timeout has expired back to their original levels
+;; also called from process-log-config-change
+(defn reset-log-levels [latest-log-config-atom]
+  (let [latest-log-config @latest-log-config-atom
+        logger-context (LogManager/getContext false)]
+    (doseq [[logger-name logger-setting] (sort latest-log-config)]
+      (let [timeout (:timeout logger-setting)
+            target-log-level (:target-log-level logger-setting)
+            reset-log-level (:reset-log-level logger-setting)]
+        (when (> (coerce/to-long (time/now)) timeout)
+          (log-message logger-name ": Resetting level to " reset-log-level) 
+          (set-logger-level logger-context logger-name reset-log-level)
+          (swap! latest-log-config-atom
+            (fn [prev]
+              (dissoc prev logger-name))))))
+    (.updateLoggers logger-context)))
+
+;; when a new log level is received from zookeeper, this function is called
+(defn process-log-config-change [latest-log-config original-log-levels log-config]
+  (when log-config
+    (log-debug "Processing received log config: " log-config)
+    ;; merge log configs together
+    (let [loggers (.get_named_logger_level log-config)
+          logger-context (LogManager/getContext false)]
+      (def new-log-configs
+        (into {}
+         ;; merge named log levels
+         (for [[msg-logger-name logger-level] loggers]
+           (let [logger-name (if (= msg-logger-name "ROOT")
+                                LogManager/ROOT_LOGGER_NAME
+                                msg-logger-name)]
+             ;; new-log-configs will map logger name => its timeout and level settings
+             (when (.is_set_reset_log_level_timeout_epoch logger-level)
+               {logger-name {:action (.get_action logger-level)
+                             :target-log-level (Level/toLevel (.get_target_log_level logger-level))
+                             :reset-log-level (or (.get @original-log-levels logger-name) (Level/INFO))
+                             :timeout (.get_reset_log_level_timeout_epoch logger-level)}})))))
+
+      ;; look for deleted log timeouts
+      (doseq [[logger-name logger-val] (sort @latest-log-config)]
+        (when (not (contains? new-log-configs logger-name))
+          ;; if we had a timeout, but the timeout is no longer active
+          (set-logger-level
+            logger-context logger-name (:reset-log-level logger-val))))
+
+      ;; apply new log settings we just received
+      ;; the merged configs are only for the reset logic
+      (doseq [[msg-logger-name logger-level] (sort (into {} (.get_named_logger_level log-config)))]
+        (let [logger-name (if (= msg-logger-name "ROOT")
+                                LogManager/ROOT_LOGGER_NAME
+                                msg-logger-name)
+              level (Level/toLevel (.get_target_log_level logger-level))
+              action (.get_action logger-level)]
+          (if (= action LogLevelAction/UPDATE)
+            (set-logger-level logger-context logger-name level))))
+   
+      (.updateLoggers logger-context)
+      (reset! latest-log-config new-log-configs)
+      (log-debug "New merged log config is " @latest-log-config))))
+
+(defn run-worker-start-hooks [worker]
+  (let [topology (:topology worker)
+        topo-conf (:storm-conf worker)
+        worker-topology-context (worker-context worker)
+        hooks (.get_worker_hooks topology)]
+    (dofor [hook hooks]
+      (let [hook-bytes (Utils/toByteArray hook)
+            deser-hook (Utils/javaDeserialize hook-bytes BaseWorkerHook)]
+        (.start deser-hook topo-conf worker-topology-context)))))
+
+(defn run-worker-shutdown-hooks [worker]
+  (let [topology (:topology worker)
+        hooks (.get_worker_hooks topology)]
+    (dofor [hook hooks]
+      (let [hook-bytes (Utils/toByteArray hook)
+            deser-hook (Utils/javaDeserialize hook-bytes BaseWorkerHook)]
+        (.shutdown deser-hook)))))
+
+;; TODO: should worker even take the storm-id as input? this should be
+;; deducible from cluster state (by searching through assignments)
+;; what about if there's inconsistency in assignments? -> but nimbus
+;; should guarantee this consistency
+(defserverfn mk-worker [conf shared-mq-context storm-id assignment-id port worker-id]
+  (log-message "Launching worker for " storm-id " on " assignment-id ":" port " with id " worker-id
+               " and conf " conf)
+  (if-not (local-mode? conf)
+    (redirect-stdio-to-slf4j!))
+  ;; because in local mode it's not a separate
+  ;; process; the supervisor will register it in this case
+  (when (= :distributed (cluster-mode conf))
+    (let [pid (process-pid)]
+      (touch (worker-pid-path conf worker-id pid))
+      (spit (worker-artifacts-pid-path conf storm-id port) pid)))
+
+  (declare establish-log-setting-callback)
+
+  ;; start out with an empty map of log level timeouts
+  (def latest-log-config (atom {}))
+  (def original-log-levels (atom {}))
+
+  (let [storm-conf (read-supervisor-storm-conf conf storm-id)
+        storm-conf (override-login-config-with-system-property storm-conf)
+        acls (Utils/getWorkerACL storm-conf)
+        cluster-state (cluster/mk-distributed-cluster-state conf :auth-conf storm-conf :acls acls :context (ClusterStateContext. DaemonType/WORKER))
+        storm-cluster-state (cluster/mk-storm-cluster-state cluster-state :acls acls)
+        initial-credentials (.credentials storm-cluster-state storm-id nil)
+        auto-creds (AuthUtils/GetAutoCredentials storm-conf)
+        subject (AuthUtils/populateSubject nil auto-creds initial-credentials)]
+      (Subject/doAs subject (reify PrivilegedExceptionAction
+        (run [this]
+          (let [worker (worker-data conf shared-mq-context storm-id assignment-id port worker-id storm-conf cluster-state storm-cluster-state)
+        heartbeat-fn #(do-heartbeat worker)
+
+        ;; do this here so that the worker process dies if this fails
+        ;; it's important that the worker heartbeats to the supervisor ASAP on launch so that the supervisor knows it's running (and can move on)
+        _ (heartbeat-fn)
+
+        executors (atom nil)
+        ;; launch heartbeat threads immediately so that slow-loading tasks don't cause the worker to time out
+        ;; to the supervisor
+        _ (schedule-recurring (:heartbeat-timer worker) 0 (conf WORKER-HEARTBEAT-FREQUENCY-SECS) heartbeat-fn)
+        _ (schedule-recurring (:executor-heartbeat-timer worker) 0 (conf TASK-HEARTBEAT-FREQUENCY-SECS) #(do-executor-heartbeats worker :executors @executors))
+
+        _ (register-callbacks worker)
+
+        refresh-connections (mk-refresh-connections worker)
+        refresh-load (mk-refresh-load worker)
+
+        _ (refresh-connections nil)
+
+        _ (activate-worker-when-all-connections-ready worker)
+
+        _ (refresh-storm-active worker nil)
+
+        _ (run-worker-start-hooks worker)
+
+        _ (reset! executors (dofor [e (:executors worker)] (executor/mk-executor worker e initial-credentials)))
+
+        transfer-tuples (mk-transfer-tuples-handler worker)
+        
+        transfer-thread (disruptor/consume-loop* (:transfer-queue worker) transfer-tuples)               
+
+        disruptor-handler (mk-disruptor-backpressure-handler worker)
+        _ (.registerBackpressureCallback (:transfer-queue worker) disruptor-handler)
+        _ (-> (.setHighWaterMark (:transfer-queue worker) ((:storm-conf worker) BACKPRESSURE-DISRUPTOR-HIGH-WATERMARK))
+              (.setLowWaterMark ((:storm-conf worker) BACKPRESSURE-DISRUPTOR-LOW-WATERMARK))
+              (.setEnableBackpressure ((:storm-conf worker) TOPOLOGY-BACKPRESSURE-ENABLE)))
+        backpressure-handler (mk-backpressure-handler @executors)        
+        backpressure-thread (WorkerBackpressureThread. (:backpressure-trigger worker) worker backpressure-handler)
+        _ (if ((:storm-conf worker) TOPOLOGY-BACKPRESSURE-ENABLE) 
+            (.start backpressure-thread))
+        callback (fn cb [& ignored]
+                   (let [throttle-on (.topology-backpressure storm-cluster-state storm-id cb)]
+                     (reset! (:throttle-on worker) throttle-on)))
+        _ (if ((:storm-conf worker) TOPOLOGY-BACKPRESSURE-ENABLE)
+            (.topology-backpressure storm-cluster-state storm-id callback))
+
+        shutdown* (fn []
+                    (log-message "Shutting down worker " storm-id " " assignment-id " " port)
+                    (doseq [[_ socket] @(:cached-node+port->socket worker)]
+                      ;; this will do best effort flushing since the linger period
+                      ;; was set on creation
+                      (.close socket))
+                    (log-message "Terminating messaging context")
+                    (log-message "Shutting down executors")
+                    (doseq [executor @executors] (.shutdown executor))
+                    (log-message "Shut down executors")
+
+                    ;; this is fine because the only time this is shared is when it's a local context,
+                    ;; in which case it's a noop
+                    (.term ^IContext (:mq-context worker))
+                    (log-message "Shutting down transfer thread")
+                    (disruptor/halt-with-interrupt! (:transfer-queue worker))
+
+                    (.interrupt transfer-thread)
+                    (.join transfer-thread)
+                    (log-message "Shut down transfer thread")
+                    (.interrupt backpressure-thread)
+                    (.join backpressure-thread)
+                    (log-message "Shut down backpressure thread")
+                    (cancel-timer (:heartbeat-timer worker))
+                    (cancel-timer (:refresh-connections-timer worker))
+                    (cancel-timer (:refresh-credentials-timer worker))
+                    (cancel-timer (:refresh-active-timer worker))
+                    (cancel-timer (:executor-heartbeat-timer worker))
+                    (cancel-timer (:user-timer worker))
+                    (cancel-timer (:refresh-load-timer worker))
+
+                    (close-resources worker)
+
+                    (log-message "Trigger any worker shutdown hooks")
+                    (run-worker-shutdown-hooks worker)
+
+                    (.remove-worker-heartbeat! (:storm-cluster-state worker) storm-id assignment-id port)
+                    (log-message "Disconnecting from storm cluster state context")
+                    (.disconnect (:storm-cluster-state worker))
+                    (.close (:cluster-state worker))
+                    (log-message "Shut down worker " storm-id " " assignment-id " " port))
+        ret (reify
+             Shutdownable
+             (shutdown
+              [this]
+              (shutdown*))
+             DaemonCommon
+             (waiting? [this]
+               (and
+                 (timer-waiting? (:heartbeat-timer worker))
+                 (timer-waiting? (:refresh-connections-timer worker))
+                 (timer-waiting? (:refresh-load-timer worker))
+                 (timer-waiting? (:refresh-credentials-timer worker))
+                 (timer-waiting? (:refresh-active-timer worker))
+                 (timer-waiting? (:executor-heartbeat-timer worker))
+                 (timer-waiting? (:user-timer worker))
+                 ))
+             )
+        credentials (atom initial-credentials)
+        check-credentials-changed (fn []
+                                    (let [new-creds (.credentials (:storm-cluster-state worker) storm-id nil)]
+                                      (when-not (= new-creds @credentials) ;;This does not have to be atomic, worst case we update when one is not needed
+                                        (AuthUtils/updateSubject subject auto-creds new-creds)
+                                        (dofor [e @executors] (.credentials-changed e new-creds))
+                                        (reset! credentials new-creds))))
+       check-throttle-changed (fn []
+                                (let [callback (fn cb [& ignored]
+                                                 (let [throttle-on (.topology-backpressure (:storm-cluster-state worker) storm-id cb)]
+                                                   (reset! (:throttle-on worker) throttle-on)))
+                                      new-throttle-on (.topology-backpressure (:storm-cluster-state worker) storm-id callback)]
+                                    (reset! (:throttle-on worker) new-throttle-on)))
+        check-log-config-changed (fn []
+                                  (let [log-config (.topology-log-config (:storm-cluster-state worker) storm-id nil)]
+                                    (process-log-config-change latest-log-config original-log-levels log-config)
+                                    (establish-log-setting-callback)))]
+    (reset! original-log-levels (get-logger-levels))
+    (log-message "Started with log levels: " @original-log-levels)
+  
+    (defn establish-log-setting-callback []
+      (.topology-log-config (:storm-cluster-state worker) storm-id (fn [args] (check-log-config-changed))))
+
+    (establish-log-setting-callback)
+    (.credentials (:storm-cluster-state worker) storm-id (fn [args] (check-credentials-changed)))
+    (schedule-recurring (:refresh-credentials-timer worker) 0 (conf TASK-CREDENTIALS-POLL-SECS)
+                        (fn [& args]
+                          (check-credentials-changed)
+                          (if ((:storm-conf worker) TOPOLOGY-BACKPRESSURE-ENABLE)
+                            (check-throttle-changed))))
+    ;; The jitter allows the clients to get the data at different times, and avoids thundering herd
+    (when-not (.get conf TOPOLOGY-DISABLE-LOADAWARE-MESSAGING)
+      (schedule-recurring-with-jitter (:refresh-load-timer worker) 0 1 500 refresh-load))
+    (schedule-recurring (:refresh-connections-timer worker) 0 (conf TASK-REFRESH-POLL-SECS) refresh-connections)
+    (schedule-recurring (:reset-log-levels-timer worker) 0 (conf WORKER-LOG-LEVEL-RESET-POLL-SECS) (fn [] (reset-log-levels latest-log-config)))
+    (schedule-recurring (:refresh-active-timer worker) 0 (conf TASK-REFRESH-POLL-SECS) (partial refresh-storm-active worker))
+
+    (log-message "Worker has topology config " (redact-value (:storm-conf worker) STORM-ZOOKEEPER-TOPOLOGY-AUTH-PAYLOAD))
+    (log-message "Worker " worker-id " for storm " storm-id " on " assignment-id ":" port " has finished loading")
+    ret
+    ))))))
+
+(defmethod mk-suicide-fn
+  :local [conf]
+  (fn [] (exit-process! 1 "Worker died")))
+
+(defmethod mk-suicide-fn
+  :distributed [conf]
+  (fn [] (exit-process! 1 "Worker died")))
+
+(defn -main [storm-id assignment-id port-str worker-id]
+  (let [conf (read-storm-config)]
+    (setup-default-uncaught-exception-handler)
+    (validate-distributed-mode! conf)
+    (let [worker (mk-worker conf nil storm-id assignment-id (Integer/parseInt port-str) worker-id)]
+      (add-shutdown-hook-with-force-kill-in-1-sec #(.shutdown worker)))))
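
As a side note for readers (illustrative only, not part of the patch): the connection refresh in mk-refresh-connections above reduces to two set differences over endpoint strings. A minimal standalone sketch, with made-up endpoints:

    (require '[clojure.set :as set])

    (let [needed-connections  #{"node1:6700" "node2:6701"}   ;; endpoints this worker must reach
          current-connections #{"node2:6701" "node3:6702"}]  ;; sockets already cached
      {:new    (set/difference needed-connections current-connections)    ;; connections to open
       :remove (set/difference current-connections needed-connections)})  ;; connections to close
    ;; => {:new #{"node1:6700"}, :remove #{"node3:6702"}}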

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/disruptor.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/disruptor.clj b/storm-core/src/clj/org/apache/storm/disruptor.clj
new file mode 100644
index 0000000..1546b3f
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/disruptor.clj
@@ -0,0 +1,89 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+
+(ns org.apache.storm.disruptor
+  (:import [org.apache.storm.utils DisruptorQueue WorkerBackpressureCallback DisruptorBackpressureCallback])
+  (:import [com.lmax.disruptor.dsl ProducerType])
+  (:require [clojure [string :as str]])
+  (:require [clojure [set :as set]])
+  (:use [clojure walk])
+  (:use [org.apache.storm util log]))
+
+(def PRODUCER-TYPE
+  {:multi-threaded ProducerType/MULTI
+   :single-threaded ProducerType/SINGLE})
+
+(defnk disruptor-queue
+  [^String queue-name buffer-size timeout :producer-type :multi-threaded :batch-size 100 :batch-timeout 1]
+  (DisruptorQueue. queue-name
+                   (PRODUCER-TYPE producer-type) buffer-size
+                   timeout batch-size batch-timeout))
+
+(defn clojure-handler
+  [afn]
+  (reify com.lmax.disruptor.EventHandler
+    (onEvent
+      [this o seq-id batchEnd?]
+      (afn o seq-id batchEnd?))))
+
+(defn disruptor-backpressure-handler
+  [afn-high-wm afn-low-wm]
+  (reify DisruptorBackpressureCallback
+    (highWaterMark
+      [this]
+      (afn-high-wm))
+    (lowWaterMark
+      [this]
+      (afn-low-wm))))
+
+(defn worker-backpressure-handler
+  [afn]
+  (reify WorkerBackpressureCallback
+    (onEvent
+      [this o]
+      (afn o))))
+
+(defmacro handler
+  [& args]
+  `(clojure-handler (fn ~@args)))
+
+(defn publish
+  [^DisruptorQueue q o]
+  (.publish q o))
+
+(defn consume-batch
+  [^DisruptorQueue queue handler]
+  (.consumeBatch queue handler))
+
+(defn consume-batch-when-available
+  [^DisruptorQueue queue handler]
+  (.consumeBatchWhenAvailable queue handler))
+
+(defn halt-with-interrupt!
+  [^DisruptorQueue queue]
+  (.haltWithInterrupt queue))
+
+(defnk consume-loop*
+  [^DisruptorQueue queue handler
+   :kill-fn (fn [error] (exit-process! 1 "Async loop died!"))]
+  (async-loop
+          (fn [] (consume-batch-when-available queue handler) 0)
+          :kill-fn kill-fn
+          :thread-name (.getName queue)))
+
+(defmacro consume-loop [queue & handler-args]
+  `(let [handler# (handler ~@handler-args)]
+     (consume-loop* ~queue handler#)))
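
A minimal usage sketch of the wrappers above (illustrative only, not from the patch; the queue name and sizes are made up, and storm-core must be on the classpath):

    (require '[org.apache.storm.disruptor :as disruptor])

    (let [q (disruptor/disruptor-queue "example-queue" 1024 1000)] ;; name, ring-buffer size, wait-timeout ms (assumed values)
      (disruptor/publish q "hello")
      ;; drain whatever is currently available, printing each event
      (disruptor/consume-batch q
        (disruptor/handler [o seq-id batch-end?]
          (println "got" o "batch-end?" batch-end?))))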

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/event.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/event.clj b/storm-core/src/clj/org/apache/storm/event.clj
new file mode 100644
index 0000000..edc7616
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/event.clj
@@ -0,0 +1,71 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+
+(ns org.apache.storm.event
+  (:use [org.apache.storm log util])
+  (:import [org.apache.storm.utils Time Utils])
+  (:import [java.io InterruptedIOException])
+  (:import [java.util.concurrent LinkedBlockingQueue TimeUnit]))
+
+(defprotocol EventManager
+  (add [this event-fn])
+  (waiting? [this])
+  (shutdown [this]))
+
+(defn event-manager
+  "Creates a thread to respond to events. Any error will cause process to halt"
+  [daemon?]
+  (let [added (atom 0)
+        processed (atom 0)
+        ^LinkedBlockingQueue queue (LinkedBlockingQueue.)
+        running (atom true)
+        runner (Thread.
+                 (fn []
+                   (try-cause
+                     (while @running
+                       (let [r (.take queue)]
+                         (r)
+                         (swap! processed inc)))
+                     (catch InterruptedIOException t
+                       (log-message "Event manager interrupted while doing IO"))
+                     (catch InterruptedException t
+                       (log-message "Event manager interrupted"))
+                     (catch Throwable t
+                       (log-error t "Error when processing event")
+                       (exit-process! 20 "Error when processing an event")))))]
+    (.setDaemon runner daemon?)
+    (.start runner)
+    (reify
+      EventManager
+
+      (add
+        [this event-fn]
+        ;; should keep track of total added and processed to know if this is finished yet
+        (when-not @running
+          (throw (RuntimeException. "Cannot add events to a shutdown event manager")))
+        (swap! added inc)
+        (.put queue event-fn))
+
+      (waiting?
+        [this]
+        (or (Time/isThreadWaiting runner)
+            (= @processed @added)))
+
+      (shutdown
+        [this]
+        (reset! running false)
+        (.interrupt runner)
+        (.join runner)))))
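
A minimal usage sketch of the EventManager protocol above (illustrative only, not from the patch):

    (require '[org.apache.storm.event :as event])

    (let [manager (event/event-manager true)]          ;; run the handler thread as a daemon
      (event/add manager #(println "handling event"))  ;; event-fns are no-arg functions
      (Thread/sleep 100)                               ;; give the runner thread a moment
      (event/shutdown manager))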

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/local_state.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/local_state.clj b/storm-core/src/clj/org/apache/storm/local_state.clj
new file mode 100644
index 0000000..a95a85b
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/local_state.clj
@@ -0,0 +1,131 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.local-state
+  (:use [org.apache.storm log util])
+  (:import [org.apache.storm.generated StormTopology
+            InvalidTopologyException GlobalStreamId
+            LSSupervisorId LSApprovedWorkers
+            LSSupervisorAssignments LocalAssignment
+            ExecutorInfo LSWorkerHeartbeat
+            LSTopoHistory LSTopoHistoryList
+            WorkerResources])
+  (:import [org.apache.storm.utils LocalState]))
+
+(def LS-WORKER-HEARTBEAT "worker-heartbeat")
+(def LS-ID "supervisor-id")
+(def LS-LOCAL-ASSIGNMENTS "local-assignments")
+(def LS-APPROVED-WORKERS "approved-workers")
+(def LS-TOPO-HISTORY "topo-hist")
+
+(defn ->LSTopoHistory
+  [{topoid :topoid timestamp :timestamp users :users groups :groups}]
+  (LSTopoHistory. topoid timestamp users groups))
+
+(defn ->topo-history
+  [thrift-topo-hist]
+  {
+    :topoid (.get_topology_id thrift-topo-hist)
+    :timestamp (.get_time_stamp thrift-topo-hist)
+    :users (.get_users thrift-topo-hist)
+    :groups (.get_groups thrift-topo-hist)})
+
+(defn ls-topo-hist!
+  [^LocalState local-state hist-list]
+  (.put local-state LS-TOPO-HISTORY
+    (LSTopoHistoryList. (map ->LSTopoHistory hist-list))))
+
+(defn ls-topo-hist
+  [^LocalState local-state]
+  (if-let [thrift-hist-list (.get local-state LS-TOPO-HISTORY)]
+    (map ->topo-history (.get_topo_history thrift-hist-list))))
+
+(defn ls-supervisor-id!
+  [^LocalState local-state ^String id]
+    (.put local-state LS-ID (LSSupervisorId. id)))
+
+(defn ls-supervisor-id
+  [^LocalState local-state]
+  (if-let [super-id (.get local-state LS-ID)]
+    (.get_supervisor_id super-id)))
+
+(defn ls-approved-workers!
+  [^LocalState local-state workers]
+    (.put local-state LS-APPROVED-WORKERS (LSApprovedWorkers. workers)))
+
+(defn ls-approved-workers
+  [^LocalState local-state]
+  (if-let [tmp (.get local-state LS-APPROVED-WORKERS)]
+    (into {} (.get_approved_workers tmp))))
+
+(defn ->ExecutorInfo
+  [[low high]] (ExecutorInfo. low high))
+
+(defn ->ExecutorInfo-list
+  [executors]
+  (map ->ExecutorInfo executors))
+
+(defn ->executor-list
+  [executors]
+  (into [] 
+    (for [exec-info executors] 
+      [(.get_task_start exec-info) (.get_task_end exec-info)])))
+
+(defn ->LocalAssignment
+  [{storm-id :storm-id executors :executors resources :resources}]
+  (let [assignment (LocalAssignment. storm-id (->ExecutorInfo-list executors))]
+    (if resources (.set_resources assignment
+                                  (doto (WorkerResources. )
+                                    (.set_mem_on_heap (first resources))
+                                    (.set_mem_off_heap (second resources))
+                                    (.set_cpu (last resources)))))
+    assignment))
+
+(defn mk-local-assignment
+  [storm-id executors resources]
+  {:storm-id storm-id :executors executors :resources resources})
+
+(defn ->local-assignment
+  [^LocalAssignment thrift-local-assignment]
+    (mk-local-assignment
+      (.get_topology_id thrift-local-assignment)
+      (->executor-list (.get_executors thrift-local-assignment))
+      (.get_resources thrift-local-assignment)))
+
+(defn ls-local-assignments!
+  [^LocalState local-state assignments]
+    (let [local-assignment-map (map-val ->LocalAssignment assignments)]
+    (.put local-state LS-LOCAL-ASSIGNMENTS 
+          (LSSupervisorAssignments. local-assignment-map))))
+
+(defn ls-local-assignments
+  [^LocalState local-state]
+    (if-let [thrift-local-assignments (.get local-state LS-LOCAL-ASSIGNMENTS)]
+      (map-val
+        ->local-assignment
+        (.get_assignments thrift-local-assignments))))
+
+(defn ls-worker-heartbeat!
+  [^LocalState local-state time-secs storm-id executors port]
+  (.put local-state LS-WORKER-HEARTBEAT (LSWorkerHeartbeat. time-secs storm-id (->ExecutorInfo-list executors) port) false))
+
+(defn ls-worker-heartbeat 
+  [^LocalState local-state]
+  (if-let [worker-hb (.get local-state LS-WORKER-HEARTBEAT)]
+    {:time-secs (.get_time_secs worker-hb)
+     :storm-id (.get_topology_id worker-hb)
+     :executors (->executor-list (.get_executors worker-hb))
+     :port (.get_port worker-hb)}))
+
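
A minimal round-trip sketch of the helpers above (illustrative only, not from the patch; the scratch directory is made up):

    (require '[org.apache.storm.local-state :as ls])
    (import '[org.apache.storm.utils LocalState])

    (let [state (LocalState. "/tmp/storm-local-state-example")]
      (ls/ls-supervisor-id! state "supervisor-1")  ;; persist the id under the "supervisor-id" key
      (ls/ls-supervisor-id state))                 ;; => "supervisor-1"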

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/log.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/log.clj b/storm-core/src/clj/org/apache/storm/log.clj
new file mode 100644
index 0000000..96570e3
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/log.clj
@@ -0,0 +1,56 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+
+(ns org.apache.storm.log
+  (:require [clojure.tools.logging :as log])
+  (:use [clojure pprint])
+  (:import [java.io StringWriter]))
+
+(defmacro log-message
+  [& args]
+  `(log/info (str ~@args)))
+
+(defmacro log-error
+  [e & args]
+  `(log/log :error ~e (str ~@args)))
+
+(defmacro log-debug
+  [& args]
+  `(log/debug (str ~@args)))
+
+(defmacro log-warn-error
+  [e & args]
+  `(log/warn (str ~@args) ~e))
+
+(defmacro log-warn
+  [& args]
+  `(log/warn (str ~@args)))
+
+(defn log-capture!
+  [& args]
+  (apply log/log-capture! args))
+
+(defn log-stream
+  [& args]
+  (apply log/log-stream args))
+
+(defmacro log-pprint
+  [& args]
+  `(let [^StringWriter writer# (StringWriter.)]
+     (doall
+       (for [object# [~@args]]
+         (pprint object# writer#)))
+     (log-message "\n" writer#)))

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/messaging/loader.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/messaging/loader.clj b/storm-core/src/clj/org/apache/storm/messaging/loader.clj
new file mode 100644
index 0000000..b190ab0
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/messaging/loader.clj
@@ -0,0 +1,34 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.messaging.loader
+  (:import [org.apache.storm.messaging IConnection DeserializingConnectionCallback])
+  (:require [org.apache.storm.messaging [local :as local]]))
+
+(defn mk-local-context []
+  (local/mk-context))
+
+(defn- mk-connection-callback
+  "make an IConnectionCallback"
+  [transfer-local-fn storm-conf worker-context]
+  (DeserializingConnectionCallback. storm-conf
+                                    worker-context
+                                    (fn [batch]
+                                      (transfer-local-fn batch))))
+
+(defn register-callback
+  "register the local-transfer-fn with the server"
+  [transfer-local-fn ^IConnection socket storm-conf worker-context]
+  (.registerRecv socket (mk-connection-callback transfer-local-fn storm-conf worker-context)))

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/messaging/local.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/messaging/local.clj b/storm-core/src/clj/org/apache/storm/messaging/local.clj
new file mode 100644
index 0000000..32fbb34
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/messaging/local.clj
@@ -0,0 +1,23 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.messaging.local
+  (:import [org.apache.storm.messaging IContext])
+  (:import [org.apache.storm.messaging.local Context]))
+
+(defn mk-context [] 
+  (let [context  (Context.)]
+    (.prepare ^IContext context nil)
+    context))

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/metric/testing.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/metric/testing.clj b/storm-core/src/clj/org/apache/storm/metric/testing.clj
new file mode 100644
index 0000000..a8ec438
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/metric/testing.clj
@@ -0,0 +1,68 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.metric.testing
+  "This namespace is for AOT dependent metrics testing code."
+  (:gen-class))
+
+(letfn [(for- [threader arg seq-exprs body]
+          `(reduce #(%2 %1)
+                   ~arg
+                   (for ~seq-exprs
+                     (fn [arg#] (~threader arg# ~@body)))))]
+  (defmacro for->
+    "Apply a thread expression to a sequence.
+   eg.
+      (-> 1
+        (for-> [x [1 2 3]]
+          (+ x)))
+   => 7"
+    {:indent 1}
+    [arg seq-exprs & body]
+    (for- 'clojure.core/-> arg seq-exprs body)))
+
+(gen-class
+ :name clojure.storm.metric.testing.FakeMetricConsumer
+ :implements [org.apache.storm.metric.api.IMetricsConsumer]
+ :prefix "impl-")
+
+(def buffer (atom nil))
+
+(defn impl-prepare [this conf argument ctx error-reporter]
+  (reset! buffer {}))
+
+(defn impl-cleanup [this]
+  (reset! buffer {}))
+
+(defn vec-conj [coll x] (if coll
+                          (conj coll x)
+                          [x]))
+
+(defn expand-complex-datapoint [dp]
+  (if (or (map? (.value dp))
+          (instance? java.util.AbstractMap (.value dp)))
+    (into [] (for [[k v] (.value dp)]
+               [(str (.name dp) "/" k) v]))
+    [[(.name dp) (.value dp)]]))
+
+(defn impl-handleDataPoints [this task-info data-points]  
+  (swap! buffer
+         (fn [old]
+           (-> old
+            (for-> [dp data-points
+                    [name val] (expand-complex-datapoint dp)]
+                   (update-in [(.srcComponentId task-info) name (.srcTaskId task-info)] vec-conj val))))))
+ 
+

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/pacemaker/pacemaker.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/pacemaker/pacemaker.clj b/storm-core/src/clj/org/apache/storm/pacemaker/pacemaker.clj
index e9d5db5..70313e4 100644
--- a/storm-core/src/clj/org/apache/storm/pacemaker/pacemaker.clj
+++ b/storm-core/src/clj/org/apache/storm/pacemaker/pacemaker.clj
@@ -18,11 +18,11 @@
   (:import [org.apache.storm.pacemaker PacemakerServer IServerMessageHandler]
            [java.util.concurrent ConcurrentHashMap]
            [java.util.concurrent.atomic AtomicInteger]
-           [backtype.storm.generated HBNodes
+           [org.apache.storm.generated HBNodes
                                      HBServerMessageType HBMessage HBMessageData HBPulse]
-           [backtype.storm.utils VersionInfo])
+           [org.apache.storm.utils VersionInfo])
   (:use [clojure.string :only [replace-first split]]
-        [backtype.storm log config util])
+        [org.apache.storm log config util])
   (:require [clojure.java.jmx :as jmx])
   (:gen-class))
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/pacemaker/pacemaker_state_factory.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/pacemaker/pacemaker_state_factory.clj b/storm-core/src/clj/org/apache/storm/pacemaker/pacemaker_state_factory.clj
index d99442b..cede59e 100644
--- a/storm-core/src/clj/org/apache/storm/pacemaker/pacemaker_state_factory.clj
+++ b/storm-core/src/clj/org/apache/storm/pacemaker/pacemaker_state_factory.clj
@@ -16,20 +16,20 @@
 
 (ns org.apache.storm.pacemaker.pacemaker-state-factory
   (:require [org.apache.storm.pacemaker pacemaker]
-            [backtype.storm.cluster-state [zookeeper-state-factory :as zk-factory]]
-            [backtype.storm
+            [org.apache.storm.cluster-state [zookeeper-state-factory :as zk-factory]]
+            [org.apache.storm
              [config :refer :all]
              [cluster :refer :all]
              [log :refer :all]
              [util :as util]])
-  (:import [backtype.storm.generated
+  (:import [org.apache.storm.generated
             HBExecutionException HBServerMessageType HBMessage
             HBMessageData HBPulse]
-           [backtype.storm.cluster_state zookeeper_state_factory]
-           [backtype.storm.cluster ClusterState]
+           [org.apache.storm.cluster_state zookeeper_state_factory]
+           [org.apache.storm.cluster ClusterState]
            [org.apache.storm.pacemaker PacemakerClient])
   (:gen-class
-   :implements [backtype.storm.cluster.ClusterStateFactory]))
+   :implements [org.apache.storm.cluster.ClusterStateFactory]))
 
 ;; So we can mock the client for testing
 (defn makeClient [conf]

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/process_simulator.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/process_simulator.clj b/storm-core/src/clj/org/apache/storm/process_simulator.clj
new file mode 100644
index 0000000..03c3dd9
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/process_simulator.clj
@@ -0,0 +1,51 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+
+(ns org.apache.storm.process-simulator
+  (:use [org.apache.storm log util]))
+
+(def pid-counter (mk-counter))
+
+(def process-map (atom {}))
+
+(def kill-lock (Object.))
+
+(defn register-process [pid shutdownable]
+  (swap! process-map assoc pid shutdownable))
+
+(defn process-handle
+  [pid]
+  (@process-map pid))
+
+(defn all-processes
+  []
+  (vals @process-map))
+
+(defn kill-process
+  "Uses `locking` in case cluster shuts down while supervisor is
+  killing a task"
+  [pid]
+  (locking kill-lock
+    (log-message "Killing process " pid)
+    (let [shutdownable (process-handle pid)]
+      (swap! process-map dissoc pid)
+      (when shutdownable
+        (.shutdown shutdownable)))))
+
+(defn kill-all-processes
+  []
+  (doseq [pid (keys @process-map)]
+    (kill-process pid)))

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/scheduler/DefaultScheduler.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/scheduler/DefaultScheduler.clj b/storm-core/src/clj/org/apache/storm/scheduler/DefaultScheduler.clj
new file mode 100644
index 0000000..f6f89f8
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/scheduler/DefaultScheduler.clj
@@ -0,0 +1,77 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.scheduler.DefaultScheduler
+  (:use [org.apache.storm util config])
+  (:require [org.apache.storm.scheduler.EvenScheduler :as EvenScheduler])
+  (:import [org.apache.storm.scheduler IScheduler Topologies
+            Cluster TopologyDetails WorkerSlot SchedulerAssignment
+            EvenScheduler ExecutorDetails])
+  (:gen-class
+    :implements [org.apache.storm.scheduler.IScheduler]))
+
+(defn- bad-slots [existing-slots num-executors num-workers]
+  (if (= 0 num-workers)
+    '()
+    (let [distribution (atom (integer-divided num-executors num-workers))
+          keepers (atom {})]
+      (doseq [[node+port executor-list] existing-slots :let [executor-count (count executor-list)]]
+        (when (pos? (get @distribution executor-count 0))
+          (swap! keepers assoc node+port executor-list)
+          (swap! distribution update-in [executor-count] dec)
+          ))
+      (->> @keepers
+           keys
+           (apply dissoc existing-slots)
+           keys
+           (map (fn [[node port]]
+                  (WorkerSlot. node port)))))))
+
+(defn slots-can-reassign [^Cluster cluster slots]
+  (->> slots
+      (filter
+        (fn [[node port]]
+          (if-not (.isBlackListed cluster node)
+            (if-let [supervisor (.getSupervisorById cluster node)]
+              (.contains (.getAllPorts supervisor) (int port))
+              ))))))
+
+(defn -prepare [this conf]
+  )
+
+(defn default-schedule [^Topologies topologies ^Cluster cluster]
+  (let [needs-scheduling-topologies (.needsSchedulingTopologies cluster topologies)]
+    (doseq [^TopologyDetails topology needs-scheduling-topologies
+            :let [topology-id (.getId topology)
+                  available-slots (->> (.getAvailableSlots cluster)
+                                       (map #(vector (.getNodeId %) (.getPort %))))
+                  all-executors (->> topology
+                                     .getExecutors
+                                     (map #(vector (.getStartTask %) (.getEndTask %)))
+                                     set)
+                  alive-assigned (EvenScheduler/get-alive-assigned-node+port->executors cluster topology-id)
+                  alive-executors (->> alive-assigned vals (apply concat) set)
+                  can-reassign-slots (slots-can-reassign cluster (keys alive-assigned))
+                  total-slots-to-use (min (.getNumWorkers topology)
+                                          (+ (count can-reassign-slots) (count available-slots)))
+                  bad-slots (if (or (> total-slots-to-use (count alive-assigned)) 
+                                    (not= alive-executors all-executors))
+                                (bad-slots alive-assigned (count all-executors) total-slots-to-use)
+                                [])]]
+      (.freeSlots cluster bad-slots)
+      (EvenScheduler/schedule-topologies-evenly (Topologies. {topology-id topology}) cluster))))
+
+(defn -schedule [this ^Topologies topologies ^Cluster cluster]
+  (default-schedule topologies cluster))

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/scheduler/EvenScheduler.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/scheduler/EvenScheduler.clj b/storm-core/src/clj/org/apache/storm/scheduler/EvenScheduler.clj
new file mode 100644
index 0000000..783da26
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/scheduler/EvenScheduler.clj
@@ -0,0 +1,81 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.scheduler.EvenScheduler
+  (:use [org.apache.storm util log config])
+  (:require [clojure.set :as set])
+  (:import [org.apache.storm.scheduler IScheduler Topologies
+            Cluster TopologyDetails WorkerSlot ExecutorDetails])
+  (:gen-class
+    :implements [org.apache.storm.scheduler.IScheduler]))
+
+(defn sort-slots [all-slots]
+  (let [split-up (sort-by count > (vals (group-by first all-slots)))]
+    (apply interleave-all split-up)
+    ))
+
+(defn get-alive-assigned-node+port->executors [cluster topology-id]
+  (let [existing-assignment (.getAssignmentById cluster topology-id)
+        executor->slot (if existing-assignment
+                         (.getExecutorToSlot existing-assignment)
+                         {}) 
+        executor->node+port (into {} (for [[^ExecutorDetails executor ^WorkerSlot slot] executor->slot
+                                           :let [executor [(.getStartTask executor) (.getEndTask executor)]
+                                                 node+port [(.getNodeId slot) (.getPort slot)]]]
+                                       {executor node+port}))
+        alive-assigned (reverse-map executor->node+port)]
+    alive-assigned))
+
+(defn- schedule-topology [^TopologyDetails topology ^Cluster cluster]
+  (let [topology-id (.getId topology)
+        available-slots (->> (.getAvailableSlots cluster)
+                             (map #(vector (.getNodeId %) (.getPort %))))
+        all-executors (->> topology
+                          .getExecutors
+                          (map #(vector (.getStartTask %) (.getEndTask %)))
+                          set)
+        alive-assigned (get-alive-assigned-node+port->executors cluster topology-id)
+        total-slots-to-use (min (.getNumWorkers topology)
+                                (+ (count available-slots) (count alive-assigned)))
+        reassign-slots (take (- total-slots-to-use (count alive-assigned))
+                             (sort-slots available-slots))
+        reassign-executors (sort (set/difference all-executors (set (apply concat (vals alive-assigned)))))
+        reassignment (into {}
+                           (map vector
+                                reassign-executors
+                                ;; for some reason it goes into an infinite loop without limiting the repeat-seq
+                                (repeat-seq (count reassign-executors) reassign-slots)))]
+    (when-not (empty? reassignment)
+      (log-message "Available slots: " (pr-str available-slots))
+      )
+    reassignment))
+
+(defn schedule-topologies-evenly [^Topologies topologies ^Cluster cluster]
+  (let [needs-scheduling-topologies (.needsSchedulingTopologies cluster topologies)]
+    (doseq [^TopologyDetails topology needs-scheduling-topologies
+            :let [topology-id (.getId topology)
+                  new-assignment (schedule-topology topology cluster)
+                  node+port->executors (reverse-map new-assignment)]]
+      (doseq [[node+port executors] node+port->executors
+              :let [^WorkerSlot slot (WorkerSlot. (first node+port) (last node+port))
+                    executors (for [[start-task end-task] executors]
+                                (ExecutorDetails. start-task end-task))]]
+        (.assign cluster slot topology-id executors)))))
+
+(defn -prepare [this conf]
+  )
+
+(defn -schedule [this ^Topologies topologies ^Cluster cluster]
+  (schedule-topologies-evenly topologies cluster))
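
For intuition on sort-slots above: it groups the free slots by node and interleaves the groups, so consecutive slots in the result land on different machines where possible. An illustrative sketch (slot data made up):

    (require '[org.apache.storm.scheduler.EvenScheduler :as even-scheduler])

    (even-scheduler/sort-slots [["node1" 6700] ["node1" 6701] ["node2" 6700] ["node3" 6700]])
    ;; => something like (["node1" 6700] ["node2" 6700] ["node3" 6700] ["node1" 6701])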

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/scheduler/IsolationScheduler.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/scheduler/IsolationScheduler.clj b/storm-core/src/clj/org/apache/storm/scheduler/IsolationScheduler.clj
new file mode 100644
index 0000000..2e86748
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/scheduler/IsolationScheduler.clj
@@ -0,0 +1,219 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.scheduler.IsolationScheduler
+  (:use [org.apache.storm util config log])
+  (:require [org.apache.storm.scheduler.DefaultScheduler :as DefaultScheduler])
+  (:import [java.util HashSet Set List LinkedList ArrayList Map HashMap])
+  (:import [org.apache.storm.scheduler IScheduler Topologies
+            Cluster TopologyDetails WorkerSlot SchedulerAssignment
+            EvenScheduler ExecutorDetails])
+  (:gen-class
+    :init init
+    :constructors {[] []}
+    :state state 
+    :implements [org.apache.storm.scheduler.IScheduler]))
+
+(defn -init []
+  [[] (container)])
+
+(defn -prepare [this conf]
+  (container-set! (.state this) conf))
+
+(defn- compute-worker-specs "Returns a mutable set of sets of executors"
+  [^TopologyDetails details]
+  (->> (.getExecutorToComponent details)
+       reverse-map
+       (map second)
+       (apply concat)
+       (map vector (repeat-seq (range (.getNumWorkers details))))
+       (group-by first)
+       (map-val #(map second %))
+       vals
+       (map set)
+       (HashSet.)
+       ))
+
+(defn isolated-topologies [conf topologies]
+  (let [tset (-> conf (get ISOLATION-SCHEDULER-MACHINES) keys set)]
+    (filter (fn [^TopologyDetails t] (contains? tset (.getName t))) topologies)
+    ))
+
+;; map from topology id -> set of sets of executors
+(defn topology-worker-specs [iso-topologies]
+  (->> iso-topologies
+       (map (fn [t] {(.getId t) (compute-worker-specs t)}))
+       (apply merge)))
+
+(defn machine-distribution [conf ^TopologyDetails topology]
+  (let [name->machines (get conf ISOLATION-SCHEDULER-MACHINES)
+        machines (get name->machines (.getName topology))
+        workers (.getNumWorkers topology)]
+    (-> (integer-divided workers machines)
+        (dissoc 0)
+        (HashMap.)
+        )))
+
+(defn topology-machine-distribution [conf iso-topologies]
+  (->> iso-topologies
+       (map (fn [t] {(.getId t) (machine-distribution conf t)}))
+       (apply merge)))
+
+(defn host-assignments [^Cluster cluster]
+  (letfn [(to-slot-specs [^SchedulerAssignment ass]
+            (->> ass
+                 .getExecutorToSlot
+                 reverse-map
+                 (map (fn [[slot executors]]
+                        [slot (.getTopologyId ass) (set executors)]))))]
+  (->> cluster
+       .getAssignments
+       vals
+       (mapcat to-slot-specs)
+       (group-by (fn [[^WorkerSlot slot & _]] (.getHost cluster (.getNodeId slot))))
+       )))
+
+(defn- decrement-distribution! [^Map distribution value]
+  (let [v (-> distribution (get value) dec)]
+    (if (zero? v)
+      (.remove distribution value)
+      (.put distribution value v))))
+
+;; returns list of list of slots, reverse sorted by number of slots
+(defn- host-assignable-slots [^Cluster cluster]
+  (-<> cluster
+       .getAssignableSlots
+       (group-by #(.getHost cluster (.getNodeId ^WorkerSlot %)) <>)
+       (dissoc <> nil)
+       (sort-by #(-> % second count -) <>)
+       shuffle
+       (LinkedList. <>)
+       ))
+
+(defn- host->used-slots [^Cluster cluster]
+  (->> cluster
+       .getUsedSlots
+       (group-by #(.getHost cluster (.getNodeId ^WorkerSlot %)))
+       ))
+
+(defn- distribution->sorted-amts [distribution]
+  (->> distribution
+       (mapcat (fn [[val amt]] (repeat amt val)))
+       (sort-by -)
+       ))
+
+(defn- allocated-topologies [topology-worker-specs]
+  (->> topology-worker-specs
+    (filter (fn [[_ worker-specs]] (empty? worker-specs)))
+    (map first)
+    set
+    ))
+
+(defn- leftover-topologies [^Topologies topologies filter-ids-set]
+  (->> topologies
+       .getTopologies
+       (filter (fn [^TopologyDetails t] (not (contains? filter-ids-set (.getId t)))))
+       (map (fn [^TopologyDetails t] {(.getId t) t}))
+       (apply merge)
+       (Topologies.)
+       ))
+
+;; for each isolated topology:
+;;   compute even distribution of executors -> workers on the number of workers specified for the topology
+;;   compute distribution of workers to machines
+;; determine host -> list of [slot, topology id, executors]
+;; iterate through hosts and: a machine is good if:
+;;   1. only running workers from one isolated topology
+;;   2. all workers running on it match one of the distributions of executors for that topology
+;;   3. matches one of the # of workers
+;; blacklist the good hosts and remove those workers from the list of need to be assigned workers
+;; otherwise unassign all other workers for isolated topologies if assigned
+
+(defn remove-elem-from-set! [^Set aset]
+  (let [elem (-> aset .iterator .next)]
+    (.remove aset elem)
+    elem
+    ))
+
+;; get host -> all assignable worker slots for non-blacklisted machines (assigned or not assigned)
+;; will then have a list of machines that need to be assigned (machine -> [topology, list of list of executors])
+;; match each spec to a machine (who has the right number of workers), free everything else on that machine and assign those slots (do one topology at a time)
+;; blacklist all machines who had production slots defined
+;; log isolated topologies who weren't able to get enough slots / machines
+;; run default scheduler on isolated topologies that didn't have enough slots + non-isolated topologies on remaining machines
+;; set blacklist to what it was initially
+(defn -schedule [this ^Topologies topologies ^Cluster cluster]
+  (let [conf (container-get (.state this))        
+        orig-blacklist (HashSet. (.getBlacklistedHosts cluster))
+        iso-topologies (isolated-topologies conf (.getTopologies topologies))
+        iso-ids-set (->> iso-topologies (map #(.getId ^TopologyDetails %)) set)
+        topology-worker-specs (topology-worker-specs iso-topologies)
+        topology-machine-distribution (topology-machine-distribution conf iso-topologies)
+        host-assignments (host-assignments cluster)]
+    (doseq [[host assignments] host-assignments]
+      (let [top-id (-> assignments first second)
+            distribution (get topology-machine-distribution top-id)
+            ^Set worker-specs (get topology-worker-specs top-id)
+            num-workers (count assignments)
+            ]
+        (if (and (contains? iso-ids-set top-id)
+                 (every? #(= (second %) top-id) assignments)
+                 (contains? distribution num-workers)
+                 (every? #(contains? worker-specs (nth % 2)) assignments))
+          (do (decrement-distribution! distribution num-workers)
+              (doseq [[_ _ executors] assignments] (.remove worker-specs executors))
+              (.blacklistHost cluster host))
+          (doseq [[slot top-id _] assignments]
+            (when (contains? iso-ids-set top-id)
+              (.freeSlot cluster slot)
+              ))
+          )))
+    
+    (let [host->used-slots (host->used-slots cluster)
+          ^LinkedList sorted-assignable-hosts (host-assignable-slots cluster)]
+      ;; TODO: can improve things further by ordering topologies in terms of who needs the least workers
+      (doseq [[top-id worker-specs] topology-worker-specs
+              :let [amts (distribution->sorted-amts (get topology-machine-distribution top-id))]]
+        (doseq [amt amts
+                :let [[host host-slots] (.peek sorted-assignable-hosts)]]
+          (when (and host-slots (>= (count host-slots) amt))
+            (.poll sorted-assignable-hosts)
+            (.freeSlots cluster (get host->used-slots host))
+            (doseq [slot (take amt host-slots)
+                    :let [executors-set (remove-elem-from-set! worker-specs)]]
+              (.assign cluster slot top-id executors-set))
+            (.blacklistHost cluster host))
+          )))
+    
+    (let [failed-iso-topologies (->> topology-worker-specs
+                                  (mapcat (fn [[top-id worker-specs]]
+                                    (if-not (empty? worker-specs) [top-id])
+                                    )))]
+      (if (empty? failed-iso-topologies)
+        ;; run default scheduler on non-isolated topologies
+        (-<> topology-worker-specs
+             allocated-topologies
+             (leftover-topologies topologies <>)
+             (DefaultScheduler/default-schedule <> cluster))
+        (do
+          (log-warn "Unable to isolate topologies " (pr-str failed-iso-topologies) ". No machine had enough worker slots to run the remaining workers for these topologies. Clearing all other resources and waiting for enough resources for isolated topologies before allocating any other resources.")
+          ;; clear workers off all hosts that are not blacklisted
+          (doseq [[host slots] (host->used-slots cluster)]
+            (if-not (.isBlacklistedHost cluster host)
+              (.freeSlots cluster slots)
+              )))
+        ))
+    (.setBlacklistedHosts cluster orig-blacklist)
+    ))
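
This scheduler is driven entirely by configuration: the ISOLATION-SCHEDULER-MACHINES map read by isolated-topologies and machine-distribution comes from storm.yaml and maps a topology name to the number of machines dedicated to it. An illustrative storm.yaml fragment (the topology names and counts here are made up):

    storm.scheduler: "org.apache.storm.scheduler.IsolationScheduler"
    isolation.scheduler.machines:
        "my-isolated-topology": 4
        "another-topology": 2

Topologies named there get whole hosts to themselves through the blacklisting logic in -schedule; everything else falls through to DefaultScheduler/default-schedule on the remaining machines, and non-isolated work is held back if any isolated topology cannot yet get its machines.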


[20/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/daemon/executor.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/daemon/executor.clj b/storm-core/src/clj/org/apache/storm/daemon/executor.clj
new file mode 100644
index 0000000..c65f5d8
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/daemon/executor.clj
@@ -0,0 +1,855 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.daemon.executor
+  (:use [org.apache.storm.daemon common])
+  (:import [org.apache.storm.generated Grouping]
+           [java.io Serializable])
+  (:use [org.apache.storm util config log timer stats])
+  (:import [java.util List Random HashMap ArrayList LinkedList Map])
+  (:import [org.apache.storm ICredentialsListener])
+  (:import [org.apache.storm.hooks ITaskHook])
+  (:import [org.apache.storm.tuple AddressedTuple Tuple Fields TupleImpl MessageId])
+  (:import [org.apache.storm.spout ISpoutWaitStrategy ISpout SpoutOutputCollector ISpoutOutputCollector])
+  (:import [org.apache.storm.hooks.info SpoutAckInfo SpoutFailInfo
+            EmitInfo BoltFailInfo BoltAckInfo BoltExecuteInfo])
+  (:import [org.apache.storm.grouping CustomStreamGrouping])
+  (:import [org.apache.storm.task WorkerTopologyContext IBolt OutputCollector IOutputCollector])
+  (:import [org.apache.storm.generated GlobalStreamId])
+  (:import [org.apache.storm.utils Utils TupleUtils MutableObject RotatingMap RotatingMap$ExpiredCallback MutableLong Time DisruptorQueue WorkerBackpressureThread])
+  (:import [com.lmax.disruptor InsufficientCapacityException])
+  (:import [org.apache.storm.serialization KryoTupleSerializer])
+  (:import [org.apache.storm.daemon Shutdownable])
+  (:import [org.apache.storm.metric.api IMetric IMetricsConsumer$TaskInfo IMetricsConsumer$DataPoint StateMetric])
+  (:import [org.apache.storm Config Constants])
+  (:import [org.apache.storm.cluster ClusterStateContext DaemonType])
+  (:import [org.apache.storm.grouping LoadAwareCustomStreamGrouping LoadAwareShuffleGrouping LoadMapping ShuffleGrouping])
+  (:import [java.util.concurrent ConcurrentLinkedQueue])
+  (:require [org.apache.storm [thrift :as thrift]
+             [cluster :as cluster] [disruptor :as disruptor] [stats :as stats]])
+  (:require [org.apache.storm.daemon [task :as task]])
+  (:require [org.apache.storm.daemon.builtin-metrics :as builtin-metrics])
+  (:require [clojure.set :as set]))
+
+(defn- mk-fields-grouper
+  [^Fields out-fields ^Fields group-fields ^List target-tasks]
+  (let [num-tasks (count target-tasks)
+        task-getter (fn [i] (.get target-tasks i))]
+    (fn [task-id ^List values load]
+      (-> (.select out-fields group-fields values)
+          (TupleUtils/listHashCode)
+          (mod num-tasks)
+          task-getter))))
+
+(defn- mk-custom-grouper
+  [^CustomStreamGrouping grouping ^WorkerTopologyContext context ^String component-id ^String stream-id target-tasks]
+  (.prepare grouping context (GlobalStreamId. component-id stream-id) target-tasks)
+  (if (instance? LoadAwareCustomStreamGrouping grouping)
+    (fn [task-id ^List values load]
+      (.chooseTasks grouping task-id values load))
+    (fn [task-id ^List values load]
+      (.chooseTasks grouping task-id values))))
+
+(defn mk-shuffle-grouper
+  [^List target-tasks topo-conf ^WorkerTopologyContext context ^String component-id ^String stream-id]
+  (if (.get topo-conf TOPOLOGY-DISABLE-LOADAWARE-MESSAGING)
+    (mk-custom-grouper (ShuffleGrouping.) context component-id stream-id target-tasks)
+    (mk-custom-grouper (LoadAwareShuffleGrouping.) context component-id stream-id target-tasks)))
+
+(defn- mk-grouper
+  "Returns a function that returns a vector of which task indices to send tuple to, or just a single task index."
+  [^WorkerTopologyContext context component-id stream-id ^Fields out-fields thrift-grouping ^List target-tasks topo-conf]
+  (let [num-tasks (count target-tasks)
+        random (Random.)
+        target-tasks (vec (sort target-tasks))]
+    (condp = (thrift/grouping-type thrift-grouping)
+      :fields
+        (if (thrift/global-grouping? thrift-grouping)
+          (fn [task-id tuple load]
+            ;; It's possible for target to have multiple tasks if it reads multiple sources
+            (first target-tasks))
+          (let [group-fields (Fields. (thrift/field-grouping thrift-grouping))]
+            (mk-fields-grouper out-fields group-fields target-tasks)
+            ))
+      :all
+        (fn [task-id tuple load] target-tasks)
+      :shuffle
+        (mk-shuffle-grouper target-tasks topo-conf context component-id stream-id)
+      :local-or-shuffle
+        (let [same-tasks (set/intersection
+                           (set target-tasks)
+                           (set (.getThisWorkerTasks context)))]
+          (if-not (empty? same-tasks)
+            (mk-shuffle-grouper (vec same-tasks) topo-conf context component-id stream-id)
+            (mk-shuffle-grouper target-tasks topo-conf context component-id stream-id)))
+      :none
+        (fn [task-id tuple load]
+          (let [i (mod (.nextInt random) num-tasks)]
+            (get target-tasks i)
+            ))
+      :custom-object
+        (let [grouping (thrift/instantiate-java-object (.get_custom_object thrift-grouping))]
+          (mk-custom-grouper grouping context component-id stream-id target-tasks))
+      :custom-serialized
+        (let [grouping (Utils/javaDeserialize (.get_custom_serialized thrift-grouping) Serializable)]
+          (mk-custom-grouper grouping context component-id stream-id target-tasks))
+      :direct
+        :direct
+      )))
+
+(defn- outbound-groupings
+  [^WorkerTopologyContext worker-context this-component-id stream-id out-fields component->grouping topo-conf]
+  (->> component->grouping
+       (filter-key #(-> worker-context
+                        (.getComponentTasks %)
+                        count
+                        pos?))
+       (map (fn [[component tgrouping]]
+               [component
+                (mk-grouper worker-context
+                            this-component-id
+                            stream-id
+                            out-fields
+                            tgrouping
+                            (.getComponentTasks worker-context component)
+                            topo-conf)]))
+       (into {})
+       (HashMap.)))
+
+(defn outbound-components
+  "Returns map of stream id to component id to grouper"
+  [^WorkerTopologyContext worker-context component-id topo-conf]
+  (->> (.getTargets worker-context component-id)
+        clojurify-structure
+        (map (fn [[stream-id component->grouping]]
+               [stream-id
+                (outbound-groupings
+                  worker-context
+                  component-id
+                  stream-id
+                  (.getComponentOutputFields worker-context component-id stream-id)
+                  component->grouping
+                  topo-conf)]))
+         (into {})
+         (HashMap.)))
+
+(defn executor-type [^WorkerTopologyContext context component-id]
+  (let [topology (.getRawTopology context)
+        spouts (.get_spouts topology)
+        bolts (.get_bolts topology)]
+    (cond (contains? spouts component-id) :spout
+          (contains? bolts component-id) :bolt
+          :else (throw-runtime "Could not find " component-id " in topology " topology))))
+
+(defn executor-selector [executor-data & _] (:type executor-data))
+
+(defmulti mk-threads executor-selector)
+(defmulti mk-executor-stats executor-selector)
+(defmulti close-component executor-selector)
+
+(defn- normalized-component-conf [storm-conf general-context component-id]
+  (let [to-remove (disj (set ALL-CONFIGS)
+                        TOPOLOGY-DEBUG
+                        TOPOLOGY-MAX-SPOUT-PENDING
+                        TOPOLOGY-MAX-TASK-PARALLELISM
+                        TOPOLOGY-TRANSACTIONAL-ID
+                        TOPOLOGY-TICK-TUPLE-FREQ-SECS
+                        TOPOLOGY-SLEEP-SPOUT-WAIT-STRATEGY-TIME-MS
+                        TOPOLOGY-SPOUT-WAIT-STRATEGY
+                        TOPOLOGY-BOLTS-WINDOW-LENGTH-COUNT
+                        TOPOLOGY-BOLTS-WINDOW-LENGTH-DURATION-MS
+                        TOPOLOGY-BOLTS-SLIDING-INTERVAL-COUNT
+                        TOPOLOGY-BOLTS-SLIDING-INTERVAL-DURATION-MS
+                        TOPOLOGY-BOLTS-TUPLE-TIMESTAMP-FIELD-NAME
+                        TOPOLOGY-BOLTS-TUPLE-TIMESTAMP-MAX-LAG-MS
+                        )
+        spec-conf (-> general-context
+                      (.getComponentCommon component-id)
+                      .get_json_conf
+                      from-json)]
+    (merge storm-conf (apply dissoc spec-conf to-remove))
+    ))
+
+(defprotocol RunningExecutor
+  (render-stats [this])
+  (get-executor-id [this])
+  (credentials-changed [this creds])
+  (get-backpressure-flag [this]))
+
+(defn throttled-report-error-fn [executor]
+  (let [storm-conf (:storm-conf executor)
+        error-interval-secs (storm-conf TOPOLOGY-ERROR-THROTTLE-INTERVAL-SECS)
+        max-per-interval (storm-conf TOPOLOGY-MAX-ERROR-REPORT-PER-INTERVAL)
+        interval-start-time (atom (current-time-secs))
+        interval-errors (atom 0)
+        ]
+    (fn [error]
+      (log-error error)
+      (when (> (time-delta @interval-start-time)
+               error-interval-secs)
+        (reset! interval-errors 0)
+        (reset! interval-start-time (current-time-secs)))
+      (swap! interval-errors inc)
+
+      (when (<= @interval-errors max-per-interval)
+        (cluster/report-error (:storm-cluster-state executor) (:storm-id executor) (:component-id executor)
+                              (hostname storm-conf)
+                              (.getThisWorkerPort (:worker-context executor)) error)
+        ))))
+
+;; in its own function so that it can be mocked out by tracked topologies
+(defn mk-executor-transfer-fn [batch-transfer->worker storm-conf]
+  (fn this
+    [task tuple]
+    (let [val (AddressedTuple. task tuple)]
+      (when (= true (storm-conf TOPOLOGY-DEBUG))
+        (log-message "TRANSFERING tuple " val))
+      (disruptor/publish batch-transfer->worker val))))
+
+(defn mk-executor-data [worker executor-id]
+  (let [worker-context (worker-context worker)
+        task-ids (executor-id->tasks executor-id)
+        component-id (.getComponentId worker-context (first task-ids))
+        storm-conf (normalized-component-conf (:storm-conf worker) worker-context component-id)
+        executor-type (executor-type worker-context component-id)
+        batch-transfer->worker (disruptor/disruptor-queue
+                                  (str "executor"  executor-id "-send-queue")
+                                  (storm-conf TOPOLOGY-EXECUTOR-SEND-BUFFER-SIZE)
+                                  (storm-conf TOPOLOGY-DISRUPTOR-WAIT-TIMEOUT-MILLIS)
+                                  :producer-type :single-threaded
+                                  :batch-size (storm-conf TOPOLOGY-DISRUPTOR-BATCH-SIZE)
+                                  :batch-timeout (storm-conf TOPOLOGY-DISRUPTOR-BATCH-TIMEOUT-MILLIS))
+        ]
+    (recursive-map
+     :worker worker
+     :worker-context worker-context
+     :executor-id executor-id
+     :task-ids task-ids
+     :component-id component-id
+     :open-or-prepare-was-called? (atom false)
+     :storm-conf storm-conf
+     :receive-queue ((:executor-receive-queue-map worker) executor-id)
+     :storm-id (:storm-id worker)
+     :conf (:conf worker)
+     :shared-executor-data (HashMap.)
+     :storm-active-atom (:storm-active-atom worker)
+     :storm-component->debug-atom (:storm-component->debug-atom worker)
+     :batch-transfer-queue batch-transfer->worker
+     :transfer-fn (mk-executor-transfer-fn batch-transfer->worker storm-conf)
+     :suicide-fn (:suicide-fn worker)
+     :storm-cluster-state (cluster/mk-storm-cluster-state (:cluster-state worker) 
+                                                          :acls (Utils/getWorkerACL storm-conf)
+                                                          :context (ClusterStateContext. DaemonType/WORKER))
+     :type executor-type
+     ;; TODO: should refactor this to be part of the executor specific map (spout or bolt with :common field)
+     :stats (mk-executor-stats <> (sampling-rate storm-conf))
+     :interval->task->metric-registry (HashMap.)
+     :task->component (:task->component worker)
+     :stream->component->grouper (outbound-components worker-context component-id storm-conf)
+     :report-error (throttled-report-error-fn <>)
+     :report-error-and-die (fn [error]
+                             ((:report-error <>) error)
+                             (if (or
+                                    (exception-cause? InterruptedException error)
+                                    (exception-cause? java.io.InterruptedIOException error))
+                               (log-message "Got interrupted exception shutting thread down...")
+                               ((:suicide-fn <>))))
+     :sampler (mk-stats-sampler storm-conf)
+     :backpressure (atom false)
+     :spout-throttling-metrics (if (= executor-type :spout) 
+                                (builtin-metrics/make-spout-throttling-data)
+                                nil)
+     ;; TODO: add in the executor-specific stuff in a :specific... or make a spout-data, bolt-data function?
+     )))
+
+(defn- mk-disruptor-backpressure-handler [executor-data]
+  "make a handler for the executor's receive disruptor queue to
+  check highWaterMark and lowWaterMark for backpressure"
+  (disruptor/disruptor-backpressure-handler
+    (fn []
+      "When receive queue is above highWaterMark"
+      (if (not @(:backpressure executor-data))
+        (do (reset! (:backpressure executor-data) true)
+            (log-debug "executor " (:executor-id executor-data) " is congested, set backpressure flag true")
+            (WorkerBackpressureThread/notifyBackpressureChecker (:backpressure-trigger (:worker executor-data))))))
+    (fn []
+      "When receive queue is below lowWaterMark"
+      (if @(:backpressure executor-data)
+        (do (reset! (:backpressure executor-data) false)
+            (log-debug "executor " (:executor-id executor-data) " is not-congested, set backpressure flag false")
+            (WorkerBackpressureThread/notifyBackpressureChecker (:backpressure-trigger (:worker executor-data))))))))
+
+(defn start-batch-transfer->worker-handler! [worker executor-data]
+  (let [worker-transfer-fn (:transfer-fn worker)
+        cached-emit (MutableObject. (ArrayList.))
+        storm-conf (:storm-conf executor-data)
+        serializer (KryoTupleSerializer. storm-conf (:worker-context executor-data))
+        ]
+    (disruptor/consume-loop*
+      (:batch-transfer-queue executor-data)
+      (disruptor/handler [o seq-id batch-end?]
+        (let [^ArrayList alist (.getObject cached-emit)]
+          (.add alist o)
+          (when batch-end?
+            (worker-transfer-fn serializer alist)
+            (.setObject cached-emit (ArrayList.)))))
+      :kill-fn (:report-error-and-die executor-data))))
+
+(defn setup-metrics! [executor-data]
+  (let [{:keys [storm-conf receive-queue worker-context interval->task->metric-registry]} executor-data
+        distinct-time-bucket-intervals (keys interval->task->metric-registry)]
+    (doseq [interval distinct-time-bucket-intervals]
+      (schedule-recurring 
+       (:user-timer (:worker executor-data)) 
+       interval
+       interval
+       (fn []
+         (let [val [(AddressedTuple. AddressedTuple/BROADCAST_DEST (TupleImpl. worker-context [interval] Constants/SYSTEM_TASK_ID Constants/METRICS_TICK_STREAM_ID))]]
+           (disruptor/publish receive-queue val)))))))
+
+(defn metrics-tick
+  [executor-data task-data ^TupleImpl tuple]
+   (let [{:keys [interval->task->metric-registry ^WorkerTopologyContext worker-context]} executor-data
+         interval (.getInteger tuple 0)
+         task-id (:task-id task-data)
+         name->imetric (-> interval->task->metric-registry (get interval) (get task-id))
+         task-info (IMetricsConsumer$TaskInfo.
+                     (hostname (:storm-conf executor-data))
+                     (.getThisWorkerPort worker-context)
+                     (:component-id executor-data)
+                     task-id
+                     (long (/ (System/currentTimeMillis) 1000))
+                     interval)
+         data-points (->> name->imetric
+                          (map (fn [[name imetric]]
+                                 (let [value (.getValueAndReset ^IMetric imetric)]
+                                   (if value
+                                     (IMetricsConsumer$DataPoint. name value)))))
+                          (filter identity)
+                          (into []))]
+     (when (seq data-points)
+       (task/send-unanchored task-data Constants/METRICS_STREAM_ID [task-info data-points]))))
+
+(defn setup-ticks! [worker executor-data]
+  (let [storm-conf (:storm-conf executor-data)
+        tick-time-secs (storm-conf TOPOLOGY-TICK-TUPLE-FREQ-SECS)
+        receive-queue (:receive-queue executor-data)
+        context (:worker-context executor-data)]
+    (when tick-time-secs
+      (if (or (Utils/isSystemId (:component-id executor-data))
+              (and (= false (storm-conf TOPOLOGY-ENABLE-MESSAGE-TIMEOUTS))
+                   (= :spout (:type executor-data))))
+        (log-message "Timeouts disabled for executor " (:component-id executor-data) ":" (:executor-id executor-data))
+        (schedule-recurring
+          (:user-timer worker)
+          tick-time-secs
+          tick-time-secs
+          (fn []
+            (let [val [(AddressedTuple. AddressedTuple/BROADCAST_DEST (TupleImpl. context [tick-time-secs] Constants/SYSTEM_TASK_ID Constants/SYSTEM_TICK_STREAM_ID))]]
+              (disruptor/publish receive-queue val))))))))
+
+(defn mk-executor [worker executor-id initial-credentials]
+  (let [executor-data (mk-executor-data worker executor-id)
+        _ (log-message "Loading executor " (:component-id executor-data) ":" (pr-str executor-id))
+        task-datas (->> executor-data
+                        :task-ids
+                        (map (fn [t] [t (task/mk-task executor-data t)]))
+                        (into {})
+                        (HashMap.))
+        _ (log-message "Loaded executor tasks " (:component-id executor-data) ":" (pr-str executor-id))
+        report-error-and-die (:report-error-and-die executor-data)
+        component-id (:component-id executor-data)
+
+
+        disruptor-handler (mk-disruptor-backpressure-handler executor-data)
+        _ (.registerBackpressureCallback (:receive-queue executor-data) disruptor-handler)
+        _ (-> (.setHighWaterMark (:receive-queue executor-data) ((:storm-conf executor-data) BACKPRESSURE-DISRUPTOR-HIGH-WATERMARK))
+              (.setLowWaterMark ((:storm-conf executor-data) BACKPRESSURE-DISRUPTOR-LOW-WATERMARK))
+              (.setEnableBackpressure ((:storm-conf executor-data) TOPOLOGY-BACKPRESSURE-ENABLE)))
+
+        ;; starting the batch-transfer->worker ensures that anything publishing to that queue 
+        ;; doesn't block (because it's a single threaded queue and the caching/consumer started
+        ;; trick isn't thread-safe)
+        system-threads [(start-batch-transfer->worker-handler! worker executor-data)]
+        handlers (with-error-reaction report-error-and-die
+                   (mk-threads executor-data task-datas initial-credentials))
+        threads (concat handlers system-threads)]    
+    (setup-ticks! worker executor-data)
+
+    (log-message "Finished loading executor " component-id ":" (pr-str executor-id))
+    ;; TODO: add method here to get rendered stats... have worker call that when heartbeating
+    (reify
+      RunningExecutor
+      (render-stats [this]
+        (stats/render-stats! (:stats executor-data)))
+      (get-executor-id [this]
+        executor-id)
+      (credentials-changed [this creds]
+        (let [receive-queue (:receive-queue executor-data)
+              context (:worker-context executor-data)
+              val [(AddressedTuple. AddressedTuple/BROADCAST_DEST (TupleImpl. context [creds] Constants/SYSTEM_TASK_ID Constants/CREDENTIALS_CHANGED_STREAM_ID))]]
+          (disruptor/publish receive-queue val)))
+      (get-backpressure-flag [this]
+        @(:backpressure executor-data))
+      Shutdownable
+      (shutdown
+        [this]
+        (log-message "Shutting down executor " component-id ":" (pr-str executor-id))
+        (disruptor/halt-with-interrupt! (:receive-queue executor-data))
+        (disruptor/halt-with-interrupt! (:batch-transfer-queue executor-data))
+        (doseq [t threads]
+          (.interrupt t)
+          (.join t))
+        
+        (doseq [user-context (map :user-context (vals task-datas))]
+          (doseq [hook (.getHooks user-context)]
+            (.cleanup hook)))
+        (.disconnect (:storm-cluster-state executor-data))
+        (when @(:open-or-prepare-was-called? executor-data)
+          (doseq [obj (map :object (vals task-datas))]
+            (close-component executor-data obj)))
+        (log-message "Shut down executor " component-id ":" (pr-str executor-id)))
+        )))
+
+(defn- fail-spout-msg [executor-data task-data msg-id tuple-info time-delta reason id]
+  (let [^ISpout spout (:object task-data)
+        storm-conf (:storm-conf executor-data)
+        task-id (:task-id task-data)]
+    ;;TODO: need to throttle these when there's lots of failures
+    (when (= true (storm-conf TOPOLOGY-DEBUG))
+      (log-message "SPOUT Failing " id ": " tuple-info " REASON: " reason " MSG-ID: " msg-id))
+    (.fail spout msg-id)
+    (task/apply-hooks (:user-context task-data) .spoutFail (SpoutFailInfo. msg-id task-id time-delta))
+    (when time-delta
+      (stats/spout-failed-tuple! (:stats executor-data) (:stream tuple-info) time-delta))))
+
+(defn- ack-spout-msg [executor-data task-data msg-id tuple-info time-delta id]
+  (let [storm-conf (:storm-conf executor-data)
+        ^ISpout spout (:object task-data)
+        task-id (:task-id task-data)]
+    (when (= true (storm-conf TOPOLOGY-DEBUG))
+      (log-message "SPOUT Acking message " id " " msg-id))
+    (.ack spout msg-id)
+    (task/apply-hooks (:user-context task-data) .spoutAck (SpoutAckInfo. msg-id task-id time-delta))
+    (when time-delta
+      (stats/spout-acked-tuple! (:stats executor-data) (:stream tuple-info) time-delta))))
+
+(defn mk-task-receiver [executor-data tuple-action-fn]
+  (let [task-ids (:task-ids executor-data)
+        debug? (= true (-> executor-data :storm-conf (get TOPOLOGY-DEBUG)))
+        ]
+    (disruptor/clojure-handler
+      (fn [tuple-batch sequence-id end-of-batch?]
+        (fast-list-iter [^AddressedTuple addressed-tuple tuple-batch]
+          (let [^TupleImpl tuple (.getTuple addressed-tuple)
+                task-id (.getDest addressed-tuple)]
+            (when debug? (log-message "Processing received message FOR " task-id " TUPLE: " tuple))
+            (if (not= task-id AddressedTuple/BROADCAST_DEST)
+              (tuple-action-fn task-id tuple)
+              ;; null task ids are broadcast tuples
+              (fast-list-iter [task-id task-ids]
+                (tuple-action-fn task-id tuple)
+                ))
+            ))))))
+
+(defn executor-max-spout-pending [storm-conf num-tasks]
+  (let [p (storm-conf TOPOLOGY-MAX-SPOUT-PENDING)]
+    (if p (* p num-tasks))))
+
+(defn init-spout-wait-strategy [storm-conf]
+  (let [ret (-> storm-conf (get TOPOLOGY-SPOUT-WAIT-STRATEGY) new-instance)]
+    (.prepare ret storm-conf)
+    ret
+    ))
+
+;; Send sampled data to the eventlogger if the global or component level
+;; debug flag is set (via nimbus api).
+(defn send-to-eventlogger [executor-data task-data values component-id message-id random]
+    (let [c->d @(:storm-component->debug-atom executor-data)
+          options (get c->d component-id (get c->d (:storm-id executor-data)))
+          spct    (if (and (not-nil? options) (:enable options)) (:samplingpct options) 0)]
+      ;; the thread's initialized random number generator is used to generate
+      ;; uniformly distributed random numbers.
+      (when (and (> spct 0) (< (* 100 (.nextDouble random)) spct))
+        (task/send-unanchored
+          task-data
+          EVENTLOGGER-STREAM-ID
+          [component-id message-id (System/currentTimeMillis) values]))))
+
+(defmethod mk-threads :spout [executor-data task-datas initial-credentials]
+  (let [{:keys [storm-conf component-id worker-context transfer-fn report-error sampler open-or-prepare-was-called?]} executor-data
+        ^ISpoutWaitStrategy spout-wait-strategy (init-spout-wait-strategy storm-conf)
+        max-spout-pending (executor-max-spout-pending storm-conf (count task-datas))
+        ^Integer max-spout-pending (if max-spout-pending (int max-spout-pending))        
+        last-active (atom false)        
+        spouts (ArrayList. (map :object (vals task-datas)))
+        rand (Random. (Utils/secureRandomLong))
+        ^DisruptorQueue transfer-queue (executor-data :batch-transfer-queue)
+        debug? (= true (storm-conf TOPOLOGY-DEBUG))
+
+        pending (RotatingMap.
+                 2 ;; microoptimize for performance of .size method
+                 (reify RotatingMap$ExpiredCallback
+                   (expire [this id [task-id spout-id tuple-info start-time-ms]]
+                     (let [time-delta (if start-time-ms (time-delta-ms start-time-ms))]
+                       (fail-spout-msg executor-data (get task-datas task-id) spout-id tuple-info time-delta "TIMEOUT" id)
+                       ))))
+        tuple-action-fn (fn [task-id ^TupleImpl tuple]
+                          (let [stream-id (.getSourceStreamId tuple)]
+                            (condp = stream-id
+                              Constants/SYSTEM_TICK_STREAM_ID (.rotate pending)
+                              Constants/METRICS_TICK_STREAM_ID (metrics-tick executor-data (get task-datas task-id) tuple)
+                              Constants/CREDENTIALS_CHANGED_STREAM_ID 
+                                (let [task-data (get task-datas task-id)
+                                      spout-obj (:object task-data)]
+                                  (when (instance? ICredentialsListener spout-obj)
+                                    (.setCredentials spout-obj (.getValue tuple 0))))
+                              (let [id (.getValue tuple 0)
+                                    [stored-task-id spout-id tuple-finished-info start-time-ms] (.remove pending id)]
+                                (when spout-id
+                                  (when-not (= stored-task-id task-id)
+                                    (throw-runtime "Fatal error, mismatched task ids: " task-id " " stored-task-id))
+                                  (let [time-delta (if start-time-ms (time-delta-ms start-time-ms))]
+                                    (condp = stream-id
+                                      ACKER-ACK-STREAM-ID (ack-spout-msg executor-data (get task-datas task-id)
+                                                                         spout-id tuple-finished-info time-delta id)
+                                      ACKER-FAIL-STREAM-ID (fail-spout-msg executor-data (get task-datas task-id)
+                                                                           spout-id tuple-finished-info time-delta "FAIL-STREAM" id)
+                                      )))
+                                ;; TODO: on failure, emit tuple to failure stream
+                                ))))
+        receive-queue (:receive-queue executor-data)
+        event-handler (mk-task-receiver executor-data tuple-action-fn)
+        has-ackers? (has-ackers? storm-conf)
+        has-eventloggers? (has-eventloggers? storm-conf)
+        emitted-count (MutableLong. 0)
+        empty-emit-streak (MutableLong. 0)]
+   
+    [(async-loop
+      (fn []
+        ;; If topology was started in inactive state, don't call (.open spout) until it's activated first.
+        (while (not @(:storm-active-atom executor-data))
+          (Thread/sleep 100))
+        
+        (log-message "Opening spout " component-id ":" (keys task-datas))
+        (builtin-metrics/register-spout-throttling-metrics (:spout-throttling-metrics executor-data) storm-conf (:user-context (first (vals task-datas))))
+        (doseq [[task-id task-data] task-datas
+                :let [^ISpout spout-obj (:object task-data)
+                     tasks-fn (:tasks-fn task-data)
+                     send-spout-msg (fn [out-stream-id values message-id out-task-id]
+                                       (.increment emitted-count)
+                                       (let [out-tasks (if out-task-id
+                                                         (tasks-fn out-task-id out-stream-id values)
+                                                         (tasks-fn out-stream-id values))
+                                             rooted? (and message-id has-ackers?)
+                                             root-id (if rooted? (MessageId/generateId rand))
+                                             ^List out-ids (fast-list-for [t out-tasks] (if rooted? (MessageId/generateId rand)))]
+                                         (fast-list-iter [out-task out-tasks id out-ids]
+                                                         (let [tuple-id (if rooted?
+                                                                          (MessageId/makeRootId root-id id)
+                                                                          (MessageId/makeUnanchored))
+                                                               out-tuple (TupleImpl. worker-context
+                                                                                     values
+                                                                                     task-id
+                                                                                     out-stream-id
+                                                                                     tuple-id)]
+                                                           (transfer-fn out-task out-tuple)))
+                                         (if has-eventloggers?
+                                           (send-to-eventlogger executor-data task-data values component-id message-id rand))
+                                         (if (and rooted?
+                                                  (not (.isEmpty out-ids)))
+                                           (do
+                                             (.put pending root-id [task-id
+                                                                    message-id
+                                                                    {:stream out-stream-id 
+                                                                     :values (if debug? values nil)}
+                                                                    (if (sampler) (System/currentTimeMillis))])
+                                             (task/send-unanchored task-data
+                                                                   ACKER-INIT-STREAM-ID
+                                                                   [root-id (bit-xor-vals out-ids) task-id]))
+                                           (when message-id
+                                             (ack-spout-msg executor-data task-data message-id
+                                                            {:stream out-stream-id :values values}
+                                                            (if (sampler) 0) "0:")))
+                                         (or out-tasks [])
+                                         ))]]
+          (builtin-metrics/register-all (:builtin-metrics task-data) storm-conf (:user-context task-data))
+          (builtin-metrics/register-queue-metrics {:sendqueue (:batch-transfer-queue executor-data)
+                                                   :receive receive-queue}
+                                                  storm-conf (:user-context task-data))
+          (when (instance? ICredentialsListener spout-obj) (.setCredentials spout-obj initial-credentials))
+
+          (.open spout-obj
+                 storm-conf
+                 (:user-context task-data)
+                 (SpoutOutputCollector.
+                  (reify ISpoutOutputCollector
+                    (^long getPendingCount[this]
+                      (.size pending)
+                      )
+                    (^List emit [this ^String stream-id ^List tuple ^Object message-id]
+                      (send-spout-msg stream-id tuple message-id nil)
+                      )
+                    (^void emitDirect [this ^int out-task-id ^String stream-id
+                                       ^List tuple ^Object message-id]
+                      (send-spout-msg stream-id tuple message-id out-task-id)
+                      )
+                    (reportError [this error]
+                      (report-error error)
+                      )))))
+        (reset! open-or-prepare-was-called? true) 
+        (log-message "Opened spout " component-id ":" (keys task-datas))
+        (setup-metrics! executor-data)
+        
+        (fn []
+          ;; This design requires that spouts be non-blocking
+          (disruptor/consume-batch receive-queue event-handler)
+          
+          (let [active? @(:storm-active-atom executor-data)
+                curr-count (.get emitted-count)
+                backpressure-enabled ((:storm-conf executor-data) TOPOLOGY-BACKPRESSURE-ENABLE)
+                throttle-on (and backpressure-enabled
+                              @(:throttle-on (:worker executor-data)))
+                reached-max-spout-pending (and max-spout-pending
+                                               (>= (.size pending) max-spout-pending))
+                ]
+            (if active?
+              ; activated
+              (do
+                (when-not @last-active
+                  (reset! last-active true)
+                  (log-message "Activating spout " component-id ":" (keys task-datas))
+                  (fast-list-iter [^ISpout spout spouts] (.activate spout)))
+
+                (if (and (not (.isFull transfer-queue))
+                      (not throttle-on)
+                      (not reached-max-spout-pending))
+                  (fast-list-iter [^ISpout spout spouts] (.nextTuple spout))))
+              ; deactivated
+              (do
+                (when @last-active
+                  (reset! last-active false)
+                  (log-message "Deactivating spout " component-id ":" (keys task-datas))
+                  (fast-list-iter [^ISpout spout spouts] (.deactivate spout)))
+                ;; TODO: log that it's getting throttled
+                (Time/sleep 100)
+                (builtin-metrics/skipped-inactive! (:spout-throttling-metrics executor-data) (:stats executor-data))))
+
+            (if (and (= curr-count (.get emitted-count)) active?)
+              (do (.increment empty-emit-streak)
+                  (.emptyEmit spout-wait-strategy (.get empty-emit-streak))
+                  ;; update the spout throttling metrics
+                  (if throttle-on
+                    (builtin-metrics/skipped-throttle! (:spout-throttling-metrics executor-data) (:stats executor-data))
+                    (if reached-max-spout-pending
+                      (builtin-metrics/skipped-max-spout! (:spout-throttling-metrics executor-data) (:stats executor-data)))))
+              (.set empty-emit-streak 0)
+              ))
+          0))
+      :kill-fn (:report-error-and-die executor-data)
+      :factory? true
+      :thread-name (str component-id "-executor" (:executor-id executor-data)))]))
+
+(defn- tuple-time-delta! [^TupleImpl tuple]
+  (let [ms (.getProcessSampleStartTime tuple)]
+    (if ms
+      (time-delta-ms ms))))
+      
+(defn- tuple-execute-time-delta! [^TupleImpl tuple]
+  (let [ms (.getExecuteSampleStartTime tuple)]
+    (if ms
+      (time-delta-ms ms))))
+
+(defn put-xor! [^Map pending key id]
+  (let [curr (or (.get pending key) (long 0))]
+    (.put pending key (bit-xor curr id))))
+
+(defmethod mk-threads :bolt [executor-data task-datas initial-credentials]
+  (let [storm-conf (:storm-conf executor-data)
+        execute-sampler (mk-stats-sampler storm-conf)
+        executor-stats (:stats executor-data)
+        {:keys [storm-conf component-id worker-context transfer-fn report-error sampler
+                open-or-prepare-was-called?]} executor-data
+        rand (Random. (Utils/secureRandomLong))
+
+        tuple-action-fn (fn [task-id ^TupleImpl tuple]
+                          ;; synchronization needs to be done with a key provided by this bolt, otherwise:
+                          ;; spout 1 sends synchronization (s1), dies, same spout restarts somewhere else, sends synchronization (s2) and incremental update. s2 and update finish before s1 -> lose the incremental update
+                          ;; TODO: for state sync, need to first send sync messages in a loop and receive tuples until synchronization
+                          ;; buffer other tuples until fully synchronized, then process all of those tuples
+                          ;; then go into normal loop
+                          ;; spill to disk?
+                          ;; could be receiving incremental updates while waiting for sync or even a partial sync because of another failed task
+                          ;; should remember sync requests and include a random sync id in the request. drop anything not related to active sync requests
+                          ;; or just timeout the sync messages that are coming in until full sync is hit from that task
+                          ;; need to drop incremental updates from tasks where waiting for sync. otherwise, buffer the incremental updates
+                          ;; TODO: for state sync, need to check if tuple comes from state spout. if so, update state
+                          ;; TODO: how to handle incremental updates as well as synchronizations at same time
+                          ;; TODO: need to version tuples somehow
+                          
+                          ;;(log-debug "Received tuple " tuple " at task " task-id)
+                          ;; need to do it this way to avoid reflection
+                          (let [stream-id (.getSourceStreamId tuple)]
+                            (condp = stream-id
+                              Constants/CREDENTIALS_CHANGED_STREAM_ID 
+                                (let [task-data (get task-datas task-id)
+                                      bolt-obj (:object task-data)]
+                                  (when (instance? ICredentialsListener bolt-obj)
+                                    (.setCredentials bolt-obj (.getValue tuple 0))))
+                              Constants/METRICS_TICK_STREAM_ID (metrics-tick executor-data (get task-datas task-id) tuple)
+                              (let [task-data (get task-datas task-id)
+                                    ^IBolt bolt-obj (:object task-data)
+                                    user-context (:user-context task-data)
+                                    sampler? (sampler)
+                                    execute-sampler? (execute-sampler)
+                                    now (if (or sampler? execute-sampler?) (System/currentTimeMillis))
+                                    receive-queue (:receive-queue executor-data)]
+                                (when sampler?
+                                  (.setProcessSampleStartTime tuple now))
+                                (when execute-sampler?
+                                  (.setExecuteSampleStartTime tuple now))
+                                (.execute bolt-obj tuple)
+                                (let [delta (tuple-execute-time-delta! tuple)]
+                                  (when (= true (storm-conf TOPOLOGY-DEBUG))
+                                    (log-message "Execute done TUPLE " tuple " TASK: " task-id " DELTA: " delta))
+ 
+                                  (task/apply-hooks user-context .boltExecute (BoltExecuteInfo. tuple task-id delta))
+                                  (when delta
+                                    (stats/bolt-execute-tuple! executor-stats
+                                                               (.getSourceComponent tuple)
+                                                               (.getSourceStreamId tuple)
+                                                               delta)))))))
+        has-eventloggers? (has-eventloggers? storm-conf)]
+    
+    ;; TODO: can get any SubscribedState objects out of the context now
+
+    [(async-loop
+      (fn []
+        ;; If topology was started in inactive state, don't call prepare bolt until it's activated first.
+        (while (not @(:storm-active-atom executor-data))          
+          (Thread/sleep 100))
+        
+        (log-message "Preparing bolt " component-id ":" (keys task-datas))
+        (doseq [[task-id task-data] task-datas
+                :let [^IBolt bolt-obj (:object task-data)
+                      tasks-fn (:tasks-fn task-data)
+                      user-context (:user-context task-data)
+                      bolt-emit (fn [stream anchors values task]
+                                  (let [out-tasks (if task
+                                                    (tasks-fn task stream values)
+                                                    (tasks-fn stream values))]
+                                    (fast-list-iter [t out-tasks]
+                                                    (let [anchors-to-ids (HashMap.)]
+                                                      (fast-list-iter [^TupleImpl a anchors]
+                                                                      (let [root-ids (-> a .getMessageId .getAnchorsToIds .keySet)]
+                                                                        (when (pos? (count root-ids))
+                                                                          (let [edge-id (MessageId/generateId rand)]
+                                                                            (.updateAckVal a edge-id)
+                                                                            (fast-list-iter [root-id root-ids]
+                                                                                            (put-xor! anchors-to-ids root-id edge-id))
+                                                                            ))))
+                                                        (let [tuple (TupleImpl. worker-context
+                                                                               values
+                                                                               task-id
+                                                                               stream
+                                                                               (MessageId/makeId anchors-to-ids))]
+                                                          (transfer-fn t tuple))))
+                                    (if has-eventloggers?
+                                      (send-to-eventlogger executor-data task-data values component-id nil rand))
+                                    (or out-tasks [])))]]
+          (builtin-metrics/register-all (:builtin-metrics task-data) storm-conf user-context)
+          (when (instance? ICredentialsListener bolt-obj) (.setCredentials bolt-obj initial-credentials)) 
+          (if (= component-id Constants/SYSTEM_COMPONENT_ID)
+            (do
+              (builtin-metrics/register-queue-metrics {:sendqueue (:batch-transfer-queue executor-data)
+                                                       :receive (:receive-queue executor-data)
+                                                       :transfer (:transfer-queue (:worker executor-data))}
+                                                      storm-conf user-context)
+              (builtin-metrics/register-iconnection-client-metrics (:cached-node+port->socket (:worker executor-data)) storm-conf user-context)
+              (builtin-metrics/register-iconnection-server-metric (:receiver (:worker executor-data)) storm-conf user-context))
+            (builtin-metrics/register-queue-metrics {:sendqueue (:batch-transfer-queue executor-data)
+                                                     :receive (:receive-queue executor-data)}
+                                                    storm-conf user-context)
+            )
+
+          (.prepare bolt-obj
+                    storm-conf
+                    user-context
+                    (OutputCollector.
+                     (reify IOutputCollector
+                       (emit [this stream anchors values]
+                         (bolt-emit stream anchors values nil))
+                       (emitDirect [this task stream anchors values]
+                         (bolt-emit stream anchors values task))
+                       (^void ack [this ^Tuple tuple]
+                         (let [^TupleImpl tuple tuple
+                               ack-val (.getAckVal tuple)]
+                           (fast-map-iter [[root id] (.. tuple getMessageId getAnchorsToIds)]
+                                          (task/send-unanchored task-data
+                                                                ACKER-ACK-STREAM-ID
+                                                                [root (bit-xor id ack-val)])))
+                         (let [delta (tuple-time-delta! tuple)
+                               debug? (= true (storm-conf TOPOLOGY-DEBUG))]
+                           (when debug? 
+                             (log-message "BOLT ack TASK: " task-id " TIME: " delta " TUPLE: " tuple))
+                           (task/apply-hooks user-context .boltAck (BoltAckInfo. tuple task-id delta))
+                           (when delta
+                             (stats/bolt-acked-tuple! executor-stats
+                                                      (.getSourceComponent tuple)
+                                                      (.getSourceStreamId tuple)
+                                                      delta))))
+                       (^void fail [this ^Tuple tuple]
+                         (fast-list-iter [root (.. tuple getMessageId getAnchors)]
+                                         (task/send-unanchored task-data
+                                                               ACKER-FAIL-STREAM-ID
+                                                               [root]))
+                         (let [delta (tuple-time-delta! tuple)
+                               debug? (= true (storm-conf TOPOLOGY-DEBUG))]
+                           (when debug? 
+                             (log-message "BOLT fail TASK: " task-id " TIME: " delta " TUPLE: " tuple))
+                           (task/apply-hooks user-context .boltFail (BoltFailInfo. tuple task-id delta))
+                           (when delta
+                             (stats/bolt-failed-tuple! executor-stats
+                                                       (.getSourceComponent tuple)
+                                                       (.getSourceStreamId tuple)
+                                                       delta))))
+                       (reportError [this error]
+                         (report-error error)
+                         )))))
+        (reset! open-or-prepare-was-called? true)        
+        (log-message "Prepared bolt " component-id ":" (keys task-datas))
+        (setup-metrics! executor-data)
+
+        (let [receive-queue (:receive-queue executor-data)
+              event-handler (mk-task-receiver executor-data tuple-action-fn)]
+          (fn []            
+            (disruptor/consume-batch-when-available receive-queue event-handler)
+            0)))
+      :kill-fn (:report-error-and-die executor-data)
+      :factory? true
+      :thread-name (str component-id "-executor" (:executor-id executor-data)))]))
+
+(defmethod close-component :spout [executor-data spout]
+  (.close spout))
+
+(defmethod close-component :bolt [executor-data bolt]
+  (.cleanup bolt))
+
+;; TODO: refactor this to be part of an executor-specific map
+(defmethod mk-executor-stats :spout [_ rate]
+  (stats/mk-spout-stats rate))
+
+(defmethod mk-executor-stats :bolt [_ rate]
+  (stats/mk-bolt-stats rate))
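
For context on the ack path added above: for every anchor root in the tuple's message id, the bolt sends [root (bit-xor id ack-val)] on ACKER-ACK-STREAM-ID, and a root whose accumulated XOR reaches zero is treated as a fully processed tuple tree. A minimal Java sketch of that XOR bookkeeping, with hypothetical class and method names (not Storm's actual acker implementation):

    import java.util.HashMap;
    import java.util.Map;

    // Hypothetical illustration of XOR-based ack tracking per tuple-tree root.
    public class XorAckSketch {
        private final Map<Long, Long> pending = new HashMap<>();

        // Every edge id is XORed in twice (once when anchored, once when acked),
        // so a fully processed tree cancels back to zero.
        public void xorIn(long root, long value) {
            pending.merge(root, value, (a, b) -> a ^ b);
        }

        public boolean isComplete(long root) {
            return pending.getOrDefault(root, 0L) == 0L;
        }

        public static void main(String[] args) {
            XorAckSketch acker = new XorAckSketch();
            long root = 42L, edge = 7L;
            acker.xorIn(root, edge);                      // edge anchored
            System.out.println(acker.isComplete(root));   // false
            acker.xorIn(root, edge);                      // edge acked, XOR cancels
            System.out.println(acker.isComplete(root));   // true
        }
    }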


[10/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/Constants.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/Constants.java b/storm-core/src/jvm/backtype/storm/Constants.java
deleted file mode 100644
index 35c252f..0000000
--- a/storm-core/src/jvm/backtype/storm/Constants.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm;
-
-import backtype.storm.coordination.CoordinatedBolt;
-import clojure.lang.RT;
-
-
-public class Constants {
-    public static final String COORDINATED_STREAM_ID = CoordinatedBolt.class.getName() + "/coord-stream"; 
-
-    public static final long SYSTEM_TASK_ID = -1;
-    public static final Object SYSTEM_EXECUTOR_ID = RT.readString("[-1 -1]");
-    public static final String SYSTEM_COMPONENT_ID = "__system";
-    public static final String SYSTEM_TICK_STREAM_ID = "__tick";
-    public static final String METRICS_COMPONENT_ID_PREFIX = "__metrics";
-    public static final String METRICS_STREAM_ID = "__metrics";
-    public static final String METRICS_TICK_STREAM_ID = "__metrics_tick";
-    public static final String CREDENTIALS_CHANGED_STREAM_ID = "__credentials";
-}
-    

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/ICredentialsListener.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/ICredentialsListener.java b/storm-core/src/jvm/backtype/storm/ICredentialsListener.java
deleted file mode 100644
index 1a7bc1b..0000000
--- a/storm-core/src/jvm/backtype/storm/ICredentialsListener.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package backtype.storm;
-
-import java.util.Map;
-
-/**
- * Allows a bolt or a spout to be informed when the credentials of the topology have changed.
- */
-public interface ICredentialsListener {
-    /**
-     * Called when the credentials of a topology have changed.
-     * @param credentials the new credentials, could be null.
-     */
-    public void setCredentials(Map<String,String> credentials);
-}
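
A component opts in to credential updates simply by implementing this interface; setCredentials is invoked with the new (possibly null) credential map whenever the topology's credentials change. A hedged sketch of a bolt doing so, using the pre-migration backtype.storm package names from this commit (the bolt class and the "my.service.token" key are illustrative):

    import java.util.Map;

    import backtype.storm.ICredentialsListener;
    import backtype.storm.topology.BasicOutputCollector;
    import backtype.storm.topology.OutputFieldsDeclarer;
    import backtype.storm.topology.base.BaseBasicBolt;
    import backtype.storm.tuple.Tuple;

    // Hypothetical bolt that refreshes a token whenever topology credentials change.
    public class TokenAwareBolt extends BaseBasicBolt implements ICredentialsListener {
        private volatile String token;

        @Override
        public void setCredentials(Map<String, String> credentials) {
            // credentials may be null, per the interface contract above
            token = credentials == null ? null : credentials.get("my.service.token");
        }

        @Override
        public void execute(Tuple input, BasicOutputCollector collector) {
            // use the (possibly refreshed) token when talking to the external service
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            // no output streams in this sketch
        }
    }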

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/ILocalCluster.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/ILocalCluster.java b/storm-core/src/jvm/backtype/storm/ILocalCluster.java
deleted file mode 100644
index 7d5aa35..0000000
--- a/storm-core/src/jvm/backtype/storm/ILocalCluster.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm;
-
-import backtype.storm.generated.AlreadyAliveException;
-import backtype.storm.generated.ClusterSummary;
-import backtype.storm.generated.InvalidTopologyException;
-import backtype.storm.generated.KillOptions;
-import backtype.storm.generated.SubmitOptions;
-import backtype.storm.generated.NotAliveException;
-import backtype.storm.generated.RebalanceOptions;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.generated.TopologyInfo;
-import backtype.storm.generated.Credentials;
-
-import java.util.Map;
-
-
-public interface ILocalCluster {
-    void submitTopology(String topologyName, Map conf, StormTopology topology) throws AlreadyAliveException, InvalidTopologyException;
-    void submitTopologyWithOpts(String topologyName, Map conf, StormTopology topology, SubmitOptions submitOpts) throws AlreadyAliveException, InvalidTopologyException;
-    void uploadNewCredentials(String topologyName, Credentials creds);
-    void killTopology(String topologyName) throws NotAliveException;
-    void killTopologyWithOpts(String name, KillOptions options) throws NotAliveException;
-    void activate(String topologyName) throws NotAliveException;
-    void deactivate(String topologyName) throws NotAliveException;
-    void rebalance(String name, RebalanceOptions options) throws NotAliveException;
-    void shutdown();
-    String getTopologyConf(String id);
-    StormTopology getTopology(String id);
-    ClusterSummary getClusterInfo();
-    TopologyInfo getTopologyInfo(String id);
-    Map getState();
-}
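
A typical consumer of this interface is an in-process test: LocalCluster (which implements ILocalCluster) is started, a topology is submitted, and the cluster is shut down afterwards. A hedged sketch, assuming the StormTopology is built elsewhere:

    import java.util.HashMap;
    import java.util.Map;

    import backtype.storm.ILocalCluster;
    import backtype.storm.LocalCluster;
    import backtype.storm.generated.StormTopology;

    // Hypothetical in-process test harness; `topology` is assumed to be built elsewhere.
    public class LocalClusterSketch {
        static void runBriefly(StormTopology topology) throws Exception {
            ILocalCluster cluster = new LocalCluster();
            try {
                Map conf = new HashMap();
                conf.put("topology.debug", true);
                cluster.submitTopology("demo-topology", conf, topology);
                Thread.sleep(10_000);                   // let the topology process for a while
                cluster.killTopology("demo-topology");
            } finally {
                cluster.shutdown();
            }
        }
    }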

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/ILocalDRPC.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/ILocalDRPC.java b/storm-core/src/jvm/backtype/storm/ILocalDRPC.java
deleted file mode 100644
index e478dca..0000000
--- a/storm-core/src/jvm/backtype/storm/ILocalDRPC.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm;
-
-import backtype.storm.daemon.Shutdownable;
-import backtype.storm.generated.DistributedRPC;
-import backtype.storm.generated.DistributedRPCInvocations;
-
-
-public interface ILocalDRPC extends DistributedRPC.Iface, DistributedRPCInvocations.Iface, Shutdownable {
-    public String getServiceId();    
-}
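
ILocalDRPC is what in-process DRPC tests program against: LocalDRPC implements it, a topology wires the instance into its DRPC spout, and callers invoke execute from the inherited DistributedRPC.Iface. A hedged sketch (the function name and argument are placeholders, and the topology submission is omitted):

    import backtype.storm.ILocalDRPC;
    import backtype.storm.LocalDRPC;

    // Hypothetical DRPC call against an in-process DRPC server.
    public class LocalDrpcSketch {
        public static void main(String[] args) throws Exception {
            ILocalDRPC drpc = new LocalDRPC();
            try {
                // A topology whose DRPC spout is bound to "reach" would be submitted
                // to a LocalCluster here; without it this call would simply block.
                String result = drpc.execute("reach", "http://example.com");
                System.out.println("result: " + result);
            } finally {
                drpc.shutdown();
            }
        }
    }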

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/ISubmitterHook.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/ISubmitterHook.java b/storm-core/src/jvm/backtype/storm/ISubmitterHook.java
deleted file mode 100644
index 331c88f..0000000
--- a/storm-core/src/jvm/backtype/storm/ISubmitterHook.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm;
-
-import backtype.storm.generated.StormTopology;
-import backtype.storm.generated.TopologyInfo;
-
-import java.util.Map;
-
-/**
- * if FQCN of an implementation of this class is specified by setting the config storm.topology.submission.notifier.plugin.class,
- * that class's notify method will be invoked when a topology is successfully submitted via StormSubmitter class.
- */
-public interface ISubmitterHook {
-    public void notify(TopologyInfo topologyInfo, Map stormConf, StormTopology topology) throws IllegalAccessException;
-}
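
A hedged sketch of such a hook: the class below only logs the submission, and would be enabled by setting storm.topology.submission.notifier.plugin.class to its fully-qualified class name (the class and logger names are illustrative):

    import java.util.Map;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    import backtype.storm.ISubmitterHook;
    import backtype.storm.generated.StormTopology;
    import backtype.storm.generated.TopologyInfo;

    // Hypothetical hook that records every successful topology submission.
    public class LoggingSubmitterHook implements ISubmitterHook {
        private static final Logger LOG = LoggerFactory.getLogger(LoggingSubmitterHook.class);

        @Override
        public void notify(TopologyInfo topologyInfo, Map stormConf, StormTopology topology) {
            LOG.info("Topology {} submitted with {} spouts and {} bolts",
                     topologyInfo.get_name(),
                     topology.get_spouts_size(),
                     topology.get_bolts_size());
        }
    }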

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/LogWriter.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/LogWriter.java b/storm-core/src/jvm/backtype/storm/LogWriter.java
deleted file mode 100644
index 849f5ca..0000000
--- a/storm-core/src/jvm/backtype/storm/LogWriter.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm;
-
-import java.io.BufferedReader;
-import java.io.InputStreamReader;
-import java.io.InputStream;
-import java.io.IOException;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-/**
- * Launch a sub process and write files out to logs.
- */
-public class LogWriter extends Thread {
-    private Logger logger;
-    private BufferedReader in;
-
-    public LogWriter(InputStream in, Logger logger) {
-        this.in = new BufferedReader(new InputStreamReader(in));
-        this.logger = logger;
-    }
-
-    public void run() {
-        Logger logger = this.logger;
-        BufferedReader in = this.in;
-        String line;
-        try {
-            while ((line = in.readLine()) != null) {
-                logger.info(line);
-            }
-        } catch (IOException e) {
-            logger.error("Internal ERROR", e);
-        } finally {
-            try {
-                in.close();
-            } catch (IOException e) {
-                logger.error("Internal ERROR", e);
-            }
-        }
-    }
-
-    public void close() throws Exception {
-        this.join();
-    }
-
-    public static void main(String [] args) throws Exception {
-        ProcessBuilder pb = new ProcessBuilder(args);
-        Process p = pb.start();
-        LogWriter err = null;
-        LogWriter in = null;
-        int ret = -1;
-        try {
-            Logger logger = LoggerFactory.getLogger("STDERR");
-            err = new LogWriter(p.getErrorStream(), logger);
-            err.start();
-            in = new LogWriter(p.getInputStream(), logger);
-            in.start();
-            ret = p.waitFor();
-        } finally {
-          if (err != null) err.close();
-          if (in != null) in.close();
-        }
-        System.exit(ret);
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/StormSubmitter.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/StormSubmitter.java b/storm-core/src/jvm/backtype/storm/StormSubmitter.java
deleted file mode 100644
index 725b0b1..0000000
--- a/storm-core/src/jvm/backtype/storm/StormSubmitter.java
+++ /dev/null
@@ -1,496 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm;
-
-import java.io.File;
-import java.nio.ByteBuffer;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-import java.util.HashMap;
-import java.util.Map;
-
-import backtype.storm.scheduler.resource.ResourceUtils;
-import backtype.storm.validation.ConfigValidation;
-import org.apache.commons.lang.StringUtils;
-import org.apache.thrift.TException;
-import org.json.simple.JSONValue;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import backtype.storm.security.auth.IAutoCredentials;
-import backtype.storm.security.auth.AuthUtils;
-import backtype.storm.generated.*;
-import backtype.storm.utils.BufferFileInputStream;
-import backtype.storm.utils.NimbusClient;
-import backtype.storm.utils.Utils;
-
-/**
- * Use this class to submit topologies to run on the Storm cluster. You should run your program
- * with the "storm jar" command from the command-line, and then use this class to
- * submit your topologies.
- */
-public class StormSubmitter {
-    public static final Logger LOG = LoggerFactory.getLogger(StormSubmitter.class);
-
-    private static final int THRIFT_CHUNK_SIZE_BYTES = 307200;
-
-    private static ILocalCluster localNimbus = null;
-
-    private static String generateZookeeperDigestSecretPayload() {
-        return Utils.secureRandomLong() + ":" + Utils.secureRandomLong();
-    }
-
-    public static final Pattern zkDigestPattern = Pattern.compile("\\S+:\\S+");
-
-    public static boolean validateZKDigestPayload(String payload) {
-        if (payload != null) {
-            Matcher m = zkDigestPattern.matcher(payload);
-            return m.matches();
-        }
-        return false;
-    }
-
-    @SuppressWarnings("unchecked")
-    public static Map prepareZookeeperAuthentication(Map conf) {
-        Map toRet = new HashMap();
-
-        // Is the topology ZooKeeper authentication configuration unset?
-        if (! conf.containsKey(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD) ||
-                conf.get(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD) == null ||
-                !  validateZKDigestPayload((String)
-                    conf.get(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD))) {
-
-            String secretPayload = generateZookeeperDigestSecretPayload();
-            toRet.put(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD, secretPayload);
-            LOG.info("Generated ZooKeeper secret payload for MD5-digest: " + secretPayload);
-        }
-
-        // This should always be set to digest.
-        toRet.put(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_SCHEME, "digest");
-
-        return toRet;
-    }
-
-    private static Map<String,String> populateCredentials(Map conf, Map<String, String> creds) {
-        Map<String,String> ret = new HashMap<>();
-        for (IAutoCredentials autoCred: AuthUtils.GetAutoCredentials(conf)) {
-            LOG.info("Running "+autoCred);
-            autoCred.populateCredentials(ret);
-        }
-        if (creds != null) {
-            ret.putAll(creds);
-        }
-        return ret;
-    }
-
-    /**
-     * Push a new set of credentials to the running topology.
-     * @param name the name of the topology to push credentials to.
-     * @param stormConf the topology-specific configuration, if desired. See {@link Config}.
-     * @param credentials the credentials to push.
-     * @throws AuthorizationException if you are not authorized to push credentials.
-     * @throws NotAliveException if the topology is not alive
-     * @throws InvalidTopologyException if any other error happens
-     */
-    public static void pushCredentials(String name, Map stormConf, Map<String, String> credentials)
-            throws AuthorizationException, NotAliveException, InvalidTopologyException {
-        stormConf = new HashMap(stormConf);
-        stormConf.putAll(Utils.readCommandLineOpts());
-        Map conf = Utils.readStormConfig();
-        conf.putAll(stormConf);
-        Map<String,String> fullCreds = populateCredentials(conf, credentials);
-        if (fullCreds.isEmpty()) {
-            LOG.warn("No credentials were found to push to " + name);
-            return;
-        }
-        try {
-            if(localNimbus!=null) {
-                LOG.info("Pushing Credentials to topology " + name + " in local mode");
-                localNimbus.uploadNewCredentials(name, new Credentials(fullCreds));
-            } else {
-                NimbusClient client = NimbusClient.getConfiguredClient(conf);
-                try {
-                    LOG.info("Uploading new credentials to " +  name);
-                    client.getClient().uploadNewCredentials(name, new Credentials(fullCreds));
-                } finally {
-                    client.close();
-                }
-            }
-            LOG.info("Finished submitting topology: " +  name);
-        } catch(TException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-
-    /**
-     * Submits a topology to run on the cluster. A topology runs forever or until
-     * explicitly killed.
-     *
-     *
-     * @param name the name of the storm.
-     * @param stormConf the topology-specific configuration. See {@link Config}.
-     * @param topology the processing to execute.
-     * @throws AlreadyAliveException if a topology with this name is already running
-     * @throws InvalidTopologyException if an invalid topology was submitted
-     * @throws AuthorizationException if authorization is failed
-     */
-    public static void submitTopology(String name, Map stormConf, StormTopology topology)
-            throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
-        submitTopology(name, stormConf, topology, null, null);
-    }
-
-    /**
-     * Submits a topology to run on the cluster. A topology runs forever or until
-     * explicitly killed.
-     *
-     * @param name the name of the storm.
-     * @param stormConf the topology-specific configuration. See {@link Config}.
-     * @param topology the processing to execute.
-     * @param opts to manipulate the starting of the topology.
-     * @throws AlreadyAliveException if a topology with this name is already running
-     * @throws InvalidTopologyException if an invalid topology was submitted
-     * @throws AuthorizationException if authorization is failed
-     */
-    public static void submitTopology(String name, Map stormConf, StormTopology topology, SubmitOptions opts)
-            throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
-        submitTopology(name, stormConf, topology, opts, null);
-    }
-
-    /**
-     * Submits a topology to run on the cluster as a particular user. A topology runs forever or until
-     * explicitly killed.
-     *
-     * @param name
-     * @param stormConf
-     * @param topology
-     * @param opts
-     * @param progressListener
-     * @param asUser The user as which this topology should be submitted.
-     * @throws AlreadyAliveException
-     * @throws InvalidTopologyException
-     * @throws AuthorizationException
-     * @throws IllegalArgumentException thrown if configs will yield an unschedulable topology. validateConfs validates confs
-     */
-    public static void submitTopologyAs(String name, Map stormConf, StormTopology topology, SubmitOptions opts, ProgressListener progressListener, String asUser)
-            throws AlreadyAliveException, InvalidTopologyException, AuthorizationException, IllegalArgumentException {
-        if(!Utils.isValidConf(stormConf)) {
-            throw new IllegalArgumentException("Storm conf is not valid. Must be json-serializable");
-        }
-        stormConf = new HashMap(stormConf);
-        stormConf.putAll(Utils.readCommandLineOpts());
-        Map conf = Utils.readStormConfig();
-        conf.putAll(stormConf);
-        stormConf.putAll(prepareZookeeperAuthentication(conf));
-
-        validateConfs(conf, topology);
-
-        Map<String,String> passedCreds = new HashMap<>();
-        if (opts != null) {
-            Credentials tmpCreds = opts.get_creds();
-            if (tmpCreds != null) {
-                passedCreds = tmpCreds.get_creds();
-            }
-        }
-        Map<String,String> fullCreds = populateCredentials(conf, passedCreds);
-        if (!fullCreds.isEmpty()) {
-            if (opts == null) {
-                opts = new SubmitOptions(TopologyInitialStatus.ACTIVE);
-            }
-            opts.set_creds(new Credentials(fullCreds));
-        }
-        try {
-            if(localNimbus!=null) {
-                LOG.info("Submitting topology " + name + " in local mode");
-                if(opts!=null) {
-                    localNimbus.submitTopologyWithOpts(name, stormConf, topology, opts);
-                } else {
-                    // this is for backwards compatibility
-                    localNimbus.submitTopology(name, stormConf, topology);
-                }
-            } else {
-                String serConf = JSONValue.toJSONString(stormConf);
-                NimbusClient client = NimbusClient.getConfiguredClientAs(conf, asUser);
-                if(topologyNameExists(conf, name, asUser)) {
-                    throw new RuntimeException("Topology with name `" + name + "` already exists on cluster");
-                }
-                String jar = submitJarAs(conf, System.getProperty("storm.jar"), progressListener, asUser);
-                try {
-                    LOG.info("Submitting topology " +  name + " in distributed mode with conf " + serConf);
-                    if(opts!=null) {
-                        client.getClient().submitTopologyWithOpts(name, jar, serConf, topology, opts);
-                    } else {
-                        // this is for backwards compatibility
-                        client.getClient().submitTopology(name, jar, serConf, topology);
-                    }
-                } catch(InvalidTopologyException e) {
-                    LOG.warn("Topology submission exception: "+e.get_msg());
-                    throw e;
-                } catch(AlreadyAliveException e) {
-                    LOG.warn("Topology already alive exception", e);
-                    throw e;
-                } finally {
-                    client.close();
-                }
-            }
-            LOG.info("Finished submitting topology: " +  name);
-        } catch(TException e) {
-            throw new RuntimeException(e);
-        }
-        invokeSubmitterHook(name, asUser, conf, topology);
-
-    }
-
-    private static void invokeSubmitterHook(String name, String asUser, Map stormConf, StormTopology topology) {
-        try {
-            if (stormConf.containsKey(Config.STORM_TOPOLOGY_SUBMISSION_NOTIFIER_PLUGIN)) {
-                ISubmitterHook submitterHook = (ISubmitterHook) Class.forName(stormConf.get(Config.STORM_TOPOLOGY_SUBMISSION_NOTIFIER_PLUGIN).toString()).newInstance();
-                TopologyInfo topologyInfo = Utils.getTopologyInfo(name, asUser, stormConf);
-                submitterHook.notify(topologyInfo, stormConf, topology);
-            }
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    /**
-     * Submits a topology to run on the cluster. A topology runs forever or until
-     * explicitly killed.
-     *
-     *
-     * @param name the name of the storm.
-     * @param stormConf the topology-specific configuration. See {@link Config}.
-     * @param topology the processing to execute.
-     * @param opts to manipulate the starting of the topology
-     * @param progressListener to track the progress of the jar upload process
-     * @throws AlreadyAliveException if a topology with this name is already running
-     * @throws InvalidTopologyException if an invalid topology was submitted
-     * @throws AuthorizationException if authorization is failed
-     */
-    @SuppressWarnings("unchecked")
-    public static void submitTopology(String name, Map stormConf, StormTopology topology, SubmitOptions opts,
-             ProgressListener progressListener) throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
-        submitTopologyAs(name, stormConf, topology, opts, progressListener, null);
-    }
-
-    /**
-     * Submits a topology to run on the cluster with a progress bar. A topology runs forever or until
-     * explicitly killed.
-     *
-     *
-     * @param name the name of the storm.
-     * @param stormConf the topology-specific configuration. See {@link Config}.
-     * @param topology the processing to execute.
-     * @throws AlreadyAliveException if a topology with this name is already running
-     * @throws InvalidTopologyException if an invalid topology was submitted
-     * @throws AuthorizationException if authorization is failed
-     */
-
-    public static void submitTopologyWithProgressBar(String name, Map stormConf, StormTopology topology) throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
-        submitTopologyWithProgressBar(name, stormConf, topology, null);
-    }
-
-    /**
-     * Submits a topology to run on the cluster with a progress bar. A topology runs forever or until
-     * explicitly killed.
-     *
-     *
-     * @param name the name of the storm.
-     * @param stormConf the topology-specific configuration. See {@link Config}.
-     * @param topology the processing to execute.
-     * @param opts to manipulate the starting of the topology
-     * @throws AlreadyAliveException if a topology with this name is already running
-     * @throws InvalidTopologyException if an invalid topology was submitted
-     * @throws AuthorizationException if authorization is failed
-     */
-
-    public static void submitTopologyWithProgressBar(String name, Map stormConf, StormTopology topology, SubmitOptions opts) throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
-        // show a progress bar so we know we're not stuck (especially on slow connections)
-        submitTopology(name, stormConf, topology, opts, new StormSubmitter.ProgressListener() {
-            @Override
-            public void onStart(String srcFile, String targetFile, long totalBytes) {
-                System.out.printf("Start uploading file '%s' to '%s' (%d bytes)\n", srcFile, targetFile, totalBytes);
-            }
-
-            @Override
-            public void onProgress(String srcFile, String targetFile, long bytesUploaded, long totalBytes) {
-                int length = 50;
-                int p = (int)((length * bytesUploaded) / totalBytes);
-                String progress = StringUtils.repeat("=", p);
-                String todo = StringUtils.repeat(" ", length - p);
-
-                System.out.printf("\r[%s%s] %d / %d", progress, todo, bytesUploaded, totalBytes);
-            }
-
-            @Override
-            public void onCompleted(String srcFile, String targetFile, long totalBytes) {
-                System.out.printf("\nFile '%s' uploaded to '%s' (%d bytes)\n", srcFile, targetFile, totalBytes);
-            }
-        });
-    }
-
-    private static boolean topologyNameExists(Map conf, String name, String asUser) {
-        NimbusClient client = NimbusClient.getConfiguredClientAs(conf, asUser);
-        try {
-            ClusterSummary summary = client.getClient().getClusterInfo();
-            for(TopologySummary s : summary.get_topologies()) {
-                if(s.get_name().equals(name)) {
-                    return true;
-                }
-            }
-            return false;
-
-        } catch(Exception e) {
-            throw new RuntimeException(e);
-        } finally {
-            client.close();
-        }
-    }
-
-    private static String submitJar(Map conf, ProgressListener listener) {
-        return  submitJar(conf, System.getProperty("storm.jar"), listener);
-    }
-
-    /**
-     * Submit jar file
-     * @param conf the topology-specific configuration. See {@link Config}.
-     * @param localJar file path of the jar file to submit
-     * @return the remote location of the submitted jar
-     */
-    public static String submitJar(Map conf, String localJar) {
-        return submitJar(conf, localJar, null);
-    }
-
-
-    public static String submitJarAs(Map conf, String localJar, ProgressListener listener, String asUser) {
-        if (localJar == null) {
-            throw new RuntimeException("Must submit topologies using the 'storm' client script so that StormSubmitter knows which jar to upload.");
-        }
-
-        NimbusClient client = NimbusClient.getConfiguredClientAs(conf, asUser);
-        try {
-            String uploadLocation = client.getClient().beginFileUpload();
-            LOG.info("Uploading topology jar " + localJar + " to assigned location: " + uploadLocation);
-            BufferFileInputStream is = new BufferFileInputStream(localJar, THRIFT_CHUNK_SIZE_BYTES);
-
-            long totalSize = new File(localJar).length();
-            if (listener != null) {
-                listener.onStart(localJar, uploadLocation, totalSize);
-            }
-
-            long bytesUploaded = 0;
-            while(true) {
-                byte[] toSubmit = is.read();
-                bytesUploaded += toSubmit.length;
-                if (listener != null) {
-                    listener.onProgress(localJar, uploadLocation, bytesUploaded, totalSize);
-                }
-
-                if(toSubmit.length==0) break;
-                client.getClient().uploadChunk(uploadLocation, ByteBuffer.wrap(toSubmit));
-            }
-            client.getClient().finishFileUpload(uploadLocation);
-
-            if (listener != null) {
-                listener.onCompleted(localJar, uploadLocation, totalSize);
-            }
-
-            LOG.info("Successfully uploaded topology jar to assigned location: " + uploadLocation);
-            return uploadLocation;
-        } catch(Exception e) {
-            throw new RuntimeException(e);
-        } finally {
-            client.close();
-        }
-    }
-
-    /**
-     * Submit jar file
-     * @param conf the topology-specific configuration. See {@link Config}.
-     * @param localJar file path of the jar file to submit
-     * @param listener progress listener to track the jar file upload
-     * @return the remote location of the submitted jar
-     */
-    public static String submitJar(Map conf, String localJar, ProgressListener listener) {
-        return submitJarAs(conf,localJar, listener, null);
-    }
-
-    /**
-     * Interface used to track progress of file upload
-     */
-    public interface ProgressListener {
-        /**
-         * called before file is uploaded
-         * @param srcFile - jar file to be uploaded
-         * @param targetFile - destination file
-         * @param totalBytes - total number of bytes of the file
-         */
-        public void onStart(String srcFile, String targetFile, long totalBytes);
-
-        /**
-         * called whenever a chunk of bytes is uploaded
-         * @param srcFile - jar file to be uploaded
-         * @param targetFile - destination file
-         * @param bytesUploaded - number of bytes transferred so far
-         * @param totalBytes - total number of bytes of the file
-         */
-        public void onProgress(String srcFile, String targetFile, long bytesUploaded, long totalBytes);
-
-        /**
-         * called when the file is uploaded
-         * @param srcFile - jar file to be uploaded
-         * @param targetFile - destination file
-         * @param totalBytes - total number of bytes of the file
-         */
-        public void onCompleted(String srcFile, String targetFile, long totalBytes);
-    }
-
-    private static void validateConfs(Map stormConf, StormTopology topology) throws IllegalArgumentException {
-        ConfigValidation.validateFields(stormConf);
-        validateTopologyWorkerMaxHeapSizeMBConfigs(stormConf, topology);
-    }
-
-    private static void validateTopologyWorkerMaxHeapSizeMBConfigs(Map stormConf, StormTopology topology) {
-        double largestMemReq = getMaxExecutorMemoryUsageForTopo(topology, stormConf);
-        Double topologyWorkerMaxHeapSize = Utils.getDouble(stormConf.get(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB));
-        if(topologyWorkerMaxHeapSize < largestMemReq) {
-            throw new IllegalArgumentException("Topology will not be able to be successfully scheduled: Config TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB="
-                    +Utils.getDouble(stormConf.get(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB)) + " < "
-                    + largestMemReq + " (Largest memory requirement of a component in the topology). Perhaps set TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB to a larger amount");
-        }
-    }
-
-    private static double getMaxExecutorMemoryUsageForTopo(StormTopology topology, Map topologyConf) {
-        double largestMemoryOperator = 0.0;
-        for(Map<String, Double> entry : ResourceUtils.getBoltsResources(topology, topologyConf).values()) {
-            double memoryRequirement = entry.get(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB)
-                    + entry.get(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB);
-            if(memoryRequirement > largestMemoryOperator) {
-                largestMemoryOperator = memoryRequirement;
-            }
-        }
-        for(Map<String, Double> entry : ResourceUtils.getSpoutsResources(topology, topologyConf).values()) {
-            double memoryRequirement = entry.get(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB)
-                    + entry.get(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB);
-            if(memoryRequirement > largestMemoryOperator) {
-                largestMemoryOperator = memoryRequirement;
-            }
-        }
-        return largestMemoryOperator;
-    }
-}
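
The usual entry point into the class above is submitTopology or submitTopologyWithProgressBar, called from a main method that is run via `storm jar` so the storm.jar system property points at the jar to upload. A hedged sketch, using Storm's bundled test spout and bolt as stand-ins for real components:

    import backtype.storm.Config;
    import backtype.storm.StormSubmitter;
    import backtype.storm.testing.TestWordCounter;
    import backtype.storm.testing.TestWordSpout;
    import backtype.storm.topology.TopologyBuilder;

    // Sketch of a submission entry point run via `storm jar`.
    public class SubmitSketch {
        public static void main(String[] args) throws Exception {
            TopologyBuilder builder = new TopologyBuilder();
            builder.setSpout("words", new TestWordSpout(), 2);
            builder.setBolt("count", new TestWordCounter(), 4).shuffleGrouping("words");

            Config conf = new Config();
            conf.setNumWorkers(2);

            // Prints upload progress for the jar exposed through the storm.jar property.
            StormSubmitter.submitTopologyWithProgressBar("word-count", conf, builder.createTopology());
        }
    }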

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/blobstore/AtomicOutputStream.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/blobstore/AtomicOutputStream.java b/storm-core/src/jvm/backtype/storm/blobstore/AtomicOutputStream.java
deleted file mode 100644
index f35b7a7..0000000
--- a/storm-core/src/jvm/backtype/storm/blobstore/AtomicOutputStream.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.blobstore;
-
-import java.io.IOException;
-import java.io.OutputStream;
-
-/**
- * An output stream where all of the data is committed on close,
- * or can be canceled with cancel.
- */
-public abstract class AtomicOutputStream extends OutputStream {
-    /**
-     * Cancel all of the writes associated with this stream and close it.
-     */ 
-    public abstract void cancel() throws IOException;
-}
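
The contract above (commit on close, cancel to abandon) is normally used in a write-then-commit pattern. A hedged sketch against any AtomicOutputStream implementation; the stream itself would come from something like BlobStore.createBlob(...) or updateBlob(...):

    import java.io.IOException;

    import backtype.storm.blobstore.AtomicOutputStream;

    // Sketch of the write-then-commit pattern: close() commits, cancel() abandons.
    public class AtomicWriteSketch {
        static void writeAllOrNothing(AtomicOutputStream out, byte[] payload) throws IOException {
            boolean committed = false;
            try {
                out.write(payload);
                out.close();          // close means commit
                committed = true;
            } finally {
                if (!committed) {
                    out.cancel();     // roll back the partial write
                }
            }
        }
    }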

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/blobstore/BlobKeySequenceInfo.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/blobstore/BlobKeySequenceInfo.java b/storm-core/src/jvm/backtype/storm/blobstore/BlobKeySequenceInfo.java
deleted file mode 100644
index 53cfa15..0000000
--- a/storm-core/src/jvm/backtype/storm/blobstore/BlobKeySequenceInfo.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.blobstore;
-
-public class BlobKeySequenceInfo {
-    private String nimbusHostPort;
-    private String sequenceNumber;
-
-    public void setNimbusHostPort(String nimbusHostPort) {
-     this.nimbusHostPort = nimbusHostPort;
-    }
-
-    public void setSequenceNumber(String sequenceNumber) {
-        this.sequenceNumber = sequenceNumber;
-    }
-
-    public String getNimbusHostPort() {
-        return nimbusHostPort;
-    }
-
-    public String getSequenceNumber() {
-        return sequenceNumber;
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/blobstore/BlobStore.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/blobstore/BlobStore.java b/storm-core/src/jvm/backtype/storm/blobstore/BlobStore.java
deleted file mode 100644
index 16a408e..0000000
--- a/storm-core/src/jvm/backtype/storm/blobstore/BlobStore.java
+++ /dev/null
@@ -1,447 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.blobstore;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.NoSuchElementException;
-import java.util.Set;
-import java.util.regex.Pattern;
-
-import javax.security.auth.Subject;
-
-import backtype.storm.nimbus.NimbusInfo;
-import org.apache.commons.lang.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import backtype.storm.daemon.Shutdownable;
-import backtype.storm.generated.AuthorizationException;
-import backtype.storm.generated.KeyNotFoundException;
-import backtype.storm.generated.KeyAlreadyExistsException;
-import backtype.storm.generated.ReadableBlobMeta;
-import backtype.storm.generated.SettableBlobMeta;
-
-/**
- * Provides a way to store blobs that can be downloaded.
- * Blobs must be able to be uploaded and listed from Nimbus,
- * and downloaded from the Supervisors. It is a key value based
- * store. Key being a string and value being the blob data.
- *
- * ACL checking must take place against the provided subject.
- * If the blob store does not support Security it must validate
- * that all ACLs set are always WORLD, everything.
- *
- * The users can upload their blobs through the blob store command
- * line. The command line also allows us to update and delete blobs.
- *
- * Modifying the replication factor only works for HdfsBlobStore
- * as for the LocalFsBlobStore the replication is dependent on
- * the number of Nimbodes available.
- */
-public abstract class BlobStore implements Shutdownable {
-    private static final Logger LOG = LoggerFactory.getLogger(BlobStore.class);
-    private static final Pattern KEY_PATTERN = Pattern.compile("^[\\w \\t\\.:_-]+$");
-    protected static final String BASE_BLOBS_DIR_NAME = "blobs";
-
-    /**
-     * Allows us to initialize the blob store
-     * @param conf The storm configuration
-     * @param baseDir The directory path to store the blobs
-     * @param nimbusInfo Contains the nimbus host, port and leadership information.
-     */
-    public abstract void prepare(Map conf, String baseDir, NimbusInfo nimbusInfo);
-
-    /**
-     * Creates the blob.
-     * @param key Key for the blob.
-     * @param meta Metadata which contains the acls information
-     * @param who Is the subject creating the blob.
-     * @return AtomicOutputStream returns a stream into which the data
-     * can be written.
-     * @throws AuthorizationException
-     * @throws KeyAlreadyExistsException
-     */
-    public abstract AtomicOutputStream createBlob(String key, SettableBlobMeta meta, Subject who) throws AuthorizationException, KeyAlreadyExistsException;
-
-    /**
-     * Updates the blob data.
-     * @param key Key for the blob.
-     * @param who Is the subject having the write privilege for the blob.
-     * @return AtomicOutputStream returns a stream into which the data
-     * can be written.
-     * @throws AuthorizationException
-     * @throws KeyNotFoundException
-     */
-    public abstract AtomicOutputStream updateBlob(String key, Subject who) throws AuthorizationException, KeyNotFoundException;
-
-    /**
-     * Gets the current version of metadata for a blob
-     * to be viewed by the user or downloaded by the supervisor.
-     * @param key Key for the blob.
-     * @param who Is the subject having the read privilege for the blob.
-     * @return AtomicOutputStream returns a stream into which the data
-     * can be written.
-     * @throws AuthorizationException
-     * @throws KeyNotFoundException
-     */
-    public abstract ReadableBlobMeta getBlobMeta(String key, Subject who) throws AuthorizationException, KeyNotFoundException;
-
-    /**
-     * Sets the metadata with renewed acls for the blob.
-     * @param key Key for the blob.
-     * @param meta Metadata which contains the updated
-     * acls information.
-     * @param who Is the subject having the write privilege for the blob.
-     * @throws AuthorizationException
-     * @throws KeyNotFoundException
-     */
-    public abstract void setBlobMeta(String key, SettableBlobMeta meta, Subject who) throws AuthorizationException, KeyNotFoundException;
-
-    /**
-     * Deletes the blob data and metadata.
-     * @param key Key for the blob.
-     * @param who Is the subject having write privilege for the blob.
-     * @throws AuthorizationException
-     * @throws KeyNotFoundException
-     */
-    public abstract void deleteBlob(String key, Subject who) throws AuthorizationException, KeyNotFoundException;
-
-    /**
-     * Gets the InputStream to read the blob details
-     * @param key Key for the blob.
-     * @param who Is the subject having the read privilege for the blob.
-     * @return InputStreamWithMeta has the additional
-     * file length and version information.
-     * @throws AuthorizationException
-     * @throws KeyNotFoundException
-     */
-    public abstract InputStreamWithMeta getBlob(String key, Subject who) throws AuthorizationException, KeyNotFoundException;
-
-    /**
-     * Returns an iterator with all the list of
-     * keys currently available on the blob store.
-     * @return Iterator<String>
-     */
-    public abstract Iterator<String> listKeys();
-
-    /**
-     * Gets the replication factor of the blob.
-     * @param key Key for the blob.
-     * @param who Is the subject having the read privilege for the blob.
-     * @return BlobReplication object containing the
-     * replication factor for the blob.
-     * @throws Exception
-     */
-    public abstract int getBlobReplication(String key, Subject who) throws Exception;
-
-    /**
-     * Modifies the replication factor of the blob.
-     * @param key Key for the blob.
-     * @param replication The replication factor the
-     * blob has to be set.
-     * @param who Is the subject having the update privilege for the blob
-     * @return BlobReplication object containing the
-     * updated replication factor for the blob.
-     * @throws AuthorizationException
-     * @throws KeyNotFoundException
-     * @throws IOException
-     */
-    public abstract int updateBlobReplication(String key, int replication, Subject who) throws AuthorizationException, KeyNotFoundException, IOException;
-
-    /**
-     * Filters keys based on the KeyFilter
-     * passed as the argument.
-     * @param filter KeyFilter
-     * @param <R> Type
-     * @return Set of filtered keys
-     */
-    public <R> Set<R> filterAndListKeys(KeyFilter<R> filter) {
-        Set<R> ret = new HashSet<R>();
-        Iterator<String> keys = listKeys();
-        while (keys.hasNext()) {
-            String key = keys.next();
-            R filtered = filter.filter(key);
-            if (filtered != null) {
-                ret.add(filtered);
-            }
-        }
-        return ret;
-    }
-
-    /**
-     * Validates key checking for potentially harmful patterns
-     * @param key Key for the blob.
-     */
-    public static final void validateKey(String key) throws AuthorizationException {
-        if (StringUtils.isEmpty(key) || "..".equals(key) || ".".equals(key) || !KEY_PATTERN.matcher(key).matches()) {
-            LOG.error("'{}' does not appear to be valid {}", key, KEY_PATTERN);
-            throw new AuthorizationException(key+" does not appear to be a valid blob key");
-        }
-    }
-
-    /**
-     * Wrapper called to create the blob which contains
-     * the byte data
-     * @param key Key for the blob.
-     * @param data Byte data that needs to be uploaded.
-     * @param meta Metadata which contains the acls information
-     * @param who Is the subject creating the blob.
-     * @throws AuthorizationException
-     * @throws KeyAlreadyExistsException
-     * @throws IOException
-     */
-    public void createBlob(String key, byte [] data, SettableBlobMeta meta, Subject who) throws AuthorizationException, KeyAlreadyExistsException, IOException {
-        AtomicOutputStream out = null;
-        try {
-            out = createBlob(key, meta, who);
-            out.write(data);
-            out.close();
-            out = null;
-        } finally {
-            if (out != null) {
-                out.cancel();
-            }
-        }
-    }
-
-    /**
-     * Wrapper called to create the blob which contains
-     * the byte data
-     * @param key Key for the blob.
-     * @param in InputStream from which the data is read to be
-     * written as a part of the blob.
-     * @param meta Metadata which contains the acls information
-     * @param who Is the subject creating the blob.
-     * @throws AuthorizationException
-     * @throws KeyAlreadyExistsException
-     * @throws IOException
-     */
-    public void createBlob(String key, InputStream in, SettableBlobMeta meta, Subject who) throws AuthorizationException, KeyAlreadyExistsException, IOException {
-        AtomicOutputStream out = null;
-        try {
-            out = createBlob(key, meta, who);
-            byte[] buffer = new byte[2048];
-            int len = 0;
-            while ((len = in.read(buffer)) > 0) {
-                out.write(buffer, 0, len);
-            }
-            out.close();
-        } catch (AuthorizationException | IOException | RuntimeException e) {
-            if (out !=null) {
-                out.cancel();
-            }
-        } finally {
-            in.close();
-        }
-    }
-
-    /**
-     * Reads the blob from the blob store
-     * and writes it into the output stream.
-     * @param key Key for the blob.
-     * @param out Output stream
-     * @param who Is the subject having read
-     * privilege for the blob.
-     * @throws IOException
-     * @throws KeyNotFoundException
-     * @throws AuthorizationException
-     */
-    public void readBlobTo(String key, OutputStream out, Subject who) throws IOException, KeyNotFoundException, AuthorizationException {
-        InputStreamWithMeta in = getBlob(key, who);
-        if (in == null) {
-            throw new IOException("Could not find " + key);
-        }
-        byte[] buffer = new byte[2048];
-        int len = 0;
-        try{
-            while ((len = in.read(buffer)) > 0) {
-                out.write(buffer, 0, len);
-            }
-        } finally {
-            in.close();
-            out.flush();
-        }
-    }
-
-    /**
-     * Wrapper around readBlobTo which
-     * returns a ByteArray output stream.
-     * @param key  Key for the blob.
-     * @param who Is the subject having
-     * the read privilege for the blob.
-     * @return ByteArrayOutputStream
-     * @throws IOException
-     * @throws KeyNotFoundException
-     * @throws AuthorizationException
-     */
-    public byte[] readBlob(String key, Subject who) throws IOException, KeyNotFoundException, AuthorizationException {
-        ByteArrayOutputStream out = new ByteArrayOutputStream();
-        readBlobTo(key, out, who);
-        byte[] bytes = out.toByteArray();
-        out.close();
-        return bytes;
-    }
-
-    /**
-     * Output stream implementation used for reading the
-     * metadata and data information.
-     */
-    protected class BlobStoreFileOutputStream extends AtomicOutputStream {
-        private BlobStoreFile part;
-        private OutputStream out;
-
-        public BlobStoreFileOutputStream(BlobStoreFile part) throws IOException {
-            this.part = part;
-            this.out = part.getOutputStream();
-        }
-
-        @Override
-        public void close() throws IOException {
-            try {
-                //close means commit
-                out.close();
-                part.commit();
-            } catch (IOException | RuntimeException e) {
-                cancel();
-                throw e;
-            }
-        }
-
-        @Override
-        public void cancel() throws IOException {
-            try {
-                out.close();
-            } finally {
-                part.cancel();
-            }
-        }
-
-        @Override
-        public void write(int b) throws IOException {
-            out.write(b);
-        }
-
-        @Override
-        public void write(byte []b) throws IOException {
-            out.write(b);
-        }
-
-        @Override
-        public void write(byte []b, int offset, int len) throws IOException {
-            out.write(b, offset, len);
-        }
-    }
-
-    /**
-     * Input stream implementation used for writing
-     * both the metadata containing the acl information
-     * and the blob data.
-     */
-    protected class BlobStoreFileInputStream extends InputStreamWithMeta {
-        private BlobStoreFile part;
-        private InputStream in;
-
-        public BlobStoreFileInputStream(BlobStoreFile part) throws IOException {
-            this.part = part;
-            this.in = part.getInputStream();
-        }
-
-        @Override
-        public long getVersion() throws IOException {
-            return part.getModTime();
-        }
-
-        @Override
-        public int read() throws IOException {
-            return in.read();
-        }
-
-        @Override
-        public int read(byte[] b, int off, int len) throws IOException {
-            return in.read(b, off, len);
-        }
-
-        @Override
-        public int read(byte[] b) throws IOException {
-            return in.read(b);
-        }
-
-        @Override
-        public int available() throws IOException {
-            return in.available();
-        }
-
-        @Override
-        public long getFileLength() throws IOException {
-            return part.getFileLength();
-        }
-    }
-
-    /**
-     * Blob store implements its own version of iterator
-     * to list the blobs
-     */
-    public static class KeyTranslationIterator implements Iterator<String> {
-        private Iterator<String> it = null;
-        private String next = null;
-        private String prefix = null;
-
-        public KeyTranslationIterator(Iterator<String> it, String prefix) throws IOException {
-            this.it = it;
-            this.prefix = prefix;
-            primeNext();
-        }
-
-        private void primeNext() {
-            next = null;
-            while (it.hasNext()) {
-                String tmp = it.next();
-                if (tmp.startsWith(prefix)) {
-                    next = tmp.substring(prefix.length());
-                    return;
-                }
-            }
-        }
-
-        @Override
-        public boolean hasNext() {
-            return next != null;
-        }
-
-        @Override
-        public String next() {
-            if (!hasNext()) {
-                throw new NoSuchElementException();
-            }
-            String current = next;
-            primeNext();
-            return current;
-        }
-
-        @Override
-        public void remove() {
-            throw new UnsupportedOperationException("Delete Not Supported");
-        }
-    }
-}
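
Putting the abstract operations above together, a client creates a blob under a key and a consumer later reads it back. A hedged sketch against any concrete BlobStore implementation; the key is illustrative, the world-readable ACL is only suitable for an unsecured store, and the Subject passed in would be the authenticated caller (or null where no security is configured):

    import java.nio.charset.StandardCharsets;

    import javax.security.auth.Subject;

    import backtype.storm.blobstore.BlobStore;
    import backtype.storm.blobstore.BlobStoreAclHandler;
    import backtype.storm.generated.SettableBlobMeta;

    // Sketch of round-tripping a small blob through a BlobStore.
    public class BlobRoundTripSketch {
        static byte[] roundTrip(BlobStore store, Subject who) throws Exception {
            SettableBlobMeta meta = new SettableBlobMeta(BlobStoreAclHandler.WORLD_EVERYTHING);
            store.createBlob("demo-key", "hello blobstore".getBytes(StandardCharsets.UTF_8), meta, who);
            try {
                return store.readBlob("demo-key", who);   // uses the readBlobTo wrapper above
            } finally {
                store.deleteBlob("demo-key", who);
            }
        }
    }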

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/blobstore/BlobStoreAclHandler.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/blobstore/BlobStoreAclHandler.java b/storm-core/src/jvm/backtype/storm/blobstore/BlobStoreAclHandler.java
deleted file mode 100644
index c0c4e5c..0000000
--- a/storm-core/src/jvm/backtype/storm/blobstore/BlobStoreAclHandler.java
+++ /dev/null
@@ -1,399 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.blobstore;
-
-import backtype.storm.Config;
-import backtype.storm.generated.AccessControl;
-import backtype.storm.generated.AccessControlType;
-import backtype.storm.generated.AuthorizationException;
-import backtype.storm.generated.SettableBlobMeta;
-import backtype.storm.security.auth.AuthUtils;
-import backtype.storm.security.auth.IPrincipalToLocal;
-import backtype.storm.security.auth.NimbusPrincipal;
-import org.apache.commons.lang.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.security.auth.Subject;
-import java.security.Principal;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Provides common handling of ACLs for blob stores.
- * Also contains some static utility functions related to blob stores.
- */
-public class BlobStoreAclHandler {
-    public static final Logger LOG = LoggerFactory.getLogger(BlobStoreAclHandler.class);
-    private final IPrincipalToLocal _ptol;
-
-    public static final int READ = 0x01;
-    public static final int WRITE = 0x02;
-    public static final int ADMIN = 0x04;
-    public static final List<AccessControl> WORLD_EVERYTHING =
-            Arrays.asList(new AccessControl(AccessControlType.OTHER, READ | WRITE | ADMIN));
-    public static final List<AccessControl> DEFAULT = new ArrayList<AccessControl>();
-    private Set<String> _supervisors;
-    private Set<String> _admins;
-
-    public BlobStoreAclHandler(Map conf) {
-        _ptol = AuthUtils.GetPrincipalToLocalPlugin(conf);
-        _supervisors = new HashSet<String>();
-        _admins = new HashSet<String>();
-        if (conf.containsKey(Config.NIMBUS_SUPERVISOR_USERS)) {
-            _supervisors.addAll((List<String>)conf.get(Config.NIMBUS_SUPERVISOR_USERS));
-        }
-        if (conf.containsKey(Config.NIMBUS_ADMINS)) {
-            _admins.addAll((List<String>)conf.get(Config.NIMBUS_ADMINS));
-        }
-    }
-
-    private static AccessControlType parseACLType(String type) {
-        if ("other".equalsIgnoreCase(type) || "o".equalsIgnoreCase(type)) {
-            return AccessControlType.OTHER;
-        } else if ("user".equalsIgnoreCase(type) || "u".equalsIgnoreCase(type)) {
-            return AccessControlType.USER;
-        }
-        throw new IllegalArgumentException(type+" is not a valid access control type");
-    }
-
-    private static int parseAccess(String access) {
-        int ret = 0;
-        for (char c: access.toCharArray()) {
-            if ('r' == c) {
-                ret = ret | READ;
-            } else if ('w' == c) {
-                ret = ret | WRITE;
-            } else if ('a' == c) {
-                ret = ret | ADMIN;
-            } else if ('-' == c) {
-                //ignored
-            } else {
-                throw new IllegalArgumentException(access + " is not a valid access string");
-            }
-        }
-        return ret;
-    }
-
-    public static AccessControl parseAccessControl(String str) {
-        String[] parts = str.split(":");
-        String type = "other";
-        String name = "";
-        String access = "-";
-        if (parts.length > 3) {
-            throw new IllegalArgumentException("Don't know how to parse "+str+" into an ACL value");
-        } else if (parts.length == 1) {
-            type = "other";
-            name = "";
-            access = parts[0];
-        } else if (parts.length == 2) {
-            type = "user";
-            name = parts[0];
-            access = parts[1];
-        } else if (parts.length == 3) {
-            type = parts[0];
-            name = parts[1];
-            access = parts[2];
-        }
-        AccessControl ret = new AccessControl();
-        ret.set_type(parseACLType(type));
-        ret.set_name(name);
-        ret.set_access(parseAccess(access));
-        return ret;
-    }
-
-    private static String accessToString(int access) {
-        StringBuilder ret = new StringBuilder();
-        ret.append(((access & READ) > 0) ? "r" : "-");
-        ret.append(((access & WRITE) > 0) ? "w" : "-");
-        ret.append(((access & ADMIN) > 0) ? "a" : "-");
-        return ret.toString();
-    }
-
-    public static String accessControlToString(AccessControl ac) {
-        StringBuilder ret = new StringBuilder();
-        switch(ac.get_type()) {
-            case OTHER:
-                ret.append("o");
-                break;
-            case USER:
-                ret.append("u");
-                break;
-            default:
-                throw new IllegalArgumentException("Don't know what a type of "+ac.get_type()+" means ");
-        }
-        ret.append(":");
-        if (ac.is_set_name()) {
-            ret.append(ac.get_name());
-        }
-        ret.append(":");
-        ret.append(accessToString(ac.get_access()));
-        return ret.toString();
-    }
-
-    public static void validateSettableACLs(String key, List<AccessControl> acls) throws AuthorizationException {
-        Set<String> aclUsers = new HashSet<>();
-        List<String> duplicateUsers = new ArrayList<>();
-        for (AccessControl acl : acls) {
-            String aclUser = acl.get_name();
-            if (!StringUtils.isEmpty(aclUser) && !aclUsers.add(aclUser)) {
-                LOG.error("'{}' user can't appear more than once in the ACLs", aclUser);
-                duplicateUsers.add(aclUser);
-            }
-        }
-        if (duplicateUsers.size() > 0) {
-            String errorMessage  = "user " + Arrays.toString(duplicateUsers.toArray())
-                    + " can't appear more than once in the ACLs for key [" + key +"].";
-            throw new AuthorizationException(errorMessage);
-        }
-    }
-
-    private Set<String> constructUserFromPrincipals(Subject who) {
-        Set<String> user = new HashSet<String>();
-        if (who != null) {
-            for (Principal p : who.getPrincipals()) {
-                user.add(_ptol.toLocal(p));
-            }
-        }
-        return user;
-    }
-
-    private boolean isAdmin(Subject who) {
-        Set<String> user = constructUserFromPrincipals(who);
-        for (String u : user) {
-            if (_admins.contains(u)) {
-                return true;
-            }
-        }
-        return false;
-    }
-
-    private boolean isReadOperation(int operation) {
-        return operation == READ;
-    }
-
-    private boolean isSupervisor(Subject who, int operation) {
-        Set<String> user = constructUserFromPrincipals(who);
-        if (isReadOperation(operation)) {
-            for (String u : user) {
-                if (_supervisors.contains(u)) {
-                    return true;
-                }
-            }
-        }
-        return false;
-    }
-
-    private boolean isNimbus(Subject who) {
-        Set<Principal> principals;
-        boolean isNimbusInstance = false;
-        if (who != null) {
-            principals = who.getPrincipals();
-            for (Principal principal : principals) {
-                if (principal instanceof NimbusPrincipal) {
-                    isNimbusInstance = true;
-                }
-            }
-        }
-        return isNimbusInstance;
-    }
-
-    public boolean checkForValidUsers(Subject who, int mask) {
-        return isNimbus(who) || isAdmin(who) || isSupervisor(who,mask);
-    }
-
-    /**
-     * The user should be able to see the metadata if and only if they have any of READ, WRITE, or ADMIN
-     */
-    public void validateUserCanReadMeta(List<AccessControl> acl, Subject who, String key) throws AuthorizationException {
-        hasAnyPermissions(acl, (READ|WRITE|ADMIN), who, key);
-    }
-
-    /**
-     * Validates that the user has at least one of the permissions
-     * specified in the mask.
-     * @param acl ACL for the key.
-     * @param mask cumulative value of the READ = 1, WRITE = 2 and ADMIN = 4 permissions;
-     * mask = 1 implies READ privilege, mask = 5 implies READ and ADMIN privileges.
-     * @param who the user whose permissions are validated for the key using the ACL and the mask.
-     * @param key key used to identify the blob.
-     * @throws AuthorizationException if the user holds none of the requested permissions.
-     */
-    public void hasAnyPermissions(List<AccessControl> acl, int mask, Subject who, String key) throws AuthorizationException {
-        Set<String> user = constructUserFromPrincipals(who);
-        LOG.debug("user {}", user);
-        if (checkForValidUsers(who, mask)) {
-            return;
-        }
-        for (AccessControl ac : acl) {
-            int allowed = getAllowed(ac, user);
-            LOG.debug(" user: {} allowed: {} key: {}", user, allowed, key);
-            if ((allowed & mask) > 0) {
-                return;
-            }
-        }
-        throw new AuthorizationException(
-                user + " does not have access to " + key);
-    }
-
-    /**
-     * Validates that the user has all of the permissions
-     * specified in the mask.
-     * @param acl ACL for the key.
-     * @param mask cumulative value of the READ = 1, WRITE = 2 and ADMIN = 4 permissions;
-     * mask = 1 implies READ privilege, mask = 5 implies READ and ADMIN privileges.
-     * @param who the user whose permissions are validated for the key using the ACL and the mask.
-     * @param key key used to identify the blob.
-     * @throws AuthorizationException if the user is missing any of the requested permissions.
-     */
-    public void hasPermissions(List<AccessControl> acl, int mask, Subject who, String key) throws AuthorizationException {
-        Set<String> user = constructUserFromPrincipals(who);
-        LOG.debug("user {}", user);
-        if (checkForValidUsers(who, mask)) {
-            return;
-        }
-        for (AccessControl ac : acl) {
-            int allowed = getAllowed(ac, user);
-            mask = ~allowed & mask;
-            LOG.debug(" user: {} allowed: {} disallowed: {} key: {}", user, allowed, mask, key);
-        }
-        if (mask == 0) {
-            return;
-        }
-        throw new AuthorizationException(
-                user + " does not have " + namedPerms(mask) + " access to " + key);
-    }
-
-    public void normalizeSettableBlobMeta(String key, SettableBlobMeta meta, Subject who, int opMask) {
-        meta.set_acl(normalizeSettableACLs(key, meta.get_acl(), who, opMask));
-    }
-
-    private String namedPerms(int mask) {
-        StringBuilder b = new StringBuilder();
-        b.append("[");
-        if ((mask & READ) > 0) {
-            b.append("READ ");
-        }
-        if ((mask & WRITE) > 0) {
-            b.append("WRITE ");
-        }
-        if ((mask & ADMIN) > 0) {
-            b.append("ADMIN ");
-        }
-        b.append("]");
-        return b.toString();
-    }
-
-    private int getAllowed(AccessControl ac, Set<String> users) {
-        switch (ac.get_type()) {
-            case OTHER:
-                return ac.get_access();
-            case USER:
-                if (users.contains(ac.get_name())) {
-                    return ac.get_access();
-                }
-                return 0;
-            default:
-                return 0;
-        }
-    }
-
-    private List<AccessControl> removeBadACLs(List<AccessControl> accessControls) {
-        List<AccessControl> resultAcl = new ArrayList<AccessControl>();
-        for (AccessControl control : accessControls) {
-            if(control.get_type().equals(AccessControlType.OTHER) && (control.get_access() == 0 )) {
-                LOG.debug("Removing invalid blobstore world ACL " +
-                        BlobStoreAclHandler.accessControlToString(control));
-                continue;
-            }
-            resultAcl.add(control);
-        }
-        return resultAcl;
-    }
-
-    private final List<AccessControl> normalizeSettableACLs(String key, List<AccessControl> acls, Subject who,
-                                                            int opMask) {
-        List<AccessControl> cleanAcls = removeBadACLs(acls);
-        Set<String> userNames = getUserNamesFromSubject(who);
-        for (String user : userNames) {
-            fixACLsForUser(cleanAcls, user, opMask);
-        }
-        if ((who == null || userNames.isEmpty()) && !worldEverything(acls)) {
-            cleanAcls.addAll(BlobStoreAclHandler.WORLD_EVERYTHING);
-            LOG.debug("Access Control for key {} is normalized to world everything {}", key, cleanAcls);
-            if (!acls.isEmpty())
-                LOG.warn("Access control for blob with key {} is normalized to WORLD_EVERYTHING", key);
-        }
-        return cleanAcls;
-    }
-
-    private boolean worldEverything(List<AccessControl> acls) {
-        boolean isWorldEverything = false;
-        for (AccessControl acl : acls) {
-            if (acl.get_type() == AccessControlType.OTHER && acl.get_access() == (READ|WRITE|ADMIN)) {
-                isWorldEverything = true;
-                break;
-            }
-        }
-        return isWorldEverything;
-    }
-
-    private void fixACLsForUser(List<AccessControl> acls, String user, int mask) {
-        boolean foundUserACL = false;
-        for (AccessControl control : acls) {
-            if (control.get_type() == AccessControlType.USER && control.get_name().equals(user)) {
-                int currentAccess = control.get_access();
-                if ((currentAccess & mask) != mask) {
-                    control.set_access(currentAccess | mask);
-                }
-                foundUserACL = true;
-                break;
-            }
-        }
-        if (!foundUserACL) {
-            AccessControl userACL = new AccessControl();
-            userACL.set_type(AccessControlType.USER);
-            userACL.set_name(user);
-            userACL.set_access(mask);
-            acls.add(userACL);
-        }
-    }
-
-    private Set<String> getUserNamesFromSubject(Subject who) {
-        Set<String> user = new HashSet<String>();
-        if (who != null) {
-            for(Principal p: who.getPrincipals()) {
-                user.add(_ptol.toLocal(p));
-            }
-        }
-        return user;
-    }
-}
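
A short, hypothetical sketch of the ACL string format and permission masks handled above; the user
name and ACL strings are illustrative, and the class is assumed to be importable from the
backtype.storm.blobstore package shown in the file.

import backtype.storm.blobstore.BlobStoreAclHandler;
import backtype.storm.generated.AccessControl;

public class AclParsingExample {
    public static void main(String[] args) {
        // "u:alice:rwa" -> USER ACL for alice with READ | WRITE | ADMIN (mask 7).
        AccessControl alice = BlobStoreAclHandler.parseAccessControl("u:alice:rwa");
        // "o::r--" -> OTHER ACL granting READ only (mask 1).
        AccessControl world = BlobStoreAclHandler.parseAccessControl("o::r--");
        System.out.println(BlobStoreAclHandler.accessControlToString(alice)); // u:alice:rwa
        System.out.println(BlobStoreAclHandler.accessControlToString(world)); // o::r--
        // READ | ADMIN = 5: hasPermissions would require both bits to be granted,
        // while hasAnyPermissions accepts an ACL that grants either one.
        System.out.println(BlobStoreAclHandler.READ | BlobStoreAclHandler.ADMIN); // 5
    }
}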

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/blobstore/BlobStoreFile.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/blobstore/BlobStoreFile.java b/storm-core/src/jvm/backtype/storm/blobstore/BlobStoreFile.java
deleted file mode 100644
index 22ccf97..0000000
--- a/storm-core/src/jvm/backtype/storm/blobstore/BlobStoreFile.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.blobstore;
-
-import backtype.storm.generated.SettableBlobMeta;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.util.regex.Pattern;
-
-/**
- * Provides a base implementation for creating a blob store based on file-backed storage.
- */
-public abstract class BlobStoreFile {
-    public static final Logger LOG = LoggerFactory.getLogger(BlobStoreFile.class);
-
-    protected static final String TMP_EXT = ".tmp";
-    protected static final Pattern TMP_NAME_PATTERN = Pattern.compile("^\\d+\\" + TMP_EXT + "$");
-    protected static final String BLOBSTORE_DATA_FILE = "data";
-
-    public abstract void delete() throws IOException;
-    public abstract String getKey();
-    public abstract boolean isTmp();
-    public abstract void setMetadata(SettableBlobMeta meta);
-    public abstract SettableBlobMeta getMetadata();
-    public abstract long getModTime() throws IOException;
-    public abstract InputStream getInputStream() throws IOException;
-    public abstract OutputStream getOutputStream() throws IOException;
-    public abstract void commit() throws IOException;
-    public abstract void cancel() throws IOException;
-    public abstract long getFileLength() throws IOException;
-}
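
A hedged sketch of the write lifecycle the abstract contract above implies; the helper class is
hypothetical, a concrete BlobStoreFile implementation must be supplied by the caller, and the idea
that commit() promotes the temporary file is an assumption based on the TMP_EXT naming.

import backtype.storm.blobstore.BlobStoreFile;

import java.io.IOException;
import java.io.OutputStream;

public class BlobStoreFileWriteExample {
    // Writes a payload through whatever concrete BlobStoreFile implementation is supplied.
    public static void writeBlob(BlobStoreFile part, byte[] payload) throws IOException {
        OutputStream out = part.getOutputStream();
        try {
            out.write(payload);
            out.close();
            part.commit();   // implementations are expected to promote the temporary file to the final data file
        } catch (IOException e) {
            part.cancel();   // discard the partially written temporary file
            throw e;
        }
    }
}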

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/blobstore/BlobStoreUtils.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/blobstore/BlobStoreUtils.java b/storm-core/src/jvm/backtype/storm/blobstore/BlobStoreUtils.java
deleted file mode 100644
index 97fb262..0000000
--- a/storm-core/src/jvm/backtype/storm/blobstore/BlobStoreUtils.java
+++ /dev/null
@@ -1,257 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.blobstore;
-
-import backtype.storm.Config;
-import backtype.storm.generated.AuthorizationException;
-import backtype.storm.generated.KeyAlreadyExistsException;
-import backtype.storm.generated.KeyNotFoundException;
-import backtype.storm.generated.ReadableBlobMeta;
-import backtype.storm.nimbus.NimbusInfo;
-import backtype.storm.security.auth.NimbusPrincipal;
-import backtype.storm.utils.NimbusClient;
-import backtype.storm.utils.Utils;
-import backtype.storm.utils.ZookeeperAuthInfo;
-import org.apache.curator.framework.CuratorFramework;
-import org.apache.thrift.transport.TTransportException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.security.auth.Subject;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-public class BlobStoreUtils {
-    private static final String BLOBSTORE_SUBTREE="/blobstore";
-    private static final Logger LOG = LoggerFactory.getLogger(BlobStoreUtils.class);
-
-    public static CuratorFramework createZKClient(Map conf) {
-        List<String> zkServers = (List<String>) conf.get(Config.STORM_ZOOKEEPER_SERVERS);
-        Object port = conf.get(Config.STORM_ZOOKEEPER_PORT);
-        ZookeeperAuthInfo zkAuthInfo = new ZookeeperAuthInfo(conf);
-        CuratorFramework zkClient = Utils.newCurator(conf, zkServers, port, (String) conf.get(Config.STORM_ZOOKEEPER_ROOT), zkAuthInfo);
-        zkClient.start();
-        return zkClient;
-    }
-
-    public static Subject getNimbusSubject() {
-        Subject subject = new Subject();
-        subject.getPrincipals().add(new NimbusPrincipal());
-        return subject;
-    }
-
-    // Normalize state
-    public static BlobKeySequenceInfo normalizeNimbusHostPortSequenceNumberInfo(String nimbusSeqNumberInfo) {
-        BlobKeySequenceInfo keySequenceInfo = new BlobKeySequenceInfo();
-        int lastIndex = nimbusSeqNumberInfo.lastIndexOf("-");
-        keySequenceInfo.setNimbusHostPort(nimbusSeqNumberInfo.substring(0, lastIndex));
-        keySequenceInfo.setSequenceNumber(nimbusSeqNumberInfo.substring(lastIndex + 1));
-        return keySequenceInfo;
-    }
-
-    // Check for latest sequence number of a key inside zookeeper and return nimbodes containing the latest sequence number
-    public static Set<NimbusInfo> getNimbodesWithLatestSequenceNumberOfBlob(CuratorFramework zkClient, String key) throws Exception {
-        List<String> stateInfoList = zkClient.getChildren().forPath("/blobstore/" + key);
-        Set<NimbusInfo> nimbusInfoSet = new HashSet<NimbusInfo>();
-        int latestSeqNumber = getLatestSequenceNumber(stateInfoList);
-        LOG.debug("getNimbodesWithLatestSequenceNumberOfBlob stateInfo {} version {}", stateInfoList, latestSeqNumber);
-        // Get the nimbodes with the latest version
-        for(String state : stateInfoList) {
-            BlobKeySequenceInfo sequenceInfo = normalizeNimbusHostPortSequenceNumberInfo(state);
-            if (latestSeqNumber == Integer.parseInt(sequenceInfo.getSequenceNumber())) {
-                nimbusInfoSet.add(NimbusInfo.parse(sequenceInfo.getNimbusHostPort()));
-            }
-        }
-        LOG.debug("nimbusInfoList {}", nimbusInfoSet);
-        return nimbusInfoSet;
-    }
-
-    // Get sequence number details from latest sequence number of the blob
-    public static int getLatestSequenceNumber(List<String> stateInfoList) {
-        int seqNumber = 0;
-        // Get latest sequence number of the blob present in the zookeeper --> possible to refactor this piece of code
-        for (String state : stateInfoList) {
-            BlobKeySequenceInfo sequenceInfo = normalizeNimbusHostPortSequenceNumberInfo(state);
-            int currentSeqNumber = Integer.parseInt(sequenceInfo.getSequenceNumber());
-            if (seqNumber < currentSeqNumber) {
-                seqNumber = currentSeqNumber;
-                LOG.debug("Sequence Info {}", seqNumber);
-            }
-        }
-        LOG.debug("Latest Sequence Number {}", seqNumber);
-        return seqNumber;
-    }
-
-    // Download missing blobs from potential nimbodes
-    public static boolean downloadMissingBlob(Map conf, BlobStore blobStore, String key, Set<NimbusInfo> nimbusInfos)
-            throws TTransportException {
-        NimbusClient client;
-        ReadableBlobMeta rbm;
-        ClientBlobStore remoteBlobStore;
-        InputStreamWithMeta in;
-        boolean isSuccess = false;
-        LOG.debug("Download blob NimbusInfos {}", nimbusInfos);
-        for (NimbusInfo nimbusInfo : nimbusInfos) {
-            if(isSuccess) {
-                break;
-            }
-            try {
-                client = new NimbusClient(conf, nimbusInfo.getHost(), nimbusInfo.getPort(), null);
-                rbm = client.getClient().getBlobMeta(key);
-                remoteBlobStore = new NimbusBlobStore();
-                remoteBlobStore.setClient(conf, client);
-                in = remoteBlobStore.getBlob(key);
-                blobStore.createBlob(key, in, rbm.get_settable(), getNimbusSubject());
-                // Confirm the blob was created by checking that the key is now present in the local blob store
-                Iterator<String> keyIterator = blobStore.listKeys();
-                while (keyIterator.hasNext()) {
-                    if (keyIterator.next().equals(key)) {
-                        LOG.debug("Success creating key, {}", key);
-                        isSuccess = true;
-                        break;
-                    }
-                }
-            } catch (IOException | AuthorizationException exception) {
-                throw new RuntimeException(exception);
-            } catch (KeyAlreadyExistsException kae) {
-                LOG.info("KeyAlreadyExistsException Key: {} {}", key, kae);
-            } catch (KeyNotFoundException knf) {
-                // Catching and logging KeyNotFoundException because, if
-                // there is a subsequent update and delete, the non-leader
-                // nimbodes might throw an exception.
-                LOG.info("KeyNotFoundException Key: {} {}", key, knf);
-            } catch (Exception exp) {
-                // Logging an exception while client is connecting
-                LOG.error("Exception {}", exp);
-            }
-        }
-
-        if (!isSuccess) {
-            LOG.error("Could not download blob with key" + key);
-        }
-        return isSuccess;
-    }
-
-    // Download updated blobs from potential nimbodes
-    public static boolean downloadUpdatedBlob(Map conf, BlobStore blobStore, String key, Set<NimbusInfo> nimbusInfos)
-            throws TTransportException {
-        NimbusClient client;
-        ClientBlobStore remoteBlobStore;
-        InputStreamWithMeta in;
-        AtomicOutputStream out;
-        boolean isSuccess = false;
-        LOG.debug("Download blob NimbusInfos {}", nimbusInfos);
-        for (NimbusInfo nimbusInfo : nimbusInfos) {
-            if (isSuccess) {
-                break;
-            }
-            try {
-                client = new NimbusClient(conf, nimbusInfo.getHost(), nimbusInfo.getPort(), null);
-                remoteBlobStore = new NimbusBlobStore();
-                remoteBlobStore.setClient(conf, client);
-                in = remoteBlobStore.getBlob(key);
-                out = blobStore.updateBlob(key, getNimbusSubject());
-                byte[] buffer = new byte[2048];
-                int len = 0;
-                while ((len = in.read(buffer)) > 0) {
-                    out.write(buffer, 0, len);
-                }
-                if (out != null) {
-                    out.close();
-                }
-                isSuccess = true;
-            } catch (IOException | AuthorizationException exception) {
-                throw new RuntimeException(exception);
-            } catch (KeyNotFoundException knf) {
-                // Catching and logging KeyNotFoundException because, if
-                // there is a subsequent update and delete, the non-leader
-                // nimbodes might throw an exception.
-                LOG.info("KeyNotFoundException {}", knf);
-            } catch (Exception exp) {
-                // Logging an exception while client is connecting
-                LOG.error("Exception {}", exp);
-            }
-        }
-
-        if (!isSuccess) {
-            LOG.error("Could not update the blob with key" + key);
-        }
-        return isSuccess;
-    }
-
-    // Get the list of keys from blobstore
-    public static List<String> getKeyListFromBlobStore(BlobStore blobStore) throws Exception {
-        Iterator<String> keys = blobStore.listKeys();
-        List<String> keyList = new ArrayList<String>();
-        if (keys != null) {
-            while (keys.hasNext()) {
-                keyList.add(keys.next());
-            }
-        }
-        LOG.debug("KeyList from blobstore {}", keyList);
-        return keyList;
-    }
-
-    public static void createStateInZookeeper(Map conf, String key, NimbusInfo nimbusInfo) throws TTransportException {
-        ClientBlobStore cb = new NimbusBlobStore();
-        cb.setClient(conf, new NimbusClient(conf, nimbusInfo.getHost(), nimbusInfo.getPort(), null));
-        cb.createStateInZookeeper(key);
-    }
-
-    public static void updateKeyForBlobStore (Map conf, BlobStore blobStore, CuratorFramework zkClient, String key, NimbusInfo nimbusDetails) {
-        try {
-            // Most of the Clojure tests currently access blobs using getBlob. Since updateKeyForBlobStore
-            // checks for the correct version of the blob as part of nimbus HA before performing any
-            // operation on it, several test cases would need to stub out this method. It is a valid
-            // trade-off to return early when nimbusDetails, which holds the current nimbus host/port data,
-            // is not initialized as part of the test. Moreover, this applies only to the local blob store
-            // when used along with nimbus HA.
-            if (nimbusDetails == null) {
-                return;
-            }
-            boolean isListContainsCurrentNimbusInfo = false;
-            List<String> stateInfo;
-            if (zkClient.checkExists().forPath(BLOBSTORE_SUBTREE + "/" + key) == null) {
-                return;
-            }
-            stateInfo = zkClient.getChildren().forPath(BLOBSTORE_SUBTREE + "/" + key);
-            LOG.debug("StateInfo for update {}", stateInfo);
-            Set<NimbusInfo> nimbusInfoList = getNimbodesWithLatestSequenceNumberOfBlob(zkClient, key);
-
-            for (NimbusInfo nimbusInfo:nimbusInfoList) {
-                if (nimbusInfo.getHost().equals(nimbusDetails.getHost())) {
-                    isListContainsCurrentNimbusInfo = true;
-                    break;
-                }
-            }
-
-            if (!isListContainsCurrentNimbusInfo && downloadUpdatedBlob(conf, blobStore, key, nimbusInfoList)) {
-                LOG.debug("Updating state inside zookeeper for an update");
-                createStateInZookeeper(conf, key, nimbusDetails);
-            }
-        } catch (Exception exp) {
-            throw new RuntimeException(exp);
-        }
-    }
-
-}
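
A small illustrative sketch of how the ZooKeeper child node names used above are decoded; the
host, port, and sequence values are invented, and BlobKeySequenceInfo is assumed to live in the
same backtype.storm.blobstore package.

import backtype.storm.blobstore.BlobKeySequenceInfo;
import backtype.storm.blobstore.BlobStoreUtils;

import java.util.Arrays;
import java.util.List;

public class SequenceNumberExample {
    public static void main(String[] args) {
        // Child nodes under /blobstore/<key> have the form "<nimbus-host:port>-<sequence-number>".
        List<String> stateInfo = Arrays.asList("nimbus1.example.com:6627-1", "nimbus2.example.com:6627-2");
        BlobKeySequenceInfo info = BlobStoreUtils.normalizeNimbusHostPortSequenceNumberInfo(stateInfo.get(1));
        System.out.println(info.getNimbusHostPort());   // nimbus2.example.com:6627
        System.out.println(info.getSequenceNumber());   // 2
        System.out.println(BlobStoreUtils.getLatestSequenceNumber(stateInfo)); // 2
    }
}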


[24/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/ui/core.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/ui/core.clj b/storm-core/src/clj/backtype/storm/ui/core.clj
deleted file mode 100644
index 61ddfa9..0000000
--- a/storm-core/src/clj/backtype/storm/ui/core.clj
+++ /dev/null
@@ -1,1273 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-
-(ns backtype.storm.ui.core
-  (:use compojure.core)
-  (:use [clojure.java.shell :only [sh]])
-  (:use ring.middleware.reload
-        ring.middleware.multipart-params)
-  (:use [ring.middleware.json :only [wrap-json-params]])
-  (:use [hiccup core page-helpers])
-  (:use [backtype.storm config util log stats zookeeper converter])
-  (:use [backtype.storm.ui helpers])
-  (:use [backtype.storm.daemon [common :only [ACKER-COMPONENT-ID ACKER-INIT-STREAM-ID ACKER-ACK-STREAM-ID
-                                              ACKER-FAIL-STREAM-ID mk-authorization-handler
-                                              start-metrics-reporters]]])
-  (:import [backtype.storm.utils Utils]
-           [backtype.storm.generated NimbusSummary])
-  (:use [clojure.string :only [blank? lower-case trim split]])
-  (:import [backtype.storm.generated ExecutorSpecificStats
-            ExecutorStats ExecutorSummary ExecutorInfo TopologyInfo SpoutStats BoltStats
-            ErrorInfo ClusterSummary SupervisorSummary TopologySummary
-            Nimbus$Client StormTopology GlobalStreamId RebalanceOptions
-            KillOptions GetInfoOptions NumErrorsChoice DebugOptions TopologyPageInfo
-            TopologyStats CommonAggregateStats ComponentAggregateStats
-            ComponentType BoltAggregateStats SpoutAggregateStats
-            ExecutorAggregateStats SpecificAggregateStats ComponentPageInfo
-            LogConfig LogLevel LogLevelAction])
-  (:import [backtype.storm.security.auth AuthUtils ReqContext])
-  (:import [backtype.storm.generated AuthorizationException ProfileRequest ProfileAction NodeInfo])
-  (:import [backtype.storm.security.auth AuthUtils])
-  (:import [backtype.storm.utils VersionInfo])
-  (:import [backtype.storm Config])
-  (:import [java.io File])
-  (:require [compojure.route :as route]
-            [compojure.handler :as handler]
-            [ring.util.response :as resp]
-            [backtype.storm [thrift :as thrift]])
-  (:require [metrics.meters :refer [defmeter mark!]])
-  (:import [org.apache.commons.lang StringEscapeUtils])
-  (:import [org.apache.logging.log4j Level])
-  (:gen-class))
-
-(def ^:dynamic *STORM-CONF* (read-storm-config))
-(def ^:dynamic *UI-ACL-HANDLER* (mk-authorization-handler (*STORM-CONF* NIMBUS-AUTHORIZER) *STORM-CONF*))
-(def ^:dynamic *UI-IMPERSONATION-HANDLER* (mk-authorization-handler (*STORM-CONF* NIMBUS-IMPERSONATION-AUTHORIZER) *STORM-CONF*))
-(def http-creds-handler (AuthUtils/GetUiHttpCredentialsPlugin *STORM-CONF*))
-(def STORM-VERSION (VersionInfo/getVersion))
-
-(defmeter ui:num-cluster-configuration-http-requests)
-(defmeter ui:num-cluster-summary-http-requests)
-(defmeter ui:num-nimbus-summary-http-requests)
-(defmeter ui:num-supervisor-summary-http-requests)
-(defmeter ui:num-all-topologies-summary-http-requests)
-(defmeter ui:num-topology-page-http-requests)
-(defmeter ui:num-build-visualization-http-requests)
-(defmeter ui:num-mk-visualization-data-http-requests)
-(defmeter ui:num-component-page-http-requests)
-(defmeter ui:num-log-config-http-requests)
-(defmeter ui:num-activate-topology-http-requests)
-(defmeter ui:num-deactivate-topology-http-requests)
-(defmeter ui:num-debug-topology-http-requests)
-(defmeter ui:num-component-op-response-http-requests)
-(defmeter ui:num-topology-op-response-http-requests)
-(defmeter ui:num-topology-op-response-http-requests)
-(defmeter ui:num-topology-op-response-http-requests)
-(defmeter ui:num-main-page-http-requests)
-
-(defn assert-authorized-user
-  ([op]
-    (assert-authorized-user op nil))
-  ([op topology-conf]
-    (let [context (ReqContext/context)]
-      (if (.isImpersonating context)
-        (if *UI-IMPERSONATION-HANDLER*
-            (if-not (.permit *UI-IMPERSONATION-HANDLER* context op topology-conf)
-              (let [principal (.principal context)
-                    real-principal (.realPrincipal context)
-                    user (if principal (.getName principal) "unknown")
-                    real-user (if real-principal (.getName real-principal) "unknown")
-                    remote-address (.remoteAddress context)]
-                (throw (AuthorizationException.
-                         (str "user '" real-user "' is not authorized to impersonate user '" user "' from host '" remote-address "'. Please
-                         see SECURITY.MD to learn how to configure impersonation ACL.")))))
-          (log-warn " principal " (.realPrincipal context) " is trying to impersonate " (.principal context) " but "
-            NIMBUS-IMPERSONATION-AUTHORIZER " has no authorizer configured. This is a potential security hole.
-            Please see SECURITY.MD to learn how to configure an impersonation authorizer.")))
-
-      (if *UI-ACL-HANDLER*
-       (if-not (.permit *UI-ACL-HANDLER* context op topology-conf)
-         (let [principal (.principal context)
-               user (if principal (.getName principal) "unknown")]
-           (throw (AuthorizationException.
-                   (str "UI request '" op "' for '" user "' user is not authorized")))))))))
-
-
-(defn assert-authorized-profiler-action
-  [op]
-  (if-not (*STORM-CONF* WORKER-PROFILER-ENABLED)
-    (throw (AuthorizationException.
-             (str "UI request for profiler action '" op "' is disabled.")))))
-
-
-(defn executor-summary-type
-  [topology ^ExecutorSummary s]
-  (component-type topology (.get_component_id s)))
-
-(defn is-ack-stream
-  [stream]
-  (let [acker-streams
-        [ACKER-INIT-STREAM-ID
-         ACKER-ACK-STREAM-ID
-         ACKER-FAIL-STREAM-ID]]
-    (every? #(not= %1 stream) acker-streams)))
-
-(defn spout-summary?
-  [topology s]
-  (= :spout (executor-summary-type topology s)))
-
-(defn bolt-summary?
-  [topology s]
-  (= :bolt (executor-summary-type topology s)))
-
-(defn group-by-comp
-  [summs]
-  (let [ret (group-by #(.get_component_id ^ExecutorSummary %) summs)]
-    (into (sorted-map) ret )))
-
-(defn logviewer-link [host fname secure?]
-  (if (and secure? (*STORM-CONF* LOGVIEWER-HTTPS-PORT))
-    (url-format "https://%s:%s/log?file=%s"
-      host
-      (*STORM-CONF* LOGVIEWER-HTTPS-PORT)
-      fname)
-    (url-format "http://%s:%s/log?file=%s"
-      host
-      (*STORM-CONF* LOGVIEWER-PORT)
-      fname)))
-
-(defn event-log-link
-  [topology-id component-id host port secure?]
-  (logviewer-link host (event-logs-filename topology-id port) secure?))
-
-(defn worker-log-link [host port topology-id secure?]
-  (let [fname (logs-filename topology-id port)]
-    (logviewer-link host fname secure?)))
-
-(defn nimbus-log-link [host port]
-  (url-format "http://%s:%s/daemonlog?file=nimbus.log" host (*STORM-CONF* LOGVIEWER-PORT) port))
-
-(defn get-error-time
-  [error]
-  (if error
-    (time-delta (.get_error_time_secs ^ErrorInfo error))))
-
-(defn get-error-data
-  [error]
-  (if error
-    (error-subset (.get_error ^ErrorInfo error))
-    ""))
-
-(defn get-error-port
-  [error]
-  (if error
-    (.get_port ^ErrorInfo error)
-    ""))
-
-(defn get-error-host
-  [error]
-  (if error
-    (.get_host ^ErrorInfo error)
-    ""))
-
-(defn get-error-time
-  [error]
-  (if error
-    (.get_error_time_secs ^ErrorInfo error)
-    ""))
-
-(defn worker-dump-link [host port topology-id]
-  (url-format "http://%s:%s/dumps/%s/%s"
-              (url-encode host)
-              (*STORM-CONF* LOGVIEWER-PORT)
-              (url-encode topology-id)
-              (str (url-encode host) ":" (url-encode port))))
-
-(defn stats-times
-  [stats-map]
-  (sort-by #(Integer/parseInt %)
-           (-> stats-map
-               clojurify-structure
-               (dissoc ":all-time")
-               keys)))
-
-(defn window-hint
-  [window]
-  (if (= window ":all-time")
-    "All time"
-    (pretty-uptime-sec window)))
-
-(defn sanitize-stream-name
-  [name]
-  (let [sym-regex #"(?![A-Za-z_\-:\.])."]
-    (str
-     (if (re-find #"^[A-Za-z]" name)
-       (clojure.string/replace name sym-regex "_")
-       (clojure.string/replace (str \s name) sym-regex "_"))
-     (hash name))))
-
-(defn sanitize-transferred
-  [transferred]
-  (into {}
-        (for [[time, stream-map] transferred]
-          [time, (into {}
-                       (for [[stream, trans] stream-map]
-                         [(sanitize-stream-name stream), trans]))])))
-
-(defn visualization-data
-  [spout-bolt spout-comp-summs bolt-comp-summs window storm-id]
-  (let [components (for [[id spec] spout-bolt]
-            [id
-             (let [inputs (.get_inputs (.get_common spec))
-                   bolt-summs (get bolt-comp-summs id)
-                   spout-summs (get spout-comp-summs id)
-                   bolt-cap (if bolt-summs
-                              (compute-bolt-capacity bolt-summs)
-                              0)]
-               {:type (if bolt-summs "bolt" "spout")
-                :capacity bolt-cap
-                :latency (if bolt-summs
-                           (get-in
-                             (bolt-streams-stats bolt-summs true)
-                             [:process-latencies window])
-                           (get-in
-                             (spout-streams-stats spout-summs true)
-                             [:complete-latencies window]))
-                :transferred (or
-                               (get-in
-                                 (spout-streams-stats spout-summs true)
-                                 [:transferred window])
-                               (get-in
-                                 (bolt-streams-stats bolt-summs true)
-                                 [:transferred window]))
-                :stats (let [mapfn (fn [dat]
-                                     (map (fn [^ExecutorSummary summ]
-                                            {:host (.get_host summ)
-                                             :port (.get_port summ)
-                                             :uptime_secs (.get_uptime_secs summ)
-                                             :transferred (if-let [stats (.get_stats summ)]
-                                                            (sanitize-transferred (.get_transferred stats)))})
-                                          dat))]
-                         (if bolt-summs
-                           (mapfn bolt-summs)
-                           (mapfn spout-summs)))
-                :link (url-format "/component.html?id=%s&topology_id=%s" id storm-id)
-                :inputs (for [[global-stream-id group] inputs]
-                          {:component (.get_componentId global-stream-id)
-                           :stream (.get_streamId global-stream-id)
-                           :sani-stream (sanitize-stream-name (.get_streamId global-stream-id))
-                           :grouping (clojure.core/name (thrift/grouping-type group))})})])]
-    (into {} (doall components))))
-
-(defn stream-boxes [datmap]
-  (let [filter-fn (mk-include-sys-fn true)
-        streams
-        (vec (doall (distinct
-                     (apply concat
-                            (for [[k v] datmap]
-                              (for [m (get v :inputs)]
-                                {:stream (get m :stream)
-                                 :sani-stream (get m :sani-stream)
-                                 :checked (is-ack-stream (get m :stream))}))))))]
-    (map (fn [row]
-           {:row row}) (partition 4 4 nil streams))))
-
-(defn- get-topology-info
-  ([^Nimbus$Client nimbus id]
-    (.getTopologyInfo nimbus id))
-  ([^Nimbus$Client nimbus id options]
-    (.getTopologyInfoWithOpts nimbus id options)))
-
-(defn mk-visualization-data
-  [id window include-sys?]
-  (thrift/with-configured-nimbus-connection
-    nimbus
-    (let [window (if window window ":all-time")
-          topology (.getTopology ^Nimbus$Client nimbus id)
-          spouts (.get_spouts topology)
-          bolts (.get_bolts topology)
-          summ (->> (doto
-                      (GetInfoOptions.)
-                      (.set_num_err_choice NumErrorsChoice/NONE))
-                    (.getTopologyInfoWithOpts ^Nimbus$Client nimbus id))
-          execs (.get_executors summ)
-          spout-summs (filter (partial spout-summary? topology) execs)
-          bolt-summs (filter (partial bolt-summary? topology) execs)
-          spout-comp-summs (group-by-comp spout-summs)
-          bolt-comp-summs (group-by-comp bolt-summs)
-          bolt-comp-summs (filter-key (mk-include-sys-fn include-sys?)
-                                      bolt-comp-summs)]
-      (visualization-data
-       (merge (hashmap-to-persistent spouts)
-              (hashmap-to-persistent bolts))
-       spout-comp-summs bolt-comp-summs window id))))
-
-(defn validate-tplg-submit-params [params]
-  (let [tplg-jar-file (params :topologyJar)
-        tplg-config (if (not-nil? (params :topologyConfig)) (from-json (params :topologyConfig)))]
-    (cond
-     (nil? tplg-jar-file) {:valid false :error "missing topology jar file"}
-     (nil? tplg-config) {:valid false :error "missing topology config"}
-     (nil? (tplg-config "topologyMainClass")) {:valid false :error "topologyMainClass missing in topologyConfig"}
-     :else {:valid true})))
-
-(defn run-tplg-submit-cmd [tplg-jar-file tplg-config user]
-  (let [tplg-main-class (if (not-nil? tplg-config) (trim (tplg-config "topologyMainClass")))
-        tplg-main-class-args (if (not-nil? tplg-config) (tplg-config "topologyMainClassArgs"))
-        storm-home (System/getProperty "storm.home")
-        storm-conf-dir (str storm-home file-path-separator "conf")
-        storm-log-dir (if (not-nil? (*STORM-CONF* "storm.log.dir")) (*STORM-CONF* "storm.log.dir")
-                          (str storm-home file-path-separator "logs"))
-        storm-libs (str storm-home file-path-separator "lib" file-path-separator "*")
-        java-cmd (str (System/getProperty "java.home") file-path-separator "bin" file-path-separator "java")
-        storm-cmd (str storm-home file-path-separator "bin" file-path-separator "storm")
-        tplg-cmd-response (apply sh
-                            (flatten
-                              [storm-cmd "jar" tplg-jar-file tplg-main-class
-                                (if (not-nil? tplg-main-class-args) tplg-main-class-args [])
-                                (if (not= user "unknown") (str "-c storm.doAsUser=" user) [])]))]
-    (log-message "tplg-cmd-response " tplg-cmd-response)
-    (cond
-     (= (tplg-cmd-response :exit) 0) {"status" "success"}
-     (and (not= (tplg-cmd-response :exit) 0)
-          (not-nil? (re-find #"already exists on cluster" (tplg-cmd-response :err)))) {"status" "failed" "error" "Topology with the same name exists in cluster"}
-          (not= (tplg-cmd-response :exit) 0) {"status" "failed" "error" (clojure.string/trim-newline (tplg-cmd-response :err))}
-          :else {"status" "success" "response" "topology deployed"}
-          )))
-
-(defn cluster-configuration []
-  (thrift/with-configured-nimbus-connection nimbus
-    (.getNimbusConf ^Nimbus$Client nimbus)))
-
-(defn topology-history-info
-  ([user]
-    (thrift/with-configured-nimbus-connection nimbus
-      (topology-history-info (.getTopologyHistory ^Nimbus$Client nimbus user) user)))
-  ([history user]
-    {"topo-history"
-     (into [] (.get_topo_ids history))}))
-
-(defn cluster-summary
-  ([user]
-     (thrift/with-configured-nimbus-connection nimbus
-        (cluster-summary (.getClusterInfo ^Nimbus$Client nimbus) user)))
-  ([^ClusterSummary summ user]
-     (let [sups (.get_supervisors summ)
-           used-slots (reduce + (map #(.get_num_used_workers ^SupervisorSummary %) sups))
-           total-slots (reduce + (map #(.get_num_workers ^SupervisorSummary %) sups))
-           free-slots (- total-slots used-slots)
-           topologies (.get_topologies_size summ)
-           total-tasks (->> (.get_topologies summ)
-                            (map #(.get_num_tasks ^TopologySummary %))
-                            (reduce +))
-           total-executors (->> (.get_topologies summ)
-                                (map #(.get_num_executors ^TopologySummary %))
-                                (reduce +))]
-       {"user" user
-        "stormVersion" STORM-VERSION
-        "supervisors" (count sups)
-        "topologies" topologies
-        "slotsTotal" total-slots
-        "slotsUsed"  used-slots
-        "slotsFree" free-slots
-        "executorsTotal" total-executors
-        "tasksTotal" total-tasks })))
-
-(defn convert-to-nimbus-summary[nimbus-seed]
-  (let [[host port] (.split nimbus-seed ":")]
-    {
-      "host" host
-      "port" port
-      "nimbusLogLink" (nimbus-log-link host port)
-      "status" "Offline"
-      "version" "Not applicable"
-      "nimbusUpTime" "Not applicable"
-      "nimbusUptimeSeconds" "Not applicable"}
-    ))
-
-(defn nimbus-summary
-  ([]
-    (thrift/with-configured-nimbus-connection nimbus
-      (nimbus-summary
-        (.get_nimbuses (.getClusterInfo ^Nimbus$Client nimbus)))))
-  ([nimbuses]
-    (let [nimbus-seeds (set (map #(str %1 ":" (*STORM-CONF* NIMBUS-THRIFT-PORT)) (set (*STORM-CONF* NIMBUS-SEEDS))))
-          alive-nimbuses (set (map #(str (.get_host %1) ":" (.get_port %1)) nimbuses))
-          offline-nimbuses (clojure.set/difference nimbus-seeds alive-nimbuses)
-          offline-nimbuses-summary (map #(convert-to-nimbus-summary %1) offline-nimbuses)]
-      {"nimbuses"
-       (concat offline-nimbuses-summary
-       (for [^NimbusSummary n nimbuses
-             :let [uptime (.get_uptime_secs n)]]
-         {
-          "host" (.get_host n)
-          "port" (.get_port n)
-          "nimbusLogLink" (nimbus-log-link (.get_host n) (.get_port n))
-          "status" (if (.is_isLeader n) "Leader" "Not a Leader")
-          "version" (.get_version n)
-          "nimbusUpTime" (pretty-uptime-sec uptime)
-          "nimbusUpTimeSeconds" uptime}))})))
-
-(defn supervisor-summary
-  ([]
-   (thrift/with-configured-nimbus-connection nimbus
-                (supervisor-summary
-                  (.get_supervisors (.getClusterInfo ^Nimbus$Client nimbus)))))
-  ([summs]
-   {"supervisors"
-    (for [^SupervisorSummary s summs]
-      {"id" (.get_supervisor_id s)
-       "host" (.get_host s)
-       "uptime" (pretty-uptime-sec (.get_uptime_secs s))
-       "uptimeSeconds" (.get_uptime_secs s)
-       "slotsTotal" (.get_num_workers s)
-       "slotsUsed" (.get_num_used_workers s)
-       "totalMem" (get (.get_total_resources s) Config/SUPERVISOR_MEMORY_CAPACITY_MB)
-       "totalCpu" (get (.get_total_resources s) Config/SUPERVISOR_CPU_CAPACITY)
-       "usedMem" (.get_used_mem s)
-       "usedCpu" (.get_used_cpu s)
-       "version" (.get_version s)})
-    "schedulerDisplayResource" (*STORM-CONF* Config/SCHEDULER_DISPLAY_RESOURCE)}))
-
-(defn all-topologies-summary
-  ([]
-   (thrift/with-configured-nimbus-connection
-     nimbus
-     (all-topologies-summary
-       (.get_topologies (.getClusterInfo ^Nimbus$Client nimbus)))))
-  ([summs]
-   {"topologies"
-    (for [^TopologySummary t summs]
-      {
-       "id" (.get_id t)
-       "encodedId" (url-encode (.get_id t))
-       "owner" (.get_owner t)
-       "name" (.get_name t)
-       "status" (.get_status t)
-       "uptime" (pretty-uptime-sec (.get_uptime_secs t))
-       "uptimeSeconds" (.get_uptime_secs t)
-       "tasksTotal" (.get_num_tasks t)
-       "workersTotal" (.get_num_workers t)
-       "executorsTotal" (.get_num_executors t)
-       "replicationCount" (.get_replication_count t)
-       "schedulerInfo" (.get_sched_status t)
-       "requestedMemOnHeap" (.get_requested_memonheap t)
-       "requestedMemOffHeap" (.get_requested_memoffheap t)
-       "requestedTotalMem" (+ (.get_requested_memonheap t) (.get_requested_memoffheap t))
-       "requestedCpu" (.get_requested_cpu t)
-       "assignedMemOnHeap" (.get_assigned_memonheap t)
-       "assignedMemOffHeap" (.get_assigned_memoffheap t)
-       "assignedTotalMem" (+ (.get_assigned_memonheap t) (.get_assigned_memoffheap t))
-       "assignedCpu" (.get_assigned_cpu t)})
-    "schedulerDisplayResource" (*STORM-CONF* Config/SCHEDULER_DISPLAY_RESOURCE)}))
-
-(defn topology-stats [window stats]
-  (let [times (stats-times (:emitted stats))
-        display-map (into {} (for [t times] [t pretty-uptime-sec]))
-        display-map (assoc display-map ":all-time" (fn [_] "All time"))]
-    (for [w (concat times [":all-time"])
-          :let [disp ((display-map w) w)]]
-      {"windowPretty" disp
-       "window" w
-       "emitted" (get-in stats [:emitted w])
-       "transferred" (get-in stats [:transferred w])
-       "completeLatency" (float-str (get-in stats [:complete-latencies w]))
-       "acked" (get-in stats [:acked w])
-       "failed" (get-in stats [:failed w])})))
-
-(defn build-visualization [id window include-sys?]
-  (thrift/with-configured-nimbus-connection nimbus
-    (let [window (if window window ":all-time")
-          topology-info (->> (doto
-                               (GetInfoOptions.)
-                               (.set_num_err_choice NumErrorsChoice/ONE))
-                             (.getTopologyInfoWithOpts ^Nimbus$Client nimbus
-                                                       id))
-          storm-topology (.getTopology ^Nimbus$Client nimbus id)
-          spout-executor-summaries (filter (partial spout-summary? storm-topology) (.get_executors topology-info))
-          bolt-executor-summaries (filter (partial bolt-summary? storm-topology) (.get_executors topology-info))
-          spout-comp-id->executor-summaries (group-by-comp spout-executor-summaries)
-          bolt-comp-id->executor-summaries (group-by-comp bolt-executor-summaries)
-          bolt-comp-id->executor-summaries (filter-key (mk-include-sys-fn include-sys?) bolt-comp-id->executor-summaries)
-          id->spout-spec (.get_spouts storm-topology)
-          id->bolt (.get_bolts storm-topology)
-          visualizer-data (visualization-data (merge (hashmap-to-persistent id->spout-spec)
-                                                     (hashmap-to-persistent id->bolt))
-                                              spout-comp-id->executor-summaries
-                                              bolt-comp-id->executor-summaries
-                                              window
-                                              id)]
-       {"visualizationTable" (stream-boxes visualizer-data)})))
-
-(defn- get-error-json
-  [topo-id error-info secure?]
-  (let [host (get-error-host error-info)
-        port (get-error-port error-info)]
-    {"lastError" (get-error-data error-info)
-     "errorTime" (get-error-time error-info)
-     "errorHost" host
-     "errorPort" port
-     "errorLapsedSecs" (get-error-time error-info)
-     "errorWorkerLogLink" (worker-log-link host port topo-id secure?)}))
-
-(defn- common-agg-stats-json
-  "Returns a JSON representation of a common aggregated statistics."
-  [^CommonAggregateStats common-stats]
-  {"executors" (.get_num_executors common-stats)
-   "tasks" (.get_num_tasks common-stats)
-   "emitted" (.get_emitted common-stats)
-   "transferred" (.get_transferred common-stats)
-   "acked" (.get_acked common-stats)
-   "failed" (.get_failed common-stats)})
-
-(defmulti comp-agg-stats-json
-  "Returns a JSON representation of aggregated statistics."
-  (fn [_ _ [id ^ComponentAggregateStats s]] (.get_type s)))
-
-(defmethod comp-agg-stats-json ComponentType/SPOUT
-  [topo-id secure? [id ^ComponentAggregateStats s]]
-  (let [^SpoutAggregateStats ss (.. s get_specific_stats get_spout)
-        cs (.get_common_stats s)]
-    (merge
-      (common-agg-stats-json cs)
-      (get-error-json topo-id (.get_last_error s) secure?)
-      {"spoutId" id
-       "encodedSpoutId" (url-encode id)
-       "completeLatency" (float-str (.get_complete_latency_ms ss))})))
-
-(defmethod comp-agg-stats-json ComponentType/BOLT
-  [topo-id secure? [id ^ComponentAggregateStats s]]
-  (let [^BoltAggregateStats ss (.. s get_specific_stats get_bolt)
-        cs (.get_common_stats s)]
-    (merge
-      (common-agg-stats-json cs)
-      (get-error-json topo-id (.get_last_error s) secure?)
-      {"boltId" id
-       "encodedBoltId" (url-encode id)
-       "capacity" (float-str (.get_capacity ss))
-       "executeLatency" (float-str (.get_execute_latency_ms ss))
-       "executed" (.get_executed ss)
-       "processLatency" (float-str (.get_process_latency_ms ss))})))
-
-(defn- unpack-topology-page-info
-  "Unpacks the serialized object to data structures"
-  [^TopologyPageInfo topo-info window secure?]
-  (let [id (.get_id topo-info)
-        ^TopologyStats topo-stats (.get_topology_stats topo-info)
-        stat->window->number
-          {:emitted (.get_window_to_emitted topo-stats)
-           :transferred (.get_window_to_transferred topo-stats)
-           :complete-latencies (.get_window_to_complete_latencies_ms topo-stats)
-           :acked (.get_window_to_acked topo-stats)
-           :failed (.get_window_to_failed topo-stats)}
-        topo-stats (topology-stats window stat->window->number)
-        [debugEnabled
-         samplingPct] (if-let [debug-opts (.get_debug_options topo-info)]
-                        [(.is_enable debug-opts)
-                         (.get_samplingpct debug-opts)])
-        uptime (.get_uptime_secs topo-info)]
-    {"id" id
-     "encodedId" (url-encode id)
-     "owner" (.get_owner topo-info)
-     "name" (.get_name topo-info)
-     "status" (.get_status topo-info)
-     "uptime" (pretty-uptime-sec uptime)
-     "uptimeSeconds" uptime
-     "tasksTotal" (.get_num_tasks topo-info)
-     "workersTotal" (.get_num_workers topo-info)
-     "executorsTotal" (.get_num_executors topo-info)
-     "schedulerInfo" (.get_sched_status topo-info)
-     "requestedMemOnHeap" (.get_requested_memonheap topo-info)
-     "requestedMemOffHeap" (.get_requested_memoffheap topo-info)
-     "requestedCpu" (.get_requested_cpu topo-info)
-     "assignedMemOnHeap" (.get_assigned_memonheap topo-info)
-     "assignedMemOffHeap" (.get_assigned_memoffheap topo-info)
-     "assignedTotalMem" (+ (.get_assigned_memonheap topo-info) (.get_assigned_memoffheap topo-info))
-     "assignedCpu" (.get_assigned_cpu topo-info)
-     "topologyStats" topo-stats
-     "spouts" (map (partial comp-agg-stats-json id secure?)
-                   (.get_id_to_spout_agg_stats topo-info))
-     "bolts" (map (partial comp-agg-stats-json id secure?)
-                  (.get_id_to_bolt_agg_stats topo-info))
-     "configuration" (.get_topology_conf topo-info)
-     "debug" (or debugEnabled false)
-     "samplingPct" (or samplingPct 10)
-     "replicationCount" (.get_replication_count topo-info)}))
-
-(defn exec-host-port
-  [executors]
-  (for [^ExecutorSummary e executors]
-    {"host" (.get_host e)
-     "port" (.get_port e)}))
-
-(defn worker-host-port
-  "Get the set of all worker host/ports"
-  [id]
-  (thrift/with-configured-nimbus-connection nimbus
-    (distinct (exec-host-port (.get_executors (get-topology-info nimbus id))))))
-
-(defn topology-page [id window include-sys? user secure?]
-  (thrift/with-configured-nimbus-connection nimbus
-    (let [window (if window window ":all-time")
-          window-hint (window-hint window)
-          topo-page-info (.getTopologyPageInfo ^Nimbus$Client nimbus
-                                               id
-                                               window
-                                               include-sys?)
-          topology-conf (from-json (.get_topology_conf topo-page-info))
-          msg-timeout (topology-conf TOPOLOGY-MESSAGE-TIMEOUT-SECS)]
-      (merge
-       (unpack-topology-page-info topo-page-info window secure?)
-       {"user" user
-        "window" window
-        "windowHint" window-hint
-        "msgTimeout" msg-timeout
-        "configuration" topology-conf
-        "visualizationTable" []
-        "schedulerDisplayResource" (*STORM-CONF* Config/SCHEDULER_DISPLAY_RESOURCE)}))))
-
-(defn component-errors
-  [errors-list topology-id secure?]
-  (let [errors (->> errors-list
-                    (sort-by #(.get_error_time_secs ^ErrorInfo %))
-                    reverse)]
-    {"componentErrors"
-     (for [^ErrorInfo e errors]
-       {"time" (* 1000 (long (.get_error_time_secs e)))
-        "errorHost" (.get_host e)
-        "errorPort"  (.get_port e)
-        "errorWorkerLogLink"  (worker-log-link (.get_host e)
-                                               (.get_port e)
-                                               topology-id
-                                               secure?)
-        "errorLapsedSecs" (get-error-time e)
-        "error" (.get_error e)})}))
-
-(defmulti unpack-comp-agg-stat
-  (fn [[_ ^ComponentAggregateStats s]] (.get_type s)))
-
-(defmethod unpack-comp-agg-stat ComponentType/BOLT
-  [[window ^ComponentAggregateStats s]]
-  (let [^CommonAggregateStats comm-s (.get_common_stats s)
-        ^SpecificAggregateStats spec-s (.get_specific_stats s)
-        ^BoltAggregateStats bolt-s (.get_bolt spec-s)]
-    {"window" window
-     "windowPretty" (window-hint window)
-     "emitted" (.get_emitted comm-s)
-     "transferred" (.get_transferred comm-s)
-     "acked" (.get_acked comm-s)
-     "failed" (.get_failed comm-s)
-     "executeLatency" (float-str (.get_execute_latency_ms bolt-s))
-     "processLatency"  (float-str (.get_process_latency_ms bolt-s))
-     "executed" (.get_executed bolt-s)
-     "capacity" (float-str (.get_capacity bolt-s))}))
-
-(defmethod unpack-comp-agg-stat ComponentType/SPOUT
-  [[window ^ComponentAggregateStats s]]
-  (let [^CommonAggregateStats comm-s (.get_common_stats s)
-        ^SpecificAggregateStats spec-s (.get_specific_stats s)
-        ^SpoutAggregateStats spout-s (.get_spout spec-s)]
-    {"window" window
-     "windowPretty" (window-hint window)
-     "emitted" (.get_emitted comm-s)
-     "transferred" (.get_transferred comm-s)
-     "acked" (.get_acked comm-s)
-     "failed" (.get_failed comm-s)
-     "completeLatency" (float-str (.get_complete_latency_ms spout-s))}))
-
-(defn- unpack-bolt-input-stat
-  [[^GlobalStreamId s ^ComponentAggregateStats stats]]
-  (let [^SpecificAggregateStats sas (.get_specific_stats stats)
-        ^BoltAggregateStats bas (.get_bolt sas)
-        ^CommonAggregateStats cas (.get_common_stats stats)
-        comp-id (.get_componentId s)]
-    {"component" comp-id
-     "encodedComponentId" (url-encode comp-id)
-     "stream" (.get_streamId s)
-     "executeLatency" (float-str (.get_execute_latency_ms bas))
-     "processLatency" (float-str (.get_process_latency_ms bas))
-     "executed" (nil-to-zero (.get_executed bas))
-     "acked" (nil-to-zero (.get_acked cas))
-     "failed" (nil-to-zero (.get_failed cas))}))
-
-(defmulti unpack-comp-output-stat
-  (fn [[_ ^ComponentAggregateStats s]] (.get_type s)))
-
-(defmethod unpack-comp-output-stat ComponentType/BOLT
-  [[stream-id ^ComponentAggregateStats stats]]
-  (let [^CommonAggregateStats cas (.get_common_stats stats)]
-    {"stream" stream-id
-     "emitted" (nil-to-zero (.get_emitted cas))
-     "transferred" (nil-to-zero (.get_transferred cas))}))
-
-(defmethod unpack-comp-output-stat ComponentType/SPOUT
-  [[stream-id ^ComponentAggregateStats stats]]
-  (let [^CommonAggregateStats cas (.get_common_stats stats)
-        ^SpecificAggregateStats spec-s (.get_specific_stats stats)
-        ^SpoutAggregateStats spout-s (.get_spout spec-s)]
-    {"stream" stream-id
-     "emitted" (nil-to-zero (.get_emitted cas))
-     "transferred" (nil-to-zero (.get_transferred cas))
-     "completeLatency" (float-str (.get_complete_latency_ms spout-s))
-     "acked" (nil-to-zero (.get_acked cas))
-     "failed" (nil-to-zero (.get_failed cas))}))
-
-(defmulti unpack-comp-exec-stat
-  (fn [_ _ ^ExecutorAggregateStats eas] (.get_type (.get_stats eas))))
-
-(defmethod unpack-comp-exec-stat ComponentType/BOLT
-  [topology-id secure? ^ExecutorAggregateStats eas]
-  (let [^ExecutorSummary summ (.get_exec_summary eas)
-        ^ExecutorInfo info (.get_executor_info summ)
-        ^ComponentAggregateStats stats (.get_stats eas)
-        ^SpecificAggregateStats ss (.get_specific_stats stats)
-        ^BoltAggregateStats bas (.get_bolt ss)
-        ^CommonAggregateStats cas (.get_common_stats stats)
-        host (.get_host summ)
-        port (.get_port summ)
-        exec-id (pretty-executor-info info)
-        uptime (.get_uptime_secs summ)]
-    {"id" exec-id
-     "encodedId" (url-encode exec-id)
-     "uptime" (pretty-uptime-sec uptime)
-     "uptimeSeconds" uptime
-     "host" host
-     "port" port
-     "emitted" (nil-to-zero (.get_emitted cas))
-     "transferred" (nil-to-zero (.get_transferred cas))
-     "capacity" (float-str (nil-to-zero (.get_capacity bas)))
-     "executeLatency" (float-str (.get_execute_latency_ms bas))
-     "executed" (nil-to-zero (.get_executed bas))
-     "processLatency" (float-str (.get_process_latency_ms bas))
-     "acked" (nil-to-zero (.get_acked cas))
-     "failed" (nil-to-zero (.get_failed cas))
-     "workerLogLink" (worker-log-link host port topology-id secure?)}))
-
-(defmethod unpack-comp-exec-stat ComponentType/SPOUT
-  [topology-id secure? ^ExecutorAggregateStats eas]
-  (let [^ExecutorSummary summ (.get_exec_summary eas)
-        ^ExecutorInfo info (.get_executor_info summ)
-        ^ComponentAggregateStats stats (.get_stats eas)
-        ^SpecificAggregateStats ss (.get_specific_stats stats)
-        ^SpoutAggregateStats sas (.get_spout ss)
-        ^CommonAggregateStats cas (.get_common_stats stats)
-        host (.get_host summ)
-        port (.get_port summ)
-        exec-id (pretty-executor-info info)
-        uptime (.get_uptime_secs summ)]
-    {"id" exec-id
-     "encodedId" (url-encode exec-id)
-     "uptime" (pretty-uptime-sec uptime)
-     "uptimeSeconds" uptime
-     "host" host
-     "port" port
-     "emitted" (nil-to-zero (.get_emitted cas))
-     "transferred" (nil-to-zero (.get_transferred cas))
-     "completeLatency" (float-str (.get_complete_latency_ms sas))
-     "acked" (nil-to-zero (.get_acked cas))
-     "failed" (nil-to-zero (.get_failed cas))
-     "workerLogLink" (worker-log-link host port topology-id secure?)}))
-
-(defmulti unpack-component-page-info
-  "Unpacks component-specific info to clojure data structures"
-  (fn [^ComponentPageInfo info & _]
-    (.get_component_type info)))
-
-(defmethod unpack-component-page-info ComponentType/BOLT
-  [^ComponentPageInfo info topology-id window include-sys? secure?]
-  (merge
-    {"boltStats" (map unpack-comp-agg-stat (.get_window_to_stats info))
-     "inputStats" (map unpack-bolt-input-stat (.get_gsid_to_input_stats info))
-     "outputStats" (map unpack-comp-output-stat (.get_sid_to_output_stats info))
-     "executorStats" (map (partial unpack-comp-exec-stat topology-id secure?)
-                          (.get_exec_stats info))}
-    (-> info .get_errors (component-errors topology-id secure?))))
-
-(defmethod unpack-component-page-info ComponentType/SPOUT
-  [^ComponentPageInfo info topology-id window include-sys? secure?]
-  (merge
-    {"spoutSummary" (map unpack-comp-agg-stat (.get_window_to_stats info))
-     "outputStats" (map unpack-comp-output-stat (.get_sid_to_output_stats info))
-     "executorStats" (map (partial unpack-comp-exec-stat topology-id secure?)
-                          (.get_exec_stats info))}
-    (-> info .get_errors (component-errors topology-id secure?))))
-
-(defn get-active-profile-actions
-  [nimbus topology-id component]
-  (let [profile-actions  (.getComponentPendingProfileActions nimbus
-                                               topology-id
-                                               component
-                                 ProfileAction/JPROFILE_STOP)
-        latest-profile-actions (map clojurify-profile-request profile-actions)
-        active-actions (map (fn [profile-action]
-                              {"host" (:host profile-action)
-                               "port" (str (:port profile-action))
-                               "dumplink" (worker-dump-link (:host profile-action) (str (:port profile-action)) topology-id)
-                               "timestamp" (str (- (:timestamp profile-action) (System/currentTimeMillis)))})
-                            latest-profile-actions)]
-    (log-message "Latest-active actions are: " (pr active-actions))
-    active-actions))
-
-(defn component-page
-  [topology-id component window include-sys? user secure?]
-  (thrift/with-configured-nimbus-connection nimbus
-    (let [window (or window ":all-time")
-          window-hint (window-hint window)
-          comp-page-info (.getComponentPageInfo ^Nimbus$Client nimbus
-                                                topology-id
-                                                component
-                                                window
-                                                include-sys?)
-          topology-conf (from-json (.getTopologyConf ^Nimbus$Client nimbus
-                                                     topology-id))
-          msg-timeout (topology-conf TOPOLOGY-MESSAGE-TIMEOUT-SECS)
-          [debugEnabled
-           samplingPct] (if-let [debug-opts (.get_debug_options comp-page-info)]
-                          [(.is_enable debug-opts)
-                           (.get_samplingpct debug-opts)])]
-      (assoc
-       (unpack-component-page-info comp-page-info
-                                   topology-id
-                                   window
-                                   include-sys?
-                                   secure?)
-       "user" user
-       "id" component
-       "encodedId" (url-encode component)
-       "name" (.get_topology_name comp-page-info)
-       "executors" (.get_num_executors comp-page-info)
-       "tasks" (.get_num_tasks comp-page-info)
-       "topologyId" topology-id
-       "topologyStatus" (.get_topology_status comp-page-info)
-       "encodedTopologyId" (url-encode topology-id)
-       "window" window
-       "componentType" (-> comp-page-info .get_component_type str lower-case)
-       "windowHint" window-hint
-       "debug" (or debugEnabled false)
-       "samplingPct" (or samplingPct 10)
-       "eventLogLink" (event-log-link topology-id
-                                      component
-                                      (.get_eventlog_host comp-page-info)
-                                      (.get_eventlog_port comp-page-info)
-                                      secure?)
-       "profileActionEnabled" (*STORM-CONF* WORKER-PROFILER-ENABLED)
-       "profilerActive" (if (*STORM-CONF* WORKER-PROFILER-ENABLED)
-                          (get-active-profile-actions nimbus topology-id component)
-                          [])))))
-    
-(defn- level-to-dict [level]
-  (if level
-    (let [timeout (.get_reset_log_level_timeout_secs level)
-          timeout-epoch (.get_reset_log_level_timeout_epoch level)
-          target-level (.get_target_log_level level)
-          reset-level (.get_reset_log_level level)]
-          {"target_level" (.toString (Level/toLevel target-level))
-           "reset_level" (.toString (Level/toLevel reset-level))
-           "timeout" timeout
-           "timeout_epoch" timeout-epoch})))
-
-(defn log-config [topology-id]
-  (thrift/with-configured-nimbus-connection
-    nimbus
-    (let [log-config (.getLogConfig ^Nimbus$Client nimbus topology-id)
-          named-logger-levels (into {}
-                                (for [[key val] (.get_named_logger_level log-config)]
-                                  [(str key) (level-to-dict val)]))]
-      {"namedLoggerLevels" named-logger-levels})))
-
-(defn topology-config [topology-id]
-  (thrift/with-configured-nimbus-connection nimbus
-    (from-json (.getTopologyConf ^Nimbus$Client nimbus topology-id))))
-
-(defn topology-op-response [topology-id op]
-  {"topologyOperation" op,
-   "topologyId" topology-id,
-   "status" "success"
-   })
-
-(defn component-op-response [topology-id component-id op]
-  {"topologyOperation" op,
-   "topologyId" topology-id,
-   "componentId" component-id,
-   "status" "success"
-   })
-
-(defn check-include-sys?
-  [sys?]
-  (if (or (nil? sys?) (= "false" sys?)) false true))
-
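A quick sketch of how check-include-sys? interprets the ?sys= query parameter, hand-evaluated from the definition above (illustrative only):

    (check-include-sys? nil)     ;; => false  ; parameter absent
    (check-include-sys? "false") ;; => false
    (check-include-sys? "true")  ;; => true   ; any value other than "false" enables system streams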
-(def http-creds-handler (AuthUtils/GetUiHttpCredentialsPlugin *STORM-CONF*))
-
-(defn populate-context!
-  "Populate the Storm RequestContext from an servlet-request. This should be called in each handler"
-  [servlet-request]
-    (when http-creds-handler
-      (.populateContext http-creds-handler (ReqContext/context) servlet-request)))
-
-(defn get-user-name
-  [servlet-request]
-  (.getUserName http-creds-handler servlet-request))
-
-(defroutes main-routes
-  (GET "/api/v1/cluster/configuration" [& m]
-    (mark! ui:num-cluster-configuration-http-requests)
-    (json-response (cluster-configuration)
-                   (:callback m) :serialize-fn identity))
-  (GET "/api/v1/cluster/summary" [:as {:keys [cookies servlet-request]} & m]
-    (mark! ui:num-cluster-summary-http-requests)
-    (populate-context! servlet-request)
-    (assert-authorized-user "getClusterInfo")
-    (let [user (get-user-name servlet-request)]
-      (json-response (assoc (cluster-summary user)
-                          "bugtracker-url" (*STORM-CONF* UI-PROJECT-BUGTRACKER-URL)
-                          "central-log-url" (*STORM-CONF* UI-CENTRAL-LOGGING-URL)) (:callback m))))
-  (GET "/api/v1/nimbus/summary" [:as {:keys [cookies servlet-request]} & m]
-    (mark! ui:num-nimbus-summary-http-requests)
-    (populate-context! servlet-request)
-    (assert-authorized-user "getClusterInfo")
-    (json-response (nimbus-summary) (:callback m)))
-  (GET "/api/v1/history/summary" [:as {:keys [cookies servlet-request]} & m]
-    (let [user (.getUserName http-creds-handler servlet-request)]
-      (json-response (topology-history-info user) (:callback m))))
-  (GET "/api/v1/supervisor/summary" [:as {:keys [cookies servlet-request]} & m]
-    (mark! ui:num-supervisor-summary-http-requests)
-    (populate-context! servlet-request)
-    (assert-authorized-user "getClusterInfo")
-    (json-response (assoc (supervisor-summary)
-                     "logviewerPort" (*STORM-CONF* LOGVIEWER-PORT)) (:callback m)))
-  (GET "/api/v1/topology/summary" [:as {:keys [cookies servlet-request]} & m]
-    (mark! ui:num-all-topologies-summary-http-requests)
-    (populate-context! servlet-request)
-    (assert-authorized-user "getClusterInfo")
-    (json-response (all-topologies-summary) (:callback m)))
-  (GET  "/api/v1/topology-workers/:id" [:as {:keys [cookies servlet-request]} id & m]
-    (let [id (url-decode id)]
-      (json-response {"hostPortList" (worker-host-port id)
-                      "logviewerPort" (*STORM-CONF* LOGVIEWER-PORT)} (:callback m))))
-  (GET "/api/v1/topology/:id" [:as {:keys [cookies servlet-request scheme]} id & m]
-    (mark! ui:num-topology-page-http-requests)
-    (populate-context! servlet-request)
-    (assert-authorized-user "getTopology" (topology-config id))
-    (let [user (get-user-name servlet-request)]
-      (json-response (topology-page id (:window m) (check-include-sys? (:sys m)) user (= scheme :https)) (:callback m))))
-  (GET "/api/v1/topology/:id/visualization-init" [:as {:keys [cookies servlet-request]} id & m]
-    (mark! ui:num-build-visualization-http-requests)
-    (populate-context! servlet-request)
-    (assert-authorized-user "getTopology" (topology-config id))
-    (json-response (build-visualization id (:window m) (check-include-sys? (:sys m))) (:callback m)))
-  (GET "/api/v1/topology/:id/visualization" [:as {:keys [cookies servlet-request]} id & m]
-    (mark! ui:num-mk-visualization-data-http-requests)
-    (populate-context! servlet-request)
-    (assert-authorized-user "getTopology" (topology-config id))
-    (json-response (mk-visualization-data id (:window m) (check-include-sys? (:sys m))) (:callback m)))
-  (GET "/api/v1/topology/:id/component/:component" [:as {:keys [cookies servlet-request scheme]} id component & m]
-    (mark! ui:num-component-page-http-requests)
-    (populate-context! servlet-request)
-    (assert-authorized-user "getTopology" (topology-config id))
-    (let [user (get-user-name servlet-request)]
-      (json-response
-          (component-page id component (:window m) (check-include-sys? (:sys m)) user (= scheme :https))
-          (:callback m))))
-  (GET "/api/v1/topology/:id/logconfig" [:as {:keys [cookies servlet-request]} id & m]
-    (mark! ui:num-log-config-http-requests)
-    (populate-context! servlet-request)
-    (assert-authorized-user "getTopology" (topology-config id))
-       (json-response (log-config id) (:callback m)))
-  (POST "/api/v1/topology/:id/activate" [:as {:keys [cookies servlet-request]} id & m]
-    (mark! ui:num-activate-topology-http-requests)
-    (populate-context! servlet-request)
-    (assert-authorized-user "activate" (topology-config id))
-    (thrift/with-configured-nimbus-connection nimbus
-       (let [tplg (->> (doto
-                        (GetInfoOptions.)
-                        (.set_num_err_choice NumErrorsChoice/NONE))
-                      (.getTopologyInfoWithOpts ^Nimbus$Client nimbus id))
-            name (.get_name tplg)]
-        (.activate nimbus name)
-        (log-message "Activating topology '" name "'")))
-    (json-response (topology-op-response id "activate") (m "callback")))
-  (POST "/api/v1/topology/:id/deactivate" [:as {:keys [cookies servlet-request]} id & m]
-    (mark! ui:num-deactivate-topology-http-requests)
-    (populate-context! servlet-request)
-    (assert-authorized-user "deactivate" (topology-config id))
-    (thrift/with-configured-nimbus-connection nimbus
-        (let [tplg (->> (doto
-                        (GetInfoOptions.)
-                        (.set_num_err_choice NumErrorsChoice/NONE))
-                      (.getTopologyInfoWithOpts ^Nimbus$Client nimbus id))
-            name (.get_name tplg)]
-        (.deactivate nimbus name)
-        (log-message "Deactivating topology '" name "'")))
-    (json-response (topology-op-response id "deactivate") (m "callback")))
-  (POST "/api/v1/topology/:id/debug/:action/:spct" [:as {:keys [cookies servlet-request]} id action spct & m]
-    (mark! ui:num-debug-topology-http-requests)
-    (populate-context! servlet-request)
-    (assert-authorized-user "debug" (topology-config id))
-    (thrift/with-configured-nimbus-connection nimbus
-        (let [tplg (->> (doto
-                        (GetInfoOptions.)
-                        (.set_num_err_choice NumErrorsChoice/NONE))
-                   (.getTopologyInfoWithOpts ^Nimbus$Client nimbus id))
-            name (.get_name tplg)
-            enable? (= "enable" action)]
-        (.debug nimbus name "" enable? (Integer/parseInt spct))
-        (log-message "Debug topology [" name "] action [" action "] sampling pct [" spct "]")))
-     (json-response (topology-op-response id (str "debug/" action)) (m "callback")))
-  (POST "/api/v1/topology/:id/component/:component/debug/:action/:spct" [:as {:keys [cookies servlet-request]} id component action spct & m]
-    (mark! ui:num-component-op-response-http-requests)
-    (populate-context! servlet-request)
-    (assert-authorized-user "debug" (topology-config id))
-    (thrift/with-configured-nimbus-connection nimbus
-      (let [tplg (->> (doto
-                        (GetInfoOptions.)
-                        (.set_num_err_choice NumErrorsChoice/NONE))
-                   (.getTopologyInfoWithOpts ^Nimbus$Client nimbus id))
-            name (.get_name tplg)
-            enable? (= "enable" action)]
-        (.debug nimbus name component enable? (Integer/parseInt spct))
-        (log-message "Debug topology [" name "] component [" component "] action [" action "] sampling pct [" spct "]")))
-    (json-response (component-op-response id component (str "/debug/" action)) (m "callback")))
-  (POST "/api/v1/topology/:id/rebalance/:wait-time" [:as {:keys [cookies servlet-request]} id wait-time & m]
-    (mark! ui:num-topology-op-response-http-requests)
-    (populate-context! servlet-request)
-    (assert-authorized-user "rebalance" (topology-config id))
-    (thrift/with-configured-nimbus-connection nimbus
-      (let [tplg (->> (doto
-                        (GetInfoOptions.)
-                        (.set_num_err_choice NumErrorsChoice/NONE))
-                      (.getTopologyInfoWithOpts ^Nimbus$Client nimbus id))
-            name (.get_name tplg)
-            rebalance-options (m "rebalanceOptions")
-            options (RebalanceOptions.)]
-        (.set_wait_secs options (Integer/parseInt wait-time))
-        (if (and (not-nil? rebalance-options) (contains? rebalance-options "numWorkers"))
-          (.set_num_workers options (Integer/parseInt (.toString (rebalance-options "numWorkers")))))
-        (if (and (not-nil? rebalance-options) (contains? rebalance-options "executors"))
-          (doseq [keyval (rebalance-options "executors")]
-            (.put_to_num_executors options (key keyval) (Integer/parseInt (.toString (val keyval))))))
-        (.rebalance nimbus name options)
-        (log-message "Rebalancing topology '" name "' with wait time: " wait-time " secs")))
-    (json-response (topology-op-response id "rebalance") (m "callback")))
-  (POST "/api/v1/topology/:id/kill/:wait-time" [:as {:keys [cookies servlet-request]} id wait-time & m]
-    (mark! ui:num-topology-op-response-http-requests)
-    (populate-context! servlet-request)
-    (assert-authorized-user "killTopology" (topology-config id))
-    (thrift/with-configured-nimbus-connection nimbus
-      (let [tplg (->> (doto
-                        (GetInfoOptions.)
-                        (.set_num_err_choice NumErrorsChoice/NONE))
-                      (.getTopologyInfoWithOpts ^Nimbus$Client nimbus id))
-            name (.get_name tplg)
-            options (KillOptions.)]
-        (.set_wait_secs options (Integer/parseInt wait-time))
-        (.killTopologyWithOpts nimbus name options)
-        (log-message "Killing topology '" name "' with wait time: " wait-time " secs")))
-    (json-response (topology-op-response id "kill") (m "callback")))
-  (POST "/api/v1/topology/:id/logconfig" [:as {:keys [cookies servlet-request]} id namedLoggerLevels & m]
-    (mark! ui:num-topology-op-response-http-requests)
-    (populate-context! servlet-request)
-    (assert-authorized-user "setLogConfig" (topology-config id))
-    (thrift/with-configured-nimbus-connection
-      nimbus
-      (let [new-log-config (LogConfig.)]
-        (doseq [[key level] namedLoggerLevels]
-            (let [logger-name (str key)
-                  target-level (.get level "target_level")
-                  timeout (or (.get level "timeout") 0)
-                  named-logger-level (LogLevel.)]
-              ;; if target-level is nil, do not set it; the user wants to clear the level
-              (log-message "The target level for " logger-name " is " target-level)
-              (if (nil? target-level)
-                (do
-                  (.set_action named-logger-level LogLevelAction/REMOVE)
-                  (.unset_target_log_level named-logger-level))
-                (do
-                  (.set_action named-logger-level LogLevelAction/UPDATE)
-                  ;; the toLevel here ensures the string we get is valid
-                  (.set_target_log_level named-logger-level (.name (Level/toLevel target-level)))
-                  (.set_reset_log_level_timeout_secs named-logger-level timeout)))
-              (log-message "Adding this " logger-name " " named-logger-level " to " new-log-config)
-              (.put_to_named_logger_level new-log-config logger-name named-logger-level)))
-        (log-message "Setting topology " id " log config " new-log-config)
-        (.setLogConfig nimbus id new-log-config)
-        (json-response (log-config id) (m "callback")))))
-
-  (GET "/api/v1/topology/:id/profiling/start/:host-port/:timeout"
-       [:as {:keys [servlet-request]} id host-port timeout & m]
-       (thrift/with-configured-nimbus-connection nimbus
-         (assert-authorized-user "setWorkerProfiler" (topology-config id))
-         (assert-authorized-profiler-action "start")
-         (let [[host, port] (split host-port #":")
-               nodeinfo (NodeInfo. host (set [(Long. port)]))
-               timestamp (+ (System/currentTimeMillis) (* 60000 (Long. timeout)))
-               request (ProfileRequest. nodeinfo
-                                        ProfileAction/JPROFILE_STOP)]
-           (.set_time_stamp request timestamp)
-           (.setWorkerProfiler nimbus id request)
-           (json-response {"status" "ok"
-                           "id" host-port
-                           "timeout" timeout
-                           "dumplink" (worker-dump-link
-                                       host
-                                       port
-                                       id)}
-                          (m "callback")))))
-
-  (GET "/api/v1/topology/:id/profiling/stop/:host-port"
-       [:as {:keys [servlet-request]} id host-port & m]
-       (thrift/with-configured-nimbus-connection nimbus
-         (assert-authorized-user "setWorkerProfiler" (topology-config id))
-         (assert-authorized-profiler-action "stop")
-         (let [[host, port] (split host-port #":")
-               nodeinfo (NodeInfo. host (set [(Long. port)]))
-               timestamp 0
-               request (ProfileRequest. nodeinfo
-                                        ProfileAction/JPROFILE_STOP)]
-           (.set_time_stamp request timestamp)
-           (.setWorkerProfiler nimbus id request)
-           (json-response {"status" "ok"
-                           "id" host-port}
-                          (m "callback")))))
-  
-  (GET "/api/v1/topology/:id/profiling/dumpprofile/:host-port"
-       [:as {:keys [servlet-request]} id host-port & m]
-       (thrift/with-configured-nimbus-connection nimbus
-         (assert-authorized-user "setWorkerProfiler" (topology-config id))
-         (assert-authorized-profiler-action "dumpprofile")
-         (let [[host, port] (split host-port #":")
-               nodeinfo (NodeInfo. host (set [(Long. port)]))
-               timestamp (System/currentTimeMillis)
-               request (ProfileRequest. nodeinfo
-                                        ProfileAction/JPROFILE_DUMP)]
-           (.set_time_stamp request timestamp)
-           (.setWorkerProfiler nimbus id request)
-           (json-response {"status" "ok"
-                           "id" host-port}
-                          (m "callback")))))
-
-  (GET "/api/v1/topology/:id/profiling/dumpjstack/:host-port"
-       [:as {:keys [servlet-request]} id host-port & m]
-       (thrift/with-configured-nimbus-connection nimbus
-         (assert-authorized-user "setWorkerProfiler" (topology-config id))
-         (assert-authorized-profiler-action "dumpjstack")
-         (let [[host, port] (split host-port #":")
-               nodeinfo (NodeInfo. host (set [(Long. port)]))
-               timestamp (System/currentTimeMillis)
-               request (ProfileRequest. nodeinfo
-                                        ProfileAction/JSTACK_DUMP)]
-           (.set_time_stamp request timestamp)
-           (.setWorkerProfiler nimbus id request)
-           (json-response {"status" "ok"
-                           "id" host-port}
-                          (m "callback")))))
-
-  (GET "/api/v1/topology/:id/profiling/restartworker/:host-port"
-       [:as {:keys [servlet-request]} id host-port & m]
-       (thrift/with-configured-nimbus-connection nimbus
-         (assert-authorized-user "setWorkerProfiler" (topology-config id))
-         (assert-authorized-profiler-action "restartworker")
-         (let [[host, port] (split host-port #":")
-               nodeinfo (NodeInfo. host (set [(Long. port)]))
-               timestamp (System/currentTimeMillis)
-               request (ProfileRequest. nodeinfo
-                                        ProfileAction/JVM_RESTART)]
-           (.set_time_stamp request timestamp)
-           (.setWorkerProfiler nimbus id request)
-           (json-response {"status" "ok"
-                           "id" host-port}
-                          (m "callback")))))
-       
-  (GET "/api/v1/topology/:id/profiling/dumpheap/:host-port"
-       [:as {:keys [servlet-request]} id host-port & m]
-       (thrift/with-configured-nimbus-connection nimbus
-         (assert-authorized-user "setWorkerProfiler" (topology-config id))
-         (assert-authorized-profiler-action "dumpheap")
-         (let [[host, port] (split host-port #":")
-               nodeinfo (NodeInfo. host (set [(Long. port)]))
-               timestamp (System/currentTimeMillis)
-               request (ProfileRequest. nodeinfo
-                                        ProfileAction/JMAP_DUMP)]
-           (.set_time_stamp request timestamp)
-           (.setWorkerProfiler nimbus id request)
-           (json-response {"status" "ok"
-                           "id" host-port}
-                          (m "callback")))))
-  
-  (GET "/" [:as {cookies :cookies}]
-    (mark! ui:num-main-page-http-requests)
-    (resp/redirect "/index.html"))
-  (route/resources "/")
-  (route/not-found "Page not found"))
-
-(defn catch-errors
-  [handler]
-  (fn [request]
-    (try
-      (handler request)
-      (catch Exception ex
-        (json-response (exception->json ex) ((:query-params request) "callback") :status 500)))))
-
-(def app
-  (handler/site (-> main-routes
-                    (wrap-json-params)
-                    (wrap-multipart-params)
-                    (wrap-reload '[backtype.storm.ui.core])
-                    requests-middleware
-                    catch-errors)))
-
-(defn start-server!
-  []
-  (try
-    (let [conf *STORM-CONF*
-          header-buffer-size (int (.get conf UI-HEADER-BUFFER-BYTES))
-          filters-confs [{:filter-class (conf UI-FILTER)
-                          :filter-params (conf UI-FILTER-PARAMS)}]
-          https-port (if (not-nil? (conf UI-HTTPS-PORT)) (conf UI-HTTPS-PORT) 0)
-          https-ks-path (conf UI-HTTPS-KEYSTORE-PATH)
-          https-ks-password (conf UI-HTTPS-KEYSTORE-PASSWORD)
-          https-ks-type (conf UI-HTTPS-KEYSTORE-TYPE)
-          https-key-password (conf UI-HTTPS-KEY-PASSWORD)
-          https-ts-path (conf UI-HTTPS-TRUSTSTORE-PATH)
-          https-ts-password (conf UI-HTTPS-TRUSTSTORE-PASSWORD)
-          https-ts-type (conf UI-HTTPS-TRUSTSTORE-TYPE)
-          https-want-client-auth (conf UI-HTTPS-WANT-CLIENT-AUTH)
-          https-need-client-auth (conf UI-HTTPS-NEED-CLIENT-AUTH)]
-      (start-metrics-reporters)
-      (storm-run-jetty {:port (conf UI-PORT)
-                        :host (conf UI-HOST)
-                        :https-port https-port
-                        :configurator (fn [server]
-                                        (config-ssl server
-                                                    https-port
-                                                    https-ks-path
-                                                    https-ks-password
-                                                    https-ks-type
-                                                    https-key-password
-                                                    https-ts-path
-                                                    https-ts-password
-                                                    https-ts-type
-                                                    https-need-client-auth
-                                                    https-want-client-auth)
-                                        (doseq [connector (.getConnectors server)]
-                                          (.setRequestHeaderSize connector header-buffer-size))
-                                        (config-filter server app filters-confs))}))
-   (catch Exception ex
-     (log-error ex))))
-
-(defn -main
-  []
-  (log-message "Starting ui server for storm version '" STORM-VERSION "'")
-  (start-server!))

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/ui/helpers.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/ui/helpers.clj b/storm-core/src/clj/backtype/storm/ui/helpers.clj
deleted file mode 100644
index e0db5c8..0000000
--- a/storm-core/src/clj/backtype/storm/ui/helpers.clj
+++ /dev/null
@@ -1,240 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.ui.helpers
-  (:use compojure.core)
-  (:use [hiccup core page-helpers])
-  (:use [clojure
-         [string :only [blank? join]]
-         [walk :only [keywordize-keys]]])
-  (:use [backtype.storm config log])
-  (:use [backtype.storm.util :only [clojurify-structure uuid defnk to-json url-encode not-nil?]])
-  (:use [clj-time coerce format])
-  (:import [backtype.storm.generated ExecutorInfo ExecutorSummary])
-  (:import [backtype.storm.logging.filters AccessLoggingFilter])
-  (:import [java.util EnumSet])
-  (:import [org.eclipse.jetty.server Server]
-           [org.eclipse.jetty.server.nio SelectChannelConnector]
-           [org.eclipse.jetty.server.ssl SslSocketConnector]
-           [org.eclipse.jetty.servlet ServletHolder FilterMapping]
-	   [org.eclipse.jetty.util.ssl SslContextFactory]
-           [org.eclipse.jetty.server DispatcherType]
-           [org.eclipse.jetty.servlets CrossOriginFilter])
-  (:require [ring.util servlet])
-  (:require [compojure.route :as route]
-            [compojure.handler :as handler])
-  (:require [metrics.meters :refer [defmeter mark!]]))
-
-(defmeter num-web-requests)
-(defn requests-middleware
-  "Coda Hale metric for counting the number of web requests."
-  [handler]
-  (fn [req]
-    (mark! num-web-requests)
-    (handler req)))
-
-(defn split-divide [val divider]
-  [(Integer. (int (/ val divider))) (mod val divider)]
-  )
-
-(def PRETTY-SEC-DIVIDERS
-     [["s" 60]
-      ["m" 60]
-      ["h" 24]
-      ["d" nil]])
-
-(def PRETTY-MS-DIVIDERS
-     (cons ["ms" 1000]
-           PRETTY-SEC-DIVIDERS))
-
-(defn pretty-uptime-str* [val dividers]
-  (let [val (if (string? val) (Integer/parseInt val) val)
-        vals (reduce (fn [[state val] [_ divider]]
-                       (if (pos? val)
-                         (let [[divided mod] (if divider
-                                               (split-divide val divider)
-                                               [nil val])]
-                           [(concat state [mod])
-                            divided]
-                           )
-                         [state val]
-                         ))
-                     [[] val]
-                     dividers)
-        strs (->>
-              (first vals)
-              (map
-               (fn [[suffix _] val]
-                 (str val suffix))
-               dividers
-               ))]
-    (join " " (reverse strs))
-    ))
-
-(defn pretty-uptime-sec [secs]
-  (pretty-uptime-str* secs PRETTY-SEC-DIVIDERS))
-
-(defn pretty-uptime-ms [ms]
-  (pretty-uptime-str* ms PRETTY-MS-DIVIDERS))
-
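A hand-evaluated sketch of the uptime formatters above, using the divider tables as defined (the exact strings are illustrative):

    (pretty-uptime-sec 3661)   ;; => "1h 1m 1s"
    (pretty-uptime-ms 61001)   ;; => "1m 1s 1ms"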
-
-(defelem table [headers-map data]
-  [:table
-   [:thead
-    [:tr
-     (for [h headers-map]
-       [:th (if (:text h) [:span (:attr h) (:text h)] h)])
-     ]]
-   [:tbody
-    (for [row data]
-      [:tr
-       (for [col row]
-         [:td col]
-         )]
-      )]
-   ])
-
-(defn url-format [fmt & args]
-  (String/format fmt
-    (to-array (map #(url-encode (str %)) args))))
-
-(defn pretty-executor-info [^ExecutorInfo e]
-  (str "[" (.get_task_start e) "-" (.get_task_end e) "]"))
-
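For example, assuming the Thrift-generated ExecutorInfo all-args constructor takes task_start and task_end in that order:

    (pretty-executor-info (ExecutorInfo. 3 5))   ;; => "[3-5]"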
-(defn unauthorized-user-json
-  [user]
-  {"error" "No Authorization"
-   "errorMessage" (str "User " user " is not authorized.")})
-
-(defn unauthorized-user-html [user]
-  [[:h2 "User '" (escape-html user) "' is not authorized."]])
-
-(defn- mk-ssl-connector [port ks-path ks-password ks-type key-password
-                         ts-path ts-password ts-type need-client-auth want-client-auth]
-  (let [sslContextFactory (doto (SslContextFactory.)
-                            (.setExcludeCipherSuites (into-array String ["SSL_RSA_WITH_RC4_128_MD5" "SSL_RSA_WITH_RC4_128_SHA"]))
-                            (.setExcludeProtocols (into-array String ["SSLv3"]))
-                            (.setAllowRenegotiate false)
-                            (.setKeyStorePath ks-path)
-                            (.setKeyStoreType ks-type)
-                            (.setKeyStorePassword ks-password)
-                            (.setKeyManagerPassword key-password))]
-    (if (and (not-nil? ts-path) (not-nil? ts-password) (not-nil? ts-type))
-      (do
-        (.setTrustStore sslContextFactory ts-path)
-        (.setTrustStoreType sslContextFactory ts-type)
-        (.setTrustStorePassword sslContextFactory ts-password)))
-    (cond
-      need-client-auth (.setNeedClientAuth sslContextFactory true)
-      want-client-auth (.setWantClientAuth sslContextFactory true))
-    (doto (SslSocketConnector. sslContextFactory)
-      (.setPort port))))
-
-
-(defn config-ssl [server port ks-path ks-password ks-type key-password
-                  ts-path ts-password ts-type need-client-auth want-client-auth]
-  (when (> port 0)
-    (.addConnector server (mk-ssl-connector port ks-path ks-password ks-type key-password
-                                            ts-path ts-password ts-type need-client-auth want-client-auth))))
-
-(defn cors-filter-handler
-  []
-  (doto (org.eclipse.jetty.servlet.FilterHolder. (CrossOriginFilter.))
-    (.setInitParameter CrossOriginFilter/ALLOWED_ORIGINS_PARAM "*")
-    (.setInitParameter CrossOriginFilter/ALLOWED_METHODS_PARAM "GET, POST, PUT")
-    (.setInitParameter CrossOriginFilter/ALLOWED_HEADERS_PARAM "X-Requested-With, X-Requested-By, Access-Control-Allow-Origin, Content-Type, Content-Length, Accept, Origin")
-    (.setInitParameter CrossOriginFilter/ACCESS_CONTROL_ALLOW_ORIGIN_HEADER "*")
-    ))
-
-(defn mk-access-logging-filter-handler []
-  (org.eclipse.jetty.servlet.FilterHolder. (AccessLoggingFilter.)))
-
-(defn config-filter [server handler filters-confs]
-  (if filters-confs
-    (let [servlet-holder (ServletHolder.
-                           (ring.util.servlet/servlet handler))
-          context (doto (org.eclipse.jetty.servlet.ServletContextHandler. server "/")
-                    (.addServlet servlet-holder "/"))]
-      (.addFilter context (cors-filter-handler) "/*" (EnumSet/allOf DispatcherType))
-      (doseq [{:keys [filter-name filter-class filter-params]} filters-confs]
-        (if filter-class
-          (let [filter-holder (doto (org.eclipse.jetty.servlet.FilterHolder.)
-                                (.setClassName filter-class)
-                                (.setName (or filter-name filter-class))
-                                (.setInitParameters (or filter-params {})))]
-            (.addFilter context filter-holder "/*" FilterMapping/ALL))))
-      (.addFilter context (mk-access-logging-filter-handler) "/*" (EnumSet/allOf DispatcherType))
-      (.setHandler server context))))
-
-(defn ring-response-from-exception [ex]
-  {:headers {}
-   :status 400
-   :body (.getMessage ex)})
-
-(defn- remove-non-ssl-connectors [server]
-  (doseq [c (.getConnectors server)]
-    (when-not (or (nil? c) (instance? SslSocketConnector c))
-      (.removeConnector server c)
-      ))
-  server)
-
-;; Modified from ring.adapter.jetty 1.3.0
-(defn- jetty-create-server
-  "Construct a Jetty Server instance."
-  [options]
-  (let [connector (doto (SelectChannelConnector.)
-                    (.setPort (options :port 80))
-                    (.setHost (options :host))
-                    (.setMaxIdleTime (options :max-idle-time 200000)))
-        server    (doto (Server.)
-                    (.addConnector connector)
-                    (.setSendDateHeader true))
-        https-port (options :https-port)]
-    (if (and (not-nil? https-port) (> https-port 0)) (remove-non-ssl-connectors server))
-    server))
-
-(defn storm-run-jetty
-  "Modified version of run-jetty
-  Assumes configurator sets handler."
-  [config]
-  {:pre [(:configurator config)]}
-  (let [#^Server s (jetty-create-server (dissoc config :configurator))
-        configurator (:configurator config)]
-    (configurator s)
-    (.start s)))
-
-(defn wrap-json-in-callback [callback response]
-  (str callback "(" response ");"))
-
-(defnk json-response
-  [data callback :serialize-fn to-json :status 200 :headers {}]
-  {:status status
-   :headers (merge {"Cache-Control" "no-cache, no-store"
-                    "Access-Control-Allow-Origin" "*"
-                    "Access-Control-Allow-Headers" "Content-Type, Access-Control-Allow-Headers, Access-Control-Allow-Origin, X-Requested-By, X-Csrf-Token, Authorization, X-Requested-With"}
-              (if (not-nil? callback) {"Content-Type" "application/javascript;charset=utf-8"}
-                {"Content-Type" "application/json;charset=utf-8"})
-              headers)
-   :body (if (not-nil? callback)
-           (wrap-json-in-callback callback (serialize-fn data))
-           (serialize-fn data))})
-
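A small sketch of the JSONP handling above, hand-worked from the definitions in this namespace (illustrative only): with a callback the serialized body is wrapped in a function call, otherwise plain JSON is returned with status 200.

    (wrap-json-in-callback "cb" "{\"ok\":true}")
    ;; => "cb({\"ok\":true});"

    (:status (json-response {"ok" true} nil))   ;; => 200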
-(defn exception->json
-  [ex]
-  {"error" "Internal Server Error"
-   "errorMessage"
-   (let [sw (java.io.StringWriter.)]
-     (.printStackTrace ex (java.io.PrintWriter. sw))
-     (.toString sw))})


http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/stats.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/stats.clj b/storm-core/src/clj/backtype/storm/stats.clj
deleted file mode 100644
index 5f8053e..0000000
--- a/storm-core/src/clj/backtype/storm/stats.clj
+++ /dev/null
@@ -1,1521 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-
-(ns backtype.storm.stats
-  (:import [backtype.storm.generated Nimbus Nimbus$Processor Nimbus$Iface StormTopology ShellComponent
-            NotAliveException AlreadyAliveException InvalidTopologyException GlobalStreamId
-            ClusterSummary TopologyInfo TopologySummary ExecutorInfo ExecutorSummary ExecutorStats
-            ExecutorSpecificStats SpoutStats BoltStats ErrorInfo
-            SupervisorSummary CommonAggregateStats ComponentAggregateStats
-            ComponentPageInfo ComponentType BoltAggregateStats
-            ExecutorAggregateStats SpecificAggregateStats
-            SpoutAggregateStats TopologyPageInfo TopologyStats])
-  (:import [backtype.storm.utils Utils])
-  (:import [backtype.storm.metric.internal MultiCountStatAndMetric MultiLatencyStatAndMetric])
-  (:use [backtype.storm log util])
-  (:use [clojure.math.numeric-tower :only [ceil]]))
-
-(def TEN-MIN-IN-SECONDS (* 10 60))
-
-(def COMMON-FIELDS [:emitted :transferred])
-(defrecord CommonStats [^MultiCountStatAndMetric emitted
-                        ^MultiCountStatAndMetric transferred
-                        rate])
-
-(def BOLT-FIELDS [:acked :failed :process-latencies :executed :execute-latencies])
-;;acked and failed count individual tuples
-(defrecord BoltExecutorStats [^CommonStats common
-                              ^MultiCountStatAndMetric acked
-                              ^MultiCountStatAndMetric failed
-                              ^MultiLatencyStatAndMetric process-latencies
-                              ^MultiCountStatAndMetric executed
-                              ^MultiLatencyStatAndMetric execute-latencies])
-
-(def SPOUT-FIELDS [:acked :failed :complete-latencies])
-;;acked and failed count tuple completion
-(defrecord SpoutExecutorStats [^CommonStats common
-                               ^MultiCountStatAndMetric acked
-                               ^MultiCountStatAndMetric failed
-                               ^MultiLatencyStatAndMetric complete-latencies])
-
-(def NUM-STAT-BUCKETS 20)
-
-(defn- mk-common-stats
-  [rate]
-  (CommonStats.
-    (MultiCountStatAndMetric. NUM-STAT-BUCKETS)
-    (MultiCountStatAndMetric. NUM-STAT-BUCKETS)
-    rate))
-
-(defn mk-bolt-stats
-  [rate]
-  (BoltExecutorStats.
-    (mk-common-stats rate)
-    (MultiCountStatAndMetric. NUM-STAT-BUCKETS)
-    (MultiCountStatAndMetric. NUM-STAT-BUCKETS)
-    (MultiLatencyStatAndMetric. NUM-STAT-BUCKETS)
-    (MultiCountStatAndMetric. NUM-STAT-BUCKETS)
-    (MultiLatencyStatAndMetric. NUM-STAT-BUCKETS)))
-
-(defn mk-spout-stats
-  [rate]
-  (SpoutExecutorStats.
-    (mk-common-stats rate)
-    (MultiCountStatAndMetric. NUM-STAT-BUCKETS)
-    (MultiCountStatAndMetric. NUM-STAT-BUCKETS)
-    (MultiLatencyStatAndMetric. NUM-STAT-BUCKETS)))
-
-(defmacro stats-rate
-  [stats]
-  `(-> ~stats :common :rate))
-
-(defmacro stats-emitted
-  [stats]
-  `(-> ~stats :common :emitted))
-
-(defmacro stats-transferred
-  [stats]
-  `(-> ~stats :common :transferred))
-
-(defmacro stats-executed
-  [stats]
-  `(:executed ~stats))
-
-(defmacro stats-acked
-  [stats]
-  `(:acked ~stats))
-
-(defmacro stats-failed
-  [stats]
-  `(:failed ~stats))
-
-(defmacro stats-execute-latencies
-  [stats]
-  `(:execute-latencies ~stats))
-
-(defmacro stats-process-latencies
-  [stats]
-  `(:process-latencies ~stats))
-
-(defmacro stats-complete-latencies
-  [stats]
-  `(:complete-latencies ~stats))
-
-(defn emitted-tuple!
-  [stats stream]
-  (.incBy ^MultiCountStatAndMetric (stats-emitted stats) ^Object stream ^long (stats-rate stats)))
-
-(defn transferred-tuples!
-  [stats stream amt]
-  (.incBy ^MultiCountStatAndMetric (stats-transferred stats) ^Object stream ^long (* (stats-rate stats) amt)))
-
-(defn bolt-execute-tuple!
-  [^BoltExecutorStats stats component stream latency-ms]
-  (let [key [component stream]
-        ^MultiCountStatAndMetric executed (stats-executed stats)
-        ^MultiLatencyStatAndMetric exec-lat (stats-execute-latencies stats)]
-    (.incBy executed key (stats-rate stats))
-    (.record exec-lat key latency-ms)))
-
-(defn bolt-acked-tuple!
-  [^BoltExecutorStats stats component stream latency-ms]
-  (let [key [component stream]
-        ^MultiCountStatAndMetric acked (stats-acked stats)
-        ^MultiLatencyStatAndMetric process-lat (stats-process-latencies stats)]
-    (.incBy acked key (stats-rate stats))
-    (.record process-lat key latency-ms)))
-
-(defn bolt-failed-tuple!
-  [^BoltExecutorStats stats component stream latency-ms]
-  (let [key [component stream]
-        ^MultiCountStatAndMetric failed (stats-failed stats)]
-    (.incBy failed key (stats-rate stats))))
-
-(defn spout-acked-tuple!
-  [^SpoutExecutorStats stats stream latency-ms]
-  (.incBy ^MultiCountStatAndMetric (stats-acked stats) stream (stats-rate stats))
-  (.record ^MultiLatencyStatAndMetric (stats-complete-latencies stats) stream latency-ms))
-
-(defn spout-failed-tuple!
-  [^SpoutExecutorStats stats stream latency-ms]
-  (.incBy ^MultiCountStatAndMetric (stats-failed stats) stream (stats-rate stats)))
-
-(defn- cleanup-stat! [stat]
-  (.close stat))
-
-(defn- cleanup-common-stats!
-  [^CommonStats stats]
-  (doseq [f COMMON-FIELDS]
-    (cleanup-stat! (f stats))))
-
-(defn cleanup-bolt-stats!
-  [^BoltExecutorStats stats]
-  (cleanup-common-stats! (:common stats))
-  (doseq [f BOLT-FIELDS]
-    (cleanup-stat! (f stats))))
-
-(defn cleanup-spout-stats!
-  [^SpoutExecutorStats stats]
-  (cleanup-common-stats! (:common stats))
-  (doseq [f SPOUT-FIELDS]
-    (cleanup-stat! (f stats))))
-
-(defn- value-stats
-  [stats fields]
-  (into {} (dofor [f fields]
-                  [f (if (instance? MultiCountStatAndMetric (f stats))
-                         (.getTimeCounts ^MultiCountStatAndMetric (f stats))
-                         (.getTimeLatAvg ^MultiLatencyStatAndMetric (f stats)))])))
-
-(defn- value-common-stats
-  [^CommonStats stats]
-  (merge
-    (value-stats stats COMMON-FIELDS)
-    {:rate (:rate stats)}))
-
-(defn value-bolt-stats!
-  [^BoltExecutorStats stats]
-  (cleanup-bolt-stats! stats)
-  (merge (value-common-stats (:common stats))
-         (value-stats stats BOLT-FIELDS)
-         {:type :bolt}))
-
-(defn value-spout-stats!
-  [^SpoutExecutorStats stats]
-  (cleanup-spout-stats! stats)
-  (merge (value-common-stats (:common stats))
-         (value-stats stats SPOUT-FIELDS)
-         {:type :spout}))
-
-(defmulti render-stats! class-selector)
-
-(defmethod render-stats! SpoutExecutorStats
-  [stats]
-  (value-spout-stats! stats))
-
-(defmethod render-stats! BoltExecutorStats
-  [stats]
-  (value-bolt-stats! stats))
-
-(defmulti thriftify-specific-stats :type)
-(defmulti clojurify-specific-stats class-selector)
-
-(defn window-set-converter
-  ([stats key-fn first-key-fun]
-    (into {}
-      (for [[k v] stats]
-        ; apply first-key-fun only to the first-level keys.
-        [(first-key-fun k)
-         (into {} (for [[k2 v2] v]
-                    [(key-fn k2) v2]))])))
-  ([stats first-key-fun]
-    (window-set-converter stats identity first-key-fun)))
-
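A hand-worked sketch of window-set-converter reshaping a {window {stream value}} map, here stringifying only the outer window keys (illustrative only):

    (window-set-converter {600 {"default" 42}} str)
    ;; => {"600" {"default" 42}}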
-(defn to-global-stream-id
-  [[component stream]]
-  (GlobalStreamId. component stream))
-
-(defn from-global-stream-id [global-stream-id]
-  [(.get_componentId global-stream-id) (.get_streamId global-stream-id)])
-
-(defmethod clojurify-specific-stats BoltStats [^BoltStats stats]
-  [(window-set-converter (.get_acked stats) from-global-stream-id identity)
-   (window-set-converter (.get_failed stats) from-global-stream-id identity)
-   (window-set-converter (.get_process_ms_avg stats) from-global-stream-id identity)
-   (window-set-converter (.get_executed stats) from-global-stream-id identity)
-   (window-set-converter (.get_execute_ms_avg stats) from-global-stream-id identity)])
-
-(defmethod clojurify-specific-stats SpoutStats [^SpoutStats stats]
-  [(.get_acked stats)
-   (.get_failed stats)
-   (.get_complete_ms_avg stats)])
-
-
-(defn clojurify-executor-stats
-  [^ExecutorStats stats]
-  (let [ specific-stats (.get_specific stats)
-         is_bolt? (.is_set_bolt specific-stats)
-         specific-stats (if is_bolt? (.get_bolt specific-stats) (.get_spout specific-stats))
-         specific-stats (clojurify-specific-stats specific-stats)
-         common-stats (CommonStats. (.get_emitted stats)
-                                    (.get_transferred stats)
-                                    (.get_rate stats))]
-    (if is_bolt?
-      ; The worker heartbeat does not store BoltExecutorStats or SpoutExecutorStats; instead it stores
-      ; the result of render-stats!, which flattens BoltExecutorStats/SpoutExecutorStats by extracting
-      ; the values from all atoms and merging everything under :common into the top-level map. We do
-      ; pretty much the same here.
-      (dissoc (merge common-stats {:type :bolt}  (apply ->BoltExecutorStats (into [nil] specific-stats))) :common)
-      (dissoc (merge common-stats {:type :spout} (apply ->SpoutExecutorStats (into [nil] specific-stats))) :common)
-      )))
-
-(defmethod thriftify-specific-stats :bolt
-  [stats]
-  (ExecutorSpecificStats/bolt
-    (BoltStats.
-      (window-set-converter (:acked stats) to-global-stream-id str)
-      (window-set-converter (:failed stats) to-global-stream-id str)
-      (window-set-converter (:process-latencies stats) to-global-stream-id str)
-      (window-set-converter (:executed stats) to-global-stream-id str)
-      (window-set-converter (:execute-latencies stats) to-global-stream-id str))))
-
-(defmethod thriftify-specific-stats :spout
-  [stats]
-  (ExecutorSpecificStats/spout
-    (SpoutStats. (window-set-converter (:acked stats) str)
-      (window-set-converter (:failed stats) str)
-      (window-set-converter (:complete-latencies stats) str))))
-
-(defn thriftify-executor-stats
-  [stats]
-  (let [specific-stats (thriftify-specific-stats stats)
-        rate (:rate stats)]
-    (ExecutorStats. (window-set-converter (:emitted stats) str)
-      (window-set-converter (:transferred stats) str)
-      specific-stats
-      rate)))
-
-(defn valid-number?
-  "Returns true if x is a number that is not NaN or Infinity, false otherwise"
-  [x]
-  (and (number? x)
-       (not (Double/isNaN x))
-       (not (Double/isInfinite x))))
-
-(defn apply-default
-  [f defaulting-fn & args]
-  (apply f (map defaulting-fn args)))
-
-(defn apply-or-0
-  [f & args]
-  (apply apply-default
-         f
-         #(if (valid-number? %) % 0)
-         args))
-
-(defn sum-or-0
-  [& args]
-  (apply apply-or-0 + args))
-
-(defn product-or-0
-  [& args]
-  (apply apply-or-0 * args))
-
-(defn max-or-0
-  [& args]
-  (apply apply-or-0 max args))
-
-(defn- agg-bolt-lat-and-count
-  "Aggregates number executed, process latency, and execute latency across all
-  streams."
-  [idk->exec-avg idk->proc-avg idk->num-executed]
-  (letfn [(weight-avg [[id avg]]
-            (let [num-e (get idk->num-executed id)]
-              (product-or-0 avg num-e)))]
-    {:executeLatencyTotal (sum (map weight-avg idk->exec-avg))
-     :processLatencyTotal (sum (map weight-avg idk->proc-avg))
-     :executed (sum (vals idk->num-executed))}))
-
-(defn- agg-spout-lat-and-count
-  "Aggregates number acked and complete latencies across all streams."
-  [sid->comp-avg sid->num-acked]
-  (letfn [(weight-avg [[id avg]]
-            (product-or-0 avg (get sid->num-acked id)))]
-    {:completeLatencyTotal (sum (map weight-avg sid->comp-avg))
-     :acked (sum (vals sid->num-acked))}))
-
-(defn add-pairs
-  ([] [0 0])
-  ([[a1 a2] [b1 b2]]
-   [(+ a1 b1) (+ a2 b2)]))
-
-(defn mk-include-sys-fn
-  [include-sys?]
-  (if include-sys?
-    (fn [_] true)
-    (fn [stream] (and (string? stream) (not (Utils/isSystemId stream))))))
-
-(defn mk-include-sys-filter
-  "Returns a function that includes or excludes map entries whose keys are
-  system ids."
-  [include-sys?]
-  (if include-sys?
-    identity
-    (partial filter-key (mk-include-sys-fn false))))
-
-(defn- agg-bolt-streams-lat-and-count
-  "Aggregates number executed and process & execute latencies."
-  [idk->exec-avg idk->proc-avg idk->executed]
-  (letfn [(weight-avg [id avg]
-            (let [num-e (idk->executed id)]
-              (product-or-0 avg num-e)))]
-    (into {}
-      (for [k (keys idk->exec-avg)]
-        [k {:executeLatencyTotal (weight-avg k (get idk->exec-avg k))
-            :processLatencyTotal (weight-avg k (get idk->proc-avg k))
-            :executed (idk->executed k)}]))))
-
-(defn- agg-spout-streams-lat-and-count
-  "Aggregates number acked and complete latencies."
-  [idk->comp-avg idk->acked]
-  (letfn [(weight-avg [id avg]
-            (let [num-e (get idk->acked id)]
-              (product-or-0 avg num-e)))]
-    (into {}
-      (for [k (keys idk->comp-avg)]
-        [k {:completeLatencyTotal (weight-avg k (get idk->comp-avg k))
-            :acked (get idk->acked k)}]))))
-
-(defn swap-map-order
-  "For a nested map, rearrange data such that the top-level keys become the
-  nested map's keys and vice versa.
-  Example:
-  {:a {:X :banana, :Y :pear}, :b {:X :apple, :Y :orange}}
-  -> {:Y {:a :pear, :b :orange}, :X {:a :banana, :b :apple}}"
-  [m]
-  (apply merge-with
-         merge
-         (map (fn [[k v]]
-                (into {}
-                      (for [[k2 v2] v]
-                        [k2 {k v2}])))
-              m)))
-
-(defn- compute-agg-capacity
-  "Computes the capacity metric for one executor given its heartbeat data and
-  uptime."
-  [m uptime]
-  (when uptime
-    (->>
-      ;; For each stream, create weighted averages and counts.
-      (merge-with (fn weighted-avg+count-fn
-                    [avg cnt]
-                    [(* avg cnt) cnt])
-                  (get (:execute-latencies m) (str TEN-MIN-IN-SECONDS))
-                  (get (:executed m) (str TEN-MIN-IN-SECONDS)))
-      vals ;; Ignore the stream ids.
-      (reduce add-pairs
-              [0. 0]) ;; Combine weighted averages and counts.
-      ((fn [[weighted-avg cnt]]
-        (div weighted-avg (* 1000 (min uptime TEN-MIN-IN-SECONDS))))))))
-
-(defn agg-pre-merge-comp-page-bolt
-  [{exec-id :exec-id
-    host :host
-    port :port
-    uptime :uptime
-    comp-id :comp-id
-    num-tasks :num-tasks
-    statk->w->sid->num :stats}
-   window
-   include-sys?]
-  (let [str-key (partial map-key str)
-        handle-sys-components-fn (mk-include-sys-filter include-sys?)]
-    {:executor-id exec-id,
-     :host host,
-     :port port,
-     :uptime uptime,
-     :num-executors 1,
-     :num-tasks num-tasks,
-     :capacity (compute-agg-capacity statk->w->sid->num uptime)
-     :cid+sid->input-stats
-     (merge-with
-       merge
-       (swap-map-order
-         {:acked (-> statk->w->sid->num
-                     :acked
-                     str-key
-                     (get window))
-          :failed (-> statk->w->sid->num
-                      :failed
-                      str-key
-                      (get window))})
-       (agg-bolt-streams-lat-and-count (-> statk->w->sid->num
-                                           :execute-latencies
-                                           str-key
-                                           (get window))
-                                       (-> statk->w->sid->num
-                                           :process-latencies
-                                           str-key
-                                           (get window))
-                                       (-> statk->w->sid->num
-                                           :executed
-                                           str-key
-                                           (get window)))),
-     :sid->output-stats
-     (swap-map-order
-       {:emitted (-> statk->w->sid->num
-                     :emitted
-                     str-key
-                     (get window)
-                     handle-sys-components-fn)
-        :transferred (-> statk->w->sid->num
-                         :transferred
-                         str-key
-                         (get window)
-                         handle-sys-components-fn)})}))
-
-(defn agg-pre-merge-comp-page-spout
-  [{exec-id :exec-id
-    host :host
-    port :port
-    uptime :uptime
-    comp-id :comp-id
-    num-tasks :num-tasks
-    statk->w->sid->num :stats}
-   window
-   include-sys?]
-  (let [str-key (partial map-key str)
-        handle-sys-components-fn (mk-include-sys-filter include-sys?)]
-    {:executor-id exec-id,
-     :host host,
-     :port port,
-     :uptime uptime,
-     :num-executors 1,
-     :num-tasks num-tasks,
-     :sid->output-stats
-     (merge-with
-       merge
-       (agg-spout-streams-lat-and-count (-> statk->w->sid->num
-                                            :complete-latencies
-                                            str-key
-                                            (get window))
-                                        (-> statk->w->sid->num
-                                            :acked
-                                            str-key
-                                            (get window)))
-       (swap-map-order
-         {:acked (-> statk->w->sid->num
-                     :acked
-                     str-key
-                     (get window))
-          :failed (-> statk->w->sid->num
-                      :failed
-                      str-key
-                      (get window))
-          :emitted (-> statk->w->sid->num
-                       :emitted
-                       str-key
-                       (get window)
-                       handle-sys-components-fn)
-          :transferred (-> statk->w->sid->num
-                           :transferred
-                           str-key
-                           (get window)
-                           handle-sys-components-fn)}))}))
-
-(defn agg-pre-merge-topo-page-bolt
-  [{comp-id :comp-id
-    num-tasks :num-tasks
-    statk->w->sid->num :stats
-    uptime :uptime}
-   window
-   include-sys?]
-  (let [str-key (partial map-key str)
-        handle-sys-components-fn (mk-include-sys-filter include-sys?)]
-    {comp-id
-     (merge
-       (agg-bolt-lat-and-count (-> statk->w->sid->num
-                                   :execute-latencies
-                                   str-key
-                                   (get window))
-                               (-> statk->w->sid->num
-                                   :process-latencies
-                                   str-key
-                                   (get window))
-                               (-> statk->w->sid->num
-                                   :executed
-                                   str-key
-                                   (get window)))
-       {:num-executors 1
-        :num-tasks num-tasks
-        :emitted (-> statk->w->sid->num
-                     :emitted
-                     str-key
-                     (get window)
-                     handle-sys-components-fn
-                     vals
-                     sum)
-        :transferred (-> statk->w->sid->num
-                         :transferred
-                         str-key
-                         (get window)
-                         handle-sys-components-fn
-                         vals
-                         sum)
-        :capacity (compute-agg-capacity statk->w->sid->num uptime)
-        :acked (-> statk->w->sid->num
-                   :acked
-                   str-key
-                   (get window)
-                   vals
-                   sum)
-        :failed (-> statk->w->sid->num
-                    :failed
-                    str-key
-                    (get window)
-                    vals
-                    sum)})}))
-
-(defn agg-pre-merge-topo-page-spout
-  [{comp-id :comp-id
-    num-tasks :num-tasks
-    statk->w->sid->num :stats}
-   window
-   include-sys?]
-  (let [str-key (partial map-key str)
-        handle-sys-components-fn (mk-include-sys-filter include-sys?)]
-    {comp-id
-     (merge
-       (agg-spout-lat-and-count (-> statk->w->sid->num
-                                    :complete-latencies
-                                    str-key
-                                    (get window))
-                                (-> statk->w->sid->num
-                                    :acked
-                                    str-key
-                                    (get window)))
-       {:num-executors 1
-        :num-tasks num-tasks
-        :emitted (-> statk->w->sid->num
-                     :emitted
-                     str-key
-                     (get window)
-                     handle-sys-components-fn
-                     vals
-                     sum)
-        :transferred (-> statk->w->sid->num
-                         :transferred
-                         str-key
-                         (get window)
-                         handle-sys-components-fn
-                         vals
-                         sum)
-        :failed (-> statk->w->sid->num
-                    :failed
-                    str-key
-                    (get window)
-                    vals
-                    sum)})}))
-
-(defn merge-agg-comp-stats-comp-page-bolt
-  [{acc-in :cid+sid->input-stats
-    acc-out :sid->output-stats
-    :as acc-bolt-stats}
-   {bolt-in :cid+sid->input-stats
-    bolt-out :sid->output-stats
-    :as bolt-stats}]
-  {:num-executors (inc (or (:num-executors acc-bolt-stats) 0)),
-   :num-tasks (sum-or-0 (:num-tasks acc-bolt-stats) (:num-tasks bolt-stats)),
-   :sid->output-stats (merge-with (partial merge-with sum-or-0)
-                                  acc-out
-                                  bolt-out),
-   :cid+sid->input-stats (merge-with (partial merge-with sum-or-0)
-                                     acc-in
-                                     bolt-in),
-   :executor-stats
-   (let [sum-streams (fn [m k] (->> m vals (map k) (apply sum-or-0)))
-         executed (sum-streams bolt-in :executed)]
-     (conj (:executor-stats acc-bolt-stats)
-           (merge
-             (select-keys bolt-stats
-                          [:executor-id :uptime :host :port :capacity])
-             {:emitted (sum-streams bolt-out :emitted)
-              :transferred (sum-streams bolt-out :transferred)
-              :acked (sum-streams bolt-in :acked)
-              :failed (sum-streams bolt-in :failed)
-              :executed executed}
-             (->>
-               (if (and executed (pos? executed))
-                 [(div (sum-streams bolt-in :executeLatencyTotal) executed)
-                  (div (sum-streams bolt-in :processLatencyTotal) executed)]
-                 [nil nil])
-               (mapcat vector [:execute-latency :process-latency])
-               (apply assoc {})))))})
-
-(defn merge-agg-comp-stats-comp-page-spout
-  [{acc-out :sid->output-stats
-    :as acc-spout-stats}
-   {spout-out :sid->output-stats
-    :as spout-stats}]
-  {:num-executors (inc (or (:num-executors acc-spout-stats) 0)),
-   :num-tasks (sum-or-0 (:num-tasks acc-spout-stats) (:num-tasks spout-stats)),
-   :sid->output-stats (merge-with (partial merge-with sum-or-0)
-                                  acc-out
-                                  spout-out),
-   :executor-stats
-   (let [sum-streams (fn [m k] (->> m vals (map k) (apply sum-or-0)))
-         acked (sum-streams spout-out :acked)]
-     (conj (:executor-stats acc-spout-stats)
-           (merge
-             (select-keys spout-stats [:executor-id :uptime :host :port])
-             {:emitted (sum-streams spout-out :emitted)
-              :transferred (sum-streams spout-out :transferred)
-              :acked acked
-              :failed (sum-streams spout-out :failed)}
-             {:complete-latency (if (and acked (pos? acked))
-                                  (div (sum-streams spout-out
-                                                    :completeLatencyTotal)
-                                       acked)
-                                  nil)})))})
-
-(defn merge-agg-comp-stats-topo-page-bolt
-  [acc-bolt-stats bolt-stats]
-  {:num-executors (inc (or (:num-executors acc-bolt-stats) 0))
-   :num-tasks (sum-or-0 (:num-tasks acc-bolt-stats) (:num-tasks bolt-stats))
-   :emitted (sum-or-0 (:emitted acc-bolt-stats) (:emitted bolt-stats))
-   :transferred (sum-or-0 (:transferred acc-bolt-stats)
-                          (:transferred bolt-stats))
-   :capacity (max-or-0 (:capacity acc-bolt-stats) (:capacity bolt-stats))
-   ;; We sum average latency totals here to avoid dividing at each step.
-   ;; Compute the average latencies by dividing the total by the count.
-   :executeLatencyTotal (sum-or-0 (:executeLatencyTotal acc-bolt-stats)
-                                  (:executeLatencyTotal bolt-stats))
-   :processLatencyTotal (sum-or-0 (:processLatencyTotal acc-bolt-stats)
-                                  (:processLatencyTotal bolt-stats))
-   :executed (sum-or-0 (:executed acc-bolt-stats) (:executed bolt-stats))
-   :acked (sum-or-0 (:acked acc-bolt-stats) (:acked bolt-stats))
-   :failed (sum-or-0 (:failed acc-bolt-stats) (:failed bolt-stats))})
-
-(defn merge-agg-comp-stats-topo-page-spout
-  [acc-spout-stats spout-stats]
-  {:num-executors (inc (or (:num-executors acc-spout-stats) 0))
-   :num-tasks (sum-or-0 (:num-tasks acc-spout-stats) (:num-tasks spout-stats))
-   :emitted (sum-or-0 (:emitted acc-spout-stats) (:emitted spout-stats))
-   :transferred (sum-or-0 (:transferred acc-spout-stats) (:transferred spout-stats))
-   ;; We sum average latency totals here to avoid dividing at each step.
-   ;; Compute the average latencies by dividing the total by the count.
-   :completeLatencyTotal (sum-or-0 (:completeLatencyTotal acc-spout-stats)
-                            (:completeLatencyTotal spout-stats))
-   :acked (sum-or-0 (:acked acc-spout-stats) (:acked spout-stats))
-   :failed (sum-or-0 (:failed acc-spout-stats) (:failed spout-stats))})
-
-(defn aggregate-count-streams
-  [stats]
-  (->> stats
-       (map-val #(reduce + (vals %)))))
-
-(defn- agg-topo-exec-stats*
-  "A helper function that does the common work to aggregate stats of one
-  executor with the given map for the topology page."
-  [window
-   include-sys?
-   {:keys [workers-set
-           bolt-id->stats
-           spout-id->stats
-           window->emitted
-           window->transferred
-           window->comp-lat-wgt-avg
-           window->acked
-           window->failed] :as acc-stats}
-   {:keys [stats] :as new-data}
-   pre-merge-fn
-   merge-fn
-   comp-key]
-  (let [cid->statk->num (pre-merge-fn new-data window include-sys?)
-        {w->compLatWgtAvg :completeLatencyTotal
-         w->acked :acked}
-          (if (:complete-latencies stats)
-            (swap-map-order
-              (into {}
-                    (for [w (keys (:acked stats))]
-                         [w (agg-spout-lat-and-count
-                              (get (:complete-latencies stats) w)
-                              (get (:acked stats) w))])))
-            {:completeLatencyTotal nil
-             :acked (aggregate-count-streams (:acked stats))})
-        handle-sys-components-fn (mk-include-sys-filter include-sys?)]
-    (assoc {:workers-set (conj workers-set
-                               [(:host new-data) (:port new-data)])
-            :bolt-id->stats bolt-id->stats
-            :spout-id->stats spout-id->stats
-            :window->emitted (->> (:emitted stats)
-                                  (map-val handle-sys-components-fn)
-                                  aggregate-count-streams
-                                  (merge-with + window->emitted))
-            :window->transferred (->> (:transferred stats)
-                                      (map-val handle-sys-components-fn)
-                                      aggregate-count-streams
-                                      (merge-with + window->transferred))
-            :window->comp-lat-wgt-avg (merge-with +
-                                                  window->comp-lat-wgt-avg
-                                                  w->compLatWgtAvg)
-            :window->acked (if (= :spout (:type stats))
-                             (merge-with + window->acked w->acked)
-                             window->acked)
-            :window->failed (if (= :spout (:type stats))
-                              (->> (:failed stats)
-                                   aggregate-count-streams
-                                   (merge-with + window->failed))
-                              window->failed)}
-           comp-key (merge-with merge-fn
-                                (acc-stats comp-key)
-                                cid->statk->num)
-           :type (:type stats))))
-
-(defmulti agg-topo-exec-stats
-  "Combines the aggregate stats of one executor with the given map, selecting
-  the appropriate window and including system components as specified."
-  (fn dispatch-fn [& args] (:type (last args))))
-
-(defmethod agg-topo-exec-stats :bolt
-  [window include-sys? acc-stats new-data]
-  (agg-topo-exec-stats* window
-                        include-sys?
-                        acc-stats
-                        new-data
-                        agg-pre-merge-topo-page-bolt
-                        merge-agg-comp-stats-topo-page-bolt
-                        :bolt-id->stats))
-
-(defmethod agg-topo-exec-stats :spout
-  [window include-sys? acc-stats new-data]
-  (agg-topo-exec-stats* window
-                        include-sys?
-                        acc-stats
-                        new-data
-                        agg-pre-merge-topo-page-spout
-                        merge-agg-comp-stats-topo-page-spout
-                        :spout-id->stats))
-
-(defmethod agg-topo-exec-stats :default [_ _ acc-stats _] acc-stats)
-
-(defn get-last-error
-  [storm-cluster-state storm-id component-id]
-  (if-let [e (.last-error storm-cluster-state storm-id component-id)]
-    (ErrorInfo. (:error e) (:time-secs e))))
-
-(defn component-type
-  "Returns the component type (either :bolt or :spout) for a given
-  topology and component id. Returns nil if not found."
-  [^StormTopology topology id]
-  (let [bolts (.get_bolts topology)
-        spouts (.get_spouts topology)]
-    (cond
-      (Utils/isSystemId id) :bolt
-      (.containsKey bolts id) :bolt
-      (.containsKey spouts id) :spout)))
-
-(defn extract-nodeinfos-from-hb-for-comp
-  ([exec->host+port task->component include-sys? comp-id]
-   (distinct (for [[[start end :as executor] [host port]] exec->host+port
-         :let [id (task->component start)]
-         :when (and (or (nil? comp-id) (= comp-id id))
-                 (or include-sys? (not (Utils/isSystemId id))))]
-     {:host host
-      :port port}))))
-
-(defn extract-data-from-hb
-  ([exec->host+port task->component beats include-sys? topology comp-id]
-   (for [[[start end :as executor] [host port]] exec->host+port
-         :let [beat (beats executor)
-               id (task->component start)]
-         :when (and (or (nil? comp-id) (= comp-id id))
-                    (or include-sys? (not (Utils/isSystemId id))))]
-     {:exec-id executor
-      :comp-id id
-      :num-tasks (count (range start (inc end)))
-      :host host
-      :port port
-      :uptime (:uptime beat)
-      :stats (:stats beat)
-      :type (or (:type (:stats beat))
-                (component-type topology id))}))
-  ([exec->host+port task->component beats include-sys? topology]
-    (extract-data-from-hb exec->host+port
-                          task->component
-                          beats
-                          include-sys?
-                          topology
-                          nil)))
-
-(defn aggregate-topo-stats
-  [window include-sys? data]
-  (let [init-val {:workers-set #{}
-                  :bolt-id->stats {}
-                  :spout-id->stats {}
-                  :window->emitted {}
-                  :window->transferred {}
-                  :window->comp-lat-wgt-avg {}
-                  :window->acked {}
-                  :window->failed {}}
-        reducer-fn (partial agg-topo-exec-stats
-                            window
-                            include-sys?)]
-    (reduce reducer-fn init-val data)))
-
-(defn- compute-weighted-averages-per-window
-  [acc-data wgt-avg-key divisor-key]
-  (into {} (for [[window wgt-avg] (wgt-avg-key acc-data)
-                 :let [divisor ((divisor-key acc-data) window)]
-                 :when (and divisor (pos? divisor))]
-             [(str window) (div wgt-avg divisor)])))
-
-(defn- post-aggregate-topo-stats
-  [task->component exec->node+port last-err-fn acc-data]
-  {:num-tasks (count task->component)
-   :num-workers (count (:workers-set acc-data))
-   :num-executors (count exec->node+port)
-   :bolt-id->stats
-     (into {} (for [[id m] (:bolt-id->stats acc-data)
-                    :let [executed (:executed m)]]
-                     [id (-> m
-                             (assoc :execute-latency
-                                    (if (and executed (pos? executed))
-                                      (div (or (:executeLatencyTotal m) 0)
-                                           executed)
-                                      0)
-                                    :process-latency
-                                    (if (and executed (pos? executed))
-                                      (div (or (:processLatencyTotal m) 0)
-                                           executed)
-                                      0))
-                             (dissoc :executeLatencyTotal
-                                     :processLatencyTotal)
-                             (assoc :lastError (last-err-fn id)))]))
-   :spout-id->stats
-     (into {} (for [[id m] (:spout-id->stats acc-data)
-                    :let [acked (:acked m)]]
-                    [id (-> m
-                            (assoc :complete-latency
-                                   (if (and acked (pos? acked))
-                                     (div (:completeLatencyTotal m)
-                                          (:acked m))
-                                     0))
-                            (dissoc :completeLatencyTotal)
-                            (assoc :lastError (last-err-fn id)))]))
-   :window->emitted (map-key str (:window->emitted acc-data))
-   :window->transferred (map-key str (:window->transferred acc-data))
-   :window->complete-latency
-     (compute-weighted-averages-per-window acc-data
-                                           :window->comp-lat-wgt-avg
-                                           :window->acked)
-   :window->acked (map-key str (:window->acked acc-data))
-   :window->failed (map-key str (:window->failed acc-data))})
-
-(defn- thriftify-common-agg-stats
-  [^ComponentAggregateStats s
-   {:keys [num-tasks
-           emitted
-           transferred
-           acked
-           failed
-           num-executors] :as statk->num}]
-  (let [cas (CommonAggregateStats.)]
-    (and num-executors (.set_num_executors cas num-executors))
-    (and num-tasks (.set_num_tasks cas num-tasks))
-    (and emitted (.set_emitted cas emitted))
-    (and transferred (.set_transferred cas transferred))
-    (and acked (.set_acked cas acked))
-    (and failed (.set_failed cas failed))
-    (.set_common_stats s cas)))
-
-(defn thriftify-bolt-agg-stats
-  [statk->num]
-  (let [{:keys [lastError
-                execute-latency
-                process-latency
-                executed
-                capacity]} statk->num
-        s (ComponentAggregateStats.)]
-    (.set_type s ComponentType/BOLT)
-    (and lastError (.set_last_error s lastError))
-    (thriftify-common-agg-stats s statk->num)
-    (.set_specific_stats s
-      (SpecificAggregateStats/bolt
-        (let [bas (BoltAggregateStats.)]
-          (and execute-latency (.set_execute_latency_ms bas execute-latency))
-          (and process-latency (.set_process_latency_ms bas process-latency))
-          (and executed (.set_executed bas executed))
-          (and capacity (.set_capacity bas capacity))
-          bas)))
-    s))
-
-(defn thriftify-spout-agg-stats
-  [statk->num]
-  (let [{:keys [lastError
-                complete-latency]} statk->num
-        s (ComponentAggregateStats.)]
-    (.set_type s ComponentType/SPOUT)
-    (and lastError (.set_last_error s lastError))
-    (thriftify-common-agg-stats s statk->num)
-    (.set_specific_stats s
-      (SpecificAggregateStats/spout
-        (let [sas (SpoutAggregateStats.)]
-          (and complete-latency (.set_complete_latency_ms sas complete-latency))
-          sas)))
-    s))
-
-(defn thriftify-topo-page-data
-  [topology-id data]
-  (let [{:keys [num-tasks
-                num-workers
-                num-executors
-                spout-id->stats
-                bolt-id->stats
-                window->emitted
-                window->transferred
-                window->complete-latency
-                window->acked
-                window->failed]} data
-        spout-agg-stats (into {}
-                              (for [[id m] spout-id->stats
-                                    :let [m (assoc m :type :spout)]]
-                                [id
-                                 (thriftify-spout-agg-stats m)]))
-        bolt-agg-stats (into {}
-                             (for [[id m] bolt-id->stats
-                                   :let [m (assoc m :type :bolt)]]
-                              [id
-                               (thriftify-bolt-agg-stats m)]))
-        topology-stats (doto (TopologyStats.)
-                         (.set_window_to_emitted window->emitted)
-                         (.set_window_to_transferred window->transferred)
-                         (.set_window_to_complete_latencies_ms
-                           window->complete-latency)
-                         (.set_window_to_acked window->acked)
-                         (.set_window_to_failed window->failed))
-      topo-page-info (doto (TopologyPageInfo. topology-id)
-                       (.set_num_tasks num-tasks)
-                       (.set_num_workers num-workers)
-                       (.set_num_executors num-executors)
-                       (.set_id_to_spout_agg_stats spout-agg-stats)
-                       (.set_id_to_bolt_agg_stats bolt-agg-stats)
-                       (.set_topology_stats topology-stats))]
-    topo-page-info))
-
-(defn agg-topo-execs-stats
-  "Aggregate various executor statistics for a topology from the given
-  heartbeats."
-  [topology-id
-   exec->node+port
-   task->component
-   beats
-   topology
-   window
-   include-sys?
-   last-err-fn]
-  (->> ;; Thanks to lazy evaluation, this pipeline walks each executor's data only once.
-    (extract-data-from-hb exec->node+port
-                          task->component
-                          beats
-                          include-sys?
-                          topology)
-    (aggregate-topo-stats window include-sys?)
-    (post-aggregate-topo-stats task->component exec->node+port last-err-fn)
-    (thriftify-topo-page-data topology-id)))
-
-(defn- agg-bolt-exec-win-stats
-  "A helper function that aggregates windowed stats from one bolt executor."
-  [acc-stats new-stats include-sys?]
-  (let [{w->execLatWgtAvg :executeLatencyTotal
-         w->procLatWgtAvg :processLatencyTotal
-         w->executed :executed}
-          (swap-map-order
-            (into {} (for [w (keys (:executed new-stats))]
-                       [w (agg-bolt-lat-and-count
-                            (get (:execute-latencies new-stats) w)
-                            (get (:process-latencies new-stats) w)
-                            (get (:executed new-stats) w))])))
-        handle-sys-components-fn (mk-include-sys-filter include-sys?)]
-    {:window->emitted (->> (:emitted new-stats)
-                           (map-val handle-sys-components-fn)
-                           aggregate-count-streams
-                           (merge-with + (:window->emitted acc-stats)))
-     :window->transferred (->> (:transferred new-stats)
-                               (map-val handle-sys-components-fn)
-                               aggregate-count-streams
-                               (merge-with + (:window->transferred acc-stats)))
-     :window->exec-lat-wgt-avg (merge-with +
-                                           (:window->exec-lat-wgt-avg acc-stats)
-                                           w->execLatWgtAvg)
-     :window->proc-lat-wgt-avg (merge-with +
-                                           (:window->proc-lat-wgt-avg acc-stats)
-                                           w->procLatWgtAvg)
-     :window->executed (merge-with + (:window->executed acc-stats) w->executed)
-     :window->acked (->> (:acked new-stats)
-                         aggregate-count-streams
-                         (merge-with + (:window->acked acc-stats)))
-     :window->failed (->> (:failed new-stats)
-                          aggregate-count-streams
-                          (merge-with + (:window->failed acc-stats)))}))
-
-(defn- agg-spout-exec-win-stats
-  "A helper function that aggregates windowed stats from one spout executor."
-  [acc-stats new-stats include-sys?]
-  (let [{w->compLatWgtAvg :completeLatencyTotal
-         w->acked :acked}
-          (swap-map-order
-            (into {} (for [w (keys (:acked new-stats))]
-                       [w (agg-spout-lat-and-count
-                            (get (:complete-latencies new-stats) w)
-                            (get (:acked new-stats) w))])))
-        handle-sys-components-fn (mk-include-sys-filter include-sys?)]
-    {:window->emitted (->> (:emitted new-stats)
-                           (map-val handle-sys-components-fn)
-                           aggregate-count-streams
-                           (merge-with + (:window->emitted acc-stats)))
-     :window->transferred (->> (:transferred new-stats)
-                               (map-val handle-sys-components-fn)
-                               aggregate-count-streams
-                               (merge-with + (:window->transferred acc-stats)))
-     :window->comp-lat-wgt-avg (merge-with +
-                                           (:window->comp-lat-wgt-avg acc-stats)
-                                           w->compLatWgtAvg)
-     :window->acked (->> (:acked new-stats)
-                         aggregate-count-streams
-                         (merge-with + (:window->acked acc-stats)))
-     :window->failed (->> (:failed new-stats)
-                          aggregate-count-streams
-                          (merge-with + (:window->failed acc-stats)))}))
-
-(defmulti agg-comp-exec-stats
-  "Combines the aggregate stats of one executor with the given map, selecting
-  the appropriate window and including system components as specified."
-  (fn dispatch-fn [_ _ init-val _] (:type init-val)))
-
-(defmethod agg-comp-exec-stats :bolt
-  [window include-sys? acc-stats new-data]
-  (assoc (agg-bolt-exec-win-stats acc-stats (:stats new-data) include-sys?)
-         :stats (merge-agg-comp-stats-comp-page-bolt
-                  (:stats acc-stats)
-                  (agg-pre-merge-comp-page-bolt new-data window include-sys?))
-         :type :bolt))
-
-(defmethod agg-comp-exec-stats :spout
-  [window include-sys? acc-stats new-data]
-  (assoc (agg-spout-exec-win-stats acc-stats (:stats new-data) include-sys?)
-         :stats (merge-agg-comp-stats-comp-page-spout
-                  (:stats acc-stats)
-                  (agg-pre-merge-comp-page-spout new-data window include-sys?))
-         :type :spout))
-
-(defn- aggregate-comp-stats*
-  [window include-sys? data init-val]
-  (-> (partial agg-comp-exec-stats
-               window
-               include-sys?)
-      (reduce init-val data)))
-
-(defmulti aggregate-comp-stats
-  (fn dispatch-fn [& args] (-> args last first :type)))
-
-(defmethod aggregate-comp-stats :bolt
-  [& args]
-  (let [init-val {:type :bolt
-                  :cid+sid->input-stats {}
-                  :sid->output-stats {}
-                  :executor-stats []
-                  :window->emitted {}
-                  :window->transferred {}
-                  :window->exec-lat-wgt-avg {}
-                  :window->executed {}
-                  :window->proc-lat-wgt-avg {}
-                  :window->acked {}
-                  :window->failed {}}]
-    (apply aggregate-comp-stats* (concat args (list init-val)))))
-
-(defmethod aggregate-comp-stats :spout
-  [& args]
-  (let [init-val {:type :spout
-                  :sid->output-stats {}
-                  :executor-stats []
-                  :window->emitted {}
-                  :window->transferred {}
-                  :window->comp-lat-wgt-avg {}
-                  :window->acked {}
-                  :window->failed {}}]
-    (apply aggregate-comp-stats* (concat args (list init-val)))))
-
-(defmethod aggregate-comp-stats :default [& _] {})
-
-(defmulti post-aggregate-comp-stats
-  (fn [_ _ data] (:type data)))
-
-(defmethod post-aggregate-comp-stats :bolt
-  [task->component
-   exec->host+port
-   {{i-stats :cid+sid->input-stats
-     o-stats :sid->output-stats
-     num-tasks :num-tasks
-     num-executors :num-executors} :stats
-    comp-type :type :as acc-data}]
-  {:type comp-type
-   :num-tasks num-tasks
-   :num-executors num-executors
-   :cid+sid->input-stats
-   (->> i-stats
-        (map-val (fn [m]
-                     (let [executed (:executed m)
-                           lats (if (and executed (pos? executed))
-                                  {:execute-latency
-                                   (div (or (:executeLatencyTotal m) 0)
-                                        executed)
-                                   :process-latency
-                                   (div (or (:processLatencyTotal m) 0)
-                                        executed)}
-                                  {:execute-latency 0
-                                   :process-latency 0})]
-                       (-> m (merge lats) (dissoc :executeLatencyTotal
-                                                  :processLatencyTotal))))))
-   :sid->output-stats o-stats
-   :executor-stats (:executor-stats (:stats acc-data))
-   :window->emitted (map-key str (:window->emitted acc-data))
-   :window->transferred (map-key str (:window->transferred acc-data))
-   :window->execute-latency
-     (compute-weighted-averages-per-window acc-data
-                                           :window->exec-lat-wgt-avg
-                                           :window->executed)
-   :window->executed (map-key str (:window->executed acc-data))
-   :window->process-latency
-     (compute-weighted-averages-per-window acc-data
-                                           :window->proc-lat-wgt-avg
-                                           :window->executed)
-   :window->acked (map-key str (:window->acked acc-data))
-   :window->failed (map-key str (:window->failed acc-data))})
-
-(defmethod post-aggregate-comp-stats :spout
-  [task->component
-   exec->host+port
-   {{o-stats :sid->output-stats
-     num-tasks :num-tasks
-     num-executors :num-executors} :stats
-    comp-type :type :as acc-data}]
-  {:type comp-type
-   :num-tasks num-tasks
-   :num-executors num-executors
-   :sid->output-stats
-   (->> o-stats
-        (map-val (fn [m]
-                     (let [acked (:acked m)
-                           lat (if (and acked (pos? acked))
-                                 {:complete-latency
-                                  (div (or (:completeLatencyTotal m) 0) acked)}
-                                 {:complete-latency 0})]
-                       (-> m (merge lat) (dissoc :completeLatencyTotal))))))
-   :executor-stats (:executor-stats (:stats acc-data))
-   :window->emitted (map-key str (:window->emitted acc-data))
-   :window->transferred (map-key str (:window->transferred acc-data))
-   :window->complete-latency
-     (compute-weighted-averages-per-window acc-data
-                                           :window->comp-lat-wgt-avg
-                                           :window->acked)
-   :window->acked (map-key str (:window->acked acc-data))
-   :window->failed (map-key str (:window->failed acc-data))})
-
-(defmethod post-aggregate-comp-stats :default [& _] {})
-
-(defn thriftify-exec-agg-stats
-  [comp-id comp-type {:keys [executor-id host port uptime] :as stats}]
-  (doto (ExecutorAggregateStats.)
-    (.set_exec_summary (ExecutorSummary. (apply #(ExecutorInfo. %1 %2)
-                                                executor-id)
-                                         comp-id
-                                         host
-                                         port
-                                         (or uptime 0)))
-    (.set_stats ((condp = comp-type
-                   :bolt thriftify-bolt-agg-stats
-                   :spout thriftify-spout-agg-stats) stats))))
-
-(defn- thriftify-bolt-input-stats
-  [cid+sid->input-stats]
-  (into {} (for [[cid+sid input-stats] cid+sid->input-stats]
-             [(to-global-stream-id cid+sid)
-              (thriftify-bolt-agg-stats input-stats)])))
-
-(defn- thriftify-bolt-output-stats
-  [sid->output-stats]
-  (map-val thriftify-bolt-agg-stats sid->output-stats))
-
-(defn- thriftify-spout-output-stats
-  [sid->output-stats]
-  (map-val thriftify-spout-agg-stats sid->output-stats))
-
-(defn thriftify-comp-page-data
-  [topo-id topology comp-id data]
-  (let [w->stats (swap-map-order
-                   (merge
-                     {:emitted (:window->emitted data)
-                      :transferred (:window->transferred data)
-                      :acked (:window->acked data)
-                      :failed (:window->failed data)}
-                     (condp = (:type data)
-                       :bolt {:execute-latency (:window->execute-latency data)
-                              :process-latency (:window->process-latency data)
-                              :executed (:window->executed data)}
-                       :spout {:complete-latency
-                               (:window->complete-latency data)}
-                       {}))) ; default
-        [compType exec-stats w->stats gsid->input-stats sid->output-stats]
-          (condp = (component-type topology comp-id)
-            :bolt [ComponentType/BOLT
-                   (->
-                     (partial thriftify-exec-agg-stats comp-id :bolt)
-                     (map (:executor-stats data)))
-                   (map-val thriftify-bolt-agg-stats w->stats)
-                   (thriftify-bolt-input-stats (:cid+sid->input-stats data))
-                   (thriftify-bolt-output-stats (:sid->output-stats data))]
-            :spout [ComponentType/SPOUT
-                    (->
-                      (partial thriftify-exec-agg-stats comp-id :spout)
-                      (map (:executor-stats data)))
-                    (map-val thriftify-spout-agg-stats w->stats)
-                    nil ;; spouts do not have input stats
-                    (thriftify-spout-output-stats (:sid->output-stats data))]),
-        num-executors (:num-executors data)
-        num-tasks (:num-tasks data)
-        ret (doto (ComponentPageInfo. comp-id compType)
-              (.set_topology_id topo-id)
-              (.set_topology_name nil)
-              (.set_window_to_stats w->stats)
-              (.set_sid_to_output_stats sid->output-stats)
-              (.set_exec_stats exec-stats))]
-    (and num-executors (.set_num_executors ret num-executors))
-    (and num-tasks (.set_num_tasks ret num-tasks))
-    (and gsid->input-stats
-         (.set_gsid_to_input_stats ret gsid->input-stats))
-    ret))
-
-(defn agg-comp-execs-stats
-  "Aggregate various executor statistics for a component from the given
-  heartbeats."
-  [exec->host+port
-   task->component
-   beats
-   window
-   include-sys?
-   topology-id
-   topology
-   component-id]
-  (->> ;; Thanks to lazy evaluation, this pipeline walks each executor's data only once.
-    (extract-data-from-hb exec->host+port
-                          task->component
-                          beats
-                          include-sys?
-                          topology
-                          component-id)
-    (aggregate-comp-stats window include-sys?)
-    (post-aggregate-comp-stats task->component exec->host+port)
-    (thriftify-comp-page-data topology-id topology component-id)))
-
-(defn expand-averages
-  [avg counts]
-  (let [avg (clojurify-structure avg)
-        counts (clojurify-structure counts)]
-    (into {}
-          (for [[slice streams] counts]
-            [slice
-             (into {}
-                   (for [[stream c] streams]
-                     [stream
-                      [(* c (get-in avg [slice stream]))
-                       c]]
-                     ))]))))
-
-(defn expand-averages-seq
-  [average-seq counts-seq]
-  (->> (map vector average-seq counts-seq)
-       (map #(apply expand-averages %))
-       (apply merge-with (fn [s1 s2] (merge-with add-pairs s1 s2)))))
-
-(defn- val-avg
-  [[t c]]
-  (if (= c 0) 0
-    (double (/ t c))))
-
-(defn aggregate-averages
-  [average-seq counts-seq]
-  (->> (expand-averages-seq average-seq counts-seq)
-       (map-val
-         (fn [s]
-           (map-val val-avg s)))))
-
-(defn aggregate-avg-streams
-  [avg counts]
-  (let [expanded (expand-averages avg counts)]
-    (->> expanded
-         (map-val #(reduce add-pairs (vals %)))
-         (map-val val-avg))))
-
-(defn pre-process
-  [stream-summary include-sys?]
-  (let [filter-fn (mk-include-sys-fn include-sys?)
-        emitted (:emitted stream-summary)
-        emitted (into {} (for [[window stat] emitted]
-                           {window (filter-key filter-fn stat)}))
-        transferred (:transferred stream-summary)
-        transferred (into {} (for [[window stat] transferred]
-                               {window (filter-key filter-fn stat)}))
-        stream-summary (-> stream-summary (dissoc :emitted) (assoc :emitted emitted))
-        stream-summary (-> stream-summary (dissoc :transferred) (assoc :transferred transferred))]
-    stream-summary))
-
-(defn aggregate-counts
-  [counts-seq]
-  (->> counts-seq
-       (map clojurify-structure)
-       (apply merge-with
-              (fn [s1 s2]
-                (merge-with + s1 s2)))))
-
-(defn aggregate-common-stats
-  [stats-seq]
-  {:emitted (aggregate-counts (map #(.get_emitted ^ExecutorStats %) stats-seq))
-   :transferred (aggregate-counts (map #(.get_transferred ^ExecutorStats %) stats-seq))})
-
-(defn aggregate-bolt-stats
-  [stats-seq include-sys?]
-  (let [stats-seq (collectify stats-seq)]
-    (merge (pre-process (aggregate-common-stats stats-seq) include-sys?)
-           {:acked
-            (aggregate-counts (map #(.. ^ExecutorStats % get_specific get_bolt get_acked)
-                                   stats-seq))
-            :failed
-            (aggregate-counts (map #(.. ^ExecutorStats % get_specific get_bolt get_failed)
-                                   stats-seq))
-            :executed
-            (aggregate-counts (map #(.. ^ExecutorStats % get_specific get_bolt get_executed)
-                                   stats-seq))
-            :process-latencies
-            (aggregate-averages (map #(.. ^ExecutorStats % get_specific get_bolt get_process_ms_avg)
-                                     stats-seq)
-                                (map #(.. ^ExecutorStats % get_specific get_bolt get_acked)
-                                     stats-seq))
-            :execute-latencies
-            (aggregate-averages (map #(.. ^ExecutorStats % get_specific get_bolt get_execute_ms_avg)
-                                     stats-seq)
-                                (map #(.. ^ExecutorStats % get_specific get_bolt get_executed)
-                                     stats-seq))})))
-
-(defn aggregate-spout-stats
-  [stats-seq include-sys?]
-  (let [stats-seq (collectify stats-seq)]
-    (merge (pre-process (aggregate-common-stats stats-seq) include-sys?)
-           {:acked
-            (aggregate-counts (map #(.. ^ExecutorStats % get_specific get_spout get_acked)
-                                   stats-seq))
-            :failed
-            (aggregate-counts (map #(.. ^ExecutorStats % get_specific get_spout get_failed)
-                                   stats-seq))
-            :complete-latencies
-            (aggregate-averages (map #(.. ^ExecutorStats % get_specific get_spout get_complete_ms_avg)
-                                     stats-seq)
-                                (map #(.. ^ExecutorStats % get_specific get_spout get_acked)
-                                     stats-seq))})))
-
-(defn get-filled-stats
-  [summs]
-  (->> summs
-       (map #(.get_stats ^ExecutorSummary %))
-       (filter not-nil?)))
-
-(defn aggregate-spout-streams
-  [stats]
-  {:acked (aggregate-count-streams (:acked stats))
-   :failed (aggregate-count-streams (:failed stats))
-   :emitted (aggregate-count-streams (:emitted stats))
-   :transferred (aggregate-count-streams (:transferred stats))
-   :complete-latencies (aggregate-avg-streams (:complete-latencies stats)
-                                              (:acked stats))})
-
-(defn spout-streams-stats
-  [summs include-sys?]
-  (let [stats-seq (get-filled-stats summs)]
-    (aggregate-spout-streams
-      (aggregate-spout-stats
-        stats-seq include-sys?))))
-
-(defn aggregate-bolt-streams
-  [stats]
-  {:acked (aggregate-count-streams (:acked stats))
-   :failed (aggregate-count-streams (:failed stats))
-   :emitted (aggregate-count-streams (:emitted stats))
-   :transferred (aggregate-count-streams (:transferred stats))
-   :process-latencies (aggregate-avg-streams (:process-latencies stats)
-                                             (:acked stats))
-   :executed (aggregate-count-streams (:executed stats))
-   :execute-latencies (aggregate-avg-streams (:execute-latencies stats)
-                                             (:executed stats))})
-
-(defn compute-executor-capacity
-  [^ExecutorSummary e]
-  (let [stats (.get_stats e)
-        stats (if stats
-                (-> stats
-                    (aggregate-bolt-stats true)
-                    (aggregate-bolt-streams)
-                    swap-map-order
-                    (get (str TEN-MIN-IN-SECONDS))))
-        uptime (nil-to-zero (.get_uptime_secs e))
-        window (if (< uptime TEN-MIN-IN-SECONDS) uptime TEN-MIN-IN-SECONDS)
-        executed (-> stats :executed nil-to-zero)
-        latency (-> stats :execute-latencies nil-to-zero)]
-    (if (> window 0)
-      (div (* executed latency) (* 1000 window)))))
-
-(defn bolt-streams-stats
-  [summs include-sys?]
-  (let [stats-seq (get-filled-stats summs)]
-    (aggregate-bolt-streams
-      (aggregate-bolt-stats
-        stats-seq include-sys?))))
-
-(defn total-aggregate-stats
-  [spout-summs bolt-summs include-sys?]
-  (let [spout-stats (get-filled-stats spout-summs)
-        bolt-stats (get-filled-stats bolt-summs)
-        agg-spout-stats (-> spout-stats
-                            (aggregate-spout-stats include-sys?)
-                            aggregate-spout-streams)
-        agg-bolt-stats (-> bolt-stats
-                           (aggregate-bolt-stats include-sys?)
-                           aggregate-bolt-streams)]
-    (merge-with
-      (fn [s1 s2]
-        (merge-with + s1 s2))
-      (select-keys
-        agg-bolt-stats
-        ;; Include only keys that will be used.  We want to count acked and
-        ;; failed only for the "tuple trees," so we do not include those keys
-        ;; from the bolt executors.
-        [:emitted :transferred])
-      agg-spout-stats)))
-
-(defn error-subset
-  [error-str]
-  (apply str (take 200 error-str)))
-
-(defn most-recent-error
-  [errors-list]
-  (let [error (->> errors-list
-                   (sort-by #(.get_error_time_secs ^ErrorInfo %))
-                   reverse
-                   first)]
-    (if error
-      (error-subset (.get_error ^ErrorInfo error))
-      "")))
-
-(defn float-str [n]
-  (if n
-    (format "%.3f" (float n))
-    "0"))
-
-(defn compute-bolt-capacity
-  [executors]
-  (->> executors
-       (map compute-executor-capacity)
-       (map nil-to-zero)
-       (apply max)))
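
For reference, a minimal sketch (not part of this patch) of how the reshaping helpers and the capacity metric in the removed stats code behave; the sample maps, the 600-second window key, and the latency numbers below are invented for illustration.

;; window-set-converter applies first-key-fun to the outer (window) keys and
;; key-fn to the nested (stream) keys, leaving the values untouched:
(window-set-converter {600 {:default 42, :ack 3}} name str)
;; => {"600" {"default" 42, "ack" 3}}

;; swap-map-order turns a stat->window->value map inside out:
(swap-map-order {:acked {"600" 10}, :failed {"600" 2}})
;; => {"600" {:acked 10, :failed 2}}

;; Latency aggregation keeps running totals (avg * count) and divides once at
;; the end: streams with (2.0 ms avg, 10 executed) and (4.0 ms avg, 30 executed)
;; combine to 140.0 total / 40 executed = 3.5 ms execute latency.

;; compute-agg-capacity boils down to
;;   capacity = sum(execute-latency-ms * executed) / (1000 * min(uptime, 600s))
;; i.e. the fraction of the ten-minute window the executor spent executing tuples.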


[32/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/command/config_value.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/command/config_value.clj b/storm-core/src/clj/backtype/storm/command/config_value.clj
deleted file mode 100644
index 1d193a2..0000000
--- a/storm-core/src/clj/backtype/storm/command/config_value.clj
+++ /dev/null
@@ -1,24 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.command.config-value
-  (:use [backtype.storm config log])
-  (:gen-class))
-
-
-(defn -main [^String name]
-  (let [conf (read-storm-config)]
-    (println "VALUE:" (conf name))
-    ))
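
As a hedged usage sketch (not part of this patch): the command simply echoes a single value from the merged Storm configuration; the printed value depends entirely on the local storm.yaml and defaults.

(require '[backtype.storm.command.config-value :as config-value])
(config-value/-main "storm.zookeeper.port")
;; prints: VALUE: 2181   (assuming the stock default port)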

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/command/deactivate.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/command/deactivate.clj b/storm-core/src/clj/backtype/storm/command/deactivate.clj
deleted file mode 100644
index 1a614de..0000000
--- a/storm-core/src/clj/backtype/storm/command/deactivate.clj
+++ /dev/null
@@ -1,24 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.command.deactivate
-  (:use [backtype.storm thrift log])
-  (:gen-class))
-
-(defn -main [name] 
-  (with-configured-nimbus-connection nimbus
-    (.deactivate nimbus name)
-    (log-message "Deactivated topology: " name)
-    ))
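
These command namespaces are thin entry points behind the storm CLI. A hedged illustration only, with a made-up topology name; in practice this is reached through the storm deactivate command:

(require '[backtype.storm.command.deactivate :as deactivate])
;; Assumes a reachable Nimbus configured via storm.yaml.
(deactivate/-main "word-count")
;; logs: Deactivated topology: word-count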

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/command/dev_zookeeper.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/command/dev_zookeeper.clj b/storm-core/src/clj/backtype/storm/command/dev_zookeeper.clj
deleted file mode 100644
index d90e72a..0000000
--- a/storm-core/src/clj/backtype/storm/command/dev_zookeeper.clj
+++ /dev/null
@@ -1,26 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.command.dev-zookeeper
-  (:use [backtype.storm zookeeper util config])
-  (:gen-class))
-
-(defn -main [& args]
-  (let [conf (read-storm-config)
-        port (conf STORM-ZOOKEEPER-PORT)
-        localpath (conf DEV-ZOOKEEPER-PATH)]
-    (rmr localpath)
-    (mk-inprocess-zookeeper localpath :port port)
-    ))
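
For context, a minimal sketch (not part of this patch) of driving the dev-zookeeper entry point from a REPL; it reads the standard storm.zookeeper.port and dev.zookeeper.path settings, wipes the local path, and starts an in-process ZooKeeper.

(require '[backtype.storm.command.dev-zookeeper :as dev-zk])
;; Deletes dev.zookeeper.path and starts an in-process ZooKeeper on
;; storm.zookeeper.port; intended for local development only.
(dev-zk/-main)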

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/command/get_errors.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/command/get_errors.clj b/storm-core/src/clj/backtype/storm/command/get_errors.clj
deleted file mode 100644
index 60707b2..0000000
--- a/storm-core/src/clj/backtype/storm/command/get_errors.clj
+++ /dev/null
@@ -1,52 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.command.get-errors
-  (:use [clojure.tools.cli :only [cli]])
-  (:use [backtype.storm thrift log])
-  (:use [backtype.storm util])
-  (:require [backtype.storm.daemon
-             [nimbus :as nimbus]
-             [common :as common]])
-  (:import [backtype.storm.generated GetInfoOptions NumErrorsChoice
-            TopologySummary ErrorInfo])
-  (:gen-class))
-
-(defn get-topology-id [name topologies]
-  (let [topology (first (filter #(= (.get_name %1) name) topologies))]
-    (when (not-nil? topology) (.get_id topology))))
-
-(defn get-component-errors
-  [topology-errors]
-  (apply hash-map (remove nil?
-                    (flatten (for [[comp-name comp-errors] topology-errors]
-                               (let [latest-error (when (not (empty? comp-errors)) (first comp-errors))]
-                                 (if latest-error [comp-name (.get_error ^ErrorInfo latest-error)])))))))
-
-(defn -main [name]
-  (with-configured-nimbus-connection nimbus
-    (let [opts (doto (GetInfoOptions.)
-                 (.set_num_err_choice NumErrorsChoice/ONE))
-          cluster-info (.getClusterInfo nimbus)
-          topologies (.get_topologies cluster-info)
-          topo-id (get-topology-id name topologies)
-          topo-info (when (not-nil? topo-id) (.getTopologyInfoWithOpts nimbus topo-id opts))]
-      (if (or (nil? topo-id) (nil? topo-info))
-        (println (to-json {"Failure" (str "No topologies running with name " name)}))
-        (let [topology-name (.get_name topo-info)
-              topology-errors (.get_errors topo-info)]
-          (println (to-json (hash-map
-                              "Topology Name" topology-name
-                              "Comp-Errors" (get-component-errors topology-errors)))))))))
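
As a rough illustration of the helper above, get-component-errors collapses each component's error list down to its latest error message; the component names and ErrorInfo below are made up:

    ;; Sketch only: the first ErrorInfo in a list is treated as the latest error,
    ;; and components with no errors are dropped from the result.
    (require '[backtype.storm.command.get-errors :refer [get-component-errors]])
    (import '[backtype.storm.generated ErrorInfo])
    (get-component-errors
      {"word-spout" [(ErrorInfo. "fail to emit tuple" 1452546000)]
       "count-bolt" []})
    ;; => {"word-spout" "fail to emit tuple"}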

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/command/healthcheck.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/command/healthcheck.clj b/storm-core/src/clj/backtype/storm/command/healthcheck.clj
deleted file mode 100644
index 14af223..0000000
--- a/storm-core/src/clj/backtype/storm/command/healthcheck.clj
+++ /dev/null
@@ -1,88 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.command.healthcheck
-  (:require [backtype.storm
-             [config :refer :all]
-             [log :refer :all]]
-            [clojure.java [io :as io]]
-            [clojure [string :refer [split]]])
-  (:gen-class))
-
-(defn interrupter
-  "Interrupt a given thread after ms milliseconds."
-  [thread ms]
-  (let [interrupter (Thread.
-                     (fn []
-                       (try
-                         (Thread/sleep ms)
-                         (.interrupt thread)
-                         (catch InterruptedException e))))]
-    (.start interrupter)
-    interrupter))
-
-(defn check-output [lines]
-  (if (some #(.startsWith % "ERROR") lines)
-    :failed
-    :success))
-
-(defn process-script [conf script]
-  (let [script-proc (. (Runtime/getRuntime) (exec script))
-        curthread (Thread/currentThread)
-        interrupter-thread (interrupter curthread
-                                        (conf STORM-HEALTH-CHECK-TIMEOUT-MS))]
-    (try
-      (.waitFor script-proc)
-      (.interrupt interrupter-thread)
-      (if (not (= (.exitValue script-proc) 0))
-        :failed_with_exit_code
-        (check-output (split
-                       (slurp (.getInputStream script-proc))
-                       #"\n+")))
-      (catch InterruptedException e
-        (println "Script" script "timed out.")
-        :timeout)
-      (catch Exception e
-        (println "Script failed with exception: " e)
-        :failed_with_exception)
-      (finally (.interrupt interrupter-thread)))))
-
-(defn health-check [conf]
-  (let [health-dir (absolute-healthcheck-dir conf)
-        health-files (file-seq (io/file health-dir))
-        health-scripts (filter #(and (.canExecute %)
-                                     (not (.isDirectory %)))
-                               health-files)
-        results (->> health-scripts
-                     (map #(.getAbsolutePath %))
-                     (map (partial process-script conf)))]
-    (log-message
-     (pr-str (map #'vector
-                  (map #(.getAbsolutePath %) health-scripts)
-                  results)))
-    ; failed_with_exit_code is OK. We're mimicking Hadoop's health checks.
-    ; We treat non-zero exit codes as indicators that the scripts failed
-    ; to execute properly, not that the system is unhealthy, in which case
-    ; we don't want to start killing things.
-    (if (every? #(or (= % :failed_with_exit_code)
-                     (= % :success))
-                results)
-      0
-      1)))
-
-(defn -main [& args]
-  (let [conf (read-storm-config)]
-    (System/exit
-     (health-check conf))))
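
To make the exit-code convention described in the comment above concrete, here is the same rule applied to a hypothetical set of per-script results:

    ;; Sketch only: non-zero script exits (:failed_with_exit_code) are tolerated,
    ;; while :failed output, :timeout or :failed_with_exception mark the node unhealthy.
    (let [results [:success :failed_with_exit_code :success]]
      (if (every? #(or (= % :failed_with_exit_code) (= % :success)) results)
        0
        1))
    ;; => 0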

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/command/heartbeats.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/command/heartbeats.clj b/storm-core/src/clj/backtype/storm/command/heartbeats.clj
deleted file mode 100644
index 99790aa..0000000
--- a/storm-core/src/clj/backtype/storm/command/heartbeats.clj
+++ /dev/null
@@ -1,52 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.command.heartbeats
-  (:require [backtype.storm
-             [config :refer :all]
-             [log :refer :all]
-             [cluster :refer :all]
-             [converter :refer :all]]
-        [clojure.string :refer :all])
-  (:import [backtype.storm.generated ClusterWorkerHeartbeat]
-           [backtype.storm.utils Utils])
-  (:gen-class))
-
-(defn -main [command path & args]
-  (let [conf (read-storm-config)
-        cluster (mk-distributed-cluster-state conf :auth-conf conf)]
-    (println "Command: [" command "]")
-    (condp = command
-      "list"
-      (let [message (join " \n" (.get_worker_hb_children cluster path false))]
-        (log-message "list " path ":\n"
-                     message "\n"))
-      "get"
-      (log-message 
-       (if-let [hb (.get_worker_hb cluster path false)]
-         (clojurify-zk-worker-hb
-          (Utils/deserialize
-           hb
-           ClusterWorkerHeartbeat))
-         "Nothing"))
-      
-      (log-message "Usage: heartbeats [list|get] path"))
-    
-    (try
-      (.close cluster)
-      (catch Exception e
-        (log-message "Caught exception: " e " on close."))))
-  (System/exit 0))
-         
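
Roughly, the command above takes a subcommand and a ZooKeeper path; the paths in this sketch are hypothetical, and since -main ends with System/exit each call is meant to run as its own process:

    ;; Sketch only: "list" prints the worker-heartbeat children under a path,
    ;; "get" fetches and deserializes a single ClusterWorkerHeartbeat.
    (require '[backtype.storm.command.heartbeats :as heartbeats])
    (heartbeats/-main "list" "/storm/workerbeats")
    (heartbeats/-main "get" "/storm/workerbeats/topo-1-1452546000/node-6702")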

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/command/kill_topology.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/command/kill_topology.clj b/storm-core/src/clj/backtype/storm/command/kill_topology.clj
deleted file mode 100644
index 94b4585..0000000
--- a/storm-core/src/clj/backtype/storm/command/kill_topology.clj
+++ /dev/null
@@ -1,29 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.command.kill-topology
-  (:use [clojure.tools.cli :only [cli]])
-  (:use [backtype.storm thrift config log])
-  (:import [backtype.storm.generated KillOptions])
-  (:gen-class))
-
-(defn -main [& args]
-  (let [[{wait :wait} [name] _] (cli args ["-w" "--wait" :default nil :parse-fn #(Integer/parseInt %)])
-        opts (KillOptions.)]
-    (if wait (.set_wait_secs opts wait))
-    (with-configured-nimbus-connection nimbus
-      (.killTopologyWithOpts nimbus name opts)
-      (log-message "Killed topology: " name)
-      )))
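
For reference, a hedged sketch of how the -w flag and topology name above reach Nimbus; the wait time and topology name are made up:

    ;; Sketch only: "-w 10" becomes KillOptions.wait_secs before
    ;; killTopologyWithOpts is invoked for the named topology.
    (require '[backtype.storm.command.kill-topology :as kill])
    (kill/-main "-w" "10" "word-count")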

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/command/kill_workers.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/command/kill_workers.clj b/storm-core/src/clj/backtype/storm/command/kill_workers.clj
deleted file mode 100644
index 3866cc7..0000000
--- a/storm-core/src/clj/backtype/storm/command/kill_workers.clj
+++ /dev/null
@@ -1,33 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.command.kill-workers
-  (:import [java.io File])
-  (:use [backtype.storm.daemon common])
-  (:use [backtype.storm util config])
-  (:require [backtype.storm.daemon
-             [supervisor :as supervisor]])
-  (:gen-class))
-
-(defn -main 
-  "Construct the supervisor-data from scratch and kill the workers on this supervisor"
-  [& args]
-  (let [conf (read-storm-config)
-        conf (assoc conf STORM-LOCAL-DIR (. (File. (conf STORM-LOCAL-DIR)) getCanonicalPath))
-        isupervisor (supervisor/standalone-supervisor)
-        supervisor-data (supervisor/supervisor-data conf nil isupervisor)
-        ids (supervisor/my-worker-ids conf)]
-    (doseq [id ids]
-      (supervisor/shutdown-worker supervisor-data id))))

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/command/list.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/command/list.clj b/storm-core/src/clj/backtype/storm/command/list.clj
deleted file mode 100644
index 79cfcf7..0000000
--- a/storm-core/src/clj/backtype/storm/command/list.clj
+++ /dev/null
@@ -1,38 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.command.list
-  (:use [backtype.storm thrift log])
-  (:import [backtype.storm.generated TopologySummary])
-  (:gen-class))
-
-(defn -main []
-  (with-configured-nimbus-connection nimbus
-    (let [cluster-info (.getClusterInfo nimbus)
-          topologies (.get_topologies cluster-info)
-          msg-format "%-20s %-10s %-10s %-12s %-10s"]
-      (if (or (nil? topologies) (empty? topologies))
-        (println "No topologies running.")
-        (do
-          (println (format msg-format "Topology_name" "Status" "Num_tasks" "Num_workers" "Uptime_secs"))
-          (println "-------------------------------------------------------------------")
-          (doseq [^TopologySummary topology topologies]
-            (let [topology-name (.get_name topology)
-                  topology-status (.get_status topology)
-                  topology-num-tasks (.get_num_tasks topology)
-                  topology-num-workers (.get_num_workers topology)
-                  topology-uptime-secs (.get_uptime_secs topology)]
-              (println (format msg-format  topology-name topology-status topology-num-tasks
-                               topology-num-workers topology-uptime-secs)))))))))
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/command/monitor.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/command/monitor.clj b/storm-core/src/clj/backtype/storm/command/monitor.clj
deleted file mode 100644
index 36ccbc9..0000000
--- a/storm-core/src/clj/backtype/storm/command/monitor.clj
+++ /dev/null
@@ -1,37 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.command.monitor
-  (:use [clojure.tools.cli :only [cli]])
-  (:use [backtype.storm.thrift :only [with-configured-nimbus-connection]])
-  (:import [backtype.storm.utils Monitor])
-  (:gen-class)
- )
-
-(defn -main [& args]
-  (let [[{interval :interval component :component stream :stream watch :watch} [name] _]
-        (cli args ["-i" "--interval" :default 4 :parse-fn #(Integer/parseInt %)]
-          ["-m" "--component" :default nil]
-          ["-s" "--stream" :default "default"]
-          ["-w" "--watch" :default "emitted"])
-        mon (Monitor.)]
-    (if interval (.set_interval mon interval))
-    (if name (.set_topology mon name))
-    (if component (.set_component mon component))
-    (if stream (.set_stream mon stream))
-    (if watch (.set_watch mon watch))
-    (with-configured-nimbus-connection nimbus
-      (.metrics mon nimbus)
-      )))

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/command/rebalance.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/command/rebalance.clj b/storm-core/src/clj/backtype/storm/command/rebalance.clj
deleted file mode 100644
index e3a032b..0000000
--- a/storm-core/src/clj/backtype/storm/command/rebalance.clj
+++ /dev/null
@@ -1,46 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.command.rebalance
-  (:use [clojure.tools.cli :only [cli]])
-  (:use [backtype.storm thrift config log])
-  (:import [backtype.storm.generated RebalanceOptions])
-  (:gen-class))
-
-(defn- parse-executor [^String s]
-  (let [eq-pos (.lastIndexOf s "=")
-        name (.substring s 0 eq-pos)
-        amt (.substring s (inc eq-pos))]
-    {name (Integer/parseInt amt)}
-    ))
-
-(defn -main [& args] 
-  (let [[{wait :wait executor :executor num-workers :num-workers} [name] _]
-                  (cli args ["-w" "--wait" :default nil :parse-fn #(Integer/parseInt %)]
-                            ["-n" "--num-workers" :default nil :parse-fn #(Integer/parseInt %)]
-                            ["-e" "--executor"  :parse-fn parse-executor
-                             :assoc-fn (fn [previous key val]
-                                         (assoc previous key
-                                                (if-let [oldval (get previous key)]
-                                                  (merge oldval val)
-                                                  val)))])
-        opts (RebalanceOptions.)]
-    (if wait (.set_wait_secs opts wait))
-    (if executor (.set_num_executors opts executor))
-    (if num-workers (.set_num_workers opts num-workers))
-    (with-configured-nimbus-connection nimbus
-      (.rebalance nimbus name opts)
-      (log-message "Topology " name " is rebalancing")
-      )))
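
As a small illustration of the option parsing above, parse-executor turns each -e argument into a component-to-parallelism entry; the component name is hypothetical:

    ;; Sketch only: as evaluated inside the rebalance namespace (parse-executor is private).
    ;; Repeated -e flags are merged into a single map by the :assoc-fn above.
    (parse-executor "split-bolt=8")
    ;; => {"split-bolt" 8}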

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/command/set_log_level.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/command/set_log_level.clj b/storm-core/src/clj/backtype/storm/command/set_log_level.clj
deleted file mode 100644
index 88b297d..0000000
--- a/storm-core/src/clj/backtype/storm/command/set_log_level.clj
+++ /dev/null
@@ -1,75 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.command.set-log-level
-  (:use [clojure.tools.cli :only [cli]])
-  (:use [backtype.storm thrift log])
-  (:import [org.apache.logging.log4j Level])
-  (:import [backtype.storm.generated LogConfig LogLevel LogLevelAction])
-  (:gen-class))
-
-(defn- get-storm-id
-  "Get topology id for a running topology from the topology name."
-  [nimbus name]
-  (let [info (.getClusterInfo nimbus)
-        topologies (.get_topologies info)
-        topology (first (filter (fn [topo] (= name (.get_name topo))) topologies))]
-    (if topology 
-      (.get_id topology)
-      (throw (IllegalArgumentException. (str name " is not a running topology"))))))
-
-(defn- parse-named-log-levels [action]
-  "Parses [logger name]=[level string]:[optional timeout],[logger name2]...
-
-   e.g. ROOT=DEBUG:30
-        root logger, debug for 30 seconds
-
-        org.apache.foo=WARN
-        org.apache.foo set to WARN indefinitely"
-  (fn [^String s]
-    (let [log-args (re-find #"(.*)=([A-Z]+):?(\d*)" s)
-          name (if (= action LogLevelAction/REMOVE) s (nth log-args 1))
-          level (Level/toLevel (nth log-args 2))
-          timeout-str (nth log-args 3)
-          log-level (LogLevel.)]
-      (if (= action LogLevelAction/REMOVE)
-        (.set_action log-level action)
-        (do
-          (.set_action log-level action)
-          (.set_target_log_level log-level (.toString level))
-          (.set_reset_log_level_timeout_secs log-level
-            (Integer. (if (= timeout-str "") "0" timeout-str)))))
-      {name log-level})))
-
-(defn- merge-together [previous key val]
-   (assoc previous key
-      (if-let [oldval (get previous key)]
-         (merge oldval val)
-         val)))
-
-(defn -main [& args]
-  (let [[{log-setting :log-setting remove-log-setting :remove-log-setting} [name] _]
-        (cli args ["-l" "--log-setting"
-                   :parse-fn (parse-named-log-levels LogLevelAction/UPDATE)
-                   :assoc-fn merge-together]
-                  ["-r" "--remove-log-setting"
-                   :parse-fn (parse-named-log-levels LogLevelAction/REMOVE)
-                   :assoc-fn merge-together])
-        log-config (LogConfig.)]
-    (doseq [[log-name log-val] (merge log-setting remove-log-setting)]
-      (.put_to_named_logger_level log-config log-name log-val))
-    (log-message "Sent log config " log-config " for topology " name)
-    (with-configured-nimbus-connection nimbus
-      (.setLogConfig nimbus (get-storm-id nimbus name) log-config))))
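
To make the format documented in parse-named-log-levels concrete, the regex it uses splits the two documented examples as follows:

    ;; Sketch only: full match, logger name, level, optional timeout in seconds.
    (re-find #"(.*)=([A-Z]+):?(\d*)" "ROOT=DEBUG:30")
    ;; => ["ROOT=DEBUG:30" "ROOT" "DEBUG" "30"]
    (re-find #"(.*)=([A-Z]+):?(\d*)" "org.apache.foo=WARN")
    ;; => ["org.apache.foo=WARN" "org.apache.foo" "WARN" ""]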

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/command/shell_submission.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/command/shell_submission.clj b/storm-core/src/clj/backtype/storm/command/shell_submission.clj
deleted file mode 100644
index 9bb8efe..0000000
--- a/storm-core/src/clj/backtype/storm/command/shell_submission.clj
+++ /dev/null
@@ -1,33 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.command.shell-submission
-  (:import [backtype.storm StormSubmitter])
-  (:use [backtype.storm thrift util config log zookeeper])
-  (:require [clojure.string :as str])
-  (:gen-class))
-
-
-(defn -main [^String tmpjarpath & args]
-  (let [conf (read-storm-config)
-        zk-leader-elector (zk-leader-elector conf)
-        leader-nimbus (.getLeader zk-leader-elector)
-        host (.getHost leader-nimbus)
-        port (.getPort leader-nimbus)
-        no-op (.close zk-leader-elector)
-        jarpath (StormSubmitter/submitJar conf tmpjarpath)
-        args (concat args [host port jarpath])]
-    (exec-command! (str/join " " args))
-    ))

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/command/upload_credentials.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/command/upload_credentials.clj b/storm-core/src/clj/backtype/storm/command/upload_credentials.clj
deleted file mode 100644
index 05a82cb..0000000
--- a/storm-core/src/clj/backtype/storm/command/upload_credentials.clj
+++ /dev/null
@@ -1,35 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.command.upload-credentials
-  (:use [clojure.tools.cli :only [cli]])
-  (:use [backtype.storm log util])
-  (:import [backtype.storm StormSubmitter])
-  (:import [java.util Properties])
-  (:import [java.io FileReader])
-  (:gen-class))
-
-(defn read-map [file-name]
-  (let [props (Properties. )
-        _ (.load props (FileReader. file-name))]
-    (clojurify-structure props)))
-
-(defn -main [& args]
-  (let [[{cred-file :file} [name & rawCreds]] (cli args ["-f" "--file" :default nil])
-        _ (when (and rawCreds (not (even? (.size rawCreds)))) (throw (RuntimeException.  "Need an even number of arguments to make a map")))
-        mapping (if rawCreds (apply assoc {} rawCreds) {})
-        file-mapping (if (nil? cred-file) {} (read-map cred-file))]
-      (StormSubmitter/pushCredentials name {} (merge file-mapping mapping))
-      (log-message "Uploaded new creds to topology: " name)))

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/config.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/config.clj b/storm-core/src/clj/backtype/storm/config.clj
deleted file mode 100644
index 4d24f97..0000000
--- a/storm-core/src/clj/backtype/storm/config.clj
+++ /dev/null
@@ -1,331 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-
-(ns backtype.storm.config
-  (:import [java.io FileReader File IOException]
-           [backtype.storm.generated StormTopology])
-  (:import [backtype.storm Config])
-  (:import [backtype.storm.utils Utils LocalState])
-  (:import [backtype.storm.validation ConfigValidation])
-  (:import [org.apache.commons.io FileUtils])
-  (:require [clojure [string :as str]])
-  (:use [backtype.storm log util]))
-
-(def RESOURCES-SUBDIR "resources")
-(def NIMBUS-DO-NOT-REASSIGN "NIMBUS-DO-NOT-REASSIGN")
-
-(defn- clojure-config-name [name]
-  (.replace (.toUpperCase name) "_" "-"))
-
-; define clojure constants for every configuration parameter
-(doseq [f (seq (.getFields Config))]
-  (let [name (.getName f)
-        new-name (clojure-config-name name)]
-    (eval
-      `(def ~(symbol new-name) (. Config ~(symbol name))))))
-
-(def ALL-CONFIGS
-  (dofor [f (seq (.getFields Config))]
-         (.get f nil)))
-
-
-(defn cluster-mode
-  [conf & args]
-  (keyword (conf STORM-CLUSTER-MODE)))
-
-(defn local-mode?
-  [conf]
-  (let [mode (conf STORM-CLUSTER-MODE)]
-    (condp = mode
-      "local" true
-      "distributed" false
-      (throw (IllegalArgumentException.
-               (str "Illegal cluster mode in conf: " mode))))))
-
-(defn sampling-rate
-  [conf]
-  (->> (conf TOPOLOGY-STATS-SAMPLE-RATE)
-       (/ 1)
-       int))
-
-(defn mk-stats-sampler
-  [conf]
-  (even-sampler (sampling-rate conf)))
-
-(defn read-default-config
-  []
-  (clojurify-structure (Utils/readDefaultConfig)))
-
-(defn validate-configs-with-schemas
-  [conf]
-  (ConfigValidation/validateFields conf))
-
-(defn read-storm-config
-  []
-  (let [conf (clojurify-structure (Utils/readStormConfig))]
-    (validate-configs-with-schemas conf)
-    conf))
-
-(defn read-yaml-config
-  ([name must-exist]
-     (let [conf (clojurify-structure (Utils/findAndReadConfigFile name must-exist))]
-       (validate-configs-with-schemas conf)
-       conf))
-  ([name]
-     (read-yaml-config name true)))
-
-(defn absolute-storm-local-dir [conf]
-  (let [storm-home (System/getProperty "storm.home")
-        path (conf STORM-LOCAL-DIR)]
-    (if path
-      (if (is-absolute-path? path) path (str storm-home file-path-separator path))
-      (str storm-home file-path-separator "storm-local"))))
-
-(def LOG-DIR
-  (.getCanonicalPath
-    (clojure.java.io/file (or (System/getProperty "storm.log.dir")
-                              (get (read-storm-config) "storm.log.dir")
-                              (str (System/getProperty "storm.home") file-path-separator "logs")))))
-
-(defn absolute-healthcheck-dir [conf]
-  (let [storm-home (System/getProperty "storm.home")
-        path (conf STORM-HEALTH-CHECK-DIR)]
-    (if path
-      (if (is-absolute-path? path) path (str storm-home file-path-separator path))
-      (str storm-home file-path-separator "healthchecks"))))
-
-(defn master-local-dir
-  [conf]
-  (let [ret (str (absolute-storm-local-dir conf) file-path-separator "nimbus")]
-    (FileUtils/forceMkdir (File. ret))
-    ret))
-
-(defn master-stormjar-key
-  [topology-id]
-  (str topology-id "-stormjar.jar"))
-
-(defn master-stormcode-key
-  [topology-id]
-  (str topology-id "-stormcode.ser"))
-
-(defn master-stormconf-key
-  [topology-id]
-  (str topology-id "-stormconf.ser"))
-
-(defn master-stormdist-root
-  ([conf]
-   (str (master-local-dir conf) file-path-separator "stormdist"))
-  ([conf storm-id]
-   (str (master-stormdist-root conf) file-path-separator storm-id)))
-
-(defn master-tmp-dir
-  [conf]
-  (let [ret (str (master-local-dir conf) file-path-separator "tmp")]
-    (FileUtils/forceMkdir (File. ret))
-    ret ))
-
-(defn read-supervisor-storm-conf-given-path
-  [conf stormconf-path]
-  (merge conf (clojurify-structure (Utils/fromCompressedJsonConf (FileUtils/readFileToByteArray (File. stormconf-path))))))
-
-(defn master-storm-metafile-path [stormroot ]
-  (str stormroot file-path-separator "storm-code-distributor.meta"))
-
-(defn master-stormjar-path
-  [stormroot]
-  (str stormroot file-path-separator "stormjar.jar"))
-
-(defn master-stormcode-path
-  [stormroot]
-  (str stormroot file-path-separator "stormcode.ser"))
-
-(defn master-stormconf-path
-  [stormroot]
-  (str stormroot file-path-separator "stormconf.ser"))
-
-(defn master-inbox
-  [conf]
-  (let [ret (str (master-local-dir conf) file-path-separator "inbox")]
-    (FileUtils/forceMkdir (File. ret))
-    ret ))
-
-(defn master-inimbus-dir
-  [conf]
-  (str (master-local-dir conf) file-path-separator "inimbus"))
-
-(defn supervisor-local-dir
-  [conf]
-  (let [ret (str (absolute-storm-local-dir conf) file-path-separator "supervisor")]
-    (FileUtils/forceMkdir (File. ret))
-    ret))
-
-(defn supervisor-isupervisor-dir
-  [conf]
-  (str (supervisor-local-dir conf) file-path-separator "isupervisor"))
-
-(defn supervisor-stormdist-root
-  ([conf]
-   (str (supervisor-local-dir conf) file-path-separator "stormdist"))
-  ([conf storm-id]
-   (str (supervisor-stormdist-root conf) file-path-separator (url-encode storm-id))))
-
-(defn supervisor-stormjar-path [stormroot]
-  (str stormroot file-path-separator "stormjar.jar"))
-
-(defn supervisor-storm-metafile-path [stormroot]
-  (str stormroot file-path-separator "storm-code-distributor.meta"))
-
-(defn supervisor-stormcode-path
-  [stormroot]
-  (str stormroot file-path-separator "stormcode.ser"))
-
-(defn supervisor-stormconf-path
-  [stormroot]
-  (str stormroot file-path-separator "stormconf.ser"))
-
-(defn supervisor-tmp-dir
-  [conf]
-  (let [ret (str (supervisor-local-dir conf) file-path-separator "tmp")]
-    (FileUtils/forceMkdir (File. ret))
-    ret ))
-
-(defn supervisor-storm-resources-path
-  [stormroot]
-  (str stormroot file-path-separator RESOURCES-SUBDIR))
-
-(defn ^LocalState supervisor-state
-  [conf]
-  (LocalState. (str (supervisor-local-dir conf) file-path-separator "localstate")))
-
-(defn ^LocalState nimbus-topo-history-state
-  [conf]
-  (LocalState. (str (master-local-dir conf) file-path-separator "history")))
-
-(defn read-supervisor-storm-conf
-  [conf storm-id]
-  (let [stormroot (supervisor-stormdist-root conf storm-id)
-        conf-path (supervisor-stormconf-path stormroot)]
-    (read-supervisor-storm-conf-given-path conf conf-path)))
-
-(defn read-supervisor-topology
-  [conf storm-id]
-  (let [stormroot (supervisor-stormdist-root conf storm-id)
-        topology-path (supervisor-stormcode-path stormroot)]
-    (Utils/deserialize (FileUtils/readFileToByteArray (File. topology-path)) StormTopology)
-    ))
-
-(defn worker-user-root [conf]
-  (str (absolute-storm-local-dir conf) "/workers-users"))
-
-(defn worker-user-file [conf worker-id]
-  (str (worker-user-root conf) "/" worker-id))
-
-(defn get-worker-user [conf worker-id]
-  (log-message "GET worker-user " worker-id)
-  (try
-    (str/trim (slurp (worker-user-file conf worker-id)))
-  (catch IOException e
-    (log-warn-error e "Failed to get worker user for " worker-id ".")
-    nil
-    )))
-
-(defn get-id-from-blob-key
-  [key]
-  (if-let [groups (re-find #"^(.*)((-stormjar\.jar)|(-stormcode\.ser)|(-stormconf\.ser))$" key)]
-    (nth groups 1)))
-
-(defn set-worker-user! [conf worker-id user]
-  (log-message "SET worker-user " worker-id " " user)
-  (let [file (worker-user-file conf worker-id)]
-    (.mkdirs (.getParentFile (File. file)))
-    (spit (worker-user-file conf worker-id) user)))
-
-(defn remove-worker-user! [conf worker-id]
-  (log-message "REMOVE worker-user " worker-id)
-  (.delete (File. (worker-user-file conf worker-id))))
-
-(defn worker-artifacts-root
-  ([conf]
-   (let [workers-artifacts-dir (conf STORM-WORKERS-ARTIFACTS-DIR)]
-     (if workers-artifacts-dir
-       (if (is-absolute-path? workers-artifacts-dir)
-         workers-artifacts-dir
-         (str LOG-DIR file-path-separator workers-artifacts-dir))
-       (str LOG-DIR file-path-separator "workers-artifacts"))))
-  ([conf id]
-   (str (worker-artifacts-root conf) file-path-separator id))
-  ([conf id port]
-   (str (worker-artifacts-root conf id) file-path-separator port)))
-
-(defn worker-artifacts-pid-path
-  [conf id port]
-  (str (worker-artifacts-root conf id port) file-path-separator "worker.pid"))
-
-(defn get-log-metadata-file
-  ([fname]
-    (let [[id port & _] (str/split fname (re-pattern file-path-separator))]
-      (get-log-metadata-file (read-storm-config) id port)))
-  ([conf id port]
-    (clojure.java.io/file (str (worker-artifacts-root conf id) file-path-separator port file-path-separator) "worker.yaml")))
-
-(defn get-worker-dir-from-root
-  [log-root id port]
-  (clojure.java.io/file (str log-root file-path-separator id file-path-separator port)))
-
-(defn worker-root
-  ([conf]
-   (str (absolute-storm-local-dir conf) file-path-separator "workers"))
-  ([conf id]
-   (str (worker-root conf) file-path-separator id)))
-
-(defn worker-pids-root
-  [conf id]
-  (str (worker-root conf id) file-path-separator "pids"))
-
-(defn worker-pid-path
-  [conf id pid]
-  (str (worker-pids-root conf id) file-path-separator pid))
-
-(defn worker-heartbeats-root
-  [conf id]
-  (str (worker-root conf id) file-path-separator "heartbeats"))
-
-;; workers heartbeat here with pid and timestamp
-;; if supervisor stops receiving heartbeat, it kills and restarts the process
-;; in local mode, keep a global map of ids to threads for simulating process management
-(defn ^LocalState worker-state
-  [conf id]
-  (LocalState. (worker-heartbeats-root conf id)))
-
-(defn override-login-config-with-system-property [conf]
-  (if-let [login_conf_file (System/getProperty "java.security.auth.login.config")]
-    (assoc conf "java.security.auth.login.config" login_conf_file)
-    conf))
-
-(defn get-topo-logs-users
-  [topology-conf]
-  (sort (distinct (remove nil?
-                    (concat
-                      (topology-conf LOGS-USERS)
-                      (topology-conf TOPOLOGY-USERS))))))
-
-(defn get-topo-logs-groups
-  [topology-conf]
-  (sort (distinct (remove nil?
-                    (concat
-                      (topology-conf LOGS-GROUPS)
-                      (topology-conf TOPOLOGY-GROUPS))))))
-
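
For orientation, the doseq near the top of config.clj interns one Clojure constant per field of backtype.storm.Config; a rough sketch of the renaming it relies on:

    ;; Sketch only: as evaluated inside backtype.storm.config (clojure-config-name is private).
    ;; Config/STORM_ZOOKEEPER_PORT becomes the var STORM-ZOOKEEPER-PORT, which is how
    ;; commands such as dev-zookeeper above look values up in the conf map.
    (clojure-config-name "STORM_ZOOKEEPER_PORT")
    ;; => "STORM-ZOOKEEPER-PORT"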

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/converter.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/converter.clj b/storm-core/src/clj/backtype/storm/converter.clj
deleted file mode 100644
index 52a1817..0000000
--- a/storm-core/src/clj/backtype/storm/converter.clj
+++ /dev/null
@@ -1,277 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.converter
-  (:import [backtype.storm.generated SupervisorInfo NodeInfo Assignment WorkerResources
-            StormBase TopologyStatus ClusterWorkerHeartbeat ExecutorInfo ErrorInfo Credentials RebalanceOptions KillOptions
-            TopologyActionOptions DebugOptions ProfileRequest])
-  (:use [backtype.storm util stats log])
-  (:require [backtype.storm.daemon [common :as common]]))
-
-(defn thriftify-supervisor-info [supervisor-info]
-  (doto (SupervisorInfo.)
-    (.set_time_secs (long (:time-secs supervisor-info)))
-    (.set_hostname (:hostname supervisor-info))
-    (.set_assignment_id (:assignment-id supervisor-info))
-    (.set_used_ports (map long (:used-ports supervisor-info)))
-    (.set_meta (map long (:meta supervisor-info)))
-    (.set_scheduler_meta (:scheduler-meta supervisor-info))
-    (.set_uptime_secs (long (:uptime-secs supervisor-info)))
-    (.set_version (:version supervisor-info))
-    (.set_resources_map (:resources-map supervisor-info))
-    ))
-
-(defn clojurify-supervisor-info [^SupervisorInfo supervisor-info]
-  (if supervisor-info
-    (backtype.storm.daemon.common.SupervisorInfo.
-      (.get_time_secs supervisor-info)
-      (.get_hostname supervisor-info)
-      (.get_assignment_id supervisor-info)
-      (if (.get_used_ports supervisor-info) (into [] (.get_used_ports supervisor-info)))
-      (if (.get_meta supervisor-info) (into [] (.get_meta supervisor-info)))
-      (if (.get_scheduler_meta supervisor-info) (into {} (.get_scheduler_meta supervisor-info)))
-      (.get_uptime_secs supervisor-info)
-      (.get_version supervisor-info)
-      (if-let [res-map (.get_resources_map supervisor-info)] (into {} res-map)))))
-
-(defn thriftify-assignment [assignment]
-  (let [thrift-assignment (doto (Assignment.)
-                            (.set_master_code_dir (:master-code-dir assignment))
-                            (.set_node_host (:node->host assignment))
-                            (.set_executor_node_port (into {}
-                                                           (map (fn [[k v]]
-                                                                  [(map long k)
-                                                                   (NodeInfo. (first v) (set (map long (rest v))))])
-                                                                (:executor->node+port assignment))))
-                            (.set_executor_start_time_secs
-                              (into {}
-                                    (map (fn [[k v]]
-                                           [(map long k) (long v)])
-                                         (:executor->start-time-secs assignment)))))]
-    (if (:worker->resources assignment)
-      (.set_worker_resources thrift-assignment (into {} (map
-                                                          (fn [[node+port resources]]
-                                                            [(NodeInfo. (first node+port) (set (map long (rest node+port))))
-                                                             (doto (WorkerResources.)
-                                                               (.set_mem_on_heap (first resources))
-                                                               (.set_mem_off_heap (second resources))
-                                                               (.set_cpu (last resources)))])
-                                                          (:worker->resources assignment)))))
-    thrift-assignment))
-
-(defn clojurify-executor->node_port [executor->node_port]
-  (into {}
-    (map-val
-      (fn [nodeInfo]
-        (concat [(.get_node nodeInfo)] (.get_port nodeInfo))) ;nodeInfo should be converted to [node,port1,port2..]
-      (map-key
-        (fn [list-of-executors]
-          (into [] list-of-executors)) ; list of executors must be converted to a Clojure vector to ensure it is sortable.
-        executor->node_port))))
-
-(defn clojurify-worker->resources [worker->resources]
-  "convert worker info to be [node, port]
-   convert resources to be [mem_on_heap mem_off_heap cpu]"
-  (into {} (map
-             (fn [[nodeInfo resources]]
-               [(concat [(.get_node nodeInfo)] (.get_port nodeInfo))
-                [(.get_mem_on_heap resources) (.get_mem_off_heap resources) (.get_cpu resources)]])
-             worker->resources)))
-
-(defn clojurify-assignment [^Assignment assignment]
-  (if assignment
-    (backtype.storm.daemon.common.Assignment.
-      (.get_master_code_dir assignment)
-      (into {} (.get_node_host assignment))
-      (clojurify-executor->node_port (into {} (.get_executor_node_port assignment)))
-      (map-key (fn [executor] (into [] executor))
-        (into {} (.get_executor_start_time_secs assignment)))
-      (clojurify-worker->resources (into {} (.get_worker_resources assignment))))))
-
-(defn convert-to-symbol-from-status [status]
-  (condp = status
-    TopologyStatus/ACTIVE {:type :active}
-    TopologyStatus/INACTIVE {:type :inactive}
-    TopologyStatus/REBALANCING {:type :rebalancing}
-    TopologyStatus/KILLED {:type :killed}
-    nil))
-
-(defn- convert-to-status-from-symbol [status]
-  (if status
-    (condp = (:type status)
-      :active TopologyStatus/ACTIVE
-      :inactive TopologyStatus/INACTIVE
-      :rebalancing TopologyStatus/REBALANCING
-      :killed TopologyStatus/KILLED
-      nil)))
-
-(defn clojurify-rebalance-options [^RebalanceOptions rebalance-options]
-  (-> {:action :rebalance}
-    (assoc-non-nil :delay-secs (if (.is_set_wait_secs rebalance-options) (.get_wait_secs rebalance-options)))
-    (assoc-non-nil :num-workers (if (.is_set_num_workers rebalance-options) (.get_num_workers rebalance-options)))
-    (assoc-non-nil :component->executors (if (.is_set_num_executors rebalance-options) (into {} (.get_num_executors rebalance-options))))))
-
-(defn thriftify-rebalance-options [rebalance-options]
-  (if rebalance-options
-    (let [thrift-rebalance-options (RebalanceOptions.)]
-      (if (:delay-secs rebalance-options)
-        (.set_wait_secs thrift-rebalance-options (int (:delay-secs rebalance-options))))
-      (if (:num-workers rebalance-options)
-        (.set_num_workers thrift-rebalance-options (int (:num-workers rebalance-options))))
-      (if (:component->executors rebalance-options)
-        (.set_num_executors thrift-rebalance-options (map-val int (:component->executors rebalance-options))))
-      thrift-rebalance-options)))
-
-(defn clojurify-kill-options [^KillOptions kill-options]
-  (-> {:action :kill}
-    (assoc-non-nil :delay-secs (if (.is_set_wait_secs kill-options) (.get_wait_secs kill-options)))))
-
-(defn thriftify-kill-options [kill-options]
-  (if kill-options
-    (let [thrift-kill-options (KillOptions.)]
-      (if (:delay-secs kill-options)
-        (.set_wait_secs thrift-kill-options (int (:delay-secs kill-options))))
-      thrift-kill-options)))
-
-(defn thriftify-topology-action-options [storm-base]
-  (if (:topology-action-options storm-base)
-    (let [ topology-action-options (:topology-action-options storm-base)
-           action (:action topology-action-options)
-           thrift-topology-action-options (TopologyActionOptions.)]
-      (if (= action :kill)
-        (.set_kill_options thrift-topology-action-options (thriftify-kill-options topology-action-options)))
-      (if (= action :rebalance)
-        (.set_rebalance_options thrift-topology-action-options (thriftify-rebalance-options topology-action-options)))
-      thrift-topology-action-options)))
-
-(defn clojurify-topology-action-options [^TopologyActionOptions topology-action-options]
-  (if topology-action-options
-    (or (and (.is_set_kill_options topology-action-options)
-             (clojurify-kill-options
-               (.get_kill_options topology-action-options)))
-        (and (.is_set_rebalance_options topology-action-options)
-             (clojurify-rebalance-options
-               (.get_rebalance_options topology-action-options))))))
-
-(defn clojurify-debugoptions [^DebugOptions options]
-  (if options
-    {
-      :enable (.is_enable options)
-      :samplingpct (.get_samplingpct options)
-      }
-    ))
-
-(defn thriftify-debugoptions [options]
-  (doto (DebugOptions.)
-    (.set_enable (get options :enable false))
-    (.set_samplingpct (get options :samplingpct 10))))
-
-(defn thriftify-storm-base [storm-base]
-  (doto (StormBase.)
-    (.set_name (:storm-name storm-base))
-    (.set_launch_time_secs (int (:launch-time-secs storm-base)))
-    (.set_status (convert-to-status-from-symbol (:status storm-base)))
-    (.set_num_workers (int (:num-workers storm-base)))
-    (.set_component_executors (map-val int (:component->executors storm-base)))
-    (.set_owner (:owner storm-base))
-    (.set_topology_action_options (thriftify-topology-action-options storm-base))
-    (.set_prev_status (convert-to-status-from-symbol (:prev-status storm-base)))
-    (.set_component_debug (map-val thriftify-debugoptions (:component->debug storm-base)))))
-
-(defn clojurify-storm-base [^StormBase storm-base]
-  (if storm-base
-    (backtype.storm.daemon.common.StormBase.
-      (.get_name storm-base)
-      (.get_launch_time_secs storm-base)
-      (convert-to-symbol-from-status (.get_status storm-base))
-      (.get_num_workers storm-base)
-      (into {} (.get_component_executors storm-base))
-      (.get_owner storm-base)
-      (clojurify-topology-action-options (.get_topology_action_options storm-base))
-      (convert-to-symbol-from-status (.get_prev_status storm-base))
-      (map-val clojurify-debugoptions (.get_component_debug storm-base)))))
-
-(defn thriftify-stats [stats]
-  (if stats
-    (map-val thriftify-executor-stats
-      (map-key #(ExecutorInfo. (int (first %1)) (int (last %1)))
-        stats))
-    {}))
-
-(defn clojurify-stats [stats]
-  (if stats
-    (map-val clojurify-executor-stats
-      (map-key (fn [x] (list (.get_task_start x) (.get_task_end x)))
-        stats))
-    {}))
-
-(defn clojurify-zk-worker-hb [^ClusterWorkerHeartbeat worker-hb]
-  (if worker-hb
-    {:storm-id (.get_storm_id worker-hb)
-     :executor-stats (clojurify-stats (into {} (.get_executor_stats worker-hb)))
-     :uptime (.get_uptime_secs worker-hb)
-     :time-secs (.get_time_secs worker-hb)
-     }
-    {}))
-
-(defn thriftify-zk-worker-hb [worker-hb]
-  (if (not-empty (filter second (:executor-stats worker-hb)))
-    (doto (ClusterWorkerHeartbeat.)
-      (.set_uptime_secs (:uptime worker-hb))
-      (.set_storm_id (:storm-id worker-hb))
-      (.set_executor_stats (thriftify-stats (filter second (:executor-stats worker-hb))))
-      (.set_time_secs (:time-secs worker-hb)))))
-
-(defn clojurify-error [^ErrorInfo error]
-  (if error
-    {
-      :error (.get_error error)
-      :time-secs (.get_error_time_secs error)
-      :host (.get_host error)
-      :port (.get_port error)
-      }
-    ))
-
-(defn thriftify-error [error]
-  (doto (ErrorInfo. (:error error) (:time-secs error))
-    (.set_host (:host error))
-    (.set_port (:port error))))
-
-(defn clojurify-profile-request
-  [^ProfileRequest request]
-  (when request
-    {:host (.get_node (.get_nodeInfo request))
-     :port (first (.get_port (.get_nodeInfo request)))
-     :action     (.get_action request)
-     :timestamp  (.get_time_stamp request)}))
-
-(defn thriftify-profile-request
-  [profile-request]
-  (let [nodeinfo (doto (NodeInfo.)
-                   (.set_node (:host profile-request))
-                   (.set_port (set [(:port profile-request)])))
-        request (ProfileRequest. nodeinfo (:action profile-request))]
-    (.set_time_stamp request (:timestamp profile-request))
-    request))
-
-(defn thriftify-credentials [credentials]
-    (doto (Credentials.)
-      (.set_creds (if credentials credentials {}))))
-
-(defn clojurify-crdentials [^Credentials credentials]
-  (if credentials
-    (into {} (.get_creds credentials))
-    nil
-    ))
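
As a quick sanity check on the converter pairs above, an error map survives a round trip through thriftify-error and clojurify-error; the message, host and port here are hypothetical:

    ;; Sketch only: evaluated with backtype.storm.converter loaded. ErrorInfo carries the
    ;; message and timestamp in its constructor, with host and port set separately.
    (clojurify-error
      (thriftify-error {:error "tuple failure" :time-secs 1452546000
                        :host "node1.example.com" :port 6702}))
    ;; => {:error "tuple failure", :time-secs 1452546000, :host "node1.example.com", :port 6702}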

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/daemon/acker.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/daemon/acker.clj b/storm-core/src/clj/backtype/storm/daemon/acker.clj
deleted file mode 100644
index ce88d11..0000000
--- a/storm-core/src/clj/backtype/storm/daemon/acker.clj
+++ /dev/null
@@ -1,107 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.daemon.acker
-  (:import [backtype.storm.task OutputCollector TopologyContext IBolt])
-  (:import [backtype.storm.tuple Tuple Fields])
-  (:import [backtype.storm.utils RotatingMap MutableObject])
-  (:import [java.util List Map])
-  (:import [backtype.storm Constants])
-  (:use [backtype.storm config util log])
-  (:gen-class
-   :init init
-   :implements [backtype.storm.task.IBolt]
-   :constructors {[] []}
-   :state state ))
-
-(def ACKER-COMPONENT-ID "__acker")
-(def ACKER-INIT-STREAM-ID "__ack_init")
-(def ACKER-ACK-STREAM-ID "__ack_ack")
-(def ACKER-FAIL-STREAM-ID "__ack_fail")
-
-(defn- update-ack [curr-entry val]
-  (let [old (get curr-entry :val 0)]
-    (assoc curr-entry :val (bit-xor old val))
-    ))
-
-(defn- acker-emit-direct [^OutputCollector collector ^Integer task ^String stream ^List values]
-  (.emitDirect collector task stream values)
-  )
-
-(defn mk-acker-bolt []
-  (let [output-collector (MutableObject.)
-        pending (MutableObject.)]
-    (reify IBolt
-      (^void prepare [this ^Map storm-conf ^TopologyContext context ^OutputCollector collector]
-               (.setObject output-collector collector)
-               (.setObject pending (RotatingMap. 2))
-               )
-      (^void execute [this ^Tuple tuple]
-             (let [^RotatingMap pending (.getObject pending)
-                   stream-id (.getSourceStreamId tuple)]
-               (if (= stream-id Constants/SYSTEM_TICK_STREAM_ID)
-                 (.rotate pending)
-                 (let [id (.getValue tuple 0)
-                       ^OutputCollector output-collector (.getObject output-collector)
-                       curr (.get pending id)
-                       curr (condp = stream-id
-                                ACKER-INIT-STREAM-ID (-> curr
-                                                         (update-ack (.getValue tuple 1))
-                                                         (assoc :spout-task (.getValue tuple 2)))
-                                ACKER-ACK-STREAM-ID (update-ack curr (.getValue tuple 1))
-                                ACKER-FAIL-STREAM-ID (assoc curr :failed true))]
-                   (.put pending id curr)
-                   (when (and curr (:spout-task curr))
-                     (cond (= 0 (:val curr))
-                           (do
-                             (.remove pending id)
-                             (acker-emit-direct output-collector
-                                                (:spout-task curr)
-                                                ACKER-ACK-STREAM-ID
-                                                [id]
-                                                ))
-                           (:failed curr)
-                           (do
-                             (.remove pending id)
-                             (acker-emit-direct output-collector
-                                                (:spout-task curr)
-                                                ACKER-FAIL-STREAM-ID
-                                                [id]
-                                                ))
-                           ))
-                   (.ack output-collector tuple)
-                   ))))
-      (^void cleanup [this]
-        )
-      )))
-
-(defn -init []
-  [[] (container)])
-
-(defn -prepare [this conf context collector]
-  (let [^IBolt ret (mk-acker-bolt)]
-    (container-set! (.state ^backtype.storm.daemon.acker this) ret)
-    (.prepare ret conf context collector)
-    ))
-
-(defn -execute [this tuple]
-  (let [^IBolt delegate (container-get (.state ^backtype.storm.daemon.acker this))]
-    (.execute delegate tuple)
-    ))
-
-(defn -cleanup [this]
-  (let [^IBolt delegate (container-get (.state ^backtype.storm.daemon.acker this))]
-    (.cleanup delegate)
-    ))
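
The bookkeeping in update-ack above is the usual XOR trick: each anchor id is XORed into the entry once when the tuple is emitted and once when it is acked, so the value returns to zero exactly when the whole tuple tree has been processed. A tiny sketch with made-up ids:

    ;; Sketch only: two anchors, each XORed in at emit time and again at ack time.
    (reduce bit-xor 0 [0x9f3a2b1c 0x11c2d4e7 0x9f3a2b1c 0x11c2d4e7])
    ;; => 0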

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/daemon/builtin_metrics.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/daemon/builtin_metrics.clj b/storm-core/src/clj/backtype/storm/daemon/builtin_metrics.clj
deleted file mode 100644
index 0caa0b9..0000000
--- a/storm-core/src/clj/backtype/storm/daemon/builtin_metrics.clj
+++ /dev/null
@@ -1,98 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.daemon.builtin-metrics
-  (:import [backtype.storm.metric.api CountMetric StateMetric IMetric IStatefulObject])
-  (:import [backtype.storm.metric.internal MultiCountStatAndMetric MultiLatencyStatAndMetric])
-  (:import [backtype.storm Config])
-  (:use [backtype.storm.stats]))
-
-(defrecord BuiltinSpoutMetrics [^MultiCountStatAndMetric ack-count
-                                ^MultiLatencyStatAndMetric complete-latency
-                                ^MultiCountStatAndMetric fail-count
-                                ^MultiCountStatAndMetric emit-count
-                                ^MultiCountStatAndMetric transfer-count])
-(defrecord BuiltinBoltMetrics [^MultiCountStatAndMetric ack-count
-                               ^MultiLatencyStatAndMetric process-latency
-                               ^MultiCountStatAndMetric fail-count
-                               ^MultiCountStatAndMetric execute-count
-                               ^MultiLatencyStatAndMetric execute-latency
-                               ^MultiCountStatAndMetric emit-count
-                               ^MultiCountStatAndMetric transfer-count])
-(defrecord SpoutThrottlingMetrics [^CountMetric skipped-max-spout
-                                   ^CountMetric skipped-throttle
-                                   ^CountMetric skipped-inactive])
-
-
-(defn make-data [executor-type stats]
-  (condp = executor-type
-    :spout (BuiltinSpoutMetrics. (stats-acked stats)
-                                 (stats-complete-latencies stats)
-                                 (stats-failed stats)
-                                 (stats-emitted stats)
-                                 (stats-transferred stats))
-    :bolt (BuiltinBoltMetrics. (stats-acked stats)
-                               (stats-process-latencies stats)
-                               (stats-failed stats)
-                               (stats-executed stats)
-                               (stats-execute-latencies stats)
-                               (stats-emitted stats)
-                               (stats-transferred stats))))
-
-(defn make-spout-throttling-data []
-  (SpoutThrottlingMetrics. (CountMetric.)
-                           (CountMetric.)
-                           (CountMetric.)))
-
-(defn register-spout-throttling-metrics [throttling-metrics  storm-conf topology-context]
-  (doseq [[kw imetric] throttling-metrics]
-    (.registerMetric topology-context (str "__" (name kw)) imetric
-                     (int (get storm-conf Config/TOPOLOGY_BUILTIN_METRICS_BUCKET_SIZE_SECS)))))
-
-(defn register-all [builtin-metrics  storm-conf topology-context]
-  (doseq [[kw imetric] builtin-metrics]
-    (.registerMetric topology-context (str "__" (name kw)) imetric
-                     (int (get storm-conf Config/TOPOLOGY_BUILTIN_METRICS_BUCKET_SIZE_SECS)))))
-
-(defn register-iconnection-server-metric [server storm-conf topology-context]
-  (if (instance? IStatefulObject server)
-    (.registerMetric topology-context "__recv-iconnection" (StateMetric. server)
-                     (int (get storm-conf Config/TOPOLOGY_BUILTIN_METRICS_BUCKET_SIZE_SECS)))))
-
-(defn register-iconnection-client-metrics [node+port->socket-ref storm-conf topology-context]
-  (.registerMetric topology-context "__send-iconnection"
-    (reify IMetric
-      (^Object getValueAndReset [this]
-        (into {}
-          (map
-            (fn [[node+port ^IStatefulObject connection]] [node+port (.getState connection)])
-            (filter 
-              (fn [[node+port connection]] (instance? IStatefulObject connection))
-              @node+port->socket-ref)))))
-    (int (get storm-conf Config/TOPOLOGY_BUILTIN_METRICS_BUCKET_SIZE_SECS))))
- 
-(defn register-queue-metrics [queues storm-conf topology-context]
-  (doseq [[qname q] queues]
-    (.registerMetric topology-context (str "__" (name qname)) (StateMetric. q)
-                     (int (get storm-conf Config/TOPOLOGY_BUILTIN_METRICS_BUCKET_SIZE_SECS)))))
-
-(defn skipped-max-spout! [^SpoutThrottlingMetrics m stats]
-  (-> m .skipped-max-spout (.incrBy (stats-rate stats))))
-
-(defn skipped-throttle! [^SpoutThrottlingMetrics m stats]
-  (-> m .skipped-throttle (.incrBy (stats-rate stats))))
-
-(defn skipped-inactive! [^SpoutThrottlingMetrics m stats]
-  (-> m .skipped-inactive (.incrBy (stats-rate stats))))
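Note: the register-* helpers above all funnel into TopologyContext.registerMetric with the topology's builtin-metrics bucket size. As a hedged sketch, a user component can register its own metric the same way; the function name, the "my-emit-count" metric name, and the 60-second bucket below are illustrative, not anything defined in this file:

(defn example-prepare
  "Hypothetical prepare body for a user bolt; not part of this namespace."
  [^backtype.storm.task.TopologyContext context]
  (let [counter (backtype.storm.metric.api.CountMetric.)]
    ;; sampled and emitted on the metrics stream every 60 seconds
    (.registerMetric context "my-emit-count" counter 60)
    counter))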

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/daemon/common.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/daemon/common.clj b/storm-core/src/clj/backtype/storm/daemon/common.clj
deleted file mode 100644
index 9b3aab3..0000000
--- a/storm-core/src/clj/backtype/storm/daemon/common.clj
+++ /dev/null
@@ -1,402 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.daemon.common
-  (:use [backtype.storm log config util])
-  (:import [backtype.storm.generated StormTopology
-            InvalidTopologyException GlobalStreamId]
-           [backtype.storm.utils ThriftTopologyUtils])
-  (:import [backtype.storm.utils Utils])
-  (:import [backtype.storm.task WorkerTopologyContext])
-  (:import [backtype.storm Constants])
-  (:import [backtype.storm.metric SystemBolt])
-  (:import [backtype.storm.metric EventLoggerBolt])
-  (:import [backtype.storm.security.auth IAuthorizer]) 
-  (:import [java.io InterruptedIOException])
-  (:require [clojure.set :as set])  
-  (:require [backtype.storm.daemon.acker :as acker])
-  (:require [backtype.storm.thrift :as thrift])
-  (:require [metrics.reporters.jmx :as jmx]))
-
-(defn start-metrics-reporters []
-  (jmx/start (jmx/reporter {})))
-
-(def ACKER-COMPONENT-ID acker/ACKER-COMPONENT-ID)
-(def ACKER-INIT-STREAM-ID acker/ACKER-INIT-STREAM-ID)
-(def ACKER-ACK-STREAM-ID acker/ACKER-ACK-STREAM-ID)
-(def ACKER-FAIL-STREAM-ID acker/ACKER-FAIL-STREAM-ID)
-
-(def SYSTEM-STREAM-ID "__system")
-
-(def EVENTLOGGER-COMPONENT-ID "__eventlogger")
-(def EVENTLOGGER-STREAM-ID "__eventlog")
-
-(def SYSTEM-COMPONENT-ID Constants/SYSTEM_COMPONENT_ID)
-(def SYSTEM-TICK-STREAM-ID Constants/SYSTEM_TICK_STREAM_ID)
-(def METRICS-STREAM-ID Constants/METRICS_STREAM_ID)
-(def METRICS-TICK-STREAM-ID Constants/METRICS_TICK_STREAM_ID)
-(def CREDENTIALS-CHANGED-STREAM-ID Constants/CREDENTIALS_CHANGED_STREAM_ID)
-
-;; the task id is the virtual port
-;; node->host is here so that tasks know who to talk to just from assignment
-;; this avoids the situation where a node goes down and a task doesn't know what to do, information-wise
-(defrecord Assignment [master-code-dir node->host executor->node+port executor->start-time-secs worker->resources])
-
-
-;; component->executors is a map from spout/bolt id to number of executors for that component
-(defrecord StormBase [storm-name launch-time-secs status num-workers component->executors owner topology-action-options prev-status component->debug])
-
-(defrecord SupervisorInfo [time-secs hostname assignment-id used-ports meta scheduler-meta uptime-secs version resources-map])
-
-(defprotocol DaemonCommon
-  (waiting? [this]))
-
-(defrecord ExecutorStats [^long processed
-                          ^long acked
-                          ^long emitted
-                          ^long transferred
-                          ^long failed])
-
-(defn new-executor-stats []
-  (ExecutorStats. 0 0 0 0 0))
-
-(defn get-storm-id [storm-cluster-state storm-name]
-  (let [active-storms (.active-storms storm-cluster-state)]
-    (find-first
-      #(= storm-name (:storm-name (.storm-base storm-cluster-state % nil)))
-      active-storms)
-    ))
-
-(defn topology-bases [storm-cluster-state]
-  (let [active-topologies (.active-storms storm-cluster-state)]
-    (into {} 
-          (dofor [id active-topologies]
-                 [id (.storm-base storm-cluster-state id nil)]
-                 ))
-    ))
-
-(defn validate-distributed-mode! [conf]
-  (if (local-mode? conf)
-      (throw
-        (IllegalArgumentException. "Cannot start server in local mode!"))))
-
-(defmacro defserverfn [name & body]
-  `(let [exec-fn# (fn ~@body)]
-    (defn ~name [& args#]
-      (try-cause
-        (apply exec-fn# args#)
-      (catch InterruptedIOException e#
-        (throw e#))
-      (catch InterruptedException e#
-        (throw e#))
-      (catch Throwable t#
-        (log-error t# "Error on initialization of server " ~(str name))
-        (exit-process! 13 "Error on initialization")
-        )))))
-
-(defn- validate-ids! [^StormTopology topology]
-  (let [sets (map #(.getFieldValue topology %) thrift/STORM-TOPOLOGY-FIELDS)
-        offending (apply any-intersection sets)]
-    (if-not (empty? offending)
-      (throw (InvalidTopologyException.
-              (str "Duplicate component ids: " offending))))
-    (doseq [f thrift/STORM-TOPOLOGY-FIELDS
-            :let [obj-map (.getFieldValue topology f)]]
-      (if-not (ThriftTopologyUtils/isWorkerHook f)
-        (do
-          (doseq [id (keys obj-map)]
-            (if (Utils/isSystemId id)
-              (throw (InvalidTopologyException.
-                       (str id " is not a valid component id")))))
-          (doseq [obj (vals obj-map)
-                  id (-> obj .get_common .get_streams keys)]
-            (if (Utils/isSystemId id)
-              (throw (InvalidTopologyException.
-                       (str id " is not a valid stream id"))))))))))
-
-(defn all-components [^StormTopology topology]
-  (apply merge {}
-    (for [f thrift/STORM-TOPOLOGY-FIELDS]
-      (if-not (ThriftTopologyUtils/isWorkerHook f)
-        (.getFieldValue topology f)))))
-
-(defn component-conf [component]
-  (->> component
-      .get_common
-      .get_json_conf
-      from-json))
-
-(defn validate-basic! [^StormTopology topology]
-  (validate-ids! topology)
-  (doseq [f thrift/SPOUT-FIELDS
-          obj (->> f (.getFieldValue topology) vals)]
-    (if-not (empty? (-> obj .get_common .get_inputs))
-      (throw (InvalidTopologyException. "May not declare inputs for a spout"))))
-  (doseq [[comp-id comp] (all-components topology)
-          :let [conf (component-conf comp)
-                p (-> comp .get_common thrift/parallelism-hint)]]
-    (when (and (> (conf TOPOLOGY-TASKS) 0)
-               p
-               (<= p 0))
-      (throw (InvalidTopologyException. "Number of executors must be greater than 0 when number of tasks is greater than 0"))
-      )))
-
-(defn validate-structure! [^StormTopology topology]
-  ;; validate that every component subscribes to a component+stream that actually exists in the topology,
-  ;; and, if it is a fields grouping, that the corresponding fields exist
-  (let [all-components (all-components topology)]
-    (doseq [[id comp] all-components
-            :let [inputs (.. comp get_common get_inputs)]]
-      (doseq [[global-stream-id grouping] inputs
-              :let [source-component-id (.get_componentId global-stream-id)
-                    source-stream-id    (.get_streamId global-stream-id)]]
-        (if-not (contains? all-components source-component-id)
-          (throw (InvalidTopologyException. (str "Component: [" id "] subscribes from non-existent component [" source-component-id "]")))
-          (let [source-streams (-> all-components (get source-component-id) .get_common .get_streams)]
-            (if-not (contains? source-streams source-stream-id)
-              (throw (InvalidTopologyException. (str "Component: [" id "] subscribes from non-existent stream: [" source-stream-id "] of component [" source-component-id "]")))
-              (if (= :fields (thrift/grouping-type grouping))
-                (let [grouping-fields (set (.get_fields grouping))
-                      source-stream-fields (-> source-streams (get source-stream-id) .get_output_fields set)
-                      diff-fields (set/difference grouping-fields source-stream-fields)]
-                  (when-not (empty? diff-fields)
-                    (throw (InvalidTopologyException. (str "Component: [" id "] subscribes from stream: [" source-stream-id "] of component [" source-component-id "] with non-existent fields: " diff-fields)))))))))))))
-
-(defn acker-inputs [^StormTopology topology]
-  (let [bolt-ids (.. topology get_bolts keySet)
-        spout-ids (.. topology get_spouts keySet)
-        spout-inputs (apply merge
-                            (for [id spout-ids]
-                              {[id ACKER-INIT-STREAM-ID] ["id"]}
-                              ))
-        bolt-inputs (apply merge
-                           (for [id bolt-ids]
-                             {[id ACKER-ACK-STREAM-ID] ["id"]
-                              [id ACKER-FAIL-STREAM-ID] ["id"]}
-                             ))]
-    (merge spout-inputs bolt-inputs)))
-
-;; the event logger receives inputs from all the spouts and bolts
-;; with a field grouping on component id so that all tuples from a component
-;; go to the same executor and can be viewed via the logviewer.
-(defn eventlogger-inputs [^StormTopology topology]
-  (let [bolt-ids (.. topology get_bolts keySet)
-        spout-ids (.. topology get_spouts keySet)
-        spout-inputs (apply merge
-                       (for [id spout-ids]
-                         {[id EVENTLOGGER-STREAM-ID] ["component-id"]}
-                         ))
-        bolt-inputs (apply merge
-                      (for [id bolt-ids]
-                        {[id EVENTLOGGER-STREAM-ID] ["component-id"]}
-                        ))]
-    (merge spout-inputs bolt-inputs)))
-
-(defn add-acker! [storm-conf ^StormTopology ret]
-  (let [num-executors (if (nil? (storm-conf TOPOLOGY-ACKER-EXECUTORS)) (storm-conf TOPOLOGY-WORKERS) (storm-conf TOPOLOGY-ACKER-EXECUTORS))
-        acker-bolt (thrift/mk-bolt-spec* (acker-inputs ret)
-                                         (new backtype.storm.daemon.acker)
-                                         {ACKER-ACK-STREAM-ID (thrift/direct-output-fields ["id"])
-                                          ACKER-FAIL-STREAM-ID (thrift/direct-output-fields ["id"])
-                                          }
-                                         :p num-executors
-                                         :conf {TOPOLOGY-TASKS num-executors
-                                                TOPOLOGY-TICK-TUPLE-FREQ-SECS (storm-conf TOPOLOGY-MESSAGE-TIMEOUT-SECS)})]
-    (dofor [[_ bolt] (.get_bolts ret)
-            :let [common (.get_common bolt)]]
-           (do
-             (.put_to_streams common ACKER-ACK-STREAM-ID (thrift/output-fields ["id" "ack-val"]))
-             (.put_to_streams common ACKER-FAIL-STREAM-ID (thrift/output-fields ["id"]))
-             ))
-    (dofor [[_ spout] (.get_spouts ret)
-            :let [common (.get_common spout)
-                  spout-conf (merge
-                               (component-conf spout)
-                               {TOPOLOGY-TICK-TUPLE-FREQ-SECS (storm-conf TOPOLOGY-MESSAGE-TIMEOUT-SECS)})]]
-      (do
-        ;; this sets up tick tuples to cause timeouts to be triggered
-        (.set_json_conf common (to-json spout-conf))
-        (.put_to_streams common ACKER-INIT-STREAM-ID (thrift/output-fields ["id" "init-val" "spout-task"]))
-        (.put_to_inputs common
-                        (GlobalStreamId. ACKER-COMPONENT-ID ACKER-ACK-STREAM-ID)
-                        (thrift/mk-direct-grouping))
-        (.put_to_inputs common
-                        (GlobalStreamId. ACKER-COMPONENT-ID ACKER-FAIL-STREAM-ID)
-                        (thrift/mk-direct-grouping))
-        ))
-    (.put_to_bolts ret "__acker" acker-bolt)
-    ))
-
-(defn add-metric-streams! [^StormTopology topology]
-  (doseq [[_ component] (all-components topology)
-          :let [common (.get_common component)]]
-    (.put_to_streams common METRICS-STREAM-ID
-                     (thrift/output-fields ["task-info" "data-points"]))))
-
-(defn add-system-streams! [^StormTopology topology]
-  (doseq [[_ component] (all-components topology)
-          :let [common (.get_common component)]]
-    (.put_to_streams common SYSTEM-STREAM-ID (thrift/output-fields ["event"]))))
-
-
-(defn map-occurrences [afn coll]
-  (->> coll
-       (reduce (fn [[counts new-coll] x]
-                 (let [occurs (inc (get counts x 0))]
-                   [(assoc counts x occurs) (cons (afn x occurs) new-coll)]))
-               [{} []])
-       (second)
-       (reverse)))
-
-(defn number-duplicates
-  "(number-duplicates [\"a\", \"b\", \"a\"]) => [\"a\", \"b\", \"a#2\"]"
-  [coll]
-  (map-occurrences (fn [x occurrences] (if (>= occurrences 2) (str x "#" occurrences) x)) coll))
-
-(defn metrics-consumer-register-ids
-  "Generates a list of component ids for each metrics consumer
-   e.g. [\"__metrics_org.mycompany.MyMetricsConsumer\", ..] "
-  [storm-conf]
-  (->> (get storm-conf TOPOLOGY-METRICS-CONSUMER-REGISTER)         
-       (map #(get % "class"))
-       (number-duplicates)
-       (map #(str Constants/METRICS_COMPONENT_ID_PREFIX %))))
-
-(defn metrics-consumer-bolt-specs [storm-conf topology]
-  (let [component-ids-that-emit-metrics (cons SYSTEM-COMPONENT-ID (keys (all-components topology)))
-        inputs (->> (for [comp-id component-ids-that-emit-metrics]
-                      {[comp-id METRICS-STREAM-ID] :shuffle})
-                    (into {}))
-        
-        mk-bolt-spec (fn [class arg p]
-                       (thrift/mk-bolt-spec*
-                        inputs
-                        (backtype.storm.metric.MetricsConsumerBolt. class arg)
-                        {} :p p :conf {TOPOLOGY-TASKS p}))]
-    
-    (map
-     (fn [component-id register]           
-       [component-id (mk-bolt-spec (get register "class")
-                                   (get register "argument")
-                                   (or (get register "parallelism.hint") 1))])
-     
-     (metrics-consumer-register-ids storm-conf)
-     (get storm-conf TOPOLOGY-METRICS-CONSUMER-REGISTER))))
-
-;; return the fields that event logger bolt expects
-(defn eventlogger-bolt-fields []
-  [(EventLoggerBolt/FIELD_COMPONENT_ID) (EventLoggerBolt/FIELD_MESSAGE_ID)  (EventLoggerBolt/FIELD_TS) (EventLoggerBolt/FIELD_VALUES)]
-  )
-
-(defn add-eventlogger! [storm-conf ^StormTopology ret]
-  (let [num-executors (if (nil? (storm-conf TOPOLOGY-EVENTLOGGER-EXECUTORS)) (storm-conf TOPOLOGY-WORKERS) (storm-conf TOPOLOGY-EVENTLOGGER-EXECUTORS))
-        eventlogger-bolt (thrift/mk-bolt-spec* (eventlogger-inputs ret)
-                     (EventLoggerBolt.)
-                     {}
-                     :p num-executors
-                     :conf {TOPOLOGY-TASKS num-executors
-                            TOPOLOGY-TICK-TUPLE-FREQ-SECS (storm-conf TOPOLOGY-MESSAGE-TIMEOUT-SECS)})]
-
-    (doseq [[_ component] (all-components ret)
-            :let [common (.get_common component)]]
-      (.put_to_streams common EVENTLOGGER-STREAM-ID (thrift/output-fields (eventlogger-bolt-fields))))
-    (.put_to_bolts ret EVENTLOGGER-COMPONENT-ID eventlogger-bolt)
-    ))
-
-(defn add-metric-components! [storm-conf ^StormTopology topology]  
-  (doseq [[comp-id bolt-spec] (metrics-consumer-bolt-specs storm-conf topology)]
-    (.put_to_bolts topology comp-id bolt-spec)))
-
-(defn add-system-components! [conf ^StormTopology topology]
-  (let [system-bolt-spec (thrift/mk-bolt-spec*
-                          {}
-                          (SystemBolt.)
-                          {SYSTEM-TICK-STREAM-ID (thrift/output-fields ["rate_secs"])
-                           METRICS-TICK-STREAM-ID (thrift/output-fields ["interval"])
-                           CREDENTIALS-CHANGED-STREAM-ID (thrift/output-fields ["creds"])}
-                          :p 0
-                          :conf {TOPOLOGY-TASKS 0})]
-    (.put_to_bolts topology SYSTEM-COMPONENT-ID system-bolt-spec)))
-
-(defn system-topology! [storm-conf ^StormTopology topology]
-  (validate-basic! topology)
-  (let [ret (.deepCopy topology)]
-    (add-acker! storm-conf ret)
-    (add-eventlogger! storm-conf ret)
-    (add-metric-components! storm-conf ret)
-    (add-system-components! storm-conf ret)
-    (add-metric-streams! ret)
-    (add-system-streams! ret)
-    (validate-structure! ret)
-    ret
-    ))
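Note: a hedged sketch of what a caller sees after system-topology! runs; storm-conf and user-topology are placeholder names for a config map and a user-built StormTopology:

(let [sys-topo (system-topology! storm-conf user-topology)]
  ;; the original spouts/bolts plus __acker, __eventlogger, __system and any
  ;; configured __metrics_* consumer bolts, with their streams already wired up
  (keys (all-components sys-topo)))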
-
-(defn has-ackers? [storm-conf]
-  (or (nil? (storm-conf TOPOLOGY-ACKER-EXECUTORS)) (> (storm-conf TOPOLOGY-ACKER-EXECUTORS) 0)))
-
-(defn has-eventloggers? [storm-conf]
-  (or (nil? (storm-conf TOPOLOGY-EVENTLOGGER-EXECUTORS)) (> (storm-conf TOPOLOGY-EVENTLOGGER-EXECUTORS) 0)))
-
-(defn num-start-executors [component]
-  (thrift/parallelism-hint (.get_common component)))
-
-(defn storm-task-info
-  "Returns map from task -> component id"
-  [^StormTopology user-topology storm-conf]
-  (->> (system-topology! storm-conf user-topology)
-       all-components
-       (map-val (comp #(get % TOPOLOGY-TASKS) component-conf))
-       (sort-by first)
-       (mapcat (fn [[c num-tasks]] (repeat num-tasks c)))
-       (map (fn [id comp] [id comp]) (iterate (comp int inc) (int 1)))
-       (into {})
-       ))
-
-(defn executor-id->tasks [[first-task-id last-task-id]]
-  (->> (range first-task-id (inc last-task-id))
-       (map int)))
-
-(defn worker-context [worker]
-  (WorkerTopologyContext. (:system-topology worker)
-                          (:storm-conf worker)
-                          (:task->component worker)
-                          (:component->sorted-tasks worker)
-                          (:component->stream->fields worker)
-                          (:storm-id worker)
-                          (supervisor-storm-resources-path
-                            (supervisor-stormdist-root (:conf worker) (:storm-id worker)))
-                          (worker-pids-root (:conf worker) (:worker-id worker))
-                          (:port worker)
-                          (:task-ids worker)
-                          (:default-shared-resources worker)
-                          (:user-shared-resources worker)
-                          ))
-
-
-(defn to-task->node+port [executor->node+port]
-  (->> executor->node+port
-       (mapcat (fn [[e node+port]] (for [t (executor-id->tasks e)] [t node+port])))
-       (into {})))
-
-(defn mk-authorization-handler [klassname conf]
-  (let [aznClass (if klassname (Class/forName klassname))
-        aznHandler (if aznClass (.newInstance aznClass))] 
-    (if aznHandler (.prepare ^IAuthorizer aznHandler conf))
-    (log-debug "authorization class name:" klassname
-                 " class:" aznClass
-                 " handler:" aznHandler)
-    aznHandler
-  )) 
-


http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/testing.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/testing.clj b/storm-core/src/clj/backtype/storm/testing.clj
deleted file mode 100644
index 0cb2f52..0000000
--- a/storm-core/src/clj/backtype/storm/testing.clj
+++ /dev/null
@@ -1,701 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-
-(ns backtype.storm.testing
-  (:require [backtype.storm.daemon
-             [nimbus :as nimbus]
-             [supervisor :as supervisor]
-             [common :as common]
-             [worker :as worker]
-             [executor :as executor]])
-  (:require [backtype.storm [process-simulator :as psim]])
-  (:import [org.apache.commons.io FileUtils])
-  (:import [java.io File])
-  (:import [java.util HashMap ArrayList])
-  (:import [java.util.concurrent.atomic AtomicInteger])
-  (:import [java.util.concurrent ConcurrentHashMap])
-  (:import [backtype.storm.utils Time Utils RegisteredGlobalState])
-  (:import [backtype.storm.tuple Fields Tuple TupleImpl])
-  (:import [backtype.storm.task TopologyContext])
-  (:import [backtype.storm.generated GlobalStreamId Bolt KillOptions])
-  (:import [backtype.storm.testing FeederSpout FixedTupleSpout FixedTuple
-            TupleCaptureBolt SpoutTracker BoltTracker NonRichBoltTracker
-            TestWordSpout MemoryTransactionalSpout])
-  (:import [backtype.storm.security.auth ThriftServer ThriftConnectionType ReqContext AuthUtils])
-  (:import [backtype.storm.generated NotAliveException AlreadyAliveException StormTopology ErrorInfo
-            ExecutorInfo InvalidTopologyException Nimbus$Iface Nimbus$Processor SubmitOptions TopologyInitialStatus
-            KillOptions RebalanceOptions ClusterSummary SupervisorSummary TopologySummary TopologyInfo
-            ExecutorSummary AuthorizationException GetInfoOptions NumErrorsChoice])
-  (:import [backtype.storm.transactional TransactionalSpoutCoordinator])
-  (:import [backtype.storm.transactional.partitioned PartitionedTransactionalSpoutExecutor])
-  (:import [backtype.storm.tuple Tuple])
-  (:import [backtype.storm.generated StormTopology])
-  (:import [backtype.storm.task TopologyContext])
-  (:require [backtype.storm [zookeeper :as zk]])
-  (:require [backtype.storm.messaging.loader :as msg-loader])
-  (:require [backtype.storm.daemon.acker :as acker])
-  (:use [backtype.storm cluster util thrift config log local-state]))
-
-(defn feeder-spout
-  [fields]
-  (FeederSpout. (Fields. fields)))
-
-(defn local-temp-path
-  []
-  (str (System/getProperty "java.io.tmpdir") (if-not on-windows? "/") (uuid)))
-
-(defn delete-all
-  [paths]
-  (dorun
-    (for [t paths]
-      (if (.exists (File. t))
-        (try
-          (FileUtils/forceDelete (File. t))
-          (catch Exception e
-            (log-message (.getMessage e))))))))
-
-(defmacro with-local-tmp
-  [[& tmp-syms] & body]
-  (let [tmp-paths (mapcat (fn [t] [t `(local-temp-path)]) tmp-syms)]
-    `(let [~@tmp-paths]
-       (try
-         ~@body
-         (finally
-           (delete-all ~(vec tmp-syms)))))))
-
-(defn start-simulating-time!
-  []
-  (Time/startSimulating))
-
-(defn stop-simulating-time!
-  []
-  (Time/stopSimulating))
-
-(defmacro with-simulated-time
-  [& body]
-  `(try
-    (start-simulating-time!)
-    ~@body
-    (finally
-      (stop-simulating-time!))))
-
-(defn advance-time-ms! [ms]
-  (Time/advanceTime ms))
-
-(defn advance-time-secs! [secs]
-  (advance-time-ms! (* (long secs) 1000)))
-
-(defnk add-supervisor
-  [cluster-map :ports 2 :conf {} :id nil]
-  (let [tmp-dir (local-temp-path)
-        port-ids (if (sequential? ports)
-                   ports
-                   (doall (repeatedly ports (:port-counter cluster-map))))
-        supervisor-conf (merge (:daemon-conf cluster-map)
-                               conf
-                               {STORM-LOCAL-DIR tmp-dir
-                                SUPERVISOR-SLOTS-PORTS port-ids})
-        id-fn (if id (fn [] id) supervisor/generate-supervisor-id)
-        daemon (with-var-roots [supervisor/generate-supervisor-id id-fn] (supervisor/mk-supervisor supervisor-conf (:shared-context cluster-map) (supervisor/standalone-supervisor)))]
-    (swap! (:supervisors cluster-map) conj daemon)
-    (swap! (:tmp-dirs cluster-map) conj tmp-dir)
-    daemon))
-
-(defn mk-shared-context [conf]
-  (if-not (conf STORM-LOCAL-MODE-ZMQ)
-    (msg-loader/mk-local-context)))
-
-(defn start-nimbus-daemon [conf nimbus]
-  (let [server (ThriftServer. conf (Nimbus$Processor. nimbus)
-                              ThriftConnectionType/NIMBUS)
-        nimbus-thread (Thread. (fn [] (.serve server)))]
-    (log-message "Starting Nimbus server...")
-    (.start nimbus-thread)
-    server))
-
-
-;; returns a map containing cluster info
-;; the storm local dir is always overridden with a temp path
-;; supervisors (except for their ports) can be customized by passing a map for the :supervisors parameter
-;; if the ports need further customization, use add-supervisor calls afterwards
-(defnk mk-local-storm-cluster [:supervisors 2 :ports-per-supervisor 3 :daemon-conf {} :inimbus nil :supervisor-slot-port-min 1024 :nimbus-daemon false]
-  (let [zk-tmp (local-temp-path)
-        [zk-port zk-handle] (if-not (contains? daemon-conf STORM-ZOOKEEPER-SERVERS)
-                              (zk/mk-inprocess-zookeeper zk-tmp))
-        daemon-conf (merge (read-storm-config)
-                           {TOPOLOGY-SKIP-MISSING-KRYO-REGISTRATIONS true
-                            ZMQ-LINGER-MILLIS 0
-                            TOPOLOGY-ENABLE-MESSAGE-TIMEOUTS false
-                            TOPOLOGY-TRIDENT-BATCH-EMIT-INTERVAL-MILLIS 50
-                            STORM-CLUSTER-MODE "local"
-                            BLOBSTORE-SUPERUSER (System/getProperty "user.name")}
-                           (if-not (contains? daemon-conf STORM-ZOOKEEPER-SERVERS)
-                             {STORM-ZOOKEEPER-PORT zk-port
-                              STORM-ZOOKEEPER-SERVERS ["localhost"]})
-                           daemon-conf)
-        nimbus-tmp (local-temp-path)
-        port-counter (mk-counter supervisor-slot-port-min)
-        nimbus (nimbus/service-handler
-                (assoc daemon-conf STORM-LOCAL-DIR nimbus-tmp)
-                (if inimbus inimbus (nimbus/standalone-nimbus)))
-        context (mk-shared-context daemon-conf)
-        nimbus-thrift-server (if nimbus-daemon (start-nimbus-daemon daemon-conf nimbus) nil)
-        cluster-map {:nimbus nimbus
-                     :port-counter port-counter
-                     :daemon-conf daemon-conf
-                     :supervisors (atom [])
-                     :state (mk-distributed-cluster-state daemon-conf)
-                     :storm-cluster-state (mk-storm-cluster-state daemon-conf)
-                     :tmp-dirs (atom [nimbus-tmp zk-tmp])
-                     :zookeeper (if (not-nil? zk-handle) zk-handle)
-                     :shared-context context
-                     :nimbus-thrift-server nimbus-thrift-server}
-        supervisor-confs (if (sequential? supervisors)
-                           supervisors
-                           (repeat supervisors {}))]
-
-    (doseq [sc supervisor-confs]
-      (add-supervisor cluster-map :ports ports-per-supervisor :conf sc))
-    cluster-map))
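Note: a hedged usage sketch of the function above; most tests go through the with-local-cluster / with-simulated-time-local-cluster macros below instead of calling it directly, and the topology here is assumed to be built elsewhere with the thrift helpers:

(let [cluster (mk-local-storm-cluster :supervisors 1 :ports-per-supervisor 2)]
  (try
    (submit-local-topology (:nimbus cluster) "demo-topology" {} topology)
    (finally
      (kill-local-storm-cluster cluster))))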
-
-(defn get-supervisor [cluster-map supervisor-id]
-  (let [finder-fn #(= (.get-id %) supervisor-id)]
-    (find-first finder-fn @(:supervisors cluster-map))))
-
-(defn kill-supervisor [cluster-map supervisor-id]
-  (let [finder-fn #(= (.get-id %) supervisor-id)
-        supervisors @(:supervisors cluster-map)
-        sup (find-first finder-fn
-                        supervisors)]
-    ;; tmp-dir will be taken care of by shutdown
-    (reset! (:supervisors cluster-map) (remove-first finder-fn supervisors))
-    (.shutdown sup)))
-
-(defn kill-local-storm-cluster [cluster-map]
-  (.shutdown (:nimbus cluster-map))
-  (if (not-nil? (:nimbus-thrift-server cluster-map))
-    (do
-      (log-message "shutting down thrift server")
-      (try
-        (.stop (:nimbus-thrift-server cluster-map))
-        (catch Exception e (log-message "failed to stop thrift")))
-      ))
-  (.close (:state cluster-map))
-  (.disconnect (:storm-cluster-state cluster-map))
-  (doseq [s @(:supervisors cluster-map)]
-    (.shutdown-all-workers s)
-    ;; race condition here? will it launch the workers again?
-    (supervisor/kill-supervisor s))
-  (psim/kill-all-processes)
-  (if (not-nil? (:zookeeper cluster-map))
-    (do
-      (log-message "Shutting down in process zookeeper")
-      (zk/shutdown-inprocess-zookeeper (:zookeeper cluster-map))
-      (log-message "Done shutting down in process zookeeper")))
-  (doseq [t @(:tmp-dirs cluster-map)]
-    (log-message "Deleting temporary path " t)
-    (try
-      (rmr t)
-      ;; on Windows, the host process may still hold a lock on the logfile
-      (catch Exception e (log-message (.getMessage e)))) ))
-
-(def TEST-TIMEOUT-MS
-  (let [timeout (System/getenv "STORM_TEST_TIMEOUT_MS")]
-    (parse-int (if timeout timeout "5000"))))
-
-(defmacro while-timeout [timeout-ms condition & body]
-  `(let [end-time# (+ (System/currentTimeMillis) ~timeout-ms)]
-     (log-debug "Looping until " '~condition)
-     (while ~condition
-       (when (> (System/currentTimeMillis) end-time#)
-         (let [thread-dump# (Utils/threadDump)]
-           (log-message "Condition " '~condition  " not met in " ~timeout-ms "ms")
-           (log-message thread-dump#)
-           (throw (AssertionError. (str "Test timed out (" ~timeout-ms "ms) " '~condition)))))
-       ~@body)
-     (log-debug "Condition met " '~condition)))
-
-(defn wait-for-condition
-  ([apredicate]
-    (wait-for-condition TEST-TIMEOUT-MS apredicate))
-  ([timeout-ms apredicate]
-    (while-timeout timeout-ms (not (apredicate))
-      (Time/sleep 100))))
-
-(defn wait-until-cluster-waiting
-  "Wait until the cluster is idle. Should be used with time simulation."
-  ([cluster-map] (wait-until-cluster-waiting cluster-map TEST-TIMEOUT-MS))
-  ([cluster-map timeout-ms]
-  ;; wait until all workers, supervisors, and nimbus are waiting
-  (let [supervisors @(:supervisors cluster-map)
-        workers (filter (partial satisfies? common/DaemonCommon) (psim/all-processes))
-        daemons (concat
-                  [(:nimbus cluster-map)]
-                  supervisors
-                  ; because a worker may already be dead
-                  workers)]
-    (while-timeout timeout-ms (not (every? (memfn waiting?) daemons))
-                   (Thread/sleep (rand-int 20))
-                   ;;      (doseq [d daemons]
-                   ;;        (if-not ((memfn waiting?) d)
-                   ;;          (println d)))
-                   ))))
-
-(defn advance-cluster-time
-  ([cluster-map secs increment-secs]
-   (loop [left secs]
-     (when (> left 0)
-       (let [diff (min left increment-secs)]
-         (advance-time-secs! diff)
-         (wait-until-cluster-waiting cluster-map)
-         (recur (- left diff))))))
-  ([cluster-map secs]
-   (advance-cluster-time cluster-map secs 1)))
-
-(defmacro with-local-cluster
-  [[cluster-sym & args] & body]
-  `(let [~cluster-sym (mk-local-storm-cluster ~@args)]
-     (try
-       ~@body
-       (catch Throwable t#
-         (log-error t# "Error in cluster")
-         (throw t#))
-       (finally
-         (let [keep-waiting?# (atom true)
-               f# (future (while @keep-waiting?# (simulate-wait ~cluster-sym)))]
-           (kill-local-storm-cluster ~cluster-sym)
-           (reset! keep-waiting?# false)
-            @f#)))))
-
-(defmacro with-simulated-time-local-cluster
-  [& args]
-  `(with-simulated-time
-     (with-local-cluster ~@args)))
-
-(defmacro with-inprocess-zookeeper
-  [port-sym & body]
-  `(with-local-tmp [tmp#]
-                   (let [[~port-sym zks#] (zk/mk-inprocess-zookeeper tmp#)]
-                     (try
-                       ~@body
-                       (finally
-                         (zk/shutdown-inprocess-zookeeper zks#))))))
-
-(defn submit-local-topology
-  [nimbus storm-name conf topology]
-  (when-not (Utils/isValidConf conf)
-    (throw (IllegalArgumentException. "Topology conf is not json-serializable")))
-  (.submitTopology nimbus storm-name nil (to-json conf) topology))
-
-(defn submit-local-topology-with-opts
-  [nimbus storm-name conf topology submit-opts]
-  (when-not (Utils/isValidConf conf)
-    (throw (IllegalArgumentException. "Topology conf is not json-serializable")))
-  (.submitTopologyWithOpts nimbus storm-name nil (to-json conf) topology submit-opts))
-
-(defn mocked-convert-assignments-to-worker->resources [storm-cluster-state storm-name worker->resources]
-  (fn [existing-assignments]
-    (let [topology-id (common/get-storm-id storm-cluster-state storm-name)
-          existing-assignments (into {} (for [[tid assignment] existing-assignments]
-                                          {tid (:worker->resources assignment)}))
-          new-assignments (assoc existing-assignments topology-id worker->resources)]
-      new-assignments)))
-
-(defn mocked-compute-new-topology->executor->node+port [storm-cluster-state storm-name executor->node+port]
-  (fn [new-scheduler-assignments existing-assignments]
-    (let [topology-id (common/get-storm-id storm-cluster-state storm-name)
-          existing-assignments (into {} (for [[tid assignment] existing-assignments]
-                                          {tid (:executor->node+port assignment)}))
-          new-assignments (assoc existing-assignments topology-id executor->node+port)]
-      new-assignments)))
-
-(defn mocked-compute-new-scheduler-assignments []
-  (fn [nimbus existing-assignments topologies scratch-topology-id]
-    existing-assignments))
-
-(defn submit-mocked-assignment
-  [nimbus storm-cluster-state storm-name conf topology task->component executor->node+port worker->resources]
-  (with-var-roots [common/storm-task-info (fn [& ignored] task->component)
-                   nimbus/compute-new-scheduler-assignments (mocked-compute-new-scheduler-assignments)
-                   nimbus/convert-assignments-to-worker->resources (mocked-convert-assignments-to-worker->resources
-                                                          storm-cluster-state
-                                                          storm-name
-                                                          worker->resources)
-                   nimbus/compute-new-topology->executor->node+port (mocked-compute-new-topology->executor->node+port
-                                                                      storm-cluster-state
-                                                                      storm-name
-                                                                      executor->node+port)]
-    (submit-local-topology nimbus storm-name conf topology)))
-
-(defn mk-capture-launch-fn [capture-atom]
-  (fn [supervisor storm-id port worker-id mem-onheap]
-    (let [supervisor-id (:supervisor-id supervisor)
-          conf (:conf supervisor)
-          existing (get @capture-atom [supervisor-id port] [])]
-      (set-worker-user! conf worker-id "")
-      (swap! capture-atom assoc [supervisor-id port] (conj existing storm-id)))))
-
-(defn find-worker-id
-  [supervisor-conf port]
-  (let [supervisor-state (supervisor-state supervisor-conf)
-        worker->port (ls-approved-workers supervisor-state)]
-    (first ((reverse-map worker->port) port))))
-
-(defn find-worker-port
-  [supervisor-conf worker-id]
-  (let [supervisor-state (supervisor-state supervisor-conf)
-        worker->port (ls-approved-workers supervisor-state)]
-    (worker->port worker-id)))
-
-(defn mk-capture-shutdown-fn
-  [capture-atom]
-  (let [existing-fn supervisor/shutdown-worker]
-    (fn [supervisor worker-id]
-      (let [conf (:conf supervisor)
-            supervisor-id (:supervisor-id supervisor)
-            port (find-worker-port conf worker-id)
-            existing (get @capture-atom [supervisor-id port] 0)]
-        (swap! capture-atom assoc [supervisor-id port] (inc existing))
-        (existing-fn supervisor worker-id)))))
-
-(defmacro capture-changed-workers
-  [& body]
-  `(let [launch-captured# (atom {})
-         shutdown-captured# (atom {})]
-     (with-var-roots [supervisor/launch-worker (mk-capture-launch-fn launch-captured#)
-                      supervisor/shutdown-worker (mk-capture-shutdown-fn shutdown-captured#)]
-                     ~@body
-                     {:launched @launch-captured#
-                      :shutdown @shutdown-captured#})))
-
-(defmacro capture-launched-workers
-  [& body]
-  `(:launched (capture-changed-workers ~@body)))
-
-(defmacro capture-shutdown-workers
-  [& body]
-  `(:shutdown (capture-changed-workers ~@body)))
-
-(defnk aggregated-stat
-  [cluster-map storm-name stat-key :component-ids nil]
-  (let [state (:storm-cluster-state cluster-map)
-        nimbus (:nimbus cluster-map)
-        storm-id (common/get-storm-id state storm-name)
-        component->tasks (reverse-map
-                           (common/storm-task-info
-                             (.getUserTopology nimbus storm-id)
-                             (from-json (.getTopologyConf nimbus storm-id))))
-        component->tasks (if component-ids
-                           (select-keys component->tasks component-ids)
-                           component->tasks)
-        task-ids (apply concat (vals component->tasks))
-        assignment (.assignment-info state storm-id nil)
-        taskbeats (.taskbeats state storm-id (:task->node+port assignment))
-        heartbeats (dofor [id task-ids] (get taskbeats id))
-        stats (dofor [hb heartbeats] (if hb (stat-key (:stats hb)) 0))]
-    (reduce + stats)))
-
-(defn emitted-spout-tuples
-  [cluster-map topology storm-name]
-  (aggregated-stat
-    cluster-map
-    storm-name
-    :emitted
-    :component-ids (keys (.get_spouts topology))))
-
-(defn transferred-tuples
-  [cluster-map storm-name]
-  (aggregated-stat cluster-map storm-name :transferred))
-
-(defn acked-tuples
-  [cluster-map storm-name]
-  (aggregated-stat cluster-map storm-name :acked))
-
-(defn simulate-wait
-  [cluster-map]
-  (if (Time/isSimulating)
-    (advance-cluster-time cluster-map 10)
-    (Thread/sleep 100)))
-
-(defprotocol CompletableSpout
-  (exhausted?
-    [this]
-    "Whether all the tuples for this spout have been completed.")
-  (cleanup
-    [this]
-    "Cleanup any global state kept")
-  (startup
-    [this]
-    "Prepare the spout (globally) before starting the topology"))
-
-(extend-type FixedTupleSpout
-  CompletableSpout
-  (exhausted? [this]
-              (= (-> this .getSourceTuples count)
-                 (.getCompleted this)))
-  (cleanup [this]
-           (.cleanup this))
-  (startup [this]))
-
-(extend-type TransactionalSpoutCoordinator
-  CompletableSpout
-  (exhausted? [this]
-              (exhausted? (.getSpout this)))
-  (cleanup [this]
-           (cleanup (.getSpout this)))
-  (startup [this]
-           (startup (.getSpout this))))
-
-(extend-type PartitionedTransactionalSpoutExecutor
-  CompletableSpout
-  (exhausted? [this]
-              (exhausted? (.getPartitionedSpout this)))
-  (cleanup [this]
-           (cleanup (.getPartitionedSpout this)))
-  (startup [this]
-           (startup (.getPartitionedSpout this))))
-
-(extend-type MemoryTransactionalSpout
-  CompletableSpout
-  (exhausted? [this]
-              (.isExhaustedTuples this))
-  (cleanup [this]
-           (.cleanup this))
-  (startup [this]
-           (.startup this)))
-
-(defn spout-objects [spec-map]
-  (for [[_ spout-spec] spec-map]
-    (-> spout-spec
-        .get_spout_object
-        deserialized-component-object)))
-
-(defn capture-topology
-  [topology]
-  (let [topology (.deepCopy topology)
-        spouts (.get_spouts topology)
-        bolts (.get_bolts topology)
-        all-streams (apply concat
-                           (for [[id spec] (merge (clojurify-structure spouts)
-                                                  (clojurify-structure bolts))]
-                             (for [[stream info] (.. spec get_common get_streams)]
-                               [(GlobalStreamId. id stream) (.is_direct info)])))
-        capturer (TupleCaptureBolt.)]
-    (.set_bolts topology
-                (assoc (clojurify-structure bolts)
-                  (uuid)
-                  (Bolt.
-                    (serialize-component-object capturer)
-                    (mk-plain-component-common (into {} (for [[id direct?] all-streams]
-                                                          [id (if direct?
-                                                                (mk-direct-grouping)
-                                                                (mk-global-grouping))]))
-                                               {}
-                                               nil))))
-    {:topology topology
-     :capturer capturer}))
-
-;; TODO: mock-sources needs to be able to mock out state spouts as well
-(defnk complete-topology
-  [cluster-map topology
-   :mock-sources {}
-   :storm-conf {}
-   :cleanup-state true
-   :topology-name nil
-   :timeout-ms TEST-TIMEOUT-MS]
-  ;; TODO: the idea of mocking for transactional topologies should be done at an
-  ;; abstraction level above... there should be a complete-transactional-topology for this
-  (let [{topology :topology capturer :capturer} (capture-topology topology)
-        storm-name (or topology-name (str "topologytest-" (uuid)))
-        state (:storm-cluster-state cluster-map)
-        spouts (.get_spouts topology)
-        replacements (map-val (fn [v]
-                                (FixedTupleSpout.
-                                  (for [tup v]
-                                    (if (map? tup)
-                                      (FixedTuple. (:stream tup) (:values tup))
-                                      tup))))
-                              mock-sources)]
-    (doseq [[id spout] replacements]
-      (let [spout-spec (get spouts id)]
-        (.set_spout_object spout-spec (serialize-component-object spout))))
-    (doseq [spout (spout-objects spouts)]
-      (when-not (extends? CompletableSpout (.getClass spout))
-        (throw (RuntimeException. (str "Cannot complete topology unless every spout is a CompletableSpout (or mocked to be); failed by " spout)))))
-
-    (doseq [spout (spout-objects spouts)]
-      (startup spout))
-
-    (submit-local-topology (:nimbus cluster-map) storm-name storm-conf topology)
-    (advance-cluster-time cluster-map 11)
-
-    (let [storm-id (common/get-storm-id state storm-name)]
-      ;; Give the topology time to come up, separately from the wait below for the spouts to complete
-      (simulate-wait cluster-map)
-
-      (while-timeout timeout-ms (not (every? exhausted? (spout-objects spouts)))
-                     (simulate-wait cluster-map))
-
-      (.killTopologyWithOpts (:nimbus cluster-map) storm-name (doto (KillOptions.) (.set_wait_secs 0)))
-      (while-timeout timeout-ms (.assignment-info state storm-id nil)
-                     (simulate-wait cluster-map))
-      (when cleanup-state
-        (doseq [spout (spout-objects spouts)]
-          (cleanup spout))))
-
-    (if cleanup-state
-      (.getAndRemoveResults capturer)
-      (.getAndClearResults capturer))))
-
-(defn read-tuples
-  ([results component-id stream-id]
-   (let [fixed-tuples (get results component-id [])]
-     (mapcat
-       (fn [ft]
-         (if (= stream-id (. ft stream))
-           [(vec (. ft values))]))
-       fixed-tuples)
-     ))
-  ([results component-id]
-   (read-tuples results component-id Utils/DEFAULT_STREAM_ID)))
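Note: a hedged end-to-end sketch of complete-topology and read-tuples together. The component ids, mocked tuples, and the topology itself are made up; only the functions and the TOPOLOGY-DEBUG config key come from this codebase:

(with-simulated-time-local-cluster [cluster :supervisors 2]
  (let [results (complete-topology cluster
                                   topology
                                   :mock-sources {"word-spout" [["nathan"] ["bob"]]}
                                   :storm-conf {TOPOLOGY-DEBUG true})]
    ;; every tuple "count-bolt" emitted on its default stream
    (read-tuples results "count-bolt")))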
-
-(defn ms=
-  [& args]
-  (apply = (map multi-set args)))
-
-(def TRACKER-BOLT-ID "+++tracker-bolt")
-
-;; TODO: should override system-topology! and wrap everything there
-(defn mk-tracked-topology
-  ([tracked-cluster topology]
-   (let [track-id (::track-id tracked-cluster)
-         ret (.deepCopy topology)]
-     (dofor [[_ bolt] (.get_bolts ret)
-             :let [obj (deserialized-component-object (.get_bolt_object bolt))]]
-            (.set_bolt_object bolt (serialize-component-object
-                                     (BoltTracker. obj track-id))))
-     (dofor [[_ spout] (.get_spouts ret)
-             :let [obj (deserialized-component-object (.get_spout_object spout))]]
-            (.set_spout_object spout (serialize-component-object
-                                       (SpoutTracker. obj track-id))))
-     {:topology ret
-      :last-spout-emit (atom 0)
-      :cluster tracked-cluster})))
-
-(defn assoc-track-id
-  [cluster track-id]
-  (assoc cluster ::track-id track-id))
-
-(defn increment-global!
-  [id key amt]
-  (-> (RegisteredGlobalState/getState id)
-      (get key)
-      (.addAndGet amt)))
-
-(defn global-amt
-  [id key]
-  (-> (RegisteredGlobalState/getState id)
-      (get key)
-      .get))
-
-(defmacro with-tracked-cluster
-  [[cluster-sym & cluster-args] & body]
-  `(let [id# (uuid)]
-     (RegisteredGlobalState/setState
-       id#
-       (doto (ConcurrentHashMap.)
-         (.put "spout-emitted" (AtomicInteger. 0))
-         (.put "transferred" (AtomicInteger. 0))
-         (.put "processed" (AtomicInteger. 0))))
-     (with-var-roots
-       [acker/mk-acker-bolt
-        (let [old# acker/mk-acker-bolt]
-          (fn [& args#] (NonRichBoltTracker. (apply old# args#) id#)))
-        ;; critical that this particular function is overridden here,
-        ;; since the transferred stat needs to be incremented at the moment
-        ;; of tuple emission (and not on a separate thread later) for
-        ;; topologies to be tracked correctly. This is because "transferred" *must*
-        ;; be incremented before "processing".
-        executor/mk-executor-transfer-fn
-        (let [old# executor/mk-executor-transfer-fn]
-          (fn [& args#]
-            (let [transferrer# (apply old# args#)]
-              (fn [& args2#]
-                ;; (log-message "Transferring: " transfer-args#)
-                (increment-global! id# "transferred" 1)
-                (apply transferrer# args2#)))))]
-       (with-simulated-time-local-cluster [~cluster-sym ~@cluster-args]
-                           (let [~cluster-sym (assoc-track-id ~cluster-sym id#)]
-                             ~@body)))
-     (RegisteredGlobalState/clearState id#)))
-
-(defn tracked-wait
-  "Waits until topology is idle and 'amt' more tuples have been emitted by spouts."
-  ([tracked-topology]
-     (tracked-wait tracked-topology 1 TEST-TIMEOUT-MS))
-  ([tracked-topology amt]
-     (tracked-wait tracked-topology amt TEST-TIMEOUT-MS))
-  ([tracked-topology amt timeout-ms]
-    (let [target (+ amt @(:last-spout-emit tracked-topology))
-          track-id (-> tracked-topology :cluster ::track-id)
-          waiting? (fn []
-                     (or (not= target (global-amt track-id "spout-emitted"))
-                         (not= (global-amt track-id "transferred")
-                               (global-amt track-id "processed"))))]
-      (while-timeout timeout-ms (waiting?)
-                     ;; (println "Spout emitted: " (global-amt track-id "spout-emitted"))
-                     ;; (println "Processed: " (global-amt track-id "processed"))
-                     ;; (println "Transferred: " (global-amt track-id "transferred"))
-                    (Thread/sleep (rand-int 200)))
-      (reset! (:last-spout-emit tracked-topology) target))))
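Note: a hedged sketch of the tracked-cluster flow. feeder-spout, mk-tracked-topology, submit-local-topology, advance-cluster-time, and tracked-wait all come from this file; the topology wiring around the feeder and the fed value are illustrative:

(with-tracked-cluster [cluster]
  (let [feeder (feeder-spout ["word"])
        tracked (mk-tracked-topology cluster topology)]  ;; topology assumed to use the feeder spout
    (submit-local-topology (:nimbus cluster) "tracked-demo" {} (:topology tracked))
    (advance-cluster-time cluster 11)
    (.feed feeder ["a"])
    ;; blocks until one more spout tuple has been emitted and fully processed
    (tracked-wait tracked 1)))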
-
-(defnk test-tuple
-  [values
-   :stream Utils/DEFAULT_STREAM_ID
-   :component "component"
-   :fields nil]
-  (let [fields (or fields
-                   (->> (iterate inc 1)
-                        (take (count values))
-                        (map #(str "field" %))))
-        spout-spec (mk-spout-spec* (TestWordSpout.)
-                                   {stream fields})
-        topology (StormTopology. {component spout-spec} {} {})
-        context (TopologyContext.
-                  topology
-                  (read-storm-config)
-                  {(int 1) component}
-                  {component [(int 1)]}
-                  {component {stream (Fields. fields)}}
-                  "test-storm-id"
-                  nil
-                  nil
-                  (int 1)
-                  nil
-                  [(int 1)]
-                  {}
-                  {}
-                  (HashMap.)
-                  (HashMap.)
-                  (atom false))]
-    (TupleImpl. context values 1 stream)))
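Note: a hedged sketch of using test-tuple to drive a bolt in a plain unit test; the values and field names are arbitrary:

(let [t (test-tuple ["nathan" 7] :fields ["word" "count"])]
  [(.getStringByField t "word")
   (.getValueByField t "count")])
;; => ["nathan" 7]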
-
-(defmacro with-timeout
-  [millis unit & body]
-  `(let [f# (future ~@body)]
-     (try
-       (.get f# ~millis ~unit)
-       (finally (future-cancel f#)))))

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/testing4j.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/testing4j.clj b/storm-core/src/clj/backtype/storm/testing4j.clj
deleted file mode 100644
index bc5dc57..0000000
--- a/storm-core/src/clj/backtype/storm/testing4j.clj
+++ /dev/null
@@ -1,184 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.testing4j
-  (:import [java.util Map List Collection ArrayList])
-  (:require [backtype.storm [LocalCluster :as LocalCluster]])
-  (:import [backtype.storm Config ILocalCluster LocalCluster])
-  (:import [backtype.storm.generated StormTopology])
-  (:import [backtype.storm.daemon nimbus])
-  (:import [backtype.storm.testing TestJob MockedSources TrackedTopology
-            MkClusterParam CompleteTopologyParam MkTupleParam])
-  (:import [backtype.storm.utils Utils])
-  (:use [backtype.storm testing util log])
-  (:gen-class
-   :name backtype.storm.Testing
-   :methods [^:static [completeTopology
-                       [backtype.storm.ILocalCluster  backtype.storm.generated.StormTopology
-                        backtype.storm.testing.CompleteTopologyParam]
-                       java.util.Map]
-             ^:static [completeTopology
-                       [backtype.storm.ILocalCluster backtype.storm.generated.StormTopology]
-                       java.util.Map]
-             ^:static [withSimulatedTime [Runnable] void]
-             ^:static [withLocalCluster [backtype.storm.testing.TestJob] void]
-             ^:static [withLocalCluster [backtype.storm.testing.MkClusterParam backtype.storm.testing.TestJob] void]
-             ^:static [getLocalCluster [java.util.Map] backtype.storm.ILocalCluster]
-             ^:static [withSimulatedTimeLocalCluster [backtype.storm.testing.TestJob] void]
-             ^:static [withSimulatedTimeLocalCluster [backtype.storm.testing.MkClusterParam backtype.storm.testing.TestJob] void]
-             ^:static [withTrackedCluster [backtype.storm.testing.TestJob] void]
-             ^:static [withTrackedCluster [backtype.storm.testing.MkClusterParam backtype.storm.testing.TestJob] void]
-             ^:static [readTuples [java.util.Map String String] java.util.List]
-             ^:static [readTuples [java.util.Map String] java.util.List]
-             ^:static [mkTrackedTopology [backtype.storm.ILocalCluster backtype.storm.generated.StormTopology] backtype.storm.testing.TrackedTopology]
-             ^:static [trackedWait [backtype.storm.testing.TrackedTopology] void]
-             ^:static [trackedWait [backtype.storm.testing.TrackedTopology Integer] void]
-             ^:static [trackedWait [backtype.storm.testing.TrackedTopology Integer Integer] void]
-             ^:static [advanceClusterTime [backtype.storm.ILocalCluster Integer Integer] void]
-             ^:static [advanceClusterTime [backtype.storm.ILocalCluster Integer] void]
-             ^:static [multiseteq [java.util.Collection java.util.Collection] boolean]
-             ^:static [multiseteq [java.util.Map java.util.Map] boolean]
-             ^:static [testTuple [java.util.List] backtype.storm.tuple.Tuple]
-             ^:static [testTuple [java.util.List backtype.storm.testing.MkTupleParam] backtype.storm.tuple.Tuple]]))
-
-(defn -completeTopology
-  ([^ILocalCluster cluster ^StormTopology topology ^CompleteTopologyParam completeTopologyParam]
-    (let [mocked-sources (or (-> completeTopologyParam .getMockedSources .getData) {})
-          storm-conf (or (.getStormConf completeTopologyParam) {})
-          cleanup-state (or (.getCleanupState completeTopologyParam) true)
-          topology-name (.getTopologyName completeTopologyParam)
-          timeout-ms (or (.getTimeoutMs completeTopologyParam) TEST-TIMEOUT-MS)]
-      (complete-topology (.getState cluster) topology
-        :mock-sources mocked-sources
-        :storm-conf storm-conf
-        :cleanup-state cleanup-state
-        :topology-name topology-name
-        :timeout-ms timeout-ms)))
-  ([^ILocalCluster cluster ^StormTopology topology]
-    (-completeTopology cluster topology (CompleteTopologyParam.))))
-
-
-(defn -withSimulatedTime
-  [^Runnable code]
-  (with-simulated-time
-    (.run code)))
-
-(defmacro with-cluster
-  [cluster-type mkClusterParam code]
-  `(let [supervisors# (or (.getSupervisors ~mkClusterParam) 2)
-         ports-per-supervisor# (or (.getPortsPerSupervisor ~mkClusterParam) 3)
-         daemon-conf# (or (.getDaemonConf ~mkClusterParam) {})]
-     (~cluster-type [cluster# :supervisors supervisors#
-                     :ports-per-supervisor ports-per-supervisor#
-                     :daemon-conf daemon-conf#]
-                    (let [cluster# (LocalCluster. cluster#)]
-                      (.run ~code cluster#)))))
-
-(defn -withLocalCluster
-  ([^MkClusterParam mkClusterParam ^TestJob code]
-     (with-cluster with-local-cluster mkClusterParam code))
-  ([^TestJob code]
-     (-withLocalCluster (MkClusterParam.) code)))
-
-(defn -getLocalCluster
-  ([^Map clusterConf]
-     (let [daemon-conf (get-in clusterConf ["daemon-conf"] {})
-           supervisors (get-in clusterConf ["supervisors"] 2)
-           ports-per-supervisor (get-in clusterConf ["ports-per-supervisor"] 3)
-           inimbus (get-in clusterConf ["inimbus"] nil)
-           supervisor-slot-port-min (get-in clusterConf ["supervisor-slot-port-min"] 1024)
-           nimbus-daemon (get-in clusterConf ["nimbus-daemon"] false)
-           local-cluster-map (mk-local-storm-cluster :supervisors supervisors
-                                                     :ports-per-supervisor ports-per-supervisor
-                                                     :daemon-conf daemon-conf
-                                                     :inimbus inimbus
-                                                     :supervisor-slot-port-min supervisor-slot-port-min
-                                                     :nimbus-daemon nimbus-daemon
-                                                     )]
-       (LocalCluster. local-cluster-map))))
-
-(defn -withSimulatedTimeLocalCluster
-  ([^MkClusterParam mkClusterParam ^TestJob code]
-     (with-cluster with-simulated-time-local-cluster mkClusterParam code))
-  ([^TestJob code]
-     (-withSimulatedTimeLocalCluster (MkClusterParam.) code)))
-
-(defn -withTrackedCluster
-  ([^MkClusterParam mkClusterParam ^TestJob code]
-     (with-cluster with-tracked-cluster mkClusterParam code))
-  ([^TestJob code]
-     (-withTrackedCluster (MkClusterParam.) code)))
-
-(defn- find-tuples
-  [^List fixed-tuples ^String stream]
-  (let [ret (ArrayList.)]
-    (doseq [fixed-tuple fixed-tuples]
-      (if (= (.stream fixed-tuple) stream)
-        (.add ret (.values fixed-tuple))))
-    ret))
-
-(defn -readTuples
-  ([^Map result ^String componentId ^String streamId]
-   (let [stream-result (.get result componentId)
-         ret (if stream-result
-               (find-tuples stream-result streamId)
-               [])]
-     ret))
-  ([^Map result ^String componentId]
-   (-readTuples result componentId Utils/DEFAULT_STREAM_ID)))
-
-(defn -mkTrackedTopology
-  [^ILocalCluster trackedCluster ^StormTopology topology]
-  (-> (mk-tracked-topology (.getState trackedCluster) topology)
-      (TrackedTopology.)))
-
-(defn -trackedWait
-  ([^TrackedTopology trackedTopology ^Integer amt ^Integer timeout-ms]
-   (tracked-wait trackedTopology amt timeout-ms))
-  ([^TrackedTopology trackedTopology ^Integer amt]
-   (tracked-wait trackedTopology amt))
-  ([^TrackedTopology trackedTopology]
-   (-trackedWait trackedTopology 1)))
-
-(defn -advanceClusterTime
-  ([^ILocalCluster cluster ^Integer secs ^Integer step]
-   (advance-cluster-time (.getState cluster) secs step))
-  ([^ILocalCluster cluster ^Integer secs]
-   (-advanceClusterTime cluster secs 1)))
-
-(defn- multiseteq
-  [^Object obj1 ^Object obj2]
-  (let [obj1 (clojurify-structure obj1)
-        obj2 (clojurify-structure obj2)]
-    (ms= obj1 obj2)))
-
-(defn -multiseteq
-  [^Collection coll1 ^Collection coll2]
-  (multiseteq coll1 coll2))
-
-(defn -multiseteq
-  [^Map coll1 ^Map coll2]
-  (multiseteq coll1 coll2))
-
-(defn -testTuple
-  ([^List values]
-   (-testTuple values nil))
-  ([^List values ^MkTupleParam param]
-   (if (nil? param)
-     (test-tuple values)
-     (let [stream (or (.getStream param) Utils/DEFAULT_STREAM_ID)
-           component (or (.getComponent param) "component")
-           fields (.getFields param)]
-       (test-tuple values :stream stream :component component :fields fields)))))

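For reference, a minimal sketch of driving the gen-class'd backtype.storm.Testing
facade above from Clojure; the reify body is illustrative and assumes only the
single-argument TestJob run(cluster) callback that with-cluster invokes:

    (import '[backtype.storm Testing]
            '[backtype.storm.testing TestJob])

    ;; Brings up a simulated-time local cluster, hands it to the TestJob
    ;; callback, and tears it down afterwards.
    (Testing/withSimulatedTimeLocalCluster
      (reify TestJob
        (run [this cluster]
          ;; submit and exercise a topology against `cluster` here
          (println "got cluster" cluster))))
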
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/thrift.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/thrift.clj b/storm-core/src/clj/backtype/storm/thrift.clj
deleted file mode 100644
index 8f4c659..0000000
--- a/storm-core/src/clj/backtype/storm/thrift.clj
+++ /dev/null
@@ -1,284 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-
-(ns backtype.storm.thrift
-  (:import [java.util HashMap]
-           [java.io Serializable]
-           [backtype.storm.generated NodeInfo Assignment])
-  (:import [backtype.storm.generated JavaObject Grouping Nimbus StormTopology
-            StormTopology$_Fields Bolt Nimbus$Client Nimbus$Iface
-            ComponentCommon Grouping$_Fields SpoutSpec NullStruct StreamInfo
-            GlobalStreamId ComponentObject ComponentObject$_Fields
-            ShellComponent SupervisorInfo])
-  (:import [backtype.storm.utils Utils NimbusClient])
-  (:import [backtype.storm Constants])
-  (:import [backtype.storm.security.auth ReqContext])
-  (:import [backtype.storm.grouping CustomStreamGrouping])
-  (:import [backtype.storm.topology TopologyBuilder])
-  (:import [backtype.storm.clojure RichShellBolt RichShellSpout])
-  (:import [org.apache.thrift.transport TTransport])
-  (:use [backtype.storm util config log zookeeper]))
-
-(defn instantiate-java-object
-  [^JavaObject obj]
-  (let [name (symbol (.get_full_class_name obj))
-        args (map (memfn getFieldValue) (.get_args_list obj))]
-    (eval `(new ~name ~@args))))
-
-(def grouping-constants
-  {Grouping$_Fields/FIELDS :fields
-   Grouping$_Fields/SHUFFLE :shuffle
-   Grouping$_Fields/ALL :all
-   Grouping$_Fields/NONE :none
-   Grouping$_Fields/CUSTOM_SERIALIZED :custom-serialized
-   Grouping$_Fields/CUSTOM_OBJECT :custom-object
-   Grouping$_Fields/DIRECT :direct
-   Grouping$_Fields/LOCAL_OR_SHUFFLE :local-or-shuffle})
-
-(defn grouping-type
-  [^Grouping grouping]
-  (grouping-constants (.getSetField grouping)))
-
-(defn field-grouping
-  [^Grouping grouping]
-  (when-not (= (grouping-type grouping) :fields)
-    (throw (IllegalArgumentException. "Tried to get grouping fields from non fields grouping")))
-  (.get_fields grouping))
-
-(defn global-grouping?
-  [^Grouping grouping]
-  (and (= :fields (grouping-type grouping))
-       (empty? (field-grouping grouping))))
-
-(defn parallelism-hint
-  [^ComponentCommon component-common]
-  (let [phint (.get_parallelism_hint component-common)]
-    (if-not (.is_set_parallelism_hint component-common) 1 phint)))
-
-(defn nimbus-client-and-conn
-  ([host port]
-    (nimbus-client-and-conn host port nil))
-  ([host port as-user]
-  (log-message "Connecting to Nimbus at " host ":" port " as user: " as-user)
-  (let [conf (read-storm-config)
-        nimbusClient (NimbusClient. conf host port nil as-user)
-        client (.getClient nimbusClient)
-        transport (.transport nimbusClient)]
-        [client transport] )))
-
-(defmacro with-nimbus-connection
-  [[client-sym host port] & body]
-  `(let [[^Nimbus$Client ~client-sym ^TTransport conn#] (nimbus-client-and-conn ~host ~port)]
-    (try
-      ~@body
-    (finally (.close conn#)))))
-
-(defmacro with-configured-nimbus-connection
-  [client-sym & body]
-  `(let [conf# (read-storm-config)
-         context# (ReqContext/context)
-         user# (if (.principal context#) (.getName (.principal context#)))
-         nimbusClient# (NimbusClient/getConfiguredClientAs conf# user#)
-         ~client-sym (.getClient nimbusClient#)
-         conn# (.transport nimbusClient#)
-         ]
-     (try
-       ~@body
-     (finally (.close conn#)))))
-
-(defn direct-output-fields
-  [fields]
-  (StreamInfo. fields true))
-
-(defn output-fields
-  [fields]
-  (StreamInfo. fields false))
-
-(defn mk-output-spec
-  [output-spec]
-  (let [output-spec (if (map? output-spec)
-                      output-spec
-                      {Utils/DEFAULT_STREAM_ID output-spec})]
-    (map-val
-      (fn [out]
-        (if (instance? StreamInfo out)
-          out
-          (StreamInfo. out false)))
-      output-spec)))
-
-(defnk mk-plain-component-common
-  [inputs output-spec parallelism-hint :conf nil]
-  (let [ret (ComponentCommon. (HashMap. inputs) (HashMap. (mk-output-spec output-spec)))]
-    (when parallelism-hint
-      (.set_parallelism_hint ret parallelism-hint))
-    (when conf
-      (.set_json_conf ret (to-json conf)))
-    ret))
-
-(defnk mk-spout-spec*
-  [spout outputs :p nil :conf nil]
-  (SpoutSpec. (ComponentObject/serialized_java (Utils/javaSerialize spout))
-              (mk-plain-component-common {} outputs p :conf conf)))
-
-(defn mk-shuffle-grouping
-  []
-  (Grouping/shuffle (NullStruct.)))
-
-(defn mk-local-or-shuffle-grouping
-  []
-  (Grouping/local_or_shuffle (NullStruct.)))
-
-(defn mk-fields-grouping
-  [fields]
-  (Grouping/fields fields))
-
-(defn mk-global-grouping
-  []
-  (mk-fields-grouping []))
-
-(defn mk-direct-grouping
-  []
-  (Grouping/direct (NullStruct.)))
-
-(defn mk-all-grouping
-  []
-  (Grouping/all (NullStruct.)))
-
-(defn mk-none-grouping
-  []
-  (Grouping/none (NullStruct.)))
-
-(defn deserialized-component-object
-  [^ComponentObject obj]
-  (when (not= (.getSetField obj) ComponentObject$_Fields/SERIALIZED_JAVA)
-    (throw (RuntimeException. "Cannot deserialize non-java-serialized object")))
-  (Utils/javaDeserialize (.get_serialized_java obj) Serializable))
-
-(defn serialize-component-object
-  [obj]
-  (ComponentObject/serialized_java (Utils/javaSerialize obj)))
-
-(defn- mk-grouping
-  [grouping-spec]
-  (cond (nil? grouping-spec)
-        (mk-none-grouping)
-
-        (instance? Grouping grouping-spec)
-        grouping-spec
-
-        (instance? CustomStreamGrouping grouping-spec)
-        (Grouping/custom_serialized (Utils/javaSerialize grouping-spec))
-
-        (instance? JavaObject grouping-spec)
-        (Grouping/custom_object grouping-spec)
-
-        (sequential? grouping-spec)
-        (mk-fields-grouping grouping-spec)
-
-        (= grouping-spec :shuffle)
-        (mk-shuffle-grouping)
-
-        (= grouping-spec :local-or-shuffle)
-        (mk-local-or-shuffle-grouping)
-        (= grouping-spec :none)
-        (mk-none-grouping)
-
-        (= grouping-spec :all)
-        (mk-all-grouping)
-
-        (= grouping-spec :global)
-        (mk-global-grouping)
-
-        (= grouping-spec :direct)
-        (mk-direct-grouping)
-
-        true
-        (throw (IllegalArgumentException.
-                 (str grouping-spec " is not a valid grouping")))))
-
-(defn- mk-inputs
-  [inputs]
-  (into {} (for [[stream-id grouping-spec] inputs]
-             [(if (sequential? stream-id)
-                (GlobalStreamId. (first stream-id) (second stream-id))
-                (GlobalStreamId. stream-id Utils/DEFAULT_STREAM_ID))
-              (mk-grouping grouping-spec)])))
-
-(defnk mk-bolt-spec*
-  [inputs bolt outputs :p nil :conf nil]
-  (let [common (mk-plain-component-common (mk-inputs inputs) outputs p :conf conf)]
-    (Bolt. (ComponentObject/serialized_java (Utils/javaSerialize bolt))
-           common)))
-
-(defnk mk-spout-spec
-  [spout :parallelism-hint nil :p nil :conf nil]
-  (let [parallelism-hint (if p p parallelism-hint)]
-    {:obj spout :p parallelism-hint :conf conf}))
-
-(defn- shell-component-params
-  [command script-or-output-spec kwargs]
-  (if (string? script-or-output-spec)
-    [(into-array String [command script-or-output-spec])
-     (first kwargs)
-     (rest kwargs)]
-    [(into-array String command)
-     script-or-output-spec
-     kwargs]))
-
-(defnk mk-bolt-spec
-  [inputs bolt :parallelism-hint nil :p nil :conf nil]
-  (let [parallelism-hint (if p p parallelism-hint)]
-    {:obj bolt :inputs inputs :p parallelism-hint :conf conf}))
-
-(defn mk-shell-bolt-spec
-  [inputs command script-or-output-spec & kwargs]
-  (let [[command output-spec kwargs]
-        (shell-component-params command script-or-output-spec kwargs)]
-    (apply mk-bolt-spec inputs
-           (RichShellBolt. command (mk-output-spec output-spec)) kwargs)))
-
-(defn mk-shell-spout-spec
-  [command script-or-output-spec & kwargs]
-  (let [[command output-spec kwargs]
-        (shell-component-params command script-or-output-spec kwargs)]
-    (apply mk-spout-spec
-           (RichShellSpout. command (mk-output-spec output-spec)) kwargs)))
-
-(defn- add-inputs
-  [declarer inputs]
-  (doseq [[id grouping] (mk-inputs inputs)]
-    (.grouping declarer id grouping)))
-
-(defn mk-topology
-  ([spout-map bolt-map]
-   (let [builder (TopologyBuilder.)]
-     (doseq [[name {spout :obj p :p conf :conf}] spout-map]
-       (-> builder (.setSpout name spout (if-not (nil? p) (int p) p)) (.addConfigurations conf)))
-     (doseq [[name {bolt :obj p :p conf :conf inputs :inputs}] bolt-map]
-       (-> builder (.setBolt name bolt (if-not (nil? p) (int p) p)) (.addConfigurations conf) (add-inputs inputs)))
-     (.createTopology builder)))
-  ([spout-map bolt-map state-spout-map]
-   (mk-topology spout-map bolt-map)))
-
-;; clojurify-structure is needed or else every element becomes the same after successive calls
-;; don't know why this happens
-(def STORM-TOPOLOGY-FIELDS
-  (-> StormTopology/metaDataMap clojurify-structure keys))
-
-(def SPOUT-FIELDS
-  [StormTopology$_Fields/SPOUTS
-   StormTopology$_Fields/STATE_SPOUTS])
-

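For reference, a minimal sketch of building a topology with the spec helpers
above; my-spout, split-bolt, and count-bolt are placeholders for real
IRichSpout/IRichBolt instances and are not part of this commit:

    ;; Input maps go from a component id (or [component stream] pair) to a
    ;; grouping spec: :shuffle, a vector of field names, :global, :all,
    ;; :direct, or nil for :none.
    (mk-topology
      {"sentences" (mk-spout-spec my-spout :p 2)}
      {"split" (mk-bolt-spec {"sentences" :shuffle} split-bolt :p 4)
       "count" (mk-bolt-spec {"split" ["word"]} count-bolt :p 4)})
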
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/timer.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/timer.clj b/storm-core/src/clj/backtype/storm/timer.clj
deleted file mode 100644
index b5f73f7..0000000
--- a/storm-core/src/clj/backtype/storm/timer.clj
+++ /dev/null
@@ -1,128 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-
-(ns backtype.storm.timer
-  (:import [backtype.storm.utils Time])
-  (:import [java.util PriorityQueue Comparator Random])
-  (:import [java.util.concurrent Semaphore])
-  (:use [backtype.storm util log]))
-
-;; The timer defined in this file is very similar to java.util.Timer, except
-;; it integrates with Storm's time simulation capabilities. This lets us test
-;; code that does asynchronous work on the timer thread
-
-(defnk mk-timer [:kill-fn (fn [& _] ) :timer-name nil]
-  (let [queue (PriorityQueue. 10 (reify Comparator
-                                   (compare
-                                     [this o1 o2]
-                                     (- (first o1) (first o2)))
-                                   (equals
-                                     [this obj]
-                                     true)))
-        active (atom true)
-        lock (Object.)
-        notifier (Semaphore. 0)
-        thread-name (if timer-name timer-name "timer")
-        timer-thread (Thread.
-                       (fn []
-                         (while @active
-                           (try
-                             (let [[time-millis _ _ :as elem] (locking lock (.peek queue))]
-                               (if (and elem (>= (current-time-millis) time-millis))
-                                 ;; It is imperative to not run the function
-                                 ;; inside the timer lock. Otherwise, it is
-                                 ;; possible to deadlock if the fn deals with
-                                 ;; other locks, like the submit lock.
-                                 (let [afn (locking lock (second (.poll queue)))]
-                                   (afn))
-                                 (if time-millis
-                                   ;; If any events are scheduled, sleep until
-                                   ;; event generation. If any recurring events
-                                   ;; are scheduled then we will always go
-                                   ;; through this branch, sleeping only the
-                                   ;; exact necessary amount of time. We give
-                                   ;; an upper bound, e.g. 1000 millis, to the
-                                   ;; sleeping time, to limit the response time
-                                   ;; for detecting any new event within 1 secs.
-                                   (Time/sleep (min 1000 (- time-millis (current-time-millis))))
-                                   ;; Otherwise poll to see if any new event
-                                   ;; was scheduled. This is, in essence, the
-                                   ;; response time for detecting any new event
-                                   ;; schedulings when there are no scheduled
-                                   ;; events.
-                                   (Time/sleep 1000))))
-                             (catch Throwable t
-                               ;; Because the interrupted exception can be
-                               ;; wrapped in a RuntimeException.
-                               (when-not (exception-cause? InterruptedException t)
-                                 (kill-fn t)
-                                 (reset! active false)
-                                 (throw t)))))
-                         (.release notifier)) thread-name)]
-    (.setDaemon timer-thread true)
-    (.setPriority timer-thread Thread/MAX_PRIORITY)
-    (.start timer-thread)
-    {:timer-thread timer-thread
-     :queue queue
-     :active active
-     :lock lock
-     :random (Random.)
-     :cancel-notifier notifier}))
-
-(defn- check-active!
-  [timer]
-  (when-not @(:active timer)
-    (throw (IllegalStateException. "Timer is not active"))))
-
-(defnk schedule
-  [timer delay-secs afn :check-active true :jitter-ms 0]
-  (when check-active (check-active! timer))
-  (let [id (uuid)
-        ^PriorityQueue queue (:queue timer)
-        end-time-ms (+ (current-time-millis) (secs-to-millis-long delay-secs))
-        end-time-ms (if (< 0 jitter-ms) (+ (.nextInt (:random timer) jitter-ms) end-time-ms) end-time-ms)]
-    (locking (:lock timer)
-      (.add queue [end-time-ms afn id]))))
-
-(defn schedule-recurring
-  [timer delay-secs recur-secs afn]
-  (schedule timer
-            delay-secs
-            (fn this []
-              (afn)
-              ; This avoids a race condition with cancel-timer.
-              (schedule timer recur-secs this :check-active false))))
-
-(defn schedule-recurring-with-jitter
-  [timer delay-secs recur-secs jitter-ms afn]
-  (schedule timer
-            delay-secs
-            (fn this []
-              (afn)
-              ; This avoids a race condition with cancel-timer.
-              (schedule timer recur-secs this :check-active false :jitter-ms jitter-ms))))
-
-(defn cancel-timer
-  [timer]
-  (check-active! timer)
-  (locking (:lock timer)
-    (reset! (:active timer) false)
-    (.interrupt (:timer-thread timer)))
-  (.acquire (:cancel-notifier timer)))
-
-(defn timer-waiting?
-  [timer]
-  (Time/isThreadWaiting (:timer-thread timer)))

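For reference, a minimal sketch of the timer API above; the names and the
callback body are illustrative:

    (let [timer (mk-timer :timer-name "heartbeat-timer")]
      ;; Fire once after 5 seconds, then every 10 seconds thereafter.
      (schedule-recurring timer 5 10 (fn [] (println "tick")))
      ;; ... later: interrupt the timer thread and block until it exits.
      (cancel-timer timer))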

[19/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/daemon/logviewer.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/daemon/logviewer.clj b/storm-core/src/clj/org/apache/storm/daemon/logviewer.clj
new file mode 100644
index 0000000..39a9c12
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/daemon/logviewer.clj
@@ -0,0 +1,1199 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.daemon.logviewer
+  (:use compojure.core)
+  (:use [clojure.set :only [difference intersection]])
+  (:use [clojure.string :only [blank? split]])
+  (:use [hiccup core page-helpers form-helpers])
+  (:use [org.apache.storm config util log timer])
+  (:use [org.apache.storm.ui helpers])
+  (:import [org.apache.storm.utils Utils VersionInfo])
+  (:import [org.slf4j LoggerFactory])
+  (:import [java.util Arrays ArrayList HashSet])
+  (:import [java.util.zip GZIPInputStream])
+  (:import [org.apache.logging.log4j LogManager])
+  (:import [org.apache.logging.log4j.core Appender LoggerContext])
+  (:import [org.apache.logging.log4j.core.appender RollingFileAppender])
+  (:import [java.io BufferedInputStream File FileFilter FileInputStream
+            InputStream InputStreamReader])
+  (:import [java.nio.file Files Path Paths DirectoryStream])
+  (:import [java.nio ByteBuffer])
+  (:import [org.apache.storm.utils Utils])
+  (:import [org.apache.storm.daemon DirectoryCleaner])
+  (:import [org.yaml.snakeyaml Yaml]
+           [org.yaml.snakeyaml.constructor SafeConstructor])
+  (:import [org.apache.storm.ui InvalidRequestException]
+           [org.apache.storm.security.auth AuthUtils])
+  (:require [org.apache.storm.daemon common [supervisor :as supervisor]])
+  (:require [compojure.route :as route]
+            [compojure.handler :as handler]
+            [ring.middleware.keyword-params]
+            [ring.util.codec :as codec]
+            [ring.util.response :as resp]
+            [clojure.string :as string])
+  (:require [metrics.meters :refer [defmeter mark!]])
+  (:use [org.apache.storm.daemon.common :only [start-metrics-reporters]])
+  (:gen-class))
+
+(def ^:dynamic *STORM-CONF* (read-storm-config))
+(def STORM-VERSION (VersionInfo/getVersion))
+
+(defmeter logviewer:num-log-page-http-requests)
+(defmeter logviewer:num-daemonlog-page-http-requests)
+(defmeter logviewer:num-download-log-file-http-requests)
+(defmeter logviewer:num-download-log-daemon-file-http-requests)
+(defmeter logviewer:num-list-logs-http-requests)
+
+(defn cleanup-cutoff-age-millis [conf now-millis]
+  (- now-millis (* (conf LOGVIEWER-CLEANUP-AGE-MINS) 60 1000)))
+
+(defn get-stream-for-dir
+  [^File f]
+  (try (Files/newDirectoryStream (.toPath f))
+    (catch Exception ex (log-error ex) nil)))
+
+(defn- last-modifiedtime-worker-logdir
+  "Return the most recent modification time across the log files in a worker's
+  log dir. A DirectoryStream is used rather than File.listFiles to avoid high
+  memory usage when a directory contains a very large number of files."
+  [^File log-dir]
+  (let [^DirectoryStream stream (get-stream-for-dir log-dir)
+        dir-modified (.lastModified log-dir)
+        last-modified (try (reduce
+                        (fn [maximum path]
+                          (let [curr (.lastModified (.toFile path))]
+                            (if (> curr maximum)
+                              curr
+                              maximum)))
+                        dir-modified
+                        stream)
+                        (catch Exception ex
+                          (log-error ex) dir-modified)
+                        (finally
+                          (if (instance? DirectoryStream stream)
+                            (.close stream))))]
+    last-modified))
+
+(defn get-size-for-logdir
+  "Return the sum of the lengths of all log files in a worker's log dir.
+   A DirectoryStream is used rather than File.listFiles to avoid high memory
+   usage when a directory contains a very large number of files."
+  [log-dir]
+  (let [^DirectoryStream stream (get-stream-for-dir log-dir)]
+    (reduce
+      (fn [sum path]
+        (let [size (.length (.toFile path))]
+          (+ sum size)))
+      0
+      stream)))
+
+(defn mk-FileFilter-for-log-cleanup [conf now-millis]
+  (let [cutoff-age-millis (cleanup-cutoff-age-millis conf now-millis)]
+    (reify FileFilter (^boolean accept [this ^File file]
+                        (boolean (and
+                                   (not (.isFile file))
+                                   (<= (last-modifiedtime-worker-logdir file) cutoff-age-millis)))))))
+
+(defn select-dirs-for-cleanup [conf now-millis root-dir]
+  (let [file-filter (mk-FileFilter-for-log-cleanup conf now-millis)]
+    (reduce clojure.set/union
+            (sorted-set)
+            (for [^File topo-dir (.listFiles (File. root-dir))]
+              (into [] (.listFiles topo-dir file-filter))))))
+
+(defn get-topo-port-workerlog
+  "Return the path of the worker log with the format of topoId/port/worker.log.*"
+  [^File file]
+  (clojure.string/join file-path-separator
+                       (take-last 3
+                                  (split (.getCanonicalPath file) (re-pattern file-path-separator)))))
+
+(defn get-metadata-file-for-log-root-name [root-name root-dir]
+  (let [metaFile (clojure.java.io/file root-dir "metadata"
+                                       (str root-name ".yaml"))]
+    (if (.exists metaFile)
+      metaFile
+      (do
+        (log-warn "Could not find " (.getCanonicalPath metaFile)
+                  " to clean up for " root-name)
+        nil))))
+
+(defn get-metadata-file-for-worker-logdir [logdir]
+  (let [metaFile (clojure.java.io/file logdir "worker.yaml")]
+    (if (.exists metaFile)
+      metaFile
+      (do
+        (log-warn "Could not find " (.getCanonicalPath metaFile)
+                  " to clean up for " logdir)
+        nil))))
+
+(defn get-worker-id-from-metadata-file [metaFile]
+  (get (clojure-from-yaml-file metaFile) "worker-id"))
+
+(defn get-topo-owner-from-metadata-file [metaFile]
+  (get (clojure-from-yaml-file metaFile) TOPOLOGY-SUBMITTER-USER))
+
+(defn identify-worker-log-dirs
+  "Return a map from worker id to that worker's log directory."
+  [log-dirs]
+  (into {} (for [logdir log-dirs
+                 :let [metaFile (get-metadata-file-for-worker-logdir logdir)]
+                 :when metaFile]
+             {(get-worker-id-from-metadata-file metaFile) logdir})))
+
+(defn get-alive-ids
+  [conf now-secs]
+  (->>
+    (supervisor/read-worker-heartbeats conf)
+    (remove
+      #(or (not (val %))
+           (supervisor/is-worker-hb-timed-out? now-secs
+                                               (val %)
+                                               conf)))
+    keys
+    set))
+
+(defn get-dead-worker-dirs
+  "Return a sorted set of java.io.Files that were written by workers that are
+  now dead"
+  [conf now-secs log-dirs]
+  (if (empty? log-dirs)
+    (sorted-set)
+    (let [alive-ids (get-alive-ids conf now-secs)
+          id->dir (identify-worker-log-dirs log-dirs)]
+      (apply sorted-set
+             (for [[id dir] id->dir
+                   :when (not (contains? alive-ids id))]
+               dir)))))
+
+(defn get-all-worker-dirs [^File root-dir]
+  (reduce clojure.set/union
+          (sorted-set)
+          (for [^File topo-dir (.listFiles root-dir)]
+            (into [] (.listFiles topo-dir)))))
+
+(defn get-alive-worker-dirs
+  "Return a sorted set of java.io.Files that were written by workers that are
+  now active"
+  [conf root-dir]
+  (let [alive-ids (get-alive-ids conf (current-time-secs))
+        log-dirs (get-all-worker-dirs root-dir)
+        id->dir (identify-worker-log-dirs log-dirs)]
+    (apply sorted-set
+           (for [[id dir] id->dir
+                 :when (contains? alive-ids id)]
+             (.getCanonicalPath dir)))))
+
+(defn get-all-logs-for-rootdir [^File log-dir]
+  (reduce concat
+          (for [port-dir (get-all-worker-dirs log-dir)]
+            (into [] (DirectoryCleaner/getFilesForDir port-dir)))))
+
+(defn is-active-log [^File file]
+  (re-find #"\.(log|err|out|current|yaml|pid)$" (.getName file)))
+
+(defn sum-file-size
+  "Given a sequence of Files, sum their sizes."
+  [files]
+  (reduce #(+ %1 (.length %2)) 0 files))
+
+(defn per-workerdir-cleanup!
+  "Delete the oldest files in each overloaded worker log dir"
+  [^File root-dir size ^DirectoryCleaner cleaner]
+  (dofor [worker-dir (get-all-worker-dirs root-dir)]
+    (.deleteOldestWhileTooLarge cleaner (ArrayList. [worker-dir]) size true nil)))
+
+(defn global-log-cleanup!
+  "Delete the oldest files in overloaded worker-artifacts globally"
+  [^File root-dir size ^DirectoryCleaner cleaner]
+  (let [worker-dirs (ArrayList. (get-all-worker-dirs root-dir))
+        alive-worker-dirs (HashSet. (get-alive-worker-dirs *STORM-CONF* root-dir))]
+    (.deleteOldestWhileTooLarge cleaner worker-dirs size false alive-worker-dirs)))
+
+(defn cleanup-empty-topodir!
+  "Delete the topo dir if it contains zero port dirs"
+  [^File dir]
+  (let [topodir (.getParentFile dir)]
+    (if (empty? (.listFiles topodir))
+      (rmr (.getCanonicalPath topodir)))))
+
+(defn cleanup-fn!
+  "Delete old log dirs for which the workers are no longer alive"
+  [log-root-dir]
+  (let [now-secs (current-time-secs)
+        old-log-dirs (select-dirs-for-cleanup *STORM-CONF*
+                                              (* now-secs 1000)
+                                              log-root-dir)
+        total-size (*STORM-CONF* LOGVIEWER-MAX-SUM-WORKER-LOGS-SIZE-MB)
+        per-dir-size (*STORM-CONF* LOGVIEWER-MAX-PER-WORKER-LOGS-SIZE-MB)
+        per-dir-size (min per-dir-size (* total-size 0.5))
+        cleaner (DirectoryCleaner.)
+        dead-worker-dirs (get-dead-worker-dirs *STORM-CONF*
+                                               now-secs
+                                               old-log-dirs)]
+    (log-debug "log cleanup: now=" now-secs
+               " old log dirs " (pr-str (map #(.getName %) old-log-dirs))
+               " dead worker dirs " (pr-str
+                                       (map #(.getName %) dead-worker-dirs)))
+    (dofor [dir dead-worker-dirs]
+           (let [path (.getCanonicalPath dir)]
+             (log-message "Cleaning up: Removing " path)
+             (try (rmr path)
+                  (cleanup-empty-topodir! dir)
+                  (catch Exception ex (log-error ex)))))
+    (per-workerdir-cleanup! (File. log-root-dir) (* per-dir-size (* 1024 1024)) cleaner)
+    (let [size (* total-size (* 1024 1024))]
+      (global-log-cleanup! (File. log-root-dir) size cleaner))))
+
+(defn start-log-cleaner! [conf log-root-dir]
+  (let [interval-secs (conf LOGVIEWER-CLEANUP-INTERVAL-SECS)]
+    (when interval-secs
+      (log-debug "starting log cleanup thread at interval: " interval-secs)
+      (schedule-recurring (mk-timer :thread-name "logviewer-cleanup"
+                                    :kill-fn (fn [t]
+                                               (log-error t "Error when doing logs cleanup")
+                                               (exit-process! 20 "Error when doing log cleanup")))
+                          0 ;; Start immediately.
+                          interval-secs
+                          (fn [] (cleanup-fn! log-root-dir))))))
+
+(defn- skip-bytes
+  "FileInputStream#skip may not work the first time, so ensure it successfully
+  skips the given number of bytes."
+  [^InputStream stream n]
+  (loop [skipped 0]
+    (let [skipped (+ skipped (.skip stream (- n skipped)))]
+      (if (< skipped n) (recur skipped)))))
+
+(defn logfile-matches-filter?
+  [log-file-name]
+  (let [regex-string (str "worker.log.*")
+        regex-pattern (re-pattern regex-string)]
+    (not= (re-seq regex-pattern (.toString log-file-name)) nil)))
+
+(defn page-file
+  ([path tail]
+    (let [zip-file? (.endsWith path ".gz")
+          flen (if zip-file? (Utils/zipFileSize (clojure.java.io/file path)) (.length (clojure.java.io/file path)))
+          skip (- flen tail)]
+      (page-file path skip tail)))
+  ([path start length]
+    (let [zip-file? (.endsWith path ".gz")
+          flen (if zip-file? (Utils/zipFileSize (clojure.java.io/file path)) (.length (clojure.java.io/file path)))]
+      (with-open [input (if zip-file? (GZIPInputStream. (FileInputStream. path)) (FileInputStream. path))
+                  output (java.io.ByteArrayOutputStream.)]
+        (if (>= start flen)
+          (throw
+            (InvalidRequestException. "Cannot start past the end of the file")))
+        (if (> start 0) (skip-bytes input start))
+        (let [buffer (make-array Byte/TYPE 1024)]
+          (loop []
+            (when (< (.size output) length)
+              (let [size (.read input buffer 0 (min 1024 (- length (.size output))))]
+                (when (pos? size)
+                  (.write output buffer 0 size)
+                  (recur)))))
+        (.toString output))))))
+
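+;; Illustrative usage only (not part of this change); the path below is a
+;; placeholder:
+;;   (page-file "/path/to/worker.log" 51200)     ; last 51200 bytes of the file
+;;   (page-file "/path/to/worker.log" 0 51200)   ; first 51200 bytes of the file
+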
+(defn get-log-user-group-whitelist [fname]
+  (let [wl-file (get-log-metadata-file fname)
+        m (clojure-from-yaml-file wl-file)]
+    (if (not-nil? m)
+      (let [user-wl (.get m LOGS-USERS)
+            user-wl (if user-wl user-wl [])
+            group-wl (.get m LOGS-GROUPS)
+            group-wl (if group-wl group-wl [])]
+        [user-wl group-wl])
+      nil)))
+
+(def igroup-mapper (AuthUtils/GetGroupMappingServiceProviderPlugin *STORM-CONF*))
+(defn user-groups
+  [user]
+  (if (blank? user) [] (.getGroups igroup-mapper user)))
+
+(defn authorized-log-user? [user fname conf]
+  (if (or (blank? user) (blank? fname) (nil? (get-log-user-group-whitelist fname)))
+    nil
+    (let [groups (user-groups user)
+          [user-wl group-wl] (get-log-user-group-whitelist fname)
+          logs-users (concat (conf LOGS-USERS)
+                             (conf NIMBUS-ADMINS)
+                             user-wl)
+          logs-groups (concat (conf LOGS-GROUPS)
+                              group-wl)]
+       (or (some #(= % user) logs-users)
+           (< 0 (.size (intersection (set groups) (set logs-groups))))))))
+
+(defn log-root-dir
+  "Given an appender name, as configured, get the parent directory of the appender's log file.
+   Note that if the appender cannot be found or is not a RollingFileAppender, this throws a RuntimeException."
+  [appender-name]
+  (let [appender (.getAppender (.getConfiguration (LogManager/getContext)) appender-name)]
+    (if (and appender-name appender (instance? RollingFileAppender appender))
+      (.getParent (File. (.getFileName appender)))
+      (throw
+       (RuntimeException. "Log viewer could not find configured appender, or the appender is not a RollingFileAppender. Please check that the appender name configured in storm and log4j agree.")))))
+
+(defnk to-btn-link
+  "Create a link that is formatted like a button"
+  [url text :enabled true]
+  [:a {:href (java.net.URI. url)
+       :class (str "btn btn-default " (if enabled "enabled" "disabled"))} text])
+
+(defn search-file-form [fname]
+  [[:form {:action "logviewer_search.html" :id "search-box"}
+    "Search this file:"
+    [:input {:type "text" :name "search"}]
+    [:input {:type "hidden" :name "file" :value fname}]
+    [:input {:type "submit" :value "Search"}]]])
+
+(defn log-file-selection-form [log-files type]
+  [[:form {:action type :id "list-of-files"}
+    (drop-down "file" log-files)
+    [:input {:type "submit" :value "Switch file"}]]])
+
+(defn pager-links [fname start length file-size]
+  (let [prev-start (max 0 (- start length))
+        next-start (if (> file-size 0)
+                     (min (max 0 (- file-size length)) (+ start length))
+                     (+ start length))]
+    [[:div
+      (concat
+          [(to-btn-link (url "/log"
+                          {:file fname
+                           :start (max 0 (- start length))
+                           :length length})
+                          "Prev" :enabled (< prev-start start))]
+          [(to-btn-link (url "/log"
+                           {:file fname
+                            :start 0
+                            :length length}) "First")]
+          [(to-btn-link (url "/log"
+                           {:file fname
+                            :length length})
+                        "Last")]
+          [(to-btn-link (url "/log"
+                          {:file fname
+                           :start (min (max 0 (- file-size length))
+                                       (+ start length))
+                           :length length})
+                        "Next" :enabled (> next-start start))])]]))
+
+(defn- download-link [fname]
+  [[:p (link-to (url-format "/download/%s" fname) "Download Full File")]])
+
+(defn- daemon-download-link [fname]
+  [[:p (link-to (url-format "/daemondownload/%s" fname) "Download Full File")]])
+
+(defn- is-txt-file [fname]
+  (re-find #"\.(log.*|txt|yaml|pid)$" fname))
+
+(def default-bytes-per-page 51200)
+
+(defn log-page [fname start length grep user root-dir]
+  (if (or (blank? (*STORM-CONF* UI-FILTER))
+          (authorized-log-user? user fname *STORM-CONF*))
+    (let [file (.getCanonicalFile (File. root-dir fname))
+          path (.getCanonicalPath file)
+          zip-file? (.endsWith path ".gz")
+          topo-dir (.getParentFile (.getParentFile file))]
+      (if (and (.exists file)
+               (= (.getCanonicalFile (File. root-dir))
+                  (.getParentFile topo-dir)))
+        (let [file-length (if zip-file? (Utils/zipFileSize (clojure.java.io/file path)) (.length (clojure.java.io/file path)))
+              log-files (reduce clojure.set/union
+                          (sorted-set)
+                          (for [^File port-dir (.listFiles topo-dir)]
+                            (into [] (filter #(.isFile %) (DirectoryCleaner/getFilesForDir port-dir))))) ;all types of files included
+              files-str (for [file log-files]
+                          (get-topo-port-workerlog file))
+              reordered-files-str (conj (filter #(not= fname %) files-str) fname)
+              length (if length
+                       (min 10485760 length)
+                       default-bytes-per-page)
+              log-string (escape-html
+                           (if (is-txt-file fname)
+                             (if start
+                               (page-file path start length)
+                               (page-file path length))
+                             "This is a binary file and cannot be displayed. You may download the full file."))
+              start (or start (- file-length length))]
+          (if grep
+            (html [:pre#logContent
+                   (if grep
+                     (->> (.split log-string "\n")
+                          (filter #(.contains % grep))
+                          (string/join "\n"))
+                     log-string)])
+            (let [pager-data (if (is-txt-file fname) (pager-links fname start length file-length) nil)]
+              (html (concat (search-file-form fname)
+                            (log-file-selection-form reordered-files-str "log") ; list all files for this topology
+                            pager-data
+                            (download-link fname)
+                            [[:pre#logContent log-string]]
+                            pager-data)))))
+        (-> (resp/response "Page not found")
+            (resp/status 404))))
+    (if (nil? (get-log-user-group-whitelist fname))
+      (-> (resp/response "Page not found")
+        (resp/status 404))
+      (unauthorized-user-html user))))
+
+(defn daemonlog-page [fname start length grep user root-dir]
+  (let [file (.getCanonicalFile (File. root-dir fname))
+        file-length (.length file)
+        path (.getCanonicalPath file)
+        zip-file? (.endsWith path ".gz")]
+    (if (and (= (.getCanonicalFile (File. root-dir))
+                (.getParentFile file))
+             (.exists file))
+      (let [file-length (if zip-file? (Utils/zipFileSize (clojure.java.io/file path)) (.length (clojure.java.io/file path)))
+            length (if length
+                     (min 10485760 length)
+                     default-bytes-per-page)
+            log-files (into [] (filter #(.isFile %) (.listFiles (File. root-dir)))) ;all types of files included
+            files-str (for [file log-files]
+                        (.getName file))
+            reordered-files-str (conj (filter #(not= fname %) files-str) fname)
+            log-string (escape-html
+                         (if (is-txt-file fname)
+                           (if start
+                             (page-file path start length)
+                             (page-file path length))
+                           "This is a binary file and cannot be displayed. You may download the full file."))
+            start (or start (- file-length length))]
+        (if grep
+          (html [:pre#logContent
+                 (if grep
+                   (->> (.split log-string "\n")
+                        (filter #(.contains % grep))
+                        (string/join "\n"))
+                   log-string)])
+          (let [pager-data (if (is-txt-file fname) (pager-links fname start length file-length) nil)]
+            (html (concat (log-file-selection-form reordered-files-str "daemonlog") ; list all daemon logs
+                          pager-data
+                          (daemon-download-link fname)
+                          [[:pre#logContent log-string]]
+                          pager-data)))))
+      (-> (resp/response "Page not found")
+          (resp/status 404)))))
+
+(defn download-log-file [fname req resp user ^String root-dir]
+  (let [file (.getCanonicalFile (File. root-dir fname))]
+    (if (.exists file)
+      (if (or (blank? (*STORM-CONF* UI-FILTER))
+              (authorized-log-user? user fname *STORM-CONF*))
+        (-> (resp/response file)
+            (resp/content-type "application/octet-stream"))
+        (unauthorized-user-html user))
+      (-> (resp/response "Page not found")
+          (resp/status 404)))))
+
+(def grep-max-search-size 1024)
+(def grep-buf-size 2048)
+(def grep-context-size 128)
+
+(defn logviewer-port
+  []
+  (int (*STORM-CONF* LOGVIEWER-PORT)))
+
+(defn url-to-match-centered-in-log-page
+  [needle fname offset port]
+  (let [host (local-hostname)
+        port (logviewer-port)
+        fname (clojure.string/join file-path-separator (take-last 3 (split fname (re-pattern file-path-separator))))]
+    (url (str "http://" host ":" port "/log")
+      {:file fname
+       :start (max 0
+                (- offset
+                  (int (/ default-bytes-per-page 2))
+                  (int (/ (alength needle) -2)))) ;; dividing by -2 negates the term, so this adds half the needle length and centers the match
+       :length default-bytes-per-page})))
+
+(defnk mk-match-data
+  [^bytes needle ^ByteBuffer haystack haystack-offset file-offset fname
+   :before-bytes nil :after-bytes nil]
+  (let [url (url-to-match-centered-in-log-page needle
+              fname
+              file-offset
+              (*STORM-CONF* LOGVIEWER-PORT))
+        haystack-bytes (.array haystack)
+        before-string (if (>= haystack-offset grep-context-size)
+                        (String. haystack-bytes
+                          (- haystack-offset grep-context-size)
+                          grep-context-size
+                          "UTF-8")
+                        (let [num-desired (max 0 (- grep-context-size
+                                                   haystack-offset))
+                              before-size (if before-bytes
+                                            (alength before-bytes)
+                                            0)
+                              num-expected (min before-size num-desired)]
+                          (if (pos? num-expected)
+                            (str (String. before-bytes
+                                   (- before-size num-expected)
+                                   num-expected
+                                   "UTF-8")
+                              (String. haystack-bytes
+                                0
+                                haystack-offset
+                                "UTF-8"))
+                            (String. haystack-bytes
+                              0
+                              haystack-offset
+                              "UTF-8"))))
+        after-string (let [needle-size (alength needle)
+                           after-offset (+ haystack-offset needle-size)
+                           haystack-size (.limit haystack)]
+                       (if (< (+ after-offset grep-context-size) haystack-size)
+                         (String. haystack-bytes
+                           after-offset
+                           grep-context-size
+                           "UTF-8")
+                         (let [num-desired (- grep-context-size
+                                             (- haystack-size after-offset))
+                               after-size (if after-bytes
+                                            (alength after-bytes)
+                                            0)
+                               num-expected (min after-size num-desired)]
+                           (if (pos? num-expected)
+                             (str (String. haystack-bytes
+                                    after-offset
+                                    (- haystack-size after-offset)
+                                    "UTF-8")
+                               (String. after-bytes 0 num-expected "UTF-8"))
+                             (String. haystack-bytes
+                               after-offset
+                               (- haystack-size after-offset)
+                               "UTF-8")))))]
+    {"byteOffset" file-offset
+     "beforeString" before-string
+     "afterString" after-string
+     "matchString" (String. needle "UTF-8")
+     "logviewerURL" url}))
+
+(defn- try-read-ahead!
+  "Tries once to read ahead in the stream to fill the context and resets the
+  stream to its position before the call."
+  [^BufferedInputStream stream haystack offset file-len bytes-read]
+  (let [num-expected (min (- file-len bytes-read)
+                       grep-context-size)
+        after-bytes (byte-array num-expected)]
+    (.mark stream num-expected)
+    ;; Only try reading once.
+    (.read stream after-bytes 0 num-expected)
+    (.reset stream)
+    after-bytes))
+
+(defn offset-of-bytes
+  "Searches a given byte array for a match of a sub-array of bytes.  Returns
+  the offset to the byte that matches, or -1 if no match was found."
+  [^bytes buf ^bytes value init-offset]
+  {:pre [(> (alength value) 0)
+         (not (neg? init-offset))]}
+  (loop [offset init-offset
+         candidate-offset init-offset
+         val-offset 0]
+    (if-not (pos? (- (alength value) val-offset))
+      ;; Found
+      candidate-offset
+      (if (>= offset (alength buf))
+        ;; We ran out of buffer for the search.
+        -1
+        (if (not= (aget value val-offset) (aget buf offset))
+          ;; The match at this candidate offset failed, so start over with the
+          ;; next candidate byte from the buffer.
+          (let [new-offset (inc candidate-offset)]
+            (recur new-offset new-offset 0))
+          ;; So far it matches.  Keep going...
+          (recur (inc offset) candidate-offset (inc val-offset)))))))
+
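+;; Illustrative usage only (not part of this change):
+;;   (offset-of-bytes (.getBytes "hayneedlehay" "UTF-8")
+;;                    (.getBytes "needle" "UTF-8") 0)    ;=> 3
+;;   (offset-of-bytes (.getBytes "haystack" "UTF-8")
+;;                    (.getBytes "needle" "UTF-8") 0)    ;=> -1
+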
+(defn- buffer-substring-search!
+  "As the file is read into a buffer, 1/2 the buffer's size at a time, we
+  search the buffer for matches of the substring and return a list of zero or
+  more matches."
+  [file file-len offset-to-buf init-buf-offset stream bytes-skipped
+   bytes-read ^ByteBuffer haystack ^bytes needle initial-matches num-matches
+   ^bytes before-bytes]
+  (loop [buf-offset init-buf-offset
+         matches initial-matches]
+    (let [offset (offset-of-bytes (.array haystack) needle buf-offset)]
+      (if (and (< (count matches) num-matches) (not (neg? offset)))
+        (let [file-offset (+ offset-to-buf offset)
+              bytes-needed-after-match (- (.limit haystack)
+                                         grep-context-size
+                                         (alength needle))
+              before-arg (if (< offset grep-context-size) before-bytes)
+              after-arg (if (> offset bytes-needed-after-match)
+                          (try-read-ahead! stream
+                            haystack
+                            offset
+                            file-len
+                            bytes-read))]
+          (recur (+ offset (alength needle))
+            (conj matches
+              (mk-match-data needle
+                haystack
+                offset
+                file-offset
+                (.getCanonicalPath file)
+                :before-bytes before-arg
+                :after-bytes after-arg))))
+        (let [before-str-to-offset (min (.limit haystack)
+                                     grep-max-search-size)
+              before-str-from-offset (max 0 (- before-str-to-offset
+                                              grep-context-size))
+              new-before-bytes (Arrays/copyOfRange (.array haystack)
+                                 before-str-from-offset
+                                 before-str-to-offset)
+              ;; It's OK if new-byte-offset is negative.  This is normal if
+              ;; we are out of bytes to read from a small file.
+              new-byte-offset (if (>= (count matches) num-matches)
+                                (+ (get (last matches) "byteOffset")
+                                  (alength needle))
+                                (+ bytes-skipped
+                                  bytes-read
+                                  (- grep-max-search-size)))]
+          [matches new-byte-offset new-before-bytes])))))
+
+(defn- mk-grep-response
+  "This response data only includes a next byte offset if there is more of the
+  file to read."
+  [search-bytes offset matches next-byte-offset]
+  (merge {"searchString" (String. search-bytes "UTF-8")
+          "startByteOffset" offset
+          "matches" matches}
+    (and next-byte-offset {"nextByteOffset" next-byte-offset})))
+
+(defn rotate-grep-buffer!
+  [^ByteBuffer buf ^BufferedInputStream stream total-bytes-read file file-len]
+  (let [buf-arr (.array buf)]
+    ;; Copy the 2nd half of the buffer to the first half.
+    (System/arraycopy buf-arr
+      grep-max-search-size
+      buf-arr
+      0
+      grep-max-search-size)
+    ;; Zero-out the 2nd half to prevent accidental matches.
+    (Arrays/fill buf-arr
+      grep-max-search-size
+      (count buf-arr)
+      (byte 0))
+    ;; Fill the 2nd half with new bytes from the stream.
+    (let [bytes-read (.read stream
+                       buf-arr
+                       grep-max-search-size
+                       (min file-len grep-max-search-size))]
+      (.limit buf (+ grep-max-search-size bytes-read))
+      (swap! total-bytes-read + bytes-read))))
+
+(defnk substring-search
+  "Searches for a substring in a log file, starting at the given offset,
+  returning the given number of matches, surrounded by the given number of
+  context lines.  Other information is included to be useful for progressively
+  searching through a file for display in a UI. The search string must be
+  grep-max-search-size bytes or fewer when encoded as UTF-8."
+  [file ^String search-string :num-matches 10 :start-byte-offset 0]
+  {:pre [(not (empty? search-string))
+         (<= (count (.getBytes search-string "UTF-8")) grep-max-search-size)]}
+  (let [zip-file? (.endsWith (.getName file) ".gz")
+        f-input-stream (FileInputStream. file)
+        gzipped-input-stream (if zip-file?
+                               (GZIPInputStream. f-input-stream)
+                               f-input-stream)
+        stream ^BufferedInputStream (BufferedInputStream.
+                                      gzipped-input-stream)
+        file-len (if zip-file? (Utils/zipFileSize file) (.length file))
+        buf ^ByteBuffer (ByteBuffer/allocate grep-buf-size)
+        buf-arr ^bytes (.array buf)
+        string nil
+        total-bytes-read (atom 0)
+        matches []
+        search-bytes ^bytes (.getBytes search-string "UTF-8")
+        num-matches (or num-matches 10)
+        start-byte-offset (or start-byte-offset 0)]
+    ;; Start at the part of the log file we are interested in.
+    ;; Allow searching when start-byte-offset == file-len so it doesn't blow up on 0-length files
+    (if (> start-byte-offset file-len)
+      (throw
+        (InvalidRequestException. "Cannot search past the end of the file")))
+    (when (> start-byte-offset 0)
+      (skip-bytes stream start-byte-offset))
+    (java.util.Arrays/fill buf-arr (byte 0))
+    (let [bytes-read (.read stream buf-arr 0 (min file-len grep-buf-size))]
+      (.limit buf bytes-read)
+      (swap! total-bytes-read + bytes-read))
+    (loop [initial-matches []
+           init-buf-offset 0
+           byte-offset start-byte-offset
+           before-bytes nil]
+      (let [[matches new-byte-offset new-before-bytes]
+            (buffer-substring-search! file
+              file-len
+              byte-offset
+              init-buf-offset
+              stream
+              start-byte-offset
+              @total-bytes-read
+              buf
+              search-bytes
+              initial-matches
+              num-matches
+              before-bytes)]
+        (if (and (< (count matches) num-matches)
+              (< (+ @total-bytes-read start-byte-offset) file-len))
+          (let [;; The start index is positioned to find any possible
+                ;; occurrence of the search string that did not quite fit in
+                ;; the buffer on the previous read.
+                new-buf-offset (- (min (.limit ^ByteBuffer buf)
+                                    grep-max-search-size)
+                                 (alength search-bytes))]
+            (rotate-grep-buffer! buf stream total-bytes-read file file-len)
+            (when (< @total-bytes-read 0)
+              (throw (InvalidRequestException. "Cannot search past the end of the file")))
+            (recur matches
+              new-buf-offset
+              new-byte-offset
+              new-before-bytes))
+          (mk-grep-response search-bytes
+            start-byte-offset
+            matches
+            (if-not (and (< (count matches) num-matches)
+                      (>= @total-bytes-read file-len))
+              (let [next-byte-offset (+ (get (last matches)
+                                          "byteOffset")
+                                       (alength search-bytes))]
+                (if (> file-len next-byte-offset)
+                  next-byte-offset)))))))))
+
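+;; Illustrative usage sketch, not part of the original change; the file path
+;; and search values are hypothetical.  Search a worker log from the start of
+;; the file for up to 5 matches:
+(comment
+  (substring-search (File. "/var/log/storm/worker.log")
+                    "ERROR"
+                    :num-matches 5
+                    :start-byte-offset 0)
+  ;; Returns the map built by mk-grep-response above; "nextByteOffset" is
+  ;; present only when more of the file remains to be searched.
+  )
+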
+(defn- try-parse-int-param
+  [nam value]
+  (try
+    (Integer/parseInt value)
+    (catch java.lang.NumberFormatException e
+      (->
+        (str "Could not parse " nam " to an integer")
+        (InvalidRequestException. e)
+        throw))))
+
+(defn search-log-file
+  [fname user ^String root-dir search num-matches offset callback origin]
+  (let [file (.getCanonicalFile (File. root-dir fname))]
+    (if (.exists file)
+      (if (or (blank? (*STORM-CONF* UI-FILTER))
+            (authorized-log-user? user fname *STORM-CONF*))
+        (let [num-matches-int (if num-matches
+                                (try-parse-int-param "num-matches"
+                                  num-matches))
+              offset-int (if offset
+                           (try-parse-int-param "start-byte-offset" offset))]
+          (try
+            (if (and (not (empty? search))
+                  (<= (count (.getBytes search "UTF-8")) grep-max-search-size))
+              (json-response
+                (substring-search file
+                  search
+                  :num-matches num-matches-int
+                  :start-byte-offset offset-int)
+                callback
+                :headers {"Access-Control-Allow-Origin" origin
+                          "Access-Control-Allow-Credentials" "true"})
+              (throw
+                (InvalidRequestException.
+                  (str "Search substring must be between 1 and 1024 UTF-8 "
+                    "bytes in size (inclusive)"))))
+            (catch Exception ex
+              (json-response (exception->json ex) callback :status 500))))
+        (json-response (unauthorized-user-json user) callback :status 401))
+      (json-response {"error" "Not Found"
+                      "errorMessage" "The file was not found on this node."}
+        callback
+        :status 404))))
+
+(defn find-n-matches [logs n file-offset offset search]
+  (let [logs (drop file-offset logs)
+        wrap-matches-fn (fn [matches]
+                          {"fileOffset" file-offset
+                           "searchString" search
+                           "matches" matches})]
+    (loop [matches []
+           logs logs
+           offset offset
+           file-offset file-offset
+           match-count 0]
+      (if (empty? logs)
+        (wrap-matches-fn matches)
+        (let [these-matches (try
+                              (log-debug "Looking through " (first logs))
+                              (substring-search (first logs)
+                                search
+                                :num-matches (- n match-count)
+                                :start-byte-offset offset)
+                              (catch InvalidRequestException e
+                                (log-error e "Can't search past end of file.")
+                                {}))
+              file-name (get-topo-port-workerlog (first logs))
+              new-matches (conj matches
+                            (merge these-matches
+                              { "fileName" file-name
+                                "port" (first (take-last 2 (split (.getCanonicalPath (first logs)) (re-pattern file-path-separator))))}))
+              new-count (+ match-count (count (these-matches "matches")))]
+          (if (empty? these-matches)
+            (recur matches (rest logs) 0 (+ file-offset 1) match-count)
+            (if (>= new-count n)
+              (wrap-matches-fn new-matches)
+              (recur new-matches (rest logs) 0 (+ file-offset 1) new-count))))))))
+
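+;; Illustrative sketch, not part of the original change; the file names are
+;; hypothetical.  Search the logs of one port for up to 10 matches in total:
+(comment
+  (find-n-matches [(File. "worker.log") (File. "worker.log.1")] 10 0 0 "ERROR")
+  ;; => {"fileOffset" 0
+  ;;     "searchString" "ERROR"
+  ;;     "matches" [...]}  ; one entry per searched file, each tagged with
+  ;;                       ; its "fileName" and "port"
+  )
+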
+(defn logs-for-port
+  "Get the filtered, authorized, sorted log files for a port."
+  [user port-dir]
+  (let [filter-authorized-fn (fn [user logs]
+                               (filter #(or
+                                          (blank? (*STORM-CONF* UI-FILTER))
+                                          (authorized-log-user? user (get-topo-port-workerlog %) *STORM-CONF*)) logs))]
+    (sort #(compare (.lastModified %2) (.lastModified %1))
+      (filter-authorized-fn
+        user
+        (filter #(re-find worker-log-filename-pattern (.getName %)) (DirectoryCleaner/getFilesForDir port-dir))))))
+
+(defn deep-search-logs-for-topology
+  [topology-id user ^String root-dir search num-matches port file-offset offset search-archived? callback origin]
+  (json-response
+    (if (or (not search) (not (.exists (File. (str root-dir file-path-separator topology-id)))))
+      []
+      (let [file-offset (if file-offset (Integer/parseInt file-offset) 0)
+            offset (if offset (Integer/parseInt offset) 0)
+            num-matches (if num-matches (Integer/parseInt num-matches) 1)
+            port-dirs (vec (.listFiles (File. (str root-dir file-path-separator topology-id))))
+            logs-for-port-fn (partial logs-for-port user)]
+        (if (or (not port) (= "*" port))
+          ;; Check for all ports
+          (let [filtered-logs (filter (comp not empty?) (map logs-for-port-fn port-dirs))]
+            (if search-archived?
+              (map #(find-n-matches % num-matches 0 0 search)
+                filtered-logs)
+              (map #(find-n-matches % num-matches 0 0 search)
+                (map (comp vector first) filtered-logs))))
+          ;; Check just the one port
+          (if (not (contains? (into #{} (map str (*STORM-CONF* SUPERVISOR-SLOTS-PORTS))) port))
+            []
+            (let [port-dir (File. (str root-dir file-path-separator topology-id file-path-separator port))]
+              (if (or (not (.exists port-dir)) (empty? (logs-for-port user port-dir)))
+                []
+                (let [filtered-logs (logs-for-port user port-dir)]
+                  (if search-archived?
+                    (find-n-matches filtered-logs num-matches file-offset offset search)
+                    (find-n-matches [(first filtered-logs)] num-matches 0 offset search)))))))))
+    callback
+    :headers {"Access-Control-Allow-Origin" origin
+              "Access-Control-Allow-Credentials" "true"}))
+
+(defn log-template
+  ([body] (log-template body nil nil))
+  ([body fname user]
+    (html4
+     [:head
+      [:title (str (escape-html fname) " - Storm Log Viewer")]
+      (include-css "/css/bootstrap-3.3.1.min.css")
+      (include-css "/css/jquery.dataTables.1.10.4.min.css")
+      (include-css "/css/style.css")
+      ]
+     [:body
+      (concat
+        (when (not (blank? user)) [[:div.ui-user [:p "User: " user]]])
+        [[:div.ui-note [:p "Note: the drop-down list shows at most 1024 files for each worker directory."]]]
+        [[:h3 (escape-html fname)]]
+        (seq body))
+      ])))
+
+(def http-creds-handler (AuthUtils/GetUiHttpCredentialsPlugin *STORM-CONF*))
+
+(defn- parse-long-from-map [m k]
+  (try
+    (Long/parseLong (k m))
+    (catch NumberFormatException ex
+      (throw (InvalidRequestException.
+               (str "Could not make an integer out of the query parameter '"
+                    (name k) "'")
+               ex)))))
+
+(defn list-log-files
+  [user topoId port log-root callback origin]
+  (let [file-results
+        (if (nil? topoId)
+          (if (nil? port)
+            (get-all-logs-for-rootdir (File. log-root))
+            (reduce concat
+              (for [topo-dir (.listFiles (File. log-root))]
+                (reduce concat
+                  (for [port-dir (.listFiles topo-dir)]
+                    (if (= (str port) (.getName port-dir))
+                      (into [] (DirectoryCleaner/getFilesForDir port-dir))))))))
+          (if (nil? port)
+            (let [topo-dir (File. (str log-root file-path-separator topoId))]
+              (if (.exists topo-dir)
+                (reduce concat
+                  (for [port-dir (.listFiles topo-dir)]
+                    (into [] (DirectoryCleaner/getFilesForDir port-dir))))
+                []))
+            (let [port-dir (get-worker-dir-from-root log-root topoId port)]
+              (if (.exists port-dir)
+                (into [] (DirectoryCleaner/getFilesForDir port-dir))
+                []))))
+        file-strs (sort (for [file file-results]
+                          (get-topo-port-workerlog file)))]
+    (json-response file-strs
+      callback
+      :headers {"Access-Control-Allow-Origin" origin
+                "Access-Control-Allow-Credentials" "true"})))
+
+(defn get-profiler-dump-files
+  [dir]
+  (filter (comp not nil?)
+        (for [f (DirectoryCleaner/getFilesForDir dir)]
+          (let [name (.getName f)]
+            (if (or
+                  (.endsWith name ".txt")
+                  (.endsWith name ".jfr")
+                  (.endsWith name ".bin"))
+              (.getName f))))))
+
+(defroutes log-routes
+  (GET "/log" [:as req & m]
+    (try
+      (mark! logviewer:num-log-page-http-requests)
+      (let [servlet-request (:servlet-request req)
+            log-root (:log-root req)
+            user (.getUserName http-creds-handler servlet-request)
+            start (if (:start m) (parse-long-from-map m :start))
+            length (if (:length m) (parse-long-from-map m :length))
+            file (url-decode (:file m))]
+        (log-template (log-page file start length (:grep m) user log-root)
+          file user))
+      (catch InvalidRequestException ex
+        (log-error ex)
+        (ring-response-from-exception ex))))
+  (GET "/dumps/:topo-id/:host-port/:filename"
+       [:as {:keys [servlet-request servlet-response log-root]} topo-id host-port filename &m]
+     (let [user (.getUserName http-creds-handler servlet-request)
+           port (second (split host-port #":"))
+           dir (File. (str log-root
+                           file-path-separator
+                           topo-id
+                           file-path-separator
+                           port))
+           file (File. (str log-root
+                            file-path-separator
+                            topo-id
+                            file-path-separator
+                            port
+                            file-path-separator
+                            filename))]
+       (if (and (.exists dir) (.exists file))
+         (if (or (blank? (*STORM-CONF* UI-FILTER))
+               (authorized-log-user? user 
+                                     (str topo-id file-path-separator port file-path-separator "worker.log")
+                                     *STORM-CONF*))
+           (-> (resp/response file)
+               (resp/content-type "application/octet-stream"))
+           (unauthorized-user-html user))
+         (-> (resp/response "Page not found")
+           (resp/status 404)))))
+  (GET "/dumps/:topo-id/:host-port"
+       [:as {:keys [servlet-request servlet-response log-root]} topo-id host-port &m]
+     (let [user (.getUserName http-creds-handler servlet-request)
+           port (second (split host-port #":"))
+           dir (File. (str log-root
+                           file-path-separator
+                           topo-id
+                           file-path-separator
+                           port))]
+       (if (.exists dir)
+         (if (or (blank? (*STORM-CONF* UI-FILTER))
+               (authorized-log-user? user 
+                                     (str topo-id file-path-separator port file-path-separator "worker.log")
+                                     *STORM-CONF*))
+           (html4
+             [:head
+              [:title "File Dumps - Storm Log Viewer"]
+              (include-css "/css/bootstrap-3.3.1.min.css")
+              (include-css "/css/jquery.dataTables.1.10.4.min.css")
+              (include-css "/css/style.css")]
+             [:body
+              [:ul
+               (for [file (get-profiler-dump-files dir)]
+                 [:li
+                  [:a {:href (str "/dumps/" topo-id "/" host-port "/" file)} file ]])]])
+           (unauthorized-user-html user))
+         (-> (resp/response "Page not found")
+           (resp/status 404)))))
+  (GET "/daemonlog" [:as req & m]
+    (try
+      (mark! logviewer:num-daemonlog-page-http-requests)
+      (let [servlet-request (:servlet-request req)
+            daemonlog-root (:daemonlog-root req)
+            user (.getUserName http-creds-handler servlet-request)
+            start (if (:start m) (parse-long-from-map m :start))
+            length (if (:length m) (parse-long-from-map m :length))
+            file (url-decode (:file m))]
+        (log-template (daemonlog-page file start length (:grep m) user daemonlog-root)
+          file user))
+      (catch InvalidRequestException ex
+        (log-error ex)
+        (ring-response-from-exception ex))))
+  (GET "/download/:file" [:as {:keys [servlet-request servlet-response log-root]} file & m]
+    (try
+      (mark! logviewer:num-download-log-file-http-requests)
+      (let [user (.getUserName http-creds-handler servlet-request)]
+        (download-log-file file servlet-request servlet-response user log-root))
+      (catch InvalidRequestException ex
+        (log-error ex)
+        (ring-response-from-exception ex))))
+  (GET "/daemondownload/:file" [:as {:keys [servlet-request servlet-response daemonlog-root]} file & m]
+    (try
+      (mark! logviewer:num-download-log-daemon-file-http-requests)
+      (let [user (.getUserName http-creds-handler servlet-request)]
+        (download-log-file file servlet-request servlet-response user daemonlog-root))
+      (catch InvalidRequestException ex
+        (log-error ex)
+        (ring-response-from-exception ex))))
+  (GET "/search/:file" [:as {:keys [servlet-request servlet-response log-root]} file & m]
+    ;; We do not use servlet-response here, but do not remove it from the
+    ;; :keys list, or this rule could stop working when an authentication
+    ;; filter is configured.
+    (try
+      (let [user (.getUserName http-creds-handler servlet-request)]
+        (search-log-file (url-decode file)
+          user
+          log-root
+          (:search-string m)
+          (:num-matches m)
+          (:start-byte-offset m)
+          (:callback m)
+          (.getHeader servlet-request "Origin")))
+      (catch InvalidRequestException ex
+        (log-error ex)
+        (json-response (exception->json ex) (:callback m) :status 400))))
+  (GET "/deepSearch/:topo-id" [:as {:keys [servlet-request servlet-response log-root]} topo-id & m]
+    ;; We do not use servlet-response here, but do not remove it from the
+    ;; :keys list, or this rule could stop working when an authentication
+    ;; filter is configured.
+    (try
+      (let [user (.getUserName http-creds-handler servlet-request)]
+        (deep-search-logs-for-topology topo-id
+          user
+          log-root
+          (:search-string m)
+          (:num-matches m)
+          (:port m)
+          (:start-file-offset m)
+          (:start-byte-offset m)
+          (:search-archived m)
+          (:callback m)
+          (.getHeader servlet-request "Origin")))
+      (catch InvalidRequestException ex
+        (log-error ex)
+        (json-response (exception->json ex) (:callback m) :status 400))))
+  (GET "/searchLogs" [:as req & m]
+    (try
+      (let [servlet-request (:servlet-request req)
+            user (.getUserName http-creds-handler servlet-request)]
+        (list-log-files user
+          (:topoId m)
+          (:port m)
+          (:log-root req)
+          (:callback m)
+          (.getHeader servlet-request "Origin")))
+      (catch InvalidRequestException ex
+        (log-error ex)
+        (json-response (exception->json ex) (:callback m) :status 400))))
+  (GET "/listLogs" [:as req & m]
+    (try
+      (mark! logviewer:num-list-logs-http-requests)
+      (let [servlet-request (:servlet-request req)
+            user (.getUserName http-creds-handler servlet-request)]
+        (list-log-files user
+          (:topoId m)
+          (:port m)
+          (:log-root req)
+          (:callback m)
+          (.getHeader servlet-request "Origin")))
+      (catch InvalidRequestException ex
+        (log-error ex)
+        (json-response (exception->json ex) (:callback m) :status 400))))
+  (route/resources "/")
+  (route/not-found "Page not found"))
+
+(defn conf-middleware
+  "For passing the storm configuration with each request."
+  [app log-root daemonlog-root]
+  (fn [req]
+    (app (assoc req :log-root log-root :daemonlog-root daemonlog-root))))
+
+(defn start-logviewer! [conf log-root-dir daemonlog-root-dir]
+  (try
+    (let [header-buffer-size (int (.get conf UI-HEADER-BUFFER-BYTES))
+          filter-class (conf UI-FILTER)
+          filter-params (conf UI-FILTER-PARAMS)
+          logapp (handler/api (-> log-routes
+                                requests-middleware))  ;; query params as map
+          middle (conf-middleware logapp log-root-dir daemonlog-root-dir)
+          filters-confs (if (conf UI-FILTER)
+                          [{:filter-class filter-class
+                            :filter-params (or (conf UI-FILTER-PARAMS) {})}]
+                          [])
+          filters-confs (concat filters-confs
+                          [{:filter-class "org.eclipse.jetty.servlets.GzipFilter"
+                            :filter-name "Gzipper"
+                            :filter-params {}}])
+          https-port (int (or (conf LOGVIEWER-HTTPS-PORT) 0))
+          keystore-path (conf LOGVIEWER-HTTPS-KEYSTORE-PATH)
+          keystore-pass (conf LOGVIEWER-HTTPS-KEYSTORE-PASSWORD)
+          keystore-type (conf LOGVIEWER-HTTPS-KEYSTORE-TYPE)
+          key-password (conf LOGVIEWER-HTTPS-KEY-PASSWORD)
+          truststore-path (conf LOGVIEWER-HTTPS-TRUSTSTORE-PATH)
+          truststore-password (conf LOGVIEWER-HTTPS-TRUSTSTORE-PASSWORD)
+          truststore-type (conf LOGVIEWER-HTTPS-TRUSTSTORE-TYPE)
+          want-client-auth (conf LOGVIEWER-HTTPS-WANT-CLIENT-AUTH)
+          need-client-auth (conf LOGVIEWER-HTTPS-NEED-CLIENT-AUTH)]
+      (storm-run-jetty {:port (int (conf LOGVIEWER-PORT))
+                        :configurator (fn [server]
+                                        (config-ssl server
+                                                    https-port
+                                                    keystore-path
+                                                    keystore-pass
+                                                    keystore-type
+                                                    key-password
+                                                    truststore-path
+                                                    truststore-password
+                                                    truststore-type
+                                                    want-client-auth
+                                                    need-client-auth)
+                                        (config-filter server middle filters-confs))}))
+  (catch Exception ex
+    (log-error ex))))
+
+(defn -main []
+  (let [conf (read-storm-config)
+        log-root (worker-artifacts-root conf)
+        daemonlog-root (log-root-dir (conf LOGVIEWER-APPENDER-NAME))]
+    (setup-default-uncaught-exception-handler)
+    (start-log-cleaner! conf log-root)
+    (log-message "Starting logviewer server for storm version '"
+                 STORM-VERSION
+                 "'")
+    (start-logviewer! conf log-root daemonlog-root)
+    (start-metrics-reporters)))


[15/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/stats.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/stats.clj b/storm-core/src/clj/org/apache/storm/stats.clj
new file mode 100644
index 0000000..68b16fd
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/stats.clj
@@ -0,0 +1,1521 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+
+(ns org.apache.storm.stats
+  (:import [org.apache.storm.generated Nimbus Nimbus$Processor Nimbus$Iface StormTopology ShellComponent
+            NotAliveException AlreadyAliveException InvalidTopologyException GlobalStreamId
+            ClusterSummary TopologyInfo TopologySummary ExecutorInfo ExecutorSummary ExecutorStats
+            ExecutorSpecificStats SpoutStats BoltStats ErrorInfo
+            SupervisorSummary CommonAggregateStats ComponentAggregateStats
+            ComponentPageInfo ComponentType BoltAggregateStats
+            ExecutorAggregateStats SpecificAggregateStats
+            SpoutAggregateStats TopologyPageInfo TopologyStats])
+  (:import [org.apache.storm.utils Utils])
+  (:import [org.apache.storm.metric.internal MultiCountStatAndMetric MultiLatencyStatAndMetric])
+  (:use [org.apache.storm log util])
+  (:use [clojure.math.numeric-tower :only [ceil]]))
+
+(def TEN-MIN-IN-SECONDS (* 10 60))
+
+(def COMMON-FIELDS [:emitted :transferred])
+(defrecord CommonStats [^MultiCountStatAndMetric emitted
+                        ^MultiCountStatAndMetric transferred
+                        rate])
+
+(def BOLT-FIELDS [:acked :failed :process-latencies :executed :execute-latencies])
+;;acked and failed count individual tuples
+(defrecord BoltExecutorStats [^CommonStats common
+                              ^MultiCountStatAndMetric acked
+                              ^MultiCountStatAndMetric failed
+                              ^MultiLatencyStatAndMetric process-latencies
+                              ^MultiCountStatAndMetric executed
+                              ^MultiLatencyStatAndMetric execute-latencies])
+
+(def SPOUT-FIELDS [:acked :failed :complete-latencies])
+;;acked and failed count tuple completion
+(defrecord SpoutExecutorStats [^CommonStats common
+                               ^MultiCountStatAndMetric acked
+                               ^MultiCountStatAndMetric failed
+                               ^MultiLatencyStatAndMetric complete-latencies])
+
+(def NUM-STAT-BUCKETS 20)
+
+(defn- mk-common-stats
+  [rate]
+  (CommonStats.
+    (MultiCountStatAndMetric. NUM-STAT-BUCKETS)
+    (MultiCountStatAndMetric. NUM-STAT-BUCKETS)
+    rate))
+
+(defn mk-bolt-stats
+  [rate]
+  (BoltExecutorStats.
+    (mk-common-stats rate)
+    (MultiCountStatAndMetric. NUM-STAT-BUCKETS)
+    (MultiCountStatAndMetric. NUM-STAT-BUCKETS)
+    (MultiLatencyStatAndMetric. NUM-STAT-BUCKETS)
+    (MultiCountStatAndMetric. NUM-STAT-BUCKETS)
+    (MultiLatencyStatAndMetric. NUM-STAT-BUCKETS)))
+
+(defn mk-spout-stats
+  [rate]
+  (SpoutExecutorStats.
+    (mk-common-stats rate)
+    (MultiCountStatAndMetric. NUM-STAT-BUCKETS)
+    (MultiCountStatAndMetric. NUM-STAT-BUCKETS)
+    (MultiLatencyStatAndMetric. NUM-STAT-BUCKETS)))
+
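+;; Illustrative sketch, not part of the original change; the rate and latency
+;; values are hypothetical.  Executor code creates one of these stats records
+;; with its sampling rate and updates it as tuples are processed:
+(comment
+  (let [stats (mk-spout-stats 20)]            ;; rate = sampling multiplier
+    (spout-acked-tuple! stats "default" 12)   ;; stream "default", 12 ms latency
+    (render-stats! stats)))                   ;; flatten to a plain map
+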
+(defmacro stats-rate
+  [stats]
+  `(-> ~stats :common :rate))
+
+(defmacro stats-emitted
+  [stats]
+  `(-> ~stats :common :emitted))
+
+(defmacro stats-transferred
+  [stats]
+  `(-> ~stats :common :transferred))
+
+(defmacro stats-executed
+  [stats]
+  `(:executed ~stats))
+
+(defmacro stats-acked
+  [stats]
+  `(:acked ~stats))
+
+(defmacro stats-failed
+  [stats]
+  `(:failed ~stats))
+
+(defmacro stats-execute-latencies
+  [stats]
+  `(:execute-latencies ~stats))
+
+(defmacro stats-process-latencies
+  [stats]
+  `(:process-latencies ~stats))
+
+(defmacro stats-complete-latencies
+  [stats]
+  `(:complete-latencies ~stats))
+
+(defn emitted-tuple!
+  [stats stream]
+  (.incBy ^MultiCountStatAndMetric (stats-emitted stats) ^Object stream ^long (stats-rate stats)))
+
+(defn transferred-tuples!
+  [stats stream amt]
+  (.incBy ^MultiCountStatAndMetric (stats-transferred stats) ^Object stream ^long (* (stats-rate stats) amt)))
+
+(defn bolt-execute-tuple!
+  [^BoltExecutorStats stats component stream latency-ms]
+  (let [key [component stream]
+        ^MultiCountStatAndMetric executed (stats-executed stats)
+        ^MultiLatencyStatAndMetric exec-lat (stats-execute-latencies stats)]
+    (.incBy executed key (stats-rate stats))
+    (.record exec-lat key latency-ms)))
+
+(defn bolt-acked-tuple!
+  [^BoltExecutorStats stats component stream latency-ms]
+  (let [key [component stream]
+        ^MultiCountStatAndMetric acked (stats-acked stats)
+        ^MultiLatencyStatAndMetric process-lat (stats-process-latencies stats)]
+    (.incBy acked key (stats-rate stats))
+    (.record process-lat key latency-ms)))
+
+(defn bolt-failed-tuple!
+  [^BoltExecutorStats stats component stream latency-ms]
+  (let [key [component stream]
+        ^MultiCountStatAndMetric failed (stats-failed stats)]
+    (.incBy failed key (stats-rate stats))))
+
+(defn spout-acked-tuple!
+  [^SpoutExecutorStats stats stream latency-ms]
+  (.incBy ^MultiCountStatAndMetric (stats-acked stats) stream (stats-rate stats))
+  (.record ^MultiLatencyStatAndMetric (stats-complete-latencies stats) stream latency-ms))
+
+(defn spout-failed-tuple!
+  [^SpoutExecutorStats stats stream latency-ms]
+  (.incBy ^MultiCountStatAndMetric (stats-failed stats) stream (stats-rate stats)))
+
+(defn- cleanup-stat! [stat]
+  (.close stat))
+
+(defn- cleanup-common-stats!
+  [^CommonStats stats]
+  (doseq [f COMMON-FIELDS]
+    (cleanup-stat! (f stats))))
+
+(defn cleanup-bolt-stats!
+  [^BoltExecutorStats stats]
+  (cleanup-common-stats! (:common stats))
+  (doseq [f BOLT-FIELDS]
+    (cleanup-stat! (f stats))))
+
+(defn cleanup-spout-stats!
+  [^SpoutExecutorStats stats]
+  (cleanup-common-stats! (:common stats))
+  (doseq [f SPOUT-FIELDS]
+    (cleanup-stat! (f stats))))
+
+(defn- value-stats
+  [stats fields]
+  (into {} (dofor [f fields]
+                  [f (if (instance? MultiCountStatAndMetric (f stats))
+                         (.getTimeCounts ^MultiCountStatAndMetric (f stats))
+                         (.getTimeLatAvg ^MultiLatencyStatAndMetric (f stats)))])))
+
+(defn- value-common-stats
+  [^CommonStats stats]
+  (merge
+    (value-stats stats COMMON-FIELDS)
+    {:rate (:rate stats)}))
+
+(defn value-bolt-stats!
+  [^BoltExecutorStats stats]
+  (cleanup-bolt-stats! stats)
+  (merge (value-common-stats (:common stats))
+         (value-stats stats BOLT-FIELDS)
+         {:type :bolt}))
+
+(defn value-spout-stats!
+  [^SpoutExecutorStats stats]
+  (cleanup-spout-stats! stats)
+  (merge (value-common-stats (:common stats))
+         (value-stats stats SPOUT-FIELDS)
+         {:type :spout}))
+
+(defmulti render-stats! class-selector)
+
+(defmethod render-stats! SpoutExecutorStats
+  [stats]
+  (value-spout-stats! stats))
+
+(defmethod render-stats! BoltExecutorStats
+  [stats]
+  (value-bolt-stats! stats))
+
+(defmulti thriftify-specific-stats :type)
+(defmulti clojurify-specific-stats class-selector)
+
+(defn window-set-converter
+  ([stats key-fn first-key-fun]
+    (into {}
+      (for [[k v] stats]
+        ;; Apply first-key-fun only to the outer (first-level) key.
+        [(first-key-fun k)
+         (into {} (for [[k2 v2] v]
+                    [(key-fn k2) v2]))])))
+  ([stats first-key-fun]
+    (window-set-converter stats identity first-key-fun)))
+
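+;; Illustrative sketch, not part of the original change; the window and stream
+;; values are hypothetical.  The 3-arity version maps key-fn over the inner
+;; keys and first-key-fun over the outer (window) keys:
+(comment
+  (window-set-converter {600 {"default" 3} 10800 {"default" 7}} identity str)
+  ;; => {"600" {"default" 3}, "10800" {"default" 7}}
+  )
+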
+(defn to-global-stream-id
+  [[component stream]]
+  (GlobalStreamId. component stream))
+
+(defn from-global-stream-id [global-stream-id]
+  [(.get_componentId global-stream-id) (.get_streamId global-stream-id)])
+
+(defmethod clojurify-specific-stats BoltStats [^BoltStats stats]
+  [(window-set-converter (.get_acked stats) from-global-stream-id identity)
+   (window-set-converter (.get_failed stats) from-global-stream-id identity)
+   (window-set-converter (.get_process_ms_avg stats) from-global-stream-id identity)
+   (window-set-converter (.get_executed stats) from-global-stream-id identity)
+   (window-set-converter (.get_execute_ms_avg stats) from-global-stream-id identity)])
+
+(defmethod clojurify-specific-stats SpoutStats [^SpoutStats stats]
+  [(.get_acked stats)
+   (.get_failed stats)
+   (.get_complete_ms_avg stats)])
+
+
+(defn clojurify-executor-stats
+  [^ExecutorStats stats]
+  (let [ specific-stats (.get_specific stats)
+         is_bolt? (.is_set_bolt specific-stats)
+         specific-stats (if is_bolt? (.get_bolt specific-stats) (.get_spout specific-stats))
+         specific-stats (clojurify-specific-stats specific-stats)
+         common-stats (CommonStats. (.get_emitted stats)
+                                    (.get_transferred stats)
+                                    (.get_rate stats))]
+    (if is_bolt?
+      ;; The worker heartbeat does not store BoltExecutorStats or SpoutExecutorStats; instead it stores
+      ;; the result returned by render-stats!, which flattens them by extracting the values and merging
+      ;; everything under :common into the top-level map.  We do pretty much the same here.
+      (dissoc (merge common-stats {:type :bolt}  (apply ->BoltExecutorStats (into [nil] specific-stats))) :common)
+      (dissoc (merge common-stats {:type :spout} (apply ->SpoutExecutorStats (into [nil] specific-stats))) :common)
+      )))
+
+(defmethod thriftify-specific-stats :bolt
+  [stats]
+  (ExecutorSpecificStats/bolt
+    (BoltStats.
+      (window-set-converter (:acked stats) to-global-stream-id str)
+      (window-set-converter (:failed stats) to-global-stream-id str)
+      (window-set-converter (:process-latencies stats) to-global-stream-id str)
+      (window-set-converter (:executed stats) to-global-stream-id str)
+      (window-set-converter (:execute-latencies stats) to-global-stream-id str))))
+
+(defmethod thriftify-specific-stats :spout
+  [stats]
+  (ExecutorSpecificStats/spout
+    (SpoutStats. (window-set-converter (:acked stats) str)
+      (window-set-converter (:failed stats) str)
+      (window-set-converter (:complete-latencies stats) str))))
+
+(defn thriftify-executor-stats
+  [stats]
+  (let [specific-stats (thriftify-specific-stats stats)
+        rate (:rate stats)]
+    (ExecutorStats. (window-set-converter (:emitted stats) str)
+      (window-set-converter (:transferred stats) str)
+      specific-stats
+      rate)))
+
+(defn valid-number?
+  "Returns true if x is a number that is not NaN or Infinity, false otherwise"
+  [x]
+  (and (number? x)
+       (not (Double/isNaN x))
+       (not (Double/isInfinite x))))
+
+(defn apply-default
+  [f defaulting-fn & args]
+  (apply f (map defaulting-fn args)))
+
+(defn apply-or-0
+  [f & args]
+  (apply apply-default
+         f
+         #(if (valid-number? %) % 0)
+         args))
+
+(defn sum-or-0
+  [& args]
+  (apply apply-or-0 + args))
+
+(defn product-or-0
+  [& args]
+  (apply apply-or-0 * args))
+
+(defn max-or-0
+  [& args]
+  (apply apply-or-0 max args))
+
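+;; Illustrative sketch, not part of the original change: the *-or-0 helpers
+;; substitute 0 for nil, NaN and Infinity so partially-missing stats do not
+;; poison the aggregates.
+(comment
+  (sum-or-0 1 nil 2)       ;; => 3
+  (product-or-0 2.5 nil)   ;; => 0.0
+  (max-or-0 Double/NaN 4)  ;; => 4
+  )
+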
+(defn- agg-bolt-lat-and-count
+  "Aggregates number executed, process latency, and execute latency across all
+  streams."
+  [idk->exec-avg idk->proc-avg idk->num-executed]
+  (letfn [(weight-avg [[id avg]]
+            (let [num-e (get idk->num-executed id)]
+              (product-or-0 avg num-e)))]
+    {:executeLatencyTotal (sum (map weight-avg idk->exec-avg))
+     :processLatencyTotal (sum (map weight-avg idk->proc-avg))
+     :executed (sum (vals idk->num-executed))}))
+
+(defn- agg-spout-lat-and-count
+  "Aggregates number acked and complete latencies across all streams."
+  [sid->comp-avg sid->num-acked]
+  (letfn [(weight-avg [[id avg]]
+            (product-or-0 avg (get sid->num-acked id)))]
+    {:completeLatencyTotal (sum (map weight-avg sid->comp-avg))
+     :acked (sum (vals sid->num-acked))}))
+
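+;; Illustrative sketch, not part of the original change; the stream ids and
+;; numbers are hypothetical.  Latencies are kept as weighted totals
+;; (average * count) so they can be summed across executors and divided by
+;; the total count once at the end:
+(comment
+  (agg-spout-lat-and-count {"s1" 10.0 "s2" 20.0} {"s1" 4 "s2" 1})
+  ;; => {:completeLatencyTotal 60.0, :acked 5}
+  )
+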
+(defn add-pairs
+  ([] [0 0])
+  ([[a1 a2] [b1 b2]]
+   [(+ a1 b1) (+ a2 b2)]))
+
+(defn mk-include-sys-fn
+  [include-sys?]
+  (if include-sys?
+    (fn [_] true)
+    (fn [stream] (and (string? stream) (not (Utils/isSystemId stream))))))
+
+(defn mk-include-sys-filter
+  "Returns a function that includes or excludes map entries whose keys are
+  system ids."
+  [include-sys?]
+  (if include-sys?
+    identity
+    (partial filter-key (mk-include-sys-fn false))))
+
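+;; Illustrative sketch, not part of the original change; the stream ids and
+;; counts are hypothetical.  With include-sys? false, entries keyed by system
+;; ids (those starting with "__") are dropped before aggregation:
+(comment
+  ((mk-include-sys-filter false) {"default" 5 "__system" 2})
+  ;; => {"default" 5}
+  )
+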
+(defn- agg-bolt-streams-lat-and-count
+  "Aggregates number executed and process & execute latencies."
+  [idk->exec-avg idk->proc-avg idk->executed]
+  (letfn [(weight-avg [id avg]
+            (let [num-e (idk->executed id)]
+              (product-or-0 avg num-e)))]
+    (into {}
+      (for [k (keys idk->exec-avg)]
+        [k {:executeLatencyTotal (weight-avg k (get idk->exec-avg k))
+            :processLatencyTotal (weight-avg k (get idk->proc-avg k))
+            :executed (idk->executed k)}]))))
+
+(defn- agg-spout-streams-lat-and-count
+  "Aggregates number acked and complete latencies."
+  [idk->comp-avg idk->acked]
+  (letfn [(weight-avg [id avg]
+            (let [num-e (get idk->acked id)]
+              (product-or-0 avg num-e)))]
+    (into {}
+      (for [k (keys idk->comp-avg)]
+        [k {:completeLatencyTotal (weight-avg k (get idk->comp-avg k))
+            :acked (get idk->acked k)}]))))
+
+(defn swap-map-order
+  "For a nested map, rearrange data such that the top-level keys become the
+  nested map's keys and vice versa.
+  Example:
+  {:a {:X :banana, :Y :pear}, :b {:X :apple, :Y :orange}}
+  -> {:Y {:a :pear, :b :orange}, :X {:a :banana, :b :apple}}"
+  [m]
+  (apply merge-with
+         merge
+         (map (fn [[k v]]
+                (into {}
+                      (for [[k2 v2] v]
+                        [k2 {k v2}])))
+              m)))
+
+(defn- compute-agg-capacity
+  "Computes the capacity metric for one executor given its heartbeat data and
+  uptime."
+  [m uptime]
+  (when uptime
+    (->>
+      ;; For each stream, create weighted averages and counts.
+      (merge-with (fn weighted-avg+count-fn
+                    [avg cnt]
+                    [(* avg cnt) cnt])
+                  (get (:execute-latencies m) (str TEN-MIN-IN-SECONDS))
+                  (get (:executed m) (str TEN-MIN-IN-SECONDS)))
+      vals ;; Ignore the stream ids.
+      (reduce add-pairs
+              [0. 0]) ;; Combine weighted averages and counts.
+      ((fn [[weighted-avg cnt]]
+        (div weighted-avg (* 1000 (min uptime TEN-MIN-IN-SECONDS))))))))
+
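+;; Not part of the original change, a worked reading of compute-agg-capacity:
+;; for the ten-minute window it computes, per stream, executed * execute-latency-ms,
+;; sums those products, and divides by the wall-clock time available, i.e.
+;;   capacity ~= sum(executed_i * execute-latency-ms_i) / (1000 * min(uptime, 600))
+;; so a value near 1.0 means the executor spent nearly all of the window executing.
+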
+(defn agg-pre-merge-comp-page-bolt
+  [{exec-id :exec-id
+    host :host
+    port :port
+    uptime :uptime
+    comp-id :comp-id
+    num-tasks :num-tasks
+    statk->w->sid->num :stats}
+   window
+   include-sys?]
+  (let [str-key (partial map-key str)
+        handle-sys-components-fn (mk-include-sys-filter include-sys?)]
+    {:executor-id exec-id,
+     :host host,
+     :port port,
+     :uptime uptime,
+     :num-executors 1,
+     :num-tasks num-tasks,
+     :capacity (compute-agg-capacity statk->w->sid->num uptime)
+     :cid+sid->input-stats
+     (merge-with
+       merge
+       (swap-map-order
+         {:acked (-> statk->w->sid->num
+                     :acked
+                     str-key
+                     (get window))
+          :failed (-> statk->w->sid->num
+                      :failed
+                      str-key
+                      (get window))})
+       (agg-bolt-streams-lat-and-count (-> statk->w->sid->num
+                                           :execute-latencies
+                                           str-key
+                                           (get window))
+                                       (-> statk->w->sid->num
+                                           :process-latencies
+                                           str-key
+                                           (get window))
+                                       (-> statk->w->sid->num
+                                           :executed
+                                           str-key
+                                           (get window)))),
+     :sid->output-stats
+     (swap-map-order
+       {:emitted (-> statk->w->sid->num
+                     :emitted
+                     str-key
+                     (get window)
+                     handle-sys-components-fn)
+        :transferred (-> statk->w->sid->num
+                         :transferred
+                         str-key
+                         (get window)
+                         handle-sys-components-fn)})}))
+
+(defn agg-pre-merge-comp-page-spout
+  [{exec-id :exec-id
+    host :host
+    port :port
+    uptime :uptime
+    comp-id :comp-id
+    num-tasks :num-tasks
+    statk->w->sid->num :stats}
+   window
+   include-sys?]
+  (let [str-key (partial map-key str)
+        handle-sys-components-fn (mk-include-sys-filter include-sys?)]
+    {:executor-id exec-id,
+     :host host,
+     :port port,
+     :uptime uptime,
+     :num-executors 1,
+     :num-tasks num-tasks,
+     :sid->output-stats
+     (merge-with
+       merge
+       (agg-spout-streams-lat-and-count (-> statk->w->sid->num
+                                            :complete-latencies
+                                            str-key
+                                            (get window))
+                                        (-> statk->w->sid->num
+                                            :acked
+                                            str-key
+                                            (get window)))
+       (swap-map-order
+         {:acked (-> statk->w->sid->num
+                     :acked
+                     str-key
+                     (get window))
+          :failed (-> statk->w->sid->num
+                      :failed
+                      str-key
+                      (get window))
+          :emitted (-> statk->w->sid->num
+                       :emitted
+                       str-key
+                       (get window)
+                       handle-sys-components-fn)
+          :transferred (-> statk->w->sid->num
+                           :transferred
+                           str-key
+                           (get window)
+                           handle-sys-components-fn)}))}))
+
+(defn agg-pre-merge-topo-page-bolt
+  [{comp-id :comp-id
+    num-tasks :num-tasks
+    statk->w->sid->num :stats
+    uptime :uptime}
+   window
+   include-sys?]
+  (let [str-key (partial map-key str)
+        handle-sys-components-fn (mk-include-sys-filter include-sys?)]
+    {comp-id
+     (merge
+       (agg-bolt-lat-and-count (-> statk->w->sid->num
+                                   :execute-latencies
+                                   str-key
+                                   (get window))
+                               (-> statk->w->sid->num
+                                   :process-latencies
+                                   str-key
+                                   (get window))
+                               (-> statk->w->sid->num
+                                   :executed
+                                   str-key
+                                   (get window)))
+       {:num-executors 1
+        :num-tasks num-tasks
+        :emitted (-> statk->w->sid->num
+                     :emitted
+                     str-key
+                     (get window)
+                     handle-sys-components-fn
+                     vals
+                     sum)
+        :transferred (-> statk->w->sid->num
+                         :transferred
+                         str-key
+                         (get window)
+                         handle-sys-components-fn
+                         vals
+                         sum)
+        :capacity (compute-agg-capacity statk->w->sid->num uptime)
+        :acked (-> statk->w->sid->num
+                   :acked
+                   str-key
+                   (get window)
+                   vals
+                   sum)
+        :failed (-> statk->w->sid->num
+                    :failed
+                    str-key
+                    (get window)
+                    vals
+                    sum)})}))
+
+(defn agg-pre-merge-topo-page-spout
+  [{comp-id :comp-id
+    num-tasks :num-tasks
+    statk->w->sid->num :stats}
+   window
+   include-sys?]
+  (let [str-key (partial map-key str)
+        handle-sys-components-fn (mk-include-sys-filter include-sys?)]
+    {comp-id
+     (merge
+       (agg-spout-lat-and-count (-> statk->w->sid->num
+                                    :complete-latencies
+                                    str-key
+                                    (get window))
+                                (-> statk->w->sid->num
+                                    :acked
+                                    str-key
+                                    (get window)))
+       {:num-executors 1
+        :num-tasks num-tasks
+        :emitted (-> statk->w->sid->num
+                     :emitted
+                     str-key
+                     (get window)
+                     handle-sys-components-fn
+                     vals
+                     sum)
+        :transferred (-> statk->w->sid->num
+                         :transferred
+                         str-key
+                         (get window)
+                         handle-sys-components-fn
+                         vals
+                         sum)
+        :failed (-> statk->w->sid->num
+                    :failed
+                    str-key
+                    (get window)
+                    vals
+                    sum)})}))
+
+(defn merge-agg-comp-stats-comp-page-bolt
+  [{acc-in :cid+sid->input-stats
+    acc-out :sid->output-stats
+    :as acc-bolt-stats}
+   {bolt-in :cid+sid->input-stats
+    bolt-out :sid->output-stats
+    :as bolt-stats}]
+  {:num-executors (inc (or (:num-executors acc-bolt-stats) 0)),
+   :num-tasks (sum-or-0 (:num-tasks acc-bolt-stats) (:num-tasks bolt-stats)),
+   :sid->output-stats (merge-with (partial merge-with sum-or-0)
+                                  acc-out
+                                  bolt-out),
+   :cid+sid->input-stats (merge-with (partial merge-with sum-or-0)
+                                     acc-in
+                                     bolt-in),
+   :executor-stats
+   (let [sum-streams (fn [m k] (->> m vals (map k) (apply sum-or-0)))
+         executed (sum-streams bolt-in :executed)]
+     (conj (:executor-stats acc-bolt-stats)
+           (merge
+             (select-keys bolt-stats
+                          [:executor-id :uptime :host :port :capacity])
+             {:emitted (sum-streams bolt-out :emitted)
+              :transferred (sum-streams bolt-out :transferred)
+              :acked (sum-streams bolt-in :acked)
+              :failed (sum-streams bolt-in :failed)
+              :executed executed}
+             (->>
+               (if (and executed (pos? executed))
+                 [(div (sum-streams bolt-in :executeLatencyTotal) executed)
+                  (div (sum-streams bolt-in :processLatencyTotal) executed)]
+                 [nil nil])
+               (mapcat vector [:execute-latency :process-latency])
+               (apply assoc {})))))})
+
+(defn merge-agg-comp-stats-comp-page-spout
+  [{acc-out :sid->output-stats
+    :as acc-spout-stats}
+   {spout-out :sid->output-stats
+    :as spout-stats}]
+  {:num-executors (inc (or (:num-executors acc-spout-stats) 0)),
+   :num-tasks (sum-or-0 (:num-tasks acc-spout-stats) (:num-tasks spout-stats)),
+   :sid->output-stats (merge-with (partial merge-with sum-or-0)
+                                  acc-out
+                                  spout-out),
+   :executor-stats
+   (let [sum-streams (fn [m k] (->> m vals (map k) (apply sum-or-0)))
+         acked (sum-streams spout-out :acked)]
+     (conj (:executor-stats acc-spout-stats)
+           (merge
+             (select-keys spout-stats [:executor-id :uptime :host :port])
+             {:emitted (sum-streams spout-out :emitted)
+              :transferred (sum-streams spout-out :transferred)
+              :acked acked
+              :failed (sum-streams spout-out :failed)}
+             {:complete-latency (if (and acked (pos? acked))
+                                  (div (sum-streams spout-out
+                                                    :completeLatencyTotal)
+                                       acked)
+                                  nil)})))})
+
+(defn merge-agg-comp-stats-topo-page-bolt
+  [acc-bolt-stats bolt-stats]
+  {:num-executors (inc (or (:num-executors acc-bolt-stats) 0))
+   :num-tasks (sum-or-0 (:num-tasks acc-bolt-stats) (:num-tasks bolt-stats))
+   :emitted (sum-or-0 (:emitted acc-bolt-stats) (:emitted bolt-stats))
+   :transferred (sum-or-0 (:transferred acc-bolt-stats)
+                          (:transferred bolt-stats))
+   :capacity (max-or-0 (:capacity acc-bolt-stats) (:capacity bolt-stats))
+   ;; We sum average latency totals here to avoid dividing at each step.
+   ;; Compute the average latencies by dividing the total by the count.
+   :executeLatencyTotal (sum-or-0 (:executeLatencyTotal acc-bolt-stats)
+                                  (:executeLatencyTotal bolt-stats))
+   :processLatencyTotal (sum-or-0 (:processLatencyTotal acc-bolt-stats)
+                                  (:processLatencyTotal bolt-stats))
+   :executed (sum-or-0 (:executed acc-bolt-stats) (:executed bolt-stats))
+   :acked (sum-or-0 (:acked acc-bolt-stats) (:acked bolt-stats))
+   :failed (sum-or-0 (:failed acc-bolt-stats) (:failed bolt-stats))})
+
+(defn merge-agg-comp-stats-topo-page-spout
+  [acc-spout-stats spout-stats]
+  {:num-executors (inc (or (:num-executors acc-spout-stats) 0))
+   :num-tasks (sum-or-0 (:num-tasks acc-spout-stats) (:num-tasks spout-stats))
+   :emitted (sum-or-0 (:emitted acc-spout-stats) (:emitted spout-stats))
+   :transferred (sum-or-0 (:transferred acc-spout-stats) (:transferred spout-stats))
+   ;; We sum average latency totals here to avoid dividing at each step.
+   ;; Compute the average latencies by dividing the total by the count.
+   :completeLatencyTotal (sum-or-0 (:completeLatencyTotal acc-spout-stats)
+                            (:completeLatencyTotal spout-stats))
+   :acked (sum-or-0 (:acked acc-spout-stats) (:acked spout-stats))
+   :failed (sum-or-0 (:failed acc-spout-stats) (:failed spout-stats))})
+
+(defn aggregate-count-streams
+  [stats]
+  (->> stats
+       (map-val #(reduce + (vals %)))))
+
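+;; Illustrative sketch, not part of the original change; window and stream ids
+;; are hypothetical.  Per-stream counts collapse to a single count per window:
+(comment
+  (aggregate-count-streams {"600" {"default" 3 "__system" 1}})
+  ;; => {"600" 4}
+  )
+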
+(defn- agg-topo-exec-stats*
+  "A helper function that does the common work to aggregate stats of one
+  executor with the given map for the topology page."
+  [window
+   include-sys?
+   {:keys [workers-set
+           bolt-id->stats
+           spout-id->stats
+           window->emitted
+           window->transferred
+           window->comp-lat-wgt-avg
+           window->acked
+           window->failed] :as acc-stats}
+   {:keys [stats] :as new-data}
+   pre-merge-fn
+   merge-fn
+   comp-key]
+  (let [cid->statk->num (pre-merge-fn new-data window include-sys?)
+        {w->compLatWgtAvg :completeLatencyTotal
+         w->acked :acked}
+          (if (:complete-latencies stats)
+            (swap-map-order
+              (into {}
+                    (for [w (keys (:acked stats))]
+                         [w (agg-spout-lat-and-count
+                              (get (:complete-latencies stats) w)
+                              (get (:acked stats) w))])))
+            {:completeLatencyTotal nil
+             :acked (aggregate-count-streams (:acked stats))})
+        handle-sys-components-fn (mk-include-sys-filter include-sys?)]
+    (assoc {:workers-set (conj workers-set
+                               [(:host new-data) (:port new-data)])
+            :bolt-id->stats bolt-id->stats
+            :spout-id->stats spout-id->stats
+            :window->emitted (->> (:emitted stats)
+                                  (map-val handle-sys-components-fn)
+                                  aggregate-count-streams
+                                  (merge-with + window->emitted))
+            :window->transferred (->> (:transferred stats)
+                                      (map-val handle-sys-components-fn)
+                                      aggregate-count-streams
+                                      (merge-with + window->transferred))
+            :window->comp-lat-wgt-avg (merge-with +
+                                                  window->comp-lat-wgt-avg
+                                                  w->compLatWgtAvg)
+            :window->acked (if (= :spout (:type stats))
+                             (merge-with + window->acked w->acked)
+                             window->acked)
+            :window->failed (if (= :spout (:type stats))
+                              (->> (:failed stats)
+                                   aggregate-count-streams
+                                   (merge-with + window->failed))
+                              window->failed)}
+           comp-key (merge-with merge-fn
+                                (acc-stats comp-key)
+                                cid->statk->num)
+           :type (:type stats))))
+
+(defmulti agg-topo-exec-stats
+  "Combines the aggregate stats of one executor with the given map, selecting
+  the appropriate window and including system components as specified."
+  (fn dispatch-fn [& args] (:type (last args))))
+
+(defmethod agg-topo-exec-stats :bolt
+  [window include-sys? acc-stats new-data]
+  (agg-topo-exec-stats* window
+                        include-sys?
+                        acc-stats
+                        new-data
+                        agg-pre-merge-topo-page-bolt
+                        merge-agg-comp-stats-topo-page-bolt
+                        :bolt-id->stats))
+
+(defmethod agg-topo-exec-stats :spout
+  [window include-sys? acc-stats new-data]
+  (agg-topo-exec-stats* window
+                        include-sys?
+                        acc-stats
+                        new-data
+                        agg-pre-merge-topo-page-spout
+                        merge-agg-comp-stats-topo-page-spout
+                        :spout-id->stats))
+
+(defmethod agg-topo-exec-stats :default [_ _ acc-stats _] acc-stats)
+
+(defn get-last-error
+  [storm-cluster-state storm-id component-id]
+  (if-let [e (.last-error storm-cluster-state storm-id component-id)]
+    (ErrorInfo. (:error e) (:time-secs e))))
+
+(defn component-type
+  "Returns the component type (either :bolt or :spout) for a given
+  topology and component id. Returns nil if not found."
+  [^StormTopology topology id]
+  (let [bolts (.get_bolts topology)
+        spouts (.get_spouts topology)]
+    (cond
+      (Utils/isSystemId id) :bolt
+      (.containsKey bolts id) :bolt
+      (.containsKey spouts id) :spout)))
+
+(defn extract-nodeinfos-from-hb-for-comp
+  ([exec->host+port task->component include-sys? comp-id]
+   (distinct (for [[[start end :as executor] [host port]] exec->host+port
+         :let [id (task->component start)]
+         :when (and (or (nil? comp-id) (= comp-id id))
+                 (or include-sys? (not (Utils/isSystemId id))))]
+     {:host host
+      :port port}))))
+
+(defn extract-data-from-hb
+  ([exec->host+port task->component beats include-sys? topology comp-id]
+   (for [[[start end :as executor] [host port]] exec->host+port
+         :let [beat (beats executor)
+               id (task->component start)]
+         :when (and (or (nil? comp-id) (= comp-id id))
+                    (or include-sys? (not (Utils/isSystemId id))))]
+     {:exec-id executor
+      :comp-id id
+      :num-tasks (count (range start (inc end)))
+      :host host
+      :port port
+      :uptime (:uptime beat)
+      :stats (:stats beat)
+      :type (or (:type (:stats beat))
+                (component-type topology id))}))
+  ([exec->host+port task->component beats include-sys? topology]
+    (extract-data-from-hb exec->host+port
+                          task->component
+                          beats
+                          include-sys?
+                          topology
+                          nil)))
+
+(defn aggregate-topo-stats
+  [window include-sys? data]
+  (let [init-val {:workers-set #{}
+                  :bolt-id->stats {}
+                  :spout-id->stats {}
+                  :window->emitted {}
+                  :window->transferred {}
+                  :window->comp-lat-wgt-avg {}
+                  :window->acked {}
+                  :window->failed {}}
+        reducer-fn (partial agg-topo-exec-stats
+                            window
+                            include-sys?)]
+    (reduce reducer-fn init-val data)))
+
+(defn- compute-weighted-averages-per-window
+  [acc-data wgt-avg-key divisor-key]
+  (into {} (for [[window wgt-avg] (wgt-avg-key acc-data)
+                 :let [divisor ((divisor-key acc-data) window)]
+                 :when (and divisor (pos? divisor))]
+             [(str window) (div wgt-avg divisor)])))
+
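+;; Illustrative sketch, not part of the original change; the window key and
+;; numbers are hypothetical.  Weighted latency totals are turned back into
+;; averages using the matching per-window counts:
+(comment
+  (compute-weighted-averages-per-window
+    {:window->comp-lat-wgt-avg {"600" 120.0} :window->acked {"600" 40}}
+    :window->comp-lat-wgt-avg
+    :window->acked)
+  ;; => {"600" 3.0}
+  )
+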
+(defn- post-aggregate-topo-stats
+  [task->component exec->node+port last-err-fn acc-data]
+  {:num-tasks (count task->component)
+   :num-workers (count (:workers-set acc-data))
+   :num-executors (count exec->node+port)
+   :bolt-id->stats
+     (into {} (for [[id m] (:bolt-id->stats acc-data)
+                    :let [executed (:executed m)]]
+                     [id (-> m
+                             (assoc :execute-latency
+                                    (if (and executed (pos? executed))
+                                      (div (or (:executeLatencyTotal m) 0)
+                                           executed)
+                                      0)
+                                    :process-latency
+                                    (if (and executed (pos? executed))
+                                      (div (or (:processLatencyTotal m) 0)
+                                           executed)
+                                      0))
+                             (dissoc :executeLatencyTotal
+                                     :processLatencyTotal)
+                             (assoc :lastError (last-err-fn id)))]))
+   :spout-id->stats
+     (into {} (for [[id m] (:spout-id->stats acc-data)
+                    :let [acked (:acked m)]]
+                    [id (-> m
+                            (assoc :complete-latency
+                                   (if (and acked (pos? acked))
+                                     (div (:completeLatencyTotal m)
+                                          (:acked m))
+                                     0))
+                            (dissoc :completeLatencyTotal)
+                            (assoc :lastError (last-err-fn id)))]))
+   :window->emitted (map-key str (:window->emitted acc-data))
+   :window->transferred (map-key str (:window->transferred acc-data))
+   :window->complete-latency
+     (compute-weighted-averages-per-window acc-data
+                                           :window->comp-lat-wgt-avg
+                                           :window->acked)
+   :window->acked (map-key str (:window->acked acc-data))
+   :window->failed (map-key str (:window->failed acc-data))})
+
+(defn- thriftify-common-agg-stats
+  [^ComponentAggregateStats s
+   {:keys [num-tasks
+           emitted
+           transferred
+           acked
+           failed
+           num-executors] :as statk->num}]
+  (let [cas (CommonAggregateStats.)]
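+    ;; The (and x (.set_... cas x)) forms below are nil guards: each setter
+    ;; runs only when that stat is present in the map.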
+    (and num-executors (.set_num_executors cas num-executors))
+    (and num-tasks (.set_num_tasks cas num-tasks))
+    (and emitted (.set_emitted cas emitted))
+    (and transferred (.set_transferred cas transferred))
+    (and acked (.set_acked cas acked))
+    (and failed (.set_failed cas failed))
+    (.set_common_stats s cas)))
+
+(defn thriftify-bolt-agg-stats
+  [statk->num]
+  (let [{:keys [lastError
+                execute-latency
+                process-latency
+                executed
+                capacity]} statk->num
+        s (ComponentAggregateStats.)]
+    (.set_type s ComponentType/BOLT)
+    (and lastError (.set_last_error s lastError))
+    (thriftify-common-agg-stats s statk->num)
+    (.set_specific_stats s
+      (SpecificAggregateStats/bolt
+        (let [bas (BoltAggregateStats.)]
+          (and execute-latency (.set_execute_latency_ms bas execute-latency))
+          (and process-latency (.set_process_latency_ms bas process-latency))
+          (and executed (.set_executed bas executed))
+          (and capacity (.set_capacity bas capacity))
+          bas)))
+    s))
+
+(defn thriftify-spout-agg-stats
+  [statk->num]
+  (let [{:keys [lastError
+                complete-latency]} statk->num
+        s (ComponentAggregateStats.)]
+    (.set_type s ComponentType/SPOUT)
+    (and lastError (.set_last_error s lastError))
+    (thriftify-common-agg-stats s statk->num)
+    (.set_specific_stats s
+      (SpecificAggregateStats/spout
+        (let [sas (SpoutAggregateStats.)]
+          (and complete-latency (.set_complete_latency_ms sas complete-latency))
+          sas)))
+    s))
+
+(defn thriftify-topo-page-data
+  [topology-id data]
+  (let [{:keys [num-tasks
+                num-workers
+                num-executors
+                spout-id->stats
+                bolt-id->stats
+                window->emitted
+                window->transferred
+                window->complete-latency
+                window->acked
+                window->failed]} data
+        spout-agg-stats (into {}
+                              (for [[id m] spout-id->stats
+                                    :let [m (assoc m :type :spout)]]
+                                [id
+                                 (thriftify-spout-agg-stats m)]))
+        bolt-agg-stats (into {}
+                             (for [[id m] bolt-id->stats
+                                   :let [m (assoc m :type :bolt)]]
+                              [id
+                               (thriftify-bolt-agg-stats m)]))
+        topology-stats (doto (TopologyStats.)
+                         (.set_window_to_emitted window->emitted)
+                         (.set_window_to_transferred window->transferred)
+                         (.set_window_to_complete_latencies_ms
+                           window->complete-latency)
+                         (.set_window_to_acked window->acked)
+                         (.set_window_to_failed window->failed))
+      topo-page-info (doto (TopologyPageInfo. topology-id)
+                       (.set_num_tasks num-tasks)
+                       (.set_num_workers num-workers)
+                       (.set_num_executors num-executors)
+                       (.set_id_to_spout_agg_stats spout-agg-stats)
+                       (.set_id_to_bolt_agg_stats bolt-agg-stats)
+                       (.set_topology_stats topology-stats))]
+    topo-page-info))
+
+(defn agg-topo-execs-stats
+  "Aggregate various executor statistics for a topology from the given
+  heartbeats."
+  [topology-id
+   exec->node+port
+   task->component
+   beats
+   topology
+   window
+   include-sys?
+   last-err-fn]
+  (->> ;; The steps below compose lazily, so each executor's heartbeat is traversed only once.
+    (extract-data-from-hb exec->node+port
+                          task->component
+                          beats
+                          include-sys?
+                          topology)
+    (aggregate-topo-stats window include-sys?)
+    (post-aggregate-topo-stats task->component exec->node+port last-err-fn)
+    (thriftify-topo-page-data topology-id)))
+
+(defn- agg-bolt-exec-win-stats
+  "A helper function that aggregates windowed stats from one bolt executor."
+  [acc-stats new-stats include-sys?]
+  (let [{w->execLatWgtAvg :executeLatencyTotal
+         w->procLatWgtAvg :processLatencyTotal
+         w->executed :executed}
+          (swap-map-order
+            (into {} (for [w (keys (:executed new-stats))]
+                       [w (agg-bolt-lat-and-count
+                            (get (:execute-latencies new-stats) w)
+                            (get (:process-latencies new-stats) w)
+                            (get (:executed new-stats) w))])))
+        handle-sys-components-fn (mk-include-sys-filter include-sys?)]
+    {:window->emitted (->> (:emitted new-stats)
+                           (map-val handle-sys-components-fn)
+                           aggregate-count-streams
+                           (merge-with + (:window->emitted acc-stats)))
+     :window->transferred (->> (:transferred new-stats)
+                               (map-val handle-sys-components-fn)
+                               aggregate-count-streams
+                               (merge-with + (:window->transferred acc-stats)))
+     :window->exec-lat-wgt-avg (merge-with +
+                                           (:window->exec-lat-wgt-avg acc-stats)
+                                           w->execLatWgtAvg)
+     :window->proc-lat-wgt-avg (merge-with +
+                                           (:window->proc-lat-wgt-avg acc-stats)
+                                           w->procLatWgtAvg)
+     :window->executed (merge-with + (:window->executed acc-stats) w->executed)
+     :window->acked (->> (:acked new-stats)
+                         aggregate-count-streams
+                         (merge-with + (:window->acked acc-stats)))
+     :window->failed (->> (:failed new-stats)
+                          aggregate-count-streams
+                          (merge-with + (:window->failed acc-stats)))}))
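+
+;; Note: the *-wgt-avg maps here (and in the spout helper below) accumulate
+;; latency*count totals per window rather than averages, so executors can be
+;; merged with plain +; the real averages are recovered afterwards by
+;; compute-weighted-averages-per-window.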
+
+(defn- agg-spout-exec-win-stats
+  "A helper function that aggregates windowed stats from one spout executor."
+  [acc-stats new-stats include-sys?]
+  (let [{w->compLatWgtAvg :completeLatencyTotal
+         w->acked :acked}
+          (swap-map-order
+            (into {} (for [w (keys (:acked new-stats))]
+                       [w (agg-spout-lat-and-count
+                            (get (:complete-latencies new-stats) w)
+                            (get (:acked new-stats) w))])))
+        handle-sys-components-fn (mk-include-sys-filter include-sys?)]
+    {:window->emitted (->> (:emitted new-stats)
+                           (map-val handle-sys-components-fn)
+                           aggregate-count-streams
+                           (merge-with + (:window->emitted acc-stats)))
+     :window->transferred (->> (:transferred new-stats)
+                               (map-val handle-sys-components-fn)
+                               aggregate-count-streams
+                               (merge-with + (:window->transferred acc-stats)))
+     :window->comp-lat-wgt-avg (merge-with +
+                                           (:window->comp-lat-wgt-avg acc-stats)
+                                           w->compLatWgtAvg)
+     :window->acked (->> (:acked new-stats)
+                         aggregate-count-streams
+                         (merge-with + (:window->acked acc-stats)))
+     :window->failed (->> (:failed new-stats)
+                          aggregate-count-streams
+                          (merge-with + (:window->failed acc-stats)))}))
+
+(defmulti agg-comp-exec-stats
+  "Combines the aggregate stats of one executor with the given map, selecting
+  the appropriate window and including system components as specified."
+  (fn dispatch-fn [_ _ init-val _] (:type init-val)))
+
+(defmethod agg-comp-exec-stats :bolt
+  [window include-sys? acc-stats new-data]
+  (assoc (agg-bolt-exec-win-stats acc-stats (:stats new-data) include-sys?)
+         :stats (merge-agg-comp-stats-comp-page-bolt
+                  (:stats acc-stats)
+                  (agg-pre-merge-comp-page-bolt new-data window include-sys?))
+         :type :bolt))
+
+(defmethod agg-comp-exec-stats :spout
+  [window include-sys? acc-stats new-data]
+  (assoc (agg-spout-exec-win-stats acc-stats (:stats new-data) include-sys?)
+         :stats (merge-agg-comp-stats-comp-page-spout
+                  (:stats acc-stats)
+                  (agg-pre-merge-comp-page-spout new-data window include-sys?))
+         :type :spout))
+
+(defn- aggregate-comp-stats*
+  [window include-sys? data init-val]
+  (-> (partial agg-comp-exec-stats
+               window
+               include-sys?)
+      (reduce init-val data)))
+
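+;; The dispatch below keys on the :type of the first element of the data seq
+;; (the final argument), so bolt heartbeat rows go to the :bolt method and
+;; spout rows to the :spout method.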
+(defmulti aggregate-comp-stats
+  (fn dispatch-fn [& args] (-> args last first :type)))
+
+(defmethod aggregate-comp-stats :bolt
+  [& args]
+  (let [init-val {:type :bolt
+                  :cid+sid->input-stats {}
+                  :sid->output-stats {}
+                  :executor-stats []
+                  :window->emitted {}
+                  :window->transferred {}
+                  :window->exec-lat-wgt-avg {}
+                  :window->executed {}
+                  :window->proc-lat-wgt-avg {}
+                  :window->acked {}
+                  :window->failed {}}]
+    (apply aggregate-comp-stats* (concat args (list init-val)))))
+
+(defmethod aggregate-comp-stats :spout
+  [& args]
+  (let [init-val {:type :spout
+                  :sid->output-stats {}
+                  :executor-stats []
+                  :window->emitted {}
+                  :window->transferred {}
+                  :window->comp-lat-wgt-avg {}
+                  :window->acked {}
+                  :window->failed {}}]
+    (apply aggregate-comp-stats* (concat args (list init-val)))))
+
+(defmethod aggregate-comp-stats :default [& _] {})
+
+(defmulti post-aggregate-comp-stats
+  (fn [_ _ data] (:type data)))
+
+(defmethod post-aggregate-comp-stats :bolt
+  [task->component
+   exec->host+port
+   {{i-stats :cid+sid->input-stats
+     o-stats :sid->output-stats
+     num-tasks :num-tasks
+     num-executors :num-executors} :stats
+    comp-type :type :as acc-data}]
+  {:type comp-type
+   :num-tasks num-tasks
+   :num-executors num-executors
+   :cid+sid->input-stats
+   (->> i-stats
+        (map-val (fn [m]
+                     (let [executed (:executed m)
+                           lats (if (and executed (pos? executed))
+                                  {:execute-latency
+                                   (div (or (:executeLatencyTotal m) 0)
+                                        executed)
+                                   :process-latency
+                                   (div (or (:processLatencyTotal m) 0)
+                                        executed)}
+                                  {:execute-latency 0
+                                   :process-latency 0})]
+                       (-> m (merge lats) (dissoc :executeLatencyTotal
+                                                  :processLatencyTotal))))))
+   :sid->output-stats o-stats
+   :executor-stats (:executor-stats (:stats acc-data))
+   :window->emitted (map-key str (:window->emitted acc-data))
+   :window->transferred (map-key str (:window->transferred acc-data))
+   :window->execute-latency
+     (compute-weighted-averages-per-window acc-data
+                                           :window->exec-lat-wgt-avg
+                                           :window->executed)
+   :window->executed (map-key str (:window->executed acc-data))
+   :window->process-latency
+     (compute-weighted-averages-per-window acc-data
+                                           :window->proc-lat-wgt-avg
+                                           :window->executed)
+   :window->acked (map-key str (:window->acked acc-data))
+   :window->failed (map-key str (:window->failed acc-data))})
+
+(defmethod post-aggregate-comp-stats :spout
+  [task->component
+   exec->host+port
+   {{o-stats :sid->output-stats
+     num-tasks :num-tasks
+     num-executors :num-executors} :stats
+    comp-type :type :as acc-data}]
+  {:type comp-type
+   :num-tasks num-tasks
+   :num-executors num-executors
+   :sid->output-stats
+   (->> o-stats
+        (map-val (fn [m]
+                     (let [acked (:acked m)
+                           lat (if (and acked (pos? acked))
+                                 {:complete-latency
+                                  (div (or (:completeLatencyTotal m) 0) acked)}
+                                 {:complete-latency 0})]
+                       (-> m (merge lat) (dissoc :completeLatencyTotal))))))
+   :executor-stats (:executor-stats (:stats acc-data))
+   :window->emitted (map-key str (:window->emitted acc-data))
+   :window->transferred (map-key str (:window->transferred acc-data))
+   :window->complete-latency
+     (compute-weighted-averages-per-window acc-data
+                                           :window->comp-lat-wgt-avg
+                                           :window->acked)
+   :window->acked (map-key str (:window->acked acc-data))
+   :window->failed (map-key str (:window->failed acc-data))})
+
+(defmethod post-aggregate-comp-stats :default [& _] {})
+
+(defn thriftify-exec-agg-stats
+  [comp-id comp-type {:keys [executor-id host port uptime] :as stats}]
+  (doto (ExecutorAggregateStats.)
+    (.set_exec_summary (ExecutorSummary. (apply #(ExecutorInfo. %1 %2)
+                                                executor-id)
+                                         comp-id
+                                         host
+                                         port
+                                         (or uptime 0)))
+    (.set_stats ((condp = comp-type
+                   :bolt thriftify-bolt-agg-stats
+                   :spout thriftify-spout-agg-stats) stats))))
+
+(defn- thriftify-bolt-input-stats
+  [cid+sid->input-stats]
+  (into {} (for [[cid+sid input-stats] cid+sid->input-stats]
+             [(to-global-stream-id cid+sid)
+              (thriftify-bolt-agg-stats input-stats)])))
+
+(defn- thriftify-bolt-output-stats
+  [sid->output-stats]
+  (map-val thriftify-bolt-agg-stats sid->output-stats))
+
+(defn- thriftify-spout-output-stats
+  [sid->output-stats]
+  (map-val thriftify-spout-agg-stats sid->output-stats))
+
+(defn thriftify-comp-page-data
+  [topo-id topology comp-id data]
+  (let [w->stats (swap-map-order
+                   (merge
+                     {:emitted (:window->emitted data)
+                      :transferred (:window->transferred data)
+                      :acked (:window->acked data)
+                      :failed (:window->failed data)}
+                     (condp = (:type data)
+                       :bolt {:execute-latency (:window->execute-latency data)
+                              :process-latency (:window->process-latency data)
+                              :executed (:window->executed data)}
+                       :spout {:complete-latency
+                               (:window->complete-latency data)}
+                       {}))) ; default
+        [compType exec-stats w->stats gsid->input-stats sid->output-stats]
+          (condp = (component-type topology comp-id)
+            :bolt [ComponentType/BOLT
+                   (->
+                     (partial thriftify-exec-agg-stats comp-id :bolt)
+                     (map (:executor-stats data)))
+                   (map-val thriftify-bolt-agg-stats w->stats)
+                   (thriftify-bolt-input-stats (:cid+sid->input-stats data))
+                   (thriftify-bolt-output-stats (:sid->output-stats data))]
+            :spout [ComponentType/SPOUT
+                    (->
+                      (partial thriftify-exec-agg-stats comp-id :spout)
+                      (map (:executor-stats data)))
+                    (map-val thriftify-spout-agg-stats w->stats)
+                    nil ;; spouts do not have input stats
+                    (thriftify-spout-output-stats (:sid->output-stats data))]),
+        num-executors (:num-executors data)
+        num-tasks (:num-tasks data)
+        ret (doto (ComponentPageInfo. comp-id compType)
+              (.set_topology_id topo-id)
+              (.set_topology_name nil)
+              (.set_window_to_stats w->stats)
+              (.set_sid_to_output_stats sid->output-stats)
+              (.set_exec_stats exec-stats))]
+    (and num-executors (.set_num_executors ret num-executors))
+    (and num-tasks (.set_num_tasks ret num-tasks))
+    (and gsid->input-stats
+         (.set_gsid_to_input_stats ret gsid->input-stats))
+    ret))
+
+(defn agg-comp-execs-stats
+  "Aggregate various executor statistics for a component from the given
+  heartbeats."
+  [exec->host+port
+   task->component
+   beats
+   window
+   include-sys?
+   topology-id
+   topology
+   component-id]
+  (->> ;; The steps below compose lazily, so each executor's heartbeat is traversed only once.
+    (extract-data-from-hb exec->host+port
+                          task->component
+                          beats
+                          include-sys?
+                          topology
+                          component-id)
+    (aggregate-comp-stats window include-sys?)
+    (post-aggregate-comp-stats task->component exec->host+port)
+    (thriftify-comp-page-data topology-id topology component-id)))
+
+(defn expand-averages
+  [avg counts]
+  (let [avg (clojurify-structure avg)
+        counts (clojurify-structure counts)]
+    (into {}
+          (for [[slice streams] counts]
+            [slice
+             (into {}
+                   (for [[stream c] streams]
+                     [stream
+                      [(* c (get-in avg [slice stream]))
+                       c]]
+                     ))]))))
+
+(defn expand-averages-seq
+  [average-seq counts-seq]
+  (->> (map vector average-seq counts-seq)
+       (map #(apply expand-averages %))
+       (apply merge-with (fn [s1 s2] (merge-with add-pairs s1 s2)))))
+
+(defn- val-avg
+  [[t c]]
+  (if (= c 0) 0
+    (double (/ t c))))
+
+(defn aggregate-averages
+  [average-seq counts-seq]
+  (->> (expand-averages-seq average-seq counts-seq)
+       (map-val
+         (fn [s]
+           (map-val val-avg s)))))
+
+(defn aggregate-avg-streams
+  [avg counts]
+  (let [expanded (expand-averages avg counts)]
+    (->> expanded
+         (map-val #(reduce add-pairs (vals %)))
+         (map-val val-avg))))
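+
+;; Sketch of the averages round trip (hypothetical numbers): per-stream
+;; averages are expanded back into [total count] pairs, summed across
+;; streams, and reduced to one count-weighted average per window.
+(comment
+  (aggregate-avg-streams {"600" {"s1" 10.0 "s2" 20.0}}
+                         {"600" {"s1" 4 "s2" 1}})
+  ;; => {"600" 12.0} i.e. (10.0*4 + 20.0*1) / (4 + 1)
+  )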
+
+(defn pre-process
+  [stream-summary include-sys?]
+  (let [filter-fn (mk-include-sys-fn include-sys?)
+        emitted (:emitted stream-summary)
+        emitted (into {} (for [[window stat] emitted]
+                           {window (filter-key filter-fn stat)}))
+        transferred (:transferred stream-summary)
+        transferred (into {} (for [[window stat] transferred]
+                               {window (filter-key filter-fn stat)}))
+        stream-summary (-> stream-summary (dissoc :emitted) (assoc :emitted emitted))
+        stream-summary (-> stream-summary (dissoc :transferred) (assoc :transferred transferred))]
+    stream-summary))
+
+(defn aggregate-counts
+  [counts-seq]
+  (->> counts-seq
+       (map clojurify-structure)
+       (apply merge-with
+              (fn [s1 s2]
+                (merge-with + s1 s2)))))
+
+(defn aggregate-common-stats
+  [stats-seq]
+  {:emitted (aggregate-counts (map #(.get_emitted ^ExecutorStats %) stats-seq))
+   :transferred (aggregate-counts (map #(.get_transferred ^ExecutorStats %) stats-seq))})
+
+(defn aggregate-bolt-stats
+  [stats-seq include-sys?]
+  (let [stats-seq (collectify stats-seq)]
+    (merge (pre-process (aggregate-common-stats stats-seq) include-sys?)
+           {:acked
+            (aggregate-counts (map #(.. ^ExecutorStats % get_specific get_bolt get_acked)
+                                   stats-seq))
+            :failed
+            (aggregate-counts (map #(.. ^ExecutorStats % get_specific get_bolt get_failed)
+                                   stats-seq))
+            :executed
+            (aggregate-counts (map #(.. ^ExecutorStats % get_specific get_bolt get_executed)
+                                   stats-seq))
+            :process-latencies
+            (aggregate-averages (map #(.. ^ExecutorStats % get_specific get_bolt get_process_ms_avg)
+                                     stats-seq)
+                                (map #(.. ^ExecutorStats % get_specific get_bolt get_acked)
+                                     stats-seq))
+            :execute-latencies
+            (aggregate-averages (map #(.. ^ExecutorStats % get_specific get_bolt get_execute_ms_avg)
+                                     stats-seq)
+                                (map #(.. ^ExecutorStats % get_specific get_bolt get_executed)
+                                     stats-seq))})))
+
+(defn aggregate-spout-stats
+  [stats-seq include-sys?]
+  (let [stats-seq (collectify stats-seq)]
+    (merge (pre-process (aggregate-common-stats stats-seq) include-sys?)
+           {:acked
+            (aggregate-counts (map #(.. ^ExecutorStats % get_specific get_spout get_acked)
+                                   stats-seq))
+            :failed
+            (aggregate-counts (map #(.. ^ExecutorStats % get_specific get_spout get_failed)
+                                   stats-seq))
+            :complete-latencies
+            (aggregate-averages (map #(.. ^ExecutorStats % get_specific get_spout get_complete_ms_avg)
+                                     stats-seq)
+                                (map #(.. ^ExecutorStats % get_specific get_spout get_acked)
+                                     stats-seq))})))
+
+(defn get-filled-stats
+  [summs]
+  (->> summs
+       (map #(.get_stats ^ExecutorSummary %))
+       (filter not-nil?)))
+
+(defn aggregate-spout-streams
+  [stats]
+  {:acked (aggregate-count-streams (:acked stats))
+   :failed (aggregate-count-streams (:failed stats))
+   :emitted (aggregate-count-streams (:emitted stats))
+   :transferred (aggregate-count-streams (:transferred stats))
+   :complete-latencies (aggregate-avg-streams (:complete-latencies stats)
+                                              (:acked stats))})
+
+(defn spout-streams-stats
+  [summs include-sys?]
+  (let [stats-seq (get-filled-stats summs)]
+    (aggregate-spout-streams
+      (aggregate-spout-stats
+        stats-seq include-sys?))))
+
+(defn aggregate-bolt-streams
+  [stats]
+  {:acked (aggregate-count-streams (:acked stats))
+   :failed (aggregate-count-streams (:failed stats))
+   :emitted (aggregate-count-streams (:emitted stats))
+   :transferred (aggregate-count-streams (:transferred stats))
+   :process-latencies (aggregate-avg-streams (:process-latencies stats)
+                                             (:acked stats))
+   :executed (aggregate-count-streams (:executed stats))
+   :execute-latencies (aggregate-avg-streams (:execute-latencies stats)
+                                             (:executed stats))})
+
+(defn compute-executor-capacity
+  [^ExecutorSummary e]
+  (let [stats (.get_stats e)
+        stats (if stats
+                (-> stats
+                    (aggregate-bolt-stats true)
+                    (aggregate-bolt-streams)
+                    swap-map-order
+                    (get (str TEN-MIN-IN-SECONDS))))
+        uptime (nil-to-zero (.get_uptime_secs e))
+        window (if (< uptime TEN-MIN-IN-SECONDS) uptime TEN-MIN-IN-SECONDS)
+        executed (-> stats :executed nil-to-zero)
+        latency (-> stats :execute-latencies nil-to-zero)]
+    (if (> window 0)
+      (div (* executed latency) (* 1000 window)))))
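+
+;; Capacity sketch (hypothetical numbers): 90000 executions at an average
+;; execute latency of 4 ms over the last 600 s window gives
+;; (90000 * 4) / (1000 * 600) = 0.6, i.e. the executor was busy roughly 60%
+;; of the time; executors up for less than ten minutes use their uptime as
+;; the window.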
+
+(defn bolt-streams-stats
+  [summs include-sys?]
+  (let [stats-seq (get-filled-stats summs)]
+    (aggregate-bolt-streams
+      (aggregate-bolt-stats
+        stats-seq include-sys?))))
+
+(defn total-aggregate-stats
+  [spout-summs bolt-summs include-sys?]
+  (let [spout-stats (get-filled-stats spout-summs)
+        bolt-stats (get-filled-stats bolt-summs)
+        agg-spout-stats (-> spout-stats
+                            (aggregate-spout-stats include-sys?)
+                            aggregate-spout-streams)
+        agg-bolt-stats (-> bolt-stats
+                           (aggregate-bolt-stats include-sys?)
+                           aggregate-bolt-streams)]
+    (merge-with
+      (fn [s1 s2]
+        (merge-with + s1 s2))
+      (select-keys
+        agg-bolt-stats
+        ;; Include only keys that will be used.  We want to count acked and
+        ;; failed only for the "tuple trees," so we do not include those keys
+        ;; from the bolt executors.
+        [:emitted :transferred])
+      agg-spout-stats)))
+
+(defn error-subset
+  [error-str]
+  (apply str (take 200 error-str)))
+
+(defn most-recent-error
+  [errors-list]
+  (let [error (->> errors-list
+                   (sort-by #(.get_error_time_secs ^ErrorInfo %))
+                   reverse
+                   first)]
+    (if error
+      (error-subset (.get_error ^ErrorInfo error))
+      "")))
+
+(defn float-str [n]
+  (if n
+    (format "%.3f" (float n))
+    "0"))
+
+(defn compute-bolt-capacity
+  [executors]
+  (->> executors
+       (map compute-executor-capacity)
+       (map nil-to-zero)
+       (apply max)))


[07/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/drpc/JoinResult.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/drpc/JoinResult.java b/storm-core/src/jvm/backtype/storm/drpc/JoinResult.java
deleted file mode 100644
index 2c416ed..0000000
--- a/storm-core/src/jvm/backtype/storm/drpc/JoinResult.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.drpc;
-
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseRichBolt;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-public class JoinResult extends BaseRichBolt {
-    public static final Logger LOG = LoggerFactory.getLogger(JoinResult.class);
-
-    String returnComponent;
-    Map<Object, Tuple> returns = new HashMap<>();
-    Map<Object, Tuple> results = new HashMap<>();
-    OutputCollector _collector;
-
-    public JoinResult(String returnComponent) {
-        this.returnComponent = returnComponent;
-    }
- 
-    public void prepare(Map map, TopologyContext context, OutputCollector collector) {
-        _collector = collector;
-    }
-
-    public void execute(Tuple tuple) {
-        Object requestId = tuple.getValue(0);
-        if(tuple.getSourceComponent().equals(returnComponent)) {
-            returns.put(requestId, tuple);
-        } else {
-            results.put(requestId, tuple);
-        }
-
-        if(returns.containsKey(requestId) && results.containsKey(requestId)) {
-            Tuple result = results.remove(requestId);
-            Tuple returner = returns.remove(requestId);
-            LOG.debug(result.getValue(1).toString());
-            List<Tuple> anchors = new ArrayList<>();
-            anchors.add(result);
-            anchors.add(returner);            
-            _collector.emit(anchors, new Values(""+result.getValue(1), returner.getValue(1)));
-            _collector.ack(result);
-            _collector.ack(returner);
-        }
-    }
-
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-        declarer.declare(new Fields("result", "return-info"));
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/drpc/KeyedFairBolt.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/drpc/KeyedFairBolt.java b/storm-core/src/jvm/backtype/storm/drpc/KeyedFairBolt.java
deleted file mode 100644
index 113163d..0000000
--- a/storm-core/src/jvm/backtype/storm/drpc/KeyedFairBolt.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.drpc;
-
-import backtype.storm.coordination.CoordinatedBolt.FinishedCallback;
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.BasicBoltExecutor;
-import backtype.storm.topology.IBasicBolt;
-import backtype.storm.topology.IRichBolt;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.utils.KeyedRoundRobinQueue;
-import java.util.HashMap;
-import java.util.Map;
-
-
-public class KeyedFairBolt implements IRichBolt, FinishedCallback {
-    IRichBolt _delegate;
-    KeyedRoundRobinQueue<Tuple> _rrQueue;
-    Thread _executor;
-    FinishedCallback _callback;
-
-    public KeyedFairBolt(IRichBolt delegate) {
-        _delegate = delegate;
-    }
-    
-    public KeyedFairBolt(IBasicBolt delegate) {
-        this(new BasicBoltExecutor(delegate));
-    }
-    
-    
-    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
-        if(_delegate instanceof FinishedCallback) {
-            _callback = (FinishedCallback) _delegate;
-        }
-        _delegate.prepare(stormConf, context, collector);
-        _rrQueue = new KeyedRoundRobinQueue<Tuple>();
-        _executor = new Thread(new Runnable() {
-            public void run() {
-                try {
-                    while(true) {
-                        _delegate.execute(_rrQueue.take());
-                    }
-                } catch (InterruptedException e) {
-
-                }
-            }
-        });
-        _executor.setDaemon(true);
-        _executor.start();
-    }
-
-    public void execute(Tuple input) {
-        Object key = input.getValue(0);
-        _rrQueue.add(key, input);
-    }
-
-    public void cleanup() {
-        _executor.interrupt();
-        _delegate.cleanup();
-    }
-
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-        _delegate.declareOutputFields(declarer);
-    }
-
-    public void finishedId(Object id) {
-        if(_callback!=null) {
-            _callback.finishedId(id);
-        }
-    }
-
-    @Override
-    public Map<String, Object> getComponentConfiguration() {
-        return new HashMap<String, Object>();
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/drpc/LinearDRPCInputDeclarer.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/drpc/LinearDRPCInputDeclarer.java b/storm-core/src/jvm/backtype/storm/drpc/LinearDRPCInputDeclarer.java
deleted file mode 100644
index d03075e..0000000
--- a/storm-core/src/jvm/backtype/storm/drpc/LinearDRPCInputDeclarer.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.drpc;
-
-import backtype.storm.grouping.CustomStreamGrouping;
-import backtype.storm.topology.ComponentConfigurationDeclarer;
-import backtype.storm.tuple.Fields;
-
-public interface LinearDRPCInputDeclarer extends ComponentConfigurationDeclarer<LinearDRPCInputDeclarer> {
-    public LinearDRPCInputDeclarer fieldsGrouping(Fields fields);
-    public LinearDRPCInputDeclarer fieldsGrouping(String streamId, Fields fields);
-
-    public LinearDRPCInputDeclarer globalGrouping();
-    public LinearDRPCInputDeclarer globalGrouping(String streamId);
-
-    public LinearDRPCInputDeclarer shuffleGrouping();
-    public LinearDRPCInputDeclarer shuffleGrouping(String streamId);
-
-    public LinearDRPCInputDeclarer localOrShuffleGrouping();
-    public LinearDRPCInputDeclarer localOrShuffleGrouping(String streamId);
-    
-    public LinearDRPCInputDeclarer noneGrouping();
-    public LinearDRPCInputDeclarer noneGrouping(String streamId);
-
-    public LinearDRPCInputDeclarer allGrouping();
-    public LinearDRPCInputDeclarer allGrouping(String streamId);
-
-    public LinearDRPCInputDeclarer directGrouping();
-    public LinearDRPCInputDeclarer directGrouping(String streamId);
-
-    public LinearDRPCInputDeclarer partialKeyGrouping(Fields fields);
-    public LinearDRPCInputDeclarer partialKeyGrouping(String streamId, Fields fields);
-
-    public LinearDRPCInputDeclarer customGrouping(CustomStreamGrouping grouping);
-    public LinearDRPCInputDeclarer customGrouping(String streamId, CustomStreamGrouping grouping);
-    
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/drpc/LinearDRPCTopologyBuilder.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/drpc/LinearDRPCTopologyBuilder.java b/storm-core/src/jvm/backtype/storm/drpc/LinearDRPCTopologyBuilder.java
deleted file mode 100644
index ee82091..0000000
--- a/storm-core/src/jvm/backtype/storm/drpc/LinearDRPCTopologyBuilder.java
+++ /dev/null
@@ -1,393 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.drpc;
-
-import backtype.storm.Constants;
-import backtype.storm.ILocalDRPC;
-import backtype.storm.coordination.BatchBoltExecutor;
-import backtype.storm.coordination.CoordinatedBolt;
-import backtype.storm.coordination.CoordinatedBolt.FinishedCallback;
-import backtype.storm.coordination.CoordinatedBolt.IdStreamSpec;
-import backtype.storm.coordination.CoordinatedBolt.SourceArgs;
-import backtype.storm.coordination.IBatchBolt;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.generated.StreamInfo;
-import backtype.storm.grouping.CustomStreamGrouping;
-import backtype.storm.grouping.PartialKeyGrouping;
-import backtype.storm.topology.BaseConfigurationDeclarer;
-import backtype.storm.topology.BasicBoltExecutor;
-import backtype.storm.topology.BoltDeclarer;
-import backtype.storm.topology.IBasicBolt;
-import backtype.storm.topology.IRichBolt;
-import backtype.storm.topology.InputDeclarer;
-import backtype.storm.topology.OutputFieldsGetter;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.tuple.Fields;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-
-// Trident subsumes the functionality provided by this class, so it's deprecated
-@Deprecated
-public class LinearDRPCTopologyBuilder {    
-    String _function;
-    List<Component> _components = new ArrayList<Component>();
-    
-    
-    public LinearDRPCTopologyBuilder(String function) {
-        _function = function;
-    }
-        
-    public LinearDRPCInputDeclarer addBolt(IBatchBolt bolt, Number parallelism) {
-        return addBolt(new BatchBoltExecutor(bolt), parallelism);
-    }
-    
-    public LinearDRPCInputDeclarer addBolt(IBatchBolt bolt) {
-        return addBolt(bolt, 1);
-    }
-    
-    @Deprecated
-    public LinearDRPCInputDeclarer addBolt(IRichBolt bolt, Number parallelism) {
-        if(parallelism==null) parallelism = 1; 
-        Component component = new Component(bolt, parallelism.intValue());
-        _components.add(component);
-        return new InputDeclarerImpl(component);
-    }
-    
-    @Deprecated
-    public LinearDRPCInputDeclarer addBolt(IRichBolt bolt) {
-        return addBolt(bolt, null);
-    }
-    
-    public LinearDRPCInputDeclarer addBolt(IBasicBolt bolt, Number parallelism) {
-        return addBolt(new BasicBoltExecutor(bolt), parallelism);
-    }
-
-    public LinearDRPCInputDeclarer addBolt(IBasicBolt bolt) {
-        return addBolt(bolt, null);
-    }
-        
-    public StormTopology createLocalTopology(ILocalDRPC drpc) {
-        return createTopology(new DRPCSpout(_function, drpc));
-    }
-    
-    public StormTopology createRemoteTopology() {
-        return createTopology(new DRPCSpout(_function));
-    }
-    
-    
-    private StormTopology createTopology(DRPCSpout spout) {
-        final String SPOUT_ID = "spout";
-        final String PREPARE_ID = "prepare-request";
-        
-        TopologyBuilder builder = new TopologyBuilder();
-        builder.setSpout(SPOUT_ID, spout);
-        builder.setBolt(PREPARE_ID, new PrepareRequest())
-                .noneGrouping(SPOUT_ID);
-        int i=0;
-        for(; i<_components.size();i++) {
-            Component component = _components.get(i);
-            
-            Map<String, SourceArgs> source = new HashMap<String, SourceArgs>();
-            if (i==1) {
-                source.put(boltId(i-1), SourceArgs.single());
-            } else if (i>=2) {
-                source.put(boltId(i-1), SourceArgs.all());
-            }
-            IdStreamSpec idSpec = null;
-            if(i==_components.size()-1 && component.bolt instanceof FinishedCallback) {
-                idSpec = IdStreamSpec.makeDetectSpec(PREPARE_ID, PrepareRequest.ID_STREAM);
-            }
-            BoltDeclarer declarer = builder.setBolt(
-                    boltId(i),
-                    new CoordinatedBolt(component.bolt, source, idSpec),
-                    component.parallelism);
-            
-            for(Map<String, Object> conf: component.componentConfs) {
-                declarer.addConfigurations(conf);
-            }
-            
-            if(idSpec!=null) {
-                declarer.fieldsGrouping(idSpec.getGlobalStreamId().get_componentId(), PrepareRequest.ID_STREAM, new Fields("request"));
-            }
-            if(i==0 && component.declarations.isEmpty()) {
-                declarer.noneGrouping(PREPARE_ID, PrepareRequest.ARGS_STREAM);
-            } else {
-                String prevId;
-                if(i==0) {
-                    prevId = PREPARE_ID;
-                } else {
-                    prevId = boltId(i-1);
-                }
-                for(InputDeclaration declaration: component.declarations) {
-                    declaration.declare(prevId, declarer);
-                }
-            }
-            if(i>0) {
-                declarer.directGrouping(boltId(i-1), Constants.COORDINATED_STREAM_ID); 
-            }
-        }
-        
-        IRichBolt lastBolt = _components.get(_components.size()-1).bolt;
-        OutputFieldsGetter getter = new OutputFieldsGetter();
-        lastBolt.declareOutputFields(getter);
-        Map<String, StreamInfo> streams = getter.getFieldsDeclaration();
-        if(streams.size()!=1) {
-            throw new RuntimeException("Must declare exactly one stream from last bolt in LinearDRPCTopology");
-        }
-        String outputStream = streams.keySet().iterator().next();
-        List<String> fields = streams.get(outputStream).get_output_fields();
-        if(fields.size()!=2) {
-            throw new RuntimeException("Output stream of last component in LinearDRPCTopology must contain exactly two fields. The first should be the request id, and the second should be the result.");
-        }
-
-        builder.setBolt(boltId(i), new JoinResult(PREPARE_ID))
-                .fieldsGrouping(boltId(i-1), outputStream, new Fields(fields.get(0)))
-                .fieldsGrouping(PREPARE_ID, PrepareRequest.RETURN_STREAM, new Fields("request"));
-        i++;
-        builder.setBolt(boltId(i), new ReturnResults())
-                .noneGrouping(boltId(i-1));
-        return builder.createTopology();
-    }
-    
-    private static String boltId(int index) {
-        return "bolt" + index;
-    }
-    
-    private static class Component {
-        public IRichBolt bolt;
-        public int parallelism;
-        public List<Map<String, Object>> componentConfs = new ArrayList<>();
-        public List<InputDeclaration> declarations = new ArrayList<InputDeclaration>();
-        
-        public Component(IRichBolt bolt, int parallelism) {
-            this.bolt = bolt;
-            this.parallelism = parallelism;
-        }
-    }
-    
-    private static interface InputDeclaration {
-        public void declare(String prevComponent, InputDeclarer declarer);
-    }
-    
-    private static class InputDeclarerImpl extends BaseConfigurationDeclarer<LinearDRPCInputDeclarer> implements LinearDRPCInputDeclarer {
-        Component _component;
-        
-        public InputDeclarerImpl(Component component) {
-            _component = component;
-        }
-        
-        @Override
-        public LinearDRPCInputDeclarer fieldsGrouping(final Fields fields) {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(String prevComponent, InputDeclarer declarer) {
-                    declarer.fieldsGrouping(prevComponent, fields);
-                }                
-            });
-            return this;
-        }
-
-        @Override
-        public LinearDRPCInputDeclarer fieldsGrouping(final String streamId, final Fields fields) {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(String prevComponent, InputDeclarer declarer) {
-                    declarer.fieldsGrouping(prevComponent, streamId, fields);
-                }                
-            });
-            return this;
-        }
-
-        @Override
-        public LinearDRPCInputDeclarer globalGrouping() {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(String prevComponent, InputDeclarer declarer) {
-                    declarer.globalGrouping(prevComponent);
-                }                
-            });
-            return this;
-        }
-
-        @Override
-        public LinearDRPCInputDeclarer globalGrouping(final String streamId) {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(String prevComponent, InputDeclarer declarer) {
-                    declarer.globalGrouping(prevComponent, streamId);
-                }                
-            });
-            return this;
-        }
-
-        @Override
-        public LinearDRPCInputDeclarer shuffleGrouping() {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(String prevComponent, InputDeclarer declarer) {
-                    declarer.shuffleGrouping(prevComponent);
-                }                
-            });
-            return this;
-        }
-
-        @Override
-        public LinearDRPCInputDeclarer shuffleGrouping(final String streamId) {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(String prevComponent, InputDeclarer declarer) {
-                    declarer.shuffleGrouping(prevComponent, streamId);
-                }                
-            });
-            return this;
-        }
-
-        @Override
-        public LinearDRPCInputDeclarer localOrShuffleGrouping() {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(String prevComponent, InputDeclarer declarer) {
-                    declarer.localOrShuffleGrouping(prevComponent);
-                }                
-            });
-            return this;
-        }
-
-        @Override
-        public LinearDRPCInputDeclarer localOrShuffleGrouping(final String streamId) {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(String prevComponent, InputDeclarer declarer) {
-                    declarer.localOrShuffleGrouping(prevComponent, streamId);
-                }                
-            });
-            return this;
-        }
-        
-        @Override
-        public LinearDRPCInputDeclarer noneGrouping() {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(String prevComponent, InputDeclarer declarer) {
-                    declarer.noneGrouping(prevComponent);
-                }                
-            });
-            return this;
-        }
-
-        @Override
-        public LinearDRPCInputDeclarer noneGrouping(final String streamId) {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(String prevComponent, InputDeclarer declarer) {
-                    declarer.noneGrouping(prevComponent, streamId);
-                }                
-            });
-            return this;
-        }
-
-        @Override
-        public LinearDRPCInputDeclarer allGrouping() {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(String prevComponent, InputDeclarer declarer) {
-                    declarer.allGrouping(prevComponent);
-                }                
-            });
-            return this;
-        }
-
-        @Override
-        public LinearDRPCInputDeclarer allGrouping(final String streamId) {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(String prevComponent, InputDeclarer declarer) {
-                    declarer.allGrouping(prevComponent, streamId);
-                }                
-            });
-            return this;
-        }
-
-        @Override
-        public LinearDRPCInputDeclarer directGrouping() {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(String prevComponent, InputDeclarer declarer) {
-                    declarer.directGrouping(prevComponent);
-                }                
-            });
-            return this;
-        }
-
-        @Override
-        public LinearDRPCInputDeclarer directGrouping(final String streamId) {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(String prevComponent, InputDeclarer declarer) {
-                    declarer.directGrouping(prevComponent, streamId);
-                }                
-            });
-            return this;
-        }
-
-        @Override
-        public LinearDRPCInputDeclarer partialKeyGrouping(Fields fields) {
-            return customGrouping(new PartialKeyGrouping(fields));
-        }
-
-        @Override
-        public LinearDRPCInputDeclarer partialKeyGrouping(String streamId, Fields fields) {
-            return customGrouping(streamId, new PartialKeyGrouping(fields));
-        }
-
-        @Override
-        public LinearDRPCInputDeclarer customGrouping(final CustomStreamGrouping grouping) {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(String prevComponent, InputDeclarer declarer) {
-                    declarer.customGrouping(prevComponent, grouping);
-                }                
-            });
-            return this;
-        }
-
-        @Override
-        public LinearDRPCInputDeclarer customGrouping(final String streamId, final CustomStreamGrouping grouping) {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(String prevComponent, InputDeclarer declarer) {
-                    declarer.customGrouping(prevComponent, streamId, grouping);
-                }                
-            });
-            return this;
-        }
-        
-        private void addDeclaration(InputDeclaration declaration) {
-            _component.declarations.add(declaration);
-        }
-
-        @Override
-        public LinearDRPCInputDeclarer addConfigurations(Map<String, Object> conf) {
-            _component.componentConfs.add(conf);
-            return this;
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/drpc/PrepareRequest.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/drpc/PrepareRequest.java b/storm-core/src/jvm/backtype/storm/drpc/PrepareRequest.java
deleted file mode 100644
index bd32169..0000000
--- a/storm-core/src/jvm/backtype/storm/drpc/PrepareRequest.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.drpc;
-
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.BasicOutputCollector;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseBasicBolt;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
-import java.util.Map;
-import java.util.Random;
-import backtype.storm.utils.Utils;
-
-
-public class PrepareRequest extends BaseBasicBolt {
-    public static final String ARGS_STREAM = Utils.DEFAULT_STREAM_ID;
-    public static final String RETURN_STREAM = "ret";
-    public static final String ID_STREAM = "id";
-
-    Random rand;
-
-    @Override
-    public void prepare(Map map, TopologyContext context) {
-        rand = new Random();
-    }
-
-    @Override
-    public void execute(Tuple tuple, BasicOutputCollector collector) {
-        String args = tuple.getString(0);
-        String returnInfo = tuple.getString(1);
-        long requestId = rand.nextLong();
-        collector.emit(ARGS_STREAM, new Values(requestId, args));
-        collector.emit(RETURN_STREAM, new Values(requestId, returnInfo));
-        collector.emit(ID_STREAM, new Values(requestId));
-    }
-
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-        declarer.declareStream(ARGS_STREAM, new Fields("request", "args"));
-        declarer.declareStream(RETURN_STREAM, new Fields("request", "return"));
-        declarer.declareStream(ID_STREAM, new Fields("request"));
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/drpc/ReturnResults.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/drpc/ReturnResults.java b/storm-core/src/jvm/backtype/storm/drpc/ReturnResults.java
deleted file mode 100644
index b26508d..0000000
--- a/storm-core/src/jvm/backtype/storm/drpc/ReturnResults.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.drpc;
-
-import backtype.storm.Config;
-import backtype.storm.generated.DistributedRPCInvocations;
-import backtype.storm.generated.AuthorizationException;
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseRichBolt;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.utils.ServiceRegistry;
-import backtype.storm.utils.Utils;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.thrift.TException;
-import org.apache.thrift.transport.TTransportException;
-import org.json.simple.JSONValue;
-
-
-public class ReturnResults extends BaseRichBolt {
-    //ANY CHANGE TO THIS CODE MUST BE SERIALIZABLE COMPATIBLE OR THERE WILL BE PROBLEMS
-    static final long serialVersionUID = -774882142710631591L;
-
-    public static final Logger LOG = LoggerFactory.getLogger(ReturnResults.class);
-    OutputCollector _collector;
-    boolean local;
-    Map _conf; 
-    Map<List, DRPCInvocationsClient> _clients = new HashMap<List, DRPCInvocationsClient>();
-
-    @Override
-    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
-        _conf = stormConf;
-        _collector = collector;
-        local = stormConf.get(Config.STORM_CLUSTER_MODE).equals("local");
-    }
-
-    @Override
-    public void execute(Tuple input) {
-        String result = (String) input.getValue(0);
-        String returnInfo = (String) input.getValue(1);
-        if(returnInfo!=null) {
-            Map retMap = (Map) JSONValue.parse(returnInfo);
-            final String host = (String) retMap.get("host");
-            final int port = Utils.getInt(retMap.get("port"));
-            String id = (String) retMap.get("id");
-            DistributedRPCInvocations.Iface client;
-            if(local) {
-                client = (DistributedRPCInvocations.Iface) ServiceRegistry.getService(host);
-            } else {
-                List server = new ArrayList() {{
-                    add(host);
-                    add(port);
-                }};
-            
-                if(!_clients.containsKey(server)) {
-                    try {
-                        _clients.put(server, new DRPCInvocationsClient(_conf, host, port));
-                    } catch (TTransportException ex) {
-                        throw new RuntimeException(ex);
-                    }
-                }
-                client = _clients.get(server);
-            }
- 
-            try {
-                client.result(id, result);
-                _collector.ack(input);
-            } catch (AuthorizationException aze) {
-                LOG.error("Not authorized to return results to DRPC server", aze);
-                _collector.fail(input);
-                if (client instanceof DRPCInvocationsClient) {
-                    try {
-                        LOG.info("reconnecting... ");
-                        ((DRPCInvocationsClient)client).reconnectClient(); //Blocking call
-                    } catch (TException e2) {
-                        throw new RuntimeException(e2);
-                    }
-                }
-            } catch(TException e) {
-                LOG.error("Failed to return results to DRPC server", e);
-                _collector.fail(input);
-                if (client instanceof DRPCInvocationsClient) {
-                    try {
-                        LOG.info("reconnecting... ");
-                        ((DRPCInvocationsClient)client).reconnectClient(); //Blocking call
-                    } catch (TException e2) {
-                        throw new RuntimeException(e2);
-                    }
-                }
-            }
-        }
-    }    
-
-    @Override
-    public void cleanup() {
-        for(DRPCInvocationsClient c: _clients.values()) {
-            c.close();
-        }
-    }
-
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-    }
-}
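
ReturnResults consumes [result, returnInfo] pairs, where returnInfo is the JSON-encoded return address ("host", "port", "id") that the DRPC spout attaches to each request, and uses it to hand the result back to the right DRPC server. A small sketch of that contract, using the same json-simple parsing the bolt uses; the host, port, and id values here are made up for illustration:

    import java.util.Map;
    import org.json.simple.JSONValue;

    public class ReturnInfoExample {
        public static void main(String[] args) {
            // Shape of the second tuple field ReturnResults expects:
            String returnInfo = "{\"host\":\"drpc1.example.com\",\"port\":3773,\"id\":\"42\"}";
            Map retMap = (Map) JSONValue.parse(returnInfo);
            // The bolt looks up (or creates) a DRPCInvocationsClient for host:port
            // and calls client.result(id, result) to complete the request.
            System.out.println(retMap.get("host") + ":" + retMap.get("port")
                               + " -> request " + retMap.get("id"));
        }
    }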

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/generated/AccessControl.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/generated/AccessControl.java b/storm-core/src/jvm/backtype/storm/generated/AccessControl.java
deleted file mode 100644
index 2209168..0000000
--- a/storm-core/src/jvm/backtype/storm/generated/AccessControl.java
+++ /dev/null
@@ -1,627 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Autogenerated by Thrift Compiler (0.9.3)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- *  @generated
- */
-package backtype.storm.generated;
-
-import org.apache.thrift.scheme.IScheme;
-import org.apache.thrift.scheme.SchemeFactory;
-import org.apache.thrift.scheme.StandardScheme;
-
-import org.apache.thrift.scheme.TupleScheme;
-import org.apache.thrift.protocol.TTupleProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.TException;
-import org.apache.thrift.async.AsyncMethodCallback;
-import org.apache.thrift.server.AbstractNonblockingServer.*;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.EnumMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.EnumSet;
-import java.util.Collections;
-import java.util.BitSet;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import javax.annotation.Generated;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
-public class AccessControl implements org.apache.thrift.TBase<AccessControl, AccessControl._Fields>, java.io.Serializable, Cloneable, Comparable<AccessControl> {
-  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AccessControl");
-
-  private static final org.apache.thrift.protocol.TField TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("type", org.apache.thrift.protocol.TType.I32, (short)1);
-  private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)2);
-  private static final org.apache.thrift.protocol.TField ACCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("access", org.apache.thrift.protocol.TType.I32, (short)3);
-
-  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
-  static {
-    schemes.put(StandardScheme.class, new AccessControlStandardSchemeFactory());
-    schemes.put(TupleScheme.class, new AccessControlTupleSchemeFactory());
-  }
-
-  private AccessControlType type; // required
-  private String name; // optional
-  private int access; // required
-
-  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    /**
-     * 
-     * @see AccessControlType
-     */
-    TYPE((short)1, "type"),
-    NAME((short)2, "name"),
-    ACCESS((short)3, "access");
-
-    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
-
-    static {
-      for (_Fields field : EnumSet.allOf(_Fields.class)) {
-        byName.put(field.getFieldName(), field);
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, or null if it's not found.
-     */
-    public static _Fields findByThriftId(int fieldId) {
-      switch(fieldId) {
-        case 1: // TYPE
-          return TYPE;
-        case 2: // NAME
-          return NAME;
-        case 3: // ACCESS
-          return ACCESS;
-        default:
-          return null;
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, throwing an exception
-     * if it is not found.
-     */
-    public static _Fields findByThriftIdOrThrow(int fieldId) {
-      _Fields fields = findByThriftId(fieldId);
-      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-      return fields;
-    }
-
-    /**
-     * Find the _Fields constant that matches name, or null if it's not found.
-     */
-    public static _Fields findByName(String name) {
-      return byName.get(name);
-    }
-
-    private final short _thriftId;
-    private final String _fieldName;
-
-    _Fields(short thriftId, String fieldName) {
-      _thriftId = thriftId;
-      _fieldName = fieldName;
-    }
-
-    public short getThriftFieldId() {
-      return _thriftId;
-    }
-
-    public String getFieldName() {
-      return _fieldName;
-    }
-  }
-
-  // isset id assignments
-  private static final int __ACCESS_ISSET_ID = 0;
-  private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = {_Fields.NAME};
-  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
-  static {
-    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.TYPE, new org.apache.thrift.meta_data.FieldMetaData("type", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, AccessControlType.class)));
-    tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.ACCESS, new org.apache.thrift.meta_data.FieldMetaData("access", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
-    metaDataMap = Collections.unmodifiableMap(tmpMap);
-    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AccessControl.class, metaDataMap);
-  }
-
-  public AccessControl() {
-  }
-
-  public AccessControl(
-    AccessControlType type,
-    int access)
-  {
-    this();
-    this.type = type;
-    this.access = access;
-    set_access_isSet(true);
-  }
-
-  /**
-   * Performs a deep copy on <i>other</i>.
-   */
-  public AccessControl(AccessControl other) {
-    __isset_bitfield = other.__isset_bitfield;
-    if (other.is_set_type()) {
-      this.type = other.type;
-    }
-    if (other.is_set_name()) {
-      this.name = other.name;
-    }
-    this.access = other.access;
-  }
-
-  public AccessControl deepCopy() {
-    return new AccessControl(this);
-  }
-
-  @Override
-  public void clear() {
-    this.type = null;
-    this.name = null;
-    set_access_isSet(false);
-    this.access = 0;
-  }
-
-  /**
-   * 
-   * @see AccessControlType
-   */
-  public AccessControlType get_type() {
-    return this.type;
-  }
-
-  /**
-   * 
-   * @see AccessControlType
-   */
-  public void set_type(AccessControlType type) {
-    this.type = type;
-  }
-
-  public void unset_type() {
-    this.type = null;
-  }
-
-  /** Returns true if field type is set (has been assigned a value) and false otherwise */
-  public boolean is_set_type() {
-    return this.type != null;
-  }
-
-  public void set_type_isSet(boolean value) {
-    if (!value) {
-      this.type = null;
-    }
-  }
-
-  public String get_name() {
-    return this.name;
-  }
-
-  public void set_name(String name) {
-    this.name = name;
-  }
-
-  public void unset_name() {
-    this.name = null;
-  }
-
-  /** Returns true if field name is set (has been assigned a value) and false otherwise */
-  public boolean is_set_name() {
-    return this.name != null;
-  }
-
-  public void set_name_isSet(boolean value) {
-    if (!value) {
-      this.name = null;
-    }
-  }
-
-  public int get_access() {
-    return this.access;
-  }
-
-  public void set_access(int access) {
-    this.access = access;
-    set_access_isSet(true);
-  }
-
-  public void unset_access() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ACCESS_ISSET_ID);
-  }
-
-  /** Returns true if field access is set (has been assigned a value) and false otherwise */
-  public boolean is_set_access() {
-    return EncodingUtils.testBit(__isset_bitfield, __ACCESS_ISSET_ID);
-  }
-
-  public void set_access_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ACCESS_ISSET_ID, value);
-  }
-
-  public void setFieldValue(_Fields field, Object value) {
-    switch (field) {
-    case TYPE:
-      if (value == null) {
-        unset_type();
-      } else {
-        set_type((AccessControlType)value);
-      }
-      break;
-
-    case NAME:
-      if (value == null) {
-        unset_name();
-      } else {
-        set_name((String)value);
-      }
-      break;
-
-    case ACCESS:
-      if (value == null) {
-        unset_access();
-      } else {
-        set_access((Integer)value);
-      }
-      break;
-
-    }
-  }
-
-  public Object getFieldValue(_Fields field) {
-    switch (field) {
-    case TYPE:
-      return get_type();
-
-    case NAME:
-      return get_name();
-
-    case ACCESS:
-      return get_access();
-
-    }
-    throw new IllegalStateException();
-  }
-
-  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
-  public boolean isSet(_Fields field) {
-    if (field == null) {
-      throw new IllegalArgumentException();
-    }
-
-    switch (field) {
-    case TYPE:
-      return is_set_type();
-    case NAME:
-      return is_set_name();
-    case ACCESS:
-      return is_set_access();
-    }
-    throw new IllegalStateException();
-  }
-
-  @Override
-  public boolean equals(Object that) {
-    if (that == null)
-      return false;
-    if (that instanceof AccessControl)
-      return this.equals((AccessControl)that);
-    return false;
-  }
-
-  public boolean equals(AccessControl that) {
-    if (that == null)
-      return false;
-
-    boolean this_present_type = true && this.is_set_type();
-    boolean that_present_type = true && that.is_set_type();
-    if (this_present_type || that_present_type) {
-      if (!(this_present_type && that_present_type))
-        return false;
-      if (!this.type.equals(that.type))
-        return false;
-    }
-
-    boolean this_present_name = true && this.is_set_name();
-    boolean that_present_name = true && that.is_set_name();
-    if (this_present_name || that_present_name) {
-      if (!(this_present_name && that_present_name))
-        return false;
-      if (!this.name.equals(that.name))
-        return false;
-    }
-
-    boolean this_present_access = true;
-    boolean that_present_access = true;
-    if (this_present_access || that_present_access) {
-      if (!(this_present_access && that_present_access))
-        return false;
-      if (this.access != that.access)
-        return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    List<Object> list = new ArrayList<Object>();
-
-    boolean present_type = true && (is_set_type());
-    list.add(present_type);
-    if (present_type)
-      list.add(type.getValue());
-
-    boolean present_name = true && (is_set_name());
-    list.add(present_name);
-    if (present_name)
-      list.add(name);
-
-    boolean present_access = true;
-    list.add(present_access);
-    if (present_access)
-      list.add(access);
-
-    return list.hashCode();
-  }
-
-  @Override
-  public int compareTo(AccessControl other) {
-    if (!getClass().equals(other.getClass())) {
-      return getClass().getName().compareTo(other.getClass().getName());
-    }
-
-    int lastComparison = 0;
-
-    lastComparison = Boolean.valueOf(is_set_type()).compareTo(other.is_set_type());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_type()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.type, other.type);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_name()).compareTo(other.is_set_name());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_name()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_access()).compareTo(other.is_set_access());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_access()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.access, other.access);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    return 0;
-  }
-
-  public _Fields fieldForId(int fieldId) {
-    return _Fields.findByThriftId(fieldId);
-  }
-
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
-  }
-
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("AccessControl(");
-    boolean first = true;
-
-    sb.append("type:");
-    if (this.type == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.type);
-    }
-    first = false;
-    if (is_set_name()) {
-      if (!first) sb.append(", ");
-      sb.append("name:");
-      if (this.name == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.name);
-      }
-      first = false;
-    }
-    if (!first) sb.append(", ");
-    sb.append("access:");
-    sb.append(this.access);
-    first = false;
-    sb.append(")");
-    return sb.toString();
-  }
-
-  public void validate() throws org.apache.thrift.TException {
-    // check for required fields
-    if (!is_set_type()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'type' is unset! Struct:" + toString());
-    }
-
-    if (!is_set_access()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'access' is unset! Struct:" + toString());
-    }
-
-    // check for sub-struct validity
-  }
-
-  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
-    try {
-      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
-    try {
-      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
-      __isset_bitfield = 0;
-      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private static class AccessControlStandardSchemeFactory implements SchemeFactory {
-    public AccessControlStandardScheme getScheme() {
-      return new AccessControlStandardScheme();
-    }
-  }
-
-  private static class AccessControlStandardScheme extends StandardScheme<AccessControl> {
-
-    public void read(org.apache.thrift.protocol.TProtocol iprot, AccessControl struct) throws org.apache.thrift.TException {
-      org.apache.thrift.protocol.TField schemeField;
-      iprot.readStructBegin();
-      while (true)
-      {
-        schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
-          break;
-        }
-        switch (schemeField.id) {
-          case 1: // TYPE
-            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
-              struct.type = backtype.storm.generated.AccessControlType.findByValue(iprot.readI32());
-              struct.set_type_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 2: // NAME
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.name = iprot.readString();
-              struct.set_name_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 3: // ACCESS
-            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
-              struct.access = iprot.readI32();
-              struct.set_access_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          default:
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-        }
-        iprot.readFieldEnd();
-      }
-      iprot.readStructEnd();
-      struct.validate();
-    }
-
-    public void write(org.apache.thrift.protocol.TProtocol oprot, AccessControl struct) throws org.apache.thrift.TException {
-      struct.validate();
-
-      oprot.writeStructBegin(STRUCT_DESC);
-      if (struct.type != null) {
-        oprot.writeFieldBegin(TYPE_FIELD_DESC);
-        oprot.writeI32(struct.type.getValue());
-        oprot.writeFieldEnd();
-      }
-      if (struct.name != null) {
-        if (struct.is_set_name()) {
-          oprot.writeFieldBegin(NAME_FIELD_DESC);
-          oprot.writeString(struct.name);
-          oprot.writeFieldEnd();
-        }
-      }
-      oprot.writeFieldBegin(ACCESS_FIELD_DESC);
-      oprot.writeI32(struct.access);
-      oprot.writeFieldEnd();
-      oprot.writeFieldStop();
-      oprot.writeStructEnd();
-    }
-
-  }
-
-  private static class AccessControlTupleSchemeFactory implements SchemeFactory {
-    public AccessControlTupleScheme getScheme() {
-      return new AccessControlTupleScheme();
-    }
-  }
-
-  private static class AccessControlTupleScheme extends TupleScheme<AccessControl> {
-
-    @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, AccessControl struct) throws org.apache.thrift.TException {
-      TTupleProtocol oprot = (TTupleProtocol) prot;
-      oprot.writeI32(struct.type.getValue());
-      oprot.writeI32(struct.access);
-      BitSet optionals = new BitSet();
-      if (struct.is_set_name()) {
-        optionals.set(0);
-      }
-      oprot.writeBitSet(optionals, 1);
-      if (struct.is_set_name()) {
-        oprot.writeString(struct.name);
-      }
-    }
-
-    @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, AccessControl struct) throws org.apache.thrift.TException {
-      TTupleProtocol iprot = (TTupleProtocol) prot;
-      struct.type = backtype.storm.generated.AccessControlType.findByValue(iprot.readI32());
-      struct.set_type_isSet(true);
-      struct.access = iprot.readI32();
-      struct.set_access_isSet(true);
-      BitSet incoming = iprot.readBitSet(1);
-      if (incoming.get(0)) {
-        struct.name = iprot.readString();
-        struct.set_name_isSet(true);
-      }
-    }
-  }
-
-}
-
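
AccessControl is a plain Thrift struct (required type and access, optional name), and the generated API shown above is enough to build and validate one. A minimal usage sketch against the pre-migration backtype.storm package this diff removes; the access value is just an illustrative bitmask, its bit meanings are defined by Storm's blobstore ACL handling rather than by this struct:

    import backtype.storm.generated.AccessControl;
    import backtype.storm.generated.AccessControlType;

    public class AccessControlExample {
        public static void main(String[] args) throws Exception {
            AccessControl acl = new AccessControl(AccessControlType.USER, 0x7);
            acl.set_name("alice");     // optional field, only serialized when set
            acl.validate();            // passes: required 'type' and 'access' are set
            System.out.println(acl);   // e.g. AccessControl(type:USER, name:alice, access:7)
        }
    }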

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/generated/AccessControlType.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/generated/AccessControlType.java b/storm-core/src/jvm/backtype/storm/generated/AccessControlType.java
deleted file mode 100644
index 3a9aa70..0000000
--- a/storm-core/src/jvm/backtype/storm/generated/AccessControlType.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Autogenerated by Thrift Compiler (0.9.3)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- *  @generated
- */
-package backtype.storm.generated;
-
-
-import java.util.Map;
-import java.util.HashMap;
-import org.apache.thrift.TEnum;
-
-public enum AccessControlType implements org.apache.thrift.TEnum {
-  OTHER(1),
-  USER(2);
-
-  private final int value;
-
-  private AccessControlType(int value) {
-    this.value = value;
-  }
-
-  /**
-   * Get the integer value of this enum value, as defined in the Thrift IDL.
-   */
-  public int getValue() {
-    return value;
-  }
-
-  /**
-   * Find the enum type by its integer value, as defined in the Thrift IDL.
-   * @return null if the value is not found.
-   */
-  public static AccessControlType findByValue(int value) { 
-    switch (value) {
-      case 1:
-        return OTHER;
-      case 2:
-        return USER;
-      default:
-        return null;
-    }
-  }
-}
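
findByValue returns null rather than throwing for wire values the enum does not know, so callers have to handle the null case explicitly:

    import backtype.storm.generated.AccessControlType;

    public class AccessControlTypeExample {
        public static void main(String[] args) {
            System.out.println(AccessControlType.findByValue(2));   // USER
            System.out.println(AccessControlType.findByValue(99));  // null -> unknown value
        }
    }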

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/generated/AlreadyAliveException.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/generated/AlreadyAliveException.java b/storm-core/src/jvm/backtype/storm/generated/AlreadyAliveException.java
deleted file mode 100644
index eecf044..0000000
--- a/storm-core/src/jvm/backtype/storm/generated/AlreadyAliveException.java
+++ /dev/null
@@ -1,406 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Autogenerated by Thrift Compiler (0.9.3)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- *  @generated
- */
-package backtype.storm.generated;
-
-import org.apache.thrift.scheme.IScheme;
-import org.apache.thrift.scheme.SchemeFactory;
-import org.apache.thrift.scheme.StandardScheme;
-
-import org.apache.thrift.scheme.TupleScheme;
-import org.apache.thrift.protocol.TTupleProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.TException;
-import org.apache.thrift.async.AsyncMethodCallback;
-import org.apache.thrift.server.AbstractNonblockingServer.*;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.EnumMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.EnumSet;
-import java.util.Collections;
-import java.util.BitSet;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import javax.annotation.Generated;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
-public class AlreadyAliveException extends TException implements org.apache.thrift.TBase<AlreadyAliveException, AlreadyAliveException._Fields>, java.io.Serializable, Cloneable, Comparable<AlreadyAliveException> {
-  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AlreadyAliveException");
-
-  private static final org.apache.thrift.protocol.TField MSG_FIELD_DESC = new org.apache.thrift.protocol.TField("msg", org.apache.thrift.protocol.TType.STRING, (short)1);
-
-  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
-  static {
-    schemes.put(StandardScheme.class, new AlreadyAliveExceptionStandardSchemeFactory());
-    schemes.put(TupleScheme.class, new AlreadyAliveExceptionTupleSchemeFactory());
-  }
-
-  private String msg; // required
-
-  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    MSG((short)1, "msg");
-
-    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
-
-    static {
-      for (_Fields field : EnumSet.allOf(_Fields.class)) {
-        byName.put(field.getFieldName(), field);
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, or null if it's not found.
-     */
-    public static _Fields findByThriftId(int fieldId) {
-      switch(fieldId) {
-        case 1: // MSG
-          return MSG;
-        default:
-          return null;
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, throwing an exception
-     * if it is not found.
-     */
-    public static _Fields findByThriftIdOrThrow(int fieldId) {
-      _Fields fields = findByThriftId(fieldId);
-      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-      return fields;
-    }
-
-    /**
-     * Find the _Fields constant that matches name, or null if it's not found.
-     */
-    public static _Fields findByName(String name) {
-      return byName.get(name);
-    }
-
-    private final short _thriftId;
-    private final String _fieldName;
-
-    _Fields(short thriftId, String fieldName) {
-      _thriftId = thriftId;
-      _fieldName = fieldName;
-    }
-
-    public short getThriftFieldId() {
-      return _thriftId;
-    }
-
-    public String getFieldName() {
-      return _fieldName;
-    }
-  }
-
-  // isset id assignments
-  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
-  static {
-    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.MSG, new org.apache.thrift.meta_data.FieldMetaData("msg", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    metaDataMap = Collections.unmodifiableMap(tmpMap);
-    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AlreadyAliveException.class, metaDataMap);
-  }
-
-  public AlreadyAliveException() {
-  }
-
-  public AlreadyAliveException(
-    String msg)
-  {
-    this();
-    this.msg = msg;
-  }
-
-  /**
-   * Performs a deep copy on <i>other</i>.
-   */
-  public AlreadyAliveException(AlreadyAliveException other) {
-    if (other.is_set_msg()) {
-      this.msg = other.msg;
-    }
-  }
-
-  public AlreadyAliveException deepCopy() {
-    return new AlreadyAliveException(this);
-  }
-
-  @Override
-  public void clear() {
-    this.msg = null;
-  }
-
-  public String get_msg() {
-    return this.msg;
-  }
-
-  public void set_msg(String msg) {
-    this.msg = msg;
-  }
-
-  public void unset_msg() {
-    this.msg = null;
-  }
-
-  /** Returns true if field msg is set (has been assigned a value) and false otherwise */
-  public boolean is_set_msg() {
-    return this.msg != null;
-  }
-
-  public void set_msg_isSet(boolean value) {
-    if (!value) {
-      this.msg = null;
-    }
-  }
-
-  public void setFieldValue(_Fields field, Object value) {
-    switch (field) {
-    case MSG:
-      if (value == null) {
-        unset_msg();
-      } else {
-        set_msg((String)value);
-      }
-      break;
-
-    }
-  }
-
-  public Object getFieldValue(_Fields field) {
-    switch (field) {
-    case MSG:
-      return get_msg();
-
-    }
-    throw new IllegalStateException();
-  }
-
-  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
-  public boolean isSet(_Fields field) {
-    if (field == null) {
-      throw new IllegalArgumentException();
-    }
-
-    switch (field) {
-    case MSG:
-      return is_set_msg();
-    }
-    throw new IllegalStateException();
-  }
-
-  @Override
-  public boolean equals(Object that) {
-    if (that == null)
-      return false;
-    if (that instanceof AlreadyAliveException)
-      return this.equals((AlreadyAliveException)that);
-    return false;
-  }
-
-  public boolean equals(AlreadyAliveException that) {
-    if (that == null)
-      return false;
-
-    boolean this_present_msg = true && this.is_set_msg();
-    boolean that_present_msg = true && that.is_set_msg();
-    if (this_present_msg || that_present_msg) {
-      if (!(this_present_msg && that_present_msg))
-        return false;
-      if (!this.msg.equals(that.msg))
-        return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    List<Object> list = new ArrayList<Object>();
-
-    boolean present_msg = true && (is_set_msg());
-    list.add(present_msg);
-    if (present_msg)
-      list.add(msg);
-
-    return list.hashCode();
-  }
-
-  @Override
-  public int compareTo(AlreadyAliveException other) {
-    if (!getClass().equals(other.getClass())) {
-      return getClass().getName().compareTo(other.getClass().getName());
-    }
-
-    int lastComparison = 0;
-
-    lastComparison = Boolean.valueOf(is_set_msg()).compareTo(other.is_set_msg());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_msg()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.msg, other.msg);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    return 0;
-  }
-
-  public _Fields fieldForId(int fieldId) {
-    return _Fields.findByThriftId(fieldId);
-  }
-
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
-  }
-
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("AlreadyAliveException(");
-    boolean first = true;
-
-    sb.append("msg:");
-    if (this.msg == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.msg);
-    }
-    first = false;
-    sb.append(")");
-    return sb.toString();
-  }
-
-  public void validate() throws org.apache.thrift.TException {
-    // check for required fields
-    if (!is_set_msg()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'msg' is unset! Struct:" + toString());
-    }
-
-    // check for sub-struct validity
-  }
-
-  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
-    try {
-      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
-    try {
-      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private static class AlreadyAliveExceptionStandardSchemeFactory implements SchemeFactory {
-    public AlreadyAliveExceptionStandardScheme getScheme() {
-      return new AlreadyAliveExceptionStandardScheme();
-    }
-  }
-
-  private static class AlreadyAliveExceptionStandardScheme extends StandardScheme<AlreadyAliveException> {
-
-    public void read(org.apache.thrift.protocol.TProtocol iprot, AlreadyAliveException struct) throws org.apache.thrift.TException {
-      org.apache.thrift.protocol.TField schemeField;
-      iprot.readStructBegin();
-      while (true)
-      {
-        schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
-          break;
-        }
-        switch (schemeField.id) {
-          case 1: // MSG
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.msg = iprot.readString();
-              struct.set_msg_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          default:
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-        }
-        iprot.readFieldEnd();
-      }
-      iprot.readStructEnd();
-      struct.validate();
-    }
-
-    public void write(org.apache.thrift.protocol.TProtocol oprot, AlreadyAliveException struct) throws org.apache.thrift.TException {
-      struct.validate();
-
-      oprot.writeStructBegin(STRUCT_DESC);
-      if (struct.msg != null) {
-        oprot.writeFieldBegin(MSG_FIELD_DESC);
-        oprot.writeString(struct.msg);
-        oprot.writeFieldEnd();
-      }
-      oprot.writeFieldStop();
-      oprot.writeStructEnd();
-    }
-
-  }
-
-  private static class AlreadyAliveExceptionTupleSchemeFactory implements SchemeFactory {
-    public AlreadyAliveExceptionTupleScheme getScheme() {
-      return new AlreadyAliveExceptionTupleScheme();
-    }
-  }
-
-  private static class AlreadyAliveExceptionTupleScheme extends TupleScheme<AlreadyAliveException> {
-
-    @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, AlreadyAliveException struct) throws org.apache.thrift.TException {
-      TTupleProtocol oprot = (TTupleProtocol) prot;
-      oprot.writeString(struct.msg);
-    }
-
-    @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, AlreadyAliveException struct) throws org.apache.thrift.TException {
-      TTupleProtocol iprot = (TTupleProtocol) prot;
-      struct.msg = iprot.readString();
-      struct.set_msg_isSet(true);
-    }
-  }
-
-}
-
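
AlreadyAliveException is the Thrift exception Nimbus raises when a topology is submitted under a name that is already running. A minimal sketch of where it typically surfaces; StormSubmitter is not part of this diff, so treat the exact call below as an assumption about its API:

    import backtype.storm.Config;
    import backtype.storm.StormSubmitter;
    import backtype.storm.generated.AlreadyAliveException;
    import backtype.storm.generated.StormTopology;

    public class SubmitExample {
        public static void submit(String name, StormTopology topology) throws Exception {
            try {
                StormSubmitter.submitTopology(name, new Config(), topology);
            } catch (AlreadyAliveException e) {
                // get_msg() is the generated accessor for the required 'msg' field.
                System.err.println("Topology already running: " + e.get_msg());
            }
        }
    }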


http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/daemon/drpc.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/daemon/drpc.clj b/storm-core/src/clj/backtype/storm/daemon/drpc.clj
deleted file mode 100644
index 7ffb7d8..0000000
--- a/storm-core/src/clj/backtype/storm/daemon/drpc.clj
+++ /dev/null
@@ -1,274 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-
-(ns backtype.storm.daemon.drpc
-  (:import [backtype.storm.security.auth AuthUtils ThriftServer ThriftConnectionType ReqContext])
-  (:import [backtype.storm.security.auth.authorizer DRPCAuthorizerBase])
-  (:import [backtype.storm.generated DistributedRPC DistributedRPC$Iface DistributedRPC$Processor
-            DRPCRequest DRPCExecutionException DistributedRPCInvocations DistributedRPCInvocations$Iface
-            DistributedRPCInvocations$Processor])
-  (:import [java.util.concurrent Semaphore ConcurrentLinkedQueue
-            ThreadPoolExecutor ArrayBlockingQueue TimeUnit])
-  (:import [backtype.storm.daemon Shutdownable])
-  (:import [java.net InetAddress])
-  (:import [backtype.storm.generated AuthorizationException]
-           [backtype.storm.utils VersionInfo])
-  (:use [backtype.storm config log util])
-  (:use [backtype.storm.daemon common])
-  (:use [backtype.storm.ui helpers])
-  (:use compojure.core)
-  (:use ring.middleware.reload)
-  (:require [compojure.handler :as handler])
-  (:require [metrics.meters :refer [defmeter mark!]])
-  (:gen-class))
-
-(defmeter drpc:num-execute-http-requests)
-(defmeter drpc:num-execute-calls)
-(defmeter drpc:num-result-calls)
-(defmeter drpc:num-failRequest-calls)
-(defmeter drpc:num-fetchRequest-calls)
-(defmeter drpc:num-shutdown-calls)
-
-(def STORM-VERSION (VersionInfo/getVersion))
-
-(defn timeout-check-secs [] 5)
-
-(defn acquire-queue [queues-atom function]
-  (swap! queues-atom
-    (fn [amap]
-      (if-not (amap function)
-        (assoc amap function (ConcurrentLinkedQueue.))
-        amap)))
-  (@queues-atom function))
-
-(defn check-authorization
-  ([aclHandler mapping operation context]
-    (if (not-nil? context)
-      (log-thrift-access (.requestID context) (.remoteAddress context) (.principal context) operation))
-    (if aclHandler
-      (let [context (or context (ReqContext/context))]
-        (if-not (.permit aclHandler context operation mapping)
-          (let [principal (.principal context)
-                user (if principal (.getName principal) "unknown")]
-              (throw (AuthorizationException.
-                       (str "DRPC request '" operation "' for '"
-                            user "' user is not authorized"))))))))
-  ([aclHandler mapping operation]
-    (check-authorization aclHandler mapping operation (ReqContext/context))))
-
-;; TODO: change this to use TimeCacheMap
-(defn service-handler [conf]
-  (let [drpc-acl-handler (mk-authorization-handler (conf DRPC-AUTHORIZER) conf)
-        ctr (atom 0)
-        id->sem (atom {})
-        id->result (atom {})
-        id->start (atom {})
-        id->function (atom {})
-        id->request (atom {})
-        request-queues (atom {})
-        cleanup (fn [id] (swap! id->sem dissoc id)
-                  (swap! id->result dissoc id)
-                  (swap! id->function dissoc id)
-                  (swap! id->request dissoc id)
-                  (swap! id->start dissoc id))
-        my-ip (.getHostAddress (InetAddress/getLocalHost))
-        clear-thread (async-loop
-                       (fn []
-                         (doseq [[id start] @id->start]
-                           (when (> (time-delta start) (conf DRPC-REQUEST-TIMEOUT-SECS))
-                             (when-let [sem (@id->sem id)]
-                               (.remove (acquire-queue request-queues (@id->function id)) (@id->request id))
-                               (log-warn "Timeout DRPC request id: " id " start at " start)
-                               (.release sem))
-                             (cleanup id)))
-                         (timeout-check-secs)))]
-    (reify DistributedRPC$Iface
-      (^String execute
-        [this ^String function ^String args]
-        (mark! drpc:num-execute-calls)
-        (log-debug "Received DRPC request for " function " (" args ") at " (System/currentTimeMillis))
-        (check-authorization drpc-acl-handler
-                             {DRPCAuthorizerBase/FUNCTION_NAME function}
-                             "execute")
-        (let [id (str (swap! ctr (fn [v] (mod (inc v) 1000000000))))
-              ^Semaphore sem (Semaphore. 0)
-              req (DRPCRequest. args id)
-              ^ConcurrentLinkedQueue queue (acquire-queue request-queues function)]
-          (swap! id->start assoc id (current-time-secs))
-          (swap! id->sem assoc id sem)
-          (swap! id->function assoc id function)
-          (swap! id->request assoc id req)
-          (.add queue req)
-          (log-debug "Waiting for DRPC result for " function " " args " at " (System/currentTimeMillis))
-          (.acquire sem)
-          (log-debug "Acquired DRPC result for " function " " args " at " (System/currentTimeMillis))
-          (let [result (@id->result id)]
-            (cleanup id)
-            (log-debug "Returning DRPC result for " function " " args " at " (System/currentTimeMillis))
-            (if (instance? DRPCExecutionException result)
-              (throw result)
-              (if (nil? result)
-                (throw (DRPCExecutionException. "Request timed out"))
-                result)))))
-
-      DistributedRPCInvocations$Iface
-
-      (^void result
-        [this ^String id ^String result]
-        (mark! drpc:num-result-calls)
-        (when-let [func (@id->function id)]
-          (check-authorization drpc-acl-handler
-                               {DRPCAuthorizerBase/FUNCTION_NAME func}
-                               "result")
-          (let [^Semaphore sem (@id->sem id)]
-            (log-debug "Received result " result " for " id " at " (System/currentTimeMillis))
-            (when sem
-              (swap! id->result assoc id result)
-              (.release sem)
-              ))))
-
-      (^void failRequest
-        [this ^String id]
-        (mark! drpc:num-failRequest-calls)
-        (when-let [func (@id->function id)]
-          (check-authorization drpc-acl-handler
-                               {DRPCAuthorizerBase/FUNCTION_NAME func}
-                               "failRequest")
-          (let [^Semaphore sem (@id->sem id)]
-            (when sem
-              (swap! id->result assoc id (DRPCExecutionException. "Request failed"))
-              (.release sem)))))
-
-      (^DRPCRequest fetchRequest
-        [this ^String func]
-        (mark! drpc:num-fetchRequest-calls)
-        (check-authorization drpc-acl-handler
-                             {DRPCAuthorizerBase/FUNCTION_NAME func}
-                             "fetchRequest")
-        (let [^ConcurrentLinkedQueue queue (acquire-queue request-queues func)
-              ret (.poll queue)]
-          (if ret
-            (do (log-debug "Fetched request for " func " at " (System/currentTimeMillis))
-              ret)
-            (DRPCRequest. "" ""))))
-
-      Shutdownable
-
-      (shutdown
-        [this]
-        (mark! drpc:num-shutdown-calls)
-        (.interrupt clear-thread)))))
-
-(defn handle-request [handler]
-  (fn [request]
-    (handler request)))
-
-(defn populate-context!
-  "Populate the Storm RequestContext from an servlet-request. This should be called in each handler"
-  [http-creds-handler servlet-request]
-    (when http-creds-handler
-      (.populateContext http-creds-handler (ReqContext/context) servlet-request)))
-
-(defn webapp [handler http-creds-handler]
-  (mark! drpc:num-execute-http-requests)
-  (->
-    (routes
-      (POST "/drpc/:func" [:as {:keys [body servlet-request]} func & m]
-        (let [args (slurp body)]
-          (populate-context! http-creds-handler servlet-request)
-          (.execute handler func args)))
-      (POST "/drpc/:func/" [:as {:keys [body servlet-request]} func & m]
-        (let [args (slurp body)]
-          (populate-context! http-creds-handler servlet-request)
-          (.execute handler func args)))
-      (GET "/drpc/:func/:args" [:as {:keys [servlet-request]} func args & m]
-          (populate-context! http-creds-handler servlet-request)
-          (.execute handler func args))
-      (GET "/drpc/:func/" [:as {:keys [servlet-request]} func & m]
-          (populate-context! http-creds-handler servlet-request)
-          (.execute handler func ""))
-      (GET "/drpc/:func" [:as {:keys [servlet-request]} func & m]
-          (populate-context! http-creds-handler servlet-request)
-          (.execute handler func "")))
-    (wrap-reload '[backtype.storm.daemon.drpc])
-    handle-request))
-
-(defn launch-server!
-  ([]
-    (log-message "Starting drpc server for storm version '" STORM-VERSION "'")
-    (let [conf (read-storm-config)
-          worker-threads (int (conf DRPC-WORKER-THREADS))
-          queue-size (int (conf DRPC-QUEUE-SIZE))
-          drpc-http-port (int (conf DRPC-HTTP-PORT))
-          drpc-port (int (conf DRPC-PORT))
-          drpc-service-handler (service-handler conf)
-          ;; requests and returns need to be on separate thread pools, since calls to
-          ;; "execute" don't unblock until other thrift methods are called. So if
-          ;; 64 threads are calling execute, the server won't accept the result
-          ;; invocations that will unblock those threads
-          handler-server (when (> drpc-port 0)
-                           (ThriftServer. conf
-                             (DistributedRPC$Processor. drpc-service-handler)
-                             ThriftConnectionType/DRPC))
-          invoke-server (ThriftServer. conf
-                          (DistributedRPCInvocations$Processor. drpc-service-handler)
-                          ThriftConnectionType/DRPC_INVOCATIONS)
-          http-creds-handler (AuthUtils/GetDrpcHttpCredentialsPlugin conf)]
-      (add-shutdown-hook-with-force-kill-in-1-sec (fn []
-                                                    (if handler-server (.stop handler-server))
-                                                    (.stop invoke-server)))
-      (log-message "Starting Distributed RPC servers...")
-      (future (.serve invoke-server))
-      (when (> drpc-http-port 0)
-        (let [app (-> (webapp drpc-service-handler http-creds-handler)
-                    requests-middleware)
-              filter-class (conf DRPC-HTTP-FILTER)
-              filter-params (conf DRPC-HTTP-FILTER-PARAMS)
-              filters-confs [{:filter-class filter-class
-                              :filter-params filter-params}]
-              https-port (int (conf DRPC-HTTPS-PORT))
-              https-ks-path (conf DRPC-HTTPS-KEYSTORE-PATH)
-              https-ks-password (conf DRPC-HTTPS-KEYSTORE-PASSWORD)
-              https-ks-type (conf DRPC-HTTPS-KEYSTORE-TYPE)
-              https-key-password (conf DRPC-HTTPS-KEY-PASSWORD)
-              https-ts-path (conf DRPC-HTTPS-TRUSTSTORE-PATH)
-              https-ts-password (conf DRPC-HTTPS-TRUSTSTORE-PASSWORD)
-              https-ts-type (conf DRPC-HTTPS-TRUSTSTORE-TYPE)
-              https-want-client-auth (conf DRPC-HTTPS-WANT-CLIENT-AUTH)
-              https-need-client-auth (conf DRPC-HTTPS-NEED-CLIENT-AUTH)]
-
-          (storm-run-jetty
-           {:port drpc-http-port
-            :configurator (fn [server]
-                            (config-ssl server
-                                        https-port
-                                        https-ks-path
-                                        https-ks-password
-                                        https-ks-type
-                                        https-key-password
-                                        https-ts-path
-                                        https-ts-password
-                                        https-ts-type
-                                        https-need-client-auth
-                                        https-want-client-auth)
-                            (config-filter server app filters-confs))})))
-      (start-metrics-reporters)
-      (when handler-server
-        (.serve handler-server)))))
-
-(defn -main []
-  (setup-default-uncaught-exception-handler)
-  (launch-server!))
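
The deleted drpc.clj implements the standalone DRPC daemon: a DistributedRPC$Iface handler that queues requests per function and blocks each execute call on a semaphore until the topology reports a result (or the request times out), plus the DistributedRPCInvocations interface used by topologies and an optional HTTP endpoint (POST/GET /drpc/:func[/:args]). A minimal client-side sketch against that handler; DRPCClient and readStormConfig are not part of this diff, so their exact signatures here are assumptions, and the hostname is made up:

    import java.util.Map;
    import backtype.storm.utils.DRPCClient;
    import backtype.storm.utils.Utils;

    public class DrpcCall {
        public static void main(String[] args) throws Exception {
            Map conf = Utils.readStormConfig();   // storm.yaml plus defaults
            DRPCClient client = new DRPCClient(conf, "drpc1.example.com", 3772);
            try {
                // Blocks until the topology returns a result for this request,
                // or the server times it out after DRPC-REQUEST-TIMEOUT-SECS.
                System.out.println(client.execute("echo", "hello"));
            } finally {
                client.close();
            }
        }
    }

Over HTTP the same call would be a POST to /drpc/echo with "hello" as the request body, which the routes above pass straight through to (.execute handler func args).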

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/daemon/executor.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/daemon/executor.clj b/storm-core/src/clj/backtype/storm/daemon/executor.clj
deleted file mode 100644
index 7fee67b..0000000
--- a/storm-core/src/clj/backtype/storm/daemon/executor.clj
+++ /dev/null
@@ -1,855 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.daemon.executor
-  (:use [backtype.storm.daemon common])
-  (:import [backtype.storm.generated Grouping]
-           [java.io Serializable])
-  (:use [backtype.storm util config log timer stats])
-  (:import [java.util List Random HashMap ArrayList LinkedList Map])
-  (:import [backtype.storm ICredentialsListener])
-  (:import [backtype.storm.hooks ITaskHook])
-  (:import [backtype.storm.tuple AddressedTuple Tuple Fields TupleImpl MessageId])
-  (:import [backtype.storm.spout ISpoutWaitStrategy ISpout SpoutOutputCollector ISpoutOutputCollector])
-  (:import [backtype.storm.hooks.info SpoutAckInfo SpoutFailInfo
-            EmitInfo BoltFailInfo BoltAckInfo BoltExecuteInfo])
-  (:import [backtype.storm.grouping CustomStreamGrouping])
-  (:import [backtype.storm.task WorkerTopologyContext IBolt OutputCollector IOutputCollector])
-  (:import [backtype.storm.generated GlobalStreamId])
-  (:import [backtype.storm.utils Utils TupleUtils MutableObject RotatingMap RotatingMap$ExpiredCallback MutableLong Time DisruptorQueue WorkerBackpressureThread])
-  (:import [com.lmax.disruptor InsufficientCapacityException])
-  (:import [backtype.storm.serialization KryoTupleSerializer])
-  (:import [backtype.storm.daemon Shutdownable])
-  (:import [backtype.storm.metric.api IMetric IMetricsConsumer$TaskInfo IMetricsConsumer$DataPoint StateMetric])
-  (:import [backtype.storm Config Constants])
-  (:import [backtype.storm.cluster ClusterStateContext DaemonType])
-  (:import [backtype.storm.grouping LoadAwareCustomStreamGrouping LoadAwareShuffleGrouping LoadMapping ShuffleGrouping])
-  (:import [java.util.concurrent ConcurrentLinkedQueue])
-  (:require [backtype.storm [thrift :as thrift]
-             [cluster :as cluster] [disruptor :as disruptor] [stats :as stats]])
-  (:require [backtype.storm.daemon [task :as task]])
-  (:require [backtype.storm.daemon.builtin-metrics :as builtin-metrics])
-  (:require [clojure.set :as set]))
-
-(defn- mk-fields-grouper
-  [^Fields out-fields ^Fields group-fields ^List target-tasks]
-  (let [num-tasks (count target-tasks)
-        task-getter (fn [i] (.get target-tasks i))]
-    (fn [task-id ^List values load]
-      (-> (.select out-fields group-fields values)
-          (TupleUtils/listHashCode)
-          (mod num-tasks)
-          task-getter))))
-
-(defn- mk-custom-grouper
-  [^CustomStreamGrouping grouping ^WorkerTopologyContext context ^String component-id ^String stream-id target-tasks]
-  (.prepare grouping context (GlobalStreamId. component-id stream-id) target-tasks)
-  (if (instance? LoadAwareCustomStreamGrouping grouping)
-    (fn [task-id ^List values load]
-      (.chooseTasks grouping task-id values load))
-    (fn [task-id ^List values load]
-      (.chooseTasks grouping task-id values))))
-
-(defn mk-shuffle-grouper
-  [^List target-tasks topo-conf ^WorkerTopologyContext context ^String component-id ^String stream-id]
-  (if (.get topo-conf TOPOLOGY-DISABLE-LOADAWARE-MESSAGING)
-    (mk-custom-grouper (ShuffleGrouping.) context component-id stream-id target-tasks)
-    (mk-custom-grouper (LoadAwareShuffleGrouping.) context component-id stream-id target-tasks)))
-
-(defn- mk-grouper
-  "Returns a function that returns a vector of which task indices to send tuple to, or just a single task index."
-  [^WorkerTopologyContext context component-id stream-id ^Fields out-fields thrift-grouping ^List target-tasks topo-conf]
-  (let [num-tasks (count target-tasks)
-        random (Random.)
-        target-tasks (vec (sort target-tasks))]
-    (condp = (thrift/grouping-type thrift-grouping)
-      :fields
-        (if (thrift/global-grouping? thrift-grouping)
-          (fn [task-id tuple load]
-            ;; It's possible for target to have multiple tasks if it reads multiple sources
-            (first target-tasks))
-          (let [group-fields (Fields. (thrift/field-grouping thrift-grouping))]
-            (mk-fields-grouper out-fields group-fields target-tasks)
-            ))
-      :all
-        (fn [task-id tuple load] target-tasks)
-      :shuffle
-        (mk-shuffle-grouper target-tasks topo-conf context component-id stream-id)
-      :local-or-shuffle
-        (let [same-tasks (set/intersection
-                           (set target-tasks)
-                           (set (.getThisWorkerTasks context)))]
-          (if-not (empty? same-tasks)
-            (mk-shuffle-grouper (vec same-tasks) topo-conf context component-id stream-id)
-            (mk-shuffle-grouper target-tasks topo-conf context component-id stream-id)))
-      :none
-        (fn [task-id tuple load]
-          (let [i (mod (.nextInt random) num-tasks)]
-            (get target-tasks i)
-            ))
-      :custom-object
-        (let [grouping (thrift/instantiate-java-object (.get_custom_object thrift-grouping))]
-          (mk-custom-grouper grouping context component-id stream-id target-tasks))
-      :custom-serialized
-        (let [grouping (Utils/javaDeserialize (.get_custom_serialized thrift-grouping) Serializable)]
-          (mk-custom-grouper grouping context component-id stream-id target-tasks))
-      :direct
-        :direct
-      )))
-
-(defn- outbound-groupings
-  [^WorkerTopologyContext worker-context this-component-id stream-id out-fields component->grouping topo-conf]
-  (->> component->grouping
-       (filter-key #(-> worker-context
-                        (.getComponentTasks %)
-                        count
-                        pos?))
-       (map (fn [[component tgrouping]]
-               [component
-                (mk-grouper worker-context
-                            this-component-id
-                            stream-id
-                            out-fields
-                            tgrouping
-                            (.getComponentTasks worker-context component)
-                            topo-conf)]))
-       (into {})
-       (HashMap.)))
-
-(defn outbound-components
-  "Returns map of stream id to component id to grouper"
-  [^WorkerTopologyContext worker-context component-id topo-conf]
-  (->> (.getTargets worker-context component-id)
-        clojurify-structure
-        (map (fn [[stream-id component->grouping]]
-               [stream-id
-                (outbound-groupings
-                  worker-context
-                  component-id
-                  stream-id
-                  (.getComponentOutputFields worker-context component-id stream-id)
-                  component->grouping
-                  topo-conf)]))
-         (into {})
-         (HashMap.)))
-
-(defn executor-type [^WorkerTopologyContext context component-id]
-  (let [topology (.getRawTopology context)
-        spouts (.get_spouts topology)
-        bolts (.get_bolts topology)]
-    (cond (contains? spouts component-id) :spout
-          (contains? bolts component-id) :bolt
-          :else (throw-runtime "Could not find " component-id " in topology " topology))))
-
-(defn executor-selector [executor-data & _] (:type executor-data))
-
-(defmulti mk-threads executor-selector)
-(defmulti mk-executor-stats executor-selector)
-(defmulti close-component executor-selector)
-
-(defn- normalized-component-conf [storm-conf general-context component-id]
-  (let [to-remove (disj (set ALL-CONFIGS)
-                        TOPOLOGY-DEBUG
-                        TOPOLOGY-MAX-SPOUT-PENDING
-                        TOPOLOGY-MAX-TASK-PARALLELISM
-                        TOPOLOGY-TRANSACTIONAL-ID
-                        TOPOLOGY-TICK-TUPLE-FREQ-SECS
-                        TOPOLOGY-SLEEP-SPOUT-WAIT-STRATEGY-TIME-MS
-                        TOPOLOGY-SPOUT-WAIT-STRATEGY
-                        TOPOLOGY-BOLTS-WINDOW-LENGTH-COUNT
-                        TOPOLOGY-BOLTS-WINDOW-LENGTH-DURATION-MS
-                        TOPOLOGY-BOLTS-SLIDING-INTERVAL-COUNT
-                        TOPOLOGY-BOLTS-SLIDING-INTERVAL-DURATION-MS
-                        TOPOLOGY-BOLTS-TUPLE-TIMESTAMP-FIELD-NAME
-                        TOPOLOGY-BOLTS-TUPLE-TIMESTAMP-MAX-LAG-MS
-                        )
-        spec-conf (-> general-context
-                      (.getComponentCommon component-id)
-                      .get_json_conf
-                      from-json)]
-    (merge storm-conf (apply dissoc spec-conf to-remove))
-    ))
-
-(defprotocol RunningExecutor
-  (render-stats [this])
-  (get-executor-id [this])
-  (credentials-changed [this creds])
-  (get-backpressure-flag [this]))
-
-(defn throttled-report-error-fn [executor]
-  (let [storm-conf (:storm-conf executor)
-        error-interval-secs (storm-conf TOPOLOGY-ERROR-THROTTLE-INTERVAL-SECS)
-        max-per-interval (storm-conf TOPOLOGY-MAX-ERROR-REPORT-PER-INTERVAL)
-        interval-start-time (atom (current-time-secs))
-        interval-errors (atom 0)
-        ]
-    (fn [error]
-      (log-error error)
-      (when (> (time-delta @interval-start-time)
-               error-interval-secs)
-        (reset! interval-errors 0)
-        (reset! interval-start-time (current-time-secs)))
-      (swap! interval-errors inc)
-
-      (when (<= @interval-errors max-per-interval)
-        (cluster/report-error (:storm-cluster-state executor) (:storm-id executor) (:component-id executor)
-                              (hostname storm-conf)
-                              (.getThisWorkerPort (:worker-context executor)) error)
-        ))))
-
-;; in its own function so that it can be mocked out by tracked topologies
-(defn mk-executor-transfer-fn [batch-transfer->worker storm-conf]
-  (fn this
-    [task tuple]
-    (let [val (AddressedTuple. task tuple)]
-      (when (= true (storm-conf TOPOLOGY-DEBUG))
-        (log-message "TRANSFERING tuple " val))
-      (disruptor/publish batch-transfer->worker val))))
-
-(defn mk-executor-data [worker executor-id]
-  (let [worker-context (worker-context worker)
-        task-ids (executor-id->tasks executor-id)
-        component-id (.getComponentId worker-context (first task-ids))
-        storm-conf (normalized-component-conf (:storm-conf worker) worker-context component-id)
-        executor-type (executor-type worker-context component-id)
-        batch-transfer->worker (disruptor/disruptor-queue
-                                  (str "executor"  executor-id "-send-queue")
-                                  (storm-conf TOPOLOGY-EXECUTOR-SEND-BUFFER-SIZE)
-                                  (storm-conf TOPOLOGY-DISRUPTOR-WAIT-TIMEOUT-MILLIS)
-                                  :producer-type :single-threaded
-                                  :batch-size (storm-conf TOPOLOGY-DISRUPTOR-BATCH-SIZE)
-                                  :batch-timeout (storm-conf TOPOLOGY-DISRUPTOR-BATCH-TIMEOUT-MILLIS))
-        ]
-    (recursive-map
-     :worker worker
-     :worker-context worker-context
-     :executor-id executor-id
-     :task-ids task-ids
-     :component-id component-id
-     :open-or-prepare-was-called? (atom false)
-     :storm-conf storm-conf
-     :receive-queue ((:executor-receive-queue-map worker) executor-id)
-     :storm-id (:storm-id worker)
-     :conf (:conf worker)
-     :shared-executor-data (HashMap.)
-     :storm-active-atom (:storm-active-atom worker)
-     :storm-component->debug-atom (:storm-component->debug-atom worker)
-     :batch-transfer-queue batch-transfer->worker
-     :transfer-fn (mk-executor-transfer-fn batch-transfer->worker storm-conf)
-     :suicide-fn (:suicide-fn worker)
-     :storm-cluster-state (cluster/mk-storm-cluster-state (:cluster-state worker) 
-                                                          :acls (Utils/getWorkerACL storm-conf)
-                                                          :context (ClusterStateContext. DaemonType/WORKER))
-     :type executor-type
-     ;; TODO: should refactor this to be part of the executor specific map (spout or bolt with :common field)
-     :stats (mk-executor-stats <> (sampling-rate storm-conf))
-     :interval->task->metric-registry (HashMap.)
-     :task->component (:task->component worker)
-     :stream->component->grouper (outbound-components worker-context component-id storm-conf)
-     :report-error (throttled-report-error-fn <>)
-     :report-error-and-die (fn [error]
-                             ((:report-error <>) error)
-                             (if (or
-                                    (exception-cause? InterruptedException error)
-                                    (exception-cause? java.io.InterruptedIOException error))
-                               (log-message "Got interrupted excpetion shutting thread down...")
-                               ((:suicide-fn <>))))
-     :sampler (mk-stats-sampler storm-conf)
-     :backpressure (atom false)
-     :spout-throttling-metrics (if (= executor-type :spout) 
-                                (builtin-metrics/make-spout-throttling-data)
-                                nil)
-     ;; TODO: add in the executor-specific stuff in a :specific... or make a spout-data, bolt-data function?
-     )))
-
-(defn- mk-disruptor-backpressure-handler [executor-data]
-  "make a handler for the executor's receive disruptor queue to
-  check highWaterMark and lowWaterMark for backpressure"
-  (disruptor/disruptor-backpressure-handler
-    (fn []
-      "When receive queue is above highWaterMark"
-      (if (not @(:backpressure executor-data))
-        (do (reset! (:backpressure executor-data) true)
-            (log-debug "executor " (:executor-id executor-data) " is congested, set backpressure flag true")
-            (WorkerBackpressureThread/notifyBackpressureChecker (:backpressure-trigger (:worker executor-data))))))
-    (fn []
-      "When receive queue is below lowWaterMark"
-      (if @(:backpressure executor-data)
-        (do (reset! (:backpressure executor-data) false)
-            (log-debug "executor " (:executor-id executor-data) " is not-congested, set backpressure flag false")
-            (WorkerBackpressureThread/notifyBackpressureChecker (:backpressure-trigger (:worker executor-data))))))))
-
-(defn start-batch-transfer->worker-handler! [worker executor-data]
-  (let [worker-transfer-fn (:transfer-fn worker)
-        cached-emit (MutableObject. (ArrayList.))
-        storm-conf (:storm-conf executor-data)
-        serializer (KryoTupleSerializer. storm-conf (:worker-context executor-data))
-        ]
-    (disruptor/consume-loop*
-      (:batch-transfer-queue executor-data)
-      (disruptor/handler [o seq-id batch-end?]
-        (let [^ArrayList alist (.getObject cached-emit)]
-          (.add alist o)
-          (when batch-end?
-            (worker-transfer-fn serializer alist)
-            (.setObject cached-emit (ArrayList.)))))
-      :kill-fn (:report-error-and-die executor-data))))
-
-(defn setup-metrics! [executor-data]
-  (let [{:keys [storm-conf receive-queue worker-context interval->task->metric-registry]} executor-data
-        distinct-time-bucket-intervals (keys interval->task->metric-registry)]
-    (doseq [interval distinct-time-bucket-intervals]
-      (schedule-recurring 
-       (:user-timer (:worker executor-data)) 
-       interval
-       interval
-       (fn []
-         (let [val [(AddressedTuple. AddressedTuple/BROADCAST_DEST (TupleImpl. worker-context [interval] Constants/SYSTEM_TASK_ID Constants/METRICS_TICK_STREAM_ID))]]
-           (disruptor/publish receive-queue val)))))))
-
-(defn metrics-tick
-  [executor-data task-data ^TupleImpl tuple]
-   (let [{:keys [interval->task->metric-registry ^WorkerTopologyContext worker-context]} executor-data
-         interval (.getInteger tuple 0)
-         task-id (:task-id task-data)
-         name->imetric (-> interval->task->metric-registry (get interval) (get task-id))
-         task-info (IMetricsConsumer$TaskInfo.
-                     (hostname (:storm-conf executor-data))
-                     (.getThisWorkerPort worker-context)
-                     (:component-id executor-data)
-                     task-id
-                     (long (/ (System/currentTimeMillis) 1000))
-                     interval)
-         data-points (->> name->imetric
-                          (map (fn [[name imetric]]
-                                 (let [value (.getValueAndReset ^IMetric imetric)]
-                                   (if value
-                                     (IMetricsConsumer$DataPoint. name value)))))
-                          (filter identity)
-                          (into []))]
-     (when (seq data-points)
-       (task/send-unanchored task-data Constants/METRICS_STREAM_ID [task-info data-points]))))
-
-(defn setup-ticks! [worker executor-data]
-  (let [storm-conf (:storm-conf executor-data)
-        tick-time-secs (storm-conf TOPOLOGY-TICK-TUPLE-FREQ-SECS)
-        receive-queue (:receive-queue executor-data)
-        context (:worker-context executor-data)]
-    (when tick-time-secs
-      (if (or (Utils/isSystemId (:component-id executor-data))
-              (and (= false (storm-conf TOPOLOGY-ENABLE-MESSAGE-TIMEOUTS))
-                   (= :spout (:type executor-data))))
-        (log-message "Timeouts disabled for executor " (:component-id executor-data) ":" (:executor-id executor-data))
-        (schedule-recurring
-          (:user-timer worker)
-          tick-time-secs
-          tick-time-secs
-          (fn []
-            (let [val [(AddressedTuple. AddressedTuple/BROADCAST_DEST (TupleImpl. context [tick-time-secs] Constants/SYSTEM_TASK_ID Constants/SYSTEM_TICK_STREAM_ID))]]
-              (disruptor/publish receive-queue val))))))))
-
-(defn mk-executor [worker executor-id initial-credentials]
-  (let [executor-data (mk-executor-data worker executor-id)
-        _ (log-message "Loading executor " (:component-id executor-data) ":" (pr-str executor-id))
-        task-datas (->> executor-data
-                        :task-ids
-                        (map (fn [t] [t (task/mk-task executor-data t)]))
-                        (into {})
-                        (HashMap.))
-        _ (log-message "Loaded executor tasks " (:component-id executor-data) ":" (pr-str executor-id))
-        report-error-and-die (:report-error-and-die executor-data)
-        component-id (:component-id executor-data)
-
-
-        disruptor-handler (mk-disruptor-backpressure-handler executor-data)
-        _ (.registerBackpressureCallback (:receive-queue executor-data) disruptor-handler)
-        _ (-> (.setHighWaterMark (:receive-queue executor-data) ((:storm-conf executor-data) BACKPRESSURE-DISRUPTOR-HIGH-WATERMARK))
-              (.setLowWaterMark ((:storm-conf executor-data) BACKPRESSURE-DISRUPTOR-LOW-WATERMARK))
-              (.setEnableBackpressure ((:storm-conf executor-data) TOPOLOGY-BACKPRESSURE-ENABLE)))
-
-        ;; starting the batch-transfer->worker ensures that anything publishing to that queue 
-        ;; doesn't block (because it's a single threaded queue and the caching/consumer started
-        ;; trick isn't thread-safe)
-        system-threads [(start-batch-transfer->worker-handler! worker executor-data)]
-        handlers (with-error-reaction report-error-and-die
-                   (mk-threads executor-data task-datas initial-credentials))
-        threads (concat handlers system-threads)]    
-    (setup-ticks! worker executor-data)
-
-    (log-message "Finished loading executor " component-id ":" (pr-str executor-id))
-    ;; TODO: add method here to get rendered stats... have worker call that when heartbeating
-    (reify
-      RunningExecutor
-      (render-stats [this]
-        (stats/render-stats! (:stats executor-data)))
-      (get-executor-id [this]
-        executor-id)
-      (credentials-changed [this creds]
-        (let [receive-queue (:receive-queue executor-data)
-              context (:worker-context executor-data)
-              val [(AddressedTuple. AddressedTuple/BROADCAST_DEST (TupleImpl. context [creds] Constants/SYSTEM_TASK_ID Constants/CREDENTIALS_CHANGED_STREAM_ID))]]
-          (disruptor/publish receive-queue val)))
-      (get-backpressure-flag [this]
-        @(:backpressure executor-data))
-      Shutdownable
-      (shutdown
-        [this]
-        (log-message "Shutting down executor " component-id ":" (pr-str executor-id))
-        (disruptor/halt-with-interrupt! (:receive-queue executor-data))
-        (disruptor/halt-with-interrupt! (:batch-transfer-queue executor-data))
-        (doseq [t threads]
-          (.interrupt t)
-          (.join t))
-        
-        (doseq [user-context (map :user-context (vals task-datas))]
-          (doseq [hook (.getHooks user-context)]
-            (.cleanup hook)))
-        (.disconnect (:storm-cluster-state executor-data))
-        (when @(:open-or-prepare-was-called? executor-data)
-          (doseq [obj (map :object (vals task-datas))]
-            (close-component executor-data obj)))
-        (log-message "Shut down executor " component-id ":" (pr-str executor-id)))
-        )))
-
-(defn- fail-spout-msg [executor-data task-data msg-id tuple-info time-delta reason id]
-  (let [^ISpout spout (:object task-data)
-        storm-conf (:storm-conf executor-data)
-        task-id (:task-id task-data)]
-    ;;TODO: need to throttle these when there's lots of failures
-    (when (= true (storm-conf TOPOLOGY-DEBUG))
-      (log-message "SPOUT Failing " id ": " tuple-info " REASON: " reason " MSG-ID: " msg-id))
-    (.fail spout msg-id)
-    (task/apply-hooks (:user-context task-data) .spoutFail (SpoutFailInfo. msg-id task-id time-delta))
-    (when time-delta
-      (stats/spout-failed-tuple! (:stats executor-data) (:stream tuple-info) time-delta))))
-
-(defn- ack-spout-msg [executor-data task-data msg-id tuple-info time-delta id]
-  (let [storm-conf (:storm-conf executor-data)
-        ^ISpout spout (:object task-data)
-        task-id (:task-id task-data)]
-    (when (= true (storm-conf TOPOLOGY-DEBUG))
-      (log-message "SPOUT Acking message " id " " msg-id))
-    (.ack spout msg-id)
-    (task/apply-hooks (:user-context task-data) .spoutAck (SpoutAckInfo. msg-id task-id time-delta))
-    (when time-delta
-      (stats/spout-acked-tuple! (:stats executor-data) (:stream tuple-info) time-delta))))
-
-(defn mk-task-receiver [executor-data tuple-action-fn]
-  (let [task-ids (:task-ids executor-data)
-        debug? (= true (-> executor-data :storm-conf (get TOPOLOGY-DEBUG)))
-        ]
-    (disruptor/clojure-handler
-      (fn [tuple-batch sequence-id end-of-batch?]
-        (fast-list-iter [^AddressedTuple addressed-tuple tuple-batch]
-          (let [^TupleImpl tuple (.getTuple addressed-tuple)
-                task-id (.getDest addressed-tuple)]
-            (when debug? (log-message "Processing received message FOR " task-id " TUPLE: " tuple))
-            (if (not= task-id AddressedTuple/BROADCAST_DEST)
-              (tuple-action-fn task-id tuple)
-              ;; null task ids are broadcast tuples
-              (fast-list-iter [task-id task-ids]
-                (tuple-action-fn task-id tuple)
-                ))
-            ))))))
-
-(defn executor-max-spout-pending [storm-conf num-tasks]
-  (let [p (storm-conf TOPOLOGY-MAX-SPOUT-PENDING)]
-    (if p (* p num-tasks))))
-
-(defn init-spout-wait-strategy [storm-conf]
-  (let [ret (-> storm-conf (get TOPOLOGY-SPOUT-WAIT-STRATEGY) new-instance)]
-    (.prepare ret storm-conf)
-    ret
-    ))
-
-;; Send sampled data to the eventlogger if the global or component level
-;; debug flag is set (via nimbus api).
-(defn send-to-eventlogger [executor-data task-data values component-id message-id random]
-    (let [c->d @(:storm-component->debug-atom executor-data)
-          options (get c->d component-id (get c->d (:storm-id executor-data)))
-          spct    (if (and (not-nil? options) (:enable options)) (:samplingpct options) 0)]
-      ;; the thread's initialized random number generator is used to generate
-      ;; uniformly distributed random numbers.
-      (when (and (> spct 0) (< (* 100 (.nextDouble random)) spct))
-        (task/send-unanchored
-          task-data
-          EVENTLOGGER-STREAM-ID
-          [component-id message-id (System/currentTimeMillis) values]))))
-
-(defmethod mk-threads :spout [executor-data task-datas initial-credentials]
-  (let [{:keys [storm-conf component-id worker-context transfer-fn report-error sampler open-or-prepare-was-called?]} executor-data
-        ^ISpoutWaitStrategy spout-wait-strategy (init-spout-wait-strategy storm-conf)
-        max-spout-pending (executor-max-spout-pending storm-conf (count task-datas))
-        ^Integer max-spout-pending (if max-spout-pending (int max-spout-pending))        
-        last-active (atom false)        
-        spouts (ArrayList. (map :object (vals task-datas)))
-        rand (Random. (Utils/secureRandomLong))
-        ^DisruptorQueue transfer-queue (executor-data :batch-transfer-queue)
-        debug? (= true (storm-conf TOPOLOGY-DEBUG))
-
-        pending (RotatingMap.
-                 2 ;; microoptimize for performance of .size method
-                 (reify RotatingMap$ExpiredCallback
-                   (expire [this id [task-id spout-id tuple-info start-time-ms]]
-                     (let [time-delta (if start-time-ms (time-delta-ms start-time-ms))]
-                       (fail-spout-msg executor-data (get task-datas task-id) spout-id tuple-info time-delta "TIMEOUT" id)
-                       ))))
-        tuple-action-fn (fn [task-id ^TupleImpl tuple]
-                          (let [stream-id (.getSourceStreamId tuple)]
-                            (condp = stream-id
-                              Constants/SYSTEM_TICK_STREAM_ID (.rotate pending)
-                              Constants/METRICS_TICK_STREAM_ID (metrics-tick executor-data (get task-datas task-id) tuple)
-                              Constants/CREDENTIALS_CHANGED_STREAM_ID 
-                                (let [task-data (get task-datas task-id)
-                                      spout-obj (:object task-data)]
-                                  (when (instance? ICredentialsListener spout-obj)
-                                    (.setCredentials spout-obj (.getValue tuple 0))))
-                              (let [id (.getValue tuple 0)
-                                    [stored-task-id spout-id tuple-finished-info start-time-ms] (.remove pending id)]
-                                (when spout-id
-                                  (when-not (= stored-task-id task-id)
-                                    (throw-runtime "Fatal error, mismatched task ids: " task-id " " stored-task-id))
-                                  (let [time-delta (if start-time-ms (time-delta-ms start-time-ms))]
-                                    (condp = stream-id
-                                      ACKER-ACK-STREAM-ID (ack-spout-msg executor-data (get task-datas task-id)
-                                                                         spout-id tuple-finished-info time-delta id)
-                                      ACKER-FAIL-STREAM-ID (fail-spout-msg executor-data (get task-datas task-id)
-                                                                           spout-id tuple-finished-info time-delta "FAIL-STREAM" id)
-                                      )))
-                                ;; TODO: on failure, emit tuple to failure stream
-                                ))))
-        receive-queue (:receive-queue executor-data)
-        event-handler (mk-task-receiver executor-data tuple-action-fn)
-        has-ackers? (has-ackers? storm-conf)
-        has-eventloggers? (has-eventloggers? storm-conf)
-        emitted-count (MutableLong. 0)
-        empty-emit-streak (MutableLong. 0)]
-   
-    [(async-loop
-      (fn []
-        ;; If topology was started in inactive state, don't call (.open spout) until it's activated first.
-        (while (not @(:storm-active-atom executor-data))
-          (Thread/sleep 100))
-        
-        (log-message "Opening spout " component-id ":" (keys task-datas))
-        (builtin-metrics/register-spout-throttling-metrics (:spout-throttling-metrics executor-data) storm-conf (:user-context (first (vals task-datas))))
-        (doseq [[task-id task-data] task-datas
-                :let [^ISpout spout-obj (:object task-data)
-                     tasks-fn (:tasks-fn task-data)
-                     send-spout-msg (fn [out-stream-id values message-id out-task-id]
-                                       (.increment emitted-count)
-                                       (let [out-tasks (if out-task-id
-                                                         (tasks-fn out-task-id out-stream-id values)
-                                                         (tasks-fn out-stream-id values))
-                                             rooted? (and message-id has-ackers?)
-                                             root-id (if rooted? (MessageId/generateId rand))
-                                             ^List out-ids (fast-list-for [t out-tasks] (if rooted? (MessageId/generateId rand)))]
-                                         (fast-list-iter [out-task out-tasks id out-ids]
-                                                         (let [tuple-id (if rooted?
-                                                                          (MessageId/makeRootId root-id id)
-                                                                          (MessageId/makeUnanchored))
-                                                               out-tuple (TupleImpl. worker-context
-                                                                                     values
-                                                                                     task-id
-                                                                                     out-stream-id
-                                                                                     tuple-id)]
-                                                           (transfer-fn out-task out-tuple)))
-                                         (if has-eventloggers?
-                                           (send-to-eventlogger executor-data task-data values component-id message-id rand))
-                                         (if (and rooted?
-                                                  (not (.isEmpty out-ids)))
-                                           (do
-                                             (.put pending root-id [task-id
-                                                                    message-id
-                                                                    {:stream out-stream-id 
-                                                                     :values (if debug? values nil)}
-                                                                    (if (sampler) (System/currentTimeMillis))])
-                                             (task/send-unanchored task-data
-                                                                   ACKER-INIT-STREAM-ID
-                                                                   [root-id (bit-xor-vals out-ids) task-id]))
-                                           (when message-id
-                                             (ack-spout-msg executor-data task-data message-id
-                                                            {:stream out-stream-id :values values}
-                                                            (if (sampler) 0) "0:")))
-                                         (or out-tasks [])
-                                         ))]]
-          (builtin-metrics/register-all (:builtin-metrics task-data) storm-conf (:user-context task-data))
-          (builtin-metrics/register-queue-metrics {:sendqueue (:batch-transfer-queue executor-data)
-                                                   :receive receive-queue}
-                                                  storm-conf (:user-context task-data))
-          (when (instance? ICredentialsListener spout-obj) (.setCredentials spout-obj initial-credentials))
-
-          (.open spout-obj
-                 storm-conf
-                 (:user-context task-data)
-                 (SpoutOutputCollector.
-                  (reify ISpoutOutputCollector
-                    (^long getPendingCount[this]
-                      (.size pending)
-                      )
-                    (^List emit [this ^String stream-id ^List tuple ^Object message-id]
-                      (send-spout-msg stream-id tuple message-id nil)
-                      )
-                    (^void emitDirect [this ^int out-task-id ^String stream-id
-                                       ^List tuple ^Object message-id]
-                      (send-spout-msg stream-id tuple message-id out-task-id)
-                      )
-                    (reportError [this error]
-                      (report-error error)
-                      )))))
-        (reset! open-or-prepare-was-called? true) 
-        (log-message "Opened spout " component-id ":" (keys task-datas))
-        (setup-metrics! executor-data)
-        
-        (fn []
-          ;; This design requires that spouts be non-blocking
-          (disruptor/consume-batch receive-queue event-handler)
-          
-          (let [active? @(:storm-active-atom executor-data)
-                curr-count (.get emitted-count)
-                backpressure-enabled ((:storm-conf executor-data) TOPOLOGY-BACKPRESSURE-ENABLE)
-                throttle-on (and backpressure-enabled
-                              @(:throttle-on (:worker executor-data)))
-                reached-max-spout-pending (and max-spout-pending
-                                               (>= (.size pending) max-spout-pending))
-                ]
-            (if active?
-              ; activated
-              (do
-                (when-not @last-active
-                  (reset! last-active true)
-                  (log-message "Activating spout " component-id ":" (keys task-datas))
-                  (fast-list-iter [^ISpout spout spouts] (.activate spout)))
-
-                (if (and (not (.isFull transfer-queue))
-                      (not throttle-on)
-                      (not reached-max-spout-pending))
-                  (fast-list-iter [^ISpout spout spouts] (.nextTuple spout))))
-              ; deactivated
-              (do
-                (when @last-active
-                  (reset! last-active false)
-                  (log-message "Deactivating spout " component-id ":" (keys task-datas))
-                  (fast-list-iter [^ISpout spout spouts] (.deactivate spout)))
-                ;; TODO: log that it's getting throttled
-                (Time/sleep 100)
-                (builtin-metrics/skipped-inactive! (:spout-throttling-metrics executor-data) (:stats executor-data))))
-
-            (if (and (= curr-count (.get emitted-count)) active?)
-              (do (.increment empty-emit-streak)
-                  (.emptyEmit spout-wait-strategy (.get empty-emit-streak))
-                  ;; update the spout throttling metrics
-                  (if throttle-on
-                    (builtin-metrics/skipped-throttle! (:spout-throttling-metrics executor-data) (:stats executor-data))
-                    (if reached-max-spout-pending
-                      (builtin-metrics/skipped-max-spout! (:spout-throttling-metrics executor-data) (:stats executor-data)))))
-              (.set empty-emit-streak 0)
-              ))
-          0))
-      :kill-fn (:report-error-and-die executor-data)
-      :factory? true
-      :thread-name (str component-id "-executor" (:executor-id executor-data)))]))
-
-(defn- tuple-time-delta! [^TupleImpl tuple]
-  (let [ms (.getProcessSampleStartTime tuple)]
-    (if ms
-      (time-delta-ms ms))))
-      
-(defn- tuple-execute-time-delta! [^TupleImpl tuple]
-  (let [ms (.getExecuteSampleStartTime tuple)]
-    (if ms
-      (time-delta-ms ms))))
-
-(defn put-xor! [^Map pending key id]
-  (let [curr (or (.get pending key) (long 0))]
-    (.put pending key (bit-xor curr id))))
-
-(defmethod mk-threads :bolt [executor-data task-datas initial-credentials]
-  (let [storm-conf (:storm-conf executor-data)
-        execute-sampler (mk-stats-sampler storm-conf)
-        executor-stats (:stats executor-data)
-        {:keys [storm-conf component-id worker-context transfer-fn report-error sampler
-                open-or-prepare-was-called?]} executor-data
-        rand (Random. (Utils/secureRandomLong))
-
-        tuple-action-fn (fn [task-id ^TupleImpl tuple]
-                          ;; synchronization needs to be done with a key provided by this bolt, otherwise:
-                          ;; spout 1 sends synchronization (s1), dies, same spout restarts somewhere else, sends synchronization (s2) and incremental update. s2 and update finish before s1 -> lose the incremental update
-                          ;; TODO: for state sync, need to first send sync messages in a loop and receive tuples until synchronization
-                          ;; buffer other tuples until fully synchronized, then process all of those tuples
-                          ;; then go into normal loop
-                          ;; spill to disk?
-                          ;; could be receiving incremental updates while waiting for sync or even a partial sync because of another failed task
-                          ;; should remember sync requests and include a random sync id in the request. drop anything not related to active sync requests
-                          ;; or just timeout the sync messages that are coming in until full sync is hit from that task
-                          ;; need to drop incremental updates from tasks where waiting for sync. otherwise, buffer the incremental updates
-                          ;; TODO: for state sync, need to check if tuple comes from state spout. if so, update state
-                          ;; TODO: how to handle incremental updates as well as synchronizations at same time
-                          ;; TODO: need to version tuples somehow
-                          
-                          ;;(log-debug "Received tuple " tuple " at task " task-id)
-                          ;; need to do it this way to avoid reflection
-                          (let [stream-id (.getSourceStreamId tuple)]
-                            (condp = stream-id
-                              Constants/CREDENTIALS_CHANGED_STREAM_ID 
-                                (let [task-data (get task-datas task-id)
-                                      bolt-obj (:object task-data)]
-                                  (when (instance? ICredentialsListener bolt-obj)
-                                    (.setCredentials bolt-obj (.getValue tuple 0))))
-                              Constants/METRICS_TICK_STREAM_ID (metrics-tick executor-data (get task-datas task-id) tuple)
-                              (let [task-data (get task-datas task-id)
-                                    ^IBolt bolt-obj (:object task-data)
-                                    user-context (:user-context task-data)
-                                    sampler? (sampler)
-                                    execute-sampler? (execute-sampler)
-                                    now (if (or sampler? execute-sampler?) (System/currentTimeMillis))
-                                    receive-queue (:receive-queue executor-data)]
-                                (when sampler?
-                                  (.setProcessSampleStartTime tuple now))
-                                (when execute-sampler?
-                                  (.setExecuteSampleStartTime tuple now))
-                                (.execute bolt-obj tuple)
-                                (let [delta (tuple-execute-time-delta! tuple)]
-                                  (when (= true (storm-conf TOPOLOGY-DEBUG))
-                                    (log-message "Execute done TUPLE " tuple " TASK: " task-id " DELTA: " delta))
- 
-                                  (task/apply-hooks user-context .boltExecute (BoltExecuteInfo. tuple task-id delta))
-                                  (when delta
-                                    (stats/bolt-execute-tuple! executor-stats
-                                                               (.getSourceComponent tuple)
-                                                               (.getSourceStreamId tuple)
-                                                               delta)))))))
-        has-eventloggers? (has-eventloggers? storm-conf)]
-    
-    ;; TODO: can get any SubscribedState objects out of the context now
-
-    [(async-loop
-      (fn []
-        ;; If topology was started in inactive state, don't call prepare bolt until it's activated first.
-        (while (not @(:storm-active-atom executor-data))          
-          (Thread/sleep 100))
-        
-        (log-message "Preparing bolt " component-id ":" (keys task-datas))
-        (doseq [[task-id task-data] task-datas
-                :let [^IBolt bolt-obj (:object task-data)
-                      tasks-fn (:tasks-fn task-data)
-                      user-context (:user-context task-data)
-                      bolt-emit (fn [stream anchors values task]
-                                  (let [out-tasks (if task
-                                                    (tasks-fn task stream values)
-                                                    (tasks-fn stream values))]
-                                    (fast-list-iter [t out-tasks]
-                                                    (let [anchors-to-ids (HashMap.)]
-                                                      (fast-list-iter [^TupleImpl a anchors]
-                                                                      (let [root-ids (-> a .getMessageId .getAnchorsToIds .keySet)]
-                                                                        (when (pos? (count root-ids))
-                                                                          (let [edge-id (MessageId/generateId rand)]
-                                                                            (.updateAckVal a edge-id)
-                                                                            (fast-list-iter [root-id root-ids]
-                                                                                            (put-xor! anchors-to-ids root-id edge-id))
-                                                                            ))))
-                                                        (let [tuple (TupleImpl. worker-context
-                                                                               values
-                                                                               task-id
-                                                                               stream
-                                                                               (MessageId/makeId anchors-to-ids))]
-                                                          (transfer-fn t tuple))))
-                                    (if has-eventloggers?
-                                      (send-to-eventlogger executor-data task-data values component-id nil rand))
-                                    (or out-tasks [])))]]
-          (builtin-metrics/register-all (:builtin-metrics task-data) storm-conf user-context)
-          (when (instance? ICredentialsListener bolt-obj) (.setCredentials bolt-obj initial-credentials)) 
-          (if (= component-id Constants/SYSTEM_COMPONENT_ID)
-            (do
-              (builtin-metrics/register-queue-metrics {:sendqueue (:batch-transfer-queue executor-data)
-                                                       :receive (:receive-queue executor-data)
-                                                       :transfer (:transfer-queue (:worker executor-data))}
-                                                      storm-conf user-context)
-              (builtin-metrics/register-iconnection-client-metrics (:cached-node+port->socket (:worker executor-data)) storm-conf user-context)
-              (builtin-metrics/register-iconnection-server-metric (:receiver (:worker executor-data)) storm-conf user-context))
-            (builtin-metrics/register-queue-metrics {:sendqueue (:batch-transfer-queue executor-data)
-                                                     :receive (:receive-queue executor-data)}
-                                                    storm-conf user-context)
-            )
-
-          (.prepare bolt-obj
-                    storm-conf
-                    user-context
-                    (OutputCollector.
-                     (reify IOutputCollector
-                       (emit [this stream anchors values]
-                         (bolt-emit stream anchors values nil))
-                       (emitDirect [this task stream anchors values]
-                         (bolt-emit stream anchors values task))
-                       (^void ack [this ^Tuple tuple]
-                         (let [^TupleImpl tuple tuple
-                               ack-val (.getAckVal tuple)]
-                           (fast-map-iter [[root id] (.. tuple getMessageId getAnchorsToIds)]
-                                          (task/send-unanchored task-data
-                                                                ACKER-ACK-STREAM-ID
-                                                                [root (bit-xor id ack-val)])))
-                         (let [delta (tuple-time-delta! tuple)
-                               debug? (= true (storm-conf TOPOLOGY-DEBUG))]
-                           (when debug? 
-                             (log-message "BOLT ack TASK: " task-id " TIME: " delta " TUPLE: " tuple))
-                           (task/apply-hooks user-context .boltAck (BoltAckInfo. tuple task-id delta))
-                           (when delta
-                             (stats/bolt-acked-tuple! executor-stats
-                                                      (.getSourceComponent tuple)
-                                                      (.getSourceStreamId tuple)
-                                                      delta))))
-                       (^void fail [this ^Tuple tuple]
-                         (fast-list-iter [root (.. tuple getMessageId getAnchors)]
-                                         (task/send-unanchored task-data
-                                                               ACKER-FAIL-STREAM-ID
-                                                               [root]))
-                         (let [delta (tuple-time-delta! tuple)
-                               debug? (= true (storm-conf TOPOLOGY-DEBUG))]
-                           (when debug? 
-                             (log-message "BOLT fail TASK: " task-id " TIME: " delta " TUPLE: " tuple))
-                           (task/apply-hooks user-context .boltFail (BoltFailInfo. tuple task-id delta))
-                           (when delta
-                             (stats/bolt-failed-tuple! executor-stats
-                                                       (.getSourceComponent tuple)
-                                                       (.getSourceStreamId tuple)
-                                                       delta))))
-                       (reportError [this error]
-                         (report-error error)
-                         )))))
-        (reset! open-or-prepare-was-called? true)        
-        (log-message "Prepared bolt " component-id ":" (keys task-datas))
-        (setup-metrics! executor-data)
-
-        (let [receive-queue (:receive-queue executor-data)
-              event-handler (mk-task-receiver executor-data tuple-action-fn)]
-          (fn []            
-            (disruptor/consume-batch-when-available receive-queue event-handler)
-            0)))
-      :kill-fn (:report-error-and-die executor-data)
-      :factory? true
-      :thread-name (str component-id "-executor" (:executor-id executor-data)))]))
-
-(defmethod close-component :spout [executor-data spout]
-  (.close spout))
-
-(defmethod close-component :bolt [executor-data bolt]
-  (.cleanup bolt))
-
-;; TODO: refactor this to be part of an executor-specific map
-(defmethod mk-executor-stats :spout [_ rate]
-  (stats/mk-spout-stats rate))
-
-(defmethod mk-executor-stats :bolt [_ rate]
-  (stats/mk-bolt-stats rate))

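For orientation while reading the deleted executor.clj above: mk-fields-grouper selects a target task by pulling the grouping fields out of the tuple values, hashing them, and taking that hash modulo the number of target tasks. Below is a minimal standalone Clojure sketch of the same idea; pick-task is an illustrative name and plain clojure.core hash stands in for TupleUtils/listHashCode.

(defn pick-task
  "Illustrative stand-in for mk-fields-grouper's task selection: the same
  grouping values always land on the same target task."
  [group-values target-tasks]
  (nth target-tasks (mod (hash group-values) (count target-tasks))))

;; (pick-task ["user-42"] [3 7 11]) returns the same task id on every call,
;; which is what gives a fields grouping its per-key routing guarantee.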

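The same file's ack handling (put-xor! plus the ACKER-INIT-STREAM-ID and ACKER-ACK-STREAM-ID sends) leans on XOR bookkeeping: each edge id is XORed into a per-root value once when a tuple is emitted and once when it is acked, so the value returns to zero exactly when every edge has been acked. A self-contained sketch of that identity follows; xor-all is an illustrative helper name, not something defined in the commit.

(defn xor-all
  "Fold edge ids with bit-xor, the way the pending value for a root id is built."
  [ids]
  (reduce bit-xor 0 ids))

(let [edge-ids [0x1a2b 0x3c4d 0x5e6f]]
  ;; every emit paired with its ack cancels out -> 0 (tuple tree complete)
  (println (xor-all (concat edge-ids edge-ids)))
  ;; one missing ack leaves a non-zero residue -> tree still pending
  (println (xor-all (concat edge-ids (rest edge-ids)))))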
[14/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
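The new org.apache.storm.testing namespace below starts with small fixtures such as local-temp-path, delete-all and with-local-tmp. A hedged usage sketch, not part of the commit: with-local-tmp only binds fresh path strings and deletes whatever exists at them afterwards, so the example creates the directory itself.

(require '[org.apache.storm.testing :refer [with-local-tmp]])

(with-local-tmp [dir]
  ;; the macro generates a fresh temp path and runs delete-all on it in a
  ;; finally block; creating the directory is up to the body
  (.mkdirs (java.io.File. dir))
  (spit (str dir "/scratch.txt") "hello")
  (slurp (str dir "/scratch.txt")))   ;; => "hello"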
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/testing.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/testing.clj b/storm-core/src/clj/org/apache/storm/testing.clj
new file mode 100644
index 0000000..87ca2de
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/testing.clj
@@ -0,0 +1,701 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+
+(ns org.apache.storm.testing
+  (:require [org.apache.storm.daemon
+             [nimbus :as nimbus]
+             [supervisor :as supervisor]
+             [common :as common]
+             [worker :as worker]
+             [executor :as executor]])
+  (:require [org.apache.storm [process-simulator :as psim]])
+  (:import [org.apache.commons.io FileUtils])
+  (:import [java.io File])
+  (:import [java.util HashMap ArrayList])
+  (:import [java.util.concurrent.atomic AtomicInteger])
+  (:import [java.util.concurrent ConcurrentHashMap])
+  (:import [org.apache.storm.utils Time Utils RegisteredGlobalState])
+  (:import [org.apache.storm.tuple Fields Tuple TupleImpl])
+  (:import [org.apache.storm.task TopologyContext])
+  (:import [org.apache.storm.generated GlobalStreamId Bolt KillOptions])
+  (:import [org.apache.storm.testing FeederSpout FixedTupleSpout FixedTuple
+            TupleCaptureBolt SpoutTracker BoltTracker NonRichBoltTracker
+            TestWordSpout MemoryTransactionalSpout])
+  (:import [org.apache.storm.security.auth ThriftServer ThriftConnectionType ReqContext AuthUtils])
+  (:import [org.apache.storm.generated NotAliveException AlreadyAliveException StormTopology ErrorInfo
+            ExecutorInfo InvalidTopologyException Nimbus$Iface Nimbus$Processor SubmitOptions TopologyInitialStatus
+            KillOptions RebalanceOptions ClusterSummary SupervisorSummary TopologySummary TopologyInfo
+            ExecutorSummary AuthorizationException GetInfoOptions NumErrorsChoice])
+  (:import [org.apache.storm.transactional TransactionalSpoutCoordinator])
+  (:import [org.apache.storm.transactional.partitioned PartitionedTransactionalSpoutExecutor])
+  (:import [org.apache.storm.tuple Tuple])
+  (:import [org.apache.storm.generated StormTopology])
+  (:import [org.apache.storm.task TopologyContext])
+  (:require [org.apache.storm [zookeeper :as zk]])
+  (:require [org.apache.storm.messaging.loader :as msg-loader])
+  (:require [org.apache.storm.daemon.acker :as acker])
+  (:use [org.apache.storm cluster util thrift config log local-state]))
+
+(defn feeder-spout
+  [fields]
+  (FeederSpout. (Fields. fields)))
+
+(defn local-temp-path
+  []
+  (str (System/getProperty "java.io.tmpdir") (if-not on-windows? "/") (uuid)))
+
+(defn delete-all
+  [paths]
+  (dorun
+    (for [t paths]
+      (if (.exists (File. t))
+        (try
+          (FileUtils/forceDelete (File. t))
+          (catch Exception e
+            (log-message (.getMessage e))))))))
+
+(defmacro with-local-tmp
+  [[& tmp-syms] & body]
+  (let [tmp-paths (mapcat (fn [t] [t `(local-temp-path)]) tmp-syms)]
+    `(let [~@tmp-paths]
+       (try
+         ~@body
+         (finally
+           (delete-all ~(vec tmp-syms)))))))
+
+(defn start-simulating-time!
+  []
+  (Time/startSimulating))
+
+(defn stop-simulating-time!
+  []
+  (Time/stopSimulating))
+
+(defmacro with-simulated-time
+  [& body]
+  `(try
+    (start-simulating-time!)
+    ~@body
+    (finally
+      (stop-simulating-time!))))
+
+(defn advance-time-ms! [ms]
+  (Time/advanceTime ms))
+
+(defn advance-time-secs! [secs]
+  (advance-time-ms! (* (long secs) 1000)))
+
+(defnk add-supervisor
+  [cluster-map :ports 2 :conf {} :id nil]
+  (let [tmp-dir (local-temp-path)
+        port-ids (if (sequential? ports)
+                   ports
+                   (doall (repeatedly ports (:port-counter cluster-map))))
+        supervisor-conf (merge (:daemon-conf cluster-map)
+                               conf
+                               {STORM-LOCAL-DIR tmp-dir
+                                SUPERVISOR-SLOTS-PORTS port-ids})
+        id-fn (if id (fn [] id) supervisor/generate-supervisor-id)
+        daemon (with-var-roots [supervisor/generate-supervisor-id id-fn] (supervisor/mk-supervisor supervisor-conf (:shared-context cluster-map) (supervisor/standalone-supervisor)))]
+    (swap! (:supervisors cluster-map) conj daemon)
+    (swap! (:tmp-dirs cluster-map) conj tmp-dir)
+    daemon))
+
+(defn mk-shared-context [conf]
+  (if-not (conf STORM-LOCAL-MODE-ZMQ)
+    (msg-loader/mk-local-context)))
+
+(defn start-nimbus-daemon [conf nimbus]
+  (let [server (ThriftServer. conf (Nimbus$Processor. nimbus)
+                              ThriftConnectionType/NIMBUS)
+        nimbus-thread (Thread. (fn [] (.serve server)))]
+    (log-message "Starting Nimbus server...")
+    (.start nimbus-thread)
+    server))
+
+
+;; returns a map containing cluster info
+;; local dir is always overridden in maps
+;; the supervisors (except for ports) can be customized by passing a map for the :supervisors parameter
+;; if the number of ports needs further customization, use add-supervisor calls afterwards
+(defnk mk-local-storm-cluster [:supervisors 2 :ports-per-supervisor 3 :daemon-conf {} :inimbus nil :supervisor-slot-port-min 1024 :nimbus-daemon false]
+  (let [zk-tmp (local-temp-path)
+        [zk-port zk-handle] (if-not (contains? daemon-conf STORM-ZOOKEEPER-SERVERS)
+                              (zk/mk-inprocess-zookeeper zk-tmp))
+        daemon-conf (merge (read-storm-config)
+                           {TOPOLOGY-SKIP-MISSING-KRYO-REGISTRATIONS true
+                            ZMQ-LINGER-MILLIS 0
+                            TOPOLOGY-ENABLE-MESSAGE-TIMEOUTS false
+                            TOPOLOGY-TRIDENT-BATCH-EMIT-INTERVAL-MILLIS 50
+                            STORM-CLUSTER-MODE "local"
+                            BLOBSTORE-SUPERUSER (System/getProperty "user.name")}
+                           (if-not (contains? daemon-conf STORM-ZOOKEEPER-SERVERS)
+                             {STORM-ZOOKEEPER-PORT zk-port
+                              STORM-ZOOKEEPER-SERVERS ["localhost"]})
+                           daemon-conf)
+        nimbus-tmp (local-temp-path)
+        port-counter (mk-counter supervisor-slot-port-min)
+        nimbus (nimbus/service-handler
+                (assoc daemon-conf STORM-LOCAL-DIR nimbus-tmp)
+                (if inimbus inimbus (nimbus/standalone-nimbus)))
+        context (mk-shared-context daemon-conf)
+        nimbus-thrift-server (if nimbus-daemon (start-nimbus-daemon daemon-conf nimbus) nil)
+        cluster-map {:nimbus nimbus
+                     :port-counter port-counter
+                     :daemon-conf daemon-conf
+                     :supervisors (atom [])
+                     :state (mk-distributed-cluster-state daemon-conf)
+                     :storm-cluster-state (mk-storm-cluster-state daemon-conf)
+                     :tmp-dirs (atom [nimbus-tmp zk-tmp])
+                     :zookeeper (if (not-nil? zk-handle) zk-handle)
+                     :shared-context context
+                     :nimbus-thrift-server nimbus-thrift-server}
+        supervisor-confs (if (sequential? supervisors)
+                           supervisors
+                           (repeat supervisors {}))]
+
+    (doseq [sc supervisor-confs]
+      (add-supervisor cluster-map :ports ports-per-supervisor :conf sc))
+    cluster-map))
+
+(defn get-supervisor [cluster-map supervisor-id]
+  (let [finder-fn #(= (.get-id %) supervisor-id)]
+    (find-first finder-fn @(:supervisors cluster-map))))
+
+(defn kill-supervisor [cluster-map supervisor-id]
+  (let [finder-fn #(= (.get-id %) supervisor-id)
+        supervisors @(:supervisors cluster-map)
+        sup (find-first finder-fn
+                        supervisors)]
+    ;; tmp-dir will be taken care of by shutdown
+    (reset! (:supervisors cluster-map) (remove-first finder-fn supervisors))
+    (.shutdown sup)))
+
+(defn kill-local-storm-cluster [cluster-map]
+  (.shutdown (:nimbus cluster-map))
+  (if (not-nil? (:nimbus-thrift-server cluster-map))
+    (do
+      (log-message "shutting down thrift server")
+      (try
+        (.stop (:nimbus-thrift-server cluster-map))
+        (catch Exception e (log-message "failed to stop thrift")))
+      ))
+  (.close (:state cluster-map))
+  (.disconnect (:storm-cluster-state cluster-map))
+  (doseq [s @(:supervisors cluster-map)]
+    (.shutdown-all-workers s)
+    ;; race condition here? will it launch the workers again?
+    (supervisor/kill-supervisor s))
+  (psim/kill-all-processes)
+  (if (not-nil? (:zookeeper cluster-map))
+    (do
+      (log-message "Shutting down in process zookeeper")
+      (zk/shutdown-inprocess-zookeeper (:zookeeper cluster-map))
+      (log-message "Done shutting down in process zookeeper")))
+  (doseq [t @(:tmp-dirs cluster-map)]
+    (log-message "Deleting temporary path " t)
+    (try
+      (rmr t)
+      ;; on windows, the host process still holds lock on the logfile
+      (catch Exception e (log-message (.getMessage e)))) ))
+
+(def TEST-TIMEOUT-MS
+  (let [timeout (System/getenv "STORM_TEST_TIMEOUT_MS")]
+    (parse-int (if timeout timeout "5000"))))
+
+(defmacro while-timeout [timeout-ms condition & body]
+  `(let [end-time# (+ (System/currentTimeMillis) ~timeout-ms)]
+     (log-debug "Looping until " '~condition)
+     (while ~condition
+       (when (> (System/currentTimeMillis) end-time#)
+         (let [thread-dump# (Utils/threadDump)]
+           (log-message "Condition " '~condition  " not met in " ~timeout-ms "ms")
+           (log-message thread-dump#)
+           (throw (AssertionError. (str "Test timed out (" ~timeout-ms "ms) " '~condition)))))
+       ~@body)
+     (log-debug "Condition met " '~condition)))
+
+(defn wait-for-condition
+  ([apredicate]
+    (wait-for-condition TEST-TIMEOUT-MS apredicate))
+  ([timeout-ms apredicate]
+    (while-timeout timeout-ms (not (apredicate))
+      (Time/sleep 100))))
+
+(defn wait-until-cluster-waiting
+  "Wait until the cluster is idle. Should be used with time simulation."
+  ([cluster-map] (wait-until-cluster-waiting cluster-map TEST-TIMEOUT-MS))
+  ([cluster-map timeout-ms]
+  ;; wait until all workers, supervisors, and nimbus are waiting
+  (let [supervisors @(:supervisors cluster-map)
+        workers (filter (partial satisfies? common/DaemonCommon) (psim/all-processes))
+        daemons (concat
+                  [(:nimbus cluster-map)]
+                  supervisors
+                  ; because a worker may already be dead
+                  workers)]
+    (while-timeout timeout-ms (not (every? (memfn waiting?) daemons))
+                   (Thread/sleep (rand-int 20))
+                   ;;      (doseq [d daemons]
+                   ;;        (if-not ((memfn waiting?) d)
+                   ;;          (println d)))
+                   ))))
+
+(defn advance-cluster-time
+  ([cluster-map secs increment-secs]
+   (loop [left secs]
+     (when (> left 0)
+       (let [diff (min left increment-secs)]
+         (advance-time-secs! diff)
+         (wait-until-cluster-waiting cluster-map)
+         (recur (- left diff))))))
+  ([cluster-map secs]
+   (advance-cluster-time cluster-map secs 1)))
+
+(defmacro with-local-cluster
+  [[cluster-sym & args] & body]
+  `(let [~cluster-sym (mk-local-storm-cluster ~@args)]
+     (try
+       ~@body
+       (catch Throwable t#
+         (log-error t# "Error in cluster")
+         (throw t#))
+       (finally
+         (let [keep-waiting?# (atom true)
+               f# (future (while @keep-waiting?# (simulate-wait ~cluster-sym)))]
+           (kill-local-storm-cluster ~cluster-sym)
+           (reset! keep-waiting?# false)
+            @f#)))))
+
+(defmacro with-simulated-time-local-cluster
+  [& args]
+  `(with-simulated-time
+     (with-local-cluster ~@args)))
+
+(defmacro with-inprocess-zookeeper
+  [port-sym & body]
+  `(with-local-tmp [tmp#]
+                   (let [[~port-sym zks#] (zk/mk-inprocess-zookeeper tmp#)]
+                     (try
+                       ~@body
+                       (finally
+                         (zk/shutdown-inprocess-zookeeper zks#))))))
+
+(defn submit-local-topology
+  [nimbus storm-name conf topology]
+  (when-not (Utils/isValidConf conf)
+    (throw (IllegalArgumentException. "Topology conf is not json-serializable")))
+  (.submitTopology nimbus storm-name nil (to-json conf) topology))
+
+(defn submit-local-topology-with-opts
+  [nimbus storm-name conf topology submit-opts]
+  (when-not (Utils/isValidConf conf)
+    (throw (IllegalArgumentException. "Topology conf is not json-serializable")))
+  (.submitTopologyWithOpts nimbus storm-name nil (to-json conf) topology submit-opts))
+
+(defn mocked-convert-assignments-to-worker->resources [storm-cluster-state storm-name worker->resources]
+  (fn [existing-assignments]
+    (let [topology-id (common/get-storm-id storm-cluster-state storm-name)
+          existing-assignments (into {} (for [[tid assignment] existing-assignments]
+                                          {tid (:worker->resources assignment)}))
+          new-assignments (assoc existing-assignments topology-id worker->resources)]
+      new-assignments)))
+
+(defn mocked-compute-new-topology->executor->node+port [storm-cluster-state storm-name executor->node+port]
+  (fn [new-scheduler-assignments existing-assignments]
+    (let [topology-id (common/get-storm-id storm-cluster-state storm-name)
+          existing-assignments (into {} (for [[tid assignment] existing-assignments]
+                                          {tid (:executor->node+port assignment)}))
+          new-assignments (assoc existing-assignments topology-id executor->node+port)]
+      new-assignments)))
+
+(defn mocked-compute-new-scheduler-assignments []
+  (fn [nimbus existing-assignments topologies scratch-topology-id]
+    existing-assignments))
+
+(defn submit-mocked-assignment
+  [nimbus storm-cluster-state storm-name conf topology task->component executor->node+port worker->resources]
+  (with-var-roots [common/storm-task-info (fn [& ignored] task->component)
+                   nimbus/compute-new-scheduler-assignments (mocked-compute-new-scheduler-assignments)
+                   nimbus/convert-assignments-to-worker->resources (mocked-convert-assignments-to-worker->resources
+                                                          storm-cluster-state
+                                                          storm-name
+                                                          worker->resources)
+                   nimbus/compute-new-topology->executor->node+port (mocked-compute-new-topology->executor->node+port
+                                                                      storm-cluster-state
+                                                                      storm-name
+                                                                      executor->node+port)]
+    (submit-local-topology nimbus storm-name conf topology)))
+
+(defn mk-capture-launch-fn [capture-atom]
+  (fn [supervisor storm-id port worker-id mem-onheap]
+    (let [supervisor-id (:supervisor-id supervisor)
+          conf (:conf supervisor)
+          existing (get @capture-atom [supervisor-id port] [])]
+      (set-worker-user! conf worker-id "")
+      (swap! capture-atom assoc [supervisor-id port] (conj existing storm-id)))))
+
+(defn find-worker-id
+  [supervisor-conf port]
+  (let [supervisor-state (supervisor-state supervisor-conf)
+        worker->port (ls-approved-workers supervisor-state)]
+    (first ((reverse-map worker->port) port))))
+
+(defn find-worker-port
+  [supervisor-conf worker-id]
+  (let [supervisor-state (supervisor-state supervisor-conf)
+        worker->port (ls-approved-workers supervisor-state)]
+    (worker->port worker-id)))
+
+(defn mk-capture-shutdown-fn
+  [capture-atom]
+  (let [existing-fn supervisor/shutdown-worker]
+    (fn [supervisor worker-id]
+      (let [conf (:conf supervisor)
+            supervisor-id (:supervisor-id supervisor)
+            port (find-worker-port conf worker-id)
+            existing (get @capture-atom [supervisor-id port] 0)]
+        (swap! capture-atom assoc [supervisor-id port] (inc existing))
+        (existing-fn supervisor worker-id)))))
+
+(defmacro capture-changed-workers
+  [& body]
+  `(let [launch-captured# (atom {})
+         shutdown-captured# (atom {})]
+     (with-var-roots [supervisor/launch-worker (mk-capture-launch-fn launch-captured#)
+                      supervisor/shutdown-worker (mk-capture-shutdown-fn shutdown-captured#)]
+                     ~@body
+                     {:launched @launch-captured#
+                      :shutdown @shutdown-captured#})))
+
+(defmacro capture-launched-workers
+  [& body]
+  `(:launched (capture-changed-workers ~@body)))
+
+(defmacro capture-shutdown-workers
+  [& body]
+  `(:shutdown (capture-changed-workers ~@body)))
+
+(defnk aggregated-stat
+  [cluster-map storm-name stat-key :component-ids nil]
+  (let [state (:storm-cluster-state cluster-map)
+        nimbus (:nimbus cluster-map)
+        storm-id (common/get-storm-id state storm-name)
+        component->tasks (reverse-map
+                           (common/storm-task-info
+                             (.getUserTopology nimbus storm-id)
+                             (from-json (.getTopologyConf nimbus storm-id))))
+        component->tasks (if component-ids
+                           (select-keys component->tasks component-ids)
+                           component->tasks)
+        task-ids (apply concat (vals component->tasks))
+        assignment (.assignment-info state storm-id nil)
+        taskbeats (.taskbeats state storm-id (:task->node+port assignment))
+        heartbeats (dofor [id task-ids] (get taskbeats id))
+        stats (dofor [hb heartbeats] (if hb (stat-key (:stats hb)) 0))]
+    (reduce + stats)))
+
+(defn emitted-spout-tuples
+  [cluster-map topology storm-name]
+  (aggregated-stat
+    cluster-map
+    storm-name
+    :emitted
+    :component-ids (keys (.get_spouts topology))))
+
+(defn transferred-tuples
+  [cluster-map storm-name]
+  (aggregated-stat cluster-map storm-name :transferred))
+
+(defn acked-tuples
+  [cluster-map storm-name]
+  (aggregated-stat cluster-map storm-name :acked))
+
+(defn simulate-wait
+  [cluster-map]
+  (if (Time/isSimulating)
+    (advance-cluster-time cluster-map 10)
+    (Thread/sleep 100)))
+
+(defprotocol CompletableSpout
+  (exhausted?
+    [this]
+    "Whether all the tuples for this spout have been completed.")
+  (cleanup
+    [this]
+    "Cleanup any global state kept")
+  (startup
+    [this]
+    "Prepare the spout (globally) before starting the topology"))
+
+(extend-type FixedTupleSpout
+  CompletableSpout
+  (exhausted? [this]
+              (= (-> this .getSourceTuples count)
+                 (.getCompleted this)))
+  (cleanup [this]
+           (.cleanup this))
+  (startup [this]))
+
+(extend-type TransactionalSpoutCoordinator
+  CompletableSpout
+  (exhausted? [this]
+              (exhausted? (.getSpout this)))
+  (cleanup [this]
+           (cleanup (.getSpout this)))
+  (startup [this]
+           (startup (.getSpout this))))
+
+(extend-type PartitionedTransactionalSpoutExecutor
+  CompletableSpout
+  (exhausted? [this]
+              (exhausted? (.getPartitionedSpout this)))
+  (cleanup [this]
+           (cleanup (.getPartitionedSpout this)))
+  (startup [this]
+           (startup (.getPartitionedSpout this))))
+
+(extend-type MemoryTransactionalSpout
+  CompletableSpout
+  (exhausted? [this]
+              (.isExhaustedTuples this))
+  (cleanup [this]
+           (.cleanup this))
+  (startup [this]
+           (.startup this)))
+
+(defn spout-objects [spec-map]
+  (for [[_ spout-spec] spec-map]
+    (-> spout-spec
+        .get_spout_object
+        deserialized-component-object)))
+
+(defn capture-topology
+  [topology]
+  (let [topology (.deepCopy topology)
+        spouts (.get_spouts topology)
+        bolts (.get_bolts topology)
+        all-streams (apply concat
+                           (for [[id spec] (merge (clojurify-structure spouts)
+                                                  (clojurify-structure bolts))]
+                             (for [[stream info] (.. spec get_common get_streams)]
+                               [(GlobalStreamId. id stream) (.is_direct info)])))
+        capturer (TupleCaptureBolt.)]
+    (.set_bolts topology
+                (assoc (clojurify-structure bolts)
+                  (uuid)
+                  (Bolt.
+                    (serialize-component-object capturer)
+                    (mk-plain-component-common (into {} (for [[id direct?] all-streams]
+                                                          [id (if direct?
+                                                                (mk-direct-grouping)
+                                                                (mk-global-grouping))]))
+                                               {}
+                                               nil))))
+    {:topology topology
+     :capturer capturer}))
+
+;; TODO: mock-sources needs to be able to mock out state spouts as well
+(defnk complete-topology
+  [cluster-map topology
+   :mock-sources {}
+   :storm-conf {}
+   :cleanup-state true
+   :topology-name nil
+   :timeout-ms TEST-TIMEOUT-MS]
+  ;; TODO: the idea of mocking for transactional topologies should be done at an
+  ;; abstraction level above... should have a complete-transactional-topology for this
+  (let [{topology :topology capturer :capturer} (capture-topology topology)
+        storm-name (or topology-name (str "topologytest-" (uuid)))
+        state (:storm-cluster-state cluster-map)
+        spouts (.get_spouts topology)
+        replacements (map-val (fn [v]
+                                (FixedTupleSpout.
+                                  (for [tup v]
+                                    (if (map? tup)
+                                      (FixedTuple. (:stream tup) (:values tup))
+                                      tup))))
+                              mock-sources)]
+    (doseq [[id spout] replacements]
+      (let [spout-spec (get spouts id)]
+        (.set_spout_object spout-spec (serialize-component-object spout))))
+    (doseq [spout (spout-objects spouts)]
+      (when-not (extends? CompletableSpout (.getClass spout))
+        (throw (RuntimeException. (str "Cannot complete topology unless every spout is a CompletableSpout (or mocked to be); failed by " spout)))))
+
+    (doseq [spout (spout-objects spouts)]
+      (startup spout))
+
+    (submit-local-topology (:nimbus cluster-map) storm-name storm-conf topology)
+    (advance-cluster-time cluster-map 11)
+
+    (let [storm-id (common/get-storm-id state storm-name)]
+      ;; Give the topology time to come up, without using it to wait for the spouts to complete
+      (simulate-wait cluster-map)
+
+      (while-timeout timeout-ms (not (every? exhausted? (spout-objects spouts)))
+                     (simulate-wait cluster-map))
+
+      (.killTopologyWithOpts (:nimbus cluster-map) storm-name (doto (KillOptions.) (.set_wait_secs 0)))
+      (while-timeout timeout-ms (.assignment-info state storm-id nil)
+                     (simulate-wait cluster-map))
+      (when cleanup-state
+        (doseq [spout (spout-objects spouts)]
+          (cleanup spout))))
+
+    (if cleanup-state
+      (.getAndRemoveResults capturer)
+      (.getAndClearResults capturer))))
+
+(defn read-tuples
+  ([results component-id stream-id]
+   (let [fixed-tuples (get results component-id [])]
+     (mapcat
+       (fn [ft]
+         (if (= stream-id (. ft stream))
+           [(vec (. ft values))]))
+       fixed-tuples)
+     ))
+  ([results component-id]
+   (read-tuples results component-id Utils/DEFAULT_STREAM_ID)))
+
+(defn ms=
+  [& args]
+  (apply = (map multi-set args)))
+
+(def TRACKER-BOLT-ID "+++tracker-bolt")
+
+;; TODO: should override system-topology! and wrap everything there
+(defn mk-tracked-topology
+  ([tracked-cluster topology]
+   (let [track-id (::track-id tracked-cluster)
+         ret (.deepCopy topology)]
+     (dofor [[_ bolt] (.get_bolts ret)
+             :let [obj (deserialized-component-object (.get_bolt_object bolt))]]
+            (.set_bolt_object bolt (serialize-component-object
+                                     (BoltTracker. obj track-id))))
+     (dofor [[_ spout] (.get_spouts ret)
+             :let [obj (deserialized-component-object (.get_spout_object spout))]]
+            (.set_spout_object spout (serialize-component-object
+                                       (SpoutTracker. obj track-id))))
+     {:topology ret
+      :last-spout-emit (atom 0)
+      :cluster tracked-cluster})))
+
+(defn assoc-track-id
+  [cluster track-id]
+  (assoc cluster ::track-id track-id))
+
+(defn increment-global!
+  [id key amt]
+  (-> (RegisteredGlobalState/getState id)
+      (get key)
+      (.addAndGet amt)))
+
+(defn global-amt
+  [id key]
+  (-> (RegisteredGlobalState/getState id)
+      (get key)
+      .get))
+
+(defmacro with-tracked-cluster
+  [[cluster-sym & cluster-args] & body]
+  `(let [id# (uuid)]
+     (RegisteredGlobalState/setState
+       id#
+       (doto (ConcurrentHashMap.)
+         (.put "spout-emitted" (AtomicInteger. 0))
+         (.put "transferred" (AtomicInteger. 0))
+         (.put "processed" (AtomicInteger. 0))))
+     (with-var-roots
+       [acker/mk-acker-bolt
+        (let [old# acker/mk-acker-bolt]
+          (fn [& args#] (NonRichBoltTracker. (apply old# args#) id#)))
+        ;; critical that this particular function is overridden here,
+        ;; since the transferred stat needs to be incremented at the moment
+        ;; of tuple emission (and not on a separate thread later) for
+        ;; topologies to be tracked correctly. This is because "transferred" *must*
+        ;; be incremented before "processing".
+        executor/mk-executor-transfer-fn
+        (let [old# executor/mk-executor-transfer-fn]
+          (fn [& args#]
+            (let [transferrer# (apply old# args#)]
+              (fn [& args2#]
+                ;; (log-message "Transferring: " transfer-args#)
+                (increment-global! id# "transferred" 1)
+                (apply transferrer# args2#)))))]
+       (with-simulated-time-local-cluster [~cluster-sym ~@cluster-args]
+                           (let [~cluster-sym (assoc-track-id ~cluster-sym id#)]
+                             ~@body)))
+     (RegisteredGlobalState/clearState id#)))
+
+(defn tracked-wait
+  "Waits until topology is idle and 'amt' more tuples have been emitted by spouts."
+  ([tracked-topology]
+     (tracked-wait tracked-topology 1 TEST-TIMEOUT-MS))
+  ([tracked-topology amt]
+     (tracked-wait tracked-topology amt TEST-TIMEOUT-MS))
+  ([tracked-topology amt timeout-ms]
+    (let [target (+ amt @(:last-spout-emit tracked-topology))
+          track-id (-> tracked-topology :cluster ::track-id)
+          waiting? (fn []
+                     (or (not= target (global-amt track-id "spout-emitted"))
+                         (not= (global-amt track-id "transferred")
+                               (global-amt track-id "processed"))))]
+      (while-timeout timeout-ms (waiting?)
+                     ;; (println "Spout emitted: " (global-amt track-id "spout-emitted"))
+                     ;; (println "Processed: " (global-amt track-id "processed"))
+                     ;; (println "Transferred: " (global-amt track-id "transferred"))
+                    (Thread/sleep (rand-int 200)))
+      (reset! (:last-spout-emit tracked-topology) target))))
+
+(defnk test-tuple
+  [values
+   :stream Utils/DEFAULT_STREAM_ID
+   :component "component"
+   :fields nil]
+  (let [fields (or fields
+                   (->> (iterate inc 1)
+                        (take (count values))
+                        (map #(str "field" %))))
+        spout-spec (mk-spout-spec* (TestWordSpout.)
+                                   {stream fields})
+        topology (StormTopology. {component spout-spec} {} {})
+        context (TopologyContext.
+                  topology
+                  (read-storm-config)
+                  {(int 1) component}
+                  {component [(int 1)]}
+                  {component {stream (Fields. fields)}}
+                  "test-storm-id"
+                  nil
+                  nil
+                  (int 1)
+                  nil
+                  [(int 1)]
+                  {}
+                  {}
+                  (HashMap.)
+                  (HashMap.)
+                  (atom false))]
+    (TupleImpl. context values 1 stream)))
+
+(defmacro with-timeout
+  [millis unit & body]
+  `(let [f# (future ~@body)]
+     (try
+       (.get f# ~millis ~unit)
+       (finally (future-cancel f#)))))
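
For context, a minimal sketch (not part of the patch) of how the helpers above are typically exercised, mirroring the in-tree integration tests. TestWordSpout and TestWordCounter are the stock test components from org.apache.storm.testing, and thrift/mk-topology comes from the thrift namespace further down in this commit; the topology and expected output are illustrative only:

    (import '[org.apache.storm.testing TestWordSpout TestWordCounter])
    (require '[org.apache.storm.thrift :as thrift])

    (with-simulated-time-local-cluster [cluster :supervisors 4]
      (let [topology (thrift/mk-topology
                       {"words" (thrift/mk-spout-spec (TestWordSpout. true) :p 2)}
                       {"count" (thrift/mk-bolt-spec {"words" ["word"]} (TestWordCounter.) :p 3)})
            results (complete-topology cluster topology
                                       :mock-sources {"words" [["nathan"] ["bob"] ["nathan"]]})]
        ;; every mocked tuple should show up on the default stream of "words"
        (assert (ms= [["nathan"] ["bob"] ["nathan"]]
                     (read-tuples results "words")))))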

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/testing4j.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/testing4j.clj b/storm-core/src/clj/org/apache/storm/testing4j.clj
new file mode 100644
index 0000000..5850262
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/testing4j.clj
@@ -0,0 +1,184 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.testing4j
+  (:import [java.util Map List Collection ArrayList])
+  (:require [org.apache.storm [LocalCluster :as LocalCluster]])
+  (:import [org.apache.storm Config ILocalCluster LocalCluster])
+  (:import [org.apache.storm.generated StormTopology])
+  (:import [org.apache.storm.daemon nimbus])
+  (:import [org.apache.storm.testing TestJob MockedSources TrackedTopology
+            MkClusterParam CompleteTopologyParam MkTupleParam])
+  (:import [org.apache.storm.utils Utils])
+  (:use [org.apache.storm testing util log])
+  (:gen-class
+   :name org.apache.storm.Testing
+   :methods [^:static [completeTopology
+                       [org.apache.storm.ILocalCluster  org.apache.storm.generated.StormTopology
+                        org.apache.storm.testing.CompleteTopologyParam]
+                       java.util.Map]
+             ^:static [completeTopology
+                       [org.apache.storm.ILocalCluster org.apache.storm.generated.StormTopology]
+                       java.util.Map]
+             ^:static [withSimulatedTime [Runnable] void]
+             ^:static [withLocalCluster [org.apache.storm.testing.TestJob] void]
+             ^:static [withLocalCluster [org.apache.storm.testing.MkClusterParam org.apache.storm.testing.TestJob] void]
+             ^:static [getLocalCluster [java.util.Map] org.apache.storm.ILocalCluster]
+             ^:static [withSimulatedTimeLocalCluster [org.apache.storm.testing.TestJob] void]
+             ^:static [withSimulatedTimeLocalCluster [org.apache.storm.testing.MkClusterParam org.apache.storm.testing.TestJob] void]
+             ^:static [withTrackedCluster [org.apache.storm.testing.TestJob] void]
+             ^:static [withTrackedCluster [org.apache.storm.testing.MkClusterParam org.apache.storm.testing.TestJob] void]
+             ^:static [readTuples [java.util.Map String String] java.util.List]
+             ^:static [readTuples [java.util.Map String] java.util.List]
+             ^:static [mkTrackedTopology [org.apache.storm.ILocalCluster org.apache.storm.generated.StormTopology] org.apache.storm.testing.TrackedTopology]
+             ^:static [trackedWait [org.apache.storm.testing.TrackedTopology] void]
+             ^:static [trackedWait [org.apache.storm.testing.TrackedTopology Integer] void]
+             ^:static [trackedWait [org.apache.storm.testing.TrackedTopology Integer Integer] void]
+             ^:static [advanceClusterTime [org.apache.storm.ILocalCluster Integer Integer] void]
+             ^:static [advanceClusterTime [org.apache.storm.ILocalCluster Integer] void]
+             ^:static [multiseteq [java.util.Collection java.util.Collection] boolean]
+             ^:static [multiseteq [java.util.Map java.util.Map] boolean]
+             ^:static [testTuple [java.util.List] org.apache.storm.tuple.Tuple]
+             ^:static [testTuple [java.util.List org.apache.storm.testing.MkTupleParam] org.apache.storm.tuple.Tuple]]))
+
+(defn -completeTopology
+  ([^ILocalCluster cluster ^StormTopology topology ^CompleteTopologyParam completeTopologyParam]
+    (let [mocked-sources (or (-> completeTopologyParam .getMockedSources .getData) {})
+          storm-conf (or (.getStormConf completeTopologyParam) {})
+          cleanup-state (or (.getCleanupState completeTopologyParam) true)
+          topology-name (.getTopologyName completeTopologyParam)
+          timeout-ms (or (.getTimeoutMs completeTopologyParam) TEST-TIMEOUT-MS)]
+      (complete-topology (.getState cluster) topology
+        :mock-sources mocked-sources
+        :storm-conf storm-conf
+        :cleanup-state cleanup-state
+        :topology-name topology-name
+        :timeout-ms timeout-ms)))
+  ([^ILocalCluster cluster ^StormTopology topology]
+    (-completeTopology cluster topology (CompleteTopologyParam.))))
+
+
+(defn -withSimulatedTime
+  [^Runnable code]
+  (with-simulated-time
+    (.run code)))
+
+(defmacro with-cluster
+  [cluster-type mkClusterParam code]
+  `(let [supervisors# (or (.getSupervisors ~mkClusterParam) 2)
+         ports-per-supervisor# (or (.getPortsPerSupervisor ~mkClusterParam) 3)
+         daemon-conf# (or (.getDaemonConf ~mkClusterParam) {})]
+     (~cluster-type [cluster# :supervisors supervisors#
+                     :ports-per-supervisor ports-per-supervisor#
+                     :daemon-conf daemon-conf#]
+                    (let [cluster# (LocalCluster. cluster#)]
+                      (.run ~code cluster#)))))
+
+(defn -withLocalCluster
+  ([^MkClusterParam mkClusterParam ^TestJob code]
+     (with-cluster with-local-cluster mkClusterParam code))
+  ([^TestJob code]
+     (-withLocalCluster (MkClusterParam.) code)))
+
+(defn -getLocalCluster
+  ([^Map clusterConf]
+     (let [daemon-conf (get-in clusterConf ["daemon-conf"] {})
+           supervisors (get-in clusterConf ["supervisors"] 2)
+           ports-per-supervisor (get-in clusterConf ["ports-per-supervisor"] 3)
+           inimbus (get-in clusterConf ["inimbus"] nil)
+           supervisor-slot-port-min (get-in clusterConf ["supervisor-slot-port-min"] 1024)
+           nimbus-daemon (get-in clusterConf ["nimbus-daemon"] false)
+           local-cluster-map (mk-local-storm-cluster :supervisors supervisors
+                                                     :ports-per-supervisor ports-per-supervisor
+                                                     :daemon-conf daemon-conf
+                                                     :inimbus inimbus
+                                                     :supervisor-slot-port-min supervisor-slot-port-min
+                                                     :nimbus-daemon nimbus-daemon
+                                                     )]
+       (LocalCluster. local-cluster-map))))
+
+(defn -withSimulatedTimeLocalCluster
+  ([^MkClusterParam mkClusterParam ^TestJob code]
+     (with-cluster with-simulated-time-local-cluster mkClusterParam code))
+  ([^TestJob code]
+     (-withSimulatedTimeLocalCluster (MkClusterParam.) code)))
+
+(defn -withTrackedCluster
+  ([^MkClusterParam mkClusterParam ^TestJob code]
+     (with-cluster with-tracked-cluster mkClusterParam code))
+  ([^TestJob code]
+     (-withTrackedCluster (MkClusterParam.) code)))
+
+(defn- find-tuples
+  [^List fixed-tuples ^String stream]
+  (let [ret (ArrayList.)]
+    (doseq [fixed-tuple fixed-tuples]
+      (if (= (.stream fixed-tuple) stream)
+        (.add ret (.values fixed-tuple))))
+    ret))
+
+(defn -readTuples
+  ([^Map result ^String componentId ^String streamId]
+   (let [stream-result (.get result componentId)
+         ret (if stream-result
+               (find-tuples stream-result streamId)
+               [])]
+     ret))
+  ([^Map result ^String componentId]
+   (-readTuples result componentId Utils/DEFAULT_STREAM_ID)))
+
+(defn -mkTrackedTopology
+  [^ILocalCluster trackedCluster ^StormTopology topology]
+  (-> (mk-tracked-topology (.getState trackedCluster) topology)
+      (TrackedTopology.)))
+
+(defn -trackedWait
+  ([^TrackedTopology trackedTopology ^Integer amt ^Integer timeout-ms]
+   (tracked-wait trackedTopology amt timeout-ms))
+  ([^TrackedTopology trackedTopology ^Integer amt]
+   (tracked-wait trackedTopology amt))
+  ([^TrackedTopology trackedTopology]
+   (-trackedWait trackedTopology 1)))
+
+(defn -advanceClusterTime
+  ([^ILocalCluster cluster ^Integer secs ^Integer step]
+   (advance-cluster-time (.getState cluster) secs step))
+  ([^ILocalCluster cluster ^Integer secs]
+   (-advanceClusterTime cluster secs 1)))
+
+(defn- multiseteq
+  [^Object obj1 ^Object obj2]
+  (let [obj1 (clojurify-structure obj1)
+        obj2 (clojurify-structure obj2)]
+    (ms= obj1 obj2)))
+
+(defn -multiseteq
+  [^Collection coll1 ^Collection coll2]
+  (multiseteq coll1 coll2))
+
+(defn -multiseteq
+  [^Map coll1 ^Map coll2]
+  (multiseteq coll1 coll2))
+
+(defn -testTuple
+  ([^List values]
+   (-testTuple values nil))
+  ([^List values ^MkTupleParam param]
+   (if (nil? param)
+     (test-tuple values)
+     (let [stream (or (.getStream param) Utils/DEFAULT_STREAM_ID)
+           component (or (.getComponent param) "component")
+           fields (.getFields param)]
+       (test-tuple values :stream stream :component component :fields fields)))))
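
A rough sketch of driving the gen-class surface above; written with Clojure interop for brevity, but a Java caller would use the same static methods on org.apache.storm.Testing. The MkClusterParam setters are assumed to be the counterparts of the getters used in with-cluster, and the job body is only illustrative:

    (import '[org.apache.storm Testing]
            '[org.apache.storm.testing MkClusterParam TestJob])

    (Testing/withSimulatedTimeLocalCluster
      (doto (MkClusterParam.)
        (.setSupervisors (Integer/valueOf 2))
        (.setPortsPerSupervisor (Integer/valueOf 3)))
      (reify TestJob
        (run [_ cluster]
          ;; `cluster` is an org.apache.storm.ILocalCluster; topologies would be
          ;; submitted and checked here via Testing/completeTopology and Testing/readTuples
          (println "local cluster ready:" cluster))))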

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/thrift.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/thrift.clj b/storm-core/src/clj/org/apache/storm/thrift.clj
new file mode 100644
index 0000000..47e233a
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/thrift.clj
@@ -0,0 +1,284 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+
+(ns org.apache.storm.thrift
+  (:import [java.util HashMap]
+           [java.io Serializable]
+           [org.apache.storm.generated NodeInfo Assignment])
+  (:import [org.apache.storm.generated JavaObject Grouping Nimbus StormTopology
+            StormTopology$_Fields Bolt Nimbus$Client Nimbus$Iface
+            ComponentCommon Grouping$_Fields SpoutSpec NullStruct StreamInfo
+            GlobalStreamId ComponentObject ComponentObject$_Fields
+            ShellComponent SupervisorInfo])
+  (:import [org.apache.storm.utils Utils NimbusClient])
+  (:import [org.apache.storm Constants])
+  (:import [org.apache.storm.security.auth ReqContext])
+  (:import [org.apache.storm.grouping CustomStreamGrouping])
+  (:import [org.apache.storm.topology TopologyBuilder])
+  (:import [org.apache.storm.clojure RichShellBolt RichShellSpout])
+  (:import [org.apache.thrift.transport TTransport])
+  (:use [org.apache.storm util config log zookeeper]))
+
+(defn instantiate-java-object
+  [^JavaObject obj]
+  (let [name (symbol (.get_full_class_name obj))
+        args (map (memfn getFieldValue) (.get_args_list obj))]
+    (eval `(new ~name ~@args))))
+
+(def grouping-constants
+  {Grouping$_Fields/FIELDS :fields
+   Grouping$_Fields/SHUFFLE :shuffle
+   Grouping$_Fields/ALL :all
+   Grouping$_Fields/NONE :none
+   Grouping$_Fields/CUSTOM_SERIALIZED :custom-serialized
+   Grouping$_Fields/CUSTOM_OBJECT :custom-object
+   Grouping$_Fields/DIRECT :direct
+   Grouping$_Fields/LOCAL_OR_SHUFFLE :local-or-shuffle})
+
+(defn grouping-type
+  [^Grouping grouping]
+  (grouping-constants (.getSetField grouping)))
+
+(defn field-grouping
+  [^Grouping grouping]
+  (when-not (= (grouping-type grouping) :fields)
+    (throw (IllegalArgumentException. "Tried to get grouping fields from non fields grouping")))
+  (.get_fields grouping))
+
+(defn global-grouping?
+  [^Grouping grouping]
+  (and (= :fields (grouping-type grouping))
+       (empty? (field-grouping grouping))))
+
+(defn parallelism-hint
+  [^ComponentCommon component-common]
+  (let [phint (.get_parallelism_hint component-common)]
+    (if-not (.is_set_parallelism_hint component-common) 1 phint)))
+
+(defn nimbus-client-and-conn
+  ([host port]
+    (nimbus-client-and-conn host port nil))
+  ([host port as-user]
+  (log-message "Connecting to Nimbus at " host ":" port " as user: " as-user)
+  (let [conf (read-storm-config)
+        nimbusClient (NimbusClient. conf host port nil as-user)
+        client (.getClient nimbusClient)
+        transport (.transport nimbusClient)]
+        [client transport] )))
+
+(defmacro with-nimbus-connection
+  [[client-sym host port] & body]
+  `(let [[^Nimbus$Client ~client-sym ^TTransport conn#] (nimbus-client-and-conn ~host ~port)]
+    (try
+      ~@body
+    (finally (.close conn#)))))
+
+(defmacro with-configured-nimbus-connection
+  [client-sym & body]
+  `(let [conf# (read-storm-config)
+         context# (ReqContext/context)
+         user# (if (.principal context#) (.getName (.principal context#)))
+         nimbusClient# (NimbusClient/getConfiguredClientAs conf# user#)
+         ~client-sym (.getClient nimbusClient#)
+         conn# (.transport nimbusClient#)
+         ]
+     (try
+       ~@body
+     (finally (.close conn#)))))
+
+(defn direct-output-fields
+  [fields]
+  (StreamInfo. fields true))
+
+(defn output-fields
+  [fields]
+  (StreamInfo. fields false))
+
+(defn mk-output-spec
+  [output-spec]
+  (let [output-spec (if (map? output-spec)
+                      output-spec
+                      {Utils/DEFAULT_STREAM_ID output-spec})]
+    (map-val
+      (fn [out]
+        (if (instance? StreamInfo out)
+          out
+          (StreamInfo. out false)))
+      output-spec)))
+
+(defnk mk-plain-component-common
+  [inputs output-spec parallelism-hint :conf nil]
+  (let [ret (ComponentCommon. (HashMap. inputs) (HashMap. (mk-output-spec output-spec)))]
+    (when parallelism-hint
+      (.set_parallelism_hint ret parallelism-hint))
+    (when conf
+      (.set_json_conf ret (to-json conf)))
+    ret))
+
+(defnk mk-spout-spec*
+  [spout outputs :p nil :conf nil]
+  (SpoutSpec. (ComponentObject/serialized_java (Utils/javaSerialize spout))
+              (mk-plain-component-common {} outputs p :conf conf)))
+
+(defn mk-shuffle-grouping
+  []
+  (Grouping/shuffle (NullStruct.)))
+
+(defn mk-local-or-shuffle-grouping
+  []
+  (Grouping/local_or_shuffle (NullStruct.)))
+
+(defn mk-fields-grouping
+  [fields]
+  (Grouping/fields fields))
+
+(defn mk-global-grouping
+  []
+  (mk-fields-grouping []))
+
+(defn mk-direct-grouping
+  []
+  (Grouping/direct (NullStruct.)))
+
+(defn mk-all-grouping
+  []
+  (Grouping/all (NullStruct.)))
+
+(defn mk-none-grouping
+  []
+  (Grouping/none (NullStruct.)))
+
+(defn deserialized-component-object
+  [^ComponentObject obj]
+  (when (not= (.getSetField obj) ComponentObject$_Fields/SERIALIZED_JAVA)
+    (throw (RuntimeException. "Cannot deserialize non-java-serialized object")))
+  (Utils/javaDeserialize (.get_serialized_java obj) Serializable))
+
+(defn serialize-component-object
+  [obj]
+  (ComponentObject/serialized_java (Utils/javaSerialize obj)))
+
+(defn- mk-grouping
+  [grouping-spec]
+  (cond (nil? grouping-spec)
+        (mk-none-grouping)
+
+        (instance? Grouping grouping-spec)
+        grouping-spec
+
+        (instance? CustomStreamGrouping grouping-spec)
+        (Grouping/custom_serialized (Utils/javaSerialize grouping-spec))
+
+        (instance? JavaObject grouping-spec)
+        (Grouping/custom_object grouping-spec)
+
+        (sequential? grouping-spec)
+        (mk-fields-grouping grouping-spec)
+
+        (= grouping-spec :shuffle)
+        (mk-shuffle-grouping)
+
+        (= grouping-spec :local-or-shuffle)
+        (mk-local-or-shuffle-grouping)
+        (= grouping-spec :none)
+        (mk-none-grouping)
+
+        (= grouping-spec :all)
+        (mk-all-grouping)
+
+        (= grouping-spec :global)
+        (mk-global-grouping)
+
+        (= grouping-spec :direct)
+        (mk-direct-grouping)
+
+        true
+        (throw (IllegalArgumentException.
+                 (str grouping-spec " is not a valid grouping")))))
+
+(defn- mk-inputs
+  [inputs]
+  (into {} (for [[stream-id grouping-spec] inputs]
+             [(if (sequential? stream-id)
+                (GlobalStreamId. (first stream-id) (second stream-id))
+                (GlobalStreamId. stream-id Utils/DEFAULT_STREAM_ID))
+              (mk-grouping grouping-spec)])))
+
+(defnk mk-bolt-spec*
+  [inputs bolt outputs :p nil :conf nil]
+  (let [common (mk-plain-component-common (mk-inputs inputs) outputs p :conf conf)]
+    (Bolt. (ComponentObject/serialized_java (Utils/javaSerialize bolt))
+           common)))
+
+(defnk mk-spout-spec
+  [spout :parallelism-hint nil :p nil :conf nil]
+  (let [parallelism-hint (if p p parallelism-hint)]
+    {:obj spout :p parallelism-hint :conf conf}))
+
+(defn- shell-component-params
+  [command script-or-output-spec kwargs]
+  (if (string? script-or-output-spec)
+    [(into-array String [command script-or-output-spec])
+     (first kwargs)
+     (rest kwargs)]
+    [(into-array String command)
+     script-or-output-spec
+     kwargs]))
+
+(defnk mk-bolt-spec
+  [inputs bolt :parallelism-hint nil :p nil :conf nil]
+  (let [parallelism-hint (if p p parallelism-hint)]
+    {:obj bolt :inputs inputs :p parallelism-hint :conf conf}))
+
+(defn mk-shell-bolt-spec
+  [inputs command script-or-output-spec & kwargs]
+  (let [[command output-spec kwargs]
+        (shell-component-params command script-or-output-spec kwargs)]
+    (apply mk-bolt-spec inputs
+           (RichShellBolt. command (mk-output-spec output-spec)) kwargs)))
+
+(defn mk-shell-spout-spec
+  [command script-or-output-spec & kwargs]
+  (let [[command output-spec kwargs]
+        (shell-component-params command script-or-output-spec kwargs)]
+    (apply mk-spout-spec
+           (RichShellSpout. command (mk-output-spec output-spec)) kwargs)))
+
+(defn- add-inputs
+  [declarer inputs]
+  (doseq [[id grouping] (mk-inputs inputs)]
+    (.grouping declarer id grouping)))
+
+(defn mk-topology
+  ([spout-map bolt-map]
+   (let [builder (TopologyBuilder.)]
+     (doseq [[name {spout :obj p :p conf :conf}] spout-map]
+       (-> builder (.setSpout name spout (if-not (nil? p) (int p) p)) (.addConfigurations conf)))
+     (doseq [[name {bolt :obj p :p conf :conf inputs :inputs}] bolt-map]
+       (-> builder (.setBolt name bolt (if-not (nil? p) (int p) p)) (.addConfigurations conf) (add-inputs inputs)))
+     (.createTopology builder)))
+  ([spout-map bolt-map state-spout-map]
+   (mk-topology spout-map bolt-map)))
+
+;; clojurify-structure is needed or else every element becomes the same after successive calls
+;; don't know why this happens
+(def STORM-TOPOLOGY-FIELDS
+  (-> StormTopology/metaDataMap clojurify-structure keys))
+
+(def SPOUT-FIELDS
+  [StormTopology$_Fields/SPOUTS
+   StormTopology$_Fields/STATE_SPOUTS])
+

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/timer.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/timer.clj b/storm-core/src/clj/org/apache/storm/timer.clj
new file mode 100644
index 0000000..0d8839e
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/timer.clj
@@ -0,0 +1,128 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+
+(ns org.apache.storm.timer
+  (:import [org.apache.storm.utils Time])
+  (:import [java.util PriorityQueue Comparator Random])
+  (:import [java.util.concurrent Semaphore])
+  (:use [org.apache.storm util log]))
+
+;; The timer defined in this file is very similar to java.util.Timer, except
+;; it integrates with Storm's time simulation capabilities. This lets us test
+;; code that does asynchronous work on the timer thread
+
+(defnk mk-timer [:kill-fn (fn [& _] ) :timer-name nil]
+  (let [queue (PriorityQueue. 10 (reify Comparator
+                                   (compare
+                                     [this o1 o2]
+                                     (- (first o1) (first o2)))
+                                   (equals
+                                     [this obj]
+                                     true)))
+        active (atom true)
+        lock (Object.)
+        notifier (Semaphore. 0)
+        thread-name (if timer-name timer-name "timer")
+        timer-thread (Thread.
+                       (fn []
+                         (while @active
+                           (try
+                             (let [[time-millis _ _ :as elem] (locking lock (.peek queue))]
+                               (if (and elem (>= (current-time-millis) time-millis))
+                                 ;; It is imperative to not run the function
+                                 ;; inside the timer lock. Otherwise, it is
+                                 ;; possible to deadlock if the fn deals with
+                                 ;; other locks, like the submit lock.
+                                 (let [afn (locking lock (second (.poll queue)))]
+                                   (afn))
+                                 (if time-millis
+                                   ;; If any events are scheduled, sleep until
+                                   ;; event generation. If any recurring events
+                                   ;; are scheduled then we will always go
+                                   ;; through this branch, sleeping only the
+                                   ;; exact necessary amount of time. We give
+                                   ;; an upper bound, e.g. 1000 millis, to the
+                                   ;; sleeping time, to limit the response time
+                                   ;; for detecting any new event within 1 sec.
+                                   (Time/sleep (min 1000 (- time-millis (current-time-millis))))
+                                   ;; Otherwise poll to see if any new event
+                                   ;; was scheduled. This is, in essence, the
+                                   ;; response time for detecting any new event
+                                   ;; schedulings when there are no scheduled
+                                   ;; events.
+                                   (Time/sleep 1000))))
+                             (catch Throwable t
+                               ;; Because the interrupted exception can be
+                               ;; wrapped in a RuntimeException.
+                               (when-not (exception-cause? InterruptedException t)
+                                 (kill-fn t)
+                                 (reset! active false)
+                                 (throw t)))))
+                         (.release notifier)) thread-name)]
+    (.setDaemon timer-thread true)
+    (.setPriority timer-thread Thread/MAX_PRIORITY)
+    (.start timer-thread)
+    {:timer-thread timer-thread
+     :queue queue
+     :active active
+     :lock lock
+     :random (Random.)
+     :cancel-notifier notifier}))
+
+(defn- check-active!
+  [timer]
+  (when-not @(:active timer)
+    (throw (IllegalStateException. "Timer is not active"))))
+
+(defnk schedule
+  [timer delay-secs afn :check-active true :jitter-ms 0]
+  (when check-active (check-active! timer))
+  (let [id (uuid)
+        ^PriorityQueue queue (:queue timer)
+        end-time-ms (+ (current-time-millis) (secs-to-millis-long delay-secs))
+        end-time-ms (if (< 0 jitter-ms) (+ (.nextInt (:random timer) jitter-ms) end-time-ms) end-time-ms)]
+    (locking (:lock timer)
+      (.add queue [end-time-ms afn id]))))
+
+(defn schedule-recurring
+  [timer delay-secs recur-secs afn]
+  (schedule timer
+            delay-secs
+            (fn this []
+              (afn)
+              ; This avoids a race condition with cancel-timer.
+              (schedule timer recur-secs this :check-active false))))
+
+(defn schedule-recurring-with-jitter
+  [timer delay-secs recur-secs jitter-ms afn]
+  (schedule timer
+            delay-secs
+            (fn this []
+              (afn)
+              ; This avoids a race condition with cancel-timer.
+              (schedule timer recur-secs this :check-active false :jitter-ms jitter-ms))))
+
+(defn cancel-timer
+  [timer]
+  (check-active! timer)
+  (locking (:lock timer)
+    (reset! (:active timer) false)
+    (.interrupt (:timer-thread timer)))
+  (.acquire (:cancel-notifier timer)))
+
+(defn timer-waiting?
+  [timer]
+  (Time/isThreadWaiting (:timer-thread timer)))
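
As a usage sketch (the names here are illustrative), a daemon would typically create one timer, register recurring work on it, and cancel it on shutdown:

    (let [timer (mk-timer :timer-name "heartbeat-timer"
                          :kill-fn (fn [err] (log-error err "heartbeat timer died")))]
      ;; run the heartbeat 5 seconds from now and every 10 seconds thereafter
      (schedule-recurring timer 5 10 (fn [] (log-message "heartbeat tick")))
      ;; ... later, on shutdown; blocks until the timer thread has exited
      (cancel-timer timer))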

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/trident/testing.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/trident/testing.clj b/storm-core/src/clj/org/apache/storm/trident/testing.clj
new file mode 100644
index 0000000..44e5ca9
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/trident/testing.clj
@@ -0,0 +1,79 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.trident.testing
+  (:require [org.apache.storm.LocalDRPC :as LocalDRPC])
+  (:import [org.apache.storm.trident.testing FeederBatchSpout FeederCommitterBatchSpout MemoryMapState MemoryMapState$Factory TuplifyArgs])
+  (:require [org.apache.storm [LocalDRPC]])
+  (:import [org.apache.storm LocalDRPC])
+  (:import [org.apache.storm.tuple Fields])
+  (:import [org.apache.storm.generated KillOptions])
+  (:require [org.apache.storm [testing :as t]])
+  (:use [org.apache.storm util])
+  )
+
+(defn local-drpc []
+  (LocalDRPC.))
+
+(defn exec-drpc [^LocalDRPC drpc function-name args]
+  (let [res (.execute drpc function-name args)]
+    (from-json res)))
+
+(defn exec-drpc-tuples [^LocalDRPC drpc function-name tuples]
+  (exec-drpc drpc function-name (to-json tuples)))
+
+(defn feeder-spout [fields]
+  (FeederBatchSpout. fields))
+
+(defn feeder-committer-spout [fields]
+  (FeederCommitterBatchSpout. fields))
+
+(defn feed [feeder tuples]
+  (.feed feeder tuples))
+
+(defn fields [& fields]
+  (Fields. fields))
+
+(defn memory-map-state []
+  (MemoryMapState$Factory.))
+
+(defmacro with-drpc [[drpc] & body]
+  `(let [~drpc (org.apache.storm.LocalDRPC.)]
+     ~@body
+     (.shutdown ~drpc)
+     ))
+
+(defn with-topology* [cluster topo body-fn]
+  (t/submit-local-topology (:nimbus cluster) "tester" {} (.build topo))
+  (body-fn)
+  (.killTopologyWithOpts (:nimbus cluster) "tester" (doto (KillOptions.) (.set_wait_secs 0)))
+  )
+
+(defmacro with-topology [[cluster topo] & body]
+  `(with-topology* ~cluster ~topo (fn [] ~@body)))
+
+(defn bootstrap-imports []
+  (import 'org.apache.storm.LocalDRPC)
+  (import 'org.apache.storm.trident.TridentTopology)
+  (import '[org.apache.storm.trident.operation.builtin Count Sum Equals MapGet Debug FilterNull FirstN TupleCollectionGet])
+  )
+
+(defn drpc-tuples-input [topology function-name drpc outfields]
+  (-> topology
+      (.newDRPCStream function-name drpc)
+      (.each (fields "args") (TuplifyArgs.) outfields)
+      ))
+
+
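
A condensed sketch of how these helpers drive a Trident topology under test, following the usual word-count shape of the in-tree Trident tests; Split is assumed to come from org.apache.storm.trident.testing, Count/MapGet are among the builtin operations listed in bootstrap-imports above, and none of the wiring is specific to this patch:

    (import '[org.apache.storm.trident TridentTopology]
            '[org.apache.storm.trident.testing Split]
            '[org.apache.storm.trident.operation.builtin Count MapGet])

    (t/with-local-cluster [cluster]
      (with-drpc [drpc]
        (let [topo (TridentTopology.)
              feeder (feeder-spout ["sentence"])
              word-counts (-> topo
                              (.newStream "tester" feeder)
                              (.each (fields "sentence") (Split.) (fields "word"))
                              (.groupBy (fields "word"))
                              (.persistentAggregate (memory-map-state) (Count.) (fields "count")))]
          ;; DRPC query stream: split the request args and look the words up in the state
          (-> topo
              (.newDRPCStream "words" drpc)
              (.each (fields "args") (Split.) (fields "word"))
              (.stateQuery word-counts (fields "word") (MapGet.) (fields "count")))
          (with-topology [cluster topo]
            (feed feeder [["hello storm hello"]])
            (println (exec-drpc drpc "words" "hello storm"))))))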


http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/trident/state/CassandraStateUpdater.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/trident/state/CassandraStateUpdater.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/trident/state/CassandraStateUpdater.java
index 6453198..ad185b9 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/trident/state/CassandraStateUpdater.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/trident/state/CassandraStateUpdater.java
@@ -18,9 +18,9 @@
  */
 package org.apache.storm.cassandra.trident.state;
 
-import storm.trident.operation.TridentCollector;
-import storm.trident.state.BaseStateUpdater;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.state.BaseStateUpdater;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/trident/state/TridentResultSetValuesMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/trident/state/TridentResultSetValuesMapper.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/trident/state/TridentResultSetValuesMapper.java
index be7bda1..9e41413 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/trident/state/TridentResultSetValuesMapper.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/trident/state/TridentResultSetValuesMapper.java
@@ -18,9 +18,9 @@
  */
 package org.apache.storm.cassandra.trident.state;
 
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.ITuple;
-import backtype.storm.tuple.Values;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.ITuple;
+import org.apache.storm.tuple.Values;
 import com.datastax.driver.core.ResultSet;
 import com.datastax.driver.core.Row;
 import com.datastax.driver.core.Session;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/DynamicStatementBuilderTest.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/DynamicStatementBuilderTest.java b/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/DynamicStatementBuilderTest.java
index 800cad7..f96dae8 100644
--- a/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/DynamicStatementBuilderTest.java
+++ b/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/DynamicStatementBuilderTest.java
@@ -18,8 +18,8 @@
  */
 package org.apache.storm.cassandra;
 
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
 import com.datastax.driver.core.BatchStatement;
 import com.datastax.driver.core.ProtocolVersion;
 import com.datastax.driver.core.SimpleStatement;
@@ -160,4 +160,4 @@ public class DynamicStatementBuilderTest {
             Assert.assertEquals(3, ((SimpleStatement)s).getValues(ProtocolVersion.V3).length);
         }
     }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/WeatherSpout.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/WeatherSpout.java b/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/WeatherSpout.java
index 0b7d8a5..6dc6c06 100644
--- a/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/WeatherSpout.java
+++ b/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/WeatherSpout.java
@@ -18,12 +18,12 @@
  */
 package org.apache.storm.cassandra;
 
-import backtype.storm.spout.SpoutOutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseRichSpout;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
+import org.apache.storm.spout.SpoutOutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseRichSpout;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
 import org.junit.Assert;
 
 import java.util.Map;
@@ -81,4 +81,4 @@ public class WeatherSpout extends BaseRichSpout {
             spoutOutputCollector.emit(new Values(stationID, "38°C"), emit.incrementAndGet());
         }
     }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/bolt/BaseTopologyTest.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/bolt/BaseTopologyTest.java b/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/bolt/BaseTopologyTest.java
index 49159bc..a6d47c9 100644
--- a/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/bolt/BaseTopologyTest.java
+++ b/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/bolt/BaseTopologyTest.java
@@ -18,13 +18,13 @@
  */
 package org.apache.storm.cassandra.bolt;
 
-import backtype.storm.Config;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.utils.Utils;
+import org.apache.storm.Config;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.utils.Utils;
 import org.cassandraunit.CassandraCQLUnit;
 import org.cassandraunit.dataset.cql.ClassPathCQLDataSet;
-import backtype.storm.LocalCluster;
+import org.apache.storm.LocalCluster;
 
 import org.junit.Rule;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/bolt/BatchCassandraWriterBoltTest.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/bolt/BatchCassandraWriterBoltTest.java b/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/bolt/BatchCassandraWriterBoltTest.java
index c5116e2..d0471f4 100644
--- a/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/bolt/BatchCassandraWriterBoltTest.java
+++ b/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/bolt/BatchCassandraWriterBoltTest.java
@@ -18,7 +18,7 @@
  */
 package org.apache.storm.cassandra.bolt;
 
-import backtype.storm.topology.TopologyBuilder;
+import org.apache.storm.topology.TopologyBuilder;
 import com.datastax.driver.core.ResultSet;
 import org.apache.storm.cassandra.WeatherSpout;
 import org.apache.storm.cassandra.query.impl.SimpleCQLStatementMapper;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/bolt/CassandraWriterBoltTest.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/bolt/CassandraWriterBoltTest.java b/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/bolt/CassandraWriterBoltTest.java
index 3d1b623..4e922e3 100644
--- a/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/bolt/CassandraWriterBoltTest.java
+++ b/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/bolt/CassandraWriterBoltTest.java
@@ -18,7 +18,7 @@
  */
 package org.apache.storm.cassandra.bolt;
 
-import backtype.storm.topology.TopologyBuilder;
+import org.apache.storm.topology.TopologyBuilder;
 import com.datastax.driver.core.ResultSet;
 import org.apache.storm.cassandra.WeatherSpout;
 import org.apache.storm.cassandra.query.impl.SimpleCQLStatementMapper;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/trident/TridentTopologyTest.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/trident/TridentTopologyTest.java b/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/trident/TridentTopologyTest.java
index bdacfe4..764a574 100644
--- a/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/trident/TridentTopologyTest.java
+++ b/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/trident/TridentTopologyTest.java
@@ -18,9 +18,9 @@
  */
 package org.apache.storm.cassandra.trident;
 
-import backtype.storm.LocalCluster;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.tuple.Fields;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.tuple.Fields;
 import com.datastax.driver.core.ResultSet;
 import com.datastax.driver.core.Row;
 import com.datastax.driver.core.Session;
@@ -34,12 +34,12 @@ import org.apache.storm.cassandra.trident.state.CassandraStateUpdater;
 import org.apache.storm.cassandra.trident.state.TridentResultSetValuesMapper;
 import org.junit.Assert;
 import org.junit.Test;
-import storm.trident.Stream;
-import storm.trident.TridentState;
-import storm.trident.TridentTopology;
-import storm.trident.operation.BaseFunction;
-import storm.trident.operation.TridentCollector;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.Stream;
+import org.apache.storm.trident.TridentState;
+import org.apache.storm.trident.TridentTopology;
+import org.apache.storm.trident.operation.BaseFunction;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.util.Random;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/trident/WeatherBatchSpout.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/trident/WeatherBatchSpout.java b/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/trident/WeatherBatchSpout.java
index 62f31db..622a28f 100644
--- a/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/trident/WeatherBatchSpout.java
+++ b/external/storm-cassandra/src/test/java/org/apache/storm/cassandra/trident/WeatherBatchSpout.java
@@ -18,11 +18,11 @@
  */
 package org.apache.storm.cassandra.trident;
 
-import backtype.storm.task.TopologyContext;
-import backtype.storm.tuple.Fields;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.tuple.Fields;
 import com.google.common.collect.Lists;
-import storm.trident.operation.TridentCollector;
-import storm.trident.spout.IBatchSpout;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.spout.IBatchSpout;
 
 import java.util.ArrayList;
 import java.util.Arrays;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/ElasticsearchGetRequest.java
----------------------------------------------------------------------
diff --git a/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/ElasticsearchGetRequest.java b/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/ElasticsearchGetRequest.java
index 6a7ce61..b9a7885 100644
--- a/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/ElasticsearchGetRequest.java
+++ b/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/ElasticsearchGetRequest.java
@@ -21,7 +21,7 @@ import java.io.Serializable;
 
 import org.elasticsearch.action.get.GetRequest;
 
-import backtype.storm.tuple.ITuple;
+import org.apache.storm.tuple.ITuple;
 
 /**
  * @since 0.11

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/EsLookupResultOutput.java
----------------------------------------------------------------------
diff --git a/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/EsLookupResultOutput.java b/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/EsLookupResultOutput.java
index 9ccb9e6..d00fd47 100644
--- a/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/EsLookupResultOutput.java
+++ b/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/EsLookupResultOutput.java
@@ -22,8 +22,8 @@ import java.util.Collection;
 
 import org.elasticsearch.action.get.GetResponse;
 
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
 
 /**
  * @since 0.11

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/bolt/AbstractEsBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/bolt/AbstractEsBolt.java b/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/bolt/AbstractEsBolt.java
index 784a57f..1ea80ad 100644
--- a/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/bolt/AbstractEsBolt.java
+++ b/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/bolt/AbstractEsBolt.java
@@ -26,11 +26,11 @@ import org.elasticsearch.common.annotations.VisibleForTesting;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseRichBolt;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseRichBolt;
+import org.apache.storm.tuple.Tuple;
 
 import static org.elasticsearch.common.base.Preconditions.checkNotNull;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/bolt/EsIndexBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/bolt/EsIndexBolt.java b/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/bolt/EsIndexBolt.java
index 1c5983e..f79d38d 100644
--- a/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/bolt/EsIndexBolt.java
+++ b/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/bolt/EsIndexBolt.java
@@ -17,10 +17,10 @@
  */
 package org.apache.storm.elasticsearch.bolt;
 
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Tuple;
 import org.apache.storm.elasticsearch.common.EsConfig;
 import org.apache.storm.elasticsearch.common.EsTupleMapper;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/bolt/EsLookupBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/bolt/EsLookupBolt.java b/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/bolt/EsLookupBolt.java
index 1676a79..0cc2c79 100644
--- a/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/bolt/EsLookupBolt.java
+++ b/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/bolt/EsLookupBolt.java
@@ -25,9 +25,9 @@ import org.apache.storm.elasticsearch.common.EsConfig;
 import org.elasticsearch.action.get.GetRequest;
 import org.elasticsearch.action.get.GetResponse;
 
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
 
 import static org.elasticsearch.common.base.Preconditions.checkNotNull;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/bolt/EsPercolateBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/bolt/EsPercolateBolt.java b/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/bolt/EsPercolateBolt.java
index a361464..ad8f3f0 100644
--- a/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/bolt/EsPercolateBolt.java
+++ b/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/bolt/EsPercolateBolt.java
@@ -17,12 +17,12 @@
  */
 package org.apache.storm.elasticsearch.bolt;
 
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
 import org.apache.storm.elasticsearch.common.EsConfig;
 import org.apache.storm.elasticsearch.common.EsTupleMapper;
 import org.elasticsearch.action.percolate.PercolateResponse;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/common/DefaultEsTupleMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/common/DefaultEsTupleMapper.java b/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/common/DefaultEsTupleMapper.java
index fd0bbcc..0a15922 100644
--- a/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/common/DefaultEsTupleMapper.java
+++ b/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/common/DefaultEsTupleMapper.java
@@ -17,7 +17,7 @@
  */
 package org.apache.storm.elasticsearch.common;
 
-import backtype.storm.tuple.ITuple;
+import org.apache.storm.tuple.ITuple;
 
 public class DefaultEsTupleMapper implements EsTupleMapper {
     @Override

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/common/EsTupleMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/common/EsTupleMapper.java b/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/common/EsTupleMapper.java
index f8a66bd..5b6c425 100644
--- a/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/common/EsTupleMapper.java
+++ b/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/common/EsTupleMapper.java
@@ -17,7 +17,7 @@
  */
 package org.apache.storm.elasticsearch.common;
 
-import backtype.storm.tuple.ITuple;
+import org.apache.storm.tuple.ITuple;
 
 import java.io.Serializable;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/trident/EsState.java
----------------------------------------------------------------------
diff --git a/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/trident/EsState.java b/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/trident/EsState.java
index c066553..2241f4b 100644
--- a/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/trident/EsState.java
+++ b/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/trident/EsState.java
@@ -17,7 +17,7 @@
  */
 package org.apache.storm.elasticsearch.trident;
 
-import backtype.storm.topology.FailedException;
+import org.apache.storm.topology.FailedException;
 
 import org.apache.storm.elasticsearch.common.StormElasticSearchClient;
 import org.apache.storm.elasticsearch.common.EsConfig;
@@ -27,8 +27,8 @@ import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.client.Client;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import storm.trident.state.State;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.state.State;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/trident/EsStateFactory.java
----------------------------------------------------------------------
diff --git a/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/trident/EsStateFactory.java b/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/trident/EsStateFactory.java
index e85cdc2..5ae174f 100644
--- a/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/trident/EsStateFactory.java
+++ b/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/trident/EsStateFactory.java
@@ -17,12 +17,12 @@
  */
 package org.apache.storm.elasticsearch.trident;
 
-import backtype.storm.task.IMetricsContext;
+import org.apache.storm.task.IMetricsContext;
 import org.apache.storm.elasticsearch.common.EsConfig;
 import org.apache.storm.elasticsearch.common.EsTupleMapper;
 
-import storm.trident.state.State;
-import storm.trident.state.StateFactory;
+import org.apache.storm.trident.state.State;
+import org.apache.storm.trident.state.StateFactory;
 
 import java.util.Map;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/trident/EsUpdater.java
----------------------------------------------------------------------
diff --git a/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/trident/EsUpdater.java b/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/trident/EsUpdater.java
index 1fb998b..37c213d 100644
--- a/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/trident/EsUpdater.java
+++ b/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/trident/EsUpdater.java
@@ -17,9 +17,9 @@
  */
 package org.apache.storm.elasticsearch.trident;
 
-import storm.trident.operation.TridentCollector;
-import storm.trident.state.BaseStateUpdater;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.state.BaseStateUpdater;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/AbstractEsBoltIntegrationTest.java
----------------------------------------------------------------------
diff --git a/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/AbstractEsBoltIntegrationTest.java b/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/AbstractEsBoltIntegrationTest.java
index 5121b6a..87ffefa 100644
--- a/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/AbstractEsBoltIntegrationTest.java
+++ b/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/AbstractEsBoltIntegrationTest.java
@@ -17,7 +17,7 @@
  */
 package org.apache.storm.elasticsearch.bolt;
 
-import backtype.storm.testing.IntegrationTest;
+import org.apache.storm.testing.IntegrationTest;
 import org.apache.commons.io.FileUtils;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/AbstractEsBoltTest.java
----------------------------------------------------------------------
diff --git a/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/AbstractEsBoltTest.java b/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/AbstractEsBoltTest.java
index 07b7c43..fb9739c 100644
--- a/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/AbstractEsBoltTest.java
+++ b/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/AbstractEsBoltTest.java
@@ -19,8 +19,8 @@ package org.apache.storm.elasticsearch.bolt;
 
 import com.google.common.testing.NullPointerTester;
 
-import backtype.storm.Config;
-import backtype.storm.task.OutputCollector;
+import org.apache.storm.Config;
+import org.apache.storm.task.OutputCollector;
 import org.apache.storm.elasticsearch.common.EsConfig;
 import org.junit.After;
 import org.junit.Before;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/EsIndexBoltTest.java
----------------------------------------------------------------------
diff --git a/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/EsIndexBoltTest.java b/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/EsIndexBoltTest.java
index 13fade6..9f7592d 100644
--- a/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/EsIndexBoltTest.java
+++ b/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/EsIndexBoltTest.java
@@ -17,8 +17,8 @@
  */
 package org.apache.storm.elasticsearch.bolt;
 
-import backtype.storm.testing.IntegrationTest;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.testing.IntegrationTest;
+import org.apache.storm.tuple.Tuple;
 import org.apache.storm.elasticsearch.common.EsConfig;
 import org.apache.storm.elasticsearch.common.EsTestUtil;
 import org.apache.storm.elasticsearch.common.EsTupleMapper;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/EsIndexTopology.java
----------------------------------------------------------------------
diff --git a/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/EsIndexTopology.java b/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/EsIndexTopology.java
index 70e0738..d30424b 100644
--- a/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/EsIndexTopology.java
+++ b/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/EsIndexTopology.java
@@ -17,15 +17,15 @@
  */
 package org.apache.storm.elasticsearch.bolt;
 
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.spout.SpoutOutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.topology.base.BaseRichSpout;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.spout.SpoutOutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.topology.base.BaseRichSpout;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
 import org.apache.storm.elasticsearch.common.EsConfig;
 import org.apache.storm.elasticsearch.common.EsConstants;
 import org.apache.storm.elasticsearch.common.EsTestUtil;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/EsLookupBoltIntegrationTest.java
----------------------------------------------------------------------
diff --git a/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/EsLookupBoltIntegrationTest.java b/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/EsLookupBoltIntegrationTest.java
index e5016c8..038dcce 100644
--- a/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/EsLookupBoltIntegrationTest.java
+++ b/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/EsLookupBoltIntegrationTest.java
@@ -17,11 +17,11 @@
  */
 package org.apache.storm.elasticsearch.bolt;
 
-import backtype.storm.testing.IntegrationTest;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.ITuple;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
+import org.apache.storm.testing.IntegrationTest;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.ITuple;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
 import org.apache.storm.elasticsearch.ElasticsearchGetRequest;
 import org.apache.storm.elasticsearch.EsLookupResultOutput;
 import org.apache.storm.elasticsearch.common.EsConfig;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/EsLookupBoltTest.java
----------------------------------------------------------------------
diff --git a/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/EsLookupBoltTest.java b/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/EsLookupBoltTest.java
index 175b2a4..c876b2c 100644
--- a/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/EsLookupBoltTest.java
+++ b/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/EsLookupBoltTest.java
@@ -35,10 +35,10 @@ import org.mockito.ArgumentCaptor;
 import org.mockito.Mock;
 import org.mockito.runners.MockitoJUnitRunner;
 
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
 
 import static org.hamcrest.CoreMatchers.is;
 import static org.junit.Assert.assertThat;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/EsPercolateBoltTest.java
----------------------------------------------------------------------
diff --git a/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/EsPercolateBoltTest.java b/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/EsPercolateBoltTest.java
index e4f2be0..61acd36 100644
--- a/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/EsPercolateBoltTest.java
+++ b/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/bolt/EsPercolateBoltTest.java
@@ -17,9 +17,9 @@
  */
 package org.apache.storm.elasticsearch.bolt;
 
-import backtype.storm.testing.IntegrationTest;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
+import org.apache.storm.testing.IntegrationTest;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
 import org.apache.storm.elasticsearch.common.EsConfig;
 import org.apache.storm.elasticsearch.common.EsTestUtil;
 import org.apache.storm.elasticsearch.common.EsTupleMapper;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/common/EsTestUtil.java
----------------------------------------------------------------------
diff --git a/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/common/EsTestUtil.java b/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/common/EsTestUtil.java
index 30e684f..cb1c745 100644
--- a/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/common/EsTestUtil.java
+++ b/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/common/EsTestUtil.java
@@ -17,14 +17,14 @@
  */
 package org.apache.storm.elasticsearch.common;
 
-import backtype.storm.Config;
-import backtype.storm.task.GeneralTopologyContext;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.ITuple;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.TupleImpl;
-import backtype.storm.tuple.Values;
+import org.apache.storm.Config;
+import org.apache.storm.task.GeneralTopologyContext;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.ITuple;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.TupleImpl;
+import org.apache.storm.tuple.Values;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.settings.ImmutableSettings;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/trident/TridentEsTopology.java
----------------------------------------------------------------------
diff --git a/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/trident/TridentEsTopology.java b/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/trident/TridentEsTopology.java
index 7b525a5..67eab5b 100644
--- a/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/trident/TridentEsTopology.java
+++ b/external/storm-elasticsearch/src/test/java/org/apache/storm/elasticsearch/trident/TridentEsTopology.java
@@ -17,21 +17,21 @@
  */
 package org.apache.storm.elasticsearch.trident;
 
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
 import org.apache.storm.elasticsearch.common.EsConfig;
 import org.apache.storm.elasticsearch.common.EsConstants;
 import org.apache.storm.elasticsearch.common.EsTestUtil;
 import org.apache.storm.elasticsearch.common.EsTupleMapper;
-import storm.trident.Stream;
-import storm.trident.TridentState;
-import storm.trident.TridentTopology;
-import storm.trident.operation.TridentCollector;
-import storm.trident.spout.IBatchSpout;
-import storm.trident.state.StateFactory;
+import org.apache.storm.trident.Stream;
+import org.apache.storm.trident.TridentState;
+import org.apache.storm.trident.TridentTopology;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.spout.IBatchSpout;
+import org.apache.storm.trident.state.StateFactory;
 
 import java.util.*;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/bolt/DefaultEventDataFormat.java
----------------------------------------------------------------------
diff --git a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/bolt/DefaultEventDataFormat.java b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/bolt/DefaultEventDataFormat.java
index 6b3eba7..21940de 100644
--- a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/bolt/DefaultEventDataFormat.java
+++ b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/bolt/DefaultEventDataFormat.java
@@ -17,7 +17,7 @@
  *******************************************************************************/
 package org.apache.storm.eventhubs.bolt;
 
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.tuple.Tuple;
 
 /**
  * A default implementation of IEventDataFormat that converts the tuple

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/bolt/EventHubBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/bolt/EventHubBolt.java b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/bolt/EventHubBolt.java
index 9acf7fa..ac5018b 100755
--- a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/bolt/EventHubBolt.java
+++ b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/bolt/EventHubBolt.java
@@ -26,11 +26,11 @@ import com.microsoft.eventhubs.client.EventHubClient;
 import com.microsoft.eventhubs.client.EventHubException;
 import com.microsoft.eventhubs.client.EventHubSender;
 
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseRichBolt;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseRichBolt;
+import org.apache.storm.tuple.Tuple;
 
 /**
  * A bolt that writes event message to EventHub.

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/bolt/IEventDataFormat.java
----------------------------------------------------------------------
diff --git a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/bolt/IEventDataFormat.java b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/bolt/IEventDataFormat.java
index 2003c34..743d5bb 100644
--- a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/bolt/IEventDataFormat.java
+++ b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/bolt/IEventDataFormat.java
@@ -18,7 +18,7 @@
 package org.apache.storm.eventhubs.bolt;
 
 import java.io.Serializable;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.tuple.Tuple;
 
 /**
  * Serialize a tuple to a byte array to be sent to EventHubs

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/samples/EventCount.java
----------------------------------------------------------------------
diff --git a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/samples/EventCount.java b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/samples/EventCount.java
index 94fdb49..2c2261c 100755
--- a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/samples/EventCount.java
+++ b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/samples/EventCount.java
@@ -17,11 +17,11 @@
  *******************************************************************************/
 package org.apache.storm.eventhubs.samples;
 
-import backtype.storm.StormSubmitter;
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.topology.TopologyBuilder;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.topology.TopologyBuilder;
 
 import org.apache.storm.eventhubs.samples.bolt.GlobalCountBolt;
 import org.apache.storm.eventhubs.samples.bolt.PartialCountBolt;
@@ -126,7 +126,7 @@ public class EventCount {
 	  Config config = new Config();
     config.setDebug(false);
     //Enable metrics
-    config.registerMetricsConsumer(backtype.storm.metric.LoggingMetricsConsumer.class, 1);
+    config.registerMetricsConsumer(org.apache.storm.metric.LoggingMetricsConsumer.class, 1);
 
     
 	  if (args != null && args.length > 0) {

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/samples/EventHubLoop.java
----------------------------------------------------------------------
diff --git a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/samples/EventHubLoop.java b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/samples/EventHubLoop.java
index 2f62a23..665fef9 100755
--- a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/samples/EventHubLoop.java
+++ b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/samples/EventHubLoop.java
@@ -17,8 +17,8 @@
  *******************************************************************************/
 package org.apache.storm.eventhubs.samples;
 
-import backtype.storm.generated.StormTopology;
-import backtype.storm.topology.TopologyBuilder;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.topology.TopologyBuilder;
 
 import org.apache.storm.eventhubs.bolt.EventHubBolt;
 import org.apache.storm.eventhubs.bolt.EventHubBoltConfig;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/samples/OpaqueTridentEventCount.java
----------------------------------------------------------------------
diff --git a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/samples/OpaqueTridentEventCount.java b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/samples/OpaqueTridentEventCount.java
index f4fe127..e8538c1 100755
--- a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/samples/OpaqueTridentEventCount.java
+++ b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/samples/OpaqueTridentEventCount.java
@@ -17,13 +17,13 @@
  *******************************************************************************/
 package org.apache.storm.eventhubs.samples;
 
-import storm.trident.TridentState;
-import storm.trident.TridentTopology;
-import storm.trident.operation.builtin.Count;
-import storm.trident.operation.builtin.Sum;
-import storm.trident.testing.MemoryMapState;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.tuple.Fields;
+import org.apache.storm.trident.TridentState;
+import org.apache.storm.trident.TridentTopology;
+import org.apache.storm.trident.operation.builtin.Count;
+import org.apache.storm.trident.operation.builtin.Sum;
+import org.apache.storm.trident.testing.MemoryMapState;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.tuple.Fields;
 
 import org.apache.storm.eventhubs.samples.TransactionalTridentEventCount.LoggingFilter;
 import org.apache.storm.eventhubs.spout.EventHubSpout;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/samples/TransactionalTridentEventCount.java
----------------------------------------------------------------------
diff --git a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/samples/TransactionalTridentEventCount.java b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/samples/TransactionalTridentEventCount.java
index 1e7628b..0a5295f 100755
--- a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/samples/TransactionalTridentEventCount.java
+++ b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/samples/TransactionalTridentEventCount.java
@@ -20,19 +20,19 @@ package org.apache.storm.eventhubs.samples;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import backtype.storm.generated.StormTopology;
-import backtype.storm.tuple.Fields;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.tuple.Fields;
 
 import org.apache.storm.eventhubs.spout.EventHubSpout;
 import org.apache.storm.eventhubs.trident.TransactionalTridentEventHubSpout;
 
-import storm.trident.TridentState;
-import storm.trident.TridentTopology;
-import storm.trident.operation.BaseFilter;
-import storm.trident.operation.builtin.Count;
-import storm.trident.operation.builtin.Sum;
-import storm.trident.testing.MemoryMapState;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.TridentState;
+import org.apache.storm.trident.TridentTopology;
+import org.apache.storm.trident.operation.BaseFilter;
+import org.apache.storm.trident.operation.builtin.Count;
+import org.apache.storm.trident.operation.builtin.Sum;
+import org.apache.storm.trident.testing.MemoryMapState;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 /**
  * A simple Trident topology uses TransactionalTridentEventHubSpout

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/samples/bolt/GlobalCountBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/samples/bolt/GlobalCountBolt.java b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/samples/bolt/GlobalCountBolt.java
index 16b34a6..6a34788 100755
--- a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/samples/bolt/GlobalCountBolt.java
+++ b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/samples/bolt/GlobalCountBolt.java
@@ -23,13 +23,13 @@ import java.util.Map;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import backtype.storm.Config;
-import backtype.storm.metric.api.IMetric;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.BasicOutputCollector;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseBasicBolt;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.Config;
+import org.apache.storm.metric.api.IMetric;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.BasicOutputCollector;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseBasicBolt;
+import org.apache.storm.tuple.Tuple;
 
 /**
  * Globally count number of messages

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/samples/bolt/PartialCountBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/samples/bolt/PartialCountBolt.java b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/samples/bolt/PartialCountBolt.java
index 21f1ab4..215f8f7 100755
--- a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/samples/bolt/PartialCountBolt.java
+++ b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/samples/bolt/PartialCountBolt.java
@@ -22,13 +22,13 @@ import java.util.Map;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.BasicOutputCollector;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseBasicBolt;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.BasicOutputCollector;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseBasicBolt;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
 
 /**
  * Partially count number of messages from EventHubs

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/spout/EventDataScheme.java
----------------------------------------------------------------------
diff --git a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/spout/EventDataScheme.java b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/spout/EventDataScheme.java
index d01050d..0e275a5 100755
--- a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/spout/EventDataScheme.java
+++ b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/spout/EventDataScheme.java
@@ -17,7 +17,7 @@
  *******************************************************************************/
 package org.apache.storm.eventhubs.spout;
 
-import backtype.storm.tuple.Fields;
+import org.apache.storm.tuple.Fields;
 import java.util.ArrayList;
 import java.util.List;
 import org.apache.qpid.amqp_1_0.client.Message;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/spout/EventHubReceiverImpl.java
----------------------------------------------------------------------
diff --git a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/spout/EventHubReceiverImpl.java b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/spout/EventHubReceiverImpl.java
index 0fcad99..5f9acbd 100755
--- a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/spout/EventHubReceiverImpl.java
+++ b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/spout/EventHubReceiverImpl.java
@@ -21,9 +21,9 @@ import org.apache.qpid.amqp_1_0.client.Message;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import backtype.storm.metric.api.CountMetric;
-import backtype.storm.metric.api.MeanReducer;
-import backtype.storm.metric.api.ReducedMetric;
+import org.apache.storm.metric.api.CountMetric;
+import org.apache.storm.metric.api.MeanReducer;
+import org.apache.storm.metric.api.ReducedMetric;
 
 import com.microsoft.eventhubs.client.Constants;
 import com.microsoft.eventhubs.client.EventHubException;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/spout/EventHubSpout.java
----------------------------------------------------------------------
diff --git a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/spout/EventHubSpout.java b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/spout/EventHubSpout.java
index d08ec3a..ff40315 100755
--- a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/spout/EventHubSpout.java
+++ b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/spout/EventHubSpout.java
@@ -17,12 +17,12 @@
  *******************************************************************************/
 package org.apache.storm.eventhubs.spout;
 
-import backtype.storm.Config;
-import backtype.storm.metric.api.IMetric;
-import backtype.storm.spout.SpoutOutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseRichSpout;
+import org.apache.storm.Config;
+import org.apache.storm.metric.api.IMetric;
+import org.apache.storm.spout.SpoutOutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseRichSpout;
 
 import java.util.HashMap;
 import java.util.List;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/spout/IEventDataScheme.java
----------------------------------------------------------------------
diff --git a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/spout/IEventDataScheme.java b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/spout/IEventDataScheme.java
index c96767d..b7e03b4 100755
--- a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/spout/IEventDataScheme.java
+++ b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/spout/IEventDataScheme.java
@@ -17,7 +17,7 @@
  *******************************************************************************/
 package org.apache.storm.eventhubs.spout;
 
-import backtype.storm.tuple.Fields;
+import org.apache.storm.tuple.Fields;
 import java.io.Serializable;
 import java.util.List;
 import org.apache.qpid.amqp_1_0.client.Message;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/Coordinator.java
----------------------------------------------------------------------
diff --git a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/Coordinator.java b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/Coordinator.java
index a43193d..ad3c75e 100755
--- a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/Coordinator.java
+++ b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/Coordinator.java
@@ -20,8 +20,8 @@ package org.apache.storm.eventhubs.trident;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import storm.trident.spout.IOpaquePartitionedTridentSpout;
-import storm.trident.spout.IPartitionedTridentSpout;
+import org.apache.storm.trident.spout.IOpaquePartitionedTridentSpout;
+import org.apache.storm.trident.spout.IPartitionedTridentSpout;
 
 import org.apache.storm.eventhubs.spout.EventHubSpoutConfig;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/OpaqueTridentEventHubEmitter.java
----------------------------------------------------------------------
diff --git a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/OpaqueTridentEventHubEmitter.java b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/OpaqueTridentEventHubEmitter.java
index c7bd8c3..ae21ab3 100755
--- a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/OpaqueTridentEventHubEmitter.java
+++ b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/OpaqueTridentEventHubEmitter.java
@@ -23,9 +23,9 @@ import java.util.Map;
 import org.apache.storm.eventhubs.spout.EventHubSpoutConfig;
 import org.apache.storm.eventhubs.spout.IEventHubReceiverFactory;
 
-import storm.trident.operation.TridentCollector;
-import storm.trident.spout.IOpaquePartitionedTridentSpout;
-import storm.trident.topology.TransactionAttempt;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.spout.IOpaquePartitionedTridentSpout;
+import org.apache.storm.trident.topology.TransactionAttempt;
 
 /**
  * A thin wrapper of TransactionalTridentEventHubEmitter for OpaqueTridentEventHubSpout

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/OpaqueTridentEventHubSpout.java
----------------------------------------------------------------------
diff --git a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/OpaqueTridentEventHubSpout.java b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/OpaqueTridentEventHubSpout.java
index 17c8da2..559571f 100755
--- a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/OpaqueTridentEventHubSpout.java
+++ b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/OpaqueTridentEventHubSpout.java
@@ -22,9 +22,9 @@ import java.util.Map;
 import org.apache.storm.eventhubs.spout.EventHubSpoutConfig;
 import org.apache.storm.eventhubs.spout.IEventDataScheme;
 
-import backtype.storm.task.TopologyContext;
-import backtype.storm.tuple.Fields;
-import storm.trident.spout.IOpaquePartitionedTridentSpout;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.trident.spout.IOpaquePartitionedTridentSpout;
 
 /**
  * Opaque Trident EventHubs Spout

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/Partition.java
----------------------------------------------------------------------
diff --git a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/Partition.java b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/Partition.java
index 8b166cd..b726e7f 100755
--- a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/Partition.java
+++ b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/Partition.java
@@ -19,7 +19,7 @@ package org.apache.storm.eventhubs.trident;
 
 import java.io.Serializable;
 import org.apache.storm.eventhubs.spout.EventHubSpoutConfig;
-import storm.trident.spout.ISpoutPartition;
+import org.apache.storm.trident.spout.ISpoutPartition;
 
 /**
  * Represents an EventHub partition

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/TransactionalTridentEventHubEmitter.java
----------------------------------------------------------------------
diff --git a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/TransactionalTridentEventHubEmitter.java b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/TransactionalTridentEventHubEmitter.java
index bf7f339..e5c1c50 100755
--- a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/TransactionalTridentEventHubEmitter.java
+++ b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/TransactionalTridentEventHubEmitter.java
@@ -31,10 +31,10 @@ import org.apache.storm.eventhubs.spout.IEventHubReceiver;
 import org.apache.storm.eventhubs.spout.IEventHubReceiverFactory;
 import com.microsoft.eventhubs.client.Constants;
 
-import storm.trident.operation.TridentCollector;
-import storm.trident.spout.IOpaquePartitionedTridentSpout;
-import storm.trident.spout.IPartitionedTridentSpout;
-import storm.trident.topology.TransactionAttempt;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.spout.IOpaquePartitionedTridentSpout;
+import org.apache.storm.trident.spout.IPartitionedTridentSpout;
+import org.apache.storm.trident.topology.TransactionAttempt;
 
 
 public class TransactionalTridentEventHubEmitter

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/TransactionalTridentEventHubSpout.java
----------------------------------------------------------------------
diff --git a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/TransactionalTridentEventHubSpout.java b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/TransactionalTridentEventHubSpout.java
index 8a01052..12b94b4 100755
--- a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/TransactionalTridentEventHubSpout.java
+++ b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/TransactionalTridentEventHubSpout.java
@@ -22,9 +22,9 @@ import java.util.Map;
 import org.apache.storm.eventhubs.spout.EventHubSpoutConfig;
 import org.apache.storm.eventhubs.spout.IEventDataScheme;
 
-import backtype.storm.task.TopologyContext;
-import backtype.storm.tuple.Fields;
-import storm.trident.spout.IPartitionedTridentSpout;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.trident.spout.IPartitionedTridentSpout;
 import org.apache.storm.eventhubs.trident.Partition;
 
 /**

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-eventhubs/src/test/java/org/apache/storm/eventhubs/spout/EventHubSpoutCallerMock.java
----------------------------------------------------------------------
diff --git a/external/storm-eventhubs/src/test/java/org/apache/storm/eventhubs/spout/EventHubSpoutCallerMock.java b/external/storm-eventhubs/src/test/java/org/apache/storm/eventhubs/spout/EventHubSpoutCallerMock.java
index d5ba90a..7eb625c 100755
--- a/external/storm-eventhubs/src/test/java/org/apache/storm/eventhubs/spout/EventHubSpoutCallerMock.java
+++ b/external/storm-eventhubs/src/test/java/org/apache/storm/eventhubs/spout/EventHubSpoutCallerMock.java
@@ -17,7 +17,7 @@
  *******************************************************************************/
 package org.apache.storm.eventhubs.spout;
 
-import backtype.storm.spout.SpoutOutputCollector;
+import org.apache.storm.spout.SpoutOutputCollector;
 
 /**
  * Mocks EventHubSpout's caller (storm framework)
@@ -93,4 +93,4 @@ public class EventHubSpoutCallerMock {
     String statePath = statePathPrefix + partitionIndex;
     return stateStore.readData(statePath);
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-eventhubs/src/test/java/org/apache/storm/eventhubs/spout/SpoutOutputCollectorMock.java
----------------------------------------------------------------------
diff --git a/external/storm-eventhubs/src/test/java/org/apache/storm/eventhubs/spout/SpoutOutputCollectorMock.java b/external/storm-eventhubs/src/test/java/org/apache/storm/eventhubs/spout/SpoutOutputCollectorMock.java
index df4a3ba..88bafd2 100755
--- a/external/storm-eventhubs/src/test/java/org/apache/storm/eventhubs/spout/SpoutOutputCollectorMock.java
+++ b/external/storm-eventhubs/src/test/java/org/apache/storm/eventhubs/spout/SpoutOutputCollectorMock.java
@@ -19,7 +19,7 @@ package org.apache.storm.eventhubs.spout;
 
 import java.util.List;
 
-import backtype.storm.spout.ISpoutOutputCollector;
+import org.apache.storm.spout.ISpoutOutputCollector;
 
 /**
  * Mock of ISpoutOutputCollector

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-eventhubs/src/test/java/org/apache/storm/eventhubs/trident/TridentCollectorMock.java
----------------------------------------------------------------------
diff --git a/external/storm-eventhubs/src/test/java/org/apache/storm/eventhubs/trident/TridentCollectorMock.java b/external/storm-eventhubs/src/test/java/org/apache/storm/eventhubs/trident/TridentCollectorMock.java
index cc4686e..3fe5de0 100755
--- a/external/storm-eventhubs/src/test/java/org/apache/storm/eventhubs/trident/TridentCollectorMock.java
+++ b/external/storm-eventhubs/src/test/java/org/apache/storm/eventhubs/trident/TridentCollectorMock.java
@@ -19,7 +19,7 @@ package org.apache.storm.eventhubs.trident;
 
 import java.util.List;
 
-import storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.operation.TridentCollector;
 
 /**
  * A mock of TridentCollector

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/AbstractHBaseBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/AbstractHBaseBolt.java b/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/AbstractHBaseBolt.java
index 404aa7a..3546f75 100644
--- a/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/AbstractHBaseBolt.java
+++ b/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/AbstractHBaseBolt.java
@@ -17,10 +17,10 @@
  */
 package org.apache.storm.hbase.bolt;
 
-import backtype.storm.Config;
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.base.BaseRichBolt;
+import org.apache.storm.Config;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.base.BaseRichBolt;
 import org.apache.commons.lang.Validate;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/HBaseBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/HBaseBolt.java b/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/HBaseBolt.java
index 4413361..2a48f10 100644
--- a/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/HBaseBolt.java
+++ b/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/HBaseBolt.java
@@ -17,10 +17,10 @@
  */
 package org.apache.storm.hbase.bolt;
 
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.utils.TupleUtils;
-import backtype.storm.Config;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.utils.TupleUtils;
+import org.apache.storm.Config;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.storm.hbase.bolt.mapper.HBaseMapper;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/HBaseLookupBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/HBaseLookupBolt.java b/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/HBaseLookupBolt.java
index 36e5606..58ef674 100644
--- a/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/HBaseLookupBolt.java
+++ b/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/HBaseLookupBolt.java
@@ -17,10 +17,10 @@
  */
 package org.apache.storm.hbase.bolt;
 
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
-import backtype.storm.utils.TupleUtils;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.utils.TupleUtils;
 
 import com.google.common.collect.Lists;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/mapper/HBaseMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/mapper/HBaseMapper.java b/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/mapper/HBaseMapper.java
index 626ce96..70016c5 100644
--- a/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/mapper/HBaseMapper.java
+++ b/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/mapper/HBaseMapper.java
@@ -18,13 +18,13 @@
 package org.apache.storm.hbase.bolt.mapper;
 
 
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.tuple.Tuple;
 import org.apache.storm.hbase.common.ColumnList;
 
 import java.io.Serializable;
 
 /**
- * Maps a <code>backtype.storm.tuple.Tuple</code> object
+ * Maps a <code>org.apache.storm.tuple.Tuple</code> object
  * to a row in an HBase table.
  */
 public interface HBaseMapper extends Serializable {
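
For reference, a minimal sketch of an HBaseMapper implementation under the org.apache.storm package names. The rowKey/columns method shapes follow the storm-hbase interface; the "word"/"count" field names and the "cf" column family are illustrative only, not taken from this commit.

    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.storm.hbase.bolt.mapper.HBaseMapper;
    import org.apache.storm.hbase.common.ColumnList;
    import org.apache.storm.tuple.Tuple;

    // Illustrative mapper: row key from the "word" field, one counter column in family "cf".
    public class WordCountHBaseMapper implements HBaseMapper {
        @Override
        public byte[] rowKey(Tuple tuple) {
            return Bytes.toBytes(tuple.getStringByField("word"));
        }

        @Override
        public ColumnList columns(Tuple tuple) {
            ColumnList cols = new ColumnList();
            cols.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("count"),
                           Bytes.toBytes(tuple.getLongByField("count")));
            return cols;
        }
    }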

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/mapper/HBaseValueMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/mapper/HBaseValueMapper.java b/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/mapper/HBaseValueMapper.java
index bc38b83..38e879f 100644
--- a/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/mapper/HBaseValueMapper.java
+++ b/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/mapper/HBaseValueMapper.java
@@ -17,9 +17,9 @@
  */
 package org.apache.storm.hbase.bolt.mapper;
 
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.ITuple;
-import backtype.storm.tuple.Values;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.ITuple;
+import org.apache.storm.tuple.Values;
 import org.apache.hadoop.hbase.client.Result;
 
 import java.io.Serializable;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/mapper/SimpleHBaseMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/mapper/SimpleHBaseMapper.java b/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/mapper/SimpleHBaseMapper.java
index da0efd4..8747405 100644
--- a/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/mapper/SimpleHBaseMapper.java
+++ b/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/mapper/SimpleHBaseMapper.java
@@ -17,8 +17,8 @@
  */
 package org.apache.storm.hbase.bolt.mapper;
 
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
 import org.apache.storm.hbase.common.ColumnList;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hbase/src/main/java/org/apache/storm/hbase/security/AutoHBase.java
----------------------------------------------------------------------
diff --git a/external/storm-hbase/src/main/java/org/apache/storm/hbase/security/AutoHBase.java b/external/storm-hbase/src/main/java/org/apache/storm/hbase/security/AutoHBase.java
index 02c81bb..a2ca68e 100644
--- a/external/storm-hbase/src/main/java/org/apache/storm/hbase/security/AutoHBase.java
+++ b/external/storm-hbase/src/main/java/org/apache/storm/hbase/security/AutoHBase.java
@@ -18,10 +18,10 @@
 
 package org.apache.storm.hbase.security;
 
-import backtype.storm.Config;
-import backtype.storm.security.INimbusCredentialPlugin;
-import backtype.storm.security.auth.IAutoCredentials;
-import backtype.storm.security.auth.ICredentialsRenewer;
+import org.apache.storm.Config;
+import org.apache.storm.security.INimbusCredentialPlugin;
+import org.apache.storm.security.auth.IAutoCredentials;
+import org.apache.storm.security.auth.ICredentialsRenewer;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.security.User;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hbase/src/main/java/org/apache/storm/hbase/security/HBaseSecurityUtil.java
----------------------------------------------------------------------
diff --git a/external/storm-hbase/src/main/java/org/apache/storm/hbase/security/HBaseSecurityUtil.java b/external/storm-hbase/src/main/java/org/apache/storm/hbase/security/HBaseSecurityUtil.java
index 99dfeba..f306a51 100644
--- a/external/storm-hbase/src/main/java/org/apache/storm/hbase/security/HBaseSecurityUtil.java
+++ b/external/storm-hbase/src/main/java/org/apache/storm/hbase/security/HBaseSecurityUtil.java
@@ -28,7 +28,7 @@ import java.net.InetAddress;
 import java.util.List;
 import java.util.Map;
 
-import static backtype.storm.Config.TOPOLOGY_AUTO_CREDENTIALS;
+import static org.apache.storm.Config.TOPOLOGY_AUTO_CREDENTIALS;
 
 /**
  * This class provides util methods for storm-hbase connector communicating

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/mapper/SimpleTridentHBaseMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/mapper/SimpleTridentHBaseMapper.java b/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/mapper/SimpleTridentHBaseMapper.java
index be3ab95..eda9a32 100644
--- a/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/mapper/SimpleTridentHBaseMapper.java
+++ b/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/mapper/SimpleTridentHBaseMapper.java
@@ -17,13 +17,13 @@
  */
 package org.apache.storm.hbase.trident.mapper;
 
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
 import org.apache.storm.hbase.bolt.mapper.HBaseMapper;
 import org.apache.storm.hbase.common.ColumnList;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import static org.apache.storm.hbase.common.Utils.toBytes;
 import static org.apache.storm.hbase.common.Utils.toLong;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/mapper/TridentHBaseMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/mapper/TridentHBaseMapper.java b/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/mapper/TridentHBaseMapper.java
index 64d10d0..bb95497 100644
--- a/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/mapper/TridentHBaseMapper.java
+++ b/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/mapper/TridentHBaseMapper.java
@@ -19,13 +19,13 @@
 package org.apache.storm.hbase.trident.mapper;
 
 
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.tuple.Tuple;
 import org.apache.storm.hbase.common.ColumnList;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.io.Serializable;
 /**
- * Maps a <code>storm.trident.tuple.TridentTuple</code> object
+ * Maps a <code>org.apache.storm.trident.tuple.TridentTuple</code> object
  * to a row in an HBase table.
  */
 public interface TridentHBaseMapper extends Serializable {

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/state/HBaseMapState.java
----------------------------------------------------------------------
diff --git a/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/state/HBaseMapState.java b/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/state/HBaseMapState.java
index 99ce4bf..541fa86 100644
--- a/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/state/HBaseMapState.java
+++ b/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/state/HBaseMapState.java
@@ -17,9 +17,9 @@
  */
 package org.apache.storm.hbase.trident.state;
 
-import backtype.storm.task.IMetricsContext;
-import backtype.storm.topology.FailedException;
-import backtype.storm.tuple.Values;
+import org.apache.storm.task.IMetricsContext;
+import org.apache.storm.topology.FailedException;
+import org.apache.storm.tuple.Values;
 import com.google.common.collect.Maps;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -29,8 +29,8 @@ import org.apache.storm.hbase.security.HBaseSecurityUtil;
 import org.apache.storm.hbase.trident.mapper.TridentHBaseMapMapper;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import storm.trident.state.*;
-import storm.trident.state.map.*;
+import org.apache.storm.trident.state.*;
+import org.apache.storm.trident.state.map.*;
 
 import java.io.IOException;
 import java.io.InterruptedIOException;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/state/HBaseQuery.java
----------------------------------------------------------------------
diff --git a/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/state/HBaseQuery.java b/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/state/HBaseQuery.java
index c7836ed..4d95eb5 100644
--- a/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/state/HBaseQuery.java
+++ b/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/state/HBaseQuery.java
@@ -17,10 +17,10 @@
  */
 package org.apache.storm.hbase.trident.state;
 
-import backtype.storm.tuple.Values;
-import storm.trident.operation.TridentCollector;
-import storm.trident.state.BaseQueryFunction;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.state.BaseQueryFunction;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/state/HBaseState.java
----------------------------------------------------------------------
diff --git a/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/state/HBaseState.java b/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/state/HBaseState.java
index 04518ca..b199514 100644
--- a/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/state/HBaseState.java
+++ b/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/state/HBaseState.java
@@ -17,9 +17,9 @@
  */
 package org.apache.storm.hbase.trident.state;
 
-import backtype.storm.Config;
-import backtype.storm.topology.FailedException;
-import backtype.storm.tuple.Values;
+import org.apache.storm.Config;
+import org.apache.storm.topology.FailedException;
+import org.apache.storm.tuple.Values;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -31,9 +31,9 @@ import org.apache.storm.hbase.common.HBaseClient;
 import org.apache.storm.hbase.trident.mapper.TridentHBaseMapper;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import storm.trident.operation.TridentCollector;
-import storm.trident.state.State;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.state.State;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.io.Serializable;
 import java.util.HashMap;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/state/HBaseStateFactory.java
----------------------------------------------------------------------
diff --git a/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/state/HBaseStateFactory.java b/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/state/HBaseStateFactory.java
index 1fedc61..e208ef3 100644
--- a/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/state/HBaseStateFactory.java
+++ b/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/state/HBaseStateFactory.java
@@ -17,9 +17,9 @@
  */
 package org.apache.storm.hbase.trident.state;
 
-import backtype.storm.task.IMetricsContext;
-import storm.trident.state.State;
-import storm.trident.state.StateFactory;
+import org.apache.storm.task.IMetricsContext;
+import org.apache.storm.trident.state.State;
+import org.apache.storm.trident.state.StateFactory;
 
 import java.util.Map;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/state/HBaseUpdater.java
----------------------------------------------------------------------
diff --git a/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/state/HBaseUpdater.java b/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/state/HBaseUpdater.java
index 248ea2d..57ca844 100644
--- a/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/state/HBaseUpdater.java
+++ b/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/state/HBaseUpdater.java
@@ -17,9 +17,9 @@
  */
 package org.apache.storm.hbase.trident.state;
 
-import storm.trident.operation.TridentCollector;
-import storm.trident.state.BaseStateUpdater;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.state.BaseStateUpdater;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hbase/src/test/java/org/apache/storm/hbase/topology/LookupWordCount.java
----------------------------------------------------------------------
diff --git a/external/storm-hbase/src/test/java/org/apache/storm/hbase/topology/LookupWordCount.java b/external/storm-hbase/src/test/java/org/apache/storm/hbase/topology/LookupWordCount.java
index 656bce5..43f72ae 100644
--- a/external/storm-hbase/src/test/java/org/apache/storm/hbase/topology/LookupWordCount.java
+++ b/external/storm-hbase/src/test/java/org/apache/storm/hbase/topology/LookupWordCount.java
@@ -17,11 +17,11 @@
  */
 package org.apache.storm.hbase.topology;
 
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.StormSubmitter;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.tuple.Fields;
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.tuple.Fields;
 import org.apache.storm.hbase.bolt.HBaseLookupBolt;
 import org.apache.storm.hbase.bolt.mapper.HBaseProjectionCriteria;
 import org.apache.storm.hbase.bolt.mapper.SimpleHBaseMapper;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hbase/src/test/java/org/apache/storm/hbase/topology/PersistentWordCount.java
----------------------------------------------------------------------
diff --git a/external/storm-hbase/src/test/java/org/apache/storm/hbase/topology/PersistentWordCount.java b/external/storm-hbase/src/test/java/org/apache/storm/hbase/topology/PersistentWordCount.java
index 0d807b2..a171a26 100644
--- a/external/storm-hbase/src/test/java/org/apache/storm/hbase/topology/PersistentWordCount.java
+++ b/external/storm-hbase/src/test/java/org/apache/storm/hbase/topology/PersistentWordCount.java
@@ -17,11 +17,11 @@
  */
 package org.apache.storm.hbase.topology;
 
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.StormSubmitter;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.tuple.Fields;
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.tuple.Fields;
 import org.apache.storm.hbase.bolt.HBaseBolt;
 import org.apache.storm.hbase.bolt.mapper.SimpleHBaseMapper;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hbase/src/test/java/org/apache/storm/hbase/topology/TotalWordCounter.java
----------------------------------------------------------------------
diff --git a/external/storm-hbase/src/test/java/org/apache/storm/hbase/topology/TotalWordCounter.java b/external/storm-hbase/src/test/java/org/apache/storm/hbase/topology/TotalWordCounter.java
index 93bd522..61b0dd8 100644
--- a/external/storm-hbase/src/test/java/org/apache/storm/hbase/topology/TotalWordCounter.java
+++ b/external/storm-hbase/src/test/java/org/apache/storm/hbase/topology/TotalWordCounter.java
@@ -17,12 +17,12 @@
  */
 package org.apache.storm.hbase.topology;
 
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.BasicOutputCollector;
-import backtype.storm.topology.IBasicBolt;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.BasicOutputCollector;
+import org.apache.storm.topology.IBasicBolt;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -30,7 +30,7 @@ import java.math.BigInteger;
 import java.util.Map;
 import java.util.Random;
 
-import static backtype.storm.utils.Utils.tuple;
+import static org.apache.storm.utils.Utils.tuple;
 
 public class TotalWordCounter implements IBasicBolt {
 
@@ -67,4 +67,4 @@ public class TotalWordCounter implements IBasicBolt {
         return null;
     }
 
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hbase/src/test/java/org/apache/storm/hbase/topology/WordCountValueMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-hbase/src/test/java/org/apache/storm/hbase/topology/WordCountValueMapper.java b/external/storm-hbase/src/test/java/org/apache/storm/hbase/topology/WordCountValueMapper.java
index 2463085..6c3301b 100644
--- a/external/storm-hbase/src/test/java/org/apache/storm/hbase/topology/WordCountValueMapper.java
+++ b/external/storm-hbase/src/test/java/org/apache/storm/hbase/topology/WordCountValueMapper.java
@@ -18,10 +18,10 @@
 package org.apache.storm.hbase.topology;
 
 
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.ITuple;
-import backtype.storm.tuple.Values;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.ITuple;
+import org.apache.storm.tuple.Values;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.client.Result;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hbase/src/test/java/org/apache/storm/hbase/topology/WordCounter.java
----------------------------------------------------------------------
diff --git a/external/storm-hbase/src/test/java/org/apache/storm/hbase/topology/WordCounter.java b/external/storm-hbase/src/test/java/org/apache/storm/hbase/topology/WordCounter.java
index 602978e..3a350a8 100644
--- a/external/storm-hbase/src/test/java/org/apache/storm/hbase/topology/WordCounter.java
+++ b/external/storm-hbase/src/test/java/org/apache/storm/hbase/topology/WordCounter.java
@@ -17,16 +17,16 @@
  */
 package org.apache.storm.hbase.topology;
 
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.BasicOutputCollector;
-import backtype.storm.topology.IBasicBolt;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.BasicOutputCollector;
+import org.apache.storm.topology.IBasicBolt;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
 
 import java.util.Map;
 
-import static backtype.storm.utils.Utils.tuple;
+import static org.apache.storm.utils.Utils.tuple;
 
 public class WordCounter implements IBasicBolt {
 
@@ -56,4 +56,4 @@ public class WordCounter implements IBasicBolt {
         return null;
     }
 
-}
\ No newline at end of file
+}


http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-core/src/test/java/org/apache/storm/flux/test/TridentTopologySource.java
----------------------------------------------------------------------
diff --git a/external/flux/flux-core/src/test/java/org/apache/storm/flux/test/TridentTopologySource.java b/external/flux/flux-core/src/test/java/org/apache/storm/flux/test/TridentTopologySource.java
index 24cee7d..36b272b 100644
--- a/external/flux/flux-core/src/test/java/org/apache/storm/flux/test/TridentTopologySource.java
+++ b/external/flux/flux-core/src/test/java/org/apache/storm/flux/test/TridentTopologySource.java
@@ -17,18 +17,18 @@
  */
 package org.apache.storm.flux.test;
 
-import backtype.storm.Config;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
-import storm.kafka.StringScheme;
-import storm.trident.TridentTopology;
-import storm.trident.operation.BaseFunction;
-import storm.trident.operation.TridentCollector;
-import storm.trident.operation.builtin.Count;
-import storm.trident.testing.FixedBatchSpout;
-import storm.trident.testing.MemoryMapState;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.Config;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.kafka.StringScheme;
+import org.apache.storm.trident.TridentTopology;
+import org.apache.storm.trident.operation.BaseFunction;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.operation.builtin.Count;
+import org.apache.storm.trident.testing.FixedBatchSpout;
+import org.apache.storm.trident.testing.MemoryMapState;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 /**
  * Basic Trident example that will return a `StormTopology` from a `getTopology()` method.
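
Under the renamed packages, a self-contained Trident topology source of the shape described above could look roughly like the sketch below; the spout contents, stream name, and class name are illustrative, not part of this commit.

    import org.apache.storm.generated.StormTopology;
    import org.apache.storm.trident.TridentTopology;
    import org.apache.storm.trident.operation.builtin.Count;
    import org.apache.storm.trident.testing.FixedBatchSpout;
    import org.apache.storm.trident.testing.MemoryMapState;
    import org.apache.storm.tuple.Fields;
    import org.apache.storm.tuple.Values;

    public class WordCountTopologySource {
        public StormTopology getTopology() {
            // Illustrative spout: small batches of single-word tuples, cycled forever.
            FixedBatchSpout spout = new FixedBatchSpout(new Fields("word"), 3,
                    new Values("storm"), new Values("trident"), new Values("storm"));
            spout.setCycle(true);

            TridentTopology topology = new TridentTopology();
            // Classic Trident word count: group by word, keep running counts in memory.
            topology.newStream("words", spout)
                    .groupBy(new Fields("word"))
                    .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"));
            return topology.build();
        }
    }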

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-core/src/test/resources/configs/bad_hbase.yaml
----------------------------------------------------------------------
diff --git a/external/flux/flux-core/src/test/resources/configs/bad_hbase.yaml b/external/flux/flux-core/src/test/resources/configs/bad_hbase.yaml
index 5d91400..a29e314 100644
--- a/external/flux/flux-core/src/test/resources/configs/bad_hbase.yaml
+++ b/external/flux/flux-core/src/test/resources/configs/bad_hbase.yaml
@@ -29,12 +29,12 @@ name: "hbase-wordcount"
 
 components:
   - id: "columnFields"
-    className: "backtype.storm.tuple.Fields"
+    className: "org.apache.storm.tuple.Fields"
     constructorArgs:
       - ["word"]
 
   - id: "counterFields"
-    className: "backtype.storm.tuple.Fields"
+    className: "org.apache.storm.tuple.Fields"
     constructorArgs:
       # !!! the following won't work, and should thow an IllegalArgumentException...
       - "count"
@@ -63,14 +63,14 @@ config:
 # spout definitions
 spouts:
   - id: "word-spout"
-    className: "backtype.storm.testing.TestWordSpout"
+    className: "org.apache.storm.testing.TestWordSpout"
     parallelism: 1
 
 # bolt definitions
 
 bolts:
   - id: "count-bolt"
-    className: "backtype.storm.testing.TestWordCounter"
+    className: "org.apache.storm.testing.TestWordCounter"
 
   - id: "hbase-bolt"
     className: "org.apache.storm.hbase.bolt.HBaseBolt"
@@ -95,4 +95,4 @@ streams:
     to: "hbase-bolt"
     grouping:
       type: FIELDS
-      args: ["word"]
\ No newline at end of file
+      args: ["word"]

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-core/src/test/resources/configs/config-methods-test.yaml
----------------------------------------------------------------------
diff --git a/external/flux/flux-core/src/test/resources/configs/config-methods-test.yaml b/external/flux/flux-core/src/test/resources/configs/config-methods-test.yaml
index cda151e..bacf203 100644
--- a/external/flux/flux-core/src/test/resources/configs/config-methods-test.yaml
+++ b/external/flux/flux-core/src/test/resources/configs/config-methods-test.yaml
@@ -24,7 +24,7 @@ config:
 # spout definitions
 spouts:
   - id: "spout-1"
-    className: "backtype.storm.testing.TestWordSpout"
+    className: "org.apache.storm.testing.TestWordSpout"
     parallelism: 1
     # ...
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-core/src/test/resources/configs/diamond-topology.yaml
----------------------------------------------------------------------
diff --git a/external/flux/flux-core/src/test/resources/configs/diamond-topology.yaml b/external/flux/flux-core/src/test/resources/configs/diamond-topology.yaml
index c400d6d..957c258 100644
--- a/external/flux/flux-core/src/test/resources/configs/diamond-topology.yaml
+++ b/external/flux/flux-core/src/test/resources/configs/diamond-topology.yaml
@@ -29,7 +29,7 @@ config:
 # spout definitions
 spouts:
   - id: "spout-1"
-    className: "backtype.storm.testing.TestWordSpout"
+    className: "org.apache.storm.testing.TestWordSpout"
     parallelism: 1
 
 # bolt definitions

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-core/src/test/resources/configs/hdfs_test.yaml
----------------------------------------------------------------------
diff --git a/external/flux/flux-core/src/test/resources/configs/hdfs_test.yaml b/external/flux/flux-core/src/test/resources/configs/hdfs_test.yaml
index 8fe0a9a..2bccb33 100644
--- a/external/flux/flux-core/src/test/resources/configs/hdfs_test.yaml
+++ b/external/flux/flux-core/src/test/resources/configs/hdfs_test.yaml
@@ -60,7 +60,7 @@ components:
 # spout definitions
 spouts:
   - id: "spout-1"
-    className: "backtype.storm.testing.TestWordSpout"
+    className: "org.apache.storm.testing.TestWordSpout"
     parallelism: 1
     # ...
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-core/src/test/resources/configs/kafka_test.yaml
----------------------------------------------------------------------
diff --git a/external/flux/flux-core/src/test/resources/configs/kafka_test.yaml b/external/flux/flux-core/src/test/resources/configs/kafka_test.yaml
index 17cd8e2..bc01d93 100644
--- a/external/flux/flux-core/src/test/resources/configs/kafka_test.yaml
+++ b/external/flux/flux-core/src/test/resources/configs/kafka_test.yaml
@@ -26,21 +26,21 @@ name: "kafka-topology"
 # for the time being, components must be declared in the order they are referenced
 components:
   - id: "stringScheme"
-    className: "storm.kafka.StringScheme"
+    className: "org.apache.storm.kafka.StringScheme"
 
   - id: "stringMultiScheme"
-    className: "backtype.storm.spout.SchemeAsMultiScheme"
+    className: "org.apache.storm.spout.SchemeAsMultiScheme"
     constructorArgs:
       - ref: "stringScheme"
 
   - id: "zkHosts"
-    className: "storm.kafka.ZkHosts"
+    className: "org.apache.storm.kafka.ZkHosts"
     constructorArgs:
       - "localhost:2181"
 
 # Alternative kafka config
 #  - id: "kafkaConfig"
-#    className: "storm.kafka.KafkaConfig"
+#    className: "org.apache.storm.kafka.KafkaConfig"
 #    constructorArgs:
 #      # brokerHosts
 #      - ref: "zkHosts"
@@ -50,7 +50,7 @@ components:
 #      - "myKafkaClientId"
 
   - id: "spoutConfig"
-    className: "storm.kafka.SpoutConfig"
+    className: "org.apache.storm.kafka.SpoutConfig"
     constructorArgs:
       # brokerHosts
       - ref: "zkHosts"
@@ -76,7 +76,7 @@ config:
 # spout definitions
 spouts:
   - id: "kafka-spout"
-    className: "storm.kafka.KafkaSpout"
+    className: "org.apache.storm.kafka.KafkaSpout"
     constructorArgs:
       - ref: "spoutConfig"
 
@@ -97,7 +97,7 @@ bolts:
     # ...
 
   - id: "count"
-    className: "backtype.storm.testing.TestWordCounter"
+    className: "org.apache.storm.testing.TestWordCounter"
     parallelism: 1
 
 #stream definitions
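
The component wiring above is the Flux/YAML form of the plain storm-kafka Java setup; a rough Java equivalent is sketched below. The topic, zkRoot, and client id values are placeholders (the hunk above elides them), so treat them as assumptions.

    import org.apache.storm.kafka.KafkaSpout;
    import org.apache.storm.kafka.SpoutConfig;
    import org.apache.storm.kafka.StringScheme;
    import org.apache.storm.kafka.ZkHosts;
    import org.apache.storm.spout.SchemeAsMultiScheme;

    public class KafkaSpoutWiring {
        public static KafkaSpout buildSpout() {
            // "zkHosts" component from the YAML.
            ZkHosts zkHosts = new ZkHosts("localhost:2181");
            // Topic, zkRoot, and client id are illustrative placeholders.
            SpoutConfig spoutConfig = new SpoutConfig(zkHosts, "myTopic", "/kafka_spout", "kafka-spout-id");
            // "stringMultiScheme" component from the YAML.
            spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
            return new KafkaSpout(spoutConfig);
        }
    }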

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-core/src/test/resources/configs/shell_test.yaml
----------------------------------------------------------------------
diff --git a/external/flux/flux-core/src/test/resources/configs/shell_test.yaml b/external/flux/flux-core/src/test/resources/configs/shell_test.yaml
index b473fa7..d885975 100644
--- a/external/flux/flux-core/src/test/resources/configs/shell_test.yaml
+++ b/external/flux/flux-core/src/test/resources/configs/shell_test.yaml
@@ -74,7 +74,7 @@ bolts:
     # ...
 
   - id: "count"
-    className: "backtype.storm.testing.TestWordCounter"
+    className: "org.apache.storm.testing.TestWordCounter"
     parallelism: 1
     # ...
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-core/src/test/resources/configs/simple_hbase.yaml
----------------------------------------------------------------------
diff --git a/external/flux/flux-core/src/test/resources/configs/simple_hbase.yaml b/external/flux/flux-core/src/test/resources/configs/simple_hbase.yaml
index e407bd9..b841b53 100644
--- a/external/flux/flux-core/src/test/resources/configs/simple_hbase.yaml
+++ b/external/flux/flux-core/src/test/resources/configs/simple_hbase.yaml
@@ -52,12 +52,12 @@ name: "hbase-wordcount"
 
 components:
   - id: "columnFields"
-    className: "backtype.storm.tuple.Fields"
+    className: "org.apache.storm.tuple.Fields"
     constructorArgs:
       - ["word"]
 
   - id: "counterFields"
-    className: "backtype.storm.tuple.Fields"
+    className: "org.apache.storm.tuple.Fields"
     constructorArgs:
       - ["count"]
 
@@ -85,14 +85,14 @@ config:
 # spout definitions
 spouts:
   - id: "word-spout"
-    className: "backtype.storm.testing.TestWordSpout"
+    className: "org.apache.storm.testing.TestWordSpout"
     parallelism: 1
 
 # bolt definitions
 
 bolts:
   - id: "count-bolt"
-    className: "backtype.storm.testing.TestWordCounter"
+    className: "org.apache.storm.testing.TestWordCounter"
 
   - id: "hbase-bolt"
     className: "org.apache.storm.hbase.bolt.HBaseBolt"
@@ -117,4 +117,4 @@ streams:
     to: "hbase-bolt"
     grouping:
       type: FIELDS
-      args: ["word"]
\ No newline at end of file
+      args: ["word"]

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-core/src/test/resources/configs/substitution-test.yaml
----------------------------------------------------------------------
diff --git a/external/flux/flux-core/src/test/resources/configs/substitution-test.yaml b/external/flux/flux-core/src/test/resources/configs/substitution-test.yaml
index 13f1960..ce9e62d 100644
--- a/external/flux/flux-core/src/test/resources/configs/substitution-test.yaml
+++ b/external/flux/flux-core/src/test/resources/configs/substitution-test.yaml
@@ -76,7 +76,7 @@ bolts:
     # ...
 
   - id: "count"
-    className: "backtype.storm.testing.TestWordCounter"
+    className: "org.apache.storm.testing.TestWordCounter"
     parallelism: 1
     # ...
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-core/src/test/resources/configs/tck.yaml
----------------------------------------------------------------------
diff --git a/external/flux/flux-core/src/test/resources/configs/tck.yaml b/external/flux/flux-core/src/test/resources/configs/tck.yaml
index 7e9b614..5d40445 100644
--- a/external/flux/flux-core/src/test/resources/configs/tck.yaml
+++ b/external/flux/flux-core/src/test/resources/configs/tck.yaml
@@ -49,14 +49,14 @@ config:
 # spout definitions
 spouts:
   - id: "spout-1"
-    className: "backtype.storm.testing.TestWordSpout"
+    className: "org.apache.storm.testing.TestWordSpout"
     parallelism: 1
     # ...
 
 # bolt definitions
 bolts:
   - id: "bolt-1"
-    className: "backtype.storm.testing.TestWordCounter"
+    className: "org.apache.storm.testing.TestWordCounter"
     parallelism: 1
     # ...
 
@@ -83,7 +83,7 @@ streams:
     grouping:
       type: CUSTOM
       customClass:
-        className: "backtype.storm.testing.NGrouping"
+        className: "org.apache.storm.testing.NGrouping"
         constructorArgs:
           - 1
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-examples/src/main/java/org/apache/storm/flux/examples/TestPrintBolt.java
----------------------------------------------------------------------
diff --git a/external/flux/flux-examples/src/main/java/org/apache/storm/flux/examples/TestPrintBolt.java b/external/flux/flux-examples/src/main/java/org/apache/storm/flux/examples/TestPrintBolt.java
index 7e84441..137e354 100644
--- a/external/flux/flux-examples/src/main/java/org/apache/storm/flux/examples/TestPrintBolt.java
+++ b/external/flux/flux-examples/src/main/java/org/apache/storm/flux/examples/TestPrintBolt.java
@@ -17,10 +17,10 @@
  */
 package org.apache.storm.flux.examples;
 
-import backtype.storm.topology.BasicOutputCollector;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseBasicBolt;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.topology.BasicOutputCollector;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseBasicBolt;
+import org.apache.storm.tuple.Tuple;
 
 /**
  * Prints the tuples to stdout
@@ -36,4 +36,4 @@ public class TestPrintBolt extends BaseBasicBolt {
     public void declareOutputFields(OutputFieldsDeclarer ofd) {
     }
 
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-examples/src/main/java/org/apache/storm/flux/examples/TestWindowBolt.java
----------------------------------------------------------------------
diff --git a/external/flux/flux-examples/src/main/java/org/apache/storm/flux/examples/TestWindowBolt.java b/external/flux/flux-examples/src/main/java/org/apache/storm/flux/examples/TestWindowBolt.java
index 3aab9b6..8c904d9 100644
--- a/external/flux/flux-examples/src/main/java/org/apache/storm/flux/examples/TestWindowBolt.java
+++ b/external/flux/flux-examples/src/main/java/org/apache/storm/flux/examples/TestWindowBolt.java
@@ -17,13 +17,13 @@
  */
 package org.apache.storm.flux.examples;
 
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseWindowedBolt;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
-import backtype.storm.windowing.TupleWindow;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseWindowedBolt;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.windowing.TupleWindow;
 
 import java.util.Map;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-examples/src/main/java/org/apache/storm/flux/examples/WordCounter.java
----------------------------------------------------------------------
diff --git a/external/flux/flux-examples/src/main/java/org/apache/storm/flux/examples/WordCounter.java b/external/flux/flux-examples/src/main/java/org/apache/storm/flux/examples/WordCounter.java
index f7c80c7..7093105 100644
--- a/external/flux/flux-examples/src/main/java/org/apache/storm/flux/examples/WordCounter.java
+++ b/external/flux/flux-examples/src/main/java/org/apache/storm/flux/examples/WordCounter.java
@@ -17,19 +17,19 @@
  */
 package org.apache.storm.flux.examples;
 
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.BasicOutputCollector;
-import backtype.storm.topology.IBasicBolt;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseBasicBolt;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.BasicOutputCollector;
+import org.apache.storm.topology.IBasicBolt;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseBasicBolt;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.util.Map;
 
-import static backtype.storm.utils.Utils.tuple;
+import static org.apache.storm.utils.Utils.tuple;
 
 /**
  * This bolt is used by the HBase example. It simply emits the first field
@@ -68,4 +68,4 @@ public class WordCounter extends BaseBasicBolt {
         return null;
     }
 
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-examples/src/main/resources/kafka_spout.yaml
----------------------------------------------------------------------
diff --git a/external/flux/flux-examples/src/main/resources/kafka_spout.yaml b/external/flux/flux-examples/src/main/resources/kafka_spout.yaml
index 8ffddc5..db68b1b 100644
--- a/external/flux/flux-examples/src/main/resources/kafka_spout.yaml
+++ b/external/flux/flux-examples/src/main/resources/kafka_spout.yaml
@@ -29,21 +29,21 @@ name: "kafka-topology"
 # for the time being, components must be declared in the order they are referenced
 components:
   - id: "stringScheme"
-    className: "storm.kafka.StringScheme"
+    className: "org.apache.storm.kafka.StringScheme"
 
   - id: "stringMultiScheme"
-    className: "backtype.storm.spout.SchemeAsMultiScheme"
+    className: "org.apache.storm.spout.SchemeAsMultiScheme"
     constructorArgs:
       - ref: "stringScheme"
 
   - id: "zkHosts"
-    className: "storm.kafka.ZkHosts"
+    className: "org.apache.storm.kafka.ZkHosts"
     constructorArgs:
       - "localhost:2181"
 
 # Alternative kafka config
 #  - id: "kafkaConfig"
-#    className: "storm.kafka.KafkaConfig"
+#    className: "org.apache.storm.kafka.KafkaConfig"
 #    constructorArgs:
 #      # brokerHosts
 #      - ref: "zkHosts"
@@ -53,7 +53,7 @@ components:
 #      - "myKafkaClientId"
 
   - id: "spoutConfig"
-    className: "storm.kafka.SpoutConfig"
+    className: "org.apache.storm.kafka.SpoutConfig"
     constructorArgs:
       # brokerHosts
       - ref: "zkHosts"
@@ -84,7 +84,7 @@ config:
 # spout definitions
 spouts:
   - id: "kafka-spout"
-    className: "storm.kafka.KafkaSpout"
+    className: "org.apache.storm.kafka.KafkaSpout"
     constructorArgs:
       - ref: "spoutConfig"
 
@@ -106,7 +106,7 @@ bolts:
     # ...
 
   - id: "count"
-    className: "backtype.storm.testing.TestWordCounter"
+    className: "org.apache.storm.testing.TestWordCounter"
     parallelism: 1
     # ...
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-examples/src/main/resources/multilang.yaml
----------------------------------------------------------------------
diff --git a/external/flux/flux-examples/src/main/resources/multilang.yaml b/external/flux/flux-examples/src/main/resources/multilang.yaml
index 4f80667..aaab5d3 100644
--- a/external/flux/flux-examples/src/main/resources/multilang.yaml
+++ b/external/flux/flux-examples/src/main/resources/multilang.yaml
@@ -59,7 +59,7 @@ bolts:
     # ...
 
   - id: "count"
-    className: "backtype.storm.testing.TestWordCounter"
+    className: "org.apache.storm.testing.TestWordCounter"
     parallelism: 1
     # ...
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-examples/src/main/resources/simple_hbase.yaml
----------------------------------------------------------------------
diff --git a/external/flux/flux-examples/src/main/resources/simple_hbase.yaml b/external/flux/flux-examples/src/main/resources/simple_hbase.yaml
index 62686d0..93a2781 100644
--- a/external/flux/flux-examples/src/main/resources/simple_hbase.yaml
+++ b/external/flux/flux-examples/src/main/resources/simple_hbase.yaml
@@ -25,12 +25,12 @@ name: "hbase-persistent-wordcount"
 # Components
 components:
   - id: "columnFields"
-    className: "backtype.storm.tuple.Fields"
+    className: "org.apache.storm.tuple.Fields"
     constructorArgs:
       - ["word"]
 
   - id: "counterFields"
-    className: "backtype.storm.tuple.Fields"
+    className: "org.apache.storm.tuple.Fields"
     constructorArgs:
       - ["count"]
 
@@ -57,7 +57,7 @@ config:
 # spout definitions
 spouts:
   - id: "word-spout"
-    className: "backtype.storm.testing.TestWordSpout"
+    className: "org.apache.storm.testing.TestWordSpout"
     parallelism: 1
 
 # bolt definitions

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-examples/src/main/resources/simple_hdfs.yaml
----------------------------------------------------------------------
diff --git a/external/flux/flux-examples/src/main/resources/simple_hdfs.yaml b/external/flux/flux-examples/src/main/resources/simple_hdfs.yaml
index 9007869..b8d4020 100644
--- a/external/flux/flux-examples/src/main/resources/simple_hdfs.yaml
+++ b/external/flux/flux-examples/src/main/resources/simple_hdfs.yaml
@@ -60,7 +60,7 @@ components:
 # spout definitions
 spouts:
   - id: "spout-1"
-    className: "backtype.storm.testing.TestWordSpout"
+    className: "org.apache.storm.testing.TestWordSpout"
     parallelism: 1
     # ...
 
@@ -102,4 +102,4 @@ streams:
     from: "spout-1"
     to: "bolt-2"
     grouping:
-      type: SHUFFLE
\ No newline at end of file
+      type: SHUFFLE

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-examples/src/main/resources/simple_windowing.yaml
----------------------------------------------------------------------
diff --git a/external/flux/flux-examples/src/main/resources/simple_windowing.yaml b/external/flux/flux-examples/src/main/resources/simple_windowing.yaml
index 31be109..f2f74ff 100755
--- a/external/flux/flux-examples/src/main/resources/simple_windowing.yaml
+++ b/external/flux/flux-examples/src/main/resources/simple_windowing.yaml
@@ -20,11 +20,11 @@ name: "sliding-window-topology"
 
 components:
   - id: "windowLength"
-    className: "backtype.storm.topology.base.BaseWindowedBolt$Count"
+    className: "org.apache.storm.topology.base.BaseWindowedBolt$Count"
     constructorArgs:
       - 5
   - id: "slidingInterval"
-    className: "backtype.storm.topology.base.BaseWindowedBolt$Count"
+    className: "org.apache.storm.topology.base.BaseWindowedBolt$Count"
     constructorArgs:
       - 3
 
@@ -34,7 +34,7 @@ config:
 # spout definitions
 spouts:
   - id: "spout-1"
-    className: "backtype.storm.testing.TestWordSpout"
+    className: "org.apache.storm.testing.TestWordSpout"
     parallelism: 1
 
 # bolt definitions
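
The two Count components above translate to the programmatic windowing API; a rough Java sketch of the same count-based sliding window (length 5, sliding every 3 tuples) follows. It assumes the TestWindowBolt from this commit has a no-argument constructor; the topology and component ids are illustrative.

    import org.apache.storm.Config;
    import org.apache.storm.LocalCluster;
    import org.apache.storm.flux.examples.TestWindowBolt;
    import org.apache.storm.testing.TestWordSpout;
    import org.apache.storm.topology.TopologyBuilder;
    import org.apache.storm.topology.base.BaseWindowedBolt.Count;

    public class SlidingWindowWiring {
        public static void main(String[] args) throws Exception {
            TopologyBuilder builder = new TopologyBuilder();
            builder.setSpout("spout-1", new TestWordSpout(), 1);
            // Window of 5 tuples, sliding every 3 tuples, matching the YAML components.
            builder.setBolt("window-bolt",
                    new TestWindowBolt().withWindow(new Count(5), new Count(3)), 1)
                   .shuffleGrouping("spout-1");
            new LocalCluster().submitTopology("sliding-window-topology", new Config(), builder.createTopology());
        }
    }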

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-examples/src/main/resources/simple_wordcount.yaml
----------------------------------------------------------------------
diff --git a/external/flux/flux-examples/src/main/resources/simple_wordcount.yaml b/external/flux/flux-examples/src/main/resources/simple_wordcount.yaml
index 380f9d2..6443a97 100644
--- a/external/flux/flux-examples/src/main/resources/simple_wordcount.yaml
+++ b/external/flux/flux-examples/src/main/resources/simple_wordcount.yaml
@@ -29,13 +29,13 @@ config:
 # spout definitions
 spouts:
   - id: "spout-1"
-    className: "backtype.storm.testing.TestWordSpout"
+    className: "org.apache.storm.testing.TestWordSpout"
     parallelism: 1
 
 # bolt definitions
 bolts:
   - id: "bolt-1"
-    className: "backtype.storm.testing.TestWordCounter"
+    className: "org.apache.storm.testing.TestWordCounter"
     parallelism: 1
 
   - id: "bolt-2"

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-wrappers/src/main/java/org/apache/storm/flux/wrappers/bolts/FluxShellBolt.java
----------------------------------------------------------------------
diff --git a/external/flux/flux-wrappers/src/main/java/org/apache/storm/flux/wrappers/bolts/FluxShellBolt.java b/external/flux/flux-wrappers/src/main/java/org/apache/storm/flux/wrappers/bolts/FluxShellBolt.java
index 4e0f91c..1af1f42 100644
--- a/external/flux/flux-wrappers/src/main/java/org/apache/storm/flux/wrappers/bolts/FluxShellBolt.java
+++ b/external/flux/flux-wrappers/src/main/java/org/apache/storm/flux/wrappers/bolts/FluxShellBolt.java
@@ -17,10 +17,10 @@
  */
 package org.apache.storm.flux.wrappers.bolts;
 
-import backtype.storm.task.ShellBolt;
-import backtype.storm.topology.IRichBolt;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Fields;
+import org.apache.storm.task.ShellBolt;
+import org.apache.storm.topology.IRichBolt;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Fields;
 
 import java.util.Map;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-wrappers/src/main/java/org/apache/storm/flux/wrappers/bolts/LogInfoBolt.java
----------------------------------------------------------------------
diff --git a/external/flux/flux-wrappers/src/main/java/org/apache/storm/flux/wrappers/bolts/LogInfoBolt.java b/external/flux/flux-wrappers/src/main/java/org/apache/storm/flux/wrappers/bolts/LogInfoBolt.java
index a42d7c3..5f0e84b 100644
--- a/external/flux/flux-wrappers/src/main/java/org/apache/storm/flux/wrappers/bolts/LogInfoBolt.java
+++ b/external/flux/flux-wrappers/src/main/java/org/apache/storm/flux/wrappers/bolts/LogInfoBolt.java
@@ -18,10 +18,10 @@
 
 package org.apache.storm.flux.wrappers.bolts;
 
-import backtype.storm.topology.BasicOutputCollector;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseBasicBolt;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.topology.BasicOutputCollector;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseBasicBolt;
+import org.apache.storm.tuple.Tuple;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-wrappers/src/main/java/org/apache/storm/flux/wrappers/spouts/FluxShellSpout.java
----------------------------------------------------------------------
diff --git a/external/flux/flux-wrappers/src/main/java/org/apache/storm/flux/wrappers/spouts/FluxShellSpout.java b/external/flux/flux-wrappers/src/main/java/org/apache/storm/flux/wrappers/spouts/FluxShellSpout.java
index c7e9058..1951c1e 100644
--- a/external/flux/flux-wrappers/src/main/java/org/apache/storm/flux/wrappers/spouts/FluxShellSpout.java
+++ b/external/flux/flux-wrappers/src/main/java/org/apache/storm/flux/wrappers/spouts/FluxShellSpout.java
@@ -17,10 +17,10 @@
  */
 package org.apache.storm.flux.wrappers.spouts;
 
-import backtype.storm.spout.ShellSpout;
-import backtype.storm.topology.IRichSpout;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Fields;
+import org.apache.storm.spout.ShellSpout;
+import org.apache.storm.topology.IRichSpout;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Fields;
 
 import java.util.Map;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-wrappers/src/main/resources/resources/randomsentence.js
----------------------------------------------------------------------
diff --git a/external/flux/flux-wrappers/src/main/resources/resources/randomsentence.js b/external/flux/flux-wrappers/src/main/resources/resources/randomsentence.js
index 36fc5f5..b121915 100644
--- a/external/flux/flux-wrappers/src/main/resources/resources/randomsentence.js
+++ b/external/flux/flux-wrappers/src/main/resources/resources/randomsentence.js
@@ -18,7 +18,7 @@
 
 /**
  * Example for storm spout. Emits random sentences.
- * The original class in java - storm.starter.spout.RandomSentenceSpout.
+ * The original class in java - org.apache.storm.starter.spout.RandomSentenceSpout.
  *
  */
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/sql/README.md
----------------------------------------------------------------------
diff --git a/external/sql/README.md b/external/sql/README.md
index 3caba58..6f68951 100644
--- a/external/sql/README.md
+++ b/external/sql/README.md
@@ -26,7 +26,7 @@ In StormSQL data is represented by external tables. Users can specify data sourc
 statement. For example, the following statement specifies a Kafka spouts and sink:
 
 ```
-CREATE EXTERNAL TABLE FOO (ID INT PRIMARY KEY) LOCATION 'kafka://localhost:2181/brokers?topic=test' TBLPROPERTIES '{"producer":{"bootstrap.servers":"localhost:9092","acks":"1","key.serializer":"storm.kafka.IntSerializer","value.serializer":"storm.kafka.ByteBufferSerializer"}}'
+CREATE EXTERNAL TABLE FOO (ID INT PRIMARY KEY) LOCATION 'kafka://localhost:2181/brokers?topic=test' TBLPROPERTIES '{"producer":{"bootstrap.servers":"localhost:9092","acks":"1","key.serializer":"org.apache.storm.kafka.IntSerializer","value.serializer":"org.apache.storm.kafka.ByteBufferSerializer"}}'
 ```
 
 The syntax of `CREATE EXTERNAL TABLE` closely follows the one defined in
@@ -47,9 +47,9 @@ transactions are significant and to insert these orders into another Kafka strea
 The user can specify the following SQL statements in the SQL file:
 
 ```
-CREATE EXTERNAL TABLE ORDERS (ID INT PRIMARY KEY, UNIT_PRICE INT, QUANTITY INT) LOCATION 'kafka://localhost:2181/brokers?topic=orders' TBLPROPERTIES '{"producer":{"bootstrap.servers":"localhost:9092","acks":"1","key.serializer":"storm.kafka.IntSerializer","value.serializer":"storm.kafka.ByteBufferSerializer"}}'
+CREATE EXTERNAL TABLE ORDERS (ID INT PRIMARY KEY, UNIT_PRICE INT, QUANTITY INT) LOCATION 'kafka://localhost:2181/brokers?topic=orders' TBLPROPERTIES '{"producer":{"bootstrap.servers":"localhost:9092","acks":"1","key.serializer":"org.apache.storm.kafka.IntSerializer","value.serializer":"org.apache.storm.kafka.ByteBufferSerializer"}}'
 
-CREATE EXTERNAL TABLE LARGE_ORDERS (ID INT PRIMARY KEY, TOTAL INT) LOCATION 'kafka://localhost:2181/brokers?topic=large_orders' TBLPROPERTIES '{"producer":{"bootstrap.servers":"localhost:9092","acks":"1","key.serializer":"storm.kafka.IntSerializer","value.serializer":"storm.kafka.ByteBufferSerializer"}}'
+CREATE EXTERNAL TABLE LARGE_ORDERS (ID INT PRIMARY KEY, TOTAL INT) LOCATION 'kafka://localhost:2181/brokers?topic=large_orders' TBLPROPERTIES '{"producer":{"bootstrap.servers":"localhost:9092","acks":"1","key.serializer":"org.apache.storm.kafka.IntSerializer","value.serializer":"org.apache.storm.kafka.ByteBufferSerializer"}}'
 
 INSERT INTO LARGE_ORDERS SELECT ID, UNIT_PRICE * QUANTITY AS TOTAL FROM ORDERS WHERE UNIT_PRICE * QUANTITY > 50
 ```

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/sql/storm-sql-core/src/jvm/org/apache/storm/sql/StormSql.java
----------------------------------------------------------------------
diff --git a/external/sql/storm-sql-core/src/jvm/org/apache/storm/sql/StormSql.java b/external/sql/storm-sql-core/src/jvm/org/apache/storm/sql/StormSql.java
index eb1e452..57b64f6 100644
--- a/external/sql/storm-sql-core/src/jvm/org/apache/storm/sql/StormSql.java
+++ b/external/sql/storm-sql-core/src/jvm/org/apache/storm/sql/StormSql.java
@@ -17,8 +17,8 @@
  */
 package org.apache.storm.sql;
 
-import backtype.storm.StormSubmitter;
-import backtype.storm.generated.SubmitOptions;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.generated.SubmitOptions;
 import org.apache.storm.sql.runtime.ChannelHandler;
 
 import java.util.Map;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/sql/storm-sql-core/src/jvm/org/apache/storm/sql/StormSqlImpl.java
----------------------------------------------------------------------
diff --git a/external/sql/storm-sql-core/src/jvm/org/apache/storm/sql/StormSqlImpl.java b/external/sql/storm-sql-core/src/jvm/org/apache/storm/sql/StormSqlImpl.java
index c2c8bc8..7e5dfcc 100644
--- a/external/sql/storm-sql-core/src/jvm/org/apache/storm/sql/StormSqlImpl.java
+++ b/external/sql/storm-sql-core/src/jvm/org/apache/storm/sql/StormSqlImpl.java
@@ -17,8 +17,8 @@
  */
 package org.apache.storm.sql;
 
-import backtype.storm.StormSubmitter;
-import backtype.storm.generated.SubmitOptions;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.generated.SubmitOptions;
 import org.apache.calcite.adapter.java.JavaTypeFactory;
 import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
 import org.apache.calcite.rel.RelNode;
@@ -38,7 +38,7 @@ import org.apache.storm.sql.parser.SqlCreateTable;
 import org.apache.storm.sql.parser.StormParser;
 import org.apache.storm.sql.runtime.*;
 import org.apache.storm.sql.runtime.trident.AbstractTridentProcessor;
-import storm.trident.TridentTopology;
+import org.apache.storm.trident.TridentTopology;
 
 import java.io.BufferedOutputStream;
 import java.io.ByteArrayOutputStream;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/sql/storm-sql-core/src/jvm/org/apache/storm/sql/StormSqlRunner.java
----------------------------------------------------------------------
diff --git a/external/sql/storm-sql-core/src/jvm/org/apache/storm/sql/StormSqlRunner.java b/external/sql/storm-sql-core/src/jvm/org/apache/storm/sql/StormSqlRunner.java
index 970ccd2..22981d5 100644
--- a/external/sql/storm-sql-core/src/jvm/org/apache/storm/sql/StormSqlRunner.java
+++ b/external/sql/storm-sql-core/src/jvm/org/apache/storm/sql/StormSqlRunner.java
@@ -17,9 +17,9 @@
  */
 package org.apache.storm.sql;
 
-import backtype.storm.generated.SubmitOptions;
-import backtype.storm.generated.TopologyInitialStatus;
-import backtype.storm.utils.Utils;
+import org.apache.storm.generated.SubmitOptions;
+import org.apache.storm.generated.TopologyInitialStatus;
+import org.apache.storm.utils.Utils;
 
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/sql/storm-sql-core/src/jvm/org/apache/storm/sql/compiler/backends/standalone/PlanCompiler.java
----------------------------------------------------------------------
diff --git a/external/sql/storm-sql-core/src/jvm/org/apache/storm/sql/compiler/backends/standalone/PlanCompiler.java b/external/sql/storm-sql-core/src/jvm/org/apache/storm/sql/compiler/backends/standalone/PlanCompiler.java
index 64bc06e..eb7f4ea 100644
--- a/external/sql/storm-sql-core/src/jvm/org/apache/storm/sql/compiler/backends/standalone/PlanCompiler.java
+++ b/external/sql/storm-sql-core/src/jvm/org/apache/storm/sql/compiler/backends/standalone/PlanCompiler.java
@@ -39,7 +39,7 @@ public class PlanCompiler {
   private static final String PROLOGUE = NEW_LINE_JOINER.join(
       "// GENERATED CODE", "package " + PACKAGE_NAME + ";", "",
       "import java.util.Iterator;", "import java.util.Map;",
-      "import backtype.storm.tuple.Values;",
+      "import org.apache.storm.tuple.Values;",
       "import org.apache.storm.sql.runtime.AbstractChannelHandler;",
       "import org.apache.storm.sql.runtime.Channels;",
       "import org.apache.storm.sql.runtime.ChannelContext;",

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/sql/storm-sql-core/src/jvm/org/apache/storm/sql/compiler/backends/trident/PlanCompiler.java
----------------------------------------------------------------------
diff --git a/external/sql/storm-sql-core/src/jvm/org/apache/storm/sql/compiler/backends/trident/PlanCompiler.java b/external/sql/storm-sql-core/src/jvm/org/apache/storm/sql/compiler/backends/trident/PlanCompiler.java
index 35e0cb7..7a5516d 100644
--- a/external/sql/storm-sql-core/src/jvm/org/apache/storm/sql/compiler/backends/trident/PlanCompiler.java
+++ b/external/sql/storm-sql-core/src/jvm/org/apache/storm/sql/compiler/backends/trident/PlanCompiler.java
@@ -45,17 +45,17 @@ public class PlanCompiler {
       "// GENERATED CODE", "package " + PACKAGE_NAME + ";", "",
       "import java.util.List;",
       "import java.util.Map;",
-      "import backtype.storm.tuple.Fields;",
-      "import backtype.storm.tuple.Values;",
+      "import org.apache.storm.tuple.Fields;",
+      "import org.apache.storm.tuple.Values;",
       "import org.apache.storm.sql.runtime.ISqlTridentDataSource;",
       "import org.apache.storm.sql.runtime.trident.AbstractTridentProcessor;",
-      "import storm.trident.Stream;",
-      "import storm.trident.TridentTopology;",
-      "import storm.trident.fluent.IAggregatableStream;",
-      "import storm.trident.operation.TridentCollector;",
-      "import storm.trident.operation.BaseFunction;",
-      "import storm.trident.spout.IBatchSpout;",
-      "import storm.trident.tuple.TridentTuple;",
+      "import org.apache.storm.trident.Stream;",
+      "import org.apache.storm.trident.TridentTopology;",
+      "import org.apache.storm.trident.fluent.IAggregatableStream;",
+      "import org.apache.storm.trident.operation.TridentCollector;",
+      "import org.apache.storm.trident.operation.BaseFunction;",
+      "import org.apache.storm.trident.spout.IBatchSpout;",
+      "import org.apache.storm.trident.tuple.TridentTuple;",
       "",
       "public final class TridentProcessor extends AbstractTridentProcessor {",
       "");

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/sql/storm-sql-core/src/jvm/org/apache/storm/sql/compiler/backends/trident/RelNodeCompiler.java
----------------------------------------------------------------------
diff --git a/external/sql/storm-sql-core/src/jvm/org/apache/storm/sql/compiler/backends/trident/RelNodeCompiler.java b/external/sql/storm-sql-core/src/jvm/org/apache/storm/sql/compiler/backends/trident/RelNodeCompiler.java
index 1de39d3..340b9a2 100644
--- a/external/sql/storm-sql-core/src/jvm/org/apache/storm/sql/compiler/backends/trident/RelNodeCompiler.java
+++ b/external/sql/storm-sql-core/src/jvm/org/apache/storm/sql/compiler/backends/trident/RelNodeCompiler.java
@@ -19,7 +19,7 @@
  */
 package org.apache.storm.sql.compiler.backends.trident;
 
-import backtype.storm.tuple.Fields;
+import org.apache.storm.tuple.Fields;
 import com.google.common.base.Joiner;
 import org.apache.calcite.adapter.java.JavaTypeFactory;
 import org.apache.calcite.rel.RelNode;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/sql/storm-sql-core/src/test/org/apache/storm/sql/TestStormSql.java
----------------------------------------------------------------------
diff --git a/external/sql/storm-sql-core/src/test/org/apache/storm/sql/TestStormSql.java b/external/sql/storm-sql-core/src/test/org/apache/storm/sql/TestStormSql.java
index f145180..511e5ab 100644
--- a/external/sql/storm-sql-core/src/test/org/apache/storm/sql/TestStormSql.java
+++ b/external/sql/storm-sql-core/src/test/org/apache/storm/sql/TestStormSql.java
@@ -17,12 +17,12 @@
  */
 package org.apache.storm.sql;
 
-import backtype.storm.Config;
-import backtype.storm.ILocalCluster;
-import backtype.storm.StormSubmitter;
-import backtype.storm.generated.SubmitOptions;
-import backtype.storm.generated.TopologyInitialStatus;
-import backtype.storm.tuple.Values;
+import org.apache.storm.Config;
+import org.apache.storm.ILocalCluster;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.generated.SubmitOptions;
+import org.apache.storm.generated.TopologyInitialStatus;
+import org.apache.storm.tuple.Values;
 import org.apache.storm.sql.runtime.*;
 import org.junit.AfterClass;
 import org.junit.Assert;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/sql/storm-sql-core/src/test/org/apache/storm/sql/compiler/TestExprSemantic.java
----------------------------------------------------------------------
diff --git a/external/sql/storm-sql-core/src/test/org/apache/storm/sql/compiler/TestExprSemantic.java b/external/sql/storm-sql-core/src/test/org/apache/storm/sql/compiler/TestExprSemantic.java
index febfdb5..8304a33 100644
--- a/external/sql/storm-sql-core/src/test/org/apache/storm/sql/compiler/TestExprSemantic.java
+++ b/external/sql/storm-sql-core/src/test/org/apache/storm/sql/compiler/TestExprSemantic.java
@@ -17,7 +17,7 @@
  */
 package org.apache.storm.sql.compiler;
 
-import backtype.storm.tuple.Values;
+import org.apache.storm.tuple.Values;
 import com.google.common.base.Joiner;
 import com.google.common.collect.Lists;
 import org.apache.calcite.adapter.java.JavaTypeFactory;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/sql/storm-sql-core/src/test/org/apache/storm/sql/compiler/backends/standalone/TestPlanCompiler.java
----------------------------------------------------------------------
diff --git a/external/sql/storm-sql-core/src/test/org/apache/storm/sql/compiler/backends/standalone/TestPlanCompiler.java b/external/sql/storm-sql-core/src/test/org/apache/storm/sql/compiler/backends/standalone/TestPlanCompiler.java
index 8d7fc65..ff28231 100644
--- a/external/sql/storm-sql-core/src/test/org/apache/storm/sql/compiler/backends/standalone/TestPlanCompiler.java
+++ b/external/sql/storm-sql-core/src/test/org/apache/storm/sql/compiler/backends/standalone/TestPlanCompiler.java
@@ -17,7 +17,7 @@
  */
 package org.apache.storm.sql.compiler.backends.standalone;
 
-import backtype.storm.tuple.Values;
+import org.apache.storm.tuple.Values;
 import org.apache.calcite.adapter.java.JavaTypeFactory;
 import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
 import org.apache.calcite.rel.type.RelDataTypeSystem;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/sql/storm-sql-core/src/test/org/apache/storm/sql/compiler/backends/trident/TestPlanCompiler.java
----------------------------------------------------------------------
diff --git a/external/sql/storm-sql-core/src/test/org/apache/storm/sql/compiler/backends/trident/TestPlanCompiler.java b/external/sql/storm-sql-core/src/test/org/apache/storm/sql/compiler/backends/trident/TestPlanCompiler.java
index a68ba0c..ddc671a 100644
--- a/external/sql/storm-sql-core/src/test/org/apache/storm/sql/compiler/backends/trident/TestPlanCompiler.java
+++ b/external/sql/storm-sql-core/src/test/org/apache/storm/sql/compiler/backends/trident/TestPlanCompiler.java
@@ -19,13 +19,13 @@
  */
 package org.apache.storm.sql.compiler.backends.trident;
 
-import backtype.storm.Config;
-import backtype.storm.ILocalCluster;
-import backtype.storm.LocalCluster;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
-import backtype.storm.utils.Utils;
+import org.apache.storm.Config;
+import org.apache.storm.ILocalCluster;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.utils.Utils;
 import org.apache.calcite.adapter.java.JavaTypeFactory;
 import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
 import org.apache.calcite.rel.type.RelDataTypeSystem;
@@ -37,7 +37,7 @@ import org.apache.storm.sql.runtime.trident.AbstractTridentProcessor;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-import storm.trident.TridentTopology;
+import org.apache.storm.trident.TridentTopology;
 
 import java.util.HashMap;
 import java.util.Map;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/sql/storm-sql-kafka/src/jvm/org/apache/storm/sql/kafka/JsonScheme.java
----------------------------------------------------------------------
diff --git a/external/sql/storm-sql-kafka/src/jvm/org/apache/storm/sql/kafka/JsonScheme.java b/external/sql/storm-sql-kafka/src/jvm/org/apache/storm/sql/kafka/JsonScheme.java
index 1b45b30..eed1282 100644
--- a/external/sql/storm-sql-kafka/src/jvm/org/apache/storm/sql/kafka/JsonScheme.java
+++ b/external/sql/storm-sql-kafka/src/jvm/org/apache/storm/sql/kafka/JsonScheme.java
@@ -17,9 +17,9 @@
  */
 package org.apache.storm.sql.kafka;
 
-import backtype.storm.spout.Scheme;
-import backtype.storm.tuple.Fields;
-import backtype.storm.utils.Utils;
+import org.apache.storm.spout.Scheme;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.utils.Utils;
 import com.fasterxml.jackson.databind.ObjectMapper;
 
 import java.io.IOException;
@@ -55,4 +55,4 @@ public class JsonScheme implements Scheme {
   public Fields getOutputFields() {
     return new Fields(fields);
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/sql/storm-sql-kafka/src/jvm/org/apache/storm/sql/kafka/KafkaDataSourcesProvider.java
----------------------------------------------------------------------
diff --git a/external/sql/storm-sql-kafka/src/jvm/org/apache/storm/sql/kafka/KafkaDataSourcesProvider.java b/external/sql/storm-sql-kafka/src/jvm/org/apache/storm/sql/kafka/KafkaDataSourcesProvider.java
index 7da57ba..0236948 100644
--- a/external/sql/storm-sql-kafka/src/jvm/org/apache/storm/sql/kafka/KafkaDataSourcesProvider.java
+++ b/external/sql/storm-sql-kafka/src/jvm/org/apache/storm/sql/kafka/KafkaDataSourcesProvider.java
@@ -17,22 +17,22 @@
  */
 package org.apache.storm.sql.kafka;
 
-import backtype.storm.spout.SchemeAsMultiScheme;
+import org.apache.storm.spout.SchemeAsMultiScheme;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.google.common.base.Preconditions;
 import org.apache.storm.sql.runtime.*;
-import storm.kafka.ZkHosts;
-import storm.kafka.trident.OpaqueTridentKafkaSpout;
-import storm.kafka.trident.TridentKafkaConfig;
-import storm.kafka.trident.TridentKafkaState;
-import storm.kafka.trident.mapper.TridentTupleToKafkaMapper;
-import storm.kafka.trident.selector.KafkaTopicSelector;
-import storm.trident.operation.BaseFunction;
-import storm.trident.operation.Function;
-import storm.trident.operation.TridentCollector;
-import storm.trident.operation.TridentOperationContext;
-import storm.trident.spout.ITridentDataSource;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.kafka.ZkHosts;
+import org.apache.storm.kafka.trident.OpaqueTridentKafkaSpout;
+import org.apache.storm.kafka.trident.TridentKafkaConfig;
+import org.apache.storm.kafka.trident.TridentKafkaState;
+import org.apache.storm.kafka.trident.mapper.TridentTupleToKafkaMapper;
+import org.apache.storm.kafka.trident.selector.KafkaTopicSelector;
+import org.apache.storm.trident.operation.BaseFunction;
+import org.apache.storm.trident.operation.Function;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.operation.TridentOperationContext;
+import org.apache.storm.trident.spout.ITridentDataSource;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.io.IOException;
 import java.net.URI;
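
The imports in this hunk cover the Trident Kafka spout classes under their renamed packages. A minimal, hypothetical sketch of wiring such a spout into a Trident topology with the new names (the ZooKeeper address and topic are illustrative placeholders, not taken from this commit):

```java
import org.apache.storm.kafka.ZkHosts;
import org.apache.storm.kafka.trident.OpaqueTridentKafkaSpout;
import org.apache.storm.kafka.trident.TridentKafkaConfig;
import org.apache.storm.trident.Stream;
import org.apache.storm.trident.TridentTopology;

public class KafkaSpoutWiringSketch {
    // Builds a Trident stream from a Kafka topic using the renamed spout classes.
    // The ZooKeeper address and topic name are illustrative placeholders.
    public static Stream ordersStream(TridentTopology topology) {
        ZkHosts hosts = new ZkHosts("localhost:2181");
        TridentKafkaConfig spoutConfig = new TridentKafkaConfig(hosts, "orders");
        OpaqueTridentKafkaSpout spout = new OpaqueTridentKafkaSpout(spoutConfig);
        return topology.newStream("orders", spout);
    }
}
```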

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/sql/storm-sql-kafka/src/test/org/apache/storm/sql/kafka/TestJsonRepresentation.java
----------------------------------------------------------------------
diff --git a/external/sql/storm-sql-kafka/src/test/org/apache/storm/sql/kafka/TestJsonRepresentation.java b/external/sql/storm-sql-kafka/src/test/org/apache/storm/sql/kafka/TestJsonRepresentation.java
index 5973672..7e85410 100644
--- a/external/sql/storm-sql-kafka/src/test/org/apache/storm/sql/kafka/TestJsonRepresentation.java
+++ b/external/sql/storm-sql-kafka/src/test/org/apache/storm/sql/kafka/TestJsonRepresentation.java
@@ -17,7 +17,7 @@
  */
 package org.apache.storm.sql.kafka;
 
-import backtype.storm.utils.Utils;
+import org.apache.storm.utils.Utils;
 import com.google.common.collect.Lists;
 import org.junit.Test;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/sql/storm-sql-kafka/src/test/org/apache/storm/sql/kafka/TestKafkaDataSourcesProvider.java
----------------------------------------------------------------------
diff --git a/external/sql/storm-sql-kafka/src/test/org/apache/storm/sql/kafka/TestKafkaDataSourcesProvider.java b/external/sql/storm-sql-kafka/src/test/org/apache/storm/sql/kafka/TestKafkaDataSourcesProvider.java
index 399bb3e..f6e75ab 100644
--- a/external/sql/storm-sql-kafka/src/test/org/apache/storm/sql/kafka/TestKafkaDataSourcesProvider.java
+++ b/external/sql/storm-sql-kafka/src/test/org/apache/storm/sql/kafka/TestKafkaDataSourcesProvider.java
@@ -30,8 +30,8 @@ import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.ArgumentMatcher;
 import org.mockito.internal.util.reflection.Whitebox;
-import storm.kafka.trident.TridentKafkaState;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.kafka.trident.TridentKafkaState;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.net.URI;
 import java.nio.ByteBuffer;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/AbstractChannelHandler.java
----------------------------------------------------------------------
diff --git a/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/AbstractChannelHandler.java b/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/AbstractChannelHandler.java
index 892d2e4..6a8bbe5 100644
--- a/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/AbstractChannelHandler.java
+++ b/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/AbstractChannelHandler.java
@@ -19,7 +19,7 @@
  */
 package org.apache.storm.sql.runtime;
 
-import backtype.storm.tuple.Values;
+import org.apache.storm.tuple.Values;
 
 public abstract class AbstractChannelHandler implements ChannelHandler {
   @Override

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/AbstractValuesProcessor.java
----------------------------------------------------------------------
diff --git a/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/AbstractValuesProcessor.java b/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/AbstractValuesProcessor.java
index 11aa065..ad09319 100644
--- a/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/AbstractValuesProcessor.java
+++ b/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/AbstractValuesProcessor.java
@@ -20,7 +20,7 @@
 
 package org.apache.storm.sql.runtime;
 
-import backtype.storm.tuple.Values;
+import org.apache.storm.tuple.Values;
 import org.apache.storm.sql.runtime.ChannelHandler;
 import org.apache.storm.sql.runtime.DataSource;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/ChannelContext.java
----------------------------------------------------------------------
diff --git a/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/ChannelContext.java b/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/ChannelContext.java
index 71aba03..c29c6b1 100644
--- a/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/ChannelContext.java
+++ b/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/ChannelContext.java
@@ -19,7 +19,7 @@
  */
 package org.apache.storm.sql.runtime;
 
-import backtype.storm.tuple.Values;
+import org.apache.storm.tuple.Values;
 
 public interface ChannelContext {
   /**

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/ChannelHandler.java
----------------------------------------------------------------------
diff --git a/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/ChannelHandler.java b/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/ChannelHandler.java
index 117f312..f30d0a3 100644
--- a/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/ChannelHandler.java
+++ b/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/ChannelHandler.java
@@ -19,7 +19,7 @@
  */
 package org.apache.storm.sql.runtime;
 
-import backtype.storm.tuple.Values;
+import org.apache.storm.tuple.Values;
 
 /**
  * DataListener provides an event-driven interface for the user to process

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/Channels.java
----------------------------------------------------------------------
diff --git a/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/Channels.java b/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/Channels.java
index 7214f9a..47dab11 100644
--- a/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/Channels.java
+++ b/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/Channels.java
@@ -19,7 +19,7 @@
  */
 package org.apache.storm.sql.runtime;
 
-import backtype.storm.tuple.Values;
+import org.apache.storm.tuple.Values;
 
 public class Channels {
   private static final ChannelContext VOID_CTX = new ChannelContext() {

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/ISqlTridentDataSource.java
----------------------------------------------------------------------
diff --git a/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/ISqlTridentDataSource.java b/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/ISqlTridentDataSource.java
index d9e1db7..92961dc 100644
--- a/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/ISqlTridentDataSource.java
+++ b/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/ISqlTridentDataSource.java
@@ -17,9 +17,9 @@
  */
 package org.apache.storm.sql.runtime;
 
-import storm.trident.operation.Function;
-import storm.trident.spout.IBatchSpout;
-import storm.trident.spout.ITridentDataSource;
+import org.apache.storm.trident.operation.Function;
+import org.apache.storm.trident.spout.IBatchSpout;
+import org.apache.storm.trident.spout.ITridentDataSource;
 
 /**
  * A ISqlTridentDataSource specifies how an external data source produces and consumes data.

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/trident/AbstractTridentProcessor.java
----------------------------------------------------------------------
diff --git a/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/trident/AbstractTridentProcessor.java b/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/trident/AbstractTridentProcessor.java
index 7faa7e4..8743795 100644
--- a/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/trident/AbstractTridentProcessor.java
+++ b/external/sql/storm-sql-runtime/src/jvm/org/apache/storm/sql/runtime/trident/AbstractTridentProcessor.java
@@ -21,8 +21,8 @@
 package org.apache.storm.sql.runtime.trident;
 
 import org.apache.storm.sql.runtime.ISqlTridentDataSource;
-import storm.trident.Stream;
-import storm.trident.TridentTopology;
+import org.apache.storm.trident.Stream;
+import org.apache.storm.trident.TridentTopology;
 
 import java.util.Map;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/sql/storm-sql-runtime/src/test/org/apache/storm/sql/TestUtils.java
----------------------------------------------------------------------
diff --git a/external/sql/storm-sql-runtime/src/test/org/apache/storm/sql/TestUtils.java b/external/sql/storm-sql-runtime/src/test/org/apache/storm/sql/TestUtils.java
index 58efdf6..c5a4043 100644
--- a/external/sql/storm-sql-runtime/src/test/org/apache/storm/sql/TestUtils.java
+++ b/external/sql/storm-sql-runtime/src/test/org/apache/storm/sql/TestUtils.java
@@ -19,20 +19,20 @@
  */
 package org.apache.storm.sql;
 
-import backtype.storm.ILocalCluster;
-import backtype.storm.LocalCluster;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
+import org.apache.storm.ILocalCluster;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
 import org.apache.storm.sql.runtime.ChannelContext;
 import org.apache.storm.sql.runtime.ChannelHandler;
 import org.apache.storm.sql.runtime.DataSource;
 import org.apache.storm.sql.runtime.ISqlTridentDataSource;
-import storm.trident.operation.BaseFunction;
-import storm.trident.operation.Function;
-import storm.trident.operation.TridentCollector;
-import storm.trident.spout.IBatchSpout;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.operation.BaseFunction;
+import org.apache.storm.trident.operation.Function;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.spout.IBatchSpout;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.util.ArrayList;
 import java.util.List;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/AbstractExecutionResultHandler.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/AbstractExecutionResultHandler.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/AbstractExecutionResultHandler.java
index 80ae284..cf68c4f 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/AbstractExecutionResultHandler.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/AbstractExecutionResultHandler.java
@@ -18,8 +18,8 @@
  */
 package org.apache.storm.cassandra;
 
-import backtype.storm.task.OutputCollector;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.tuple.Tuple;
 import com.datastax.driver.core.exceptions.QueryValidationException;
 import com.datastax.driver.core.exceptions.ReadTimeoutException;
 import com.datastax.driver.core.exceptions.UnavailableException;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/BaseExecutionResultHandler.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/BaseExecutionResultHandler.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/BaseExecutionResultHandler.java
index c7fc4f1..18f9ebf 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/BaseExecutionResultHandler.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/BaseExecutionResultHandler.java
@@ -18,8 +18,8 @@
  */
 package org.apache.storm.cassandra;
 
-import backtype.storm.task.OutputCollector;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.tuple.Tuple;
 import com.datastax.driver.core.exceptions.*;
 import org.slf4j.LoggerFactory;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/ExecutionResultHandler.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/ExecutionResultHandler.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/ExecutionResultHandler.java
index b804ee5..ac3c76e 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/ExecutionResultHandler.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/ExecutionResultHandler.java
@@ -18,8 +18,8 @@
  */
 package org.apache.storm.cassandra;
 
-import backtype.storm.task.OutputCollector;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.tuple.Tuple;
 import com.datastax.driver.core.exceptions.QueryValidationException;
 import com.datastax.driver.core.exceptions.ReadTimeoutException;
 import com.datastax.driver.core.exceptions.UnavailableException;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/Murmur3StreamGrouping.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/Murmur3StreamGrouping.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/Murmur3StreamGrouping.java
index 992bfd0..5d67796 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/Murmur3StreamGrouping.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/Murmur3StreamGrouping.java
@@ -18,11 +18,11 @@
  */
 package org.apache.storm.cassandra;
 
-import backtype.storm.generated.GlobalStreamId;
-import backtype.storm.grouping.CustomStreamGrouping;
-import backtype.storm.task.WorkerTopologyContext;
-import backtype.storm.topology.FailedException;
-import backtype.storm.tuple.Fields;
+import org.apache.storm.generated.GlobalStreamId;
+import org.apache.storm.grouping.CustomStreamGrouping;
+import org.apache.storm.task.WorkerTopologyContext;
+import org.apache.storm.topology.FailedException;
+import org.apache.storm.tuple.Fields;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
 import com.google.common.hash.Hashing;
@@ -36,7 +36,7 @@ import java.util.List;
 
 /**
  *
- * Simple {@link backtype.storm.grouping.CustomStreamGrouping} that uses Murmur3 algorithm to choose the target task of a tuple.
+ * Simple {@link org.apache.storm.grouping.CustomStreamGrouping} that uses Murmur3 algorithm to choose the target task of a tuple.
  *
  * This stream grouping may be used to optimise writes to Apache Cassandra.
  */
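
The Javadoc above describes a grouping that chooses the target task with the Murmur3 hash. A hedged sketch of the core idea, using the Guava Hashing utility this class already imports; the actual Murmur3StreamGrouping may serialize the key fields and reduce the hash to a task index differently:

```java
import com.google.common.hash.Hashing;
import java.util.List;

public final class Murmur3TaskSelectionSketch {
    // Hedged sketch of the idea behind Murmur3StreamGrouping: hash the
    // serialized grouping key with Murmur3 and map the hash onto one of the
    // target task ids. The real grouping may differ in how the key is
    // serialized and how the hash is reduced to an index.
    public static int chooseTask(byte[] groupingKey, List<Integer> targetTasks) {
        long hash = Hashing.murmur3_128().hashBytes(groupingKey).asLong();
        int index = (int) Math.floorMod(hash, (long) targetTasks.size());
        return targetTasks.get(index);
    }
}
```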

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/bolt/BaseCassandraBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/bolt/BaseCassandraBolt.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/bolt/BaseCassandraBolt.java
index dafcb22..7891fe1 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/bolt/BaseCassandraBolt.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/bolt/BaseCassandraBolt.java
@@ -18,15 +18,15 @@
  */
 package org.apache.storm.cassandra.bolt;
 
-import backtype.storm.Config;
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseRichBolt;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.utils.TupleUtils;
-import backtype.storm.utils.Utils;
+import org.apache.storm.Config;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseRichBolt;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.utils.TupleUtils;
+import org.apache.storm.utils.Utils;
 import com.datastax.driver.core.Session;
 import com.datastax.driver.core.exceptions.NoHostAvailableException;
 import org.apache.storm.cassandra.BaseExecutionResultHandler;
@@ -48,7 +48,7 @@ import java.util.Map;
 /**
  * A base cassandra bolt.
  *
- * Default {@link backtype.storm.topology.base.BaseRichBolt}
+ * Default {@link org.apache.storm.topology.base.BaseRichBolt}
  */
 public abstract class BaseCassandraBolt<T> extends BaseRichBolt {
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/bolt/BatchCassandraWriterBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/bolt/BatchCassandraWriterBolt.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/bolt/BatchCassandraWriterBolt.java
index fd597df..286edf8 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/bolt/BatchCassandraWriterBolt.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/bolt/BatchCassandraWriterBolt.java
@@ -18,11 +18,11 @@
  */
 package org.apache.storm.cassandra.bolt;
 
-import backtype.storm.Config;
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.utils.Time;
+import org.apache.storm.Config;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.utils.Time;
 import com.datastax.driver.core.Statement;
 import org.apache.storm.cassandra.executor.AsyncResultHandler;
 import org.apache.storm.cassandra.executor.impl.BatchAsyncResultHandler;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/bolt/CassandraWriterBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/bolt/CassandraWriterBolt.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/bolt/CassandraWriterBolt.java
index 19097f2..3d1229e 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/bolt/CassandraWriterBolt.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/bolt/CassandraWriterBolt.java
@@ -18,7 +18,7 @@
  */
 package org.apache.storm.cassandra.bolt;
 
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.tuple.Tuple;
 import com.datastax.driver.core.Statement;
 import org.apache.storm.cassandra.executor.AsyncResultHandler;
 import org.apache.storm.cassandra.executor.impl.SingleAsyncResultHandler;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/bolt/GroupingBatchBuilder.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/bolt/GroupingBatchBuilder.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/bolt/GroupingBatchBuilder.java
index ea63b3d..fdafd50 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/bolt/GroupingBatchBuilder.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/bolt/GroupingBatchBuilder.java
@@ -18,7 +18,7 @@
  */
 package org.apache.storm.cassandra.bolt;
 
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.tuple.Tuple;
 import com.datastax.driver.core.BatchStatement;
 import com.google.common.base.Function;
 import com.google.common.collect.Iterables;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/bolt/PairBatchStatementTuples.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/bolt/PairBatchStatementTuples.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/bolt/PairBatchStatementTuples.java
index 736c482..cef422e 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/bolt/PairBatchStatementTuples.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/bolt/PairBatchStatementTuples.java
@@ -18,7 +18,7 @@
  */
 package org.apache.storm.cassandra.bolt;
 
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.tuple.Tuple;
 import com.datastax.driver.core.BatchStatement;
 
 import java.util.List;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/bolt/PairStatementTuple.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/bolt/PairStatementTuple.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/bolt/PairStatementTuple.java
index 8f50574..0f501a3 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/bolt/PairStatementTuple.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/bolt/PairStatementTuple.java
@@ -18,7 +18,7 @@
  */
 package org.apache.storm.cassandra.bolt;
 
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.tuple.Tuple;
 import com.datastax.driver.core.Statement;
 
 /**

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/client/CassandraConf.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/client/CassandraConf.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/client/CassandraConf.java
index 9201801..0f0de53 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/client/CassandraConf.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/client/CassandraConf.java
@@ -18,7 +18,7 @@
  */
 package org.apache.storm.cassandra.client;
 
-import backtype.storm.utils.Utils;
+import org.apache.storm.utils.Utils;
 import com.datastax.driver.core.ConsistencyLevel;
 import com.datastax.driver.core.policies.DefaultRetryPolicy;
 import com.datastax.driver.core.policies.DowngradingConsistencyRetryPolicy;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/AsyncResultHandler.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/AsyncResultHandler.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/AsyncResultHandler.java
index 9b51696..f827d45 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/AsyncResultHandler.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/AsyncResultHandler.java
@@ -18,7 +18,7 @@
  */
 package org.apache.storm.cassandra.executor;
 
-import backtype.storm.task.OutputCollector;
+import org.apache.storm.task.OutputCollector;
 
 import java.io.Serializable;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/ExecutionResultCollector.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/ExecutionResultCollector.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/ExecutionResultCollector.java
index d0f5e1d..882aeb4 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/ExecutionResultCollector.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/ExecutionResultCollector.java
@@ -18,8 +18,8 @@
  */
 package org.apache.storm.cassandra.executor;
 
-import backtype.storm.task.OutputCollector;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.tuple.Tuple;
 import com.google.common.collect.Lists;
 import org.apache.storm.cassandra.ExecutionResultHandler;
 
@@ -53,7 +53,7 @@ public interface ExecutionResultCollector {
         }
 
         /**
-         * Calls {@link ExecutionResultHandler#onQuerySuccess(backtype.storm.task.OutputCollector, backtype.storm.tuple.Tuple)} before
+         * Calls {@link ExecutionResultHandler#onQuerySuccess(org.apache.storm.task.OutputCollector, org.apache.storm.tuple.Tuple)} before
          * acknowledging an single input tuple.
          */
         @Override
@@ -88,7 +88,7 @@ public interface ExecutionResultCollector {
         }
 
         /**
-         * Calls {@link ExecutionResultHandler#onThrowable(Throwable, backtype.storm.task.OutputCollector, backtype.storm.tuple.Tuple)} .
+         * Calls {@link ExecutionResultHandler#onThrowable(Throwable, org.apache.storm.task.OutputCollector, org.apache.storm.tuple.Tuple)} .
          */
         @Override
         public void handle(OutputCollector collector, ExecutionResultHandler handler) {

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/impl/BatchAsyncResultHandler.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/impl/BatchAsyncResultHandler.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/impl/BatchAsyncResultHandler.java
index c81da8c..f7a8fcc 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/impl/BatchAsyncResultHandler.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/impl/BatchAsyncResultHandler.java
@@ -18,8 +18,8 @@
  */
 package org.apache.storm.cassandra.executor.impl;
 
-import backtype.storm.task.OutputCollector;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.tuple.Tuple;
 import org.apache.storm.cassandra.ExecutionResultHandler;
 import org.apache.storm.cassandra.executor.AsyncResultHandler;
 import org.apache.storm.cassandra.executor.ExecutionResultCollector;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/impl/SingleAsyncResultHandler.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/impl/SingleAsyncResultHandler.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/impl/SingleAsyncResultHandler.java
index 62a5a3b..ac79543 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/impl/SingleAsyncResultHandler.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/impl/SingleAsyncResultHandler.java
@@ -18,8 +18,8 @@
  */
 package org.apache.storm.cassandra.executor.impl;
 
-import backtype.storm.task.OutputCollector;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.tuple.Tuple;
 import org.apache.storm.cassandra.ExecutionResultHandler;
 import org.apache.storm.cassandra.executor.AsyncResultHandler;
 import org.apache.storm.cassandra.executor.ExecutionResultCollector;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/BaseCQLStatementTupleMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/BaseCQLStatementTupleMapper.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/BaseCQLStatementTupleMapper.java
index c9ba6fa..3bc0f08 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/BaseCQLStatementTupleMapper.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/BaseCQLStatementTupleMapper.java
@@ -18,7 +18,7 @@
  */
 package org.apache.storm.cassandra.query;
 
-import backtype.storm.tuple.ITuple;
+import org.apache.storm.tuple.ITuple;
 import com.datastax.driver.core.Session;
 import com.datastax.driver.core.Statement;
 
@@ -28,7 +28,7 @@ import java.util.List;
 import java.util.Map;
 
 /**
- * Default interface to map a {@link backtype.storm.tuple.ITuple} to a CQL {@link com.datastax.driver.core.Statement}.
+ * Default interface to map a {@link org.apache.storm.tuple.ITuple} to a CQL {@link com.datastax.driver.core.Statement}.
  *
  */
 public abstract class BaseCQLStatementTupleMapper implements CQLStatementTupleMapper, Serializable {

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/CQLResultSetValuesMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/CQLResultSetValuesMapper.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/CQLResultSetValuesMapper.java
index 80b1173..d5495fb 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/CQLResultSetValuesMapper.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/CQLResultSetValuesMapper.java
@@ -18,8 +18,8 @@
  */
 package org.apache.storm.cassandra.query;
 
-import backtype.storm.tuple.ITuple;
-import backtype.storm.tuple.Values;
+import org.apache.storm.tuple.ITuple;
+import org.apache.storm.tuple.Values;
 import com.datastax.driver.core.Session;
 import com.datastax.driver.core.Statement;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/CQLStatementTupleMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/CQLStatementTupleMapper.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/CQLStatementTupleMapper.java
index fc960dd..d82f8a2 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/CQLStatementTupleMapper.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/CQLStatementTupleMapper.java
@@ -18,7 +18,7 @@
  */
 package org.apache.storm.cassandra.query;
 
-import backtype.storm.tuple.ITuple;
+import org.apache.storm.tuple.ITuple;
 import com.datastax.driver.core.Session;
 import com.datastax.driver.core.Statement;
 
@@ -28,7 +28,7 @@ import java.util.List;
 import java.util.Map;
 
 /**
- * Default interface to map a {@link backtype.storm.tuple.ITuple} to a CQL {@link com.datastax.driver.core.Statement}.
+ * Default interface to map a {@link org.apache.storm.tuple.ITuple} to a CQL {@link com.datastax.driver.core.Statement}.
  */
 public interface CQLStatementTupleMapper extends Serializable {
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/ContextQuery.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/ContextQuery.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/ContextQuery.java
index d0a5491..58f0213 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/ContextQuery.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/ContextQuery.java
@@ -18,7 +18,7 @@
  */
 package org.apache.storm.cassandra.query;
 
-import backtype.storm.tuple.ITuple;
+import org.apache.storm.tuple.ITuple;
 
 import java.io.Serializable;
 import java.util.Map;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/CqlMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/CqlMapper.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/CqlMapper.java
index 2ab8f92..c497f3e 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/CqlMapper.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/CqlMapper.java
@@ -18,7 +18,7 @@
  */
 package org.apache.storm.cassandra.query;
 
-import backtype.storm.tuple.ITuple;
+import org.apache.storm.tuple.ITuple;
 import org.apache.storm.cassandra.query.selector.FieldSelector;
 
 import java.io.Serializable;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/impl/BatchCQLStatementTupleMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/impl/BatchCQLStatementTupleMapper.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/impl/BatchCQLStatementTupleMapper.java
index fe948f5..43077e2 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/impl/BatchCQLStatementTupleMapper.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/impl/BatchCQLStatementTupleMapper.java
@@ -18,7 +18,7 @@
  */
 package org.apache.storm.cassandra.query.impl;
 
-import backtype.storm.tuple.ITuple;
+import org.apache.storm.tuple.ITuple;
 import com.datastax.driver.core.BatchStatement;
 import com.datastax.driver.core.Session;
 import com.datastax.driver.core.Statement;
@@ -55,4 +55,4 @@ public class BatchCQLStatementTupleMapper implements CQLStatementTupleMapper {
             batch.addAll(m.map(conf, session, tuple));
         return Arrays.asList((Statement)batch);
     }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/impl/BoundCQLStatementTupleMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/impl/BoundCQLStatementTupleMapper.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/impl/BoundCQLStatementTupleMapper.java
index 8cce418..dbef606 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/impl/BoundCQLStatementTupleMapper.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/impl/BoundCQLStatementTupleMapper.java
@@ -18,7 +18,7 @@
  */
 package org.apache.storm.cassandra.query.impl;
 
-import backtype.storm.tuple.ITuple;
+import org.apache.storm.tuple.ITuple;
 import com.datastax.driver.core.PreparedStatement;
 import com.datastax.driver.core.Session;
 import com.datastax.driver.core.Statement;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/impl/RoutingKeyGenerator.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/impl/RoutingKeyGenerator.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/impl/RoutingKeyGenerator.java
index 3f4f47e..57a6689 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/impl/RoutingKeyGenerator.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/impl/RoutingKeyGenerator.java
@@ -18,7 +18,7 @@
  */
 package org.apache.storm.cassandra.query.impl;
 
-import backtype.storm.tuple.ITuple;
+import org.apache.storm.tuple.ITuple;
 import com.datastax.driver.core.DataType;
 import com.datastax.driver.core.ProtocolVersion;
 import com.google.common.base.Preconditions;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/impl/SimpleCQLStatementMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/impl/SimpleCQLStatementMapper.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/impl/SimpleCQLStatementMapper.java
index ab9adbf..2825c0a 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/impl/SimpleCQLStatementMapper.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/impl/SimpleCQLStatementMapper.java
@@ -18,7 +18,7 @@
  */
 package org.apache.storm.cassandra.query.impl;
 
-import backtype.storm.tuple.ITuple;
+import org.apache.storm.tuple.ITuple;
 import com.datastax.driver.core.Session;
 import com.datastax.driver.core.SimpleStatement;
 import com.datastax.driver.core.Statement;
@@ -85,4 +85,4 @@ public class SimpleCQLStatementMapper implements CQLStatementTupleMapper {
     private boolean hasRoutingKeys() {
         return rkGenerator != null;
     }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/selector/FieldSelector.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/selector/FieldSelector.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/selector/FieldSelector.java
index bba7fb5..835a3e8 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/selector/FieldSelector.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/selector/FieldSelector.java
@@ -18,7 +18,7 @@
  */
 package org.apache.storm.cassandra.query.selector;
 
-import backtype.storm.tuple.ITuple;
+import org.apache.storm.tuple.ITuple;
 import com.datastax.driver.core.utils.UUIDs;
 import org.apache.storm.cassandra.query.Column;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/trident/state/CassandraQuery.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/trident/state/CassandraQuery.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/trident/state/CassandraQuery.java
index 085cbca..ca4416d 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/trident/state/CassandraQuery.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/trident/state/CassandraQuery.java
@@ -18,10 +18,10 @@
  */
 package org.apache.storm.cassandra.trident.state;
 
-import backtype.storm.tuple.Values;
-import storm.trident.operation.TridentCollector;
-import storm.trident.state.BaseQueryFunction;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.state.BaseQueryFunction;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/trident/state/CassandraState.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/trident/state/CassandraState.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/trident/state/CassandraState.java
index b807a60..937b8c9 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/trident/state/CassandraState.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/trident/state/CassandraState.java
@@ -18,8 +18,8 @@
  */
 package org.apache.storm.cassandra.trident.state;
 
-import backtype.storm.topology.FailedException;
-import backtype.storm.tuple.Values;
+import org.apache.storm.topology.FailedException;
+import org.apache.storm.tuple.Values;
 import com.datastax.driver.core.BatchStatement;
 import com.datastax.driver.core.Session;
 import com.datastax.driver.core.Statement;
@@ -30,9 +30,9 @@ import org.apache.storm.cassandra.query.CQLResultSetValuesMapper;
 import org.apache.storm.cassandra.query.CQLStatementTupleMapper;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import storm.trident.operation.TridentCollector;
-import storm.trident.state.State;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.state.State;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.io.Serializable;
 import java.util.ArrayList;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/trident/state/CassandraStateFactory.java
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/trident/state/CassandraStateFactory.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/trident/state/CassandraStateFactory.java
index ceaa11d..697f15f 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/trident/state/CassandraStateFactory.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/trident/state/CassandraStateFactory.java
@@ -18,12 +18,12 @@
  */
 package org.apache.storm.cassandra.trident.state;
 
-import backtype.storm.task.IMetricsContext;
+import org.apache.storm.task.IMetricsContext;
 import org.apache.storm.cassandra.CassandraContext;
 import org.apache.storm.cassandra.query.CQLResultSetValuesMapper;
 import org.apache.storm.cassandra.query.CQLStatementTupleMapper;
-import storm.trident.state.State;
-import storm.trident.state.StateFactory;
+import org.apache.storm.trident.state.State;
+import org.apache.storm.trident.state.StateFactory;
 
 import java.util.Map;
 


[52/53] [abbrv] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatibility

Posted by bo...@apache.org.
STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatibility


Project: http://git-wip-us.apache.org/repos/asf/storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/storm/commit/b5fd753f
Tree: http://git-wip-us.apache.org/repos/asf/storm/tree/b5fd753f
Diff: http://git-wip-us.apache.org/repos/asf/storm/diff/b5fd753f

Branch: refs/heads/master
Commit: b5fd753f362c8c0737c1a205ba0d5faad08d4765
Parents: d839d1b
Author: Robert (Bobby) Evans <ev...@yahoo-inc.com>
Authored: Mon Jan 11 14:43:28 2016 -0600
Committer: Robert (Bobby) Evans <ev...@yahoo-inc.com>
Committed: Mon Jan 11 14:43:28 2016 -0600

----------------------------------------------------------------------
 dev-tools/cleanup.sh               | 21 ----------
 dev-tools/move_package.sh          | 73 ---------------------------------
 dev-tools/travis/travis-install.sh |  3 +-
 3 files changed, 1 insertion(+), 96 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/storm/blob/b5fd753f/dev-tools/cleanup.sh
----------------------------------------------------------------------
diff --git a/dev-tools/cleanup.sh b/dev-tools/cleanup.sh
deleted file mode 100755
index 6ff75eb..0000000
--- a/dev-tools/cleanup.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/sh
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-BASE="$1"
-git reset HEAD -- "$BASE"
-git checkout -- "$BASE"
-git clean -fdx

http://git-wip-us.apache.org/repos/asf/storm/blob/b5fd753f/dev-tools/move_package.sh
----------------------------------------------------------------------
diff --git a/dev-tools/move_package.sh b/dev-tools/move_package.sh
deleted file mode 100755
index a2bfd17..0000000
--- a/dev-tools/move_package.sh
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/sh -x
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-set -u
-set -e
-
-BASE="$1"
-find "$BASE" -type f -print0 | xargs -0 egrep -l 'backtype.storm|storm.trident|storm.starter|storm.kafka|"backtype", "storm"' | egrep -v '.git/|docs/|CHANGELOG.md|dev-tools/move_package.sh|StormShadeRequest.java' | xargs sed -i.back -e 's/storm\(.\)trident/org\1apache\1storm\1trident/g' -e 's/backtype\(.\)storm/org\1apache\1storm/g' -e 's/storm\([\.\\]\)starter/org\1apache\1storm\1starter/g' -e 's/storm\([\.\\]\)kafka/org\1apache\1storm\1kafka/g' -e 's/"backtype", "storm"/"org", "apache", "storm"/g'
-find "$BASE" -iname \*.back | xargs rm
-mkdir -p "$BASE"/storm-core/src/jvm/org/apache/storm/ "$BASE"/storm-core/src/clj/org/apache/storm/ "$BASE"/storm-core/test/jvm/org/apache/storm/ "$BASE"/storm-core/test/clj/org/apache/storm/ "$BASE"/storm-core/test/clj/integration/org/apache/storm/
-#STORM-CORE
-#SRC JVM
-git mv "$BASE"/storm-core/src/jvm/backtype/storm/* "$BASE"/storm-core/src/jvm/org/apache/storm/
-rm -rf "$BASE"/storm-core/src/jvm/backtype
-git mv "$BASE"/storm-core/src/jvm/storm/trident "$BASE"/storm-core/src/jvm/org/apache/storm
-rm -rf "$BASE"/storm-core/src/jvm/storm
-
-#SRC CLJ
-git mv "$BASE"/storm-core/src/clj/backtype/storm/* "$BASE"/storm-core/src/clj/org/apache/storm/
-rm -rf "$BASE"/storm-core/src/clj/backtype
-git mv "$BASE"/storm-core/src/clj/storm/trident "$BASE"/storm-core/src/clj/org/apache/storm
-rm -rf "$BASE"/storm-core/src/clj/storm
-
-#TEST JVM
-git mv "$BASE"/storm-core/test/jvm/backtype/storm/* "$BASE"/storm-core/test/jvm/org/apache/storm/
-rm -rf "$BASE"/storm-core/test/jvm/backtype
-#git mv "$BASE"/storm-core/test/jvm/storm/trident "$BASE"/storm-core/test/jvm/org/apache/storm
-#rm -rf "$BASE"/storm-core/test/jvm/storm
-
-#TEST CLJ
-git mv "$BASE"/storm-core/test/clj/backtype/storm/* "$BASE"/storm-core/test/clj/org/apache/storm/
-rm -rf "$BASE"/storm-core/test/clj/backtype
-git mv "$BASE"/storm-core/test/clj/storm/trident "$BASE"/storm-core/test/clj/org/apache/storm
-rm -rf "$BASE"/storm-core/test/clj/storm
-git mv "$BASE"/storm-core/test/clj/integration/storm/* "$BASE"/storm-core/test/clj/integration/org/apache/storm
-rm -rf "$BASE"/storm-core/test/clj/integration/storm
-git mv "$BASE"/storm-core/test/clj/integration/backtype/storm/* "$BASE"/storm-core/test/clj/integration/org/apache/storm
-rm -rf "$BASE"/storm-core/test/clj/integration/backtype
-
-#STORM-STARTER
-mkdir -p "$BASE"/examples/storm-starter/src/jvm/org/apache/ "$BASE"/examples/storm-starter/src/clj/org/apache/ "$BASE"/examples/storm-starter/test/jvm/org/apache/
-#SRC JVM
-git mv "$BASE"/examples/storm-starter/src/jvm/storm "$BASE"/examples/storm-starter/src/jvm/org/apache/
-
-#SRC CLJ
-git mv "$BASE"/examples/storm-starter/src/clj/storm "$BASE"/examples/storm-starter/src/clj/org/apache/
-
-#TEST JVM
-git mv "$BASE"/examples/storm-starter/test/jvm/storm "$BASE"/examples/storm-starter/test/jvm/org/apache/
-
-
-#STORM-KAFKA
-mkdir -p "$BASE"/external/storm-kafka/src/jvm/org/apache/ "$BASE"/external/storm-kafka/src/test/org/apache/
-
-#SRC JVM
-git mv "$BASE"/external/storm-kafka/src/jvm/storm "$BASE"/external/storm-kafka/src/jvm/org/apache/
-
-#TEST JVM
-git mv "$BASE"/external/storm-kafka/src/test/storm "$BASE"/external/storm-kafka/src/test/org/apache/
-

http://git-wip-us.apache.org/repos/asf/storm/blob/b5fd753f/dev-tools/travis/travis-install.sh
----------------------------------------------------------------------
diff --git a/dev-tools/travis/travis-install.sh b/dev-tools/travis/travis-install.sh
index 62bd666..5a7ec50 100755
--- a/dev-tools/travis/travis-install.sh
+++ b/dev-tools/travis/travis-install.sh
@@ -1,4 +1,4 @@
-#!/bin/bash -x
+#!/bin/bash
 #  Licensed under the Apache License, Version 2.0 (the "License");
 #  you may not use this file except in compliance with the License.
 #  You may obtain a copy of the License at
@@ -19,7 +19,6 @@ STORM_SRC_ROOT_DIR=$1
 TRAVIS_SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
 
 cd ${STORM_SRC_ROOT_DIR}
-./dev-tools/move_package.sh "${STORM_SRC_ROOT_DIR}" || exit 1
 python ${TRAVIS_SCRIPT_DIR}/save-logs.py "install.txt" mvn clean install -DskipTests -Pnative --batch-mode
 BUILD_RET_VAL=$?
 


[08/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/cluster/ClusterState.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/cluster/ClusterState.java b/storm-core/src/jvm/backtype/storm/cluster/ClusterState.java
deleted file mode 100644
index 1960371..0000000
--- a/storm-core/src/jvm/backtype/storm/cluster/ClusterState.java
+++ /dev/null
@@ -1,217 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.cluster;
-
-import clojure.lang.APersistentMap;
-import clojure.lang.IFn;
-import java.util.List;
-import org.apache.zookeeper.data.ACL;
-
-/**
- * ClusterState provides the API for the pluggable state store used by the
- * Storm daemons. Data is stored in path/value format, and the store supports
- * listing sub-paths at a given path.
- * All data should be available across all nodes with eventual consistency.
- *
- * IMPORTANT NOTE:
- * Heartbeats have different api calls used to interact with them. The root
- * path (/) may or may not be the same as the root path for the other api calls.
- *
- * For example, performing these two calls:
- *     set_data("/path", data, acls);
- *     void set_worker_hb("/path", heartbeat, acls);
- * may or may not cause a collision in "/path".
- * Never use the same paths with the *_hb* methods as you do with the others.
- */
-public interface ClusterState {
-
-    /**
-     * Registers a callback function that gets called when CuratorEvents happen.
-     * @param callback is a clojure IFn that accepts the type - translated to
-     * clojure keyword as in zookeeper.clj - and the path: (callback type path)
-     * @return is an id that can be passed to unregister(...) to unregister the
-     * callback.
-     */
-    String register(IFn callback);
-
-    /**
-     * Unregisters a callback function that was registered with register(...).
-     * @param id is the String id that was returned from register(...).
-     */
-    void unregister(String id);
-
-    /**
-     * Path will be appended with a monotonically increasing integer, a new node
-     * will be created there, and data will be put at that node.
-     * @param path The path that the monotonically increasing integer suffix will
-     * be added to.
-     * @param data The data that will be written at the suffixed path's node.
-     * @param acls The acls to apply to the path. May be null.
-     * @return The path with the integer suffix appended.
-     */
-    String create_sequential(String path, byte[] data, List<ACL> acls);
-
-    /**
-     * Creates nodes for path and all its parents. Path elements are separated by
-     * a "/", as in *nix filesystem notation. Equivalent to mkdir -p in *nix.
-     * @param path The path to create, along with all its parents.
-     * @param acls The acls to apply to the path. May be null.
-     * @return path
-     */
-    String mkdirs(String path, List<ACL> acls);
-
-    /**
-     * Deletes the node at a given path, and any child nodes that may exist.
-     * @param path The path to delete
-     */
-    void delete_node(String path);
-
-    /**
-     * Creates an ephemeral node at path. Ephemeral nodes are destroyed
-     * by the store when the client disconnects.
-     * @param path The path where a node will be created.
-     * @param data The data to be written at the node.
-     * @param acls The acls to apply to the path. May be null.
-     */
-    void set_ephemeral_node(String path, byte[] data, List<ACL> acls);
-
-    /**
-     * Gets the 'version' of the node at a path. Optionally sets a watch
-     * on that node. The version should increase whenever a write happens.
-     * @param path The path to get the version of.
-     * @param watch Whether or not to set a watch on the path. Watched paths
-     * emit events which are consumed by functions registered with the
-     * register method. Very useful for catching updates to nodes.
-     * @return The integer version of this node.
-     */
-    Integer get_version(String path, boolean watch);
-
-    /**
-     * Check if a node exists and optionally set a watch on the path.
-     * @param path The path to check for the existence of a node.
-     * @param watch Whether or not to set a watch on the path. Watched paths
-     * emit events which are consumed by functions registered with the
-     * register method. Very useful for catching updates to nodes.
-     * @return Whether or not a node exists at path.
-     */
-    boolean node_exists(String path, boolean watch);
-
-    /**
-     * Get a list of paths of all the child nodes which exist immediately
-     * under path.
-     * @param path The path to look under
-     * @param watch Whether or not to set a watch on the path. Watched paths
-     * emit events which are consumed by functions registered with the
-     * register method. Very useful for catching updates to nodes.
-     * @return list of string paths under path.
-     */
-    List<String> get_children(String path, boolean watch);
-
-    /**
-     * Close the connection to the data store.
-     */
-    void close();
-
-    /**
-     * Set the value of the node at path to data.
-     * @param path The path whose node we want to set.
-     * @param data The data to put in the node.
-     * @param acls The acls to apply to the path. May be null.
-     */
-    void set_data(String path, byte[] data, List<ACL> acls);
-
-    /**
-     * Get the data from the node at path
-     * @param path The path to look under
-     * @param watch Whether or not to set a watch on the path. Watched paths
-     * emit events which are consumed by functions registered with the
-     * register method. Very useful for catching updates to nodes.
-     * @return The data at the node.
-     */
-    byte[] get_data(String path, boolean watch);
-
-    /**
-     * Get the data at the node along with its version. Data is returned
-     * in an APersistentMap with clojure keyword keys :data and :version.
-     * @param path The path to look under
-     * @param watch Whether or not to set a watch on the path. Watched paths
-     * emit events which are consumed by functions registered with the
-     * register method. Very useful for catching updates to nodes.
-     * @return An APersistentMap in the form {:data data :version version}
-     */
-    APersistentMap get_data_with_version(String path, boolean watch);
-
-    /**
-     * Write a worker heartbeat at the path.
-     * @param path The path whose node we want to set.
-     * @param data The data to put in the node.
-     * @param acls The acls to apply to the path. May be null.
-     */
-    void set_worker_hb(String path, byte[] data, List<ACL> acls);
-
-    /**
-     * Get the heartbeat from the node at path
-     * @param path The path to look under
-     * @param watch Whether or not to set a watch on the path. Watched paths
-     * emit events which are consumed by functions registered with the
-     * register method. Very useful for catching updates to nodes.
-     * @return The heartbeat at the node.
-     */
-    byte[] get_worker_hb(String path, boolean watch);
-
-    /**
-     * Get a list of paths of all the child nodes which exist immediately
-     * under path. This is similar to get_children, but must be used for
-     * any nodes
-     * @param path The path to look under
-     * @param watch Whether or not to set a watch on the path. Watched paths
-     * emit events which are consumed by functions registered with the
-     * register method. Very useful for catching updates to nodes.
-     * @return list of string paths under path.
-     */
-    List<String> get_worker_hb_children(String path, boolean watch);
-
-    /**
-     * Deletes the heartbeat at a given path, and any child nodes that may exist.
-     * @param path The path to delete.
-     */
-    void delete_worker_hb(String path);
-
-    /**
-     * Add a ClusterStateListener to the connection.
-     * @param listener A ClusterStateListener to handle changing cluster state
-     * events.
-     */
-    void add_listener(ClusterStateListener listener);
-
-    /**
-     * Force consistency on a path. Any writes committed on the path before
-     * this call will be completely propagated when it returns.
-     * @param path The path to synchronize.
-     */
-    void sync_path(String path);
-
-    /**
-     * Allows us to delete the znodes within /storm/blobstore/key_name
-     * whose znodes start with the corresponding nimbusHostPortInfo
-     * @param path /storm/blobstore/key_name
-     * @param nimbusHostPortInfo Contains the host port information of
-     * a nimbus node.
-     */
-    void delete_node_blobstore(String path, String nimbusHostPortInfo);
-}
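
For reference, a short sketch of how a daemon-side caller might exercise this path/value API (using the relocated org.apache.storm.cluster names; the backtype.storm.cluster file above maps to them one-to-one). The "state" instance would come from a ClusterStateFactory; the paths and payload are made up for illustration.

import org.apache.storm.cluster.ClusterState;
import org.apache.storm.cluster.ClusterStateListener;
import org.apache.storm.cluster.ConnectionState;

import java.nio.charset.StandardCharsets;
import java.util.List;

// Illustrative only: basic usage of the pluggable state-store API.
public class ClusterStateExample {
    public static void dump(ClusterState state) {
        // Create the parent path; acls may be null per the interface docs.
        state.mkdirs("/example", null);

        // Write a value and read it back.
        state.set_data("/example/node", "hello".getBytes(StandardCharsets.UTF_8), null);
        byte[] raw = state.get_data("/example/node", false);
        System.out.println(new String(raw, StandardCharsets.UTF_8));

        // List children and set a watch so later updates fire callbacks.
        List<String> children = state.get_children("/example", true);
        System.out.println("children: " + children);

        // React to connection-state changes from the backing store.
        state.add_listener(new ClusterStateListener() {
            @Override
            public void stateChanged(ConnectionState newState) {
                System.out.println("connection is now " + newState);
            }
        });

        state.close();
    }
}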

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/cluster/ClusterStateContext.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/cluster/ClusterStateContext.java b/storm-core/src/jvm/backtype/storm/cluster/ClusterStateContext.java
deleted file mode 100644
index 5ccde23..0000000
--- a/storm-core/src/jvm/backtype/storm/cluster/ClusterStateContext.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package backtype.storm.cluster;
-
-/**
- * This class is intended to provide runtime-context to ClusterStateFactory
- * implementors, giving information such as what daemon is creating it.
- */
-public class ClusterStateContext {
-    
-    private DaemonType daemonType;
-
-    public ClusterStateContext() {
-        daemonType = DaemonType.UNKNOWN;
-    }
-    
-    public ClusterStateContext(DaemonType daemonType) {
-        this.daemonType = daemonType;
-    }
-    
-    public DaemonType getDaemonType() {
-        return daemonType;
-    }
-    
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/cluster/ClusterStateFactory.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/cluster/ClusterStateFactory.java b/storm-core/src/jvm/backtype/storm/cluster/ClusterStateFactory.java
deleted file mode 100644
index 1f946ee..0000000
--- a/storm-core/src/jvm/backtype/storm/cluster/ClusterStateFactory.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.cluster;
-
-import clojure.lang.APersistentMap;
-import java.util.List;
-import org.apache.zookeeper.data.ACL;
-
-public interface ClusterStateFactory {
-    
-    ClusterState mkState(APersistentMap config, APersistentMap auth_conf, List<ACL> acls, ClusterStateContext context);
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/cluster/ClusterStateListener.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/cluster/ClusterStateListener.java b/storm-core/src/jvm/backtype/storm/cluster/ClusterStateListener.java
deleted file mode 100644
index 22693f8..0000000
--- a/storm-core/src/jvm/backtype/storm/cluster/ClusterStateListener.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.cluster;
-
-public interface ClusterStateListener {
-    void stateChanged(ConnectionState newState);
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/cluster/ConnectionState.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/cluster/ConnectionState.java b/storm-core/src/jvm/backtype/storm/cluster/ConnectionState.java
deleted file mode 100644
index d6887da..0000000
--- a/storm-core/src/jvm/backtype/storm/cluster/ConnectionState.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.cluster;
-
-public enum ConnectionState {
-    CONNECTED,
-    RECONNECTED,
-    LOST
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/cluster/DaemonType.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/cluster/DaemonType.java b/storm-core/src/jvm/backtype/storm/cluster/DaemonType.java
deleted file mode 100644
index 684d0ef..0000000
--- a/storm-core/src/jvm/backtype/storm/cluster/DaemonType.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package backtype.storm.cluster;
-
-public enum DaemonType {
-    SUPERVISOR,
-    NIMBUS,
-    WORKER,
-    PACEMAKER,
-    UNKNOWN
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/coordination/BatchBoltExecutor.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/coordination/BatchBoltExecutor.java b/storm-core/src/jvm/backtype/storm/coordination/BatchBoltExecutor.java
deleted file mode 100644
index 55590d0..0000000
--- a/storm-core/src/jvm/backtype/storm/coordination/BatchBoltExecutor.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.coordination;
-
-import backtype.storm.coordination.CoordinatedBolt.FinishedCallback;
-import backtype.storm.coordination.CoordinatedBolt.TimeoutCallback;
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.FailedException;
-import backtype.storm.topology.IRichBolt;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.utils.Utils;
-import java.util.HashMap;
-import java.util.Map;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class BatchBoltExecutor implements IRichBolt, FinishedCallback, TimeoutCallback {
-    public static final Logger LOG = LoggerFactory.getLogger(BatchBoltExecutor.class);
-
-    byte[] _boltSer;
-    Map<Object, IBatchBolt> _openTransactions;
-    Map _conf;
-    TopologyContext _context;
-    BatchOutputCollectorImpl _collector;
-    
-    public BatchBoltExecutor(IBatchBolt bolt) {
-        _boltSer = Utils.javaSerialize(bolt);
-    }
-    
-    @Override
-    public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
-        _conf = conf;
-        _context = context;
-        _collector = new BatchOutputCollectorImpl(collector);
-        _openTransactions = new HashMap<>();
-    }
-
-    @Override
-    public void execute(Tuple input) {
-        Object id = input.getValue(0);
-        IBatchBolt bolt = getBatchBolt(id);
-        try {
-             bolt.execute(input);
-            _collector.ack(input);
-        } catch(FailedException e) {
-            LOG.error("Failed to process tuple in batch", e);
-            _collector.fail(input);                
-        }
-    }
-
-    @Override
-    public void cleanup() {
-    }
-
-    @Override
-    public void finishedId(Object id) {
-        IBatchBolt bolt = getBatchBolt(id);
-        _openTransactions.remove(id);
-        bolt.finishBatch();
-    }
-
-    @Override
-    public void timeoutId(Object attempt) {
-        _openTransactions.remove(attempt);        
-    }    
-    
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-        newTransactionalBolt().declareOutputFields(declarer);
-    }
-    
-    @Override
-    public Map<String, Object> getComponentConfiguration() {
-        return newTransactionalBolt().getComponentConfiguration();
-    }
-    
-    private IBatchBolt getBatchBolt(Object id) {
-        IBatchBolt bolt = _openTransactions.get(id);
-        if(bolt==null) {
-            bolt = newTransactionalBolt();
-            bolt.prepare(_conf, _context, _collector, id);
-            _openTransactions.put(id, bolt);            
-        }
-        return bolt;
-    }
-    
-    private IBatchBolt newTransactionalBolt() {
-        return Utils.javaDeserialize(_boltSer, IBatchBolt.class);
-    }
-}
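
A minimal batch bolt in the shape BatchBoltExecutor drives above might look like the following sketch: one instance is created per batch id, execute() is fed the batch's tuples, and finishBatch() emits the aggregate. It assumes the IBatchBolt contract implied by the calls above and the BatchOutputCollector shown below, using the relocated org.apache.storm packages; the class name is hypothetical.

import org.apache.storm.coordination.BatchOutputCollector;
import org.apache.storm.coordination.IBatchBolt;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;

import java.util.Map;

// Illustrative only: counts the tuples seen for one batch id and emits
// (id, count) when the batch is finished.
public class CountBatchBolt implements IBatchBolt<Object> {
    private BatchOutputCollector collector;
    private Object id;
    private int count = 0;

    @Override
    public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, Object id) {
        this.collector = collector;
        this.id = id;
    }

    @Override
    public void execute(Tuple tuple) {
        count++;
    }

    @Override
    public void finishBatch() {
        // Emits on the default stream via BatchOutputCollector.emit.
        collector.emit(new Values(id, count));
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("id", "count"));
    }

    @Override
    public Map<String, Object> getComponentConfiguration() {
        return null;
    }
}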

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/coordination/BatchOutputCollector.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/coordination/BatchOutputCollector.java b/storm-core/src/jvm/backtype/storm/coordination/BatchOutputCollector.java
deleted file mode 100644
index f5f3457..0000000
--- a/storm-core/src/jvm/backtype/storm/coordination/BatchOutputCollector.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.coordination;
-
-import backtype.storm.utils.Utils;
-import java.util.List;
-
-public abstract class BatchOutputCollector {
-
-    /**
-     * Emits a tuple to the default output stream.
-     */
-    public List<Integer> emit(List<Object> tuple) {
-        return emit(Utils.DEFAULT_STREAM_ID, tuple);
-    }
-
-    public abstract List<Integer> emit(String streamId, List<Object> tuple);
-    
-    /**
-     * Emits a tuple to the specified task on the default output stream. This output
-     * stream must have been declared as a direct stream, and the specified task must
-     * use a direct grouping on this stream to receive the message.
-     */
-    public void emitDirect(int taskId, List<Object> tuple) {
-        emitDirect(taskId, Utils.DEFAULT_STREAM_ID, tuple);
-    }
-    
-    public abstract void emitDirect(int taskId, String streamId, List<Object> tuple); 
-    
-    public abstract void reportError(Throwable error);
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/coordination/BatchOutputCollectorImpl.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/coordination/BatchOutputCollectorImpl.java b/storm-core/src/jvm/backtype/storm/coordination/BatchOutputCollectorImpl.java
deleted file mode 100644
index cae7560..0000000
--- a/storm-core/src/jvm/backtype/storm/coordination/BatchOutputCollectorImpl.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.coordination;
-
-import backtype.storm.task.OutputCollector;
-import backtype.storm.tuple.Tuple;
-import java.util.List;
-
-public class BatchOutputCollectorImpl extends BatchOutputCollector {
-    OutputCollector _collector;
-    
-    public BatchOutputCollectorImpl(OutputCollector collector) {
-        _collector = collector;
-    }
-    
-    @Override
-    public List<Integer> emit(String streamId, List<Object> tuple) {
-        return _collector.emit(streamId, tuple);
-    }
-
-    @Override
-    public void emitDirect(int taskId, String streamId, List<Object> tuple) {
-        _collector.emitDirect(taskId, streamId, tuple);
-    }
-
-    @Override
-    public void reportError(Throwable error) {
-        _collector.reportError(error);
-    }
-    
-    public void ack(Tuple tup) {
-        _collector.ack(tup);
-    }
-    
-    public void fail(Tuple tup) {
-        _collector.fail(tup);
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/coordination/BatchSubtopologyBuilder.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/coordination/BatchSubtopologyBuilder.java b/storm-core/src/jvm/backtype/storm/coordination/BatchSubtopologyBuilder.java
deleted file mode 100644
index 1dd1c9f..0000000
--- a/storm-core/src/jvm/backtype/storm/coordination/BatchSubtopologyBuilder.java
+++ /dev/null
@@ -1,447 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.coordination;
-
-import backtype.storm.Constants;
-import backtype.storm.coordination.CoordinatedBolt.SourceArgs;
-import backtype.storm.generated.GlobalStreamId;
-import backtype.storm.generated.Grouping;
-import backtype.storm.grouping.CustomStreamGrouping;
-import backtype.storm.grouping.PartialKeyGrouping;
-import backtype.storm.topology.BaseConfigurationDeclarer;
-import backtype.storm.topology.BasicBoltExecutor;
-import backtype.storm.topology.BoltDeclarer;
-import backtype.storm.topology.IBasicBolt;
-import backtype.storm.topology.IRichBolt;
-import backtype.storm.topology.InputDeclarer;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.tuple.Fields;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-public class BatchSubtopologyBuilder {
-    Map<String, Component> _bolts = new HashMap<String, Component>();
-    Component _masterBolt;
-    String _masterId;
-    
-    public BatchSubtopologyBuilder(String masterBoltId, IBasicBolt masterBolt, Number boltParallelism) {
-        Integer p = boltParallelism == null ? null : boltParallelism.intValue();
-        _masterBolt = new Component(new BasicBoltExecutor(masterBolt), p);
-        _masterId = masterBoltId;
-    }
-    
-    public BatchSubtopologyBuilder(String masterBoltId, IBasicBolt masterBolt) {
-        this(masterBoltId, masterBolt, null);
-    }
-    
-    public BoltDeclarer getMasterDeclarer() {
-        return new BoltDeclarerImpl(_masterBolt);
-    }
-        
-    public BoltDeclarer setBolt(String id, IBatchBolt bolt) {
-        return setBolt(id, bolt, null);
-    }
-    
-    public BoltDeclarer setBolt(String id, IBatchBolt bolt, Number parallelism) {
-        return setBolt(id, new BatchBoltExecutor(bolt), parallelism);
-    }     
-    
-    public BoltDeclarer setBolt(String id, IBasicBolt bolt) {
-        return setBolt(id, bolt, null);
-    }    
-    
-    public BoltDeclarer setBolt(String id, IBasicBolt bolt, Number parallelism) {
-        return setBolt(id, new BasicBoltExecutor(bolt), parallelism);
-    }
-    
-    private BoltDeclarer setBolt(String id, IRichBolt bolt, Number parallelism) {
-        Integer p = null;
-        if(parallelism!=null) p = parallelism.intValue();
-        Component component = new Component(bolt, p);
-        _bolts.put(id, component);
-        return new BoltDeclarerImpl(component);
-    }
-    
-    public void extendTopology(TopologyBuilder builder) {
-        BoltDeclarer declarer = builder.setBolt(_masterId, new CoordinatedBolt(_masterBolt.bolt), _masterBolt.parallelism);
-        for(InputDeclaration decl: _masterBolt.declarations) {
-            decl.declare(declarer);
-        }
-        for(Map conf: _masterBolt.componentConfs) {
-            declarer.addConfigurations(conf);
-        }
-        for(String id: _bolts.keySet()) {
-            Component component = _bolts.get(id);
-            Map<String, SourceArgs> coordinatedArgs = new HashMap<String, SourceArgs>();
-            for(String c: componentBoltSubscriptions(component)) {
-                SourceArgs source;
-                if(c.equals(_masterId)) {
-                    source = SourceArgs.single();
-                } else {
-                    source = SourceArgs.all();
-                }
-                coordinatedArgs.put(c, source);                    
-            }
-            
-
-            BoltDeclarer input = builder.setBolt(id,
-                                                  new CoordinatedBolt(component.bolt,
-                                                                      coordinatedArgs,
-                                                                      null),
-                                                  component.parallelism);
-            for(Map conf: component.componentConfs) {
-                input.addConfigurations(conf);
-            }
-            for(String c: componentBoltSubscriptions(component)) {
-                input.directGrouping(c, Constants.COORDINATED_STREAM_ID);
-            }
-            for(InputDeclaration d: component.declarations) {
-                d.declare(input);
-            }
-        }        
-    }
-        
-    private Set<String> componentBoltSubscriptions(Component component) {
-        Set<String> ret = new HashSet<String>();
-        for(InputDeclaration d: component.declarations) {
-            ret.add(d.getComponent());
-        }
-        return ret;
-    }
-
-    private static class Component {
-        public IRichBolt bolt;
-        public Integer parallelism;
-        public List<InputDeclaration> declarations = new ArrayList<InputDeclaration>();
-        public List<Map<String, Object>> componentConfs = new ArrayList<>();
-        
-        public Component(IRichBolt bolt, Integer parallelism) {
-            this.bolt = bolt;
-            this.parallelism = parallelism;
-        }
-    }
-    
-    private static interface InputDeclaration {
-        void declare(InputDeclarer declarer);
-        String getComponent();
-    }
-        
-    private static class BoltDeclarerImpl extends BaseConfigurationDeclarer<BoltDeclarer> implements BoltDeclarer {
-        Component _component;
-        
-        public BoltDeclarerImpl(Component component) {
-            _component = component;
-        }
-        
-        @Override
-        public BoltDeclarer fieldsGrouping(final String component, final Fields fields) {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(InputDeclarer declarer) {
-                    declarer.fieldsGrouping(component, fields);
-                }
-
-                @Override
-                public String getComponent() {
-                    return component;
-                }                
-            });
-            return this;
-        }
-
-        @Override
-        public BoltDeclarer fieldsGrouping(final String component, final String streamId, final Fields fields) {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(InputDeclarer declarer) {
-                    declarer.fieldsGrouping(component, streamId, fields);
-                }                
-
-                @Override
-                public String getComponent() {
-                    return component;
-                }                
-            });
-            return this;
-        }
-
-        @Override
-        public BoltDeclarer globalGrouping(final String component) {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(InputDeclarer declarer) {
-                    declarer.globalGrouping(component);
-                }                
-
-                @Override
-                public String getComponent() {
-                    return component;
-                }                
-            });
-            return this;
-        }
-
-        @Override
-        public BoltDeclarer globalGrouping(final String component, final String streamId) {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(InputDeclarer declarer) {
-                    declarer.globalGrouping(component, streamId);
-                }                
-
-                @Override
-                public String getComponent() {
-                    return component;
-                }                
-            });
-            return this;
-        }
-
-        @Override
-        public BoltDeclarer shuffleGrouping(final String component) {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(InputDeclarer declarer) {
-                    declarer.shuffleGrouping(component);
-                }                
-
-                @Override
-                public String getComponent() {
-                    return component;
-                }                
-            });
-            return this;
-        }
-
-        @Override
-        public BoltDeclarer shuffleGrouping(final String component, final String streamId) {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(InputDeclarer declarer) {
-                    declarer.shuffleGrouping(component, streamId);
-                }                
-
-                @Override
-                public String getComponent() {
-                    return component;
-                }                
-            });
-            return this;
-        }
-
-        @Override
-        public BoltDeclarer localOrShuffleGrouping(final String component) {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(InputDeclarer declarer) {
-                    declarer.localOrShuffleGrouping(component);
-                }                
-
-                @Override
-                public String getComponent() {
-                    return component;
-                }                
-            });
-            return this;
-        }
-
-        @Override
-        public BoltDeclarer localOrShuffleGrouping(final String component, final String streamId) {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(InputDeclarer declarer) {
-                    declarer.localOrShuffleGrouping(component, streamId);
-                }                
-
-                @Override
-                public String getComponent() {
-                    return component;
-                }                
-            });
-            return this;
-        }
-        
-        @Override
-        public BoltDeclarer noneGrouping(final String component) {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(InputDeclarer declarer) {
-                    declarer.noneGrouping(component);
-                }                
-
-                @Override
-                public String getComponent() {
-                    return component;
-                }                
-            });
-            return this;
-        }
-
-        @Override
-        public BoltDeclarer noneGrouping(final String component, final String streamId) {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(InputDeclarer declarer) {
-                    declarer.noneGrouping(component, streamId);
-                }                
-
-                @Override
-                public String getComponent() {
-                    return component;
-                }                
-            });
-            return this;
-        }
-
-        @Override
-        public BoltDeclarer allGrouping(final String component) {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(InputDeclarer declarer) {
-                    declarer.allGrouping(component);
-                }                
-
-                @Override
-                public String getComponent() {
-                    return component;
-                }                
-            });
-            return this;
-        }
-
-        @Override
-        public BoltDeclarer allGrouping(final String component, final String streamId) {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(InputDeclarer declarer) {
-                    declarer.allGrouping(component, streamId);
-                }                
-
-                @Override
-                public String getComponent() {
-                    return component;
-                }                
-            });
-            return this;
-        }
-
-        @Override
-        public BoltDeclarer directGrouping(final String component) {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(InputDeclarer declarer) {
-                    declarer.directGrouping(component);
-                }                
-
-                @Override
-                public String getComponent() {
-                    return component;
-                }                
-            });
-            return this;
-        }
-
-        @Override
-        public BoltDeclarer directGrouping(final String component, final String streamId) {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(InputDeclarer declarer) {
-                    declarer.directGrouping(component, streamId);
-                }                
-
-                @Override
-                public String getComponent() {
-                    return component;
-                }                
-            });
-            return this;
-        }
-
-        @Override
-        public BoltDeclarer partialKeyGrouping(String componentId, Fields fields) {
-            return customGrouping(componentId, new PartialKeyGrouping(fields));
-        }
-
-        @Override
-        public BoltDeclarer partialKeyGrouping(String componentId, String streamId, Fields fields) {
-            return customGrouping(componentId, streamId, new PartialKeyGrouping(fields));
-        }
-        
-        @Override
-        public BoltDeclarer customGrouping(final String component, final CustomStreamGrouping grouping) {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(InputDeclarer declarer) {
-                    declarer.customGrouping(component, grouping);
-                }                
-
-                @Override
-                public String getComponent() {
-                    return component;
-                }                
-            });
-            return this;        
-        }
-
-        @Override
-        public BoltDeclarer customGrouping(final String component, final String streamId, final CustomStreamGrouping grouping) {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(InputDeclarer declarer) {
-                    declarer.customGrouping(component, streamId, grouping);
-                }                
-
-                @Override
-                public String getComponent() {
-                    return component;
-                }                
-            });
-            return this;
-        }
-
-        @Override
-        public BoltDeclarer grouping(final GlobalStreamId stream, final Grouping grouping) {
-            addDeclaration(new InputDeclaration() {
-                @Override
-                public void declare(InputDeclarer declarer) {
-                    declarer.grouping(stream, grouping);
-                }                
-
-                @Override
-                public String getComponent() {
-                    return stream.get_componentId();
-                }                
-            });
-            return this;
-        }
-        
-        private void addDeclaration(InputDeclaration declaration) {
-            _component.declarations.add(declaration);
-        }
-
-        @Override
-        public BoltDeclarer addConfigurations(Map<String, Object> conf) {
-            _component.componentConfs.add(conf);
-            return this;
-        }
-    }
-}
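
For illustration, wiring a batch sub-topology into an ordinary topology with this builder could look like the sketch below (relocated org.apache.storm names). IdentityBolt is a made-up master bolt, CountBatchBolt refers to the earlier sketch, and TestWordSpout comes from storm-core's testing package; none of these are part of the patch.

import org.apache.storm.coordination.BatchSubtopologyBuilder;
import org.apache.storm.testing.TestWordSpout;
import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.topology.base.BaseBasicBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;

// Illustrative only: the master bolt tags each tuple with a batch id,
// downstream batch bolts are coordinated per id by extendTopology().
public class BatchSubtopologyExample {

    // Trivial master bolt: uses a constant batch id of 1 for every tuple.
    public static class IdentityBolt extends BaseBasicBolt {
        @Override
        public void execute(Tuple input, BasicOutputCollector collector) {
            collector.emit(new Values(1, input.getString(0)));
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            declarer.declare(new Fields("id", "word"));
        }
    }

    public static void main(String[] args) {
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("spout", new TestWordSpout());

        BatchSubtopologyBuilder batch = new BatchSubtopologyBuilder("master", new IdentityBolt(), 2);
        batch.getMasterDeclarer().shuffleGrouping("spout");
        batch.setBolt("count", new CountBatchBolt(), 4)        // from the earlier sketch
             .fieldsGrouping("master", new Fields("word"));

        // Wraps the declared bolts in CoordinatedBolts and registers them,
        // together with the coordination streams, on the real builder.
        batch.extendTopology(builder);
    }
}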

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/coordination/CoordinatedBolt.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/coordination/CoordinatedBolt.java b/storm-core/src/jvm/backtype/storm/coordination/CoordinatedBolt.java
deleted file mode 100644
index c3a428c..0000000
--- a/storm-core/src/jvm/backtype/storm/coordination/CoordinatedBolt.java
+++ /dev/null
@@ -1,382 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.coordination;
-
-import backtype.storm.topology.FailedException;
-import java.util.Map.Entry;
-import backtype.storm.tuple.Values;
-import backtype.storm.generated.GlobalStreamId;
-import java.util.Collection;
-import backtype.storm.Constants;
-import backtype.storm.generated.Grouping;
-import backtype.storm.task.IOutputCollector;
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.IRichBolt;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.utils.TimeCacheMap;
-import backtype.storm.utils.Utils;
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import static backtype.storm.utils.Utils.get;
-
-/**
- * Coordination requires the request ids to be globally unique for awhile. This is so it doesn't get confused
- * in the case of retries.
- */
-public class CoordinatedBolt implements IRichBolt {
-    public static final Logger LOG = LoggerFactory.getLogger(CoordinatedBolt.class);
-
-    public static interface FinishedCallback {
-        void finishedId(Object id);
-    }
-
-    public static interface TimeoutCallback {
-        void timeoutId(Object id);
-    }
-    
-    
-    public static class SourceArgs implements Serializable {
-        public boolean singleCount;
-
-        protected SourceArgs(boolean singleCount) {
-            this.singleCount = singleCount;
-        }
-
-        public static SourceArgs single() {
-            return new SourceArgs(true);
-        }
-
-        public static SourceArgs all() {
-            return new SourceArgs(false);
-        }
-        
-        @Override
-        public String toString() {
-            return "<Single: " + singleCount + ">";
-        }
-    }
-
-    public class CoordinatedOutputCollector implements IOutputCollector {
-        IOutputCollector _delegate;
-
-        public CoordinatedOutputCollector(IOutputCollector delegate) {
-            _delegate = delegate;
-        }
-
-        public List<Integer> emit(String stream, Collection<Tuple> anchors, List<Object> tuple) {
-            List<Integer> tasks = _delegate.emit(stream, anchors, tuple);
-            updateTaskCounts(tuple.get(0), tasks);
-            return tasks;
-        }
-
-        public void emitDirect(int task, String stream, Collection<Tuple> anchors, List<Object> tuple) {
-            updateTaskCounts(tuple.get(0), Arrays.asList(task));
-            _delegate.emitDirect(task, stream, anchors, tuple);
-        }
-
-        public void ack(Tuple tuple) {
-            Object id = tuple.getValue(0);
-            synchronized(_tracked) {
-                TrackingInfo track = _tracked.get(id);
-                if (track != null)
-                    track.receivedTuples++;
-            }
-            boolean failed = checkFinishId(tuple, TupleType.REGULAR);
-            if(failed) {
-                _delegate.fail(tuple);                
-            } else {
-                _delegate.ack(tuple);
-            }
-        }
-
-        public void fail(Tuple tuple) {
-            Object id = tuple.getValue(0);
-            synchronized(_tracked) {
-                TrackingInfo track = _tracked.get(id);
-                if (track != null)
-                    track.failed = true;
-            }
-            checkFinishId(tuple, TupleType.REGULAR);
-            _delegate.fail(tuple);
-        }
-        
-        public void reportError(Throwable error) {
-            _delegate.reportError(error);
-        }
-
-
-        private void updateTaskCounts(Object id, List<Integer> tasks) {
-            synchronized(_tracked) {
-                TrackingInfo track = _tracked.get(id);
-                if (track != null) {
-                    Map<Integer, Integer> taskEmittedTuples = track.taskEmittedTuples;
-                    for(Integer task: tasks) {
-                        int newCount = get(taskEmittedTuples, task, 0) + 1;
-                        taskEmittedTuples.put(task, newCount);
-                    }
-                }
-            }
-        }
-    }
-
-    private Map<String, SourceArgs> _sourceArgs;
-    private IdStreamSpec _idStreamSpec;
-    private IRichBolt _delegate;
-    private Integer _numSourceReports;
-    private List<Integer> _countOutTasks = new ArrayList<>();
-    private OutputCollector _collector;
-    private TimeCacheMap<Object, TrackingInfo> _tracked;
-
-    public static class TrackingInfo {
-        int reportCount = 0;
-        int expectedTupleCount = 0;
-        int receivedTuples = 0;
-        boolean failed = false;
-        Map<Integer, Integer> taskEmittedTuples = new HashMap<>();
-        boolean receivedId = false;
-        boolean finished = false;
-        List<Tuple> ackTuples = new ArrayList<>();
-        
-        @Override
-        public String toString() {
-            return "reportCount: " + reportCount + "\n" +
-                   "expectedTupleCount: " + expectedTupleCount + "\n" +
-                   "receivedTuples: " + receivedTuples + "\n" +
-                   "failed: " + failed + "\n" +
-                   taskEmittedTuples.toString();
-        }
-    }
-
-    
-    public static class IdStreamSpec implements Serializable {
-        GlobalStreamId _id;
-        
-        public GlobalStreamId getGlobalStreamId() {
-            return _id;
-        }
-
-        public static IdStreamSpec makeDetectSpec(String component, String stream) {
-            return new IdStreamSpec(component, stream);
-        }        
-        
-        protected IdStreamSpec(String component, String stream) {
-            _id = new GlobalStreamId(component, stream);
-        }
-    }
-    
-    public CoordinatedBolt(IRichBolt delegate) {
-        this(delegate, null, null);
-    }
-
-    public CoordinatedBolt(IRichBolt delegate, String sourceComponent, SourceArgs sourceArgs, IdStreamSpec idStreamSpec) {
-        this(delegate, singleSourceArgs(sourceComponent, sourceArgs), idStreamSpec);
-    }
-    
-    public CoordinatedBolt(IRichBolt delegate, Map<String, SourceArgs> sourceArgs, IdStreamSpec idStreamSpec) {
-        _sourceArgs = sourceArgs;
-        if(_sourceArgs==null) _sourceArgs = new HashMap<>();
-        _delegate = delegate;
-        _idStreamSpec = idStreamSpec;
-    }
-    
-    public void prepare(Map config, TopologyContext context, OutputCollector collector) {
-        TimeCacheMap.ExpiredCallback<Object, TrackingInfo> callback = null;
-        if(_delegate instanceof TimeoutCallback) {
-            callback = new TimeoutItems();
-        }
-        _tracked = new TimeCacheMap<>(context.maxTopologyMessageTimeout(), callback);
-        _collector = collector;
-        _delegate.prepare(config, context, new OutputCollector(new CoordinatedOutputCollector(collector)));
-        for(String component: Utils.get(context.getThisTargets(),
-                                        Constants.COORDINATED_STREAM_ID,
-                                        new HashMap<String, Grouping>())
-                                        .keySet()) {
-            for(Integer task: context.getComponentTasks(component)) {
-                _countOutTasks.add(task);
-            }
-        }
-        if(!_sourceArgs.isEmpty()) {
-            _numSourceReports = 0;
-            for(Entry<String, SourceArgs> entry: _sourceArgs.entrySet()) {
-                if(entry.getValue().singleCount) {
-                    _numSourceReports+=1;
-                } else {
-                    _numSourceReports+=context.getComponentTasks(entry.getKey()).size();
-                }
-            }
-        }
-    }
-
-    private boolean checkFinishId(Tuple tup, TupleType type) {
-        Object id = tup.getValue(0);
-        boolean failed = false;
-        
-        synchronized(_tracked) {
-            TrackingInfo track = _tracked.get(id);
-            try {
-                if(track!=null) {
-                    boolean delayed = false;
-                    if(_idStreamSpec==null && type == TupleType.COORD || _idStreamSpec!=null && type==TupleType.ID) {
-                        track.ackTuples.add(tup);
-                        delayed = true;
-                    }
-                    if(track.failed) {
-                        failed = true;
-                        for(Tuple t: track.ackTuples) {
-                            _collector.fail(t);
-                        }
-                        _tracked.remove(id);
-                    } else if(track.receivedId
-                             && (_sourceArgs.isEmpty() ||
-                                  track.reportCount==_numSourceReports &&
-                                  track.expectedTupleCount == track.receivedTuples)){
-                        if(_delegate instanceof FinishedCallback) {
-                            ((FinishedCallback)_delegate).finishedId(id);
-                        }
-                        if(!(_sourceArgs.isEmpty() || type!=TupleType.REGULAR)) {
-                            throw new IllegalStateException("Coordination condition met on a non-coordinating tuple. Should be impossible");
-                        }
-                        Iterator<Integer> outTasks = _countOutTasks.iterator();
-                        while(outTasks.hasNext()) {
-                            int task = outTasks.next();
-                            int numTuples = get(track.taskEmittedTuples, task, 0);
-                            _collector.emitDirect(task, Constants.COORDINATED_STREAM_ID, tup, new Values(id, numTuples));
-                        }
-                        for(Tuple t: track.ackTuples) {
-                            _collector.ack(t);
-                        }
-                        track.finished = true;
-                        _tracked.remove(id);
-                    }
-                    if(!delayed && type!=TupleType.REGULAR) {
-                        if(track.failed) {
-                            _collector.fail(tup);
-                        } else {
-                            _collector.ack(tup);                            
-                        }
-                    }
-                } else {
-                    if(type!=TupleType.REGULAR) _collector.fail(tup);
-                }
-            } catch(FailedException e) {
-                LOG.error("Failed to finish batch", e);
-                for(Tuple t: track.ackTuples) {
-                    _collector.fail(t);
-                }
-                _tracked.remove(id);
-                failed = true;
-            }
-        }
-        return failed;
-    }
-
-    public void execute(Tuple tuple) {
-        Object id = tuple.getValue(0);
-        TrackingInfo track;
-        TupleType type = getTupleType(tuple);
-        synchronized(_tracked) {
-            track = _tracked.get(id);
-            if(track==null) {
-                track = new TrackingInfo();
-                if(_idStreamSpec==null) track.receivedId = true;
-                _tracked.put(id, track);
-            }
-        }
-        
-        if(type==TupleType.ID) {
-            synchronized(_tracked) {
-                track.receivedId = true;
-            }
-            checkFinishId(tuple, type);            
-        } else if(type==TupleType.COORD) {
-            int count = (Integer) tuple.getValue(1);
-            synchronized(_tracked) {
-                track.reportCount++;
-                track.expectedTupleCount+=count;
-            }
-            checkFinishId(tuple, type);
-        } else {            
-            synchronized(_tracked) {
-                _delegate.execute(tuple);
-            }
-        }
-    }
-
-    public void cleanup() {
-        _delegate.cleanup();
-        _tracked.cleanup();
-    }
-
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-        _delegate.declareOutputFields(declarer);
-        declarer.declareStream(Constants.COORDINATED_STREAM_ID, true, new Fields("id", "count"));
-    }
-
-    @Override
-    public Map<String, Object> getComponentConfiguration() {
-        return _delegate.getComponentConfiguration();
-    }
-    
-    private static Map<String, SourceArgs> singleSourceArgs(String sourceComponent, SourceArgs sourceArgs) {
-        Map<String, SourceArgs> ret = new HashMap<>();
-        ret.put(sourceComponent, sourceArgs);
-        return ret;
-    }
-    
-    private class TimeoutItems implements TimeCacheMap.ExpiredCallback<Object, TrackingInfo> {
-        @Override
-        public void expire(Object id, TrackingInfo val) {
-            synchronized(_tracked) {
-                // the combination of the lock and the finished flag ensures that
-                // an id is never timed out after it has been finished
-                val.failed = true;
-                if(!val.finished) {
-                    ((TimeoutCallback) _delegate).timeoutId(id);
-                }
-            }
-        }
-    }
-    
-    private TupleType getTupleType(Tuple tuple) {
-        if(_idStreamSpec!=null
-                && tuple.getSourceGlobalStreamId().equals(_idStreamSpec._id)) {
-            return TupleType.ID;
-        } else if(!_sourceArgs.isEmpty()
-                && tuple.getSourceStreamId().equals(Constants.COORDINATED_STREAM_ID)) {
-            return TupleType.COORD;
-        } else {
-            return TupleType.REGULAR;
-        }
-    }
-    
-    static enum TupleType {
-        REGULAR,
-        ID,
-        COORD
-    }
-}
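
For reference, a minimal, hypothetical sketch of a delegate bolt that implements the FinishedCallback and TimeoutCallback hooks invoked by checkFinishId and TimeoutItems above. The class name, the per-id counter, and the output fields are illustrative assumptions, not part of this file:

    import backtype.storm.coordination.CoordinatedBolt.FinishedCallback;
    import backtype.storm.coordination.CoordinatedBolt.TimeoutCallback;
    import backtype.storm.task.OutputCollector;
    import backtype.storm.task.TopologyContext;
    import backtype.storm.topology.OutputFieldsDeclarer;
    import backtype.storm.topology.base.BaseRichBolt;
    import backtype.storm.tuple.Fields;
    import backtype.storm.tuple.Tuple;
    import backtype.storm.tuple.Values;
    import java.util.HashMap;
    import java.util.Map;

    public class CountingDelegateBolt extends BaseRichBolt implements FinishedCallback, TimeoutCallback {
        private OutputCollector collector;
        private final Map<Object, Integer> counts = new HashMap<>(); // per-id tuple counts

        @Override
        public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
            this.collector = collector; // this is the wrapped CoordinatedOutputCollector
        }

        @Override
        public void execute(Tuple tuple) {
            Object id = tuple.getValue(0); // by convention the batch id is field 0
            Integer current = counts.get(id);
            counts.put(id, current == null ? 1 : current + 1);
            collector.ack(tuple);
        }

        @Override
        public void finishedId(Object id) { // called by checkFinishId once all counts line up
            collector.emit(new Values(id, counts.remove(id)));
        }

        @Override
        public void timeoutId(Object id) { // called by TimeoutItems when the id expires
            counts.remove(id);
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            declarer.declare(new Fields("id", "count"));
        }
    }

Wrapped as new CoordinatedBolt(new CountingDelegateBolt(), sourceArgs, idStreamSpec), the delegate only ever sees regular tuples; the coordination stream and the optional id stream are consumed by the wrapper.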

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/coordination/IBatchBolt.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/coordination/IBatchBolt.java b/storm-core/src/jvm/backtype/storm/coordination/IBatchBolt.java
deleted file mode 100644
index ee5d9bd..0000000
--- a/storm-core/src/jvm/backtype/storm/coordination/IBatchBolt.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.coordination;
-
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.IComponent;
-import backtype.storm.tuple.Tuple;
-import java.io.Serializable;
-import java.util.Map;
-
-public interface IBatchBolt<T> extends Serializable, IComponent {
-    void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, T id);
-    void execute(Tuple tuple);
-    void finishBatch();
-}
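
For context, a minimal sketch of how this interface is typically implemented by extending BaseBatchBolt; the class name, the counter, and the output fields are illustrative assumptions:

    import backtype.storm.coordination.BatchOutputCollector;
    import backtype.storm.task.TopologyContext;
    import backtype.storm.topology.OutputFieldsDeclarer;
    import backtype.storm.topology.base.BaseBatchBolt;
    import backtype.storm.tuple.Fields;
    import backtype.storm.tuple.Tuple;
    import backtype.storm.tuple.Values;
    import java.util.Map;

    public class BatchCountBolt extends BaseBatchBolt<Object> {
        private BatchOutputCollector collector;
        private Object id;
        private int count = 0;

        @Override
        public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, Object id) {
            this.collector = collector;
            this.id = id; // a fresh instance is prepared for each batch id
        }

        @Override
        public void execute(Tuple tuple) {
            count++; // accumulate state while tuples of the batch arrive
        }

        @Override
        public void finishBatch() {
            collector.emit(new Values(id, count)); // emit once the whole batch has been processed
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            declarer.declare(new Fields("id", "count"));
        }
    }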

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/daemon/ClientJarTransformerRunner.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/daemon/ClientJarTransformerRunner.java b/storm-core/src/jvm/backtype/storm/daemon/ClientJarTransformerRunner.java
deleted file mode 100644
index 3a0dfbb..0000000
--- a/storm-core/src/jvm/backtype/storm/daemon/ClientJarTransformerRunner.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package backtype.storm.daemon;
-
-import backtype.storm.utils.Utils;
-
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.io.InputStream;
-
-/**
- * Main executable to load and run a jar transformer
- */
-public class ClientJarTransformerRunner {
-    public static void main(String [] args) throws IOException {
-        JarTransformer transformer = Utils.jarTransformer(args[0]);
-        InputStream in = new FileInputStream(args[1]);
-        OutputStream out = new FileOutputStream(args[2]);
-        transformer.transform(in, out);
-        in.close();
-        out.close();
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/daemon/DirectoryCleaner.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/daemon/DirectoryCleaner.java b/storm-core/src/jvm/backtype/storm/daemon/DirectoryCleaner.java
deleted file mode 100644
index 67b6527..0000000
--- a/storm-core/src/jvm/backtype/storm/daemon/DirectoryCleaner.java
+++ /dev/null
@@ -1,177 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.daemon;
-
-import java.io.IOException;
-import java.io.File;
-import java.io.FileInputStream;
-import java.nio.file.FileSystems;
-import java.nio.file.Files;
-import java.nio.file.Paths;
-import java.nio.file.Path;
-import java.nio.file.DirectoryStream;
-import java.util.Stack;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-import java.util.Comparator;
-import java.util.PriorityQueue;
-import java.util.regex.Pattern;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Provides methods to help the Logviewer clean up
- * files in directories and to get a list of files without
- * worrying about excessive memory usage.
- *
- */
-public class DirectoryCleaner {
-    private static final Logger LOG = LoggerFactory.getLogger(DirectoryCleaner.class);
-    // used to recognize active log files; we may remove "current" from this list
-    private static final Pattern ACTIVE_LOG_PATTERN = Pattern.compile(".*\\.(log|err|out|current|yaml|pid)$");
-    // used to recognize the pattern of some meta files in a worker log directory
-    private static final Pattern META_LOG_PATTERN= Pattern.compile(".*\\.(yaml|pid)$");
-
-    // this is not static so that it can be mocked in tests
-    public DirectoryStream<Path> getStreamForDirectory(File dir) throws IOException {
-        DirectoryStream<Path> stream = Files.newDirectoryStream(dir.toPath());
-        return stream;
-    }
-
-    /**
-     * If totalSize of the files exceeds either the per-worker quota or the global quota,
-     * Logviewer deletes the oldest inactive log files in a worker directory or in all worker dirs.
-     * We use the parameter for_per_dir to switch between the two deletion modes.
-     * @param dirs the list of directories to be scanned for deletion
-     * @param quota the per-dir quota or the total quota for all directories
-     * @param for_per_dir if true, deletion happens for a single dir; otherwise, for all directories globally
-     * @param active_dirs used only for global deletion; active log files in these directories are skipped
-     * @return number of files deleted
-     */
-    public int deleteOldestWhileTooLarge(List<File> dirs,
-                        long quota, boolean for_per_dir, Set<String> active_dirs) throws IOException {
-        final int PQ_SIZE = 1024; // max number of files to delete for every round
-        final int MAX_ROUNDS  = 512; // max rounds of scanning the dirs
-        long totalSize = 0;
-        int deletedFiles = 0;
-
-        for (File dir : dirs) {
-            try (DirectoryStream<Path> stream = getStreamForDirectory(dir)) {
-                for (Path path : stream) {
-                    File file = path.toFile();
-                    totalSize += file.length();
-                }
-            }
-        }
-        long toDeleteSize = totalSize - quota;
-        if (toDeleteSize <= 0) {
-            return deletedFiles;
-        }
-
-        Comparator<File> comparator = new Comparator<File>() {
-            public int compare(File f1, File f2) {
-                if (f1.lastModified() > f2.lastModified()) {
-                    return -1;
-                } else {
-                    return 1;
-                }
-            }
-        };
-        // the oldest pq_size files in this directory will be placed in PQ, with the newest at the root
-        PriorityQueue<File> pq = new PriorityQueue<File>(PQ_SIZE, comparator);
-        int round = 0;
-        while (toDeleteSize > 0) {
-            LOG.debug("To delete size is {}, start a new round of deletion, round: {}", toDeleteSize, round);
-            for (File dir : dirs) {
-                try (DirectoryStream<Path> stream = getStreamForDirectory(dir)) {
-                    for (Path path : stream) {
-                        File file = path.toFile();
-                        if (for_per_dir) {
-                            if (ACTIVE_LOG_PATTERN.matcher(file.getName()).matches()) {
-                                continue; // skip active log files
-                            }
-                        } else { // for global cleanup
-                            if (active_dirs.contains(dir.getCanonicalPath())) { // an active worker's dir: only skip its active log files
-                                if (ACTIVE_LOG_PATTERN.matcher(file.getName()).matches()) {
-                                    continue; // skip active log files
-                                }
-                            } else {
-                                if (META_LOG_PATTERN.matcher(file.getName()).matches()) {
-                                    continue; // skip yaml and pid files
-                                }
-                            }
-                        }
-                        if (pq.size() < PQ_SIZE) {
-                            pq.offer(file);
-                        } else {
-                            if (file.lastModified() < pq.peek().lastModified()) {
-                                pq.poll();
-                                pq.offer(file);
-                            }
-                        }
-                    }
-                }
-            }
-            // need to reverse the order of elements in PQ to delete files from oldest to newest
-            Stack<File> stack = new Stack<File>();
-            while (!pq.isEmpty()) {
-                File file = pq.poll();
-                stack.push(file);
-            }
-            while (!stack.isEmpty() && toDeleteSize > 0) {
-                File file = stack.pop();
-                toDeleteSize -= file.length();
-                LOG.info("Delete file: {}, size: {}, lastModified: {}", file.getName(), file.length(), file.lastModified());
-                file.delete();
-                deletedFiles++;
-            }
-            pq.clear();
-            round++;
-            if (round >= MAX_ROUNDS) {
-                if (for_per_dir) {
-                    LOG.warn("Reached MAX_ROUNDS: {} during per-dir deletion; you may have too many files in " +
-                            "a single directory: {}. The remaining files will be deleted in the next interval.",
-                            MAX_ROUNDS, dirs.get(0).getCanonicalPath());
-                } else {
-                    LOG.warn("Reached MAX_ROUNDS: {} during global deletion; you may have too many files. " +
-                            "The remaining files will be deleted in the next interval.", MAX_ROUNDS);
-                }
-                break;
-            }
-        }
-        return deletedFiles;
-    }
-
-    // Note that to avoid memory problems, we only return the first 1024 files in a directory
-    public static List<File> getFilesForDir(File dir) throws IOException {
-        List<File> files = new ArrayList<File>();
-        final int MAX_NUM = 1024;
-
-        try (DirectoryStream<Path> stream = Files.newDirectoryStream(dir.toPath())) {
-            for (Path path : stream) {
-                files.add(path.toFile());
-                if (files.size() >= MAX_NUM) {
-                    break;
-                }
-            }
-        }
-        return files;
-    }
-}
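
To illustrate the quota-based cleanup described above, a hypothetical caller of deleteOldestWhileTooLarge might look like the following; the directory path and the 100 MB quota are made-up values:

    import backtype.storm.daemon.DirectoryCleaner;
    import java.io.File;
    import java.io.IOException;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class CleanerExample {
        public static void main(String[] args) throws IOException {
            DirectoryCleaner cleaner = new DirectoryCleaner();
            // per-dir mode: clean a single worker log directory down to its quota
            List<File> workerDir = Arrays.asList(new File("/var/log/storm/workers-artifacts/topo-1/6700"));
            long perDirQuota = 100L * 1024 * 1024; // 100 MB
            // active log files (*.log, *.out, ...) are skipped; the oldest inactive files go first
            int deleted = cleaner.deleteOldestWhileTooLarge(workerDir, perDirQuota, true, Collections.<String>emptySet());
            System.out.println("deleted " + deleted + " files");
        }
    }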

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/daemon/JarTransformer.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/daemon/JarTransformer.java b/storm-core/src/jvm/backtype/storm/daemon/JarTransformer.java
deleted file mode 100644
index 914710a..0000000
--- a/storm-core/src/jvm/backtype/storm/daemon/JarTransformer.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package backtype.storm.daemon;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-
-/**
- * A plugin that can be used to transform a jar file in nimbus before it
- * is used by a topology.
- */
-public interface JarTransformer {
-    public void transform(InputStream input, OutputStream output) throws IOException;
-}
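
A minimal, hypothetical implementation of this interface, a pass-through transformer that copies the jar unchanged, could look like the following; ClientJarTransformerRunner above would load such a class by name via Utils.jarTransformer:

    import backtype.storm.daemon.JarTransformer;
    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;

    public class IdentityJarTransformer implements JarTransformer {
        @Override
        public void transform(InputStream input, OutputStream output) throws IOException {
            byte[] buf = new byte[8192];
            int n;
            while ((n = input.read(buf)) != -1) { // copy the jar bytes through unchanged
                output.write(buf, 0, n);
            }
        }
    }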

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/daemon/Shutdownable.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/daemon/Shutdownable.java b/storm-core/src/jvm/backtype/storm/daemon/Shutdownable.java
deleted file mode 100644
index b1d8ddf..0000000
--- a/storm-core/src/jvm/backtype/storm/daemon/Shutdownable.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.daemon;
-
-public interface Shutdownable {
-    public void shutdown();
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/drpc/DRPCInvocationsClient.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/drpc/DRPCInvocationsClient.java b/storm-core/src/jvm/backtype/storm/drpc/DRPCInvocationsClient.java
deleted file mode 100644
index 78e8d9b..0000000
--- a/storm-core/src/jvm/backtype/storm/drpc/DRPCInvocationsClient.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.drpc;
-
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicReference;
-
-import backtype.storm.generated.DRPCRequest;
-import backtype.storm.generated.DistributedRPCInvocations;
-import backtype.storm.generated.AuthorizationException;
-import backtype.storm.security.auth.ThriftClient;
-import backtype.storm.security.auth.ThriftConnectionType;
-import org.apache.thrift.transport.TTransportException;
-import org.apache.thrift.TException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class DRPCInvocationsClient extends ThriftClient implements DistributedRPCInvocations.Iface {
-    public static final Logger LOG = LoggerFactory.getLogger(DRPCInvocationsClient.class);
-    private final AtomicReference<DistributedRPCInvocations.Client> client = new AtomicReference<>();
-    private String host;
-    private int port;
-
-    public DRPCInvocationsClient(Map conf, String host, int port) throws TTransportException {
-        super(conf, ThriftConnectionType.DRPC_INVOCATIONS, host, port, null);
-        this.host = host;
-        this.port = port;
-        client.set(new DistributedRPCInvocations.Client(_protocol));
-    }
-        
-    public String getHost() {
-        return host;
-    }
-    
-    public int getPort() {
-        return port;
-    }       
-
-    public void reconnectClient() throws TException {
-        if (client.get() == null) {
-            reconnect();
-            client.set(new DistributedRPCInvocations.Client(_protocol));
-        }
-    }
-
-    public boolean isConnected() {
-        return client.get() != null;
-    }
-
-    public void result(String id, String result) throws TException, AuthorizationException {
-        DistributedRPCInvocations.Client c = client.get();
-        try {
-            if (c == null) {
-                throw new TException("Client is not connected...");
-            }
-            c.result(id, result);
-        } catch(AuthorizationException aze) {
-            throw aze;
-        } catch(TException e) {
-            client.compareAndSet(c, null);
-            throw e;
-        }
-    }
-
-    public DRPCRequest fetchRequest(String func) throws TException, AuthorizationException {
-        DistributedRPCInvocations.Client c = client.get();
-        try {
-            if (c == null) {
-                throw new TException("Client is not connected...");
-            }
-            return c.fetchRequest(func);
-        } catch(AuthorizationException aze) {
-            throw aze;
-        } catch(TException e) {
-            client.compareAndSet(c, null);
-            throw e;
-        }
-    }    
-
-    public void failRequest(String id) throws TException, AuthorizationException {
-        DistributedRPCInvocations.Client c = client.get();
-        try {
-            if (c == null) {
-                throw new TException("Client is not connected...");
-            }
-            c.failRequest(id);
-        } catch(AuthorizationException aze) {
-            throw aze;
-        } catch(TException e) {
-            client.compareAndSet(c, null);
-            throw e;
-        }
-    }
-
-    public DistributedRPCInvocations.Client getClient() {
-        return client.get();
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/drpc/DRPCSpout.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/drpc/DRPCSpout.java b/storm-core/src/jvm/backtype/storm/drpc/DRPCSpout.java
deleted file mode 100644
index 4ed15c0..0000000
--- a/storm-core/src/jvm/backtype/storm/drpc/DRPCSpout.java
+++ /dev/null
@@ -1,261 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.drpc;
-
-import backtype.storm.Config;
-import backtype.storm.ILocalDRPC;
-import backtype.storm.generated.DRPCRequest;
-import backtype.storm.generated.DistributedRPCInvocations;
-import backtype.storm.generated.AuthorizationException;
-import backtype.storm.spout.SpoutOutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseRichSpout;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
-import backtype.storm.utils.ExtendedThreadPoolExecutor;
-import backtype.storm.utils.ServiceRegistry;
-import backtype.storm.utils.Utils;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.LinkedList;
-import java.util.Map;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-import java.util.concurrent.Callable;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.TimeUnit;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.thrift.TException;
-import org.json.simple.JSONValue;
-
-public class DRPCSpout extends BaseRichSpout {
-    //ANY CHANGE TO THIS CODE MUST BE SERIALIZABLE COMPATIBLE OR THERE WILL BE PROBLEMS
-    static final long serialVersionUID = 2387848310969237877L;
-
-    public static final Logger LOG = LoggerFactory.getLogger(DRPCSpout.class);
-    
-    SpoutOutputCollector _collector;
-    List<DRPCInvocationsClient> _clients = new ArrayList<>();
-    transient LinkedList<Future<Void>> _futures = null;
-    transient ExecutorService _backround = null;
-    String _function;
-    String _local_drpc_id = null;
-    
-    private static class DRPCMessageId {
-        String id;
-        int index;
-        
-        public DRPCMessageId(String id, int index) {
-            this.id = id;
-            this.index = index;
-        }
-    }
-    
-    
-    public DRPCSpout(String function) {
-        _function = function;
-    }
-
-    public DRPCSpout(String function, ILocalDRPC drpc) {
-        _function = function;
-        _local_drpc_id = drpc.getServiceId();
-    }
-
-    public String get_function() {
-        return _function;
-    }
-
-    private class Adder implements Callable<Void> {
-        private String server;
-        private int port;
-        private Map conf;
-
-        public Adder(String server, int port, Map conf) {
-            this.server = server;
-            this.port = port;
-            this.conf = conf;
-        }
-
-        @Override
-        public Void call() throws Exception {
-            DRPCInvocationsClient c = new DRPCInvocationsClient(conf, server, port);
-            synchronized (_clients) {
-                _clients.add(c);
-            }
-            return null;
-        }
-    }
-
-    private void reconnect(final DRPCInvocationsClient c) {
-        _futures.add(_backround.submit(new Callable<Void>() {
-            @Override
-            public Void call() throws Exception {
-                c.reconnectClient();
-                return null;
-            }
-        }));
-    }
-
-    private void checkFutures() {
-        Iterator<Future<Void>> i = _futures.iterator();
-        while (i.hasNext()) {
-            Future<Void> f = i.next();
-            if (f.isDone()) {
-                i.remove();
-            }
-            try {
-                f.get();
-            } catch (Exception e) {
-                throw new RuntimeException(e);
-            }
-        }
-    }
- 
-    @Override
-    public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
-        _collector = collector;
-        if(_local_drpc_id==null) {
-            _backround = new ExtendedThreadPoolExecutor(0, Integer.MAX_VALUE,
-                60L, TimeUnit.SECONDS,
-                new SynchronousQueue<Runnable>());
-            _futures = new LinkedList<>();
-
-            int numTasks = context.getComponentTasks(context.getThisComponentId()).size();
-            int index = context.getThisTaskIndex();
-
-            int port = Utils.getInt(conf.get(Config.DRPC_INVOCATIONS_PORT));
-            List<String> servers = (List<String>) conf.get(Config.DRPC_SERVERS);
-            if(servers == null || servers.isEmpty()) {
-                throw new RuntimeException("No DRPC servers configured for topology");   
-            }
-            
-            if (numTasks < servers.size()) {
-                for (String s: servers) {
-                    _futures.add(_backround.submit(new Adder(s, port, conf)));
-                }
-            } else {        
-                int i = index % servers.size();
-                _futures.add(_backround.submit(new Adder(servers.get(i), port, conf)));
-            }
-        }
-        
-    }
-
-    @Override
-    public void close() {
-        for(DRPCInvocationsClient client: _clients) {
-            client.close();
-        }
-    }
-
-    @Override
-    public void nextTuple() {
-        boolean gotRequest = false;
-        if(_local_drpc_id==null) {
-            int size;
-            synchronized (_clients) {
-                size = _clients.size(); //This will only ever grow, so no need to worry about falling off the end
-            }
-            for(int i=0; i<size; i++) {
-                DRPCInvocationsClient client;
-                synchronized (_clients) {
-                    client = _clients.get(i);
-                }
-                if (!client.isConnected()) {
-                    continue;
-                }
-                try {
-                    DRPCRequest req = client.fetchRequest(_function);
-                    if(req.get_request_id().length() > 0) {
-                        Map returnInfo = new HashMap();
-                        returnInfo.put("id", req.get_request_id());
-                        returnInfo.put("host", client.getHost());
-                        returnInfo.put("port", client.getPort());
-                        gotRequest = true;
-                        _collector.emit(new Values(req.get_func_args(), JSONValue.toJSONString(returnInfo)), new DRPCMessageId(req.get_request_id(), i));
-                        break;
-                    }
-                } catch (AuthorizationException aze) {
-                    reconnect(client);
-                    LOG.error("Not authorized to fetch DRPC request from DRPC server", aze);
-                } catch (TException e) {
-                    reconnect(client);
-                    LOG.error("Failed to fetch DRPC request from DRPC server", e);
-                } catch (Exception e) {
-                    LOG.error("Failed to fetch DRPC request from DRPC server", e);
-                }
-            }
-            checkFutures();
-        } else {
-            DistributedRPCInvocations.Iface drpc = (DistributedRPCInvocations.Iface) ServiceRegistry.getService(_local_drpc_id);
-            if(drpc!=null) { // can happen during shutdown of drpc while topology is still up
-                try {
-                    DRPCRequest req = drpc.fetchRequest(_function);
-                    if(req.get_request_id().length() > 0) {
-                        Map returnInfo = new HashMap();
-                        returnInfo.put("id", req.get_request_id());
-                        returnInfo.put("host", _local_drpc_id);
-                        returnInfo.put("port", 0);
-                        gotRequest = true;
-                        _collector.emit(new Values(req.get_func_args(), JSONValue.toJSONString(returnInfo)), new DRPCMessageId(req.get_request_id(), 0));
-                    }
-                } catch (AuthorizationException aze) {
-                    throw new RuntimeException(aze);
-                } catch (TException e) {
-                    throw new RuntimeException(e);
-                }
-            }
-        }
-        if(!gotRequest) {
-            Utils.sleep(1);
-        }
-    }
-
-    @Override
-    public void ack(Object msgId) {
-    }
-
-    @Override
-    public void fail(Object msgId) {
-        DRPCMessageId did = (DRPCMessageId) msgId;
-        DistributedRPCInvocations.Iface client;
-        
-        if(_local_drpc_id == null) {
-            client = _clients.get(did.index);
-        } else {
-            client = (DistributedRPCInvocations.Iface) ServiceRegistry.getService(_local_drpc_id);
-        }
-        try {
-            client.failRequest(did.id);
-        } catch (AuthorizationException aze) {
-            LOG.error("Not authorized to fail request at DRPC server", aze);
-        } catch (TException e) {
-            LOG.error("Failed to fail request", e);
-        }
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-        declarer.declare(new Fields("args", "return-info"));
-    }    
-}
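
For context, a minimal sketch of how this spout is usually wired into a topology together with ReturnResults; the function name "exclamation" and ExclaimBolt are illustrative assumptions:

    import backtype.storm.drpc.DRPCSpout;
    import backtype.storm.drpc.ReturnResults;
    import backtype.storm.topology.BasicOutputCollector;
    import backtype.storm.topology.OutputFieldsDeclarer;
    import backtype.storm.topology.TopologyBuilder;
    import backtype.storm.topology.base.BaseBasicBolt;
    import backtype.storm.tuple.Fields;
    import backtype.storm.tuple.Tuple;
    import backtype.storm.tuple.Values;

    public class ManualDrpcExample {
        // appends "!" to the DRPC argument and forwards the return-info that ReturnResults needs
        public static class ExclaimBolt extends BaseBasicBolt {
            @Override
            public void execute(Tuple input, BasicOutputCollector collector) {
                String arg = input.getString(0);        // "args" field from DRPCSpout
                String returnInfo = input.getString(1); // "return-info" field from DRPCSpout
                collector.emit(new Values(arg + "!", returnInfo));
            }

            @Override
            public void declareOutputFields(OutputFieldsDeclarer declarer) {
                declarer.declare(new Fields("result", "return-info"));
            }
        }

        public static TopologyBuilder build() {
            TopologyBuilder builder = new TopologyBuilder();
            builder.setSpout("drpc", new DRPCSpout("exclamation")); // function name registered with the DRPC server
            builder.setBolt("exclaim", new ExclaimBolt(), 3).shuffleGrouping("drpc");
            builder.setBolt("return", new ReturnResults(), 3).shuffleGrouping("exclaim");
            return builder; // submit with StormSubmitter after setting drpc.servers / drpc.invocations.port
        }
    }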


[23/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/util.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/util.clj b/storm-core/src/clj/backtype/storm/util.clj
deleted file mode 100644
index aa2b043..0000000
--- a/storm-core/src/clj/backtype/storm/util.clj
+++ /dev/null
@@ -1,1118 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-
-(ns backtype.storm.util
-  (:import [java.net InetAddress])
-  (:import [java.util Map Map$Entry List ArrayList Collection Iterator HashMap])
-  (:import [java.io FileReader FileNotFoundException])
-  (:import [java.nio.file Paths])
-  (:import [backtype.storm Config])
-  (:import [backtype.storm.utils Time Container ClojureTimerTask Utils
-            MutableObject MutableInt])
-  (:import [backtype.storm.security.auth NimbusPrincipal])
-  (:import [javax.security.auth Subject])
-  (:import [java.util UUID Random ArrayList List Collections])
-  (:import [java.util.zip ZipFile])
-  (:import [java.util.concurrent.locks ReentrantReadWriteLock])
-  (:import [java.util.concurrent Semaphore])
-  (:import [java.nio.file Files Paths])
-  (:import [java.nio.file.attribute FileAttribute])
-  (:import [java.io File FileOutputStream RandomAccessFile StringWriter
-            PrintWriter BufferedReader InputStreamReader IOException])
-  (:import [java.lang.management ManagementFactory])
-  (:import [org.apache.commons.exec DefaultExecutor CommandLine])
-  (:import [org.apache.commons.io FileUtils])
-  (:import [backtype.storm.logging ThriftAccessLogger])
-  (:import [org.apache.commons.exec ExecuteException])
-  (:import [org.json.simple JSONValue])
-  (:import [org.yaml.snakeyaml Yaml]
-           [org.yaml.snakeyaml.constructor SafeConstructor])
-  (:require [clojure [string :as str]])
-  (:import [clojure.lang RT])
-  (:require [clojure [set :as set]])
-  (:require [clojure.java.io :as io])
-  (:use [clojure walk])
-  (:require [ring.util.codec :as codec])
-  (:use [backtype.storm log]))
-
-(defn wrap-in-runtime
-  "Wraps an exception in a RuntimeException if needed"
-  [^Exception e]
-  (if (instance? RuntimeException e)
-    e
-    (RuntimeException. e)))
-
-(def on-windows?
-  (= "Windows_NT" (System/getenv "OS")))
-
-(def file-path-separator
-  (System/getProperty "file.separator"))
-
-(def class-path-separator
-  (System/getProperty "path.separator"))
-
-(defn is-absolute-path? [path]
-  (.isAbsolute (Paths/get path (into-array String []))))
-
-(defmacro defalias
-  "Defines an alias for a var: a new var with the same root binding (if
-  any) and similar metadata. The metadata of the alias is its initial
-  metadata (as provided by def) merged into the metadata of the original."
-  ([name orig]
-   `(do
-      (alter-meta!
-        (if (.hasRoot (var ~orig))
-          (def ~name (.getRawRoot (var ~orig)))
-          (def ~name))
-        ;; When copying metadata, disregard {:macro false}.
-        ;; Workaround for http://www.assembla.com/spaces/clojure/tickets/273
-        #(conj (dissoc % :macro)
-               (apply dissoc (meta (var ~orig)) (remove #{:macro} (keys %)))))
-      (var ~name)))
-  ([name orig doc]
-   (list `defalias (with-meta name (assoc (meta name) :doc doc)) orig)))
-
-;; name-with-attributes by Konrad Hinsen:
-(defn name-with-attributes
-  "To be used in macro definitions.
-  Handles optional docstrings and attribute maps for a name to be defined
-  in a list of macro arguments. If the first macro argument is a string,
-  it is added as a docstring to name and removed from the macro argument
-  list. If afterwards the first macro argument is a map, its entries are
-  added to the name's metadata map and the map is removed from the
-  macro argument list. The return value is a vector containing the name
-  with its extended metadata map and the list of unprocessed macro
-  arguments."
-  [name macro-args]
-  (let [[docstring macro-args] (if (string? (first macro-args))
-                                 [(first macro-args) (next macro-args)]
-                                 [nil macro-args])
-        [attr macro-args] (if (map? (first macro-args))
-                            [(first macro-args) (next macro-args)]
-                            [{} macro-args])
-        attr (if docstring
-               (assoc attr :doc docstring)
-               attr)
-        attr (if (meta name)
-               (conj (meta name) attr)
-               attr)]
-    [(with-meta name attr) macro-args]))
-
-(defmacro defnk
-  "Define a function accepting keyword arguments. Symbols up to the first
-  keyword in the parameter list are taken as positional arguments.  Then
-  an alternating sequence of keywords and default values is expected. The
-  values of the keyword arguments are available in the function body by
-  virtue of the symbol corresponding to the keyword (cf. :keys destructuring).
-  defnk accepts an optional docstring as well as an optional metadata map."
-  [fn-name & fn-tail]
-  (let [[fn-name [args & body]] (name-with-attributes fn-name fn-tail)
-        [pos kw-vals] (split-with symbol? args)
-        syms (map #(-> % name symbol) (take-nth 2 kw-vals))
-        values (take-nth 2 (rest kw-vals))
-        sym-vals (apply hash-map (interleave syms values))
-        de-map {:keys (vec syms) :or sym-vals}]
-    `(defn ~fn-name
-       [~@pos & options#]
-       (let [~de-map (apply hash-map options#)]
-         ~@body))))
-
-(defn find-first
-  "Returns the first item of coll for which (pred item) returns logical true.
-  Consumes the sequence up to the first match; consumes the entire sequence
-  and returns nil if no match is found."
-  [pred coll]
-  (first (filter pred coll)))
-
-(defn dissoc-in
-  "Dissociates an entry from a nested associative structure returning a new
-  nested structure. keys is a sequence of keys. Any empty maps that result
-  will not be present in the new structure."
-  [m [k & ks :as keys]]
-  (if ks
-    (if-let [nextmap (get m k)]
-      (let [newmap (dissoc-in nextmap ks)]
-        (if (seq newmap)
-          (assoc m k newmap)
-          (dissoc m k)))
-      m)
-    (dissoc m k)))
-
-(defn indexed
-  "Returns a lazy sequence of [index, item] pairs, where items come
-  from 's' and indexes count up from zero.
-
-  (indexed '(a b c d))  =>  ([0 a] [1 b] [2 c] [3 d])"
-  [s]
-  (map vector (iterate inc 0) s))
-
-(defn positions
-  "Returns a lazy sequence containing the positions at which pred
-  is true for items in coll."
-  [pred coll]
-  (for [[idx elt] (indexed coll) :when (pred elt)] idx))
-
-(defn exception-cause?
-  [klass ^Throwable t]
-  (->> (iterate #(.getCause ^Throwable %) t)
-       (take-while identity)
-       (some (partial instance? klass))
-       boolean))
-
-(defmacro thrown-cause?
-  [klass & body]
-  `(try
-     ~@body
-     false
-     (catch Throwable t#
-       (exception-cause? ~klass t#))))
-
-(defmacro thrown-cause-with-msg?
-  [klass re & body]
-  `(try
-     ~@body
-     false
-     (catch Throwable t#
-       (and (re-matches ~re (.getMessage t#))
-            (exception-cause? ~klass t#)))))
-
-(defmacro forcat
-  [[args aseq] & body]
-  `(mapcat (fn [~args]
-             ~@body)
-           ~aseq))
-
-(defmacro try-cause
-  [& body]
-  (let [checker (fn [form]
-                  (or (not (sequential? form))
-                      (not= 'catch (first form))))
-        [code guards] (split-with checker body)
-        error-local (gensym "t")
-        guards (forcat [[_ klass local & guard-body] guards]
-                       `((exception-cause? ~klass ~error-local)
-                         (let [~local ~error-local]
-                           ~@guard-body
-                           )))]
-    `(try ~@code
-       (catch Throwable ~error-local
-         (cond ~@guards
-               true (throw ~error-local)
-               )))))
-
-(defn local-hostname
-  []
-  (.getCanonicalHostName (InetAddress/getLocalHost)))
-
-(def memoized-local-hostname (memoize local-hostname))
-
-;; checks conf for STORM_LOCAL_HOSTNAME.
-;; when unconfigured, falls back to (memoized) guess by `local-hostname`.
-(defn hostname
-  [conf]
-  (conf Config/STORM_LOCAL_HOSTNAME (memoized-local-hostname)))
-
-(letfn [(try-port [port]
-                  (with-open [socket (java.net.ServerSocket. port)]
-                    (.getLocalPort socket)))]
-  (defn available-port
-    ([] (try-port 0))
-    ([preferred]
-     (try
-       (try-port preferred)
-       (catch java.io.IOException e
-         (available-port))))))
-
-(defn uuid []
-  (str (UUID/randomUUID)))
-
-(defn current-time-secs
-  []
-  (Time/currentTimeSecs))
-
-(defn current-time-millis
-  []
-  (Time/currentTimeMillis))
-
-(defn secs-to-millis-long
-  [secs]
-  (long (* (long 1000) secs)))
-
-(defn clojurify-structure
-  [s]
-  (prewalk (fn [x]
-             (cond (instance? Map x) (into {} x)
-                   (instance? List x) (vec x)
-                   ;; (Boolean. false) does not evaluate to false in an if.
-                   ;; This fixes that.
-                   (instance? Boolean x) (boolean x)
-                   true x))
-           s))
-
-(defmacro with-file-lock
-  [path & body]
-  `(let [f# (File. ~path)
-         _# (.createNewFile f#)
-         rf# (RandomAccessFile. f# "rw")
-         lock# (.. rf# (getChannel) (lock))]
-     (try
-       ~@body
-       (finally
-         (.release lock#)
-         (.close rf#)))))
-
-(defn tokenize-path
-  [^String path]
-  (let [toks (.split path "/")]
-    (vec (filter (complement empty?) toks))))
-
-(defn assoc-conj
-  [m k v]
-  (merge-with concat m {k [v]}))
-
-;; returns [ones in first set not in second, ones in second set not in first]
-(defn set-delta
-  [old curr]
-  (let [s1 (set old)
-        s2 (set curr)]
-    [(set/difference s1 s2) (set/difference s2 s1)]))
-
-(defn parent-path
-  [path]
-  (let [toks (tokenize-path path)]
-    (str "/" (str/join "/" (butlast toks)))))
-
-(defn toks->path
-  [toks]
-  (str "/" (str/join "/" toks)))
-
-(defn normalize-path
-  [^String path]
-  (toks->path (tokenize-path path)))
-
-(defn map-val
-  [afn amap]
-  (into {}
-        (for [[k v] amap]
-          [k (afn v)])))
-
-(defn filter-val
-  [afn amap]
-  (into {} (filter (fn [[k v]] (afn v)) amap)))
-
-(defn filter-key
-  [afn amap]
-  (into {} (filter (fn [[k v]] (afn k)) amap)))
-
-(defn map-key
-  [afn amap]
-  (into {} (for [[k v] amap] [(afn k) v])))
-
-(defn separate
-  [pred aseq]
-  [(filter pred aseq) (filter (complement pred) aseq)])
-
-(defn full-path
-  [parent name]
-  (let [toks (tokenize-path parent)]
-    (toks->path (conj toks name))))
-
-(def not-nil? (complement nil?))
-
-(defn barr
-  [& vals]
-  (byte-array (map byte vals)))
-
-(defn exit-process!
-  [val & msg]
-  (log-error (RuntimeException. (str msg)) "Halting process: " msg)
-  (.exit (Runtime/getRuntime) val))
-
-(defn sum
-  [vals]
-  (reduce + vals))
-
-(defn repeat-seq
-  ([aseq]
-   (apply concat (repeat aseq)))
-  ([amt aseq]
-   (apply concat (repeat amt aseq))))
-
-(defn div
-  "Perform floating point division on the arguments."
-  [f & rest]
-  (apply / (double f) rest))
-
-(defn defaulted
-  [val default]
-  (if val val default))
-
-(defn mk-counter
-  ([] (mk-counter 1))
-  ([start-val]
-   (let [val (atom (dec start-val))]
-     (fn [] (swap! val inc)))))
-
-(defmacro for-times [times & body]
-  `(for [i# (range ~times)]
-     ~@body))
-
-(defmacro dofor [& body]
-  `(doall (for ~@body)))
-
-(defn reverse-map
-  "{:a 1 :b 1 :c 2} -> {1 [:a :b] 2 :c}"
-  [amap]
-  (reduce (fn [m [k v]]
-            (let [existing (get m v [])]
-              (assoc m v (conj existing k))))
-          {} amap))
-
-(defmacro print-vars [& vars]
-  (let [prints (for [v vars] `(println ~(str v) ~v))]
-    `(do ~@prints)))
-
-(defn process-pid
-  "Gets the pid of this JVM. Hacky because Java doesn't provide a real way to do this."
-  []
-  (let [name (.getName (ManagementFactory/getRuntimeMXBean))
-        split (.split name "@")]
-    (when-not (= 2 (count split))
-      (throw (RuntimeException. (str "Got unexpected process name: " name))))
-    (first split)))
-
-(defn exec-command! [command]
-  (let [[comm-str & args] (seq (.split command " "))
-        command (CommandLine. comm-str)]
-    (doseq [a args]
-      (.addArgument command a))
-    (.execute (DefaultExecutor.) command)))
-
-(defn extract-dir-from-jar [jarpath dir destdir]
-  (try-cause
-    (with-open [jarpath (ZipFile. jarpath)]
-      (let [entries (enumeration-seq (.entries jarpath))]
-        (doseq [file (filter (fn [entry](and (not (.isDirectory entry)) (.startsWith (.getName entry) dir))) entries)]
-          (.mkdirs (.getParentFile (File. destdir (.getName file))))
-          (with-open [out (FileOutputStream. (File. destdir (.getName file)))]
-            (io/copy (.getInputStream jarpath file) out)))))
-    (catch IOException e
-      (log-message "Could not extract " dir " from " jarpath))))
-
-(defn sleep-secs [secs]
-  (when (pos? secs)
-    (Time/sleep (* (long secs) 1000))))
-
-(defn sleep-until-secs [target-secs]
-  (Time/sleepUntil (* (long target-secs) 1000)))
-
-(def ^:const sig-kill 9)
-
-(def ^:const sig-term 15)
-
-(defn send-signal-to-process
-  [pid signum]
-  (try-cause
-    (exec-command! (str (if on-windows?
-                          (if (== signum sig-kill) "taskkill /f /pid " "taskkill /pid ")
-                          (str "kill -" signum " "))
-                     pid))
-    (catch ExecuteException e
-      (log-message "Error when trying to kill " pid ". Process is probably already dead."))))
-
-(defn read-and-log-stream
-  [prefix stream]
-  (try
-    (let [reader (BufferedReader. (InputStreamReader. stream))]
-      (loop []
-        (if-let [line (.readLine reader)]
-                (do
-                  (log-warn (str prefix ":" line))
-                  (recur)))))
-    (catch IOException e
-      (log-warn "Error while trying to log stream" e))))
-
-(defn force-kill-process
-  [pid]
-  (send-signal-to-process pid sig-kill))
-
-(defn kill-process-with-sig-term
-  [pid]
-  (send-signal-to-process pid sig-term))
-
-(defn add-shutdown-hook-with-force-kill-in-1-sec
-  "adds the user supplied function as a shutdown hook for cleanup.
-   Also adds a function that sleeps for a second and then sends kill -9 to process to avoid any zombie process in case
-   cleanup function hangs."
-  [func]
-  (.addShutdownHook (Runtime/getRuntime) (Thread. #(func)))
-  (.addShutdownHook (Runtime/getRuntime) (Thread. #((sleep-secs 1)
-                                                    (.halt (Runtime/getRuntime) 20)))))
-
-(defprotocol SmartThread
-  (start [this])
-  (join [this])
-  (interrupt [this])
-  (sleeping? [this]))
-
-;; afn returns amount of time to sleep
-(defnk async-loop [afn
-                   :daemon false
-                   :kill-fn (fn [error] (exit-process! 1 "Async loop died!"))
-                   :priority Thread/NORM_PRIORITY
-                   :factory? false
-                   :start true
-                   :thread-name nil]
-  (let [thread (Thread.
-                 (fn []
-                   (try-cause
-                     (let [afn (if factory? (afn) afn)]
-                       (loop []
-                         (let [sleep-time (afn)]
-                           (when-not (nil? sleep-time)
-                             (sleep-secs sleep-time)
-                             (recur))
-                           )))
-                     (catch InterruptedException e
-                       (log-message "Async loop interrupted!")
-                       )
-                     (catch Throwable t
-                       (log-error t "Async loop died!")
-                       (kill-fn t)))))]
-    (.setDaemon thread daemon)
-    (.setPriority thread priority)
-    (when thread-name
-      (.setName thread (str (.getName thread) "-" thread-name)))
-    (when start
-      (.start thread))
-    ;; should return object that supports stop, interrupt, join, and waiting?
-    (reify SmartThread
-      (start
-        [this]
-        (.start thread))
-      (join
-        [this]
-        (.join thread))
-      (interrupt
-        [this]
-        (.interrupt thread))
-      (sleeping?
-        [this]
-        (Time/isThreadWaiting thread)))))
-
-(defn shell-cmd
-  [command]
-  (->> command
-    (map #(str \' (clojure.string/escape % {\' "'\"'\"'"}) \'))
-      (clojure.string/join " ")))
-
-(defn script-file-path [dir]
-  (str dir file-path-separator "storm-worker-script.sh"))
-
-(defn container-file-path [dir]
-  (str dir file-path-separator "launch_container.sh"))
-
-(defnk write-script
-  [dir command :environment {}]
-  (let [script-src (str "#!/bin/bash\n" (clojure.string/join "" (map (fn [[k v]] (str (shell-cmd ["export" (str k "=" v)]) ";\n")) environment)) "\nexec " (shell-cmd command) ";")
-        script-path (script-file-path dir)
-        _ (spit script-path script-src)]
-    script-path
-  ))
-
-(defnk launch-process
-  [command :environment {} :log-prefix nil :exit-code-callback nil :directory nil]
-  (let [builder (ProcessBuilder. command)
-        process-env (.environment builder)]
-    (when directory (.directory builder directory))
-    (.redirectErrorStream builder true)
-    (doseq [[k v] environment]
-      (.put process-env k v))
-    (let [process (.start builder)]
-      (if (or log-prefix exit-code-callback)
-        (async-loop
-         (fn []
-           (if log-prefix
-             (read-and-log-stream log-prefix (.getInputStream process)))
-           (when exit-code-callback
-             (try
-               (.waitFor process)
-               (catch InterruptedException e
-                 (log-message log-prefix " interrupted.")))
-             (exit-code-callback (.exitValue process)))
-           nil)))                    
-      process)))
-   
-(defn exists-file?
-  [path]
-  (.exists (File. path)))
-
-(defn rmr
-  [path]
-  (log-debug "Rmr path " path)
-  (when (exists-file? path)
-    (try
-      (FileUtils/forceDelete (File. path))
-      (catch FileNotFoundException e))))
-
-(defn rmpath
-  "Removes file or directory at the path. Not recursive. Throws exception on failure"
-  [path]
-  (log-debug "Removing path " path)
-  (when (exists-file? path)
-    (let [deleted? (.delete (File. path))]
-      (when-not deleted?
-        (throw (RuntimeException. (str "Failed to delete " path)))))))
-
-(defn local-mkdirs
-  [path]
-  (log-debug "Making dirs at " path)
-  (FileUtils/forceMkdir (File. path)))
-
-(defn touch
-  [path]
-  (log-debug "Touching file at " path)
-  (let [success? (do (if on-windows? (.mkdirs (.getParentFile (File. path))))
-                   (.createNewFile (File. path)))]
-    (when-not success?
-      (throw (RuntimeException. (str "Failed to touch " path))))))
-
-(defn create-symlink!
-  "Create symlink is to the target"
-  ([path-dir target-dir file-name]
-    (create-symlink! path-dir target-dir file-name file-name))
-  ([path-dir target-dir from-file-name to-file-name]
-    (let [path (str path-dir file-path-separator from-file-name)
-          target (str target-dir file-path-separator to-file-name)
-          empty-array (make-array String 0)
-          attrs (make-array FileAttribute 0)
-          abs-path (.toAbsolutePath (Paths/get path empty-array))
-          abs-target (.toAbsolutePath (Paths/get target empty-array))]
-      (log-debug "Creating symlink [" abs-path "] to [" abs-target "]")
-      (if (not (.exists (.toFile abs-path)))
-        (Files/createSymbolicLink abs-path abs-target attrs)))))
-
-(defn read-dir-contents
-  [dir]
-  (if (exists-file? dir)
-    (let [content-files (.listFiles (File. dir))]
-      (map #(.getName ^File %) content-files))
-    []))
-
-(defn compact
-  [aseq]
-  (filter (complement nil?) aseq))
-
-(defn current-classpath
-  []
-  (System/getProperty "java.class.path"))
-
-(defn get-full-jars
-  [dir]
-  (map #(str dir file-path-separator %) (filter #(.endsWith % ".jar") (read-dir-contents dir))))
-
-(defn worker-classpath
-  []
-  (let [storm-dir (System/getProperty "storm.home")
-        storm-lib-dir (str storm-dir file-path-separator "lib")
-        storm-conf-dir (if-let [confdir (System/getenv "STORM_CONF_DIR")]
-                         confdir 
-                         (str storm-dir file-path-separator "conf"))
-        storm-extlib-dir (str storm-dir file-path-separator "extlib")
-        extcp (System/getenv "STORM_EXT_CLASSPATH")]
-    (if (nil? storm-dir) 
-      (current-classpath)
-      (str/join class-path-separator
-                (remove nil? (concat (get-full-jars storm-lib-dir) (get-full-jars storm-extlib-dir) [extcp] [storm-conf-dir]))))))
-
-(defn add-to-classpath
-  [classpath paths]
-  (if (empty? paths)
-    classpath
-    (str/join class-path-separator (cons classpath paths))))
-
-(defn ^ReentrantReadWriteLock mk-rw-lock
-  []
-  (ReentrantReadWriteLock.))
-
-(defmacro read-locked
-  [rw-lock & body]
-  (let [lock (with-meta rw-lock {:tag `ReentrantReadWriteLock})]
-    `(let [rlock# (.readLock ~lock)]
-       (try (.lock rlock#)
-         ~@body
-         (finally (.unlock rlock#))))))
-
-(defmacro write-locked
-  [rw-lock & body]
-  (let [lock (with-meta rw-lock {:tag `ReentrantReadWriteLock})]
-    `(let [wlock# (.writeLock ~lock)]
-       (try (.lock wlock#)
-         ~@body
-         (finally (.unlock wlock#))))))
-
-(defn time-delta
-  [time-secs]
-  (- (current-time-secs) time-secs))
-
-(defn time-delta-ms
-  [time-ms]
-  (- (System/currentTimeMillis) (long time-ms)))
-
-(defn parse-int
-  [str]
-  (Integer/valueOf str))
-
-(defn integer-divided
-  [sum num-pieces]
-  (clojurify-structure (Utils/integerDivided sum num-pieces)))
-
-(defn collectify
-  [obj]
-  (if (or (sequential? obj) (instance? Collection obj))
-    obj
-    [obj]))
-
-(defn to-json
-  [obj]
-  (JSONValue/toJSONString obj))
-
-(defn from-json
-  [^String str]
-  (if str
-    (clojurify-structure
-      (JSONValue/parse str))
-    nil))
-
-(defmacro letlocals
-  [& body]
-  (let [[tobind lexpr] (split-at (dec (count body)) body)
-        binded (vec (mapcat (fn [e]
-                              (if (and (list? e) (= 'bind (first e)))
-                                [(second e) (last e)]
-                                ['_ e]
-                                ))
-                            tobind))]
-    `(let ~binded
-       ~(first lexpr))))
-
-(defn remove-first
-  [pred aseq]
-  (let [[b e] (split-with (complement pred) aseq)]
-    (when (empty? e)
-      (throw (IllegalArgumentException. "Nothing to remove")))
-    (concat b (rest e))))
-
-(defn assoc-non-nil
-  [m k v]
-  (if v (assoc m k v) m))
-
-(defn multi-set
-  "Returns a map of elem to count"
-  [aseq]
-  (apply merge-with +
-         (map #(hash-map % 1) aseq)))
-
-(defn set-var-root*
-  [avar val]
-  (alter-var-root avar (fn [avar] val)))
-
-(defmacro set-var-root
-  [var-sym val]
-  `(set-var-root* (var ~var-sym) ~val))
-
-(defmacro with-var-roots
-  [bindings & body]
-  (let [settings (partition 2 bindings)
-        tmpvars (repeatedly (count settings) (partial gensym "old"))
-        vars (map first settings)
-        savevals (vec (mapcat (fn [t v] [t v]) tmpvars vars))
-        setters (for [[v s] settings] `(set-var-root ~v ~s))
-        restorers (map (fn [v s] `(set-var-root ~v ~s)) vars tmpvars)]
-    `(let ~savevals
-       ~@setters
-       (try
-         ~@body
-         (finally
-           ~@restorers)))))
-
-(defn map-diff
-  "Returns mappings in m2 that aren't in m1"
-  [m1 m2]
-  (into {} (filter (fn [[k v]] (not= v (m1 k))) m2)))
-
-(defn select-keys-pred
-  [pred amap]
-  (into {} (filter (fn [[k v]] (pred k)) amap)))
-
-(defn rotating-random-range
-  [choices]
-  (let [rand (Random.)
-        choices (ArrayList. choices)]
-    (Collections/shuffle choices rand)
-    [(MutableInt. -1) choices rand]))
-
-(defn acquire-random-range-id
-  [[^MutableInt curr ^List state ^Random rand]]
-  (when (>= (.increment curr) (.size state))
-    (.set curr 0)
-    (Collections/shuffle state rand))
-  (.get state (.get curr)))
-
-; this can be rewritten to be tail recursive
-(defn interleave-all
-  [& colls]
-  (if (empty? colls)
-    []
-    (let [colls (filter (complement empty?) colls)
-          my-elems (map first colls)
-          rest-elems (apply interleave-all (map rest colls))]
-      (concat my-elems rest-elems))))
-
-(defn any-intersection
-  [& sets]
-  (let [elem->count (multi-set (apply concat sets))]
-    (-> (filter-val #(> % 1) elem->count)
-        keys)))
-
-(defn between?
-  "val >= lower and val <= upper"
-  [val lower upper]
-  (and (>= val lower)
-       (<= val upper)))
-
-(defmacro benchmark
-  [& body]
-  `(let [l# (doall (range 1000000))]
-     (time
-       (doseq [i# l#]
-         ~@body))))
-
-(defn rand-sampler
-  [freq]
-  (let [r (java.util.Random.)]
-    (fn [] (= 0 (.nextInt r freq)))))
-
-(defn even-sampler
-  [freq]
-  (let [freq (int freq)
-        start (int 0)
-        r (java.util.Random.)
-        curr (MutableInt. -1)
-        target (MutableInt. (.nextInt r freq))]
-    (with-meta
-      (fn []
-        (let [i (.increment curr)]
-          (when (>= i freq)
-            (.set curr start)
-            (.set target (.nextInt r freq))))
-        (= (.get curr) (.get target)))
-      {:rate freq})))
-
-(defn sampler-rate
-  [sampler]
-  (:rate (meta sampler)))
-
-(defn class-selector
-  [obj & args]
-  (class obj))
-
-(defn uptime-computer []
-  (let [start-time (current-time-secs)]
-    (fn [] (time-delta start-time))))
-
-(defn stringify-error [error]
-  (let [result (StringWriter.)
-        printer (PrintWriter. result)]
-    (.printStackTrace error printer)
-    (.toString result)))
-
-(defn nil-to-zero
-  [v]
-  (or v 0))
-
-(defn bit-xor-vals
-  [vals]
-  (reduce bit-xor 0 vals))
-
-(defmacro with-error-reaction
-  [afn & body]
-  `(try ~@body
-     (catch Throwable t# (~afn t#))))
-
-(defn container
-  []
-  (Container.))
-
-(defn container-set! [^Container container obj]
-  (set! (. container object) obj)
-  container)
-
-(defn container-get [^Container container]
-  (. container object))
-
-(defn to-millis [secs]
-  (* 1000 (long secs)))
-
-(defn throw-runtime [& strs]
-  (throw (RuntimeException. (apply str strs))))
-
-(defn redirect-stdio-to-slf4j!
-  []
-  ;; set-var-root doesn't work with *out* and *err*, so digging much deeper here
-  ;; Unfortunately, this code seems to work at the REPL but not when spawned as worker processes
-  ;; it might have something to do with being a child process
-  ;; (set! (. (.getThreadBinding RT/OUT) val)
-  ;;       (java.io.OutputStreamWriter.
-  ;;         (log-stream :info "STDIO")))
-  ;; (set! (. (.getThreadBinding RT/ERR) val)
-  ;;       (PrintWriter.
-  ;;         (java.io.OutputStreamWriter.
-  ;;           (log-stream :error "STDIO"))
-  ;;         true))
-  (log-capture! "STDIO"))
-
-(defn spy
-  [prefix val]
-  (log-message prefix ": " val)
-  val)
-
-(defn zip-contains-dir?
-  [zipfile target]
-  (let [entries (->> zipfile (ZipFile.) .entries enumeration-seq (map (memfn getName)))]
-    (boolean (some #(.startsWith % (str target "/")) entries))))
-
-(defn url-encode
-  [s]
-  (codec/url-encode s))
-
-(defn url-decode
-  [s]
-  (codec/url-decode s))
-
-(defn join-maps
-  [& maps]
-  (let [all-keys (apply set/union (for [m maps] (-> m keys set)))]
-    (into {} (for [k all-keys]
-               [k (for [m maps] (m k))]))))
-
-(defn partition-fixed
-  [max-num-chunks aseq]
-  (if (zero? max-num-chunks)
-    []
-    (let [chunks (->> (integer-divided (count aseq) max-num-chunks)
-                      (#(dissoc % 0))
-                      (sort-by (comp - first))
-                      (mapcat (fn [[size amt]] (repeat amt size)))
-                      )]
-      (loop [result []
-             [chunk & rest-chunks] chunks
-             data aseq]
-        (if (nil? chunk)
-          result
-          (let [[c rest-data] (split-at chunk data)]
-            (recur (conj result c)
-                   rest-chunks
-                   rest-data)))))))
-
-
-(defn assoc-apply-self
-  [curr key afn]
-  (assoc curr key (afn curr)))
-
-(defmacro recursive-map
-  [& forms]
-  (->> (partition 2 forms)
-       (map (fn [[key form]] `(assoc-apply-self ~key (fn [~'<>] ~form))))
-       (concat `(-> {}))))
-
-(defn current-stack-trace
-  []
-  (->> (Thread/currentThread)
-       .getStackTrace
-       (map str)
-       (str/join "\n")))
-
-(defn get-iterator
-  [^Iterable alist]
-  (if alist (.iterator alist)))
-
-(defn iter-has-next?
-  [^Iterator iter]
-  (if iter (.hasNext iter) false))
-
-(defn iter-next
-  [^Iterator iter]
-  (.next iter))
-
-(defmacro fast-list-iter
-  [pairs & body]
-  (let [pairs (partition 2 pairs)
-        lists (map second pairs)
-        elems (map first pairs)
-        iters (map (fn [_] (gensym)) lists)
-        bindings (->> (map (fn [i l] [i `(get-iterator ~l)]) iters lists)
-                      (apply concat))
-        tests (map (fn [i] `(iter-has-next? ~i)) iters)
-        assignments (->> (map (fn [e i] [e `(iter-next ~i)]) elems iters)
-                         (apply concat))]
-    `(let [~@bindings]
-       (while (and ~@tests)
-         (let [~@assignments]
-           ~@body)))))
-
-(defn fast-list-map
-  [afn alist]
-  (let [ret (ArrayList.)]
-    (fast-list-iter [e alist]
-                    (.add ret (afn e)))
-    ret))
-
-(defmacro fast-list-for
-  [[e alist] & body]
-  `(fast-list-map (fn [~e] ~@body) ~alist))
-
-(defn map-iter
-  [^Map amap]
-  (if amap (-> amap .entrySet .iterator)))
-
-(defn convert-entry
-  [^Map$Entry entry]
-  [(.getKey entry) (.getValue entry)])
-
-(defmacro fast-map-iter
-  [[bind amap] & body]
-  `(let [iter# (map-iter ~amap)]
-     (while (iter-has-next? iter#)
-       (let [entry# (iter-next iter#)
-             ~bind (convert-entry entry#)]
-         ~@body))))
-
-(defn fast-first
-  [^List alist]
-  (.get alist 0))
-
-(defmacro get-with-default
-  [amap key default-val]
-  `(let [curr# (.get ~amap ~key)]
-     (if curr#
-       curr#
-       (do
-         (let [new# ~default-val]
-           (.put ~amap ~key new#)
-           new#)))))
-
-(defn fast-group-by
-  [afn alist]
-  (let [ret (HashMap.)]
-    (fast-list-iter
-      [e alist]
-      (let [key (afn e)
-            ^List curr (get-with-default ret key (ArrayList.))]
-        (.add curr e)))
-    ret))
-
-(defn new-instance
-  [klass]
-  (let [klass (if (string? klass) (Class/forName klass) klass)]
-    (.newInstance klass)))
-
-(defn get-configured-class
-  [conf config-key]
-  (if (.get conf config-key) (new-instance (.get conf config-key)) nil))
-
-(defmacro -<>
-  ([x] x)
-  ([x form] (if (seq? form)
-              (with-meta
-                (let [[begin [_ & end]] (split-with #(not= % '<>) form)]
-                  (concat begin [x] end))
-                (meta form))
-              (list form x)))
-  ([x form & more] `(-<> (-<> ~x ~form) ~@more)))
-
-(defn logs-filename
-  [storm-id port]
-  (str storm-id file-path-separator port file-path-separator "worker.log"))
-
-(def worker-log-filename-pattern #"^worker.log(.*)")
-
-(defn event-logs-filename
-  [storm-id port]
-  (str storm-id file-path-separator port file-path-separator "events.log"))
-
-(defn clojure-from-yaml-file [yamlFile]
-  (try
-    (with-open [reader (java.io.FileReader. yamlFile)]
-      (clojurify-structure (.load (Yaml. (SafeConstructor.)) reader)))
-    (catch Exception ex
-      (log-error ex))))
-
-(defn hashmap-to-persistent [^HashMap m]
-  (zipmap (.keySet m) (.values m)))
-
-(defn retry-on-exception
-  "Retries specific function on exception based on retries count"
-  [retries task-description f & args]
-  (let [res (try {:value (apply f args)}
-              (catch Exception e
-                (if (<= 0 retries)
-                  (throw e)
-                  {:exception e})))]
-    (if (:exception res)
-      (do 
-        (log-error (:exception res) (str "Failed to " task-description ". Will make [" retries "] more attempts."))
-        (recur (dec retries) task-description f args))
-      (do 
-        (log-debug (str "Successful " task-description "."))
-        (:value res)))))
-
-(defn setup-default-uncaught-exception-handler
-  "Set a default uncaught exception handler to handle exceptions not caught in other threads."
-  []
-  (Thread/setDefaultUncaughtExceptionHandler
-    (proxy [Thread$UncaughtExceptionHandler] []
-      (uncaughtException [thread thrown]
-        (try
-          (Utils/handleUncaughtException thrown)
-          (catch Error err
-            (do
-              (log-error err "Received error in main thread.. terminating server...")
-              (.exit (Runtime/getRuntime) -2))))))))
-
-(defn redact-value
-  "Hides value for k in coll for printing coll safely"
-  [coll k]
-  (if (contains? coll k)
-    (assoc coll k (apply str (repeat (count (coll k)) "#")))
-    coll))
-
-(defn log-thrift-access
-  [request-id remoteAddress principal operation]
-  (doto
-    (ThriftAccessLogger.)
-    (.log (str "Request ID: " request-id " access from: " remoteAddress " principal: " principal " operation: " operation))))
-
-(def DISALLOWED-KEY-NAME-STRS #{"/" "." ":" "\\"})
-
-(defn validate-key-name!
-  [name]
-  (if (some #(.contains name %) DISALLOWED-KEY-NAME-STRS)
-    (throw (RuntimeException.
-             (str "Key name cannot contain any of the following: " (pr-str DISALLOWED-KEY-NAME-STRS))))
-    (if (clojure.string/blank? name)
-      (throw (RuntimeException.
-               ("Key name cannot be blank"))))))
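
The removed backtype.storm.util namespace above is a grab bag of small general-purpose helpers (retry loops, samplers, read/write-lock macros, fast iteration helpers). As a quick orientation, here is a minimal, self-contained Clojure sketch of the retry idea that retry-on-exception expresses; it has no Storm dependencies, and the names retry-on-exception* and flaky-call are illustrative only, not part of the codebase:

    (defn retry-on-exception*
      "Call (apply f args); on Exception, retry up to `retries` more times,
       rethrowing once the retry budget is exhausted."
      [retries f & args]
      (loop [n retries]
        (let [res (try {:value (apply f args)}
                       (catch Exception e
                         (if (pos? n) {:exception e} (throw e))))]
          (if (contains? res :value)
            (:value res)
            (recur (dec n))))))

    ;; usage: the call succeeds on the third attempt
    (def attempts (atom 0))
    (defn flaky-call []
      (if (< (swap! attempts inc) 3)
        (throw (RuntimeException. "transient failure"))
        :ok))
    (retry-on-exception* 5 flaky-call)   ;; => :ok
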

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/zookeeper.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/zookeeper.clj b/storm-core/src/clj/backtype/storm/zookeeper.clj
deleted file mode 100644
index c91ffa4..0000000
--- a/storm-core/src/clj/backtype/storm/zookeeper.clj
+++ /dev/null
@@ -1,308 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-
-(ns backtype.storm.zookeeper
-  (:import [org.apache.curator.retry RetryNTimes]
-           [backtype.storm Config])
-  (:import [org.apache.curator.framework.api CuratorEvent CuratorEventType CuratorListener UnhandledErrorListener])
-  (:import [org.apache.curator.framework.state ConnectionStateListener])
-  (:import [org.apache.curator.framework CuratorFramework CuratorFrameworkFactory])
-  (:import [org.apache.curator.framework.recipes.leader LeaderLatch LeaderLatch$State Participant LeaderLatchListener])
-  (:import [org.apache.zookeeper ZooKeeper Watcher KeeperException$NoNodeException
-            ZooDefs ZooDefs$Ids CreateMode WatchedEvent Watcher$Event Watcher$Event$KeeperState
-            Watcher$Event$EventType KeeperException$NodeExistsException])
-  (:import [org.apache.zookeeper.data Stat])
-  (:import [org.apache.zookeeper.server ZooKeeperServer NIOServerCnxnFactory])
-  (:import [java.net InetSocketAddress BindException InetAddress])
-  (:import [backtype.storm.nimbus ILeaderElector NimbusInfo])
-  (:import [java.io File])
-  (:import [java.util List Map])
-  (:import [backtype.storm.utils Utils ZookeeperAuthInfo])
-  (:use [backtype.storm util log config]))
-
-(def zk-keeper-states
-  {Watcher$Event$KeeperState/Disconnected :disconnected
-   Watcher$Event$KeeperState/SyncConnected :connected
-   Watcher$Event$KeeperState/AuthFailed :auth-failed
-   Watcher$Event$KeeperState/Expired :expired})
-
-(def zk-event-types
-  {Watcher$Event$EventType/None :none
-   Watcher$Event$EventType/NodeCreated :node-created
-   Watcher$Event$EventType/NodeDeleted :node-deleted
-   Watcher$Event$EventType/NodeDataChanged :node-data-changed
-   Watcher$Event$EventType/NodeChildrenChanged :node-children-changed})
-
-(defn- default-watcher
-  [state type path]
-  (log-message "Zookeeper state update: " state type path))
-
-(defnk mk-client
-  [conf servers port
-   :root ""
-   :watcher default-watcher
-   :auth-conf nil]
-  (let [fk (Utils/newCurator conf servers port root (when auth-conf (ZookeeperAuthInfo. auth-conf)))]
-    (.. fk
-        (getCuratorListenable)
-        (addListener
-          (reify CuratorListener
-            (^void eventReceived [this ^CuratorFramework _fk ^CuratorEvent e]
-                   (when (= (.getType e) CuratorEventType/WATCHED)
-                     (let [^WatchedEvent event (.getWatchedEvent e)]
-                       (watcher (zk-keeper-states (.getState event))
-                                (zk-event-types (.getType event))
-                                (.getPath event))))))))
-    ;;    (.. fk
-    ;;        (getUnhandledErrorListenable)
-    ;;        (addListener
-    ;;         (reify UnhandledErrorListener
-    ;;           (unhandledError [this msg error]
-    ;;             (if (or (exception-cause? InterruptedException error)
-    ;;                     (exception-cause? java.nio.channels.ClosedByInterruptException error))
-    ;;               (do (log-warn-error error "Zookeeper exception " msg)
-    ;;                   (let [to-throw (InterruptedException.)]
-    ;;                     (.initCause to-throw error)
-    ;;                     (throw to-throw)
-    ;;                     ))
-    ;;               (do (log-error error "Unrecoverable Zookeeper error " msg)
-    ;;                   (halt-process! 1 "Unrecoverable Zookeeper error")))
-    ;;             ))))
-    (.start fk)
-    fk))
-
-(def zk-create-modes
-  {:ephemeral CreateMode/EPHEMERAL
-   :persistent CreateMode/PERSISTENT
-   :sequential CreateMode/PERSISTENT_SEQUENTIAL})
-
-(defn create-node
-  ([^CuratorFramework zk ^String path ^bytes data mode acls]
-    (let [mode  (zk-create-modes mode)]
-      (try
-        (.. zk (create) (creatingParentsIfNeeded) (withMode mode) (withACL acls) (forPath (normalize-path path) data))
-        (catch Exception e (throw (wrap-in-runtime e))))))
-  ([^CuratorFramework zk ^String path ^bytes data acls]
-    (create-node zk path data :persistent acls)))
-
-(defn exists-node?
-  [^CuratorFramework zk ^String path watch?]
-  ((complement nil?)
-   (try
-     (if watch?
-       (.. zk (checkExists) (watched) (forPath (normalize-path path)))
-       (.. zk (checkExists) (forPath (normalize-path path))))
-     (catch Exception e (throw (wrap-in-runtime e))))))
-
-(defnk delete-node
-  [^CuratorFramework zk ^String path]
-  (let [path (normalize-path path)]
-    (when (exists-node? zk path false)
-      (try-cause  (.. zk (delete) (deletingChildrenIfNeeded) (forPath (normalize-path path)))
-                  (catch KeeperException$NoNodeException e
-                    ;; do nothing
-                    (log-message "exception" e)
-                  )
-                  (catch Exception e (throw (wrap-in-runtime e)))))))
-
-(defn mkdirs
-  [^CuratorFramework zk ^String path acls]
-  (let [path (normalize-path path)]
-    (when-not (or (= path "/") (exists-node? zk path false))
-      (mkdirs zk (parent-path path) acls)
-      (try-cause
-        (create-node zk path (barr 7) :persistent acls)
-        (catch KeeperException$NodeExistsException e
-          ;; this can happen when multiple clients doing mkdir at same time
-          ))
-      )))
-
-(defn sync-path
-  [^CuratorFramework zk ^String path]
-  (try
-    (.. zk (sync) (forPath (normalize-path path)))
-    (catch Exception e (throw (wrap-in-runtime e)))))
-
-
-(defn add-listener [^CuratorFramework zk ^ConnectionStateListener listener]
-  (.. zk (getConnectionStateListenable) (addListener listener)))
-
-(defn get-data
-  [^CuratorFramework zk ^String path watch?]
-  (let [path (normalize-path path)]
-    (try-cause
-      (if (exists-node? zk path watch?)
-        (if watch?
-          (.. zk (getData) (watched) (forPath path))
-          (.. zk (getData) (forPath path))))
-      (catch KeeperException$NoNodeException e
-        ;; this is fine b/c we still have a watch from the successful exists call
-        nil )
-      (catch Exception e (throw (wrap-in-runtime e))))))
-
-(defn get-data-with-version 
-  [^CuratorFramework zk ^String path watch?]
-  (let [stats (org.apache.zookeeper.data.Stat. )
-        path (normalize-path path)]
-    (try-cause
-     (if-let [data
-              (if (exists-node? zk path watch?)
-                (if watch?
-                  (.. zk (getData) (watched) (storingStatIn stats) (forPath path))
-                  (.. zk (getData) (storingStatIn stats) (forPath path))))]
-       {:data data
-        :version (.getVersion stats)})
-     (catch KeeperException$NoNodeException e
-       ;; this is fine b/c we still have a watch from the successful exists call
-       nil ))))
-
-(defn get-version 
-[^CuratorFramework zk ^String path watch?]
-  (if-let [stats
-           (if watch?
-             (.. zk (checkExists) (watched) (forPath (normalize-path path)))
-             (.. zk (checkExists) (forPath (normalize-path path))))]
-    (.getVersion stats)
-    nil))
-
-(defn get-children
-  [^CuratorFramework zk ^String path watch?]
-  (try
-    (if watch?
-      (.. zk (getChildren) (watched) (forPath (normalize-path path)))
-      (.. zk (getChildren) (forPath (normalize-path path))))
-    (catch Exception e (throw (wrap-in-runtime e)))))
-
-(defn delete-node-blobstore
-  "Deletes the state inside the zookeeper for a key, for which the
-   contents of the key starts with nimbus host port information"
-  [^CuratorFramework zk ^String parent-path ^String host-port-info]
-  (let [parent-path (normalize-path parent-path)
-        child-path-list (if (exists-node? zk parent-path false)
-                          (into [] (get-children zk parent-path false))
-                          [])]
-    (doseq [child child-path-list]
-      (when (.startsWith child host-port-info)
-        (log-debug "delete-node " "child" child)
-        (delete-node zk (str parent-path "/" child))))))
-
-(defn set-data
-  [^CuratorFramework zk ^String path ^bytes data]
-  (try
-    (.. zk (setData) (forPath (normalize-path path) data))
-    (catch Exception e (throw (wrap-in-runtime e)))))
-
-(defn exists
-  [^CuratorFramework zk ^String path watch?]
-  (exists-node? zk path watch?))
-
-(defnk mk-inprocess-zookeeper
-  [localdir :port nil]
-  (let [localfile (File. localdir)
-        zk (ZooKeeperServer. localfile localfile 2000)
-        [retport factory]
-        (loop [retport (if port port 2000)]
-          (if-let [factory-tmp
-                   (try-cause
-                     (doto (NIOServerCnxnFactory.)
-                       (.configure (InetSocketAddress. retport) 0))
-                     (catch BindException e
-                       (when (> (inc retport) (if port port 65535))
-                         (throw (RuntimeException.
-                                  "No port is available to launch an inprocess zookeeper.")))))]
-            [retport factory-tmp]
-            (recur (inc retport))))]
-    (log-message "Starting inprocess zookeeper at port " retport " and dir " localdir)
-    (.startup factory zk)
-    [retport factory]))
-
-(defn shutdown-inprocess-zookeeper
-  [handle]
-  (.shutdown handle))
-
-(defn- to-NimbusInfo [^Participant participant]
-  (let
-    [id (if (clojure.string/blank? (.getId participant))
-          (throw (RuntimeException. "No nimbus leader participant host found, have you started your nimbus hosts?"))
-          (.getId participant))
-     nimbus-info (NimbusInfo/parse id)]
-    (.setLeader nimbus-info (.isLeader participant))
-    nimbus-info))
-
-(defn leader-latch-listener-impl
-  "Leader latch listener that will be invoked when we either gain or lose leadership"
-  [conf zk leader-latch]
-  (let [hostname (.getCanonicalHostName (InetAddress/getLocalHost))]
-    (reify LeaderLatchListener
-      (^void isLeader[this]
-        (log-message (str hostname " gained leadership")))
-      (^void notLeader[this]
-        (log-message (str hostname " lost leadership."))))))
-
-(defn zk-leader-elector
-  "Zookeeper Implementation of ILeaderElector."
-  [conf]
-  (let [servers (conf STORM-ZOOKEEPER-SERVERS)
-        zk (mk-client conf (conf STORM-ZOOKEEPER-SERVERS) (conf STORM-ZOOKEEPER-PORT) :auth-conf conf)
-        leader-lock-path (str (conf STORM-ZOOKEEPER-ROOT) "/leader-lock")
-        id (.toHostPortString (NimbusInfo/fromConf conf))
-        leader-latch (atom (LeaderLatch. zk leader-lock-path id))
-        leader-latch-listener (atom (leader-latch-listener-impl conf zk @leader-latch))
-        ]
-    (reify ILeaderElector
-      (prepare [this conf]
-        (log-message "no-op for zookeeper implementation"))
-
-      (^void addToLeaderLockQueue [this]
-        ;if this latch is already closed, we need to create new instance.
-        (if (.equals LeaderLatch$State/CLOSED (.getState @leader-latch))
-          (do
-            (reset! leader-latch (LeaderLatch. zk leader-lock-path id))
-            (reset! leader-latch-listener (leader-latch-listener-impl conf zk @leader-latch))
-            (log-message "LeaderLatch was in closed state. Resetted the leaderLatch and listeners.")
-            ))
-
-        ;Only if the latch is not already started we invoke start.
-        (if (.equals LeaderLatch$State/LATENT (.getState @leader-latch))
-          (do
-            (.addListener @leader-latch @leader-latch-listener)
-            (.start @leader-latch)
-            (log-message "Queued up for leader lock."))
-          (log-message "Node already in queue for leader lock.")))
-
-      (^void removeFromLeaderLockQueue [this]
-        ;Only started latches can be closed.
-        (if (.equals LeaderLatch$State/STARTED (.getState @leader-latch))
-          (do
-            (.close @leader-latch)
-            (log-message "Removed from leader lock queue."))
-          (log-message "leader latch is not started so no removeFromLeaderLockQueue needed.")))
-
-      (^boolean isLeader [this]
-        (.hasLeadership @leader-latch))
-
-      (^NimbusInfo getLeader [this]
-        (to-NimbusInfo (.getLeader @leader-latch)))
-
-      (^List getAllNimbuses [this]
-        (let [participants (.getParticipants @leader-latch)]
-          (map (fn [^Participant participant]
-                 (to-NimbusInfo participant))
-            participants)))
-
-      (^void close[this]
-        (log-message "closing zookeeper connection of leader elector.")
-        (.close zk)))))
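
The removed backtype.storm.zookeeper namespace above is mostly thin Curator interop; its leader-election half wraps Curator's LeaderLatch recipe. Below is a condensed sketch of that LeaderLatch pattern in plain Clojure interop, assuming curator-framework and curator-recipes are on the classpath; the namespace example.zk-leader-sketch and the function start-latch! are illustrative names, not part of Storm:

    (ns example.zk-leader-sketch
      (:import [org.apache.curator.framework CuratorFrameworkFactory]
               [org.apache.curator.retry RetryNTimes]
               [org.apache.curator.framework.recipes.leader LeaderLatch LeaderLatchListener]))

    (defn start-latch!
      "Connect to ZooKeeper at connect-string, join the election rooted at
       latch-path under the given participant id, and print leadership changes.
       Returns [client latch]; close the latch and then the client to leave."
      [connect-string latch-path id]
      (let [client (CuratorFrameworkFactory/newClient connect-string (RetryNTimes. 5 1000))
            latch  (LeaderLatch. client latch-path id)]
        (.start client)
        (.addListener latch
                      (reify LeaderLatchListener
                        (isLeader [_] (println id "gained leadership"))
                        (notLeader [_] (println id "lost leadership"))))
        (.start latch)
        [client latch]))

Calling (.hasLeadership latch) afterwards answers the same question that the elector's isLeader method answers above.
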

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/LocalCluster.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/LocalCluster.clj b/storm-core/src/clj/org/apache/storm/LocalCluster.clj
new file mode 100644
index 0000000..df3c180
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/LocalCluster.clj
@@ -0,0 +1,106 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+
+(ns org.apache.storm.LocalCluster
+  (:use [org.apache.storm testing config util])
+  (:import [org.apache.storm.utils Utils])
+  (:import [java.util Map])
+  (:gen-class
+    :init init
+    :implements [org.apache.storm.ILocalCluster]
+    :constructors {[] []
+                   [java.util.Map] []
+                   [String Long] []}
+    :state state))
+
+(defn -init
+  ([]
+   (let [ret (mk-local-storm-cluster
+               :daemon-conf
+               {TOPOLOGY-ENABLE-MESSAGE-TIMEOUTS true})]
+     [[] ret]))
+  ([^String zk-host ^Long zk-port]
+   (let [ret (mk-local-storm-cluster :daemon-conf {TOPOLOGY-ENABLE-MESSAGE-TIMEOUTS true
+                                                     STORM-ZOOKEEPER-SERVERS (list zk-host)
+                                                     STORM-ZOOKEEPER-PORT zk-port})]
+     [[] ret]))
+  ([^Map stateMap]
+     [[] stateMap]))
+
+(defn submit-hook [hook name conf topology]
+  (let [topologyInfo (Utils/getTopologyInfo name nil conf)]
+    (.notify hook topologyInfo conf topology)))
+
+(defn -submitTopology
+  [this name conf topology]
+  (submit-local-topology
+    (:nimbus (. this state)) name conf topology)
+  (let [hook (get-configured-class conf STORM-TOPOLOGY-SUBMISSION-NOTIFIER-PLUGIN)]
+    (when hook (submit-hook hook name conf topology))))
+
+
+(defn -submitTopologyWithOpts
+  [this name conf topology submit-opts]
+  (submit-local-topology-with-opts
+    (:nimbus (. this state)) name conf topology submit-opts))
+
+(defn -uploadNewCredentials
+  [this name creds]
+  (.uploadNewCredentials (:nimbus (. this state)) name creds))
+
+(defn -shutdown
+  [this]
+  (kill-local-storm-cluster (. this state)))
+
+(defn -killTopology
+  [this name]
+  (.killTopology (:nimbus (. this state)) name))
+
+(defn -getTopologyConf
+  [this id]
+  (.getTopologyConf (:nimbus (. this state)) id))
+
+(defn -getTopology
+  [this id]
+  (.getTopology (:nimbus (. this state)) id))
+
+(defn -getClusterInfo
+  [this]
+  (.getClusterInfo (:nimbus (. this state))))
+
+(defn -getTopologyInfo
+  [this id]
+  (.getTopologyInfo (:nimbus (. this state)) id))
+
+(defn -killTopologyWithOpts
+  [this name opts]
+  (.killTopologyWithOpts (:nimbus (. this state)) name opts))
+
+(defn -activate
+  [this name]
+  (.activate (:nimbus (. this state)) name))
+
+(defn -deactivate
+  [this name]
+  (.deactivate (:nimbus (. this state)) name))
+
+(defn -rebalance
+  [this name opts]
+  (.rebalance (:nimbus (. this state)) name opts))
+
+(defn -getState
+  [this]
+  (.state this))
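
For context, the class generated above is the in-process cluster used for tests and local development. A minimal usage sketch from Clojure, assuming storm-core is on the classpath and that `topology` is a StormTopology built elsewhere (the topology name and conf shown are illustrative):

    (import 'org.apache.storm.LocalCluster)

    (let [cluster (LocalCluster.)]
      (try
        ;; `topology` is assumed to be built elsewhere, e.g. with the Clojure DSL
        (.submitTopology cluster "demo-topology" {"topology.debug" true} topology)
        (Thread/sleep 10000)                 ;; let the topology run briefly
        (.killTopology cluster "demo-topology")
        (finally
          (.shutdown cluster))))
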

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/LocalDRPC.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/LocalDRPC.clj b/storm-core/src/clj/org/apache/storm/LocalDRPC.clj
new file mode 100644
index 0000000..21b2bd3
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/LocalDRPC.clj
@@ -0,0 +1,56 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+
+(ns org.apache.storm.LocalDRPC
+  (:require [org.apache.storm.daemon [drpc :as drpc]])
+  (:use [org.apache.storm config util])
+  (:import [org.apache.storm.utils InprocMessaging ServiceRegistry])
+  (:gen-class
+   :init init
+   :implements [org.apache.storm.ILocalDRPC]
+   :constructors {[] []}
+   :state state ))
+
+(defn -init []
+  (let [handler (drpc/service-handler (read-storm-config))
+        id (ServiceRegistry/registerService handler)
+        ]
+    [[] {:service-id id :handler handler}]
+    ))
+
+(defn -execute [this func funcArgs]
+  (.execute (:handler (. this state)) func funcArgs)
+  )
+
+(defn -result [this id result]
+  (.result (:handler (. this state)) id result)
+  )
+
+(defn -fetchRequest [this func]
+  (.fetchRequest (:handler (. this state)) func)
+  )
+
+(defn -failRequest [this id]
+  (.failRequest (:handler (. this state)) id)
+  )
+
+(defn -getServiceId [this]
+  (:service-id (. this state)))
+
+(defn -shutdown [this]
+  (ServiceRegistry/unregisterService (:service-id (. this state)))
+  (.shutdown (:handler (. this state)))
+  )
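
A brief lifecycle sketch for the class above, assuming storm-core is on the classpath; the "reach" function name is illustrative, and .execute only returns once a locally running topology wired to this DRPC instance produces a result:

    (import 'org.apache.storm.LocalDRPC)

    (let [drpc (LocalDRPC.)]
      (try
        (println "in-process DRPC registered as:" (.getServiceId drpc))
        ;; with a topology whose spout was built as (DRPCSpout. "reach" drpc),
        ;; the next call would block until that topology returns a result:
        ;; (.execute drpc "reach" "http://example.com")
        (finally
          (.shutdown drpc))))
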

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/MockAutoCred.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/MockAutoCred.clj b/storm-core/src/clj/org/apache/storm/MockAutoCred.clj
new file mode 100644
index 0000000..7e23c6b
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/MockAutoCred.clj
@@ -0,0 +1,58 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+
+;;mock implementation of INimbusCredentialPlugin,IAutoCredentials and ICredentialsRenewer for testing only.
+(ns org.apache.storm.MockAutoCred
+  (:use [org.apache.storm testing config])
+  (:import [org.apache.storm.security.INimbusCredentialPlugin]
+           [org.apache.storm.security.auth   ICredentialsRenewer])
+  (:gen-class
+    :implements [org.apache.storm.security.INimbusCredentialPlugin
+                 org.apache.storm.security.auth.IAutoCredentials
+                 org.apache.storm.security.auth.ICredentialsRenewer]))
+
+(def nimbus-cred-key "nimbusCredTestKey")
+(def nimbus-cred-val "nimbusTestCred")
+(def nimbus-cred-renew-val "renewedNimbusTestCred")
+(def gateway-cred-key "gatewayCredTestKey")
+(def gateway-cred-val "gatewayTestCred")
+(def gateway-cred-renew-val "renewedGatewayTestCred")
+
+(defn -populateCredentials
+  ([this creds conf]
+  (.put creds nimbus-cred-key nimbus-cred-val))
+  ([this creds]
+  (.put creds gateway-cred-key gateway-cred-val)))
+
+(defn -prepare
+  [this conf])
+
+(defn -renew
+  [this cred conf]
+  (.put cred nimbus-cred-key nimbus-cred-renew-val)
+  (.put cred gateway-cred-key gateway-cred-renew-val))
+
+(defn -populateSubject
+  [subject credentials]
+  (.add (.getPublicCredentials subject) (.get credentials nimbus-cred-key))
+  (.add (.getPublicCredentials subject) (.get credentials gateway-cred-key)))
+
+(defn -updateSubject
+  [subject credentials]
+  (-populateSubject subject credentials))
+
+
+
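
A small sketch of how a test might drive this mock directly, assuming the compiled org.apache.storm.MockAutoCred class is on the classpath; the mutable creds map and the empty conf maps are illustrative:

    (import 'org.apache.storm.MockAutoCred
            'java.util.HashMap)

    (let [plugin (MockAutoCred.)
          creds  (HashMap.)]                     ;; mutable map, since the mock calls .put
      (.populateCredentials plugin creds {})     ;; nimbus side: adds nimbusCredTestKey
      (.renew plugin creds {})                   ;; renewer: writes the renewed test values
      (println creds))
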

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/blobstore.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/blobstore.clj b/storm-core/src/clj/org/apache/storm/blobstore.clj
new file mode 100644
index 0000000..0b1c994
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/blobstore.clj
@@ -0,0 +1,28 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+
+(ns org.apache.storm.blobstore
+  (:import [org.apache.storm.utils Utils])
+  (:import [org.apache.storm.blobstore ClientBlobStore])
+  (:use [org.apache.storm config]))
+
+(defmacro with-configured-blob-client
+  [client-sym & body]
+  `(let [conf# (read-storm-config)
+         ^ClientBlobStore ~client-sym (Utils/getClientBlobStore conf#)]
+     (try
+       ~@body
+       (finally (.shutdown ~client-sym)))))
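
A usage sketch for the macro above, assuming a storm.yaml reachable by read-storm-config and a blobstore the configured client can talk to; listing keys is shown only as a convenient read-only call:

    (use 'org.apache.storm.blobstore)

    (with-configured-blob-client client
      ;; `client` is the ClientBlobStore built from read-storm-config; the macro
      ;; shuts it down when the body exits, even on exceptions.
      (println "blob keys:" (vec (iterator-seq (.listKeys client)))))
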

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/clojure.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/clojure.clj b/storm-core/src/clj/org/apache/storm/clojure.clj
new file mode 100644
index 0000000..ff33829
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/clojure.clj
@@ -0,0 +1,201 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+
+(ns org.apache.storm.clojure
+  (:use [org.apache.storm util])
+  (:import [org.apache.storm StormSubmitter])
+  (:import [org.apache.storm.generated StreamInfo])
+  (:import [org.apache.storm.tuple Tuple])
+  (:import [org.apache.storm.task OutputCollector IBolt TopologyContext])
+  (:import [org.apache.storm.spout SpoutOutputCollector ISpout])
+  (:import [org.apache.storm.utils Utils])
+  (:import [org.apache.storm.clojure ClojureBolt ClojureSpout])
+  (:import [java.util List])
+  (:require [org.apache.storm [thrift :as thrift]]))
+
+(defn direct-stream [fields]
+  (StreamInfo. fields true))
+
+(defn to-spec [avar]
+  (let [m (meta avar)]
+    [(str (:ns m)) (str (:name m))]))
+
+(defn clojure-bolt* [output-spec fn-var conf-fn-var args]
+  (ClojureBolt. (to-spec fn-var) (to-spec conf-fn-var) args (thrift/mk-output-spec output-spec)))
+
+(defmacro clojure-bolt [output-spec fn-sym conf-fn-sym args]
+  `(clojure-bolt* ~output-spec (var ~fn-sym) (var ~conf-fn-sym) ~args))
+
+(defn clojure-spout* [output-spec fn-var conf-var args]
+  (let [m (meta fn-var)]
+    (ClojureSpout. (to-spec fn-var) (to-spec conf-var) args (thrift/mk-output-spec output-spec))
+    ))
+
+(defmacro clojure-spout [output-spec fn-sym conf-sym args]
+  `(clojure-spout* ~output-spec (var ~fn-sym) (var ~conf-sym) ~args))
+
+(defn normalize-fns [body]
+  (for [[name args & impl] body
+        :let [args (-> "this"
+                       gensym
+                       (cons args)
+                       vec)]]
+    (concat [name args] impl)
+    ))
+
+(defmacro bolt [& body]
+  (let [[bolt-fns other-fns] (split-with #(not (symbol? %)) body)
+        fns (normalize-fns bolt-fns)]
+    `(reify IBolt
+       ~@fns
+       ~@other-fns)))
+
+(defmacro bolt-execute [& body]
+  `(bolt
+     (~'execute ~@body)))
+
+(defmacro spout [& body]
+  (let [[spout-fns other-fns] (split-with #(not (symbol? %)) body)
+        fns (normalize-fns spout-fns)]
+    `(reify ISpout
+       ~@fns
+       ~@other-fns)))
+
+(defmacro defbolt [name output-spec & [opts & impl :as all]]
+  (if-not (map? opts)
+    `(defbolt ~name ~output-spec {} ~@all)
+    (let [worker-name (symbol (str name "__"))
+          conf-fn-name (symbol (str name "__conf__"))
+          params (:params opts)
+          conf-code (:conf opts)
+          fn-body (if (:prepare opts)
+                    (cons 'fn impl)
+                    (let [[args & impl-body] impl
+                          coll-sym (nth args 1)
+                          args (vec (take 1 args))
+                          prepargs [(gensym "conf") (gensym "context") coll-sym]]
+                      `(fn ~prepargs (bolt (~'execute ~args ~@impl-body)))))
+          definer (if params
+                    `(defn ~name [& args#]
+                       (clojure-bolt ~output-spec ~worker-name ~conf-fn-name args#))
+                    `(def ~name
+                       (clojure-bolt ~output-spec ~worker-name ~conf-fn-name []))
+                    )
+          ]
+      `(do
+         (defn ~conf-fn-name ~(if params params [])
+           ~conf-code
+           )
+         (defn ~worker-name ~(if params params [])
+           ~fn-body
+           )
+         ~definer
+         ))))
+
+(defmacro defspout [name output-spec & [opts & impl :as all]]
+  (if-not (map? opts)
+    `(defspout ~name ~output-spec {} ~@all)
+    (let [worker-name (symbol (str name "__"))
+          conf-fn-name (symbol (str name "__conf__"))
+          params (:params opts)
+          conf-code (:conf opts)
+          prepare? (:prepare opts)
+          prepare? (if (nil? prepare?) true prepare?)
+          fn-body (if prepare?
+                    (cons 'fn impl)
+                    (let [[args & impl-body] impl
+                          coll-sym (first args)
+                          prepargs [(gensym "conf") (gensym "context") coll-sym]]
+                      `(fn ~prepargs (spout (~'nextTuple [] ~@impl-body)))))
+          definer (if params
+                    `(defn ~name [& args#]
+                       (clojure-spout ~output-spec ~worker-name ~conf-fn-name args#))
+                    `(def ~name
+                       (clojure-spout ~output-spec ~worker-name ~conf-fn-name []))
+                    )
+          ]
+      `(do
+         (defn ~conf-fn-name ~(if params params [])
+           ~conf-code
+           )
+         (defn ~worker-name ~(if params params [])
+           ~fn-body
+           )
+         ~definer
+         ))))
+
+(defprotocol TupleValues
+  (tuple-values [values collector stream]))
+
+(extend-protocol TupleValues
+  java.util.Map
+  (tuple-values [this collector ^String stream]
+    (let [^TopologyContext context (:context collector)
+          fields (..  context (getThisOutputFields stream) toList) ]
+      (vec (map (into
+                  (empty this) (for [[k v] this]
+                                   [(if (keyword? k) (name k) k) v]))
+                fields))))
+  java.util.List
+  (tuple-values [this collector stream]
+    this))
+
+(defnk emit-bolt! [collector values
+                   :stream Utils/DEFAULT_STREAM_ID :anchor []]
+  (let [^List anchor (collectify anchor)
+        values (tuple-values values collector stream) ]
+    (.emit ^OutputCollector (:output-collector collector) stream anchor values)
+    ))
+
+(defnk emit-direct-bolt! [collector task values
+                          :stream Utils/DEFAULT_STREAM_ID :anchor []]
+  (let [^List anchor (collectify anchor)
+        values (tuple-values values collector stream) ]
+    (.emitDirect ^OutputCollector (:output-collector collector) task stream anchor values)
+    ))
+
+(defn ack! [collector ^Tuple tuple]
+  (.ack ^OutputCollector (:output-collector collector) tuple))
+
+(defn fail! [collector ^Tuple tuple]
+  (.fail ^OutputCollector (:output-collector collector) tuple))
+
+(defn report-error! [collector ^Tuple tuple]
+  (.reportError ^OutputCollector (:output-collector collector) tuple))
+
+(defnk emit-spout! [collector values
+                    :stream Utils/DEFAULT_STREAM_ID :id nil]
+  (let [values (tuple-values values collector stream)]
+    (.emit ^SpoutOutputCollector (:output-collector collector) stream values id)))
+
+(defnk emit-direct-spout! [collector task values
+                           :stream Utils/DEFAULT_STREAM_ID :id nil]
+  (let [values (tuple-values values collector stream)]
+    (.emitDirect ^SpoutOutputCollector (:output-collector collector) task stream values id)))
+
+(defalias topology thrift/mk-topology)
+(defalias bolt-spec thrift/mk-bolt-spec)
+(defalias spout-spec thrift/mk-spout-spec)
+(defalias shell-bolt-spec thrift/mk-shell-bolt-spec)
+(defalias shell-spout-spec thrift/mk-shell-spout-spec)
+
+(defn submit-remote-topology [name conf topology]
+  (StormSubmitter/submitTopology name conf topology))
+
+(defn local-cluster []
+  ;; do this to avoid a cyclic dependency of
+  ;; LocalCluster -> testing -> nimbus -> bootstrap -> clojure -> LocalCluster
+  (eval '(new org.apache.storm.LocalCluster)))
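
To illustrate the DSL the namespace above defines, here is a minimal non-prepared bolt written with defbolt; it assumes storm-core is on the classpath, and the split-words name and the ["word"] output field are illustrative:

    (use 'org.apache.storm.clojure)

    (defbolt split-words ["word"] [tuple collector]
      ;; split the first value of the incoming tuple and emit one word per tuple,
      ;; anchoring each emit to the input tuple before acking it
      (doseq [w (.split (.getString tuple 0) " ")]
        (emit-bolt! collector [w] :anchor tuple))
      (ack! collector tuple))

In a topology definition this bolt would be wired up with bolt-spec, e.g. (bolt-spec {"sentence-spout" :shuffle} split-words :p 4), where the spout id and the parallelism are again illustrative.
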


http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/test/storm/kafka/KafkaUtilsTest.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/test/storm/kafka/KafkaUtilsTest.java b/external/storm-kafka/src/test/storm/kafka/KafkaUtilsTest.java
deleted file mode 100644
index eb694bb..0000000
--- a/external/storm-kafka/src/test/storm/kafka/KafkaUtilsTest.java
+++ /dev/null
@@ -1,295 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import static org.hamcrest.CoreMatchers.equalTo;
-import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertThat;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Properties;
-
-import kafka.api.OffsetRequest;
-import kafka.javaapi.consumer.SimpleConsumer;
-import kafka.javaapi.message.ByteBufferMessageSet;
-import kafka.message.MessageAndOffset;
-
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import storm.kafka.trident.GlobalPartitionInformation;
-import backtype.storm.spout.SchemeAsMultiScheme;
-import backtype.storm.utils.Utils;
-
-import com.google.common.collect.ImmutableMap;
-public class KafkaUtilsTest {
-    private String TEST_TOPIC = "testTopic";
-    private static final Logger LOG = LoggerFactory.getLogger(KafkaUtilsTest.class);
-    private KafkaTestBroker broker;
-    private SimpleConsumer simpleConsumer;
-    private KafkaConfig config;
-    private BrokerHosts brokerHosts;
-
-    @Before
-    public void setup() {
-        broker = new KafkaTestBroker();
-        GlobalPartitionInformation globalPartitionInformation = new GlobalPartitionInformation(TEST_TOPIC);
-        globalPartitionInformation.addPartition(0, Broker.fromString(broker.getBrokerConnectionString()));
-        brokerHosts = new StaticHosts(globalPartitionInformation);
-        config = new KafkaConfig(brokerHosts, TEST_TOPIC);
-        simpleConsumer = new SimpleConsumer("localhost", broker.getPort(), 60000, 1024, "testClient");
-    }
-
-    @After
-    public void shutdown() {
-        simpleConsumer.close();
-        broker.shutdown();
-    }
-
-
-    @Test(expected = FailedFetchException.class)
-    public void topicDoesNotExist() throws Exception {
-        KafkaUtils.fetchMessages(config, simpleConsumer, new Partition(Broker.fromString(broker.getBrokerConnectionString()), TEST_TOPIC, 0), 0);
-    }
-
-    @Test(expected = FailedFetchException.class)
-    public void brokerIsDown() throws Exception {
-        int port = broker.getPort();
-        broker.shutdown();
-        SimpleConsumer simpleConsumer = new SimpleConsumer("localhost", port, 100, 1024, "testClient");
-        try {
-            KafkaUtils.fetchMessages(config, simpleConsumer, new Partition(Broker.fromString(broker.getBrokerConnectionString()), TEST_TOPIC, 0), OffsetRequest.LatestTime());
-        } finally {
-            simpleConsumer.close();
-        }
-    }
-
-    @Test
-    public void fetchMessage() throws Exception {
-        String value = "test";
-        createTopicAndSendMessage(value);
-        long offset = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, OffsetRequest.LatestTime()) - 1;
-        ByteBufferMessageSet messageAndOffsets = KafkaUtils.fetchMessages(config, simpleConsumer,
-                new Partition(Broker.fromString(broker.getBrokerConnectionString()), TEST_TOPIC, 0), offset);
-        String message = new String(Utils.toByteArray(messageAndOffsets.iterator().next().message().payload()));
-        assertThat(message, is(equalTo(value)));
-    }
-
-    @Test(expected = FailedFetchException.class)
-    public void fetchMessagesWithInvalidOffsetAndDefaultHandlingDisabled() throws Exception {
-        config.useStartOffsetTimeIfOffsetOutOfRange = false;
-        KafkaUtils.fetchMessages(config, simpleConsumer,
-                new Partition(Broker.fromString(broker.getBrokerConnectionString()), TEST_TOPIC, 0), -99);
-    }
-
-    @Test(expected = TopicOffsetOutOfRangeException.class)
-    public void fetchMessagesWithInvalidOffsetAndDefaultHandlingEnabled() throws Exception {
-        config = new KafkaConfig(brokerHosts, "newTopic");
-        String value = "test";
-        createTopicAndSendMessage(value);
-        KafkaUtils.fetchMessages(config, simpleConsumer,
-                new Partition(Broker.fromString(broker.getBrokerConnectionString()), "newTopic", 0), -99);
-    }
-
-    @Test
-    public void getOffsetFromConfigAndDontForceFromStart() {
-        config.ignoreZkOffsets = false;
-        config.startOffsetTime = OffsetRequest.EarliestTime();
-        createTopicAndSendMessage();
-        long latestOffset = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, OffsetRequest.EarliestTime());
-        long offsetFromConfig = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, config);
-        assertThat(latestOffset, is(equalTo(offsetFromConfig)));
-    }
-
-    @Test
-    public void getOffsetFromConfigAndFroceFromStart() {
-        config.ignoreZkOffsets = true;
-        config.startOffsetTime = OffsetRequest.EarliestTime();
-        createTopicAndSendMessage();
-        long earliestOffset = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, OffsetRequest.EarliestTime());
-        long offsetFromConfig = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, config);
-        assertThat(earliestOffset, is(equalTo(offsetFromConfig)));
-    }
-
-    @Test
-    public void generateTuplesWithoutKeyAndKeyValueScheme() {
-        config.scheme = new KeyValueSchemeAsMultiScheme(new StringKeyValueScheme());
-        runGetValueOnlyTuplesTest();
-    }
-
-    @Test
-    public void generateTuplesWithKeyAndKeyValueScheme() {
-        config.scheme = new KeyValueSchemeAsMultiScheme(new StringKeyValueScheme());
-        config.useStartOffsetTimeIfOffsetOutOfRange = false;
-        String value = "value";
-        String key = "key";
-        createTopicAndSendMessage(key, value);
-        ByteBufferMessageSet messageAndOffsets = getLastMessage();
-        for (MessageAndOffset msg : messageAndOffsets) {
-            Iterable<List<Object>> lists = KafkaUtils.generateTuples(config, msg.message(), config.topic);
-            assertEquals(ImmutableMap.of(key, value), lists.iterator().next().get(0));
-        }
-    }
-
-    @Test
-    public void generateTupelsWithValueScheme() {
-        config.scheme = new SchemeAsMultiScheme(new StringScheme());
-        runGetValueOnlyTuplesTest();
-    }
-
-    @Test
-    public void generateTuplesWithValueAndStringMultiSchemeWithTopic() {
-        config.scheme = new StringMultiSchemeWithTopic();
-        String value = "value";
-        createTopicAndSendMessage(value);
-        ByteBufferMessageSet messageAndOffsets = getLastMessage();
-        for (MessageAndOffset msg : messageAndOffsets) {
-            Iterable<List<Object>> lists = KafkaUtils.generateTuples(config, msg.message(), config.topic);
-            List<Object> list = lists.iterator().next();
-            assertEquals(value, list.get(0));
-            assertEquals(config.topic, list.get(1));
-        }
-    }
-
-    @Test
-    public void generateTuplesWithValueSchemeAndKeyValueMessage() {
-        config.scheme = new SchemeAsMultiScheme(new StringScheme());
-        String value = "value";
-        String key = "key";
-        createTopicAndSendMessage(key, value);
-        ByteBufferMessageSet messageAndOffsets = getLastMessage();
-        for (MessageAndOffset msg : messageAndOffsets) {
-            Iterable<List<Object>> lists = KafkaUtils.generateTuples(config, msg.message(), config.topic);
-            assertEquals(value, lists.iterator().next().get(0));
-        }
-    }
-    
-    @Test
-    public void generateTuplesWithMessageAndMetadataScheme() {
-        String value = "value";
-        Partition mockPartition = Mockito.mock(Partition.class);
-        mockPartition.partition = 0;
-        long offset = 0L;
-        
-        MessageMetadataSchemeAsMultiScheme scheme = new MessageMetadataSchemeAsMultiScheme(new StringMessageAndMetadataScheme());
-        
-        createTopicAndSendMessage(null, value);
-        ByteBufferMessageSet messageAndOffsets = getLastMessage();
-        for (MessageAndOffset msg : messageAndOffsets) {
-            Iterable<List<Object>> lists = KafkaUtils.generateTuples(scheme, msg.message(), mockPartition, offset);
-            List<Object> values = lists.iterator().next(); 
-            assertEquals("Message is incorrect", value, values.get(0));
-            assertEquals("Partition is incorrect", mockPartition.partition, values.get(1));
-            assertEquals("Offset is incorrect", offset, values.get(2));
-        }
-    }
-
-    private ByteBufferMessageSet getLastMessage() {
-        long offsetOfLastMessage = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, OffsetRequest.LatestTime()) - 1;
-        return KafkaUtils.fetchMessages(config, simpleConsumer, new Partition(Broker.fromString(broker.getBrokerConnectionString()), TEST_TOPIC, 0), offsetOfLastMessage);
-    }
-
-    private void runGetValueOnlyTuplesTest() {
-        String value = "value";
-        
-        createTopicAndSendMessage(null, value);
-        ByteBufferMessageSet messageAndOffsets = getLastMessage();
-        for (MessageAndOffset msg : messageAndOffsets) {
-            Iterable<List<Object>> lists = KafkaUtils.generateTuples(config, msg.message(), config.topic);
-            assertEquals(value, lists.iterator().next().get(0));
-        }
-    }
-
-    private void createTopicAndSendMessage() {
-        createTopicAndSendMessage(null, "someValue");
-    }
-
-    private void createTopicAndSendMessage(String value) {
-        createTopicAndSendMessage(null, value);
-    }
-
-    private void createTopicAndSendMessage(String key, String value) {
-        Properties p = new Properties();
-        p.put("acks", "1");
-        p.put("bootstrap.servers", broker.getBrokerConnectionString());
-        p.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
-        p.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
-        p.put("metadata.fetch.timeout.ms", 1000);
-        KafkaProducer<String, String> producer = new KafkaProducer<String, String>(p);
-        try {
-            producer.send(new ProducerRecord<String, String>(config.topic, key, value)).get();
-        } catch (Exception e) {
-            LOG.error("Failed to do synchronous sending due to " + e, e);
-            Assert.fail(e.getMessage());
-        } finally {
-            producer.close();
-        }
-    }
-
-    @Test
-    public void assignOnePartitionPerTask() {
-        runPartitionToTaskMappingTest(16, 1);
-    }
-
-    @Test
-    public void assignTwoPartitionsPerTask() {
-        runPartitionToTaskMappingTest(16, 2);
-    }
-
-    @Test
-    public void assignAllPartitionsToOneTask() {
-        runPartitionToTaskMappingTest(32, 32);
-    }
-    
-    public void runPartitionToTaskMappingTest(int numPartitions, int partitionsPerTask) {
-        GlobalPartitionInformation globalPartitionInformation = TestUtils.buildPartitionInfo(numPartitions);
-        List<GlobalPartitionInformation> partitions = new ArrayList<GlobalPartitionInformation>();
-        partitions.add(globalPartitionInformation);
-        int numTasks = numPartitions / partitionsPerTask;
-        for (int i = 0 ; i < numTasks ; i++) {
-            assertEquals(partitionsPerTask, KafkaUtils.calculatePartitionsForTask(partitions, numTasks, i).size());
-        }
-    }
-
-    @Test
-    public void moreTasksThanPartitions() {
-        GlobalPartitionInformation globalPartitionInformation = TestUtils.buildPartitionInfo(1);
-        List<GlobalPartitionInformation> partitions = new ArrayList<GlobalPartitionInformation>();
-        partitions.add(globalPartitionInformation);
-        int numTasks = 2;
-        assertEquals(1, KafkaUtils.calculatePartitionsForTask(partitions, numTasks, 0).size());
-        assertEquals(0, KafkaUtils.calculatePartitionsForTask(partitions, numTasks, 1).size());
-    }
-
-    @Test (expected = IllegalArgumentException.class )
-    public void assignInvalidTask() {
-        GlobalPartitionInformation globalPartitionInformation = new GlobalPartitionInformation(TEST_TOPIC);
-        List<GlobalPartitionInformation> partitions = new ArrayList<GlobalPartitionInformation>();
-        partitions.add(globalPartitionInformation);
-        KafkaUtils.calculatePartitionsForTask(partitions, 1, 1);
-    }
-}

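The partition-to-task tests removed above pin down the assignment contract without restating the implementation. As a standalone illustration of the behaviour they assert, here is a round-robin by task index; the class below is a hypothetical sketch and is not the KafkaUtils code deleted in this hunk:

    import java.util.ArrayList;
    import java.util.List;

    // Sketch only: reproduces the contract exercised by assignOnePartitionPerTask,
    // moreTasksThanPartitions and assignInvalidTask, not the real KafkaUtils method.
    class RoundRobinAssignmentSketch {
        static List<Integer> partitionsForTask(int numPartitions, int numTasks, int taskIndex) {
            if (taskIndex < 0 || taskIndex >= numTasks) {
                throw new IllegalArgumentException("task index " + taskIndex + " is out of range");
            }
            List<Integer> assigned = new ArrayList<>();
            for (int partition = 0; partition < numPartitions; partition++) {
                if (partition % numTasks == taskIndex) {   // partitions cycle over the tasks
                    assigned.add(partition);
                }
            }
            return assigned;
        }
    }

With 16 partitions and 16 tasks every task gets exactly one partition, with 1 partition and 2 tasks only task 0 gets one, and an out-of-range task index throws, matching the assertions in the tests above.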
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/test/storm/kafka/StringKeyValueSchemeTest.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/test/storm/kafka/StringKeyValueSchemeTest.java b/external/storm-kafka/src/test/storm/kafka/StringKeyValueSchemeTest.java
deleted file mode 100644
index eddb900..0000000
--- a/external/storm-kafka/src/test/storm/kafka/StringKeyValueSchemeTest.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import backtype.storm.tuple.Fields;
-import com.google.common.collect.ImmutableMap;
-import org.junit.Test;
-
-import java.nio.ByteBuffer;
-import java.nio.charset.Charset;
-import java.util.Collections;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-public class StringKeyValueSchemeTest {
-
-    private StringKeyValueScheme scheme = new StringKeyValueScheme();
-
-    @Test
-    public void testDeserialize() throws Exception {
-        assertEquals(Collections.singletonList("test"), scheme.deserialize(wrapString("test")));
-    }
-
-    @Test
-    public void testGetOutputFields() throws Exception {
-        Fields outputFields = scheme.getOutputFields();
-        assertTrue(outputFields.contains(StringScheme.STRING_SCHEME_KEY));
-        assertEquals(1, outputFields.size());
-    }
-
-    @Test
-    public void testDeserializeWithNullKeyAndValue() throws Exception {
-        assertEquals(Collections.singletonList("test"),
-            scheme.deserializeKeyAndValue(null, wrapString("test")));
-    }
-
-    @Test
-    public void testDeserializeWithKeyAndValue() throws Exception {
-        assertEquals(Collections.singletonList(ImmutableMap.of("key", "test")),
-                scheme.deserializeKeyAndValue(wrapString("key"), wrapString("test")));
-    }
-
-    private static ByteBuffer wrapString(String s) {
-        return ByteBuffer.wrap(s.getBytes(Charset.defaultCharset()));
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/test/storm/kafka/TestStringScheme.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/test/storm/kafka/TestStringScheme.java b/external/storm-kafka/src/test/storm/kafka/TestStringScheme.java
deleted file mode 100644
index ae36409..0000000
--- a/external/storm-kafka/src/test/storm/kafka/TestStringScheme.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import org.junit.Test;
-
-import java.nio.ByteBuffer;
-import java.nio.charset.StandardCharsets;
-
-import static org.junit.Assert.assertEquals;
-
-public class TestStringScheme {
-  @Test
-  public void testDeserializeString() {
-    String s = "foo";
-    byte[] bytes = s.getBytes(StandardCharsets.UTF_8);
-    ByteBuffer direct = ByteBuffer.allocateDirect(bytes.length);
-    direct.put(bytes);
-    direct.flip();
-    String s1 = StringScheme.deserializeString(ByteBuffer.wrap(bytes));
-    String s2 = StringScheme.deserializeString(direct);
-    assertEquals(s, s1);
-    assertEquals(s, s2);
-  }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/test/storm/kafka/TestUtils.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/test/storm/kafka/TestUtils.java b/external/storm-kafka/src/test/storm/kafka/TestUtils.java
deleted file mode 100644
index 3e69160..0000000
--- a/external/storm-kafka/src/test/storm/kafka/TestUtils.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import backtype.storm.Config;
-import backtype.storm.utils.Utils;
-import kafka.api.OffsetRequest;
-import kafka.javaapi.consumer.SimpleConsumer;
-import kafka.javaapi.message.ByteBufferMessageSet;
-import kafka.message.Message;
-import kafka.message.MessageAndOffset;
-import storm.kafka.bolt.KafkaBolt;
-import storm.kafka.trident.GlobalPartitionInformation;
-
-import java.nio.ByteBuffer;
-import java.util.*;
-
-import static org.junit.Assert.assertEquals;
-
-public class TestUtils {
-
-    public static final String TOPIC = "test";
-
-    public static GlobalPartitionInformation buildPartitionInfo(int numPartitions) {
-        return buildPartitionInfo(numPartitions, 9092);
-    }
-
-    public static List<GlobalPartitionInformation> buildPartitionInfoList(GlobalPartitionInformation partitionInformation) {
-        List<GlobalPartitionInformation> partitions = new ArrayList<GlobalPartitionInformation>();
-        partitions.add(partitionInformation);
-        return partitions;
-    }
-
-    public static GlobalPartitionInformation buildPartitionInfo(int numPartitions, int brokerPort) {
-        GlobalPartitionInformation globalPartitionInformation = new GlobalPartitionInformation(TOPIC);
-        for (int i = 0; i < numPartitions; i++) {
-            globalPartitionInformation.addPartition(i, Broker.fromString("broker-" + i + ":" + brokerPort));
-        }
-        return globalPartitionInformation;
-    }
-
-    public static SimpleConsumer getKafkaConsumer(KafkaTestBroker broker) {
-        SimpleConsumer simpleConsumer = new SimpleConsumer("localhost", broker.getPort(), 60000, 1024, "testClient");
-        return simpleConsumer;
-    }
-
-    public static KafkaConfig getKafkaConfig(KafkaTestBroker broker) {
-        BrokerHosts brokerHosts = getBrokerHosts(broker);
-        KafkaConfig kafkaConfig = new KafkaConfig(brokerHosts, TOPIC);
-        return kafkaConfig;
-    }
-
-    private static BrokerHosts getBrokerHosts(KafkaTestBroker broker) {
-        GlobalPartitionInformation globalPartitionInformation = new GlobalPartitionInformation(TOPIC);
-        globalPartitionInformation.addPartition(0, Broker.fromString(broker.getBrokerConnectionString()));
-        return new StaticHosts(globalPartitionInformation);
-    }
-
-    public static Properties getProducerProperties(String brokerConnectionString) {
-        Properties props = new Properties();
-        props.put("bootstrap.servers", brokerConnectionString);
-        props.put("acks", "1");
-        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
-        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
-        return props;
-    }
-
-    public static boolean verifyMessage(String key, String message, KafkaTestBroker broker, SimpleConsumer simpleConsumer) {
-        long lastMessageOffset = KafkaUtils.getOffset(simpleConsumer, TestUtils.TOPIC, 0, OffsetRequest.LatestTime()) - 1;
-        ByteBufferMessageSet messageAndOffsets = KafkaUtils.fetchMessages(TestUtils.getKafkaConfig(broker), simpleConsumer,
-                new Partition(Broker.fromString(broker.getBrokerConnectionString()),TestUtils.TOPIC, 0), lastMessageOffset);
-        MessageAndOffset messageAndOffset = messageAndOffsets.iterator().next();
-        Message kafkaMessage = messageAndOffset.message();
-        ByteBuffer messageKeyBuffer = kafkaMessage.key();
-        String keyString = null;
-        String messageString = new String(Utils.toByteArray(kafkaMessage.payload()));
-        if (messageKeyBuffer != null) {
-            keyString = new String(Utils.toByteArray(messageKeyBuffer));
-        }
-        assertEquals(key, keyString);
-        assertEquals(message, messageString);
-        return true;
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/test/storm/kafka/TridentKafkaTest.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/test/storm/kafka/TridentKafkaTest.java b/external/storm-kafka/src/test/storm/kafka/TridentKafkaTest.java
deleted file mode 100644
index 8213b07..0000000
--- a/external/storm-kafka/src/test/storm/kafka/TridentKafkaTest.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import backtype.storm.tuple.Fields;
-import kafka.javaapi.consumer.SimpleConsumer;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import storm.kafka.trident.TridentKafkaState;
-import storm.kafka.trident.mapper.FieldNameBasedTupleToKafkaMapper;
-import storm.kafka.trident.mapper.TridentTupleToKafkaMapper;
-import storm.kafka.trident.selector.DefaultTopicSelector;
-import storm.kafka.trident.selector.KafkaTopicSelector;
-import storm.trident.tuple.TridentTuple;
-import storm.trident.tuple.TridentTupleView;
-
-import java.util.ArrayList;
-import java.util.List;
-
-public class TridentKafkaTest {
-    private KafkaTestBroker broker;
-    private TridentKafkaState state;
-    private SimpleConsumer simpleConsumer;
-
-    @Before
-    public void setup() {
-        broker = new KafkaTestBroker();
-        simpleConsumer = TestUtils.getKafkaConsumer(broker);
-        TridentTupleToKafkaMapper mapper = new FieldNameBasedTupleToKafkaMapper("key", "message");
-        KafkaTopicSelector topicSelector = new DefaultTopicSelector(TestUtils.TOPIC);
-        state = new TridentKafkaState()
-                .withKafkaTopicSelector(topicSelector)
-                .withTridentTupleToKafkaMapper(mapper);
-        state.prepare(TestUtils.getProducerProperties(broker.getBrokerConnectionString()));
-    }
-
-    @Test
-    public void testKeyValue() {
-        String keyString = "key-123";
-        String valString = "message-123";
-        int batchSize = 10;
-
-        List<TridentTuple> tridentTuples = generateTupleBatch(keyString, valString, batchSize);
-
-        state.updateState(tridentTuples, null);
-
-        for(int i = 0 ; i < batchSize ; i++) {
-            TestUtils.verifyMessage(keyString, valString, broker, simpleConsumer);
-        }
-    }
-
-    private List<TridentTuple> generateTupleBatch(String key, String message, int batchsize) {
-        List<TridentTuple> batch = new ArrayList<>();
-        for(int i =0 ; i < batchsize; i++) {
-            batch.add(TridentTupleView.createFreshTuple(new Fields("key", "message"), key, message));
-        }
-        return batch;
-    }
-
-    @After
-    public void shutdown() {
-        simpleConsumer.close();
-        broker.shutdown();
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/test/storm/kafka/TridentKafkaTopology.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/test/storm/kafka/TridentKafkaTopology.java b/external/storm-kafka/src/test/storm/kafka/TridentKafkaTopology.java
deleted file mode 100644
index b9e25e4..0000000
--- a/external/storm-kafka/src/test/storm/kafka/TridentKafkaTopology.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
-import com.google.common.collect.ImmutableMap;
-import storm.kafka.trident.TridentKafkaStateFactory;
-import storm.kafka.trident.TridentKafkaUpdater;
-import storm.kafka.trident.mapper.FieldNameBasedTupleToKafkaMapper;
-import storm.kafka.trident.selector.DefaultTopicSelector;
-import storm.trident.Stream;
-import storm.trident.TridentTopology;
-import storm.trident.testing.FixedBatchSpout;
-
-import java.util.Properties;
-
-public class TridentKafkaTopology {
-
-    private static StormTopology buildTopology(String brokerConnectionString) {
-        Fields fields = new Fields("word", "count");
-        FixedBatchSpout spout = new FixedBatchSpout(fields, 4,
-                new Values("storm", "1"),
-                new Values("trident", "1"),
-                new Values("needs", "1"),
-                new Values("javadoc", "1")
-        );
-        spout.setCycle(true);
-
-        TridentTopology topology = new TridentTopology();
-        Stream stream = topology.newStream("spout1", spout);
-
-        Properties props = new Properties();
-        props.put("bootstrap.servers", brokerConnectionString);
-        props.put("acks", "1");
-        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
-        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
-
-        TridentKafkaStateFactory stateFactory = new TridentKafkaStateFactory()
-            .withProducerProperties(props)
-            .withKafkaTopicSelector(new DefaultTopicSelector("test"))
-            .withTridentTupleToKafkaMapper(new FieldNameBasedTupleToKafkaMapper("word", "count"));
-        stream.partitionPersist(stateFactory, fields, new TridentKafkaUpdater(), new Fields());
-
-        return topology.build();
-    }
-
-    /**
-     * To run this topology, ensure a Kafka broker is running and pass its connection string as the first argument.
-     * Create the "test" topic from the command line:
-     * kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test
-     *
-     * Run this program, then start the Kafka console consumer:
-     * kafka-console-consumer.sh --zookeeper localhost:2181 --topic test --from-beginning
-     *
-     * You should see the messages flowing through.
-     *
-     * @param args args[0] is the Kafka broker connection string, e.g. localhost:9092
-     * @throws Exception
-     */
-    public static void main(String[] args) throws Exception {
-        if (args.length < 1) {
-            System.out.println("Please provide the Kafka broker URL, e.g. localhost:9092");
-            return;
-        }
-
-        LocalCluster cluster = new LocalCluster();
-        cluster.submitTopology("wordCounter", new Config(), buildTopology(args[0]));
-        Thread.sleep(60 * 1000);
-        cluster.killTopology("wordCounter");
-
-        cluster.shutdown();
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/test/storm/kafka/ZkCoordinatorTest.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/test/storm/kafka/ZkCoordinatorTest.java b/external/storm-kafka/src/test/storm/kafka/ZkCoordinatorTest.java
deleted file mode 100644
index 48ca60f..0000000
--- a/external/storm-kafka/src/test/storm/kafka/ZkCoordinatorTest.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import backtype.storm.Config;
-import org.apache.curator.test.TestingServer;
-import kafka.javaapi.consumer.SimpleConsumer;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-
-import java.util.*;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyInt;
-import static org.mockito.Mockito.when;
-
-public class ZkCoordinatorTest {
-
-
-    @Mock
-    private DynamicBrokersReader reader;
-
-    @Mock
-    private DynamicPartitionConnections dynamicPartitionConnections;
-
-    private KafkaTestBroker broker = new KafkaTestBroker();
-    private TestingServer server;
-    private Map stormConf = new HashMap();
-    private SpoutConfig spoutConfig;
-    private ZkState state;
-    private SimpleConsumer simpleConsumer;
-
-    @Before
-    public void setUp() throws Exception {
-        MockitoAnnotations.initMocks(this);
-        server = new TestingServer();
-        String connectionString = server.getConnectString();
-        ZkHosts hosts = new ZkHosts(connectionString);
-        hosts.refreshFreqSecs = 1;
-        spoutConfig = new SpoutConfig(hosts, "topic", "/test", "id");
-        Map conf = buildZookeeperConfig(server);
-        state = new ZkState(conf);
-        simpleConsumer = new SimpleConsumer("localhost", broker.getPort(), 60000, 1024, "testClient");
-        when(dynamicPartitionConnections.register(any(Broker.class), any(String.class) ,anyInt())).thenReturn(simpleConsumer);
-    }
-
-    private Map buildZookeeperConfig(TestingServer server) {
-        Map conf = new HashMap();
-        conf.put(Config.TRANSACTIONAL_ZOOKEEPER_PORT, server.getPort());
-        conf.put(Config.TRANSACTIONAL_ZOOKEEPER_SERVERS, Arrays.asList("localhost"));
-        conf.put(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT, 20000);
-        conf.put(Config.STORM_ZOOKEEPER_CONNECTION_TIMEOUT, 20000);
-        conf.put(Config.STORM_ZOOKEEPER_RETRY_TIMES, 3);
-        conf.put(Config.STORM_ZOOKEEPER_RETRY_INTERVAL, 30);
-        return conf;
-    }
-
-    @After
-    public void shutdown() throws Exception {
-        simpleConsumer.close();
-        broker.shutdown();
-        server.close();
-    }
-
-    @Test
-    public void testOnePartitionPerTask() throws Exception {
-        int totalTasks = 64;
-        int partitionsPerTask = 1;
-        List<ZkCoordinator> coordinatorList = buildCoordinators(totalTasks / partitionsPerTask);
-        when(reader.getBrokerInfo()).thenReturn(TestUtils.buildPartitionInfoList(TestUtils.buildPartitionInfo(totalTasks)));
-        for (ZkCoordinator coordinator : coordinatorList) {
-            List<PartitionManager> myManagedPartitions = coordinator.getMyManagedPartitions();
-            assertEquals(partitionsPerTask, myManagedPartitions.size());
-            assertEquals(coordinator._taskIndex, myManagedPartitions.get(0).getPartition().partition);
-        }
-    }
-
-
-    @Test
-    public void testPartitionsChange() throws Exception {
-        final int totalTasks = 64;
-        int partitionsPerTask = 2;
-        List<ZkCoordinator> coordinatorList = buildCoordinators(totalTasks / partitionsPerTask);
-        when(reader.getBrokerInfo()).thenReturn(TestUtils.buildPartitionInfoList(TestUtils.buildPartitionInfo(totalTasks, 9092)));
-        List<List<PartitionManager>> partitionManagersBeforeRefresh = getPartitionManagers(coordinatorList);
-        waitForRefresh();
-        when(reader.getBrokerInfo()).thenReturn(TestUtils.buildPartitionInfoList(TestUtils.buildPartitionInfo(totalTasks, 9093)));
-        List<List<PartitionManager>> partitionManagersAfterRefresh = getPartitionManagers(coordinatorList);
-        assertEquals(partitionManagersBeforeRefresh.size(), partitionManagersAfterRefresh.size());
-        Iterator<List<PartitionManager>> iterator = partitionManagersAfterRefresh.iterator();
-        for (List<PartitionManager> partitionManagersBefore : partitionManagersBeforeRefresh) {
-            List<PartitionManager> partitionManagersAfter = iterator.next();
-            assertPartitionsAreDifferent(partitionManagersBefore, partitionManagersAfter, partitionsPerTask);
-        }
-    }
-
-    private void assertPartitionsAreDifferent(List<PartitionManager> partitionManagersBefore, List<PartitionManager> partitionManagersAfter, int partitionsPerTask) {
-        assertEquals(partitionsPerTask, partitionManagersBefore.size());
-        assertEquals(partitionManagersBefore.size(), partitionManagersAfter.size());
-        for (int i = 0; i < partitionsPerTask; i++) {
-            assertNotEquals(partitionManagersBefore.get(i).getPartition(), partitionManagersAfter.get(i).getPartition());
-        }
-
-    }
-
-    private List<List<PartitionManager>> getPartitionManagers(List<ZkCoordinator> coordinatorList) {
-        List<List<PartitionManager>> partitions = new ArrayList();
-        for (ZkCoordinator coordinator : coordinatorList) {
-            partitions.add(coordinator.getMyManagedPartitions());
-        }
-        return partitions;
-    }
-
-    private void waitForRefresh() throws InterruptedException {
-        Thread.sleep(((ZkHosts) spoutConfig.hosts).refreshFreqSecs * 1000 + 1);
-    }
-
-    private List<ZkCoordinator> buildCoordinators(int totalTasks) {
-        List<ZkCoordinator> coordinatorList = new ArrayList<ZkCoordinator>();
-        for (int i = 0; i < totalTasks; i++) {
-            ZkCoordinator coordinator = new ZkCoordinator(dynamicPartitionConnections, stormConf, spoutConfig, state, i, totalTasks, "test-id", reader);
-            coordinatorList.add(coordinator);
-        }
-        return coordinatorList;
-    }
-
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/test/storm/kafka/bolt/KafkaBoltTest.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/test/storm/kafka/bolt/KafkaBoltTest.java b/external/storm-kafka/src/test/storm/kafka/bolt/KafkaBoltTest.java
deleted file mode 100644
index f3aee76..0000000
--- a/external/storm-kafka/src/test/storm/kafka/bolt/KafkaBoltTest.java
+++ /dev/null
@@ -1,341 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka.bolt;
-
-import backtype.storm.Config;
-import backtype.storm.Constants;
-import backtype.storm.task.GeneralTopologyContext;
-import backtype.storm.task.IOutputCollector;
-import backtype.storm.task.OutputCollector;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.TupleImpl;
-import backtype.storm.tuple.Values;
-import backtype.storm.utils.TupleUtils;
-import backtype.storm.utils.Utils;
-import com.google.common.collect.ImmutableList;
-import kafka.api.OffsetRequest;
-import kafka.api.FetchRequest;
-import kafka.javaapi.FetchResponse;
-import kafka.javaapi.OffsetResponse;
-import kafka.javaapi.consumer.SimpleConsumer;
-import kafka.javaapi.message.ByteBufferMessageSet;
-import kafka.message.Message;
-import kafka.message.MessageAndOffset;
-import org.apache.kafka.clients.producer.Callback;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.junit.*;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-import org.mockito.internal.util.reflection.Whitebox;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
-import storm.kafka.*;
-import storm.kafka.trident.GlobalPartitionInformation;
-
-import java.nio.ByteBuffer;
-import java.util.List;
-import java.util.HashMap;
-import java.util.Properties;
-import java.util.concurrent.Future;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.*;
-
-public class KafkaBoltTest {
-
-    private static final String TEST_TOPIC = "test-topic";
-    private KafkaTestBroker broker;
-    private KafkaBolt bolt;
-    private Config config = new Config();
-    private KafkaConfig kafkaConfig;
-    private SimpleConsumer simpleConsumer;
-
-    @Mock
-    private IOutputCollector collector;
-
-    @Before
-    public void initMocks() {
-        MockitoAnnotations.initMocks(this);
-        broker = new KafkaTestBroker();
-        setupKafkaConsumer();
-        config.put(KafkaBolt.TOPIC, TEST_TOPIC);
-        bolt = generateStringSerializerBolt();
-    }
-
-    @After
-    public void shutdown() {
-        simpleConsumer.close();
-        broker.shutdown();
-        bolt.cleanup();
-    }
-
-    private void setupKafkaConsumer() {
-        GlobalPartitionInformation globalPartitionInformation = new GlobalPartitionInformation(TEST_TOPIC);
-        globalPartitionInformation.addPartition(0, Broker.fromString(broker.getBrokerConnectionString()));
-        BrokerHosts brokerHosts = new StaticHosts(globalPartitionInformation);
-        kafkaConfig = new KafkaConfig(brokerHosts, TEST_TOPIC);
-        simpleConsumer = new SimpleConsumer("localhost", broker.getPort(), 60000, 1024, "testClient");
-    }
-
-    @Test
-    public void shouldAcknowledgeTickTuples() throws Exception {
-        // Given
-        Tuple tickTuple = mockTickTuple();
-
-        // When
-        bolt.execute(tickTuple);
-
-        // Then
-        verify(collector).ack(tickTuple);
-    }
-
-    @Test
-    public void executeWithKey() throws Exception {
-        String message = "value-123";
-        String key = "key-123";
-        Tuple tuple = generateTestTuple(key, message);
-        bolt.execute(tuple);
-        verify(collector).ack(tuple);
-        verifyMessage(key, message);
-    }
-
-    /* test synchronous sending */
-    @Test
-    public void executeWithByteArrayKeyAndMessageSync() {
-        boolean async = false;
-        boolean fireAndForget = false;
-        bolt = generateDefaultSerializerBolt(async, fireAndForget, null);
-        String keyString = "test-key";
-        String messageString = "test-message";
-        byte[] key = keyString.getBytes();
-        byte[] message = messageString.getBytes();
-        Tuple tuple = generateTestTuple(key, message);
-        bolt.execute(tuple);
-        verify(collector).ack(tuple);
-        verifyMessage(keyString, messageString);
-    }
-
-    /* test asynchronous sending (default) */
-    @Test
-    public void executeWithByteArrayKeyAndMessageAsync() {
-        boolean async = true;
-        boolean fireAndForget = false;
-        String keyString = "test-key";
-        String messageString = "test-message";
-        byte[] key = keyString.getBytes();
-        byte[] message = messageString.getBytes();
-        final Tuple tuple = generateTestTuple(key, message);
-
-        final ByteBufferMessageSet mockMsg = mockSingleMessage(key, message);
-        simpleConsumer.close();
-        simpleConsumer = mockSimpleConsumer(mockMsg);
-        KafkaProducer<?, ?> producer = mock(KafkaProducer.class);
-        when(producer.send(any(ProducerRecord.class), any(Callback.class))).thenAnswer(new Answer<Future>() {
-            @Override
-            public Future answer(InvocationOnMock invocationOnMock) throws Throwable {
-                Callback cb = (Callback) invocationOnMock.getArguments()[1];
-                cb.onCompletion(null, null);
-                return mock(Future.class);
-            }
-        });
-        bolt = generateDefaultSerializerBolt(async, fireAndForget, producer);
-        bolt.execute(tuple);
-        verify(collector).ack(tuple);
-        verifyMessage(keyString, messageString);
-    }
-
-    /* test with fireAndForget option enabled */
-    @Test
-    public void executeWithByteArrayKeyAndMessageFire() {
-        boolean async = true;
-        boolean fireAndForget = true;
-        bolt = generateDefaultSerializerBolt(async, fireAndForget, null);
-        String keyString = "test-key";
-        String messageString = "test-message";
-        byte[] key = keyString.getBytes();
-        byte[] message = messageString.getBytes();
-        Tuple tuple = generateTestTuple(key, message);
-        final ByteBufferMessageSet mockMsg = mockSingleMessage(key, message);
-        simpleConsumer.close();
-        simpleConsumer = mockSimpleConsumer(mockMsg);
-        KafkaProducer<?, ?> producer = mock(KafkaProducer.class);
-        // do not invoke the callback of send() in order to test whether the bolt handles the fireAndForget option
-        // properly.
-        doReturn(mock(Future.class)).when(producer).send(any(ProducerRecord.class), any(Callback.class));
-        bolt.execute(tuple);
-        verify(collector).ack(tuple);
-        verifyMessage(keyString, messageString);
-    }
-
-    /* test bolt specified properties */
-    @Test
-    public void executeWithBoltSpecifiedProperties() {
-        boolean async = false;
-        boolean fireAndForget = false;
-        bolt = defaultSerializerBoltWithSpecifiedProperties(async, fireAndForget);
-        String keyString = "test-key";
-        String messageString = "test-message";
-        byte[] key = keyString.getBytes();
-        byte[] message = messageString.getBytes();
-        Tuple tuple = generateTestTuple(key, message);
-        bolt.execute(tuple);
-        verify(collector).ack(tuple);
-        verifyMessage(keyString, messageString);
-    }
-
-    private KafkaBolt generateStringSerializerBolt() {
-        Properties props = new Properties();
-        props.put("acks", "1");
-        props.put("bootstrap.servers", broker.getBrokerConnectionString());
-        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
-        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
-        props.put("metadata.fetch.timeout.ms", 1000);
-        KafkaBolt bolt = new KafkaBolt().withProducerProperties(props);
-        bolt.prepare(config, null, new OutputCollector(collector));
-        bolt.setAsync(false);
-        return bolt;
-    }
-
-    private KafkaBolt generateDefaultSerializerBolt(boolean async, boolean fireAndForget,
-                                                    KafkaProducer<?, ?> mockProducer) {
-        Properties props = new Properties();
-        props.put("acks", "1");
-        props.put("bootstrap.servers", broker.getBrokerConnectionString());
-        props.put("key.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
-        props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
-        props.put("metadata.fetch.timeout.ms", 1000);
-        props.put("linger.ms", 0);
-        KafkaBolt bolt = new KafkaBolt().withProducerProperties(props);
-        bolt.prepare(config, null, new OutputCollector(collector));
-        bolt.setAsync(async);
-        bolt.setFireAndForget(fireAndForget);
-        if (mockProducer != null) {
-            Whitebox.setInternalState(bolt, "producer", mockProducer);
-        }
-        return bolt;
-    }
-
-    private KafkaBolt defaultSerializerBoltWithSpecifiedProperties(boolean async, boolean fireAndForget) {
-        Properties props = new Properties();
-        props.put("acks", "1");
-        props.put("bootstrap.servers", broker.getBrokerConnectionString());
-        props.put("key.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
-        props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
-        props.put("metadata.fetch.timeout.ms", 1000);
-        props.put("linger.ms", 0);
-        KafkaBolt bolt = new KafkaBolt().withProducerProperties(props);
-        bolt.prepare(config, null, new OutputCollector(collector));
-        bolt.setAsync(async);
-        bolt.setFireAndForget(fireAndForget);
-        return bolt;
-    }
-
-    @Test
-    public void executeWithoutKey() throws Exception {
-        String message = "value-234";
-        Tuple tuple = generateTestTuple(message);
-        bolt.execute(tuple);
-        verify(collector).ack(tuple);
-        verifyMessage(null, message);
-    }
-
-
-    @Test
-    public void executeWithBrokerDown() throws Exception {
-        broker.shutdown();
-        String message = "value-234";
-        Tuple tuple = generateTestTuple(message);
-        bolt.execute(tuple);
-        verify(collector).fail(tuple);
-    }
-
-    private boolean verifyMessage(String key, String message) {
-        long lastMessageOffset = KafkaUtils.getOffset(simpleConsumer, kafkaConfig.topic, 0, OffsetRequest.LatestTime()) - 1;
-        ByteBufferMessageSet messageAndOffsets = KafkaUtils.fetchMessages(kafkaConfig, simpleConsumer,
-                new Partition(Broker.fromString(broker.getBrokerConnectionString()),kafkaConfig.topic, 0), lastMessageOffset);
-        MessageAndOffset messageAndOffset = messageAndOffsets.iterator().next();
-        Message kafkaMessage = messageAndOffset.message();
-        ByteBuffer messageKeyBuffer = kafkaMessage.key();
-        String keyString = null;
-        String messageString = new String(Utils.toByteArray(kafkaMessage.payload()));
-        if (messageKeyBuffer != null) {
-            keyString = new String(Utils.toByteArray(messageKeyBuffer));
-        }
-        assertEquals(key, keyString);
-        assertEquals(message, messageString);
-        return true;
-    }
-
-    private Tuple generateTestTuple(Object key, Object message) {
-        TopologyBuilder builder = new TopologyBuilder();
-        GeneralTopologyContext topologyContext = new GeneralTopologyContext(builder.createTopology(), new Config(), new HashMap(), new HashMap(), new HashMap(), "") {
-            @Override
-            public Fields getComponentOutputFields(String componentId, String streamId) {
-                return new Fields("key", "message");
-            }
-        };
-        return new TupleImpl(topologyContext, new Values(key, message), 1, "");
-    }
-
-    private Tuple generateTestTuple(Object message) {
-        TopologyBuilder builder = new TopologyBuilder();
-        GeneralTopologyContext topologyContext = new GeneralTopologyContext(builder.createTopology(), new Config(), new HashMap(), new HashMap(), new HashMap(), "") {
-            @Override
-            public Fields getComponentOutputFields(String componentId, String streamId) {
-                return new Fields("message");
-            }
-        };
-        return new TupleImpl(topologyContext, new Values(message), 1, "");
-    }
-
-    private Tuple mockTickTuple() {
-        Tuple tuple = mock(Tuple.class);
-        when(tuple.getSourceComponent()).thenReturn(Constants.SYSTEM_COMPONENT_ID);
-        when(tuple.getSourceStreamId()).thenReturn(Constants.SYSTEM_TICK_STREAM_ID);
-        // Sanity check
-        assertTrue(TupleUtils.isTick(tuple));
-        return tuple;
-    }
-
-    private static ByteBufferMessageSet mockSingleMessage(byte[] key, byte[] message) {
-        ByteBufferMessageSet sets = mock(ByteBufferMessageSet.class);
-        MessageAndOffset msg = mock(MessageAndOffset.class);
-        final List<MessageAndOffset> msgs = ImmutableList.of(msg);
-        doReturn(msgs.iterator()).when(sets).iterator();
-        Message kafkaMessage = mock(Message.class);
-        doReturn(ByteBuffer.wrap(key)).when(kafkaMessage).key();
-        doReturn(ByteBuffer.wrap(message)).when(kafkaMessage).payload();
-        doReturn(kafkaMessage).when(msg).message();
-        return sets;
-    }
-
-    private static SimpleConsumer mockSimpleConsumer(ByteBufferMessageSet mockMsg) {
-        SimpleConsumer simpleConsumer = mock(SimpleConsumer.class);
-        FetchResponse resp = mock(FetchResponse.class);
-        doReturn(resp).when(simpleConsumer).fetch(any(FetchRequest.class));
-        OffsetResponse mockOffsetResponse = mock(OffsetResponse.class);
-        doReturn(new long[] {}).when(mockOffsetResponse).offsets(anyString(), anyInt());
-        doReturn(mockOffsetResponse).when(simpleConsumer).getOffsetsBefore(any(kafka.javaapi.OffsetRequest.class));
-        doReturn(mockMsg).when(resp).messageSet(anyString(), anyInt());
-        return simpleConsumer;
-    }
-}

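The KafkaBolt tests removed above verify that a successfully completed asynchronous send acks the input tuple and that a failed send (broker down) fails it. A minimal sketch of that ack/fail contract, written against the post-migration package names this commit introduces; the class and method below are hypothetical and are not the KafkaBolt implementation itself:

    import org.apache.kafka.clients.producer.Callback;
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.clients.producer.RecordMetadata;
    import org.apache.storm.task.OutputCollector;
    import org.apache.storm.tuple.Tuple;

    // Sketch only: the behaviour the async tests assert, expressed as a producer callback.
    class AsyncSendSketch {
        static void send(KafkaProducer<byte[], byte[]> producer, ProducerRecord<byte[], byte[]> record,
                         final OutputCollector collector, final Tuple input) {
            producer.send(record, new Callback() {
                @Override
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    if (exception == null) {
                        collector.ack(input);   // success: the async test expects an ack
                    } else {
                        collector.fail(input);  // error: the tests expect the tuple to be failed
                    }
                }
            });
        }
    }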
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-metrics/src/main/java/org/apache/storm/metrics/hdrhistogram/HistogramMetric.java
----------------------------------------------------------------------
diff --git a/external/storm-metrics/src/main/java/org/apache/storm/metrics/hdrhistogram/HistogramMetric.java b/external/storm-metrics/src/main/java/org/apache/storm/metrics/hdrhistogram/HistogramMetric.java
index 4adc500..7f58a3d 100644
--- a/external/storm-metrics/src/main/java/org/apache/storm/metrics/hdrhistogram/HistogramMetric.java
+++ b/external/storm-metrics/src/main/java/org/apache/storm/metrics/hdrhistogram/HistogramMetric.java
@@ -17,7 +17,7 @@
  */
 package org.apache.storm.metrics.hdrhistogram;
 
-import backtype.storm.metric.api.IMetric;
+import org.apache.storm.metric.api.IMetric;
 import org.HdrHistogram.Histogram;
 
 /**

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-metrics/src/main/java/org/apache/storm/metrics/sigar/CPUMetric.java
----------------------------------------------------------------------
diff --git a/external/storm-metrics/src/main/java/org/apache/storm/metrics/sigar/CPUMetric.java b/external/storm-metrics/src/main/java/org/apache/storm/metrics/sigar/CPUMetric.java
index a3addc9..7c6d75e 100644
--- a/external/storm-metrics/src/main/java/org/apache/storm/metrics/sigar/CPUMetric.java
+++ b/external/storm-metrics/src/main/java/org/apache/storm/metrics/sigar/CPUMetric.java
@@ -20,7 +20,7 @@ package org.apache.storm.metrics.sigar;
 import org.hyperic.sigar.Sigar;
 import org.hyperic.sigar.ProcCpu;
 
-import backtype.storm.metric.api.IMetric;
+import org.apache.storm.metric.api.IMetric;
 
 import java.util.HashMap;
 

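These two hunks only move the IMetric import from backtype.storm to org.apache.storm; the metric logic is untouched, and downstream metric implementations need the same one-line change. A minimal sketch of a user-defined metric against the relocated interface; the package and class names below are hypothetical, for illustration only:

    package org.apache.storm.metrics.example;      // hypothetical package

    import org.apache.storm.metric.api.IMetric;    // was backtype.storm.metric.api.IMetric

    // Counts events between metric ticks and resets on each report.
    public class EventCountMetric implements IMetric {
        private long count = 0;

        public void incr() {
            count++;
        }

        @Override
        public Object getValueAndReset() {
            long current = count;
            count = 0;
            return current;
        }
    }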
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-redis/src/main/java/org/apache/storm/redis/bolt/AbstractRedisBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-redis/src/main/java/org/apache/storm/redis/bolt/AbstractRedisBolt.java b/external/storm-redis/src/main/java/org/apache/storm/redis/bolt/AbstractRedisBolt.java
index dc2a2d3..0c64f43 100644
--- a/external/storm-redis/src/main/java/org/apache/storm/redis/bolt/AbstractRedisBolt.java
+++ b/external/storm-redis/src/main/java/org/apache/storm/redis/bolt/AbstractRedisBolt.java
@@ -17,9 +17,9 @@
  */
 package org.apache.storm.redis.bolt;
 
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.base.BaseRichBolt;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.base.BaseRichBolt;
 import org.apache.storm.redis.common.config.JedisClusterConfig;
 import org.apache.storm.redis.common.config.JedisPoolConfig;
 import org.apache.storm.redis.common.container.JedisCommandsContainerBuilder;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-redis/src/main/java/org/apache/storm/redis/bolt/RedisLookupBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-redis/src/main/java/org/apache/storm/redis/bolt/RedisLookupBolt.java b/external/storm-redis/src/main/java/org/apache/storm/redis/bolt/RedisLookupBolt.java
index 47c98cb..4d6dc4e 100644
--- a/external/storm-redis/src/main/java/org/apache/storm/redis/bolt/RedisLookupBolt.java
+++ b/external/storm-redis/src/main/java/org/apache/storm/redis/bolt/RedisLookupBolt.java
@@ -17,9 +17,9 @@
  */
 package org.apache.storm.redis.bolt;
 
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
 import org.apache.storm.redis.common.mapper.RedisDataTypeDescription;
 import org.apache.storm.redis.common.mapper.RedisLookupMapper;
 import org.apache.storm.redis.common.config.JedisClusterConfig;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-redis/src/main/java/org/apache/storm/redis/bolt/RedisStoreBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-redis/src/main/java/org/apache/storm/redis/bolt/RedisStoreBolt.java b/external/storm-redis/src/main/java/org/apache/storm/redis/bolt/RedisStoreBolt.java
index be9a328..b74ed1c 100644
--- a/external/storm-redis/src/main/java/org/apache/storm/redis/bolt/RedisStoreBolt.java
+++ b/external/storm-redis/src/main/java/org/apache/storm/redis/bolt/RedisStoreBolt.java
@@ -17,8 +17,8 @@
  */
 package org.apache.storm.redis.bolt;
 
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Tuple;
 import org.apache.storm.redis.common.config.JedisClusterConfig;
 import org.apache.storm.redis.common.config.JedisPoolConfig;
 import org.apache.storm.redis.common.mapper.RedisDataTypeDescription;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-redis/src/main/java/org/apache/storm/redis/common/mapper/RedisLookupMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-redis/src/main/java/org/apache/storm/redis/common/mapper/RedisLookupMapper.java b/external/storm-redis/src/main/java/org/apache/storm/redis/common/mapper/RedisLookupMapper.java
index 727e4ec..fe464f5 100644
--- a/external/storm-redis/src/main/java/org/apache/storm/redis/common/mapper/RedisLookupMapper.java
+++ b/external/storm-redis/src/main/java/org/apache/storm/redis/common/mapper/RedisLookupMapper.java
@@ -17,9 +17,9 @@
  */
 package org.apache.storm.redis.common.mapper;
 
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.ITuple;
-import backtype.storm.tuple.Values;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.ITuple;
+import org.apache.storm.tuple.Values;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-redis/src/main/java/org/apache/storm/redis/common/mapper/TupleMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-redis/src/main/java/org/apache/storm/redis/common/mapper/TupleMapper.java b/external/storm-redis/src/main/java/org/apache/storm/redis/common/mapper/TupleMapper.java
index bcc531e..a2ab48b 100644
--- a/external/storm-redis/src/main/java/org/apache/storm/redis/common/mapper/TupleMapper.java
+++ b/external/storm-redis/src/main/java/org/apache/storm/redis/common/mapper/TupleMapper.java
@@ -17,7 +17,7 @@
  */
 package org.apache.storm.redis.common.mapper;
 
-import backtype.storm.tuple.ITuple;
+import org.apache.storm.tuple.ITuple;
 
 import java.io.Serializable;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/AbstractRedisMapState.java
----------------------------------------------------------------------
diff --git a/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/AbstractRedisMapState.java b/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/AbstractRedisMapState.java
index 26056d2..f5bd459 100644
--- a/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/AbstractRedisMapState.java
+++ b/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/AbstractRedisMapState.java
@@ -19,12 +19,12 @@ package org.apache.storm.redis.trident.state;
 
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Maps;
-import storm.trident.state.JSONNonTransactionalSerializer;
-import storm.trident.state.JSONOpaqueSerializer;
-import storm.trident.state.JSONTransactionalSerializer;
-import storm.trident.state.Serializer;
-import storm.trident.state.StateType;
-import storm.trident.state.map.IBackingMap;
+import org.apache.storm.trident.state.JSONNonTransactionalSerializer;
+import org.apache.storm.trident.state.JSONOpaqueSerializer;
+import org.apache.storm.trident.state.JSONTransactionalSerializer;
+import org.apache.storm.trident.state.Serializer;
+import org.apache.storm.trident.state.StateType;
+import org.apache.storm.trident.state.map.IBackingMap;
 
 import java.util.ArrayList;
 import java.util.Collections;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/AbstractRedisStateQuerier.java
----------------------------------------------------------------------
diff --git a/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/AbstractRedisStateQuerier.java b/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/AbstractRedisStateQuerier.java
index 5c7335d..3785b84 100644
--- a/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/AbstractRedisStateQuerier.java
+++ b/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/AbstractRedisStateQuerier.java
@@ -17,14 +17,14 @@
  */
 package org.apache.storm.redis.trident.state;
 
-import backtype.storm.tuple.Values;
+import org.apache.storm.tuple.Values;
 import com.google.common.collect.Lists;
 import org.apache.storm.redis.common.mapper.RedisDataTypeDescription;
 import org.apache.storm.redis.common.mapper.RedisLookupMapper;
-import storm.trident.operation.TridentCollector;
-import storm.trident.state.BaseQueryFunction;
-import storm.trident.state.State;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.state.BaseQueryFunction;
+import org.apache.storm.trident.state.State;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/AbstractRedisStateUpdater.java
----------------------------------------------------------------------
diff --git a/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/AbstractRedisStateUpdater.java b/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/AbstractRedisStateUpdater.java
index e9654c7..82b7483 100644
--- a/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/AbstractRedisStateUpdater.java
+++ b/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/AbstractRedisStateUpdater.java
@@ -19,10 +19,10 @@ package org.apache.storm.redis.trident.state;
 
 import org.apache.storm.redis.common.mapper.RedisDataTypeDescription;
 import org.apache.storm.redis.common.mapper.RedisStoreMapper;
-import storm.trident.operation.TridentCollector;
-import storm.trident.state.BaseStateUpdater;
-import storm.trident.state.State;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.state.BaseStateUpdater;
+import org.apache.storm.trident.state.State;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.util.HashMap;
 import java.util.List;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/Options.java
----------------------------------------------------------------------
diff --git a/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/Options.java b/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/Options.java
index f4dbfaa..6ebcb22 100644
--- a/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/Options.java
+++ b/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/Options.java
@@ -18,7 +18,7 @@
 package org.apache.storm.redis.trident.state;
 
 import org.apache.storm.redis.common.mapper.RedisDataTypeDescription;
-import storm.trident.state.Serializer;
+import org.apache.storm.trident.state.Serializer;
 
 import java.io.Serializable;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/RedisClusterMapState.java
----------------------------------------------------------------------
diff --git a/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/RedisClusterMapState.java b/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/RedisClusterMapState.java
index cbd37c5..54e9aea 100644
--- a/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/RedisClusterMapState.java
+++ b/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/RedisClusterMapState.java
@@ -17,24 +17,24 @@
  */
 package org.apache.storm.redis.trident.state;
 
-import backtype.storm.task.IMetricsContext;
-import backtype.storm.tuple.Values;
+import org.apache.storm.task.IMetricsContext;
+import org.apache.storm.tuple.Values;
 import com.google.common.collect.Lists;
 import org.apache.storm.redis.common.config.JedisClusterConfig;
 import org.apache.storm.redis.common.mapper.RedisDataTypeDescription;
 import redis.clients.jedis.JedisCluster;
-import storm.trident.state.OpaqueValue;
-import storm.trident.state.Serializer;
-import storm.trident.state.State;
-import storm.trident.state.StateFactory;
-import storm.trident.state.StateType;
-import storm.trident.state.TransactionalValue;
-import storm.trident.state.map.CachedMap;
-import storm.trident.state.map.MapState;
-import storm.trident.state.map.NonTransactionalMap;
-import storm.trident.state.map.OpaqueMap;
-import storm.trident.state.map.SnapshottableMap;
-import storm.trident.state.map.TransactionalMap;
+import org.apache.storm.trident.state.OpaqueValue;
+import org.apache.storm.trident.state.Serializer;
+import org.apache.storm.trident.state.State;
+import org.apache.storm.trident.state.StateFactory;
+import org.apache.storm.trident.state.StateType;
+import org.apache.storm.trident.state.TransactionalValue;
+import org.apache.storm.trident.state.map.CachedMap;
+import org.apache.storm.trident.state.map.MapState;
+import org.apache.storm.trident.state.map.NonTransactionalMap;
+import org.apache.storm.trident.state.map.OpaqueMap;
+import org.apache.storm.trident.state.map.SnapshottableMap;
+import org.apache.storm.trident.state.map.TransactionalMap;
 
 import java.util.List;
 import java.util.Map;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/RedisClusterState.java
----------------------------------------------------------------------
diff --git a/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/RedisClusterState.java b/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/RedisClusterState.java
index 764436d..c773c1a 100644
--- a/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/RedisClusterState.java
+++ b/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/RedisClusterState.java
@@ -17,13 +17,13 @@
  */
 package org.apache.storm.redis.trident.state;
 
-import backtype.storm.task.IMetricsContext;
+import org.apache.storm.task.IMetricsContext;
 import org.apache.storm.redis.common.config.JedisClusterConfig;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import redis.clients.jedis.JedisCluster;
-import storm.trident.state.State;
-import storm.trident.state.StateFactory;
+import org.apache.storm.trident.state.State;
+import org.apache.storm.trident.state.StateFactory;
 
 import java.util.Map;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/RedisMapState.java
----------------------------------------------------------------------
diff --git a/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/RedisMapState.java b/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/RedisMapState.java
index 25e9924..b379fc1 100644
--- a/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/RedisMapState.java
+++ b/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/RedisMapState.java
@@ -17,25 +17,25 @@
  */
 package org.apache.storm.redis.trident.state;
 
-import backtype.storm.task.IMetricsContext;
-import backtype.storm.tuple.Values;
+import org.apache.storm.task.IMetricsContext;
+import org.apache.storm.tuple.Values;
 import org.apache.storm.redis.common.config.JedisPoolConfig;
 import org.apache.storm.redis.common.mapper.RedisDataTypeDescription;
 import redis.clients.jedis.Jedis;
 import redis.clients.jedis.JedisPool;
 import redis.clients.jedis.Pipeline;
-import storm.trident.state.OpaqueValue;
-import storm.trident.state.Serializer;
-import storm.trident.state.State;
-import storm.trident.state.StateFactory;
-import storm.trident.state.StateType;
-import storm.trident.state.TransactionalValue;
-import storm.trident.state.map.CachedMap;
-import storm.trident.state.map.MapState;
-import storm.trident.state.map.NonTransactionalMap;
-import storm.trident.state.map.OpaqueMap;
-import storm.trident.state.map.SnapshottableMap;
-import storm.trident.state.map.TransactionalMap;
+import org.apache.storm.trident.state.OpaqueValue;
+import org.apache.storm.trident.state.Serializer;
+import org.apache.storm.trident.state.State;
+import org.apache.storm.trident.state.StateFactory;
+import org.apache.storm.trident.state.StateType;
+import org.apache.storm.trident.state.TransactionalValue;
+import org.apache.storm.trident.state.map.CachedMap;
+import org.apache.storm.trident.state.map.MapState;
+import org.apache.storm.trident.state.map.NonTransactionalMap;
+import org.apache.storm.trident.state.map.OpaqueMap;
+import org.apache.storm.trident.state.map.SnapshottableMap;
+import org.apache.storm.trident.state.map.TransactionalMap;
 
 import java.util.List;
 import java.util.Map;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/RedisState.java
----------------------------------------------------------------------
diff --git a/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/RedisState.java b/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/RedisState.java
index 85d0e1b..a93b348 100644
--- a/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/RedisState.java
+++ b/external/storm-redis/src/main/java/org/apache/storm/redis/trident/state/RedisState.java
@@ -17,14 +17,14 @@
  */
 package org.apache.storm.redis.trident.state;
 
-import backtype.storm.task.IMetricsContext;
+import org.apache.storm.task.IMetricsContext;
 import org.apache.storm.redis.common.config.JedisPoolConfig;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import redis.clients.jedis.Jedis;
 import redis.clients.jedis.JedisPool;
-import storm.trident.state.State;
-import storm.trident.state.StateFactory;
+import org.apache.storm.trident.state.State;
+import org.apache.storm.trident.state.StateFactory;
 
 import java.util.Map;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-redis/src/test/java/org/apache/storm/redis/topology/LookupWordCount.java
----------------------------------------------------------------------
diff --git a/external/storm-redis/src/test/java/org/apache/storm/redis/topology/LookupWordCount.java b/external/storm-redis/src/test/java/org/apache/storm/redis/topology/LookupWordCount.java
index ae053de..f62b7b0 100644
--- a/external/storm-redis/src/test/java/org/apache/storm/redis/topology/LookupWordCount.java
+++ b/external/storm-redis/src/test/java/org/apache/storm/redis/topology/LookupWordCount.java
@@ -17,18 +17,18 @@
  */
 package org.apache.storm.redis.topology;
 
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.StormSubmitter;
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.topology.base.BaseRichBolt;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.ITuple;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.topology.base.BaseRichBolt;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.ITuple;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
 import com.google.common.collect.Lists;
 import org.apache.storm.redis.bolt.RedisLookupBolt;
 import org.apache.storm.redis.common.config.JedisPoolConfig;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-redis/src/test/java/org/apache/storm/redis/topology/PersistentWordCount.java
----------------------------------------------------------------------
diff --git a/external/storm-redis/src/test/java/org/apache/storm/redis/topology/PersistentWordCount.java b/external/storm-redis/src/test/java/org/apache/storm/redis/topology/PersistentWordCount.java
index 77c6ee8..d46bab6 100644
--- a/external/storm-redis/src/test/java/org/apache/storm/redis/topology/PersistentWordCount.java
+++ b/external/storm-redis/src/test/java/org/apache/storm/redis/topology/PersistentWordCount.java
@@ -17,14 +17,14 @@
  */
 package org.apache.storm.redis.topology;
 
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.StormSubmitter;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.ITuple;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.ITuple;
+import org.apache.storm.tuple.Tuple;
 import org.apache.storm.redis.bolt.AbstractRedisBolt;
 import org.apache.storm.redis.bolt.RedisStoreBolt;
 import org.apache.storm.redis.common.config.JedisClusterConfig;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-redis/src/test/java/org/apache/storm/redis/topology/WordCounter.java
----------------------------------------------------------------------
diff --git a/external/storm-redis/src/test/java/org/apache/storm/redis/topology/WordCounter.java b/external/storm-redis/src/test/java/org/apache/storm/redis/topology/WordCounter.java
index 6f25038..6fa930c 100644
--- a/external/storm-redis/src/test/java/org/apache/storm/redis/topology/WordCounter.java
+++ b/external/storm-redis/src/test/java/org/apache/storm/redis/topology/WordCounter.java
@@ -17,18 +17,18 @@
  */
 package org.apache.storm.redis.topology;
 
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.BasicOutputCollector;
-import backtype.storm.topology.IBasicBolt;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.BasicOutputCollector;
+import org.apache.storm.topology.IBasicBolt;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
 import com.google.common.collect.Maps;
 
 import java.util.Map;
 
-import static backtype.storm.utils.Utils.tuple;
+import static org.apache.storm.utils.Utils.tuple;
 
 public class WordCounter implements IBasicBolt {
     private Map<String, Integer> wordCounter = Maps.newHashMap();
@@ -64,4 +64,4 @@ public class WordCounter implements IBasicBolt {
         return null;
     }
 
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-redis/src/test/java/org/apache/storm/redis/topology/WordSpout.java
----------------------------------------------------------------------
diff --git a/external/storm-redis/src/test/java/org/apache/storm/redis/topology/WordSpout.java b/external/storm-redis/src/test/java/org/apache/storm/redis/topology/WordSpout.java
index bb9c2d7..e2cdfde 100644
--- a/external/storm-redis/src/test/java/org/apache/storm/redis/topology/WordSpout.java
+++ b/external/storm-redis/src/test/java/org/apache/storm/redis/topology/WordSpout.java
@@ -17,12 +17,12 @@
  */
 package org.apache.storm.redis.topology;
 
-import backtype.storm.spout.SpoutOutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.IRichSpout;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
+import org.apache.storm.spout.SpoutOutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.IRichSpout;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
 
 import java.util.Map;
 import java.util.Random;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-redis/src/test/java/org/apache/storm/redis/trident/PrintFunction.java
----------------------------------------------------------------------
diff --git a/external/storm-redis/src/test/java/org/apache/storm/redis/trident/PrintFunction.java b/external/storm-redis/src/test/java/org/apache/storm/redis/trident/PrintFunction.java
index 6f465c9..37d3936 100644
--- a/external/storm-redis/src/test/java/org/apache/storm/redis/trident/PrintFunction.java
+++ b/external/storm-redis/src/test/java/org/apache/storm/redis/trident/PrintFunction.java
@@ -19,9 +19,9 @@ package org.apache.storm.redis.trident;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import storm.trident.operation.BaseFunction;
-import storm.trident.operation.TridentCollector;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.operation.BaseFunction;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.util.Random;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-redis/src/test/java/org/apache/storm/redis/trident/WordCountLookupMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-redis/src/test/java/org/apache/storm/redis/trident/WordCountLookupMapper.java b/external/storm-redis/src/test/java/org/apache/storm/redis/trident/WordCountLookupMapper.java
index a445749..a6ca8c9 100644
--- a/external/storm-redis/src/test/java/org/apache/storm/redis/trident/WordCountLookupMapper.java
+++ b/external/storm-redis/src/test/java/org/apache/storm/redis/trident/WordCountLookupMapper.java
@@ -17,10 +17,10 @@
  */
 package org.apache.storm.redis.trident;
 
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.ITuple;
-import backtype.storm.tuple.Values;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.ITuple;
+import org.apache.storm.tuple.Values;
 import org.apache.storm.redis.common.mapper.RedisDataTypeDescription;
 import org.apache.storm.redis.common.mapper.RedisLookupMapper;
 
@@ -54,4 +54,4 @@ public class WordCountLookupMapper implements RedisLookupMapper {
     public String getValueFromTuple(ITuple tuple) {
         return tuple.getInteger(1).toString();
     }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-redis/src/test/java/org/apache/storm/redis/trident/WordCountStoreMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-redis/src/test/java/org/apache/storm/redis/trident/WordCountStoreMapper.java b/external/storm-redis/src/test/java/org/apache/storm/redis/trident/WordCountStoreMapper.java
index b930998..58df150 100644
--- a/external/storm-redis/src/test/java/org/apache/storm/redis/trident/WordCountStoreMapper.java
+++ b/external/storm-redis/src/test/java/org/apache/storm/redis/trident/WordCountStoreMapper.java
@@ -17,7 +17,7 @@
  */
 package org.apache.storm.redis.trident;
 
-import backtype.storm.tuple.ITuple;
+import org.apache.storm.tuple.ITuple;
 import org.apache.storm.redis.common.mapper.RedisDataTypeDescription;
 import org.apache.storm.redis.common.mapper.RedisStoreMapper;
 
@@ -36,4 +36,4 @@ public class WordCountStoreMapper implements RedisStoreMapper {
     public String getValueFromTuple(ITuple tuple) {
         return tuple.getInteger(1).toString();
     }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-redis/src/test/java/org/apache/storm/redis/trident/WordCountTridentRedis.java
----------------------------------------------------------------------
diff --git a/external/storm-redis/src/test/java/org/apache/storm/redis/trident/WordCountTridentRedis.java b/external/storm-redis/src/test/java/org/apache/storm/redis/trident/WordCountTridentRedis.java
index 4a4aae0..e3eb0f9 100644
--- a/external/storm-redis/src/test/java/org/apache/storm/redis/trident/WordCountTridentRedis.java
+++ b/external/storm-redis/src/test/java/org/apache/storm/redis/trident/WordCountTridentRedis.java
@@ -17,22 +17,22 @@
  */
 package org.apache.storm.redis.trident;
 
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.StormSubmitter;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
 import org.apache.storm.redis.common.mapper.RedisLookupMapper;
 import org.apache.storm.redis.common.mapper.RedisStoreMapper;
 import org.apache.storm.redis.trident.state.RedisState;
 import org.apache.storm.redis.trident.state.RedisStateQuerier;
 import org.apache.storm.redis.trident.state.RedisStateUpdater;
 import org.apache.storm.redis.common.config.JedisPoolConfig;
-import storm.trident.Stream;
-import storm.trident.TridentState;
-import storm.trident.TridentTopology;
-import storm.trident.testing.FixedBatchSpout;
+import org.apache.storm.trident.Stream;
+import org.apache.storm.trident.TridentState;
+import org.apache.storm.trident.TridentTopology;
+import org.apache.storm.trident.testing.FixedBatchSpout;
 
 public class WordCountTridentRedis {
     public static StormTopology buildTopology(String redisHost, Integer redisPort){

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-redis/src/test/java/org/apache/storm/redis/trident/WordCountTridentRedisCluster.java
----------------------------------------------------------------------
diff --git a/external/storm-redis/src/test/java/org/apache/storm/redis/trident/WordCountTridentRedisCluster.java b/external/storm-redis/src/test/java/org/apache/storm/redis/trident/WordCountTridentRedisCluster.java
index 765b339..116a58a 100644
--- a/external/storm-redis/src/test/java/org/apache/storm/redis/trident/WordCountTridentRedisCluster.java
+++ b/external/storm-redis/src/test/java/org/apache/storm/redis/trident/WordCountTridentRedisCluster.java
@@ -17,22 +17,22 @@
  */
 package org.apache.storm.redis.trident;
 
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.StormSubmitter;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
 import org.apache.storm.redis.common.mapper.RedisLookupMapper;
 import org.apache.storm.redis.common.mapper.RedisStoreMapper;
 import org.apache.storm.redis.trident.state.RedisClusterState;
 import org.apache.storm.redis.trident.state.RedisClusterStateQuerier;
 import org.apache.storm.redis.trident.state.RedisClusterStateUpdater;
 import org.apache.storm.redis.common.config.JedisClusterConfig;
-import storm.trident.Stream;
-import storm.trident.TridentState;
-import storm.trident.TridentTopology;
-import storm.trident.testing.FixedBatchSpout;
+import org.apache.storm.trident.Stream;
+import org.apache.storm.trident.TridentState;
+import org.apache.storm.trident.TridentTopology;
+import org.apache.storm.trident.testing.FixedBatchSpout;
 
 import java.net.InetSocketAddress;
 import java.util.HashSet;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-redis/src/test/java/org/apache/storm/redis/trident/WordCountTridentRedisClusterMap.java
----------------------------------------------------------------------
diff --git a/external/storm-redis/src/test/java/org/apache/storm/redis/trident/WordCountTridentRedisClusterMap.java b/external/storm-redis/src/test/java/org/apache/storm/redis/trident/WordCountTridentRedisClusterMap.java
index beb4b5f..fafb4e0 100644
--- a/external/storm-redis/src/test/java/org/apache/storm/redis/trident/WordCountTridentRedisClusterMap.java
+++ b/external/storm-redis/src/test/java/org/apache/storm/redis/trident/WordCountTridentRedisClusterMap.java
@@ -17,23 +17,23 @@
  */
 package org.apache.storm.redis.trident;
 
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.StormSubmitter;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
 import org.apache.storm.redis.common.mapper.RedisDataTypeDescription;
 import org.apache.storm.redis.common.mapper.TupleMapper;
 import org.apache.storm.redis.trident.state.RedisClusterMapState;
 import org.apache.storm.redis.common.config.JedisClusterConfig;
-import storm.trident.Stream;
-import storm.trident.TridentState;
-import storm.trident.TridentTopology;
-import storm.trident.operation.builtin.MapGet;
-import storm.trident.operation.builtin.Sum;
-import storm.trident.state.StateFactory;
-import storm.trident.testing.FixedBatchSpout;
+import org.apache.storm.trident.Stream;
+import org.apache.storm.trident.TridentState;
+import org.apache.storm.trident.TridentTopology;
+import org.apache.storm.trident.operation.builtin.MapGet;
+import org.apache.storm.trident.operation.builtin.Sum;
+import org.apache.storm.trident.state.StateFactory;
+import org.apache.storm.trident.testing.FixedBatchSpout;
 
 import java.net.InetSocketAddress;
 import java.util.HashSet;
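
The test topologies in this module change the same way: backtype.storm.Config, backtype.storm.LocalCluster, backtype.storm.StormSubmitter, backtype.storm.topology.TopologyBuilder and the tuple classes all move under org.apache.storm. A minimal sketch of a local run wired from the WordSpout and WordCounter classes patched above (the LocalWordCountRunner class and the topology/component names are illustrative, not taken from the patch):

    import org.apache.storm.Config;
    import org.apache.storm.LocalCluster;
    import org.apache.storm.topology.TopologyBuilder;
    import org.apache.storm.redis.topology.WordCounter;
    import org.apache.storm.redis.topology.WordSpout;

    public class LocalWordCountRunner {
        public static void main(String[] args) throws Exception {
            TopologyBuilder builder = new TopologyBuilder();
            builder.setSpout("word-spout", new WordSpout());
            builder.setBolt("word-counter", new WordCounter()).shuffleGrouping("word-spout");

            Config conf = new Config();
            conf.setDebug(true);

            // Runs the topology in-process; use StormSubmitter instead for a real cluster.
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("lookup-word-count", conf, builder.createTopology());
        }
    }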


[03/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/generated/ClusterWorkerHeartbeat.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/generated/ClusterWorkerHeartbeat.java b/storm-core/src/jvm/backtype/storm/generated/ClusterWorkerHeartbeat.java
deleted file mode 100644
index 2754abd..0000000
--- a/storm-core/src/jvm/backtype/storm/generated/ClusterWorkerHeartbeat.java
+++ /dev/null
@@ -1,768 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Autogenerated by Thrift Compiler (0.9.3)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- *  @generated
- */
-package backtype.storm.generated;
-
-import org.apache.thrift.scheme.IScheme;
-import org.apache.thrift.scheme.SchemeFactory;
-import org.apache.thrift.scheme.StandardScheme;
-
-import org.apache.thrift.scheme.TupleScheme;
-import org.apache.thrift.protocol.TTupleProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.TException;
-import org.apache.thrift.async.AsyncMethodCallback;
-import org.apache.thrift.server.AbstractNonblockingServer.*;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.EnumMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.EnumSet;
-import java.util.Collections;
-import java.util.BitSet;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import javax.annotation.Generated;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
-public class ClusterWorkerHeartbeat implements org.apache.thrift.TBase<ClusterWorkerHeartbeat, ClusterWorkerHeartbeat._Fields>, java.io.Serializable, Cloneable, Comparable<ClusterWorkerHeartbeat> {
-  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ClusterWorkerHeartbeat");
-
-  private static final org.apache.thrift.protocol.TField STORM_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("storm_id", org.apache.thrift.protocol.TType.STRING, (short)1);
-  private static final org.apache.thrift.protocol.TField EXECUTOR_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("executor_stats", org.apache.thrift.protocol.TType.MAP, (short)2);
-  private static final org.apache.thrift.protocol.TField TIME_SECS_FIELD_DESC = new org.apache.thrift.protocol.TField("time_secs", org.apache.thrift.protocol.TType.I32, (short)3);
-  private static final org.apache.thrift.protocol.TField UPTIME_SECS_FIELD_DESC = new org.apache.thrift.protocol.TField("uptime_secs", org.apache.thrift.protocol.TType.I32, (short)4);
-
-  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
-  static {
-    schemes.put(StandardScheme.class, new ClusterWorkerHeartbeatStandardSchemeFactory());
-    schemes.put(TupleScheme.class, new ClusterWorkerHeartbeatTupleSchemeFactory());
-  }
-
-  private String storm_id; // required
-  private Map<ExecutorInfo,ExecutorStats> executor_stats; // required
-  private int time_secs; // required
-  private int uptime_secs; // required
-
-  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    STORM_ID((short)1, "storm_id"),
-    EXECUTOR_STATS((short)2, "executor_stats"),
-    TIME_SECS((short)3, "time_secs"),
-    UPTIME_SECS((short)4, "uptime_secs");
-
-    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
-
-    static {
-      for (_Fields field : EnumSet.allOf(_Fields.class)) {
-        byName.put(field.getFieldName(), field);
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, or null if its not found.
-     */
-    public static _Fields findByThriftId(int fieldId) {
-      switch(fieldId) {
-        case 1: // STORM_ID
-          return STORM_ID;
-        case 2: // EXECUTOR_STATS
-          return EXECUTOR_STATS;
-        case 3: // TIME_SECS
-          return TIME_SECS;
-        case 4: // UPTIME_SECS
-          return UPTIME_SECS;
-        default:
-          return null;
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, throwing an exception
-     * if it is not found.
-     */
-    public static _Fields findByThriftIdOrThrow(int fieldId) {
-      _Fields fields = findByThriftId(fieldId);
-      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-      return fields;
-    }
-
-    /**
-     * Find the _Fields constant that matches name, or null if its not found.
-     */
-    public static _Fields findByName(String name) {
-      return byName.get(name);
-    }
-
-    private final short _thriftId;
-    private final String _fieldName;
-
-    _Fields(short thriftId, String fieldName) {
-      _thriftId = thriftId;
-      _fieldName = fieldName;
-    }
-
-    public short getThriftFieldId() {
-      return _thriftId;
-    }
-
-    public String getFieldName() {
-      return _fieldName;
-    }
-  }
-
-  // isset id assignments
-  private static final int __TIME_SECS_ISSET_ID = 0;
-  private static final int __UPTIME_SECS_ISSET_ID = 1;
-  private byte __isset_bitfield = 0;
-  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
-  static {
-    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.STORM_ID, new org.apache.thrift.meta_data.FieldMetaData("storm_id", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.EXECUTOR_STATS, new org.apache.thrift.meta_data.FieldMetaData("executor_stats", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
-            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ExecutorInfo.class), 
-            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ExecutorStats.class))));
-    tmpMap.put(_Fields.TIME_SECS, new org.apache.thrift.meta_data.FieldMetaData("time_secs", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
-    tmpMap.put(_Fields.UPTIME_SECS, new org.apache.thrift.meta_data.FieldMetaData("uptime_secs", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
-    metaDataMap = Collections.unmodifiableMap(tmpMap);
-    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ClusterWorkerHeartbeat.class, metaDataMap);
-  }
-
-  public ClusterWorkerHeartbeat() {
-  }
-
-  public ClusterWorkerHeartbeat(
-    String storm_id,
-    Map<ExecutorInfo,ExecutorStats> executor_stats,
-    int time_secs,
-    int uptime_secs)
-  {
-    this();
-    this.storm_id = storm_id;
-    this.executor_stats = executor_stats;
-    this.time_secs = time_secs;
-    set_time_secs_isSet(true);
-    this.uptime_secs = uptime_secs;
-    set_uptime_secs_isSet(true);
-  }
-
-  /**
-   * Performs a deep copy on <i>other</i>.
-   */
-  public ClusterWorkerHeartbeat(ClusterWorkerHeartbeat other) {
-    __isset_bitfield = other.__isset_bitfield;
-    if (other.is_set_storm_id()) {
-      this.storm_id = other.storm_id;
-    }
-    if (other.is_set_executor_stats()) {
-      Map<ExecutorInfo,ExecutorStats> __this__executor_stats = new HashMap<ExecutorInfo,ExecutorStats>(other.executor_stats.size());
-      for (Map.Entry<ExecutorInfo, ExecutorStats> other_element : other.executor_stats.entrySet()) {
-
-        ExecutorInfo other_element_key = other_element.getKey();
-        ExecutorStats other_element_value = other_element.getValue();
-
-        ExecutorInfo __this__executor_stats_copy_key = new ExecutorInfo(other_element_key);
-
-        ExecutorStats __this__executor_stats_copy_value = new ExecutorStats(other_element_value);
-
-        __this__executor_stats.put(__this__executor_stats_copy_key, __this__executor_stats_copy_value);
-      }
-      this.executor_stats = __this__executor_stats;
-    }
-    this.time_secs = other.time_secs;
-    this.uptime_secs = other.uptime_secs;
-  }
-
-  public ClusterWorkerHeartbeat deepCopy() {
-    return new ClusterWorkerHeartbeat(this);
-  }
-
-  @Override
-  public void clear() {
-    this.storm_id = null;
-    this.executor_stats = null;
-    set_time_secs_isSet(false);
-    this.time_secs = 0;
-    set_uptime_secs_isSet(false);
-    this.uptime_secs = 0;
-  }
-
-  public String get_storm_id() {
-    return this.storm_id;
-  }
-
-  public void set_storm_id(String storm_id) {
-    this.storm_id = storm_id;
-  }
-
-  public void unset_storm_id() {
-    this.storm_id = null;
-  }
-
-  /** Returns true if field storm_id is set (has been assigned a value) and false otherwise */
-  public boolean is_set_storm_id() {
-    return this.storm_id != null;
-  }
-
-  public void set_storm_id_isSet(boolean value) {
-    if (!value) {
-      this.storm_id = null;
-    }
-  }
-
-  public int get_executor_stats_size() {
-    return (this.executor_stats == null) ? 0 : this.executor_stats.size();
-  }
-
-  public void put_to_executor_stats(ExecutorInfo key, ExecutorStats val) {
-    if (this.executor_stats == null) {
-      this.executor_stats = new HashMap<ExecutorInfo,ExecutorStats>();
-    }
-    this.executor_stats.put(key, val);
-  }
-
-  public Map<ExecutorInfo,ExecutorStats> get_executor_stats() {
-    return this.executor_stats;
-  }
-
-  public void set_executor_stats(Map<ExecutorInfo,ExecutorStats> executor_stats) {
-    this.executor_stats = executor_stats;
-  }
-
-  public void unset_executor_stats() {
-    this.executor_stats = null;
-  }
-
-  /** Returns true if field executor_stats is set (has been assigned a value) and false otherwise */
-  public boolean is_set_executor_stats() {
-    return this.executor_stats != null;
-  }
-
-  public void set_executor_stats_isSet(boolean value) {
-    if (!value) {
-      this.executor_stats = null;
-    }
-  }
-
-  public int get_time_secs() {
-    return this.time_secs;
-  }
-
-  public void set_time_secs(int time_secs) {
-    this.time_secs = time_secs;
-    set_time_secs_isSet(true);
-  }
-
-  public void unset_time_secs() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TIME_SECS_ISSET_ID);
-  }
-
-  /** Returns true if field time_secs is set (has been assigned a value) and false otherwise */
-  public boolean is_set_time_secs() {
-    return EncodingUtils.testBit(__isset_bitfield, __TIME_SECS_ISSET_ID);
-  }
-
-  public void set_time_secs_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TIME_SECS_ISSET_ID, value);
-  }
-
-  public int get_uptime_secs() {
-    return this.uptime_secs;
-  }
-
-  public void set_uptime_secs(int uptime_secs) {
-    this.uptime_secs = uptime_secs;
-    set_uptime_secs_isSet(true);
-  }
-
-  public void unset_uptime_secs() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __UPTIME_SECS_ISSET_ID);
-  }
-
-  /** Returns true if field uptime_secs is set (has been assigned a value) and false otherwise */
-  public boolean is_set_uptime_secs() {
-    return EncodingUtils.testBit(__isset_bitfield, __UPTIME_SECS_ISSET_ID);
-  }
-
-  public void set_uptime_secs_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __UPTIME_SECS_ISSET_ID, value);
-  }
-
-  public void setFieldValue(_Fields field, Object value) {
-    switch (field) {
-    case STORM_ID:
-      if (value == null) {
-        unset_storm_id();
-      } else {
-        set_storm_id((String)value);
-      }
-      break;
-
-    case EXECUTOR_STATS:
-      if (value == null) {
-        unset_executor_stats();
-      } else {
-        set_executor_stats((Map<ExecutorInfo,ExecutorStats>)value);
-      }
-      break;
-
-    case TIME_SECS:
-      if (value == null) {
-        unset_time_secs();
-      } else {
-        set_time_secs((Integer)value);
-      }
-      break;
-
-    case UPTIME_SECS:
-      if (value == null) {
-        unset_uptime_secs();
-      } else {
-        set_uptime_secs((Integer)value);
-      }
-      break;
-
-    }
-  }
-
-  public Object getFieldValue(_Fields field) {
-    switch (field) {
-    case STORM_ID:
-      return get_storm_id();
-
-    case EXECUTOR_STATS:
-      return get_executor_stats();
-
-    case TIME_SECS:
-      return get_time_secs();
-
-    case UPTIME_SECS:
-      return get_uptime_secs();
-
-    }
-    throw new IllegalStateException();
-  }
-
-  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
-  public boolean isSet(_Fields field) {
-    if (field == null) {
-      throw new IllegalArgumentException();
-    }
-
-    switch (field) {
-    case STORM_ID:
-      return is_set_storm_id();
-    case EXECUTOR_STATS:
-      return is_set_executor_stats();
-    case TIME_SECS:
-      return is_set_time_secs();
-    case UPTIME_SECS:
-      return is_set_uptime_secs();
-    }
-    throw new IllegalStateException();
-  }
-
-  @Override
-  public boolean equals(Object that) {
-    if (that == null)
-      return false;
-    if (that instanceof ClusterWorkerHeartbeat)
-      return this.equals((ClusterWorkerHeartbeat)that);
-    return false;
-  }
-
-  public boolean equals(ClusterWorkerHeartbeat that) {
-    if (that == null)
-      return false;
-
-    boolean this_present_storm_id = true && this.is_set_storm_id();
-    boolean that_present_storm_id = true && that.is_set_storm_id();
-    if (this_present_storm_id || that_present_storm_id) {
-      if (!(this_present_storm_id && that_present_storm_id))
-        return false;
-      if (!this.storm_id.equals(that.storm_id))
-        return false;
-    }
-
-    boolean this_present_executor_stats = true && this.is_set_executor_stats();
-    boolean that_present_executor_stats = true && that.is_set_executor_stats();
-    if (this_present_executor_stats || that_present_executor_stats) {
-      if (!(this_present_executor_stats && that_present_executor_stats))
-        return false;
-      if (!this.executor_stats.equals(that.executor_stats))
-        return false;
-    }
-
-    boolean this_present_time_secs = true;
-    boolean that_present_time_secs = true;
-    if (this_present_time_secs || that_present_time_secs) {
-      if (!(this_present_time_secs && that_present_time_secs))
-        return false;
-      if (this.time_secs != that.time_secs)
-        return false;
-    }
-
-    boolean this_present_uptime_secs = true;
-    boolean that_present_uptime_secs = true;
-    if (this_present_uptime_secs || that_present_uptime_secs) {
-      if (!(this_present_uptime_secs && that_present_uptime_secs))
-        return false;
-      if (this.uptime_secs != that.uptime_secs)
-        return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    List<Object> list = new ArrayList<Object>();
-
-    boolean present_storm_id = true && (is_set_storm_id());
-    list.add(present_storm_id);
-    if (present_storm_id)
-      list.add(storm_id);
-
-    boolean present_executor_stats = true && (is_set_executor_stats());
-    list.add(present_executor_stats);
-    if (present_executor_stats)
-      list.add(executor_stats);
-
-    boolean present_time_secs = true;
-    list.add(present_time_secs);
-    if (present_time_secs)
-      list.add(time_secs);
-
-    boolean present_uptime_secs = true;
-    list.add(present_uptime_secs);
-    if (present_uptime_secs)
-      list.add(uptime_secs);
-
-    return list.hashCode();
-  }
-
-  @Override
-  public int compareTo(ClusterWorkerHeartbeat other) {
-    if (!getClass().equals(other.getClass())) {
-      return getClass().getName().compareTo(other.getClass().getName());
-    }
-
-    int lastComparison = 0;
-
-    lastComparison = Boolean.valueOf(is_set_storm_id()).compareTo(other.is_set_storm_id());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_storm_id()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.storm_id, other.storm_id);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_executor_stats()).compareTo(other.is_set_executor_stats());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_executor_stats()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.executor_stats, other.executor_stats);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_time_secs()).compareTo(other.is_set_time_secs());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_time_secs()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.time_secs, other.time_secs);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_uptime_secs()).compareTo(other.is_set_uptime_secs());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_uptime_secs()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.uptime_secs, other.uptime_secs);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    return 0;
-  }
-
-  public _Fields fieldForId(int fieldId) {
-    return _Fields.findByThriftId(fieldId);
-  }
-
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
-  }
-
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("ClusterWorkerHeartbeat(");
-    boolean first = true;
-
-    sb.append("storm_id:");
-    if (this.storm_id == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.storm_id);
-    }
-    first = false;
-    if (!first) sb.append(", ");
-    sb.append("executor_stats:");
-    if (this.executor_stats == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.executor_stats);
-    }
-    first = false;
-    if (!first) sb.append(", ");
-    sb.append("time_secs:");
-    sb.append(this.time_secs);
-    first = false;
-    if (!first) sb.append(", ");
-    sb.append("uptime_secs:");
-    sb.append(this.uptime_secs);
-    first = false;
-    sb.append(")");
-    return sb.toString();
-  }
-
-  public void validate() throws org.apache.thrift.TException {
-    // check for required fields
-    if (!is_set_storm_id()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'storm_id' is unset! Struct:" + toString());
-    }
-
-    if (!is_set_executor_stats()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'executor_stats' is unset! Struct:" + toString());
-    }
-
-    if (!is_set_time_secs()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'time_secs' is unset! Struct:" + toString());
-    }
-
-    if (!is_set_uptime_secs()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'uptime_secs' is unset! Struct:" + toString());
-    }
-
-    // check for sub-struct validity
-  }
-
-  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
-    try {
-      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
-    try {
-      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
-      __isset_bitfield = 0;
-      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private static class ClusterWorkerHeartbeatStandardSchemeFactory implements SchemeFactory {
-    public ClusterWorkerHeartbeatStandardScheme getScheme() {
-      return new ClusterWorkerHeartbeatStandardScheme();
-    }
-  }
-
-  private static class ClusterWorkerHeartbeatStandardScheme extends StandardScheme<ClusterWorkerHeartbeat> {
-
-    public void read(org.apache.thrift.protocol.TProtocol iprot, ClusterWorkerHeartbeat struct) throws org.apache.thrift.TException {
-      org.apache.thrift.protocol.TField schemeField;
-      iprot.readStructBegin();
-      while (true)
-      {
-        schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
-          break;
-        }
-        switch (schemeField.id) {
-          case 1: // STORM_ID
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.storm_id = iprot.readString();
-              struct.set_storm_id_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 2: // EXECUTOR_STATS
-            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
-              {
-                org.apache.thrift.protocol.TMap _map624 = iprot.readMapBegin();
-                struct.executor_stats = new HashMap<ExecutorInfo,ExecutorStats>(2*_map624.size);
-                ExecutorInfo _key625;
-                ExecutorStats _val626;
-                for (int _i627 = 0; _i627 < _map624.size; ++_i627)
-                {
-                  _key625 = new ExecutorInfo();
-                  _key625.read(iprot);
-                  _val626 = new ExecutorStats();
-                  _val626.read(iprot);
-                  struct.executor_stats.put(_key625, _val626);
-                }
-                iprot.readMapEnd();
-              }
-              struct.set_executor_stats_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 3: // TIME_SECS
-            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
-              struct.time_secs = iprot.readI32();
-              struct.set_time_secs_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 4: // UPTIME_SECS
-            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
-              struct.uptime_secs = iprot.readI32();
-              struct.set_uptime_secs_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          default:
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-        }
-        iprot.readFieldEnd();
-      }
-      iprot.readStructEnd();
-      struct.validate();
-    }
-
-    public void write(org.apache.thrift.protocol.TProtocol oprot, ClusterWorkerHeartbeat struct) throws org.apache.thrift.TException {
-      struct.validate();
-
-      oprot.writeStructBegin(STRUCT_DESC);
-      if (struct.storm_id != null) {
-        oprot.writeFieldBegin(STORM_ID_FIELD_DESC);
-        oprot.writeString(struct.storm_id);
-        oprot.writeFieldEnd();
-      }
-      if (struct.executor_stats != null) {
-        oprot.writeFieldBegin(EXECUTOR_STATS_FIELD_DESC);
-        {
-          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.STRUCT, struct.executor_stats.size()));
-          for (Map.Entry<ExecutorInfo, ExecutorStats> _iter628 : struct.executor_stats.entrySet())
-          {
-            _iter628.getKey().write(oprot);
-            _iter628.getValue().write(oprot);
-          }
-          oprot.writeMapEnd();
-        }
-        oprot.writeFieldEnd();
-      }
-      oprot.writeFieldBegin(TIME_SECS_FIELD_DESC);
-      oprot.writeI32(struct.time_secs);
-      oprot.writeFieldEnd();
-      oprot.writeFieldBegin(UPTIME_SECS_FIELD_DESC);
-      oprot.writeI32(struct.uptime_secs);
-      oprot.writeFieldEnd();
-      oprot.writeFieldStop();
-      oprot.writeStructEnd();
-    }
-
-  }
-
-  private static class ClusterWorkerHeartbeatTupleSchemeFactory implements SchemeFactory {
-    public ClusterWorkerHeartbeatTupleScheme getScheme() {
-      return new ClusterWorkerHeartbeatTupleScheme();
-    }
-  }
-
-  private static class ClusterWorkerHeartbeatTupleScheme extends TupleScheme<ClusterWorkerHeartbeat> {
-
-    @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, ClusterWorkerHeartbeat struct) throws org.apache.thrift.TException {
-      TTupleProtocol oprot = (TTupleProtocol) prot;
-      oprot.writeString(struct.storm_id);
-      {
-        oprot.writeI32(struct.executor_stats.size());
-        for (Map.Entry<ExecutorInfo, ExecutorStats> _iter629 : struct.executor_stats.entrySet())
-        {
-          _iter629.getKey().write(oprot);
-          _iter629.getValue().write(oprot);
-        }
-      }
-      oprot.writeI32(struct.time_secs);
-      oprot.writeI32(struct.uptime_secs);
-    }
-
-    @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, ClusterWorkerHeartbeat struct) throws org.apache.thrift.TException {
-      TTupleProtocol iprot = (TTupleProtocol) prot;
-      struct.storm_id = iprot.readString();
-      struct.set_storm_id_isSet(true);
-      {
-        org.apache.thrift.protocol.TMap _map630 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.executor_stats = new HashMap<ExecutorInfo,ExecutorStats>(2*_map630.size);
-        ExecutorInfo _key631;
-        ExecutorStats _val632;
-        for (int _i633 = 0; _i633 < _map630.size; ++_i633)
-        {
-          _key631 = new ExecutorInfo();
-          _key631.read(iprot);
-          _val632 = new ExecutorStats();
-          _val632.read(iprot);
-          struct.executor_stats.put(_key631, _val632);
-        }
-      }
-      struct.set_executor_stats_isSet(true);
-      struct.time_secs = iprot.readI32();
-      struct.set_time_secs_isSet(true);
-      struct.uptime_secs = iprot.readI32();
-      struct.set_uptime_secs_isSet(true);
-    }
-  }
-
-}
-
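
The remaining hunks in this message delete the Thrift-generated classes under backtype.storm.generated; only the deletions appear in this part of the patch, so the regenerated equivalents presumably live under org.apache.storm.generated. A minimal sketch of code built against the relocated class, using the four-argument constructor shown in the deleted source above (the HeartbeatSketch class and method name are illustrative):

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.storm.generated.ClusterWorkerHeartbeat;
    import org.apache.storm.generated.ExecutorInfo;
    import org.apache.storm.generated.ExecutorStats;

    public class HeartbeatSketch {
        public static ClusterWorkerHeartbeat emptyHeartbeat(String stormId, int timeSecs, int uptimeSecs) {
            // storm_id, executor_stats, time_secs and uptime_secs are all required
            // fields, mirroring the validate() checks in the generated code.
            Map<ExecutorInfo, ExecutorStats> stats = new HashMap<ExecutorInfo, ExecutorStats>();
            return new ClusterWorkerHeartbeat(stormId, stats, timeSecs, uptimeSecs);
        }
    }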

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/generated/CommonAggregateStats.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/generated/CommonAggregateStats.java b/storm-core/src/jvm/backtype/storm/generated/CommonAggregateStats.java
deleted file mode 100644
index f1ff590..0000000
--- a/storm-core/src/jvm/backtype/storm/generated/CommonAggregateStats.java
+++ /dev/null
@@ -1,902 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Autogenerated by Thrift Compiler (0.9.3)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- *  @generated
- */
-package backtype.storm.generated;
-
-import org.apache.thrift.scheme.IScheme;
-import org.apache.thrift.scheme.SchemeFactory;
-import org.apache.thrift.scheme.StandardScheme;
-
-import org.apache.thrift.scheme.TupleScheme;
-import org.apache.thrift.protocol.TTupleProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.TException;
-import org.apache.thrift.async.AsyncMethodCallback;
-import org.apache.thrift.server.AbstractNonblockingServer.*;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.EnumMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.EnumSet;
-import java.util.Collections;
-import java.util.BitSet;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import javax.annotation.Generated;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
-public class CommonAggregateStats implements org.apache.thrift.TBase<CommonAggregateStats, CommonAggregateStats._Fields>, java.io.Serializable, Cloneable, Comparable<CommonAggregateStats> {
-  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CommonAggregateStats");
-
-  private static final org.apache.thrift.protocol.TField NUM_EXECUTORS_FIELD_DESC = new org.apache.thrift.protocol.TField("num_executors", org.apache.thrift.protocol.TType.I32, (short)1);
-  private static final org.apache.thrift.protocol.TField NUM_TASKS_FIELD_DESC = new org.apache.thrift.protocol.TField("num_tasks", org.apache.thrift.protocol.TType.I32, (short)2);
-  private static final org.apache.thrift.protocol.TField EMITTED_FIELD_DESC = new org.apache.thrift.protocol.TField("emitted", org.apache.thrift.protocol.TType.I64, (short)3);
-  private static final org.apache.thrift.protocol.TField TRANSFERRED_FIELD_DESC = new org.apache.thrift.protocol.TField("transferred", org.apache.thrift.protocol.TType.I64, (short)4);
-  private static final org.apache.thrift.protocol.TField ACKED_FIELD_DESC = new org.apache.thrift.protocol.TField("acked", org.apache.thrift.protocol.TType.I64, (short)5);
-  private static final org.apache.thrift.protocol.TField FAILED_FIELD_DESC = new org.apache.thrift.protocol.TField("failed", org.apache.thrift.protocol.TType.I64, (short)6);
-
-  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
-  static {
-    schemes.put(StandardScheme.class, new CommonAggregateStatsStandardSchemeFactory());
-    schemes.put(TupleScheme.class, new CommonAggregateStatsTupleSchemeFactory());
-  }
-
-  private int num_executors; // optional
-  private int num_tasks; // optional
-  private long emitted; // optional
-  private long transferred; // optional
-  private long acked; // optional
-  private long failed; // optional
-
-  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    NUM_EXECUTORS((short)1, "num_executors"),
-    NUM_TASKS((short)2, "num_tasks"),
-    EMITTED((short)3, "emitted"),
-    TRANSFERRED((short)4, "transferred"),
-    ACKED((short)5, "acked"),
-    FAILED((short)6, "failed");
-
-    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
-
-    static {
-      for (_Fields field : EnumSet.allOf(_Fields.class)) {
-        byName.put(field.getFieldName(), field);
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, or null if its not found.
-     */
-    public static _Fields findByThriftId(int fieldId) {
-      switch(fieldId) {
-        case 1: // NUM_EXECUTORS
-          return NUM_EXECUTORS;
-        case 2: // NUM_TASKS
-          return NUM_TASKS;
-        case 3: // EMITTED
-          return EMITTED;
-        case 4: // TRANSFERRED
-          return TRANSFERRED;
-        case 5: // ACKED
-          return ACKED;
-        case 6: // FAILED
-          return FAILED;
-        default:
-          return null;
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, throwing an exception
-     * if it is not found.
-     */
-    public static _Fields findByThriftIdOrThrow(int fieldId) {
-      _Fields fields = findByThriftId(fieldId);
-      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-      return fields;
-    }
-
-    /**
-     * Find the _Fields constant that matches name, or null if its not found.
-     */
-    public static _Fields findByName(String name) {
-      return byName.get(name);
-    }
-
-    private final short _thriftId;
-    private final String _fieldName;
-
-    _Fields(short thriftId, String fieldName) {
-      _thriftId = thriftId;
-      _fieldName = fieldName;
-    }
-
-    public short getThriftFieldId() {
-      return _thriftId;
-    }
-
-    public String getFieldName() {
-      return _fieldName;
-    }
-  }
-
-  // isset id assignments
-  private static final int __NUM_EXECUTORS_ISSET_ID = 0;
-  private static final int __NUM_TASKS_ISSET_ID = 1;
-  private static final int __EMITTED_ISSET_ID = 2;
-  private static final int __TRANSFERRED_ISSET_ID = 3;
-  private static final int __ACKED_ISSET_ID = 4;
-  private static final int __FAILED_ISSET_ID = 5;
-  private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = {_Fields.NUM_EXECUTORS,_Fields.NUM_TASKS,_Fields.EMITTED,_Fields.TRANSFERRED,_Fields.ACKED,_Fields.FAILED};
-  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
-  static {
-    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.NUM_EXECUTORS, new org.apache.thrift.meta_data.FieldMetaData("num_executors", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
-    tmpMap.put(_Fields.NUM_TASKS, new org.apache.thrift.meta_data.FieldMetaData("num_tasks", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
-    tmpMap.put(_Fields.EMITTED, new org.apache.thrift.meta_data.FieldMetaData("emitted", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
-    tmpMap.put(_Fields.TRANSFERRED, new org.apache.thrift.meta_data.FieldMetaData("transferred", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
-    tmpMap.put(_Fields.ACKED, new org.apache.thrift.meta_data.FieldMetaData("acked", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
-    tmpMap.put(_Fields.FAILED, new org.apache.thrift.meta_data.FieldMetaData("failed", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
-    metaDataMap = Collections.unmodifiableMap(tmpMap);
-    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CommonAggregateStats.class, metaDataMap);
-  }
-
-  public CommonAggregateStats() {
-  }
-
-  /**
-   * Performs a deep copy on <i>other</i>.
-   */
-  public CommonAggregateStats(CommonAggregateStats other) {
-    __isset_bitfield = other.__isset_bitfield;
-    this.num_executors = other.num_executors;
-    this.num_tasks = other.num_tasks;
-    this.emitted = other.emitted;
-    this.transferred = other.transferred;
-    this.acked = other.acked;
-    this.failed = other.failed;
-  }
-
-  public CommonAggregateStats deepCopy() {
-    return new CommonAggregateStats(this);
-  }
-
-  @Override
-  public void clear() {
-    set_num_executors_isSet(false);
-    this.num_executors = 0;
-    set_num_tasks_isSet(false);
-    this.num_tasks = 0;
-    set_emitted_isSet(false);
-    this.emitted = 0;
-    set_transferred_isSet(false);
-    this.transferred = 0;
-    set_acked_isSet(false);
-    this.acked = 0;
-    set_failed_isSet(false);
-    this.failed = 0;
-  }
-
-  public int get_num_executors() {
-    return this.num_executors;
-  }
-
-  public void set_num_executors(int num_executors) {
-    this.num_executors = num_executors;
-    set_num_executors_isSet(true);
-  }
-
-  public void unset_num_executors() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUM_EXECUTORS_ISSET_ID);
-  }
-
-  /** Returns true if field num_executors is set (has been assigned a value) and false otherwise */
-  public boolean is_set_num_executors() {
-    return EncodingUtils.testBit(__isset_bitfield, __NUM_EXECUTORS_ISSET_ID);
-  }
-
-  public void set_num_executors_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUM_EXECUTORS_ISSET_ID, value);
-  }
-
-  public int get_num_tasks() {
-    return this.num_tasks;
-  }
-
-  public void set_num_tasks(int num_tasks) {
-    this.num_tasks = num_tasks;
-    set_num_tasks_isSet(true);
-  }
-
-  public void unset_num_tasks() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUM_TASKS_ISSET_ID);
-  }
-
-  /** Returns true if field num_tasks is set (has been assigned a value) and false otherwise */
-  public boolean is_set_num_tasks() {
-    return EncodingUtils.testBit(__isset_bitfield, __NUM_TASKS_ISSET_ID);
-  }
-
-  public void set_num_tasks_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUM_TASKS_ISSET_ID, value);
-  }
-
-  public long get_emitted() {
-    return this.emitted;
-  }
-
-  public void set_emitted(long emitted) {
-    this.emitted = emitted;
-    set_emitted_isSet(true);
-  }
-
-  public void unset_emitted() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __EMITTED_ISSET_ID);
-  }
-
-  /** Returns true if field emitted is set (has been assigned a value) and false otherwise */
-  public boolean is_set_emitted() {
-    return EncodingUtils.testBit(__isset_bitfield, __EMITTED_ISSET_ID);
-  }
-
-  public void set_emitted_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __EMITTED_ISSET_ID, value);
-  }
-
-  public long get_transferred() {
-    return this.transferred;
-  }
-
-  public void set_transferred(long transferred) {
-    this.transferred = transferred;
-    set_transferred_isSet(true);
-  }
-
-  public void unset_transferred() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TRANSFERRED_ISSET_ID);
-  }
-
-  /** Returns true if field transferred is set (has been assigned a value) and false otherwise */
-  public boolean is_set_transferred() {
-    return EncodingUtils.testBit(__isset_bitfield, __TRANSFERRED_ISSET_ID);
-  }
-
-  public void set_transferred_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TRANSFERRED_ISSET_ID, value);
-  }
-
-  public long get_acked() {
-    return this.acked;
-  }
-
-  public void set_acked(long acked) {
-    this.acked = acked;
-    set_acked_isSet(true);
-  }
-
-  public void unset_acked() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ACKED_ISSET_ID);
-  }
-
-  /** Returns true if field acked is set (has been assigned a value) and false otherwise */
-  public boolean is_set_acked() {
-    return EncodingUtils.testBit(__isset_bitfield, __ACKED_ISSET_ID);
-  }
-
-  public void set_acked_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ACKED_ISSET_ID, value);
-  }
-
-  public long get_failed() {
-    return this.failed;
-  }
-
-  public void set_failed(long failed) {
-    this.failed = failed;
-    set_failed_isSet(true);
-  }
-
-  public void unset_failed() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __FAILED_ISSET_ID);
-  }
-
-  /** Returns true if field failed is set (has been assigned a value) and false otherwise */
-  public boolean is_set_failed() {
-    return EncodingUtils.testBit(__isset_bitfield, __FAILED_ISSET_ID);
-  }
-
-  public void set_failed_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __FAILED_ISSET_ID, value);
-  }
-
-  public void setFieldValue(_Fields field, Object value) {
-    switch (field) {
-    case NUM_EXECUTORS:
-      if (value == null) {
-        unset_num_executors();
-      } else {
-        set_num_executors((Integer)value);
-      }
-      break;
-
-    case NUM_TASKS:
-      if (value == null) {
-        unset_num_tasks();
-      } else {
-        set_num_tasks((Integer)value);
-      }
-      break;
-
-    case EMITTED:
-      if (value == null) {
-        unset_emitted();
-      } else {
-        set_emitted((Long)value);
-      }
-      break;
-
-    case TRANSFERRED:
-      if (value == null) {
-        unset_transferred();
-      } else {
-        set_transferred((Long)value);
-      }
-      break;
-
-    case ACKED:
-      if (value == null) {
-        unset_acked();
-      } else {
-        set_acked((Long)value);
-      }
-      break;
-
-    case FAILED:
-      if (value == null) {
-        unset_failed();
-      } else {
-        set_failed((Long)value);
-      }
-      break;
-
-    }
-  }
-
-  public Object getFieldValue(_Fields field) {
-    switch (field) {
-    case NUM_EXECUTORS:
-      return get_num_executors();
-
-    case NUM_TASKS:
-      return get_num_tasks();
-
-    case EMITTED:
-      return get_emitted();
-
-    case TRANSFERRED:
-      return get_transferred();
-
-    case ACKED:
-      return get_acked();
-
-    case FAILED:
-      return get_failed();
-
-    }
-    throw new IllegalStateException();
-  }
-
-  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
-  public boolean isSet(_Fields field) {
-    if (field == null) {
-      throw new IllegalArgumentException();
-    }
-
-    switch (field) {
-    case NUM_EXECUTORS:
-      return is_set_num_executors();
-    case NUM_TASKS:
-      return is_set_num_tasks();
-    case EMITTED:
-      return is_set_emitted();
-    case TRANSFERRED:
-      return is_set_transferred();
-    case ACKED:
-      return is_set_acked();
-    case FAILED:
-      return is_set_failed();
-    }
-    throw new IllegalStateException();
-  }
-
-  @Override
-  public boolean equals(Object that) {
-    if (that == null)
-      return false;
-    if (that instanceof CommonAggregateStats)
-      return this.equals((CommonAggregateStats)that);
-    return false;
-  }
-
-  public boolean equals(CommonAggregateStats that) {
-    if (that == null)
-      return false;
-
-    boolean this_present_num_executors = true && this.is_set_num_executors();
-    boolean that_present_num_executors = true && that.is_set_num_executors();
-    if (this_present_num_executors || that_present_num_executors) {
-      if (!(this_present_num_executors && that_present_num_executors))
-        return false;
-      if (this.num_executors != that.num_executors)
-        return false;
-    }
-
-    boolean this_present_num_tasks = true && this.is_set_num_tasks();
-    boolean that_present_num_tasks = true && that.is_set_num_tasks();
-    if (this_present_num_tasks || that_present_num_tasks) {
-      if (!(this_present_num_tasks && that_present_num_tasks))
-        return false;
-      if (this.num_tasks != that.num_tasks)
-        return false;
-    }
-
-    boolean this_present_emitted = true && this.is_set_emitted();
-    boolean that_present_emitted = true && that.is_set_emitted();
-    if (this_present_emitted || that_present_emitted) {
-      if (!(this_present_emitted && that_present_emitted))
-        return false;
-      if (this.emitted != that.emitted)
-        return false;
-    }
-
-    boolean this_present_transferred = true && this.is_set_transferred();
-    boolean that_present_transferred = true && that.is_set_transferred();
-    if (this_present_transferred || that_present_transferred) {
-      if (!(this_present_transferred && that_present_transferred))
-        return false;
-      if (this.transferred != that.transferred)
-        return false;
-    }
-
-    boolean this_present_acked = true && this.is_set_acked();
-    boolean that_present_acked = true && that.is_set_acked();
-    if (this_present_acked || that_present_acked) {
-      if (!(this_present_acked && that_present_acked))
-        return false;
-      if (this.acked != that.acked)
-        return false;
-    }
-
-    boolean this_present_failed = true && this.is_set_failed();
-    boolean that_present_failed = true && that.is_set_failed();
-    if (this_present_failed || that_present_failed) {
-      if (!(this_present_failed && that_present_failed))
-        return false;
-      if (this.failed != that.failed)
-        return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    List<Object> list = new ArrayList<Object>();
-
-    boolean present_num_executors = true && (is_set_num_executors());
-    list.add(present_num_executors);
-    if (present_num_executors)
-      list.add(num_executors);
-
-    boolean present_num_tasks = true && (is_set_num_tasks());
-    list.add(present_num_tasks);
-    if (present_num_tasks)
-      list.add(num_tasks);
-
-    boolean present_emitted = true && (is_set_emitted());
-    list.add(present_emitted);
-    if (present_emitted)
-      list.add(emitted);
-
-    boolean present_transferred = true && (is_set_transferred());
-    list.add(present_transferred);
-    if (present_transferred)
-      list.add(transferred);
-
-    boolean present_acked = true && (is_set_acked());
-    list.add(present_acked);
-    if (present_acked)
-      list.add(acked);
-
-    boolean present_failed = true && (is_set_failed());
-    list.add(present_failed);
-    if (present_failed)
-      list.add(failed);
-
-    return list.hashCode();
-  }
-
-  @Override
-  public int compareTo(CommonAggregateStats other) {
-    if (!getClass().equals(other.getClass())) {
-      return getClass().getName().compareTo(other.getClass().getName());
-    }
-
-    int lastComparison = 0;
-
-    lastComparison = Boolean.valueOf(is_set_num_executors()).compareTo(other.is_set_num_executors());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_num_executors()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.num_executors, other.num_executors);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_num_tasks()).compareTo(other.is_set_num_tasks());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_num_tasks()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.num_tasks, other.num_tasks);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_emitted()).compareTo(other.is_set_emitted());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_emitted()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.emitted, other.emitted);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_transferred()).compareTo(other.is_set_transferred());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_transferred()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.transferred, other.transferred);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_acked()).compareTo(other.is_set_acked());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_acked()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.acked, other.acked);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_failed()).compareTo(other.is_set_failed());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_failed()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.failed, other.failed);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    return 0;
-  }
-
-  public _Fields fieldForId(int fieldId) {
-    return _Fields.findByThriftId(fieldId);
-  }
-
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
-  }
-
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("CommonAggregateStats(");
-    boolean first = true;
-
-    if (is_set_num_executors()) {
-      sb.append("num_executors:");
-      sb.append(this.num_executors);
-      first = false;
-    }
-    if (is_set_num_tasks()) {
-      if (!first) sb.append(", ");
-      sb.append("num_tasks:");
-      sb.append(this.num_tasks);
-      first = false;
-    }
-    if (is_set_emitted()) {
-      if (!first) sb.append(", ");
-      sb.append("emitted:");
-      sb.append(this.emitted);
-      first = false;
-    }
-    if (is_set_transferred()) {
-      if (!first) sb.append(", ");
-      sb.append("transferred:");
-      sb.append(this.transferred);
-      first = false;
-    }
-    if (is_set_acked()) {
-      if (!first) sb.append(", ");
-      sb.append("acked:");
-      sb.append(this.acked);
-      first = false;
-    }
-    if (is_set_failed()) {
-      if (!first) sb.append(", ");
-      sb.append("failed:");
-      sb.append(this.failed);
-      first = false;
-    }
-    sb.append(")");
-    return sb.toString();
-  }
-
-  public void validate() throws org.apache.thrift.TException {
-    // check for required fields
-    // check for sub-struct validity
-  }
-
-  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
-    try {
-      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
-    try {
-      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
-      __isset_bitfield = 0;
-      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private static class CommonAggregateStatsStandardSchemeFactory implements SchemeFactory {
-    public CommonAggregateStatsStandardScheme getScheme() {
-      return new CommonAggregateStatsStandardScheme();
-    }
-  }
-
-  private static class CommonAggregateStatsStandardScheme extends StandardScheme<CommonAggregateStats> {
-
-    public void read(org.apache.thrift.protocol.TProtocol iprot, CommonAggregateStats struct) throws org.apache.thrift.TException {
-      org.apache.thrift.protocol.TField schemeField;
-      iprot.readStructBegin();
-      while (true)
-      {
-        schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
-          break;
-        }
-        switch (schemeField.id) {
-          case 1: // NUM_EXECUTORS
-            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
-              struct.num_executors = iprot.readI32();
-              struct.set_num_executors_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 2: // NUM_TASKS
-            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
-              struct.num_tasks = iprot.readI32();
-              struct.set_num_tasks_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 3: // EMITTED
-            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
-              struct.emitted = iprot.readI64();
-              struct.set_emitted_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 4: // TRANSFERRED
-            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
-              struct.transferred = iprot.readI64();
-              struct.set_transferred_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 5: // ACKED
-            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
-              struct.acked = iprot.readI64();
-              struct.set_acked_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 6: // FAILED
-            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
-              struct.failed = iprot.readI64();
-              struct.set_failed_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          default:
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-        }
-        iprot.readFieldEnd();
-      }
-      iprot.readStructEnd();
-      struct.validate();
-    }
-
-    public void write(org.apache.thrift.protocol.TProtocol oprot, CommonAggregateStats struct) throws org.apache.thrift.TException {
-      struct.validate();
-
-      oprot.writeStructBegin(STRUCT_DESC);
-      if (struct.is_set_num_executors()) {
-        oprot.writeFieldBegin(NUM_EXECUTORS_FIELD_DESC);
-        oprot.writeI32(struct.num_executors);
-        oprot.writeFieldEnd();
-      }
-      if (struct.is_set_num_tasks()) {
-        oprot.writeFieldBegin(NUM_TASKS_FIELD_DESC);
-        oprot.writeI32(struct.num_tasks);
-        oprot.writeFieldEnd();
-      }
-      if (struct.is_set_emitted()) {
-        oprot.writeFieldBegin(EMITTED_FIELD_DESC);
-        oprot.writeI64(struct.emitted);
-        oprot.writeFieldEnd();
-      }
-      if (struct.is_set_transferred()) {
-        oprot.writeFieldBegin(TRANSFERRED_FIELD_DESC);
-        oprot.writeI64(struct.transferred);
-        oprot.writeFieldEnd();
-      }
-      if (struct.is_set_acked()) {
-        oprot.writeFieldBegin(ACKED_FIELD_DESC);
-        oprot.writeI64(struct.acked);
-        oprot.writeFieldEnd();
-      }
-      if (struct.is_set_failed()) {
-        oprot.writeFieldBegin(FAILED_FIELD_DESC);
-        oprot.writeI64(struct.failed);
-        oprot.writeFieldEnd();
-      }
-      oprot.writeFieldStop();
-      oprot.writeStructEnd();
-    }
-
-  }
-
-  private static class CommonAggregateStatsTupleSchemeFactory implements SchemeFactory {
-    public CommonAggregateStatsTupleScheme getScheme() {
-      return new CommonAggregateStatsTupleScheme();
-    }
-  }
-
-  private static class CommonAggregateStatsTupleScheme extends TupleScheme<CommonAggregateStats> {
-
-    @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, CommonAggregateStats struct) throws org.apache.thrift.TException {
-      TTupleProtocol oprot = (TTupleProtocol) prot;
-      BitSet optionals = new BitSet();
-      if (struct.is_set_num_executors()) {
-        optionals.set(0);
-      }
-      if (struct.is_set_num_tasks()) {
-        optionals.set(1);
-      }
-      if (struct.is_set_emitted()) {
-        optionals.set(2);
-      }
-      if (struct.is_set_transferred()) {
-        optionals.set(3);
-      }
-      if (struct.is_set_acked()) {
-        optionals.set(4);
-      }
-      if (struct.is_set_failed()) {
-        optionals.set(5);
-      }
-      oprot.writeBitSet(optionals, 6);
-      if (struct.is_set_num_executors()) {
-        oprot.writeI32(struct.num_executors);
-      }
-      if (struct.is_set_num_tasks()) {
-        oprot.writeI32(struct.num_tasks);
-      }
-      if (struct.is_set_emitted()) {
-        oprot.writeI64(struct.emitted);
-      }
-      if (struct.is_set_transferred()) {
-        oprot.writeI64(struct.transferred);
-      }
-      if (struct.is_set_acked()) {
-        oprot.writeI64(struct.acked);
-      }
-      if (struct.is_set_failed()) {
-        oprot.writeI64(struct.failed);
-      }
-    }
-
-    @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, CommonAggregateStats struct) throws org.apache.thrift.TException {
-      TTupleProtocol iprot = (TTupleProtocol) prot;
-      BitSet incoming = iprot.readBitSet(6);
-      if (incoming.get(0)) {
-        struct.num_executors = iprot.readI32();
-        struct.set_num_executors_isSet(true);
-      }
-      if (incoming.get(1)) {
-        struct.num_tasks = iprot.readI32();
-        struct.set_num_tasks_isSet(true);
-      }
-      if (incoming.get(2)) {
-        struct.emitted = iprot.readI64();
-        struct.set_emitted_isSet(true);
-      }
-      if (incoming.get(3)) {
-        struct.transferred = iprot.readI64();
-        struct.set_transferred_isSet(true);
-      }
-      if (incoming.get(4)) {
-        struct.acked = iprot.readI64();
-        struct.set_acked_isSet(true);
-      }
-      if (incoming.get(5)) {
-        struct.failed = iprot.readI64();
-        struct.set_failed_isSet(true);
-      }
-    }
-  }
-
-}
-

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/generated/ComponentAggregateStats.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/generated/ComponentAggregateStats.java b/storm-core/src/jvm/backtype/storm/generated/ComponentAggregateStats.java
deleted file mode 100644
index eea060f..0000000
--- a/storm-core/src/jvm/backtype/storm/generated/ComponentAggregateStats.java
+++ /dev/null
@@ -1,752 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Autogenerated by Thrift Compiler (0.9.3)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- *  @generated
- */
-package backtype.storm.generated;
-
-import org.apache.thrift.scheme.IScheme;
-import org.apache.thrift.scheme.SchemeFactory;
-import org.apache.thrift.scheme.StandardScheme;
-
-import org.apache.thrift.scheme.TupleScheme;
-import org.apache.thrift.protocol.TTupleProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.TException;
-import org.apache.thrift.async.AsyncMethodCallback;
-import org.apache.thrift.server.AbstractNonblockingServer.*;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.EnumMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.EnumSet;
-import java.util.Collections;
-import java.util.BitSet;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import javax.annotation.Generated;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
-public class ComponentAggregateStats implements org.apache.thrift.TBase<ComponentAggregateStats, ComponentAggregateStats._Fields>, java.io.Serializable, Cloneable, Comparable<ComponentAggregateStats> {
-  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ComponentAggregateStats");
-
-  private static final org.apache.thrift.protocol.TField TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("type", org.apache.thrift.protocol.TType.I32, (short)1);
-  private static final org.apache.thrift.protocol.TField COMMON_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("common_stats", org.apache.thrift.protocol.TType.STRUCT, (short)2);
-  private static final org.apache.thrift.protocol.TField SPECIFIC_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("specific_stats", org.apache.thrift.protocol.TType.STRUCT, (short)3);
-  private static final org.apache.thrift.protocol.TField LAST_ERROR_FIELD_DESC = new org.apache.thrift.protocol.TField("last_error", org.apache.thrift.protocol.TType.STRUCT, (short)4);
-
-  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
-  static {
-    schemes.put(StandardScheme.class, new ComponentAggregateStatsStandardSchemeFactory());
-    schemes.put(TupleScheme.class, new ComponentAggregateStatsTupleSchemeFactory());
-  }
-
-  private ComponentType type; // optional
-  private CommonAggregateStats common_stats; // optional
-  private SpecificAggregateStats specific_stats; // optional
-  private ErrorInfo last_error; // optional
-
-  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    /**
-     * 
-     * @see ComponentType
-     */
-    TYPE((short)1, "type"),
-    COMMON_STATS((short)2, "common_stats"),
-    SPECIFIC_STATS((short)3, "specific_stats"),
-    LAST_ERROR((short)4, "last_error");
-
-    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
-
-    static {
-      for (_Fields field : EnumSet.allOf(_Fields.class)) {
-        byName.put(field.getFieldName(), field);
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, or null if its not found.
-     */
-    public static _Fields findByThriftId(int fieldId) {
-      switch(fieldId) {
-        case 1: // TYPE
-          return TYPE;
-        case 2: // COMMON_STATS
-          return COMMON_STATS;
-        case 3: // SPECIFIC_STATS
-          return SPECIFIC_STATS;
-        case 4: // LAST_ERROR
-          return LAST_ERROR;
-        default:
-          return null;
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, throwing an exception
-     * if it is not found.
-     */
-    public static _Fields findByThriftIdOrThrow(int fieldId) {
-      _Fields fields = findByThriftId(fieldId);
-      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-      return fields;
-    }
-
-    /**
-     * Find the _Fields constant that matches name, or null if its not found.
-     */
-    public static _Fields findByName(String name) {
-      return byName.get(name);
-    }
-
-    private final short _thriftId;
-    private final String _fieldName;
-
-    _Fields(short thriftId, String fieldName) {
-      _thriftId = thriftId;
-      _fieldName = fieldName;
-    }
-
-    public short getThriftFieldId() {
-      return _thriftId;
-    }
-
-    public String getFieldName() {
-      return _fieldName;
-    }
-  }
-
-  // isset id assignments
-  private static final _Fields optionals[] = {_Fields.TYPE,_Fields.COMMON_STATS,_Fields.SPECIFIC_STATS,_Fields.LAST_ERROR};
-  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
-  static {
-    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.TYPE, new org.apache.thrift.meta_data.FieldMetaData("type", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ComponentType.class)));
-    tmpMap.put(_Fields.COMMON_STATS, new org.apache.thrift.meta_data.FieldMetaData("common_stats", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CommonAggregateStats.class)));
-    tmpMap.put(_Fields.SPECIFIC_STATS, new org.apache.thrift.meta_data.FieldMetaData("specific_stats", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SpecificAggregateStats.class)));
-    tmpMap.put(_Fields.LAST_ERROR, new org.apache.thrift.meta_data.FieldMetaData("last_error", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ErrorInfo.class)));
-    metaDataMap = Collections.unmodifiableMap(tmpMap);
-    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ComponentAggregateStats.class, metaDataMap);
-  }
-
-  public ComponentAggregateStats() {
-  }
-
-  /**
-   * Performs a deep copy on <i>other</i>.
-   */
-  public ComponentAggregateStats(ComponentAggregateStats other) {
-    if (other.is_set_type()) {
-      this.type = other.type;
-    }
-    if (other.is_set_common_stats()) {
-      this.common_stats = new CommonAggregateStats(other.common_stats);
-    }
-    if (other.is_set_specific_stats()) {
-      this.specific_stats = new SpecificAggregateStats(other.specific_stats);
-    }
-    if (other.is_set_last_error()) {
-      this.last_error = new ErrorInfo(other.last_error);
-    }
-  }
-
-  public ComponentAggregateStats deepCopy() {
-    return new ComponentAggregateStats(this);
-  }
-
-  @Override
-  public void clear() {
-    this.type = null;
-    this.common_stats = null;
-    this.specific_stats = null;
-    this.last_error = null;
-  }
-
-  /**
-   * 
-   * @see ComponentType
-   */
-  public ComponentType get_type() {
-    return this.type;
-  }
-
-  /**
-   * 
-   * @see ComponentType
-   */
-  public void set_type(ComponentType type) {
-    this.type = type;
-  }
-
-  public void unset_type() {
-    this.type = null;
-  }
-
-  /** Returns true if field type is set (has been assigned a value) and false otherwise */
-  public boolean is_set_type() {
-    return this.type != null;
-  }
-
-  public void set_type_isSet(boolean value) {
-    if (!value) {
-      this.type = null;
-    }
-  }
-
-  public CommonAggregateStats get_common_stats() {
-    return this.common_stats;
-  }
-
-  public void set_common_stats(CommonAggregateStats common_stats) {
-    this.common_stats = common_stats;
-  }
-
-  public void unset_common_stats() {
-    this.common_stats = null;
-  }
-
-  /** Returns true if field common_stats is set (has been assigned a value) and false otherwise */
-  public boolean is_set_common_stats() {
-    return this.common_stats != null;
-  }
-
-  public void set_common_stats_isSet(boolean value) {
-    if (!value) {
-      this.common_stats = null;
-    }
-  }
-
-  public SpecificAggregateStats get_specific_stats() {
-    return this.specific_stats;
-  }
-
-  public void set_specific_stats(SpecificAggregateStats specific_stats) {
-    this.specific_stats = specific_stats;
-  }
-
-  public void unset_specific_stats() {
-    this.specific_stats = null;
-  }
-
-  /** Returns true if field specific_stats is set (has been assigned a value) and false otherwise */
-  public boolean is_set_specific_stats() {
-    return this.specific_stats != null;
-  }
-
-  public void set_specific_stats_isSet(boolean value) {
-    if (!value) {
-      this.specific_stats = null;
-    }
-  }
-
-  public ErrorInfo get_last_error() {
-    return this.last_error;
-  }
-
-  public void set_last_error(ErrorInfo last_error) {
-    this.last_error = last_error;
-  }
-
-  public void unset_last_error() {
-    this.last_error = null;
-  }
-
-  /** Returns true if field last_error is set (has been assigned a value) and false otherwise */
-  public boolean is_set_last_error() {
-    return this.last_error != null;
-  }
-
-  public void set_last_error_isSet(boolean value) {
-    if (!value) {
-      this.last_error = null;
-    }
-  }
-
-  public void setFieldValue(_Fields field, Object value) {
-    switch (field) {
-    case TYPE:
-      if (value == null) {
-        unset_type();
-      } else {
-        set_type((ComponentType)value);
-      }
-      break;
-
-    case COMMON_STATS:
-      if (value == null) {
-        unset_common_stats();
-      } else {
-        set_common_stats((CommonAggregateStats)value);
-      }
-      break;
-
-    case SPECIFIC_STATS:
-      if (value == null) {
-        unset_specific_stats();
-      } else {
-        set_specific_stats((SpecificAggregateStats)value);
-      }
-      break;
-
-    case LAST_ERROR:
-      if (value == null) {
-        unset_last_error();
-      } else {
-        set_last_error((ErrorInfo)value);
-      }
-      break;
-
-    }
-  }
-
-  public Object getFieldValue(_Fields field) {
-    switch (field) {
-    case TYPE:
-      return get_type();
-
-    case COMMON_STATS:
-      return get_common_stats();
-
-    case SPECIFIC_STATS:
-      return get_specific_stats();
-
-    case LAST_ERROR:
-      return get_last_error();
-
-    }
-    throw new IllegalStateException();
-  }
-
-  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
-  public boolean isSet(_Fields field) {
-    if (field == null) {
-      throw new IllegalArgumentException();
-    }
-
-    switch (field) {
-    case TYPE:
-      return is_set_type();
-    case COMMON_STATS:
-      return is_set_common_stats();
-    case SPECIFIC_STATS:
-      return is_set_specific_stats();
-    case LAST_ERROR:
-      return is_set_last_error();
-    }
-    throw new IllegalStateException();
-  }
-
-  @Override
-  public boolean equals(Object that) {
-    if (that == null)
-      return false;
-    if (that instanceof ComponentAggregateStats)
-      return this.equals((ComponentAggregateStats)that);
-    return false;
-  }
-
-  public boolean equals(ComponentAggregateStats that) {
-    if (that == null)
-      return false;
-
-    boolean this_present_type = true && this.is_set_type();
-    boolean that_present_type = true && that.is_set_type();
-    if (this_present_type || that_present_type) {
-      if (!(this_present_type && that_present_type))
-        return false;
-      if (!this.type.equals(that.type))
-        return false;
-    }
-
-    boolean this_present_common_stats = true && this.is_set_common_stats();
-    boolean that_present_common_stats = true && that.is_set_common_stats();
-    if (this_present_common_stats || that_present_common_stats) {
-      if (!(this_present_common_stats && that_present_common_stats))
-        return false;
-      if (!this.common_stats.equals(that.common_stats))
-        return false;
-    }
-
-    boolean this_present_specific_stats = true && this.is_set_specific_stats();
-    boolean that_present_specific_stats = true && that.is_set_specific_stats();
-    if (this_present_specific_stats || that_present_specific_stats) {
-      if (!(this_present_specific_stats && that_present_specific_stats))
-        return false;
-      if (!this.specific_stats.equals(that.specific_stats))
-        return false;
-    }
-
-    boolean this_present_last_error = true && this.is_set_last_error();
-    boolean that_present_last_error = true && that.is_set_last_error();
-    if (this_present_last_error || that_present_last_error) {
-      if (!(this_present_last_error && that_present_last_error))
-        return false;
-      if (!this.last_error.equals(that.last_error))
-        return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    List<Object> list = new ArrayList<Object>();
-
-    boolean present_type = true && (is_set_type());
-    list.add(present_type);
-    if (present_type)
-      list.add(type.getValue());
-
-    boolean present_common_stats = true && (is_set_common_stats());
-    list.add(present_common_stats);
-    if (present_common_stats)
-      list.add(common_stats);
-
-    boolean present_specific_stats = true && (is_set_specific_stats());
-    list.add(present_specific_stats);
-    if (present_specific_stats)
-      list.add(specific_stats);
-
-    boolean present_last_error = true && (is_set_last_error());
-    list.add(present_last_error);
-    if (present_last_error)
-      list.add(last_error);
-
-    return list.hashCode();
-  }
-
-  @Override
-  public int compareTo(ComponentAggregateStats other) {
-    if (!getClass().equals(other.getClass())) {
-      return getClass().getName().compareTo(other.getClass().getName());
-    }
-
-    int lastComparison = 0;
-
-    lastComparison = Boolean.valueOf(is_set_type()).compareTo(other.is_set_type());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_type()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.type, other.type);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_common_stats()).compareTo(other.is_set_common_stats());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_common_stats()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.common_stats, other.common_stats);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_specific_stats()).compareTo(other.is_set_specific_stats());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_specific_stats()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.specific_stats, other.specific_stats);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_last_error()).compareTo(other.is_set_last_error());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_last_error()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.last_error, other.last_error);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    return 0;
-  }
-
-  public _Fields fieldForId(int fieldId) {
-    return _Fields.findByThriftId(fieldId);
-  }
-
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
-  }
-
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("ComponentAggregateStats(");
-    boolean first = true;
-
-    if (is_set_type()) {
-      sb.append("type:");
-      if (this.type == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.type);
-      }
-      first = false;
-    }
-    if (is_set_common_stats()) {
-      if (!first) sb.append(", ");
-      sb.append("common_stats:");
-      if (this.common_stats == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.common_stats);
-      }
-      first = false;
-    }
-    if (is_set_specific_stats()) {
-      if (!first) sb.append(", ");
-      sb.append("specific_stats:");
-      if (this.specific_stats == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.specific_stats);
-      }
-      first = false;
-    }
-    if (is_set_last_error()) {
-      if (!first) sb.append(", ");
-      sb.append("last_error:");
-      if (this.last_error == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.last_error);
-      }
-      first = false;
-    }
-    sb.append(")");
-    return sb.toString();
-  }
-
-  public void validate() throws org.apache.thrift.TException {
-    // check for required fields
-    // check for sub-struct validity
-    if (common_stats != null) {
-      common_stats.validate();
-    }
-    if (last_error != null) {
-      last_error.validate();
-    }
-  }
-
-  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
-    try {
-      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
-    try {
-      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private static class ComponentAggregateStatsStandardSchemeFactory implements SchemeFactory {
-    public ComponentAggregateStatsStandardScheme getScheme() {
-      return new ComponentAggregateStatsStandardScheme();
-    }
-  }
-
-  private static class ComponentAggregateStatsStandardScheme extends StandardScheme<ComponentAggregateStats> {
-
-    public void read(org.apache.thrift.protocol.TProtocol iprot, ComponentAggregateStats struct) throws org.apache.thrift.TException {
-      org.apache.thrift.protocol.TField schemeField;
-      iprot.readStructBegin();
-      while (true)
-      {
-        schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
-          break;
-        }
-        switch (schemeField.id) {
-          case 1: // TYPE
-            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
-              struct.type = backtype.storm.generated.ComponentType.findByValue(iprot.readI32());
-              struct.set_type_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 2: // COMMON_STATS
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-              struct.common_stats = new CommonAggregateStats();
-              struct.common_stats.read(iprot);
-              struct.set_common_stats_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 3: // SPECIFIC_STATS
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-              struct.specific_stats = new SpecificAggregateStats();
-              struct.specific_stats.read(iprot);
-              struct.set_specific_stats_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 4: // LAST_ERROR
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-              struct.last_error = new ErrorInfo();
-              struct.last_error.read(iprot);
-              struct.set_last_error_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          default:
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-        }
-        iprot.readFieldEnd();
-      }
-      iprot.readStructEnd();
-      struct.validate();
-    }
-
-    public void write(org.apache.thrift.protocol.TProtocol oprot, ComponentAggregateStats struct) throws org.apache.thrift.TException {
-      struct.validate();
-
-      oprot.writeStructBegin(STRUCT_DESC);
-      if (struct.type != null) {
-        if (struct.is_set_type()) {
-          oprot.writeFieldBegin(TYPE_FIELD_DESC);
-          oprot.writeI32(struct.type.getValue());
-          oprot.writeFieldEnd();
-        }
-      }
-      if (struct.common_stats != null) {
-        if (struct.is_set_common_stats()) {
-          oprot.writeFieldBegin(COMMON_STATS_FIELD_DESC);
-          struct.common_stats.write(oprot);
-          oprot.writeFieldEnd();
-        }
-      }
-      if (struct.specific_stats != null) {
-        if (struct.is_set_specific_stats()) {
-          oprot.writeFieldBegin(SPECIFIC_STATS_FIELD_DESC);
-          struct.specific_stats.write(oprot);
-          oprot.writeFieldEnd();
-        }
-      }
-      if (struct.last_error != null) {
-        if (struct.is_set_last_error()) {
-          oprot.writeFieldBegin(LAST_ERROR_FIELD_DESC);
-          struct.last_error.write(oprot);
-          oprot.writeFieldEnd();
-        }
-      }
-      oprot.writeFieldStop();
-      oprot.writeStructEnd();
-    }
-
-  }
-
-  private static class ComponentAggregateStatsTupleSchemeFactory implements SchemeFactory {
-    public ComponentAggregateStatsTupleScheme getScheme() {
-      return new ComponentAggregateStatsTupleScheme();
-    }
-  }
-
-  private static class ComponentAggregateStatsTupleScheme extends TupleScheme<ComponentAggregateStats> {
-
-    @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, ComponentAggregateStats struct) throws org.apache.thrift.TException {
-      TTupleProtocol oprot = (TTupleProtocol) prot;
-      BitSet optionals = new BitSet();
-      if (struct.is_set_type()) {
-        optionals.set(0);
-      }
-      if (struct.is_set_common_stats()) {
-        optionals.set(1);
-      }
-      if (struct.is_set_specific_stats()) {
-        optionals.set(2);
-      }
-      if (struct.is_set_last_error()) {
-        optionals.set(3);
-      }
-      oprot.writeBitSet(optionals, 4);
-      if (struct.is_set_type()) {
-        oprot.writeI32(struct.type.getValue());
-      }
-      if (struct.is_set_common_stats()) {
-        struct.common_stats.write(oprot);
-      }
-      if (struct.is_set_specific_stats()) {
-        struct.specific_stats.write(oprot);
-      }
-      if (struct.is_set_last_error()) {
-        struct.last_error.write(oprot);
-      }
-    }
-
-    @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, ComponentAggregateStats struct) throws org.apache.thrift.TException {
-      TTupleProtocol iprot = (TTupleProtocol) prot;
-      BitSet incoming = iprot.readBitSet(4);
-      if (incoming.get(0)) {
-        struct.type = backtype.storm.generated.ComponentType.findByValue(iprot.readI32());
-        struct.set_type_isSet(true);
-      }
-      if (incoming.get(1)) {
-        struct.common_stats = new CommonAggregateStats();
-        struct.common_stats.read(iprot);
-        struct.set_common_stats_isSet(true);
-      }
-      if (incoming.get(2)) {
-        struct.specific_stats = new SpecificAggregateStats();
-        struct.specific_stats.read(iprot);
-        struct.set_specific_stats_isSet(true);
-      }
-      if (incoming.get(3)) {
-        struct.last_error = new ErrorInfo();
-        struct.last_error.read(iprot);
-        struct.set_last_error_isSet(true);
-      }
-    }
-  }
-
-}
-
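The Thrift-generated structs deleted above are regenerated under org.apache.storm.generated elsewhere in this commit; the storm-jdbc hunks below already import org.apache.storm.generated.StormTopology from the new package. A hypothetical sketch of downstream code picking the same structs up from their new home (the helper class and method are invented for illustration; the generated accessors keep their old names, only the package prefix changes):

// Hypothetical sketch, not part of this diff: using the regenerated Thrift classes
// from org.apache.storm.generated instead of backtype.storm.generated.
import org.apache.storm.generated.CommonAggregateStats;     // was backtype.storm.generated.CommonAggregateStats
import org.apache.storm.generated.ComponentAggregateStats;  // was backtype.storm.generated.ComponentAggregateStats
import org.apache.storm.generated.ComponentType;            // was backtype.storm.generated.ComponentType

public class GeneratedClassesAfterRename {                   // hypothetical helper class
  public static ComponentAggregateStats boltStats(long acked, long failed) {
    CommonAggregateStats common = new CommonAggregateStats();
    common.set_acked(acked);                                 // same generated setters as before the rename
    common.set_failed(failed);

    ComponentAggregateStats stats = new ComponentAggregateStats();
    stats.set_type(ComponentType.BOLT);
    stats.set_common_stats(common);
    return stats;
  }
}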


[40/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/trident/state/JdbcUpdater.java
----------------------------------------------------------------------
diff --git a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/trident/state/JdbcUpdater.java b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/trident/state/JdbcUpdater.java
index b76e230..d2ca5b8 100644
--- a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/trident/state/JdbcUpdater.java
+++ b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/trident/state/JdbcUpdater.java
@@ -17,9 +17,9 @@
  */
 package org.apache.storm.jdbc.trident.state;
 
-import storm.trident.operation.TridentCollector;
-import storm.trident.state.BaseStateUpdater;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.state.BaseStateUpdater;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-jdbc/src/test/java/org/apache/storm/jdbc/bolt/JdbcLookupBoltTest.java
----------------------------------------------------------------------
diff --git a/external/storm-jdbc/src/test/java/org/apache/storm/jdbc/bolt/JdbcLookupBoltTest.java b/external/storm-jdbc/src/test/java/org/apache/storm/jdbc/bolt/JdbcLookupBoltTest.java
index 1fda3b1..9a5ec09 100644
--- a/external/storm-jdbc/src/test/java/org/apache/storm/jdbc/bolt/JdbcLookupBoltTest.java
+++ b/external/storm-jdbc/src/test/java/org/apache/storm/jdbc/bolt/JdbcLookupBoltTest.java
@@ -17,7 +17,7 @@
  */
 package org.apache.storm.jdbc.bolt;
 
-import backtype.storm.tuple.Fields;
+import org.apache.storm.tuple.Fields;
 import com.google.common.collect.Lists;
 import org.apache.storm.jdbc.common.Column;
 import org.apache.storm.jdbc.common.ConnectionProvider;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-jdbc/src/test/java/org/apache/storm/jdbc/spout/UserSpout.java
----------------------------------------------------------------------
diff --git a/external/storm-jdbc/src/test/java/org/apache/storm/jdbc/spout/UserSpout.java b/external/storm-jdbc/src/test/java/org/apache/storm/jdbc/spout/UserSpout.java
index 718917a..fdcd053 100644
--- a/external/storm-jdbc/src/test/java/org/apache/storm/jdbc/spout/UserSpout.java
+++ b/external/storm-jdbc/src/test/java/org/apache/storm/jdbc/spout/UserSpout.java
@@ -17,12 +17,12 @@
  */
 package org.apache.storm.jdbc.spout;
 
-import backtype.storm.spout.SpoutOutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.IRichSpout;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
+import org.apache.storm.spout.SpoutOutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.IRichSpout;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
 import com.google.common.collect.Lists;
 
 import java.util.*;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-jdbc/src/test/java/org/apache/storm/jdbc/topology/AbstractUserTopology.java
----------------------------------------------------------------------
diff --git a/external/storm-jdbc/src/test/java/org/apache/storm/jdbc/topology/AbstractUserTopology.java b/external/storm-jdbc/src/test/java/org/apache/storm/jdbc/topology/AbstractUserTopology.java
index 9df5a86..ec7ca36 100644
--- a/external/storm-jdbc/src/test/java/org/apache/storm/jdbc/topology/AbstractUserTopology.java
+++ b/external/storm-jdbc/src/test/java/org/apache/storm/jdbc/topology/AbstractUserTopology.java
@@ -17,10 +17,10 @@
  */
 package org.apache.storm.jdbc.topology;
 
-import backtype.storm.Config;
-import backtype.storm.StormSubmitter;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.tuple.Fields;
+import org.apache.storm.Config;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.tuple.Fields;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import org.apache.storm.jdbc.common.Column;
@@ -32,7 +32,7 @@ import org.apache.storm.jdbc.mapper.JdbcLookupMapper;
 import org.apache.storm.jdbc.mapper.SimpleJdbcMapper;
 import org.apache.storm.jdbc.mapper.SimpleJdbcLookupMapper;
 import org.apache.storm.jdbc.spout.UserSpout;
-import backtype.storm.LocalCluster;
+import org.apache.storm.LocalCluster;
 
 import java.sql.Types;
 import java.util.List;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-jdbc/src/test/java/org/apache/storm/jdbc/topology/UserPersistanceTopology.java
----------------------------------------------------------------------
diff --git a/external/storm-jdbc/src/test/java/org/apache/storm/jdbc/topology/UserPersistanceTopology.java b/external/storm-jdbc/src/test/java/org/apache/storm/jdbc/topology/UserPersistanceTopology.java
index 585994e..1915219 100644
--- a/external/storm-jdbc/src/test/java/org/apache/storm/jdbc/topology/UserPersistanceTopology.java
+++ b/external/storm-jdbc/src/test/java/org/apache/storm/jdbc/topology/UserPersistanceTopology.java
@@ -17,8 +17,8 @@
  */
 package org.apache.storm.jdbc.topology;
 
-import backtype.storm.generated.StormTopology;
-import backtype.storm.topology.TopologyBuilder;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.topology.TopologyBuilder;
 import com.google.common.collect.Lists;
 import org.apache.storm.jdbc.bolt.JdbcInsertBolt;
 import org.apache.storm.jdbc.bolt.JdbcLookupBolt;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-jdbc/src/test/java/org/apache/storm/jdbc/topology/UserPersistanceTridentTopology.java
----------------------------------------------------------------------
diff --git a/external/storm-jdbc/src/test/java/org/apache/storm/jdbc/topology/UserPersistanceTridentTopology.java b/external/storm-jdbc/src/test/java/org/apache/storm/jdbc/topology/UserPersistanceTridentTopology.java
index 522d41a..11269c3 100644
--- a/external/storm-jdbc/src/test/java/org/apache/storm/jdbc/topology/UserPersistanceTridentTopology.java
+++ b/external/storm-jdbc/src/test/java/org/apache/storm/jdbc/topology/UserPersistanceTridentTopology.java
@@ -17,8 +17,8 @@
  */
 package org.apache.storm.jdbc.topology;
 
-import backtype.storm.generated.StormTopology;
-import backtype.storm.tuple.Fields;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.tuple.Fields;
 import com.google.common.collect.Lists;
 import org.apache.storm.jdbc.common.Column;
 import org.apache.storm.jdbc.mapper.SimpleJdbcLookupMapper;
@@ -27,9 +27,9 @@ import org.apache.storm.jdbc.trident.state.JdbcQuery;
 import org.apache.storm.jdbc.trident.state.JdbcState;
 import org.apache.storm.jdbc.trident.state.JdbcStateFactory;
 import org.apache.storm.jdbc.trident.state.JdbcUpdater;
-import storm.trident.Stream;
-import storm.trident.TridentState;
-import storm.trident.TridentTopology;
+import org.apache.storm.trident.Stream;
+import org.apache.storm.trident.TridentState;
+import org.apache.storm.trident.TridentTopology;
 
 import java.sql.Types;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/README.md
----------------------------------------------------------------------
diff --git a/external/storm-kafka/README.md b/external/storm-kafka/README.md
index 3dfa0b7..3a86cf0 100644
--- a/external/storm-kafka/README.md
+++ b/external/storm-kafka/README.md
@@ -65,7 +65,7 @@ In addition to these parameters, SpoutConfig contains the following fields that
 
     // Exponential back-off retry settings.  These are used when retrying messages after a bolt
     // calls OutputCollector.fail().
-    // Note: be sure to set backtype.storm.Config.MESSAGE_TIMEOUT_SECS appropriately to prevent
+    // Note: be sure to set org.apache.storm.Config.MESSAGE_TIMEOUT_SECS appropriately to prevent
     // resubmitting the message while still retrying.
     public long retryInitialDelayMs = 0;
     public double retryDelayMultiplier = 1.0;
@@ -190,9 +190,9 @@ use Kafka 0.8.1.1 built against Scala 2.10, you would use the following dependen
 Note that the ZooKeeper and log4j dependencies are excluded to prevent version conflicts with Storm's dependencies.
 
 ##Writing to Kafka as part of your topology
-You can create an instance of storm.kafka.bolt.KafkaBolt and attach it as a component to your topology or if you
-are using trident you can use storm.kafka.trident.TridentState, storm.kafka.trident.TridentStateFactory and
-storm.kafka.trident.TridentKafkaUpdater.
+You can create an instance of org.apache.storm.kafka.bolt.KafkaBolt and attach it as a component to your topology or if you
+are using trident you can use org.apache.storm.kafka.trident.TridentState, org.apache.storm.kafka.trident.TridentStateFactory and
+org.apache.storm.kafka.trident.TridentKafkaUpdater.
 
 You need to provide implementation of following 2 interfaces
 

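A minimal sketch (not part of this patch) of how the renamed spout classes from the README excerpt above would be wired into a topology. The ZooKeeper host, topic name, zkRoot and spout id below are placeholder values, and only the retry fields quoted in the README hunk are set; the exact timeout value is illustrative, chosen to stay above the expected retry delays as the updated note advises.

    import org.apache.storm.Config;
    import org.apache.storm.topology.TopologyBuilder;
    import org.apache.storm.kafka.BrokerHosts;
    import org.apache.storm.kafka.KafkaSpout;
    import org.apache.storm.kafka.SpoutConfig;
    import org.apache.storm.kafka.ZkHosts;

    public class KafkaSpoutWiringSketch {
        public static void main(String[] args) {
            // Broker metadata is discovered from ZooKeeper; host/topic are placeholders.
            BrokerHosts hosts = new ZkHosts("zkhost:2181");
            SpoutConfig spoutConfig = new SpoutConfig(hosts, "my-topic", "/kafka-spout", "my-spout-id");

            // Exponential back-off retry settings quoted in the README hunk above.
            spoutConfig.retryInitialDelayMs = 100;
            spoutConfig.retryDelayMultiplier = 2.0;

            Config conf = new Config();
            // Keep the tuple timeout well above the largest expected retry delay,
            // per the note that now references org.apache.storm.Config.
            conf.setMessageTimeoutSecs(120);

            TopologyBuilder builder = new TopologyBuilder();
            builder.setSpout("kafka-spout", new KafkaSpout(spoutConfig), 1);
            // builder.createTopology() would then be submitted via StormSubmitter or LocalCluster.
        }
    }
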
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/Broker.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/Broker.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/Broker.java
new file mode 100644
index 0000000..0d95e8d
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/Broker.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import com.google.common.base.Objects;
+
+import java.io.Serializable;
+
+public class Broker implements Serializable, Comparable<Broker> {
+    public String host;
+    public int port;
+
+    // for kryo compatibility
+    private Broker() {
+	
+    }
+    
+    public Broker(String host, int port) {
+        this.host = host;
+        this.port = port;
+    }
+
+    public Broker(String host) {
+        this(host, 9092);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hashCode(host, port);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) {
+            return true;
+        }
+        if (obj == null || getClass() != obj.getClass()) {
+            return false;
+        }
+        final Broker other = (Broker) obj;
+        return Objects.equal(this.host, other.host) && Objects.equal(this.port, other.port);
+    }
+
+    @Override
+    public String toString() {
+        return host + ":" + port;
+    }
+
+    public static Broker fromString(String host) {
+        Broker hp;
+        String[] spec = host.split(":");
+        if (spec.length == 1) {
+            hp = new Broker(spec[0]);
+        } else if (spec.length == 2) {
+            hp = new Broker(spec[0], Integer.parseInt(spec[1]));
+        } else {
+            throw new IllegalArgumentException("Invalid host specification: " + host);
+        }
+        return hp;
+    }
+
+
+    @Override
+    public int compareTo(Broker o) {
+        if (this.host.equals(o.host)) {
+            return this.port - o.port;
+        } else {
+            return this.host.compareTo(o.host);
+        }
+    }
+}

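For reference, a small usage sketch (not part of the patch) of the Broker.fromString() parsing added above; the host names are placeholders.

    import org.apache.storm.kafka.Broker;

    public class BrokerParsingSketch {
        public static void main(String[] args) {
            // "host:port" parses both fields; a bare host falls back to port 9092.
            Broker explicit = Broker.fromString("kafka01:9093");
            Broker defaulted = Broker.fromString("kafka02");
            System.out.println(explicit);   // kafka01:9093
            System.out.println(defaulted);  // kafka02:9092
            // Anything with more than one ':' throws IllegalArgumentException.
        }
    }
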
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/BrokerHosts.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/BrokerHosts.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/BrokerHosts.java
new file mode 100644
index 0000000..13ba0a1
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/BrokerHosts.java
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import java.io.Serializable;
+
+
+public interface BrokerHosts extends Serializable {
+
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/ByteBufferSerializer.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/ByteBufferSerializer.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/ByteBufferSerializer.java
new file mode 100644
index 0000000..2a18a7f
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/ByteBufferSerializer.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import org.apache.storm.utils.Utils;
+import org.apache.kafka.common.serialization.Serializer;
+
+import java.nio.ByteBuffer;
+import java.util.Map;
+
+public class ByteBufferSerializer implements Serializer<ByteBuffer> {
+  @Override
+  public void configure(Map<String, ?> map, boolean b) {
+
+  }
+
+  @Override
+  public void close() {
+
+  }
+
+  @Override
+  public byte[] serialize(String s, ByteBuffer b) {
+    return Utils.toByteArray(b);
+  }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/DynamicBrokersReader.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/DynamicBrokersReader.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/DynamicBrokersReader.java
new file mode 100644
index 0000000..0fc85b3
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/DynamicBrokersReader.java
@@ -0,0 +1,213 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import org.apache.storm.Config;
+import org.apache.storm.utils.Utils;
+import com.google.common.base.Preconditions;
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.CuratorFrameworkFactory;
+import org.apache.curator.retry.RetryNTimes;
+import org.json.simple.JSONValue;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.storm.kafka.trident.GlobalPartitionInformation;
+
+import java.io.UnsupportedEncodingException;
+import java.net.SocketTimeoutException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+
+public class DynamicBrokersReader {
+
+    public static final Logger LOG = LoggerFactory.getLogger(DynamicBrokersReader.class);
+
+    private CuratorFramework _curator;
+    private String _zkPath;
+    private String _topic;
+    private Boolean _isWildcardTopic;
+
+    public DynamicBrokersReader(Map conf, String zkStr, String zkPath, String topic) {
+        // Check required parameters
+        Preconditions.checkNotNull(conf, "conf cannot be null");
+
+        validateConfig(conf);
+
+        Preconditions.checkNotNull(zkStr,"zkString cannot be null");
+        Preconditions.checkNotNull(zkPath, "zkPath cannot be null");
+        Preconditions.checkNotNull(topic, "topic cannot be null");
+
+        _zkPath = zkPath;
+        _topic = topic;
+        _isWildcardTopic = Utils.getBoolean(conf.get("kafka.topic.wildcard.match"), false);
+        try {
+            _curator = CuratorFrameworkFactory.newClient(
+                    zkStr,
+                    Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT)),
+                    Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_CONNECTION_TIMEOUT)),
+                    new RetryNTimes(Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_TIMES)),
+                            Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL))));
+            _curator.start();
+        } catch (Exception ex) {
+            LOG.error("Couldn't connect to zookeeper", ex);
+            throw new RuntimeException(ex);
+        }
+    }
+
+    /**
+     * Get all partitions with their current leaders
+     */
+    public List<GlobalPartitionInformation> getBrokerInfo() throws SocketTimeoutException {
+      List<String> topics =  getTopics();
+      List<GlobalPartitionInformation> partitions =  new ArrayList<GlobalPartitionInformation>();
+
+      for (String topic : topics) {
+          GlobalPartitionInformation globalPartitionInformation = new GlobalPartitionInformation(topic, this._isWildcardTopic);
+          try {
+              int numPartitionsForTopic = getNumPartitions(topic);
+              String brokerInfoPath = brokerPath();
+              for (int partition = 0; partition < numPartitionsForTopic; partition++) {
+                  int leader = getLeaderFor(topic,partition);
+                  String path = brokerInfoPath + "/" + leader;
+                  try {
+                      byte[] brokerData = _curator.getData().forPath(path);
+                      Broker hp = getBrokerHost(brokerData);
+                      globalPartitionInformation.addPartition(partition, hp);
+                  } catch (org.apache.zookeeper.KeeperException.NoNodeException e) {
+                      LOG.error("Node {} does not exist ", path);
+                  }
+              }
+          } catch (SocketTimeoutException e) {
+              throw e;
+          } catch (Exception e) {
+              throw new RuntimeException(e);
+          }
+          LOG.info("Read partition info from zookeeper: " + globalPartitionInformation);
+          partitions.add(globalPartitionInformation);
+      }
+        return partitions;
+    }
+
+    private int getNumPartitions(String topic) {
+        try {
+            String topicBrokersPath = partitionPath(topic);
+            List<String> children = _curator.getChildren().forPath(topicBrokersPath);
+            return children.size();
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    private List<String> getTopics() {
+        List<String> topics = new ArrayList<String>();
+        if (!_isWildcardTopic) {
+            topics.add(_topic);
+            return topics;
+        } else {
+            try {
+                List<String> children = _curator.getChildren().forPath(topicsPath());
+                for (String t : children) {
+                    if (t.matches(_topic)) {
+                        LOG.info(String.format("Found matching topic %s", t));
+                        topics.add(t);
+                    }
+                }
+                return topics;
+            } catch (Exception e) {
+                throw new RuntimeException(e);
+            }
+        }
+    }
+
+    public String topicsPath () {
+        return _zkPath + "/topics";
+    }
+    public String partitionPath(String topic) {
+        return topicsPath() + "/" + topic + "/partitions";
+    }
+
+    public String brokerPath() {
+        return _zkPath + "/ids";
+    }
+
+
+
+    /**
+     * get /brokers/topics/distributedTopic/partitions/1/state
+     * { "controller_epoch":4, "isr":[ 1, 0 ], "leader":1, "leader_epoch":1, "version":1 }
+     * @param topic
+     * @param partition
+     * @return
+     */
+    private int getLeaderFor(String topic, long partition) {
+        try {
+            String topicBrokersPath = partitionPath(topic);
+            byte[] hostPortData = _curator.getData().forPath(topicBrokersPath + "/" + partition + "/state");
+            Map<Object, Object> value = (Map<Object, Object>) JSONValue.parse(new String(hostPortData, "UTF-8"));
+            Integer leader = ((Number) value.get("leader")).intValue();
+            if (leader == -1) {
+                throw new RuntimeException("No leader found for partition " + partition);
+            }
+            return leader;
+        } catch (RuntimeException e) {
+            throw e;
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    public void close() {
+        _curator.close();
+    }
+
+    /**
+     * [zk: localhost:2181(CONNECTED) 56] get /brokers/ids/0
+     * { "host":"localhost", "jmx_port":9999, "port":9092, "version":1 }
+     *
+     * @param contents
+     * @return
+     */
+    private Broker getBrokerHost(byte[] contents) {
+        try {
+            Map<Object, Object> value = (Map<Object, Object>) JSONValue.parse(new String(contents, "UTF-8"));
+            String host = (String) value.get("host");
+            Integer port = ((Long) value.get("port")).intValue();
+            return new Broker(host, port);
+        } catch (UnsupportedEncodingException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    /**
+     * Validate required parameters in the input configuration Map
+     * @param conf
+     */
+    private void validateConfig(final Map conf) {
+        Preconditions.checkNotNull(conf.get(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT),
+                "%s cannot be null", Config.STORM_ZOOKEEPER_SESSION_TIMEOUT);
+        Preconditions.checkNotNull(conf.get(Config.STORM_ZOOKEEPER_CONNECTION_TIMEOUT),
+                "%s cannot be null", Config.STORM_ZOOKEEPER_CONNECTION_TIMEOUT);
+        Preconditions.checkNotNull(conf.get(Config.STORM_ZOOKEEPER_RETRY_TIMES),
+                "%s cannot be null", Config.STORM_ZOOKEEPER_RETRY_TIMES);
+        Preconditions.checkNotNull(conf.get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL),
+                "%s cannot be null", Config.STORM_ZOOKEEPER_RETRY_INTERVAL);
+    }
+
+}

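A hedged construction sketch (not part of the patch) showing the four ZooKeeper client settings that validateConfig() above requires before DynamicBrokersReader will connect; the connection string, zkPath and topic are placeholders, and the timeout/retry values are illustrative.

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.storm.Config;
    import org.apache.storm.kafka.DynamicBrokersReader;

    public class BrokersReaderSketch {
        public static void main(String[] args) throws Exception {
            // validateConfig() rejects a conf that is missing any of these keys.
            Map<String, Object> conf = new HashMap<>();
            conf.put(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT, 20000);
            conf.put(Config.STORM_ZOOKEEPER_CONNECTION_TIMEOUT, 15000);
            conf.put(Config.STORM_ZOOKEEPER_RETRY_TIMES, 5);
            conf.put(Config.STORM_ZOOKEEPER_RETRY_INTERVAL, 1000);

            // zkPath is the broker root in ZooKeeper, e.g. "/brokers".
            DynamicBrokersReader reader =
                    new DynamicBrokersReader(conf, "zkhost:2181", "/brokers", "my-topic");
            try {
                System.out.println(reader.getBrokerInfo());
            } finally {
                reader.close();
            }
        }
    }
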
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/DynamicPartitionConnections.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/DynamicPartitionConnections.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/DynamicPartitionConnections.java
new file mode 100644
index 0000000..6d30139
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/DynamicPartitionConnections.java
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import kafka.javaapi.consumer.SimpleConsumer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.storm.kafka.trident.IBrokerReader;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+
+public class DynamicPartitionConnections {
+
+    public static final Logger LOG = LoggerFactory.getLogger(DynamicPartitionConnections.class);
+
+    static class ConnectionInfo {
+        SimpleConsumer consumer;
+        Set<String> partitions = new HashSet<String>();
+
+        public ConnectionInfo(SimpleConsumer consumer) {
+            this.consumer = consumer;
+        }
+    }
+
+    Map<Broker, ConnectionInfo> _connections = new HashMap();
+    KafkaConfig _config;
+    IBrokerReader _reader;
+
+    public DynamicPartitionConnections(KafkaConfig config, IBrokerReader brokerReader) {
+        _config = config;
+        _reader = brokerReader;
+    }
+
+    public SimpleConsumer register(Partition partition) {
+        Broker broker = _reader.getBrokerForTopic(partition.topic).getBrokerFor(partition.partition);
+        return register(broker, partition.topic, partition.partition);
+    }
+
+    public SimpleConsumer register(Broker host, String topic, int partition) {
+        if (!_connections.containsKey(host)) {
+            _connections.put(host, new ConnectionInfo(new SimpleConsumer(host.host, host.port, _config.socketTimeoutMs, _config.bufferSizeBytes, _config.clientId)));
+        }
+        ConnectionInfo info = _connections.get(host);
+        info.partitions.add(getHashKey(topic,partition));
+        return info.consumer;
+    }
+
+    public SimpleConsumer getConnection(Partition partition) {
+        ConnectionInfo info = _connections.get(partition.host);
+        if (info != null) {
+            return info.consumer;
+        }
+        return null;
+    }
+
+    public void unregister(Broker port, String topic, int partition) {
+        ConnectionInfo info = _connections.get(port);
+        info.partitions.remove(getHashKey(topic,partition));
+        if (info.partitions.isEmpty()) {
+            info.consumer.close();
+            _connections.remove(port);
+        }
+    }
+
+    public void unregister(Partition partition) {
+        unregister(partition.host, partition.topic, partition.partition);
+    }
+
+    public void clear() {
+        for (ConnectionInfo info : _connections.values()) {
+            info.consumer.close();
+        }
+        _connections.clear();
+    }
+
+    private String getHashKey(String topic, int partition) {
+        return topic + "_" + partition;
+    }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/ExponentialBackoffMsgRetryManager.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/ExponentialBackoffMsgRetryManager.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/ExponentialBackoffMsgRetryManager.java
new file mode 100644
index 0000000..f86d624
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/ExponentialBackoffMsgRetryManager.java
@@ -0,0 +1,184 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import java.util.Comparator;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.PriorityQueue;
+import java.util.Queue;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+
+public class ExponentialBackoffMsgRetryManager implements FailedMsgRetryManager {
+
+    private final long retryInitialDelayMs;
+    private final double retryDelayMultiplier;
+    private final long retryDelayMaxMs;
+
+    private Queue<MessageRetryRecord> waiting = new PriorityQueue<MessageRetryRecord>(11, new RetryTimeComparator());
+    private Map<Long,MessageRetryRecord> records = new ConcurrentHashMap<Long,MessageRetryRecord>();
+
+    public ExponentialBackoffMsgRetryManager(long retryInitialDelayMs, double retryDelayMultiplier, long retryDelayMaxMs) {
+        this.retryInitialDelayMs = retryInitialDelayMs;
+        this.retryDelayMultiplier = retryDelayMultiplier;
+        this.retryDelayMaxMs = retryDelayMaxMs;
+    }
+
+    @Override
+    public void failed(Long offset) {
+        MessageRetryRecord oldRecord = this.records.get(offset);
+        MessageRetryRecord newRecord = oldRecord == null ?
+                                       new MessageRetryRecord(offset) :
+                                       oldRecord.createNextRetryRecord();
+        this.records.put(offset, newRecord);
+        this.waiting.add(newRecord);
+    }
+
+    @Override
+    public void acked(Long offset) {
+        MessageRetryRecord record = this.records.remove(offset);
+        if (record != null) {
+            this.waiting.remove(record);
+        }
+    }
+
+    @Override
+    public void retryStarted(Long offset) {
+        MessageRetryRecord record = this.records.get(offset);
+        if (record == null || !this.waiting.contains(record)) {
+            throw new IllegalStateException("cannot retry a message that has not failed");
+        } else {
+            this.waiting.remove(record);
+        }
+    }
+
+    @Override
+    public Long nextFailedMessageToRetry() {
+        if (this.waiting.size() > 0) {
+            MessageRetryRecord first = this.waiting.peek();
+            if (System.currentTimeMillis() >= first.retryTimeUTC) {
+                if (this.records.containsKey(first.offset)) {
+                    return first.offset;
+                } else {
+                    // defensive programming - should be impossible
+                    this.waiting.remove(first);
+                    return nextFailedMessageToRetry();
+                }
+            }
+        }
+        return null;
+    }
+
+    @Override
+    public boolean shouldRetryMsg(Long offset) {
+        MessageRetryRecord record = this.records.get(offset);
+        return record != null &&
+                this.waiting.contains(record) &&
+                System.currentTimeMillis() >= record.retryTimeUTC;
+    }
+
+    @Override
+    public Set<Long> clearInvalidMessages(Long kafkaOffset) {
+        Set<Long> invalidOffsets = new HashSet<Long>(); 
+        for(Long offset : records.keySet()){
+            if(offset < kafkaOffset){
+                MessageRetryRecord record = this.records.remove(offset);
+                if (record != null) {
+                    this.waiting.remove(record);
+                    invalidOffsets.add(offset);
+                }
+            }
+        }
+        return invalidOffsets;
+    }
+
+    /**
+     * A MessageRetryRecord holds the data of how many times a message has
+     * failed and been retried, and when the last failure occurred.  It can
+     * determine whether it is ready to be retried by employing an exponential
+     * back-off calculation using config values stored in SpoutConfig:
+     * <ul>
+     *  <li>retryInitialDelayMs - time to delay before the first retry</li>
+     *  <li>retryDelayMultiplier - multiplier by which to increase the delay for each subsequent retry</li>
+     *  <li>retryDelayMaxMs - maximum retry delay (once this delay time is reached, subsequent retries will
+     *                        delay for this amount of time every time)
+     *  </li>
+     * </ul>
+     */
+    private class MessageRetryRecord {
+        private final long offset;
+        private final int retryNum;
+        private final long retryTimeUTC;
+
+        public MessageRetryRecord(long offset) {
+            this(offset, 1);
+        }
+
+        private MessageRetryRecord(long offset, int retryNum) {
+            this.offset = offset;
+            this.retryNum = retryNum;
+            this.retryTimeUTC = System.currentTimeMillis() + calculateRetryDelay();
+        }
+
+        /**
+         * Create a MessageRetryRecord for the next retry that should occur after this one.
+         * @return MessageRetryRecord with the next retry time, or null to indicate that another
+         *         retry should not be performed.  The latter case can happen if we are about to
+         *         run into the org.apache.storm.Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS in the Storm
+         *         configuration.
+         */
+        public MessageRetryRecord createNextRetryRecord() {
+            return new MessageRetryRecord(this.offset, this.retryNum + 1);
+        }
+
+        private long calculateRetryDelay() {
+            double delayMultiplier = Math.pow(retryDelayMultiplier, this.retryNum - 1);
+            double delay = retryInitialDelayMs * delayMultiplier;
+            Long maxLong = Long.MAX_VALUE;
+            long delayThisRetryMs = delay >= maxLong.doubleValue()
+                                    ?  maxLong
+                                    : (long) delay;
+            return Math.min(delayThisRetryMs, retryDelayMaxMs);
+        }
+
+        @Override
+        public boolean equals(Object other) {
+            return (other instanceof MessageRetryRecord
+                    && this.offset == ((MessageRetryRecord) other).offset);
+        }
+
+        @Override
+        public int hashCode() {
+            return Long.valueOf(this.offset).hashCode();
+        }
+    }
+
+    private static class RetryTimeComparator implements Comparator<MessageRetryRecord> {
+
+        @Override
+        public int compare(MessageRetryRecord record1, MessageRetryRecord record2) {
+            return Long.valueOf(record1.retryTimeUTC).compareTo(Long.valueOf(record2.retryTimeUTC));
+        }
+
+        @Override
+        public boolean equals(Object obj) {
+            return false;
+        }
+    }
+}

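As a worked illustration (not part of the patch) of the back-off calculation documented above: with an initial delay of 100 ms and a multiplier of 2.0, calculateRetryDelay() yields 100, 200, 400, 800, ... ms for successive retries, capped at retryDelayMaxMs. The offset value below is arbitrary.

    import org.apache.storm.kafka.ExponentialBackoffMsgRetryManager;

    public class RetryBackoffSketch {
        public static void main(String[] args) {
            // initialDelay=100ms, multiplier=2.0, cap=60s.
            ExponentialBackoffMsgRetryManager retryManager =
                    new ExponentialBackoffMsgRetryManager(100, 2.0, 60000);

            retryManager.failed(42L);   // first failure: eligible for retry after ~100 ms
            // false until the computed retry time has elapsed
            System.out.println("ready immediately? " + retryManager.shouldRetryMsg(42L));
        }
    }
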
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/FailedFetchException.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/FailedFetchException.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/FailedFetchException.java
new file mode 100644
index 0000000..448d0c3
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/FailedFetchException.java
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+public class FailedFetchException extends RuntimeException {
+
+    public FailedFetchException(String message) {
+        super(message);
+    }
+
+    public FailedFetchException(Exception e) {
+        super(e);
+    }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/FailedMsgRetryManager.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/FailedMsgRetryManager.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/FailedMsgRetryManager.java
new file mode 100644
index 0000000..e9a7092
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/FailedMsgRetryManager.java
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import java.util.Set;
+
+public interface FailedMsgRetryManager {
+    public void failed(Long offset);
+    public void acked(Long offset);
+    public void retryStarted(Long offset);
+    public Long nextFailedMessageToRetry();
+    public boolean shouldRetryMsg(Long offset);
+    public Set<Long> clearInvalidMessages(Long kafkaOffset);
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/IntSerializer.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/IntSerializer.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/IntSerializer.java
new file mode 100644
index 0000000..75f5563
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/IntSerializer.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import org.apache.kafka.common.serialization.Serializer;
+
+import java.nio.ByteBuffer;
+import java.nio.IntBuffer;
+import java.util.Map;
+
+public class IntSerializer implements Serializer<Integer> {
+  @Override
+  public void configure(Map<String, ?> map, boolean b) {
+  }
+
+  @Override
+  public byte[] serialize(String topic, Integer val) {
+    byte[] r = new byte[4];
+    IntBuffer b = ByteBuffer.wrap(r).asIntBuffer();
+    b.put(val);
+    return r;
+  }
+
+  @Override
+  public void close() {
+  }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/KafkaConfig.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/KafkaConfig.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/KafkaConfig.java
new file mode 100644
index 0000000..e1e1d24
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/KafkaConfig.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import org.apache.storm.spout.MultiScheme;
+import org.apache.storm.spout.RawMultiScheme;
+
+import java.io.Serializable;
+
+public class KafkaConfig implements Serializable {
+    private static final long serialVersionUID = 5276718734571623855L;
+    
+    public final BrokerHosts hosts;
+    public final String topic;
+    public final String clientId;
+
+    public int fetchSizeBytes = 1024 * 1024;
+    public int socketTimeoutMs = 10000;
+    public int fetchMaxWait = 10000;
+    public int bufferSizeBytes = 1024 * 1024;
+    public MultiScheme scheme = new RawMultiScheme();
+    public boolean ignoreZkOffsets = false;
+    public long startOffsetTime = kafka.api.OffsetRequest.EarliestTime();
+    public long maxOffsetBehind = Long.MAX_VALUE;
+    public boolean useStartOffsetTimeIfOffsetOutOfRange = true;
+    public int metricsTimeBucketSizeInSecs = 60;
+
+    public KafkaConfig(BrokerHosts hosts, String topic) {
+        this(hosts, topic, kafka.api.OffsetRequest.DefaultClientId());
+    }
+
+    public KafkaConfig(BrokerHosts hosts, String topic, String clientId) {
+        this.hosts = hosts;
+        this.topic = topic;
+        this.clientId = clientId;
+    }
+
+}

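A short sketch (not part of the patch) of overriding a few of the KafkaConfig defaults declared above; the ZooKeeper host and topic are placeholders, and ZkHosts is assumed as the BrokerHosts implementation for ZooKeeper-backed broker discovery.

    import org.apache.storm.kafka.BrokerHosts;
    import org.apache.storm.kafka.KafkaConfig;
    import org.apache.storm.kafka.ZkHosts;

    public class KafkaConfigSketch {
        public static void main(String[] args) {
            BrokerHosts hosts = new ZkHosts("zkhost:2181");
            KafkaConfig config = new KafkaConfig(hosts, "my-topic");

            // Override selected defaults from the class above.
            config.fetchSizeBytes = 2 * 1024 * 1024;                        // default 1 MB
            config.startOffsetTime = kafka.api.OffsetRequest.LatestTime();  // default EarliestTime()
            config.ignoreZkOffsets = true;                                  // start from startOffsetTime
        }
    }
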
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/KafkaError.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/KafkaError.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/KafkaError.java
new file mode 100644
index 0000000..1d866e7
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/KafkaError.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+public enum KafkaError {
+    NO_ERROR,
+    OFFSET_OUT_OF_RANGE,
+    INVALID_MESSAGE,
+    UNKNOWN_TOPIC_OR_PARTITION,
+    INVALID_FETCH_SIZE,
+    LEADER_NOT_AVAILABLE,
+    NOT_LEADER_FOR_PARTITION,
+    REQUEST_TIMED_OUT,
+    BROKER_NOT_AVAILABLE,
+    REPLICA_NOT_AVAILABLE,
+    MESSAGE_SIZE_TOO_LARGE,
+    STALE_CONTROLLER_EPOCH,
+    OFFSET_METADATA_TOO_LARGE,
+    UNKNOWN;
+
+    public static KafkaError getError(int errorCode) {
+        if (errorCode < 0 || errorCode >= UNKNOWN.ordinal()) {
+            return UNKNOWN;
+        } else {
+            return values()[errorCode];
+        }
+    }
+}

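A tiny sketch (not part of the patch) of the error-code mapping implemented by KafkaError.getError() above: codes map positionally onto the enum constants, and anything out of range resolves to UNKNOWN.

    import org.apache.storm.kafka.KafkaError;

    public class KafkaErrorSketch {
        public static void main(String[] args) {
            System.out.println(KafkaError.getError(1));   // OFFSET_OUT_OF_RANGE
            System.out.println(KafkaError.getError(99));  // UNKNOWN
        }
    }
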
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/KafkaSpout.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/KafkaSpout.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/KafkaSpout.java
new file mode 100644
index 0000000..7a83ae0
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/KafkaSpout.java
@@ -0,0 +1,198 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import org.apache.storm.Config;
+import org.apache.storm.metric.api.IMetric;
+import org.apache.storm.spout.SpoutOutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseRichSpout;
+import com.google.common.base.Strings;
+import kafka.message.Message;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.storm.kafka.PartitionManager.KafkaMessageId;
+
+import java.util.*;
+
+// TODO: need to add blacklisting
+// TODO: need to make a best effort to not re-emit messages if don't have to
+public class KafkaSpout extends BaseRichSpout {
+    static enum EmitState {
+        EMITTED_MORE_LEFT,
+        EMITTED_END,
+        NO_EMITTED
+    }
+
+    public static final Logger LOG = LoggerFactory.getLogger(KafkaSpout.class);
+
+    SpoutConfig _spoutConfig;
+    SpoutOutputCollector _collector;
+    PartitionCoordinator _coordinator;
+    DynamicPartitionConnections _connections;
+    ZkState _state;
+
+    long _lastUpdateMs = 0;
+
+    int _currPartitionIndex = 0;
+
+    public KafkaSpout(SpoutConfig spoutConf) {
+        _spoutConfig = spoutConf;
+    }
+
+    @Override
+    public void open(Map conf, final TopologyContext context, final SpoutOutputCollector collector) {
+        _collector = collector;
+        String topologyInstanceId = context.getStormId();
+        Map stateConf = new HashMap(conf);
+        List<String> zkServers = _spoutConfig.zkServers;
+        if (zkServers == null) {
+            zkServers = (List<String>) conf.get(Config.STORM_ZOOKEEPER_SERVERS);
+        }
+        Integer zkPort = _spoutConfig.zkPort;
+        if (zkPort == null) {
+            zkPort = ((Number) conf.get(Config.STORM_ZOOKEEPER_PORT)).intValue();
+        }
+        stateConf.put(Config.TRANSACTIONAL_ZOOKEEPER_SERVERS, zkServers);
+        stateConf.put(Config.TRANSACTIONAL_ZOOKEEPER_PORT, zkPort);
+        stateConf.put(Config.TRANSACTIONAL_ZOOKEEPER_ROOT, _spoutConfig.zkRoot);
+        _state = new ZkState(stateConf);
+
+        _connections = new DynamicPartitionConnections(_spoutConfig, KafkaUtils.makeBrokerReader(conf, _spoutConfig));
+
+        // using TransactionalState like this is a hack
+        int totalTasks = context.getComponentTasks(context.getThisComponentId()).size();
+        if (_spoutConfig.hosts instanceof StaticHosts) {
+            _coordinator = new StaticCoordinator(_connections, conf,
+                    _spoutConfig, _state, context.getThisTaskIndex(),
+                    totalTasks, topologyInstanceId);
+        } else {
+            _coordinator = new ZkCoordinator(_connections, conf,
+                    _spoutConfig, _state, context.getThisTaskIndex(),
+                    totalTasks, topologyInstanceId);
+        }
+
+        context.registerMetric("kafkaOffset", new IMetric() {
+            KafkaUtils.KafkaOffsetMetric _kafkaOffsetMetric = new KafkaUtils.KafkaOffsetMetric(_connections);
+
+            @Override
+            public Object getValueAndReset() {
+                List<PartitionManager> pms = _coordinator.getMyManagedPartitions();
+                Set<Partition> latestPartitions = new HashSet();
+                for (PartitionManager pm : pms) {
+                    latestPartitions.add(pm.getPartition());
+                }
+                _kafkaOffsetMetric.refreshPartitions(latestPartitions);
+                for (PartitionManager pm : pms) {
+                    _kafkaOffsetMetric.setLatestEmittedOffset(pm.getPartition(), pm.lastCompletedOffset());
+                }
+                return _kafkaOffsetMetric.getValueAndReset();
+            }
+        }, _spoutConfig.metricsTimeBucketSizeInSecs);
+
+        context.registerMetric("kafkaPartition", new IMetric() {
+            @Override
+            public Object getValueAndReset() {
+                List<PartitionManager> pms = _coordinator.getMyManagedPartitions();
+                Map concatMetricsDataMaps = new HashMap();
+                for (PartitionManager pm : pms) {
+                    concatMetricsDataMaps.putAll(pm.getMetricsDataMap());
+                }
+                return concatMetricsDataMaps;
+            }
+        }, _spoutConfig.metricsTimeBucketSizeInSecs);
+    }
+
+    @Override
+    public void close() {
+        _state.close();
+    }
+
+    @Override
+    public void nextTuple() {
+        List<PartitionManager> managers = _coordinator.getMyManagedPartitions();
+        for (int i = 0; i < managers.size(); i++) {
+
+            try {
+                // in case the number of managers decreased
+                _currPartitionIndex = _currPartitionIndex % managers.size();
+                EmitState state = managers.get(_currPartitionIndex).next(_collector);
+                if (state != EmitState.EMITTED_MORE_LEFT) {
+                    _currPartitionIndex = (_currPartitionIndex + 1) % managers.size();
+                }
+                if (state != EmitState.NO_EMITTED) {
+                    break;
+                }
+            } catch (FailedFetchException e) {
+                LOG.warn("Fetch failed", e);
+                _coordinator.refresh();
+            }
+        }
+
+        long diffWithNow = System.currentTimeMillis() - _lastUpdateMs;
+
+        /*
+             As far as the System.currentTimeMillis() is dependent on System clock,
+             additional check on negative value of diffWithNow in case of external changes.
+         */
+        if (diffWithNow > _spoutConfig.stateUpdateIntervalMs || diffWithNow < 0) {
+            commit();
+        }
+    }
+
+    @Override
+    public void ack(Object msgId) {
+        KafkaMessageId id = (KafkaMessageId) msgId;
+        PartitionManager m = _coordinator.getManager(id.partition);
+        if (m != null) {
+            m.ack(id.offset);
+        }
+    }
+
+    @Override
+    public void fail(Object msgId) {
+        KafkaMessageId id = (KafkaMessageId) msgId;
+        PartitionManager m = _coordinator.getManager(id.partition);
+        if (m != null) {
+            m.fail(id.offset);
+        }
+    }
+
+    @Override
+    public void deactivate() {
+        commit();
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+       if (!Strings.isNullOrEmpty(_spoutConfig.outputStreamId)) {
+            declarer.declareStream(_spoutConfig.outputStreamId, _spoutConfig.scheme.getOutputFields());
+        } else {
+            declarer.declare(_spoutConfig.scheme.getOutputFields());
+        }
+    }
+
+    private void commit() {
+        _lastUpdateMs = System.currentTimeMillis();
+        for (PartitionManager manager : _coordinator.getMyManagedPartitions()) {
+            manager.commit();
+        }
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/KafkaUtils.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/KafkaUtils.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/KafkaUtils.java
new file mode 100644
index 0000000..8cd0fd0
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/KafkaUtils.java
@@ -0,0 +1,275 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import org.apache.storm.metric.api.IMetric;
+import org.apache.storm.utils.Utils;
+import com.google.common.base.Preconditions;
+import kafka.api.FetchRequest;
+import kafka.api.FetchRequestBuilder;
+import kafka.api.PartitionOffsetRequestInfo;
+import kafka.common.TopicAndPartition;
+import kafka.javaapi.FetchResponse;
+import kafka.javaapi.OffsetRequest;
+import kafka.javaapi.consumer.SimpleConsumer;
+import kafka.javaapi.message.ByteBufferMessageSet;
+import kafka.message.Message;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.storm.kafka.trident.GlobalPartitionInformation;
+import org.apache.storm.kafka.trident.IBrokerReader;
+import org.apache.storm.kafka.trident.StaticBrokerReader;
+import org.apache.storm.kafka.trident.ZkBrokerReader;
+
+import java.io.IOException;
+import java.net.ConnectException;
+import java.net.SocketTimeoutException;
+import java.nio.ByteBuffer;
+import java.nio.channels.UnresolvedAddressException;
+import java.util.*;
+
+
+public class KafkaUtils {
+
+    public static final Logger LOG = LoggerFactory.getLogger(KafkaUtils.class);
+    private static final int NO_OFFSET = -5;
+
+
+    public static IBrokerReader makeBrokerReader(Map stormConf, KafkaConfig conf) {
+        if (conf.hosts instanceof StaticHosts) {
+            return new StaticBrokerReader(conf.topic, ((StaticHosts) conf.hosts).getPartitionInformation());
+        } else {
+            return new ZkBrokerReader(stormConf, conf.topic, (ZkHosts) conf.hosts);
+        }
+    }
+
+
+    public static long getOffset(SimpleConsumer consumer, String topic, int partition, KafkaConfig config) {
+        long startOffsetTime = config.startOffsetTime;
+        return getOffset(consumer, topic, partition, startOffsetTime);
+    }
+
+    public static long getOffset(SimpleConsumer consumer, String topic, int partition, long startOffsetTime) {
+        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
+        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
+        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(startOffsetTime, 1));
+        OffsetRequest request = new OffsetRequest(
+                requestInfo, kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
+
+        long[] offsets = consumer.getOffsetsBefore(request).offsets(topic, partition);
+        if (offsets.length > 0) {
+            return offsets[0];
+        } else {
+            return NO_OFFSET;
+        }
+    }
+
+    public static class KafkaOffsetMetric implements IMetric {
+        Map<Partition, Long> _partitionToOffset = new HashMap<Partition, Long>();
+        Set<Partition> _partitions;
+        DynamicPartitionConnections _connections;
+
+        public KafkaOffsetMetric(DynamicPartitionConnections connections) {
+            _connections = connections;
+        }
+
+        public void setLatestEmittedOffset(Partition partition, long offset) {
+            _partitionToOffset.put(partition, offset);
+        }
+
+        private class TopicMetrics {
+            long totalSpoutLag = 0;
+            long totalEarliestTimeOffset = 0;
+            long totalLatestTimeOffset = 0;
+            long totalLatestEmittedOffset = 0;
+        }
+
+        @Override
+        public Object getValueAndReset() {
+            try {
+                HashMap ret = new HashMap();
+                if (_partitions != null && _partitions.size() == _partitionToOffset.size()) {
+                    Map<String,TopicMetrics> topicMetricsMap = new TreeMap<String, TopicMetrics>();
+                    for (Map.Entry<Partition, Long> e : _partitionToOffset.entrySet()) {
+                        Partition partition = e.getKey();
+                        SimpleConsumer consumer = _connections.getConnection(partition);
+                        if (consumer == null) {
+                            LOG.warn("partitionToOffset contains partition not found in _connections. Stale partition data?");
+                            return null;
+                        }
+                        long latestTimeOffset = getOffset(consumer, partition.topic, partition.partition, kafka.api.OffsetRequest.LatestTime());
+                        long earliestTimeOffset = getOffset(consumer, partition.topic, partition.partition, kafka.api.OffsetRequest.EarliestTime());
+                        if (latestTimeOffset == KafkaUtils.NO_OFFSET) {
+                            LOG.warn("No data found in Kafka Partition " + partition.getId());
+                            return null;
+                        }
+                        long latestEmittedOffset = e.getValue();
+                        long spoutLag = latestTimeOffset - latestEmittedOffset;
+                        String topic = partition.topic;
+                        String metricPath = partition.getId();
+                        //Handle the case where the partition path id does not contain the topic name, i.e. Partition.getId() == "partition_" + partition
+                        if (!metricPath.startsWith(topic + "/")) {
+                            metricPath = topic + "/" + metricPath;
+                        }
+                        ret.put(metricPath + "/" + "spoutLag", spoutLag);
+                        ret.put(metricPath + "/" + "earliestTimeOffset", earliestTimeOffset);
+                        ret.put(metricPath + "/" + "latestTimeOffset", latestTimeOffset);
+                        ret.put(metricPath + "/" + "latestEmittedOffset", latestEmittedOffset);
+
+                        if (!topicMetricsMap.containsKey(partition.topic)) {
+                            topicMetricsMap.put(partition.topic,new TopicMetrics());
+                        }
+
+                        TopicMetrics topicMetrics = topicMetricsMap.get(partition.topic);
+                        topicMetrics.totalSpoutLag += spoutLag;
+                        topicMetrics.totalEarliestTimeOffset += earliestTimeOffset;
+                        topicMetrics.totalLatestTimeOffset += latestTimeOffset;
+                        topicMetrics.totalLatestEmittedOffset += latestEmittedOffset;
+                    }
+
+                    for(Map.Entry<String, TopicMetrics> e : topicMetricsMap.entrySet()) {
+                        String topic = e.getKey();
+                        TopicMetrics topicMetrics = e.getValue();
+                        ret.put(topic + "/" + "totalSpoutLag", topicMetrics.totalSpoutLag);
+                        ret.put(topic + "/" + "totalEarliestTimeOffset", topicMetrics.totalEarliestTimeOffset);
+                        ret.put(topic + "/" + "totalLatestTimeOffset", topicMetrics.totalLatestTimeOffset);
+                        ret.put(topic + "/" + "totalLatestEmittedOffset", topicMetrics.totalLatestEmittedOffset);
+                    }
+
+                    return ret;
+                } else {
+                    LOG.info("Metrics Tick: Not enough data to calculate spout lag.");
+                }
+            } catch (Throwable t) {
+                LOG.warn("Metrics Tick: Exception when computing kafkaOffset metric.", t);
+            }
+            return null;
+        }
+
+        public void refreshPartitions(Set<Partition> partitions) {
+            _partitions = partitions;
+            Iterator<Partition> it = _partitionToOffset.keySet().iterator();
+            while (it.hasNext()) {
+                if (!partitions.contains(it.next())) {
+                    it.remove();
+                }
+            }
+        }
+    }
+
+    public static ByteBufferMessageSet fetchMessages(KafkaConfig config, SimpleConsumer consumer, Partition partition, long offset)
+            throws TopicOffsetOutOfRangeException, FailedFetchException, RuntimeException {
+        ByteBufferMessageSet msgs = null;
+        String topic = partition.topic;
+        int partitionId = partition.partition;
+        FetchRequestBuilder builder = new FetchRequestBuilder();
+        FetchRequest fetchRequest = builder.addFetch(topic, partitionId, offset, config.fetchSizeBytes).
+                clientId(config.clientId).maxWait(config.fetchMaxWait).build();
+        FetchResponse fetchResponse;
+        try {
+            fetchResponse = consumer.fetch(fetchRequest);
+        } catch (Exception e) {
+            if (e instanceof ConnectException ||
+                    e instanceof SocketTimeoutException ||
+                    e instanceof IOException ||
+                    e instanceof UnresolvedAddressException
+                    ) {
+                LOG.warn("Network error when fetching messages:", e);
+                throw new FailedFetchException(e);
+            } else {
+                throw new RuntimeException(e);
+            }
+        }
+        if (fetchResponse.hasError()) {
+            KafkaError error = KafkaError.getError(fetchResponse.errorCode(topic, partitionId));
+            if (error.equals(KafkaError.OFFSET_OUT_OF_RANGE) && config.useStartOffsetTimeIfOffsetOutOfRange) {
+                String msg = partition + " Got fetch request with offset out of range: [" + offset + "]";
+                LOG.warn(msg);
+                throw new TopicOffsetOutOfRangeException(msg);
+            } else {
+                String message = "Error fetching data from [" + partition + "] for topic [" + topic + "]: [" + error + "]";
+                LOG.error(message);
+                throw new FailedFetchException(message);
+            }
+        } else {
+            msgs = fetchResponse.messageSet(topic, partitionId);
+        }
+        return msgs;
+    }
+
+
+    public static Iterable<List<Object>> generateTuples(KafkaConfig kafkaConfig, Message msg, String topic) {
+        Iterable<List<Object>> tups;
+        ByteBuffer payload = msg.payload();
+        if (payload == null) {
+            return null;
+        }
+        ByteBuffer key = msg.key();
+        if (key != null && kafkaConfig.scheme instanceof KeyValueSchemeAsMultiScheme) {
+            tups = ((KeyValueSchemeAsMultiScheme) kafkaConfig.scheme).deserializeKeyAndValue(key, payload);
+        } else {
+            if (kafkaConfig.scheme instanceof StringMultiSchemeWithTopic) {
+                tups = ((StringMultiSchemeWithTopic)kafkaConfig.scheme).deserializeWithTopic(topic, payload);
+            } else {
+                tups = kafkaConfig.scheme.deserialize(payload);
+            }
+        }
+        return tups;
+    }
+    
+    public static Iterable<List<Object>> generateTuples(MessageMetadataSchemeAsMultiScheme scheme, Message msg, Partition partition, long offset) {
+        ByteBuffer payload = msg.payload();
+        if (payload == null) {
+            return null;
+        }
+        return scheme.deserializeMessageWithMetadata(payload, partition, offset);
+    }
+
+
+    public static List<Partition> calculatePartitionsForTask(List<GlobalPartitionInformation> partitionInfos, int totalTasks, int taskIndex) {
+        Preconditions.checkArgument(taskIndex < totalTasks, "task index must be less than total tasks");
+        List<Partition> taskPartitions = new ArrayList<Partition>();
+        List<Partition> partitions = new ArrayList<Partition>();
+        for(GlobalPartitionInformation partitionInformation : partitionInfos) {
+            partitions.addAll(partitionInformation.getOrderedPartitions());
+        }
+        int numPartitions = partitions.size();
+        if (numPartitions < totalTasks) {
+            LOG.warn("there are more tasks than partitions (tasks: " + totalTasks + "; partitions: " + numPartitions + "), some tasks will be idle");
+        }
+        for (int i = taskIndex; i < numPartitions; i += totalTasks) {
+            Partition taskPartition = partitions.get(i);
+            taskPartitions.add(taskPartition);
+        }
+        logPartitionMapping(totalTasks, taskIndex, taskPartitions);
+        return taskPartitions;
+    }
+
+    private static void logPartitionMapping(int totalTasks, int taskIndex, List<Partition> taskPartitions) {
+        String taskPrefix = taskId(taskIndex, totalTasks);
+        if (taskPartitions.isEmpty()) {
+            LOG.warn(taskPrefix + "no partitions assigned");
+        } else {
+            LOG.info(taskPrefix + "assigned " + taskPartitions);
+        }
+    }
+
+    public static String taskId(int taskIndex, int totalTasks) {
+        return "Task [" + (taskIndex + 1) + "/" + totalTasks + "] ";
+    }
+}
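
A minimal standalone sketch (illustration only, not part of this commit) of how the round-robin assignment in KafkaUtils.calculatePartitionsForTask spreads partitions across spout tasks; the topic name and broker address below are made-up placeholders:

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.storm.kafka.Broker;
    import org.apache.storm.kafka.KafkaUtils;
    import org.apache.storm.kafka.trident.GlobalPartitionInformation;

    public class PartitionAssignmentSketch {
        public static void main(String[] args) {
            // Metadata for a hypothetical topic with 4 partitions on a single broker.
            GlobalPartitionInformation info = new GlobalPartitionInformation("demo-topic");
            for (int i = 0; i < 4; i++) {
                info.addPartition(i, Broker.fromString("localhost:9092"));
            }
            List<GlobalPartitionInformation> all = new ArrayList<GlobalPartitionInformation>();
            all.add(info);

            // With 2 tasks, task 0 is assigned partitions 0 and 2, task 1 gets 1 and 3.
            int totalTasks = 2;
            for (int task = 0; task < totalTasks; task++) {
                System.out.println(KafkaUtils.taskId(task, totalTasks)
                        + KafkaUtils.calculatePartitionsForTask(all, totalTasks, task));
            }
        }
    }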

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/KeyValueScheme.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/KeyValueScheme.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/KeyValueScheme.java
new file mode 100644
index 0000000..3f9acc2
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/KeyValueScheme.java
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import org.apache.storm.spout.Scheme;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+
+public interface KeyValueScheme extends Scheme {
+    List<Object> deserializeKeyAndValue(ByteBuffer key, ByteBuffer value);
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/KeyValueSchemeAsMultiScheme.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/KeyValueSchemeAsMultiScheme.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/KeyValueSchemeAsMultiScheme.java
new file mode 100644
index 0000000..25053dd
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/KeyValueSchemeAsMultiScheme.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import org.apache.storm.spout.SchemeAsMultiScheme;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.List;
+
+public class KeyValueSchemeAsMultiScheme extends SchemeAsMultiScheme {
+
+    public KeyValueSchemeAsMultiScheme(KeyValueScheme scheme) {
+        super(scheme);
+    }
+
+    public Iterable<List<Object>> deserializeKeyAndValue(final ByteBuffer key, final ByteBuffer value) {
+        List<Object> o = ((KeyValueScheme)scheme).deserializeKeyAndValue(key, value);
+        if(o == null) return null;
+        else return Arrays.asList(o);
+    }
+
+}
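
Illustration only (not in this commit): a minimal sketch of a custom KeyValueScheme implementation that could be wrapped in the KeyValueSchemeAsMultiScheme above. It assumes the ByteBuffer-based Scheme interface used throughout this change and Utils.toByteArray from org.apache.storm.utils; the class name and output field are hypothetical:

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;
    import java.util.Collections;
    import java.util.List;

    import org.apache.storm.kafka.KeyValueScheme;
    import org.apache.storm.tuple.Fields;
    import org.apache.storm.utils.Utils;

    // Emits a single string field; when a key is present it is prepended as "key=value".
    public class KeyPrefixedValueScheme implements KeyValueScheme {

        @Override
        public List<Object> deserializeKeyAndValue(ByteBuffer key, ByteBuffer value) {
            if (key == null) {
                return deserialize(value);
            }
            String k = new String(Utils.toByteArray(key), StandardCharsets.UTF_8);
            String v = new String(Utils.toByteArray(value), StandardCharsets.UTF_8);
            return Collections.<Object>singletonList(k + "=" + v);
        }

        @Override
        public List<Object> deserialize(ByteBuffer ser) {
            return Collections.<Object>singletonList(new String(Utils.toByteArray(ser), StandardCharsets.UTF_8));
        }

        @Override
        public Fields getOutputFields() {
            return new Fields("str");
        }
    }

Setting KafkaConfig.scheme to new KeyValueSchemeAsMultiScheme(new KeyPrefixedValueScheme()) would then route keyed messages through deserializeKeyAndValue in KafkaUtils.generateTuples.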

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/MessageMetadataScheme.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/MessageMetadataScheme.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/MessageMetadataScheme.java
new file mode 100644
index 0000000..d0fc08e
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/MessageMetadataScheme.java
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import org.apache.storm.spout.Scheme;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+
+public interface MessageMetadataScheme extends Scheme {
+    List<Object> deserializeMessageWithMetadata(ByteBuffer message, Partition partition, long offset);
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/MessageMetadataSchemeAsMultiScheme.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/MessageMetadataSchemeAsMultiScheme.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/MessageMetadataSchemeAsMultiScheme.java
new file mode 100644
index 0000000..a53fa88
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/MessageMetadataSchemeAsMultiScheme.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.storm.spout.SchemeAsMultiScheme;
+
+public class MessageMetadataSchemeAsMultiScheme extends SchemeAsMultiScheme {
+    private static final long serialVersionUID = -7172403703813625116L;
+
+    public MessageMetadataSchemeAsMultiScheme(MessageMetadataScheme scheme) {
+        super(scheme);
+    }
+
+    public Iterable<List<Object>> deserializeMessageWithMetadata(ByteBuffer message, Partition partition, long offset) {
+        List<Object> o = ((MessageMetadataScheme) scheme).deserializeMessageWithMetadata(message, partition, offset);
+        if (o == null) {
+            return null;
+        } else {
+            return Arrays.asList(o);
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/Partition.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/Partition.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/Partition.java
new file mode 100644
index 0000000..afdf8af
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/Partition.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import com.google.common.base.Objects;
+import org.apache.storm.trident.spout.ISpoutPartition;
+
+
+public class Partition implements ISpoutPartition {
+
+    public Broker host;
+    public int partition;
+    public String topic;
+
+    //Flag that keeps the partition path id backward compatible with the old implementation, where Partition.getId() == "partition_" + partition
+    private Boolean bUseTopicNameForPartitionPathId;
+
+    // for kryo compatibility
+    private Partition() {
+	
+    }
+    public Partition(Broker host, String topic, int partition) {
+        this.topic = topic;
+        this.host = host;
+        this.partition = partition;
+        this.bUseTopicNameForPartitionPathId = false;
+    }
+    
+    public Partition(Broker host, String topic, int partition, Boolean bUseTopicNameForPartitionPathId) {
+        this.topic = topic;
+        this.host = host;
+        this.partition = partition;
+        this.bUseTopicNameForPartitionPathId = bUseTopicNameForPartitionPathId;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hashCode(host, topic, partition);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) {
+            return true;
+        }
+        if (obj == null || getClass() != obj.getClass()) {
+            return false;
+        }
+        final Partition other = (Partition) obj;
+        return Objects.equal(this.host, other.host) && Objects.equal(this.topic, other.topic) && Objects.equal(this.partition, other.partition);
+    }
+
+    @Override
+    public String toString() {
+        return "Partition{" +
+                "host=" + host +
+                ", topic=" + topic +
+                ", partition=" + partition +
+                '}';
+    }
+
+    @Override
+    public String getId() {
+        if (bUseTopicNameForPartitionPathId) {
+            return topic + "/partition_" + partition;
+        } else {
+            //Keep the partition id backward compatible with the old implementation, where Partition.getId() == "partition_" + partition
+            return "partition_" + partition;
+        }
+    }
+
+}
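
A small sketch (illustration only, not in the commit) of the two path-id formats produced by Partition.getId(); this is the same distinction KafkaUtils.KafkaOffsetMetric handles by prepending the topic name when the id lacks it. The broker address is a placeholder:

    import org.apache.storm.kafka.Broker;
    import org.apache.storm.kafka.Partition;

    public class PartitionIdSketch {
        public static void main(String[] args) {
            Broker broker = Broker.fromString("localhost:9092");
            // Default: legacy id without the topic name, e.g. "partition_3".
            System.out.println(new Partition(broker, "test", 3).getId());
            // With bUseTopicNameForPartitionPathId = true the id becomes "test/partition_3".
            System.out.println(new Partition(broker, "test", 3, true).getId());
        }
    }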

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/PartitionCoordinator.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/PartitionCoordinator.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/PartitionCoordinator.java
new file mode 100644
index 0000000..c9004fa
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/PartitionCoordinator.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import java.util.List;
+
+public interface PartitionCoordinator {
+    List<PartitionManager> getMyManagedPartitions();
+
+    PartitionManager getManager(Partition partition);
+
+    void refresh();
+}


http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/test/org/apache/storm/kafka/KafkaUtilsTest.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/test/org/apache/storm/kafka/KafkaUtilsTest.java b/external/storm-kafka/src/test/org/apache/storm/kafka/KafkaUtilsTest.java
new file mode 100644
index 0000000..9da6c0a
--- /dev/null
+++ b/external/storm-kafka/src/test/org/apache/storm/kafka/KafkaUtilsTest.java
@@ -0,0 +1,295 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.is;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThat;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+
+import kafka.api.OffsetRequest;
+import kafka.javaapi.consumer.SimpleConsumer;
+import kafka.javaapi.message.ByteBufferMessageSet;
+import kafka.message.MessageAndOffset;
+
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.storm.kafka.trident.GlobalPartitionInformation;
+import org.apache.storm.spout.SchemeAsMultiScheme;
+import org.apache.storm.utils.Utils;
+
+import com.google.common.collect.ImmutableMap;
+public class KafkaUtilsTest {
+    private String TEST_TOPIC = "testTopic";
+    private static final Logger LOG = LoggerFactory.getLogger(KafkaUtilsTest.class);
+    private KafkaTestBroker broker;
+    private SimpleConsumer simpleConsumer;
+    private KafkaConfig config;
+    private BrokerHosts brokerHosts;
+
+    @Before
+    public void setup() {
+        broker = new KafkaTestBroker();
+        GlobalPartitionInformation globalPartitionInformation = new GlobalPartitionInformation(TEST_TOPIC);
+        globalPartitionInformation.addPartition(0, Broker.fromString(broker.getBrokerConnectionString()));
+        brokerHosts = new StaticHosts(globalPartitionInformation);
+        config = new KafkaConfig(brokerHosts, TEST_TOPIC);
+        simpleConsumer = new SimpleConsumer("localhost", broker.getPort(), 60000, 1024, "testClient");
+    }
+
+    @After
+    public void shutdown() {
+        simpleConsumer.close();
+        broker.shutdown();
+    }
+
+
+    @Test(expected = FailedFetchException.class)
+    public void topicDoesNotExist() throws Exception {
+        KafkaUtils.fetchMessages(config, simpleConsumer, new Partition(Broker.fromString(broker.getBrokerConnectionString()), TEST_TOPIC, 0), 0);
+    }
+
+    @Test(expected = FailedFetchException.class)
+    public void brokerIsDown() throws Exception {
+        int port = broker.getPort();
+        broker.shutdown();
+        SimpleConsumer simpleConsumer = new SimpleConsumer("localhost", port, 100, 1024, "testClient");
+        try {
+            KafkaUtils.fetchMessages(config, simpleConsumer, new Partition(Broker.fromString(broker.getBrokerConnectionString()), TEST_TOPIC, 0), OffsetRequest.LatestTime());
+        } finally {
+            simpleConsumer.close();
+        }
+    }
+
+    @Test
+    public void fetchMessage() throws Exception {
+        String value = "test";
+        createTopicAndSendMessage(value);
+        long offset = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, OffsetRequest.LatestTime()) - 1;
+        ByteBufferMessageSet messageAndOffsets = KafkaUtils.fetchMessages(config, simpleConsumer,
+                new Partition(Broker.fromString(broker.getBrokerConnectionString()), TEST_TOPIC, 0), offset);
+        String message = new String(Utils.toByteArray(messageAndOffsets.iterator().next().message().payload()));
+        assertThat(message, is(equalTo(value)));
+    }
+
+    @Test(expected = FailedFetchException.class)
+    public void fetchMessagesWithInvalidOffsetAndDefaultHandlingDisabled() throws Exception {
+        config.useStartOffsetTimeIfOffsetOutOfRange = false;
+        KafkaUtils.fetchMessages(config, simpleConsumer,
+                new Partition(Broker.fromString(broker.getBrokerConnectionString()), TEST_TOPIC, 0), -99);
+    }
+
+    @Test(expected = TopicOffsetOutOfRangeException.class)
+    public void fetchMessagesWithInvalidOffsetAndDefaultHandlingEnabled() throws Exception {
+        config = new KafkaConfig(brokerHosts, "newTopic");
+        String value = "test";
+        createTopicAndSendMessage(value);
+        KafkaUtils.fetchMessages(config, simpleConsumer,
+                new Partition(Broker.fromString(broker.getBrokerConnectionString()), "newTopic", 0), -99);
+    }
+
+    @Test
+    public void getOffsetFromConfigAndDontForceFromStart() {
+        config.ignoreZkOffsets = false;
+        config.startOffsetTime = OffsetRequest.EarliestTime();
+        createTopicAndSendMessage();
+        long latestOffset = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, OffsetRequest.EarliestTime());
+        long offsetFromConfig = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, config);
+        assertThat(latestOffset, is(equalTo(offsetFromConfig)));
+    }
+
+    @Test
+    public void getOffsetFromConfigAndForceFromStart() {
+        config.ignoreZkOffsets = true;
+        config.startOffsetTime = OffsetRequest.EarliestTime();
+        createTopicAndSendMessage();
+        long earliestOffset = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, OffsetRequest.EarliestTime());
+        long offsetFromConfig = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, config);
+        assertThat(earliestOffset, is(equalTo(offsetFromConfig)));
+    }
+
+    @Test
+    public void generateTuplesWithoutKeyAndKeyValueScheme() {
+        config.scheme = new KeyValueSchemeAsMultiScheme(new StringKeyValueScheme());
+        runGetValueOnlyTuplesTest();
+    }
+
+    @Test
+    public void generateTuplesWithKeyAndKeyValueScheme() {
+        config.scheme = new KeyValueSchemeAsMultiScheme(new StringKeyValueScheme());
+        config.useStartOffsetTimeIfOffsetOutOfRange = false;
+        String value = "value";
+        String key = "key";
+        createTopicAndSendMessage(key, value);
+        ByteBufferMessageSet messageAndOffsets = getLastMessage();
+        for (MessageAndOffset msg : messageAndOffsets) {
+            Iterable<List<Object>> lists = KafkaUtils.generateTuples(config, msg.message(), config.topic);
+            assertEquals(ImmutableMap.of(key, value), lists.iterator().next().get(0));
+        }
+    }
+
+    @Test
+    public void generateTuplesWithValueScheme() {
+        config.scheme = new SchemeAsMultiScheme(new StringScheme());
+        runGetValueOnlyTuplesTest();
+    }
+
+    @Test
+    public void generateTuplesWithValueAndStringMultiSchemeWithTopic() {
+        config.scheme = new StringMultiSchemeWithTopic();
+        String value = "value";
+        createTopicAndSendMessage(value);
+        ByteBufferMessageSet messageAndOffsets = getLastMessage();
+        for (MessageAndOffset msg : messageAndOffsets) {
+            Iterable<List<Object>> lists = KafkaUtils.generateTuples(config, msg.message(), config.topic);
+            List<Object> list = lists.iterator().next();
+            assertEquals(value, list.get(0));
+            assertEquals(config.topic, list.get(1));
+        }
+    }
+
+    @Test
+    public void generateTuplesWithValueSchemeAndKeyValueMessage() {
+        config.scheme = new SchemeAsMultiScheme(new StringScheme());
+        String value = "value";
+        String key = "key";
+        createTopicAndSendMessage(key, value);
+        ByteBufferMessageSet messageAndOffsets = getLastMessage();
+        for (MessageAndOffset msg : messageAndOffsets) {
+            Iterable<List<Object>> lists = KafkaUtils.generateTuples(config, msg.message(), config.topic);
+            assertEquals(value, lists.iterator().next().get(0));
+        }
+    }
+    
+    @Test
+    public void generateTuplesWithMessageAndMetadataScheme() {
+        String value = "value";
+        Partition mockPartition = Mockito.mock(Partition.class);
+        mockPartition.partition = 0;
+        long offset = 0L;
+        
+        MessageMetadataSchemeAsMultiScheme scheme = new MessageMetadataSchemeAsMultiScheme(new StringMessageAndMetadataScheme());
+        
+        createTopicAndSendMessage(null, value);
+        ByteBufferMessageSet messageAndOffsets = getLastMessage();
+        for (MessageAndOffset msg : messageAndOffsets) {
+            Iterable<List<Object>> lists = KafkaUtils.generateTuples(scheme, msg.message(), mockPartition, offset);
+            List<Object> values = lists.iterator().next(); 
+            assertEquals("Message is incorrect", value, values.get(0));
+            assertEquals("Partition is incorrect", mockPartition.partition, values.get(1));
+            assertEquals("Offset is incorrect", offset, values.get(2));
+        }
+    }
+
+    private ByteBufferMessageSet getLastMessage() {
+        long offsetOfLastMessage = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, OffsetRequest.LatestTime()) - 1;
+        return KafkaUtils.fetchMessages(config, simpleConsumer, new Partition(Broker.fromString(broker.getBrokerConnectionString()), TEST_TOPIC, 0), offsetOfLastMessage);
+    }
+
+    private void runGetValueOnlyTuplesTest() {
+        String value = "value";
+        
+        createTopicAndSendMessage(null, value);
+        ByteBufferMessageSet messageAndOffsets = getLastMessage();
+        for (MessageAndOffset msg : messageAndOffsets) {
+            Iterable<List<Object>> lists = KafkaUtils.generateTuples(config, msg.message(), config.topic);
+            assertEquals(value, lists.iterator().next().get(0));
+        }
+    }
+
+    private void createTopicAndSendMessage() {
+        createTopicAndSendMessage(null, "someValue");
+    }
+
+    private void createTopicAndSendMessage(String value) {
+        createTopicAndSendMessage(null, value);
+    }
+
+    private void createTopicAndSendMessage(String key, String value) {
+        Properties p = new Properties();
+        p.put("acks", "1");
+        p.put("bootstrap.servers", broker.getBrokerConnectionString());
+        p.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
+        p.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
+        p.put("metadata.fetch.timeout.ms", 1000);
+        KafkaProducer<String, String> producer = new KafkaProducer<String, String>(p);
+        try {
+            producer.send(new ProducerRecord<String, String>(config.topic, key, value)).get();
+        } catch (Exception e) {
+            LOG.error("Failed to do synchronous sending due to " + e, e);
+            Assert.fail(e.getMessage());
+        } finally {
+            producer.close();
+        }
+    }
+
+    @Test
+    public void assignOnePartitionPerTask() {
+        runPartitionToTaskMappingTest(16, 1);
+    }
+
+    @Test
+    public void assignTwoPartitionsPerTask() {
+        runPartitionToTaskMappingTest(16, 2);
+    }
+
+    @Test
+    public void assignAllPartitionsToOneTask() {
+        runPartitionToTaskMappingTest(32, 32);
+    }
+    
+    public void runPartitionToTaskMappingTest(int numPartitions, int partitionsPerTask) {
+        GlobalPartitionInformation globalPartitionInformation = TestUtils.buildPartitionInfo(numPartitions);
+        List<GlobalPartitionInformation> partitions = new ArrayList<GlobalPartitionInformation>();
+        partitions.add(globalPartitionInformation);
+        int numTasks = numPartitions / partitionsPerTask;
+        for (int i = 0 ; i < numTasks ; i++) {
+            assertEquals(partitionsPerTask, KafkaUtils.calculatePartitionsForTask(partitions, numTasks, i).size());
+        }
+    }
+
+    @Test
+    public void moreTasksThanPartitions() {
+        GlobalPartitionInformation globalPartitionInformation = TestUtils.buildPartitionInfo(1);
+        List<GlobalPartitionInformation> partitions = new ArrayList<GlobalPartitionInformation>();
+        partitions.add(globalPartitionInformation);
+        int numTasks = 2;
+        assertEquals(1, KafkaUtils.calculatePartitionsForTask(partitions, numTasks, 0).size());
+        assertEquals(0, KafkaUtils.calculatePartitionsForTask(partitions, numTasks, 1).size());
+    }
+
+    @Test (expected = IllegalArgumentException.class )
+    public void assignInvalidTask() {
+        GlobalPartitionInformation globalPartitionInformation = new GlobalPartitionInformation(TEST_TOPIC);
+        List<GlobalPartitionInformation> partitions = new ArrayList<GlobalPartitionInformation>();
+        partitions.add(globalPartitionInformation);
+        KafkaUtils.calculatePartitionsForTask(partitions, 1, 1);
+    }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/test/org/apache/storm/kafka/StringKeyValueSchemeTest.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/test/org/apache/storm/kafka/StringKeyValueSchemeTest.java b/external/storm-kafka/src/test/org/apache/storm/kafka/StringKeyValueSchemeTest.java
new file mode 100644
index 0000000..7e5ff00
--- /dev/null
+++ b/external/storm-kafka/src/test/org/apache/storm/kafka/StringKeyValueSchemeTest.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import org.apache.storm.tuple.Fields;
+import com.google.common.collect.ImmutableMap;
+import org.junit.Test;
+
+import java.nio.ByteBuffer;
+import java.nio.charset.Charset;
+import java.util.Collections;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class StringKeyValueSchemeTest {
+
+    private StringKeyValueScheme scheme = new StringKeyValueScheme();
+
+    @Test
+    public void testDeserialize() throws Exception {
+        assertEquals(Collections.singletonList("test"), scheme.deserialize(wrapString("test")));
+    }
+
+    @Test
+    public void testGetOutputFields() throws Exception {
+        Fields outputFields = scheme.getOutputFields();
+        assertTrue(outputFields.contains(StringScheme.STRING_SCHEME_KEY));
+        assertEquals(1, outputFields.size());
+    }
+
+    @Test
+    public void testDeserializeWithNullKeyAndValue() throws Exception {
+        assertEquals(Collections.singletonList("test"),
+            scheme.deserializeKeyAndValue(null, wrapString("test")));
+    }
+
+    @Test
+    public void testDeserializeWithKeyAndValue() throws Exception {
+        assertEquals(Collections.singletonList(ImmutableMap.of("key", "test")),
+                scheme.deserializeKeyAndValue(wrapString("key"), wrapString("test")));
+    }
+
+    private static ByteBuffer wrapString(String s) {
+        return ByteBuffer.wrap(s.getBytes(Charset.defaultCharset()));
+    }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/test/org/apache/storm/kafka/TestStringScheme.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/test/org/apache/storm/kafka/TestStringScheme.java b/external/storm-kafka/src/test/org/apache/storm/kafka/TestStringScheme.java
new file mode 100644
index 0000000..23944ab
--- /dev/null
+++ b/external/storm-kafka/src/test/org/apache/storm/kafka/TestStringScheme.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import org.junit.Test;
+
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+
+import static org.junit.Assert.assertEquals;
+
+public class TestStringScheme {
+  @Test
+  public void testDeserializeString() {
+    String s = "foo";
+    byte[] bytes = s.getBytes(StandardCharsets.UTF_8);
+    ByteBuffer direct = ByteBuffer.allocateDirect(bytes.length);
+    direct.put(bytes);
+    direct.flip();
+    String s1 = StringScheme.deserializeString(ByteBuffer.wrap(bytes));
+    String s2 = StringScheme.deserializeString(direct);
+    assertEquals(s, s1);
+    assertEquals(s, s2);
+  }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/test/org/apache/storm/kafka/TestUtils.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/test/org/apache/storm/kafka/TestUtils.java b/external/storm-kafka/src/test/org/apache/storm/kafka/TestUtils.java
new file mode 100644
index 0000000..cc3f2be
--- /dev/null
+++ b/external/storm-kafka/src/test/org/apache/storm/kafka/TestUtils.java
@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import org.apache.storm.Config;
+import org.apache.storm.utils.Utils;
+import kafka.api.OffsetRequest;
+import kafka.javaapi.consumer.SimpleConsumer;
+import kafka.javaapi.message.ByteBufferMessageSet;
+import kafka.message.Message;
+import kafka.message.MessageAndOffset;
+import org.apache.storm.kafka.bolt.KafkaBolt;
+import org.apache.storm.kafka.trident.GlobalPartitionInformation;
+
+import java.nio.ByteBuffer;
+import java.util.*;
+
+import static org.junit.Assert.assertEquals;
+
+public class TestUtils {
+
+    public static final String TOPIC = "test";
+
+    public static GlobalPartitionInformation buildPartitionInfo(int numPartitions) {
+        return buildPartitionInfo(numPartitions, 9092);
+    }
+
+    public static List<GlobalPartitionInformation> buildPartitionInfoList(GlobalPartitionInformation partitionInformation) {
+        List<GlobalPartitionInformation> partitions = new ArrayList<GlobalPartitionInformation>();
+        partitions.add(partitionInformation);
+        return partitions;
+    }
+
+    public static GlobalPartitionInformation buildPartitionInfo(int numPartitions, int brokerPort) {
+        GlobalPartitionInformation globalPartitionInformation = new GlobalPartitionInformation(TOPIC);
+        for (int i = 0; i < numPartitions; i++) {
+            globalPartitionInformation.addPartition(i, Broker.fromString("broker-" + i + ":" + brokerPort));
+        }
+        return globalPartitionInformation;
+    }
+
+    public static SimpleConsumer getKafkaConsumer(KafkaTestBroker broker) {
+        return new SimpleConsumer("localhost", broker.getPort(), 60000, 1024, "testClient");
+    }
+
+    public static KafkaConfig getKafkaConfig(KafkaTestBroker broker) {
+        BrokerHosts brokerHosts = getBrokerHosts(broker);
+        KafkaConfig kafkaConfig = new KafkaConfig(brokerHosts, TOPIC);
+        return kafkaConfig;
+    }
+
+    private static BrokerHosts getBrokerHosts(KafkaTestBroker broker) {
+        GlobalPartitionInformation globalPartitionInformation = new GlobalPartitionInformation(TOPIC);
+        globalPartitionInformation.addPartition(0, Broker.fromString(broker.getBrokerConnectionString()));
+        return new StaticHosts(globalPartitionInformation);
+    }
+
+    public static Properties getProducerProperties(String brokerConnectionString) {
+        Properties props = new Properties();
+        props.put("bootstrap.servers", brokerConnectionString);
+        props.put("acks", "1");
+        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
+        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
+        return props;
+    }
+
+    public static boolean verifyMessage(String key, String message, KafkaTestBroker broker, SimpleConsumer simpleConsumer) {
+        long lastMessageOffset = KafkaUtils.getOffset(simpleConsumer, TestUtils.TOPIC, 0, OffsetRequest.LatestTime()) - 1;
+        ByteBufferMessageSet messageAndOffsets = KafkaUtils.fetchMessages(TestUtils.getKafkaConfig(broker), simpleConsumer,
+                new Partition(Broker.fromString(broker.getBrokerConnectionString()),TestUtils.TOPIC, 0), lastMessageOffset);
+        MessageAndOffset messageAndOffset = messageAndOffsets.iterator().next();
+        Message kafkaMessage = messageAndOffset.message();
+        ByteBuffer messageKeyBuffer = kafkaMessage.key();
+        String keyString = null;
+        String messageString = new String(Utils.toByteArray(kafkaMessage.payload()));
+        if (messageKeyBuffer != null) {
+            keyString = new String(Utils.toByteArray(messageKeyBuffer));
+        }
+        assertEquals(key, keyString);
+        assertEquals(message, messageString);
+        return true;
+    }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/test/org/apache/storm/kafka/TridentKafkaTest.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/test/org/apache/storm/kafka/TridentKafkaTest.java b/external/storm-kafka/src/test/org/apache/storm/kafka/TridentKafkaTest.java
new file mode 100644
index 0000000..7a6073a
--- /dev/null
+++ b/external/storm-kafka/src/test/org/apache/storm/kafka/TridentKafkaTest.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import org.apache.storm.tuple.Fields;
+import kafka.javaapi.consumer.SimpleConsumer;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.apache.storm.kafka.trident.TridentKafkaState;
+import org.apache.storm.kafka.trident.mapper.FieldNameBasedTupleToKafkaMapper;
+import org.apache.storm.kafka.trident.mapper.TridentTupleToKafkaMapper;
+import org.apache.storm.kafka.trident.selector.DefaultTopicSelector;
+import org.apache.storm.kafka.trident.selector.KafkaTopicSelector;
+import org.apache.storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.tuple.TridentTupleView;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class TridentKafkaTest {
+    private KafkaTestBroker broker;
+    private TridentKafkaState state;
+    private SimpleConsumer simpleConsumer;
+
+    @Before
+    public void setup() {
+        broker = new KafkaTestBroker();
+        simpleConsumer = TestUtils.getKafkaConsumer(broker);
+        TridentTupleToKafkaMapper mapper = new FieldNameBasedTupleToKafkaMapper("key", "message");
+        KafkaTopicSelector topicSelector = new DefaultTopicSelector(TestUtils.TOPIC);
+        state = new TridentKafkaState()
+                .withKafkaTopicSelector(topicSelector)
+                .withTridentTupleToKafkaMapper(mapper);
+        state.prepare(TestUtils.getProducerProperties(broker.getBrokerConnectionString()));
+    }
+
+    @Test
+    public void testKeyValue() {
+        String keyString = "key-123";
+        String valString = "message-123";
+        int batchSize = 10;
+
+        List<TridentTuple> tridentTuples = generateTupleBatch(keyString, valString, batchSize);
+
+        state.updateState(tridentTuples, null);
+
+        for(int i = 0 ; i < batchSize ; i++) {
+            TestUtils.verifyMessage(keyString, valString, broker, simpleConsumer);
+        }
+    }
+
+    private List<TridentTuple> generateTupleBatch(String key, String message, int batchsize) {
+        List<TridentTuple> batch = new ArrayList<>();
+        for(int i =0 ; i < batchsize; i++) {
+            batch.add(TridentTupleView.createFreshTuple(new Fields("key", "message"), key, message));
+        }
+        return batch;
+    }
+
+    @After
+    public void shutdown() {
+        simpleConsumer.close();
+        broker.shutdown();
+    }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/test/org/apache/storm/kafka/TridentKafkaTopology.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/test/org/apache/storm/kafka/TridentKafkaTopology.java b/external/storm-kafka/src/test/org/apache/storm/kafka/TridentKafkaTopology.java
new file mode 100644
index 0000000..fdc6752
--- /dev/null
+++ b/external/storm-kafka/src/test/org/apache/storm/kafka/TridentKafkaTopology.java
@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+import com.google.common.collect.ImmutableMap;
+import org.apache.storm.kafka.trident.TridentKafkaStateFactory;
+import org.apache.storm.kafka.trident.TridentKafkaUpdater;
+import org.apache.storm.kafka.trident.mapper.FieldNameBasedTupleToKafkaMapper;
+import org.apache.storm.kafka.trident.selector.DefaultTopicSelector;
+import org.apache.storm.trident.Stream;
+import org.apache.storm.trident.TridentTopology;
+import org.apache.storm.trident.testing.FixedBatchSpout;
+
+import java.util.Properties;
+
+public class TridentKafkaTopology {
+
+    private static StormTopology buildTopology(String brokerConnectionString) {
+        Fields fields = new Fields("word", "count");
+        FixedBatchSpout spout = new FixedBatchSpout(fields, 4,
+                new Values("storm", "1"),
+                new Values("trident", "1"),
+                new Values("needs", "1"),
+                new Values("javadoc", "1")
+        );
+        spout.setCycle(true);
+
+        TridentTopology topology = new TridentTopology();
+        Stream stream = topology.newStream("spout1", spout);
+
+        Properties props = new Properties();
+        props.put("bootstrap.servers", brokerConnectionString);
+        props.put("acks", "1");
+        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
+        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
+
+        TridentKafkaStateFactory stateFactory = new TridentKafkaStateFactory()
+            .withProducerProperties(props)
+            .withKafkaTopicSelector(new DefaultTopicSelector("test"))
+            .withTridentTupleToKafkaMapper(new FieldNameBasedTupleToKafkaMapper("word", "count"));
+        stream.partitionPersist(stateFactory, fields, new TridentKafkaUpdater(), new Fields());
+
+        return topology.build();
+    }
+
+    /**
+     * To run this topology, ensure you have a Kafka broker running and pass its connection string as the only argument.
+     * Create the "test" topic from the command line:
+     * kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partition 1 --topic test
+     *
+     * Run this program, then start a console consumer:
+     * kafka-console-consumer.sh --zookeeper localhost:2181 --topic test --from-beginning
+     *
+     * You should see the messages flowing through.
+     *
+     * @param args the Kafka broker connection string, e.g. localhost:9092
+     * @throws Exception
+     */
+    public static void main(String[] args) throws Exception {
+        if (args.length < 1) {
+            System.out.println("Please provide the Kafka broker url, e.g. localhost:9092");
+            return;
+        }
+
+        LocalCluster cluster = new LocalCluster();
+        cluster.submitTopology("wordCounter", new Config(), buildTopology(args[0]));
+        Thread.sleep(60 * 1000);
+        cluster.killTopology("wordCounter");
+
+        cluster.shutdown();
+    }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/test/org/apache/storm/kafka/ZkCoordinatorTest.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/test/org/apache/storm/kafka/ZkCoordinatorTest.java b/external/storm-kafka/src/test/org/apache/storm/kafka/ZkCoordinatorTest.java
new file mode 100644
index 0000000..65bf0b4
--- /dev/null
+++ b/external/storm-kafka/src/test/org/apache/storm/kafka/ZkCoordinatorTest.java
@@ -0,0 +1,148 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import org.apache.storm.Config;
+import org.apache.curator.test.TestingServer;
+import kafka.javaapi.consumer.SimpleConsumer;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+
+import java.util.*;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyInt;
+import static org.mockito.Mockito.when;
+
+public class ZkCoordinatorTest {
+
+
+    @Mock
+    private DynamicBrokersReader reader;
+
+    @Mock
+    private DynamicPartitionConnections dynamicPartitionConnections;
+
+    private KafkaTestBroker broker = new KafkaTestBroker();
+    private TestingServer server;
+    private Map stormConf = new HashMap();
+    private SpoutConfig spoutConfig;
+    private ZkState state;
+    private SimpleConsumer simpleConsumer;
+
+    @Before
+    public void setUp() throws Exception {
+        MockitoAnnotations.initMocks(this);
+        server = new TestingServer();
+        String connectionString = server.getConnectString();
+        ZkHosts hosts = new ZkHosts(connectionString);
+        hosts.refreshFreqSecs = 1;
+        spoutConfig = new SpoutConfig(hosts, "topic", "/test", "id");
+        Map conf = buildZookeeperConfig(server);
+        state = new ZkState(conf);
+        simpleConsumer = new SimpleConsumer("localhost", broker.getPort(), 60000, 1024, "testClient");
+        when(dynamicPartitionConnections.register(any(Broker.class), any(String.class) ,anyInt())).thenReturn(simpleConsumer);
+    }
+
+    private Map buildZookeeperConfig(TestingServer server) {
+        Map conf = new HashMap();
+        conf.put(Config.TRANSACTIONAL_ZOOKEEPER_PORT, server.getPort());
+        conf.put(Config.TRANSACTIONAL_ZOOKEEPER_SERVERS, Arrays.asList("localhost"));
+        conf.put(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT, 20000);
+        conf.put(Config.STORM_ZOOKEEPER_CONNECTION_TIMEOUT, 20000);
+        conf.put(Config.STORM_ZOOKEEPER_RETRY_TIMES, 3);
+        conf.put(Config.STORM_ZOOKEEPER_RETRY_INTERVAL, 30);
+        return conf;
+    }
+
+    @After
+    public void shutdown() throws Exception {
+        simpleConsumer.close();
+        broker.shutdown();
+        server.close();
+    }
+
+    @Test
+    public void testOnePartitionPerTask() throws Exception {
+        int totalTasks = 64;
+        int partitionsPerTask = 1;
+        List<ZkCoordinator> coordinatorList = buildCoordinators(totalTasks / partitionsPerTask);
+        when(reader.getBrokerInfo()).thenReturn(TestUtils.buildPartitionInfoList(TestUtils.buildPartitionInfo(totalTasks)));
+        for (ZkCoordinator coordinator : coordinatorList) {
+            List<PartitionManager> myManagedPartitions = coordinator.getMyManagedPartitions();
+            assertEquals(partitionsPerTask, myManagedPartitions.size());
+            assertEquals(coordinator._taskIndex, myManagedPartitions.get(0).getPartition().partition);
+        }
+    }
+
+
+    @Test
+    public void testPartitionsChange() throws Exception {
+        final int totalTasks = 64;
+        int partitionsPerTask = 2;
+        List<ZkCoordinator> coordinatorList = buildCoordinators(totalTasks / partitionsPerTask);
+        when(reader.getBrokerInfo()).thenReturn(TestUtils.buildPartitionInfoList(TestUtils.buildPartitionInfo(totalTasks, 9092)));
+        List<List<PartitionManager>> partitionManagersBeforeRefresh = getPartitionManagers(coordinatorList);
+        waitForRefresh();
+        when(reader.getBrokerInfo()).thenReturn(TestUtils.buildPartitionInfoList(TestUtils.buildPartitionInfo(totalTasks, 9093)));
+        List<List<PartitionManager>> partitionManagersAfterRefresh = getPartitionManagers(coordinatorList);
+        assertEquals(partitionManagersBeforeRefresh.size(), partitionManagersAfterRefresh.size());
+        Iterator<List<PartitionManager>> iterator = partitionManagersAfterRefresh.iterator();
+        for (List<PartitionManager> partitionManagersBefore : partitionManagersBeforeRefresh) {
+            List<PartitionManager> partitionManagersAfter = iterator.next();
+            assertPartitionsAreDifferent(partitionManagersBefore, partitionManagersAfter, partitionsPerTask);
+        }
+    }
+
+    private void assertPartitionsAreDifferent(List<PartitionManager> partitionManagersBefore, List<PartitionManager> partitionManagersAfter, int partitionsPerTask) {
+        assertEquals(partitionsPerTask, partitionManagersBefore.size());
+        assertEquals(partitionManagersBefore.size(), partitionManagersAfter.size());
+        for (int i = 0; i < partitionsPerTask; i++) {
+            assertNotEquals(partitionManagersBefore.get(i).getPartition(), partitionManagersAfter.get(i).getPartition());
+        }
+
+    }
+
+    private List<List<PartitionManager>> getPartitionManagers(List<ZkCoordinator> coordinatorList) {
+        List<List<PartitionManager>> partitions = new ArrayList();
+        for (ZkCoordinator coordinator : coordinatorList) {
+            partitions.add(coordinator.getMyManagedPartitions());
+        }
+        return partitions;
+    }
+
+    private void waitForRefresh() throws InterruptedException {
+        Thread.sleep(((ZkHosts) spoutConfig.hosts).refreshFreqSecs * 1000 + 1);
+    }
+
+    private List<ZkCoordinator> buildCoordinators(int totalTasks) {
+        List<ZkCoordinator> coordinatorList = new ArrayList<ZkCoordinator>();
+        for (int i = 0; i < totalTasks; i++) {
+            ZkCoordinator coordinator = new ZkCoordinator(dynamicPartitionConnections, stormConf, spoutConfig, state, i, totalTasks, "test-id", reader);
+            coordinatorList.add(coordinator);
+        }
+        return coordinatorList;
+    }
+
+
+}
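
The assertions in testOnePartitionPerTask above imply a simple ownership rule: when the number of tasks equals the number of partitions, task i manages exactly partition i. The ZkCoordinator internals are not shown in this hunk, so the following is only an illustrative sketch of a modulo-style assignment consistent with those assertions; the class and method names are invented for the example.

    // Illustrative only: an assignment rule consistent with testOnePartitionPerTask,
    // where partition p is owned by task (p % totalTasks). Not the actual ZkCoordinator code.
    import java.util.ArrayList;
    import java.util.List;

    final class PartitionAssignmentSketch {
        static List<Integer> partitionsForTask(int taskIndex, int totalTasks, int totalPartitions) {
            List<Integer> mine = new ArrayList<Integer>();
            for (int p = 0; p < totalPartitions; p++) {
                if (p % totalTasks == taskIndex) {
                    mine.add(p);
                }
            }
            return mine;
        }

        public static void main(String[] args) {
            // With 64 tasks and 64 partitions (the test's numbers), task 5 owns only partition 5.
            System.out.println(partitionsForTask(5, 64, 64)); // [5]
        }
    }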

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/test/org/apache/storm/kafka/bolt/KafkaBoltTest.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/test/org/apache/storm/kafka/bolt/KafkaBoltTest.java b/external/storm-kafka/src/test/org/apache/storm/kafka/bolt/KafkaBoltTest.java
new file mode 100644
index 0000000..180828e
--- /dev/null
+++ b/external/storm-kafka/src/test/org/apache/storm/kafka/bolt/KafkaBoltTest.java
@@ -0,0 +1,341 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka.bolt;
+
+import org.apache.storm.Config;
+import org.apache.storm.Constants;
+import org.apache.storm.task.GeneralTopologyContext;
+import org.apache.storm.task.IOutputCollector;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.TupleImpl;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.utils.TupleUtils;
+import org.apache.storm.utils.Utils;
+import com.google.common.collect.ImmutableList;
+import kafka.api.OffsetRequest;
+import kafka.api.FetchRequest;
+import kafka.javaapi.FetchResponse;
+import kafka.javaapi.OffsetResponse;
+import kafka.javaapi.consumer.SimpleConsumer;
+import kafka.javaapi.message.ByteBufferMessageSet;
+import kafka.message.Message;
+import kafka.message.MessageAndOffset;
+import org.apache.kafka.clients.producer.Callback;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.junit.*;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.mockito.internal.util.reflection.Whitebox;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.apache.storm.kafka.*;
+import org.apache.storm.kafka.trident.GlobalPartitionInformation;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+import java.util.HashMap;
+import java.util.Properties;
+import java.util.concurrent.Future;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.*;
+
+public class KafkaBoltTest {
+
+    private static final String TEST_TOPIC = "test-topic";
+    private KafkaTestBroker broker;
+    private KafkaBolt bolt;
+    private Config config = new Config();
+    private KafkaConfig kafkaConfig;
+    private SimpleConsumer simpleConsumer;
+
+    @Mock
+    private IOutputCollector collector;
+
+    @Before
+    public void initMocks() {
+        MockitoAnnotations.initMocks(this);
+        broker = new KafkaTestBroker();
+        setupKafkaConsumer();
+        config.put(KafkaBolt.TOPIC, TEST_TOPIC);
+        bolt = generateStringSerializerBolt();
+    }
+
+    @After
+    public void shutdown() {
+        simpleConsumer.close();
+        broker.shutdown();
+        bolt.cleanup();
+    }
+
+    private void setupKafkaConsumer() {
+        GlobalPartitionInformation globalPartitionInformation = new GlobalPartitionInformation(TEST_TOPIC);
+        globalPartitionInformation.addPartition(0, Broker.fromString(broker.getBrokerConnectionString()));
+        BrokerHosts brokerHosts = new StaticHosts(globalPartitionInformation);
+        kafkaConfig = new KafkaConfig(brokerHosts, TEST_TOPIC);
+        simpleConsumer = new SimpleConsumer("localhost", broker.getPort(), 60000, 1024, "testClient");
+    }
+
+    @Test
+    public void shouldAcknowledgeTickTuples() throws Exception {
+        // Given
+        Tuple tickTuple = mockTickTuple();
+
+        // When
+        bolt.execute(tickTuple);
+
+        // Then
+        verify(collector).ack(tickTuple);
+    }
+
+    @Test
+    public void executeWithKey() throws Exception {
+        String message = "value-123";
+        String key = "key-123";
+        Tuple tuple = generateTestTuple(key, message);
+        bolt.execute(tuple);
+        verify(collector).ack(tuple);
+        verifyMessage(key, message);
+    }
+
+    /* test synchronous sending */
+    @Test
+    public void executeWithByteArrayKeyAndMessageSync() {
+        boolean async = false;
+        boolean fireAndForget = false;
+        bolt = generateDefaultSerializerBolt(async, fireAndForget, null);
+        String keyString = "test-key";
+        String messageString = "test-message";
+        byte[] key = keyString.getBytes();
+        byte[] message = messageString.getBytes();
+        Tuple tuple = generateTestTuple(key, message);
+        bolt.execute(tuple);
+        verify(collector).ack(tuple);
+        verifyMessage(keyString, messageString);
+    }
+
+    /* test asynchronous sending (default) */
+    @Test
+    public void executeWithByteArrayKeyAndMessageAsync() {
+        boolean async = true;
+        boolean fireAndForget = false;
+        String keyString = "test-key";
+        String messageString = "test-message";
+        byte[] key = keyString.getBytes();
+        byte[] message = messageString.getBytes();
+        final Tuple tuple = generateTestTuple(key, message);
+
+        final ByteBufferMessageSet mockMsg = mockSingleMessage(key, message);
+        simpleConsumer.close();
+        simpleConsumer = mockSimpleConsumer(mockMsg);
+        KafkaProducer<?, ?> producer = mock(KafkaProducer.class);
+        when(producer.send(any(ProducerRecord.class), any(Callback.class))).thenAnswer(new Answer<Future>() {
+            @Override
+            public Future answer(InvocationOnMock invocationOnMock) throws Throwable {
+                Callback cb = (Callback) invocationOnMock.getArguments()[1];
+                cb.onCompletion(null, null);
+                return mock(Future.class);
+            }
+        });
+        bolt = generateDefaultSerializerBolt(async, fireAndForget, producer);
+        bolt.execute(tuple);
+        verify(collector).ack(tuple);
+        verifyMessage(keyString, messageString);
+    }
+
+    /* test with fireAndForget option enabled */
+    @Test
+    public void executeWithByteArrayKeyAndMessageFire() {
+        boolean async = true;
+        boolean fireAndForget = true;
+        bolt = generateDefaultSerializerBolt(async, fireAndForget, null);
+        String keyString = "test-key";
+        String messageString = "test-message";
+        byte[] key = keyString.getBytes();
+        byte[] message = messageString.getBytes();
+        Tuple tuple = generateTestTuple(key, message);
+        final ByteBufferMessageSet mockMsg = mockSingleMessage(key, message);
+        simpleConsumer.close();
+        simpleConsumer = mockSimpleConsumer(mockMsg);
+        KafkaProducer<?, ?> producer = mock(KafkaProducer.class);
+        // do not invoke the callback of send() in order to test whether the bolt handles the fireAndForget option
+        // properly.
+        doReturn(mock(Future.class)).when(producer).send(any(ProducerRecord.class), any(Callback.class));
+        bolt.execute(tuple);
+        verify(collector).ack(tuple);
+        verifyMessage(keyString, messageString);
+    }
+
+    /* test bolt specified properties */
+    @Test
+    public void executeWithBoltSpecifiedProperties() {
+        boolean async = false;
+        boolean fireAndForget = false;
+        bolt = defaultSerializerBoltWithSpecifiedProperties(async, fireAndForget);
+        String keyString = "test-key";
+        String messageString = "test-message";
+        byte[] key = keyString.getBytes();
+        byte[] message = messageString.getBytes();
+        Tuple tuple = generateTestTuple(key, message);
+        bolt.execute(tuple);
+        verify(collector).ack(tuple);
+        verifyMessage(keyString, messageString);
+    }
+
+    private KafkaBolt generateStringSerializerBolt() {
+        Properties props = new Properties();
+        props.put("acks", "1");
+        props.put("bootstrap.servers", broker.getBrokerConnectionString());
+        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
+        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
+        props.put("metadata.fetch.timeout.ms", 1000);
+        KafkaBolt bolt = new KafkaBolt().withProducerProperties(props);
+        bolt.prepare(config, null, new OutputCollector(collector));
+        bolt.setAsync(false);
+        return bolt;
+    }
+
+    private KafkaBolt generateDefaultSerializerBolt(boolean async, boolean fireAndForget,
+                                                    KafkaProducer<?, ?> mockProducer) {
+        Properties props = new Properties();
+        props.put("acks", "1");
+        props.put("bootstrap.servers", broker.getBrokerConnectionString());
+        props.put("key.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
+        props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
+        props.put("metadata.fetch.timeout.ms", 1000);
+        props.put("linger.ms", 0);
+        KafkaBolt bolt = new KafkaBolt().withProducerProperties(props);
+        bolt.prepare(config, null, new OutputCollector(collector));
+        bolt.setAsync(async);
+        bolt.setFireAndForget(fireAndForget);
+        if (mockProducer != null) {
+            Whitebox.setInternalState(bolt, "producer", mockProducer);
+        }
+        return bolt;
+    }
+
+    private KafkaBolt defaultSerializerBoltWithSpecifiedProperties(boolean async, boolean fireAndForget) {
+        Properties props = new Properties();
+        props.put("acks", "1");
+        props.put("bootstrap.servers", broker.getBrokerConnectionString());
+        props.put("key.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
+        props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
+        props.put("metadata.fetch.timeout.ms", 1000);
+        props.put("linger.ms", 0);
+        KafkaBolt bolt = new KafkaBolt().withProducerProperties(props);
+        bolt.prepare(config, null, new OutputCollector(collector));
+        bolt.setAsync(async);
+        bolt.setFireAndForget(fireAndForget);
+        return bolt;
+    }
+
+    @Test
+    public void executeWithoutKey() throws Exception {
+        String message = "value-234";
+        Tuple tuple = generateTestTuple(message);
+        bolt.execute(tuple);
+        verify(collector).ack(tuple);
+        verifyMessage(null, message);
+    }
+
+
+    @Test
+    public void executeWithBrokerDown() throws Exception {
+        broker.shutdown();
+        String message = "value-234";
+        Tuple tuple = generateTestTuple(message);
+        bolt.execute(tuple);
+        verify(collector).fail(tuple);
+    }
+
+    private boolean verifyMessage(String key, String message) {
+        long lastMessageOffset = KafkaUtils.getOffset(simpleConsumer, kafkaConfig.topic, 0, OffsetRequest.LatestTime()) - 1;
+        ByteBufferMessageSet messageAndOffsets = KafkaUtils.fetchMessages(kafkaConfig, simpleConsumer,
+                new Partition(Broker.fromString(broker.getBrokerConnectionString()), kafkaConfig.topic, 0), lastMessageOffset);
+        MessageAndOffset messageAndOffset = messageAndOffsets.iterator().next();
+        Message kafkaMessage = messageAndOffset.message();
+        ByteBuffer messageKeyBuffer = kafkaMessage.key();
+        String keyString = null;
+        String messageString = new String(Utils.toByteArray(kafkaMessage.payload()));
+        if (messageKeyBuffer != null) {
+            keyString = new String(Utils.toByteArray(messageKeyBuffer));
+        }
+        assertEquals(key, keyString);
+        assertEquals(message, messageString);
+        return true;
+    }
+
+    private Tuple generateTestTuple(Object key, Object message) {
+        TopologyBuilder builder = new TopologyBuilder();
+        GeneralTopologyContext topologyContext = new GeneralTopologyContext(builder.createTopology(), new Config(), new HashMap(), new HashMap(), new HashMap(), "") {
+            @Override
+            public Fields getComponentOutputFields(String componentId, String streamId) {
+                return new Fields("key", "message");
+            }
+        };
+        return new TupleImpl(topologyContext, new Values(key, message), 1, "");
+    }
+
+    private Tuple generateTestTuple(Object message) {
+        TopologyBuilder builder = new TopologyBuilder();
+        GeneralTopologyContext topologyContext = new GeneralTopologyContext(builder.createTopology(), new Config(), new HashMap(), new HashMap(), new HashMap(), "") {
+            @Override
+            public Fields getComponentOutputFields(String componentId, String streamId) {
+                return new Fields("message");
+            }
+        };
+        return new TupleImpl(topologyContext, new Values(message), 1, "");
+    }
+
+    private Tuple mockTickTuple() {
+        Tuple tuple = mock(Tuple.class);
+        when(tuple.getSourceComponent()).thenReturn(Constants.SYSTEM_COMPONENT_ID);
+        when(tuple.getSourceStreamId()).thenReturn(Constants.SYSTEM_TICK_STREAM_ID);
+        // Sanity check
+        assertTrue(TupleUtils.isTick(tuple));
+        return tuple;
+    }
+
+    private static ByteBufferMessageSet mockSingleMessage(byte[] key, byte[] message) {
+        ByteBufferMessageSet sets = mock(ByteBufferMessageSet.class);
+        MessageAndOffset msg = mock(MessageAndOffset.class);
+        final List<MessageAndOffset> msgs = ImmutableList.of(msg);
+        doReturn(msgs.iterator()).when(sets).iterator();
+        Message kafkaMessage = mock(Message.class);
+        doReturn(ByteBuffer.wrap(key)).when(kafkaMessage).key();
+        doReturn(ByteBuffer.wrap(message)).when(kafkaMessage).payload();
+        doReturn(kafkaMessage).when(msg).message();
+        return sets;
+    }
+
+    private static SimpleConsumer mockSimpleConsumer(ByteBufferMessageSet mockMsg) {
+        SimpleConsumer simpleConsumer = mock(SimpleConsumer.class);
+        FetchResponse resp = mock(FetchResponse.class);
+        doReturn(resp).when(simpleConsumer).fetch(any(FetchRequest.class));
+        OffsetResponse mockOffsetResponse = mock(OffsetResponse.class);
+        doReturn(new long[] {}).when(mockOffsetResponse).offsets(anyString(), anyInt());
+        doReturn(mockOffsetResponse).when(simpleConsumer).getOffsetsBefore(any(kafka.javaapi.OffsetRequest.class));
+        doReturn(mockMsg).when(resp).messageSet(anyString(), anyInt());
+        return simpleConsumer;
+    }
+}
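
The tests above call KafkaBolt.execute directly; in a topology the same bolt would normally be attached through TopologyBuilder with producer properties like those in generateStringSerializerBolt. A minimal wiring sketch follows, under the assumption of an upstream spout emitting "key" and "message" fields; the broker address, spout, and component ids are placeholders, not part of this patch.

    // Sketch of wiring KafkaBolt into a topology with the producer settings the tests use.
    // "localhost:9092" and the component ids are assumptions for illustration.
    import java.util.Properties;
    import org.apache.storm.Config;
    import org.apache.storm.topology.TopologyBuilder;
    import org.apache.storm.kafka.bolt.KafkaBolt;

    public class KafkaBoltWiringSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092");
            props.put("acks", "1");
            props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

            KafkaBolt bolt = new KafkaBolt().withProducerProperties(props);

            Config conf = new Config();
            conf.put(KafkaBolt.TOPIC, "test-topic");   // same config key the tests set

            TopologyBuilder builder = new TopologyBuilder();
            // A real topology would add a spout emitting "key"/"message" fields and attach the bolt:
            // builder.setSpout("lines", new SomeSpout());                  // hypothetical spout
            // builder.setBolt("to-kafka", bolt).shuffleGrouping("lines");
        }
    }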

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/test/storm/kafka/DynamicBrokersReaderTest.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/test/storm/kafka/DynamicBrokersReaderTest.java b/external/storm-kafka/src/test/storm/kafka/DynamicBrokersReaderTest.java
deleted file mode 100644
index d871924..0000000
--- a/external/storm-kafka/src/test/storm/kafka/DynamicBrokersReaderTest.java
+++ /dev/null
@@ -1,252 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import backtype.storm.Config;
-import org.apache.curator.framework.CuratorFramework;
-import org.apache.curator.framework.CuratorFrameworkFactory;
-import org.apache.curator.retry.ExponentialBackoffRetry;
-import org.apache.curator.test.TestingServer;
-import org.apache.curator.utils.ZKPaths;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import storm.kafka.trident.GlobalPartitionInformation;
-
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Date: 16/05/2013
- * Time: 20:35
- */
-public class DynamicBrokersReaderTest {
-    private DynamicBrokersReader dynamicBrokersReader, wildCardBrokerReader;
-    private String masterPath = "/brokers";
-    private String topic = "testing1";
-    private String secondTopic = "testing2";
-    private String thirdTopic = "testing3";
-
-    private CuratorFramework zookeeper;
-    private TestingServer server;
-
-    @Before
-    public void setUp() throws Exception {
-        server = new TestingServer();
-        String connectionString = server.getConnectString();
-        Map conf = new HashMap();
-        conf.put(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT, 1000);
-        conf.put(Config.STORM_ZOOKEEPER_CONNECTION_TIMEOUT, 1000);
-        conf.put(Config.STORM_ZOOKEEPER_RETRY_TIMES, 4);
-        conf.put(Config.STORM_ZOOKEEPER_RETRY_INTERVAL, 5);
-
-        ExponentialBackoffRetry retryPolicy = new ExponentialBackoffRetry(1000, 3);
-        zookeeper = CuratorFrameworkFactory.newClient(connectionString, retryPolicy);
-        dynamicBrokersReader = new DynamicBrokersReader(conf, connectionString, masterPath, topic);
-
-        Map conf2 = new HashMap();
-        conf2.putAll(conf);
-        conf2.put("kafka.topic.wildcard.match",true);
-
-        wildCardBrokerReader = new DynamicBrokersReader(conf2, connectionString, masterPath, "^test.*$");
-        zookeeper.start();
-    }
-
-    @After
-    public void tearDown() throws Exception {
-        dynamicBrokersReader.close();
-        zookeeper.close();
-        server.close();
-    }
-
-    private void addPartition(int id, String host, int port, String topic) throws Exception {
-        writePartitionId(id, topic);
-        writeLeader(id, 0, topic);
-        writeLeaderDetails(0, host, port);
-    }
-
-    private void addPartition(int id, int leader, String host, int port, String topic) throws Exception {
-        writePartitionId(id, topic);
-        writeLeader(id, leader, topic);
-        writeLeaderDetails(leader, host, port);
-    }
-
-    private void writePartitionId(int id, String topic) throws Exception {
-        String path = dynamicBrokersReader.partitionPath(topic);
-        writeDataToPath(path, ("" + id));
-    }
-
-    private void writeDataToPath(String path, String data) throws Exception {
-        ZKPaths.mkdirs(zookeeper.getZookeeperClient().getZooKeeper(), path);
-        zookeeper.setData().forPath(path, data.getBytes());
-    }
-
-    private void writeLeader(int id, int leaderId, String topic) throws Exception {
-        String path = dynamicBrokersReader.partitionPath(topic) + "/" + id + "/state";
-        String value = " { \"controller_epoch\":4, \"isr\":[ 1, 0 ], \"leader\":" + leaderId + ", \"leader_epoch\":1, \"version\":1 }";
-        writeDataToPath(path, value);
-    }
-
-    private void writeLeaderDetails(int leaderId, String host, int port) throws Exception {
-        String path = dynamicBrokersReader.brokerPath() + "/" + leaderId;
-        String value = "{ \"host\":\"" + host + "\", \"jmx_port\":9999, \"port\":" + port + ", \"version\":1 }";
-        writeDataToPath(path, value);
-    }
-
-
-    private GlobalPartitionInformation getByTopic(List<GlobalPartitionInformation> partitions, String topic){
-        for(GlobalPartitionInformation partitionInformation : partitions) {
-            if (partitionInformation.topic.equals(topic)) return partitionInformation;
-        }
-        return null;
-    }
-
-    @Test
-    public void testGetBrokerInfo() throws Exception {
-        String host = "localhost";
-        int port = 9092;
-        int partition = 0;
-        addPartition(partition, host, port, topic);
-        List<GlobalPartitionInformation> partitions = dynamicBrokersReader.getBrokerInfo();
-
-        GlobalPartitionInformation brokerInfo = getByTopic(partitions, topic);
-        assertNotNull(brokerInfo);
-        assertEquals(1, brokerInfo.getOrderedPartitions().size());
-        assertEquals(port, brokerInfo.getBrokerFor(partition).port);
-        assertEquals(host, brokerInfo.getBrokerFor(partition).host);
-    }
-
-    @Test
-    public void testGetBrokerInfoWildcardMatch() throws Exception {
-        String host = "localhost";
-        int port = 9092;
-        int partition = 0;
-        addPartition(partition, host, port, topic);
-        addPartition(partition, host, port, secondTopic);
-
-        List<GlobalPartitionInformation> partitions = wildCardBrokerReader.getBrokerInfo();
-
-        GlobalPartitionInformation brokerInfo = getByTopic(partitions, topic);
-        assertNotNull(brokerInfo);
-        assertEquals(1, brokerInfo.getOrderedPartitions().size());
-        assertEquals(port, brokerInfo.getBrokerFor(partition).port);
-        assertEquals(host, brokerInfo.getBrokerFor(partition).host);
-
-        brokerInfo = getByTopic(partitions, secondTopic);
-        assertNotNull(brokerInfo);
-        assertEquals(1, brokerInfo.getOrderedPartitions().size());
-        assertEquals(port, brokerInfo.getBrokerFor(partition).port);
-        assertEquals(host, brokerInfo.getBrokerFor(partition).host);
-
-        addPartition(partition, host, port, thirdTopic);
-        //Discover newly added topic
-        partitions = wildCardBrokerReader.getBrokerInfo();
-        assertNotNull(getByTopic(partitions, topic));
-        assertNotNull(getByTopic(partitions, secondTopic));
-        assertNotNull(getByTopic(partitions, thirdTopic));
-    }
-
-
-    @Test
-    public void testMultiplePartitionsOnDifferentHosts() throws Exception {
-        String host = "localhost";
-        int port = 9092;
-        int secondPort = 9093;
-        int partition = 0;
-        int secondPartition = partition + 1;
-        addPartition(partition, 0, host, port, topic);
-        addPartition(secondPartition, 1, host, secondPort, topic);
-
-        List<GlobalPartitionInformation> partitions = dynamicBrokersReader.getBrokerInfo();
-
-        GlobalPartitionInformation brokerInfo = getByTopic(partitions, topic);
-        assertNotNull(brokerInfo);
-        assertEquals(2, brokerInfo.getOrderedPartitions().size());
-
-        assertEquals(port, brokerInfo.getBrokerFor(partition).port);
-        assertEquals(host, brokerInfo.getBrokerFor(partition).host);
-
-        assertEquals(secondPort, brokerInfo.getBrokerFor(secondPartition).port);
-        assertEquals(host, brokerInfo.getBrokerFor(secondPartition).host);
-    }
-
-
-    @Test
-    public void testMultiplePartitionsOnSameHost() throws Exception {
-        String host = "localhost";
-        int port = 9092;
-        int partition = 0;
-        int secondPartition = partition + 1;
-        addPartition(partition, 0, host, port, topic);
-        addPartition(secondPartition, 0, host, port, topic);
-
-        List<GlobalPartitionInformation> partitions = dynamicBrokersReader.getBrokerInfo();
-
-        GlobalPartitionInformation brokerInfo = getByTopic(partitions, topic);
-        assertNotNull(brokerInfo);
-        assertEquals(2, brokerInfo.getOrderedPartitions().size());
-
-        assertEquals(port, brokerInfo.getBrokerFor(partition).port);
-        assertEquals(host, brokerInfo.getBrokerFor(partition).host);
-
-        assertEquals(port, brokerInfo.getBrokerFor(secondPartition).port);
-        assertEquals(host, brokerInfo.getBrokerFor(secondPartition).host);
-    }
-
-    @Test
-    public void testSwitchHostForPartition() throws Exception {
-        String host = "localhost";
-        int port = 9092;
-        int partition = 0;
-        addPartition(partition, host, port, topic);
-        List<GlobalPartitionInformation> partitions = dynamicBrokersReader.getBrokerInfo();
-
-        GlobalPartitionInformation brokerInfo = getByTopic(partitions, topic);
-        assertNotNull(brokerInfo);
-        assertEquals(port, brokerInfo.getBrokerFor(partition).port);
-        assertEquals(host, brokerInfo.getBrokerFor(partition).host);
-
-        String newHost = host + "switch";
-        int newPort = port + 1;
-        addPartition(partition, newHost, newPort, topic);
-        partitions = dynamicBrokersReader.getBrokerInfo();
-
-        brokerInfo = getByTopic(partitions, topic);
-        assertNotNull(brokerInfo);
-        assertEquals(newPort, brokerInfo.getBrokerFor(partition).port);
-        assertEquals(newHost, brokerInfo.getBrokerFor(partition).host);
-    }
-
-    @Test(expected = NullPointerException.class)
-    public void testErrorLogsWhenConfigIsMissing() throws Exception {
-        String connectionString = server.getConnectString();
-        Map conf = new HashMap();
-        conf.put(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT, 1000);
-//        conf.put(Config.STORM_ZOOKEEPER_CONNECTION_TIMEOUT, 1000);
-        conf.put(Config.STORM_ZOOKEEPER_RETRY_TIMES, 4);
-        conf.put(Config.STORM_ZOOKEEPER_RETRY_INTERVAL, 5);
-
-        DynamicBrokersReader dynamicBrokersReader1 = new DynamicBrokersReader(conf, connectionString, masterPath, topic);
-
-    }
-}
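
The helpers above (writePartitionId, writeLeader, writeLeaderDetails) populate the conventional Kafka ZooKeeper nodes that DynamicBrokersReader parses: partition leadership state under the topic's partition path and broker host/port under the broker path (the exact layout comes from partitionPath() and brokerPath(), which are not shown in this hunk). The wildcard test additionally relies on "kafka.topic.wildcard.match" treating the configured topic string as a regular expression; the sketch below illustrates only that match semantics, not how the reader applies it internally.

    // Illustrative only: the topic-name matching that testGetBrokerInfoWildcardMatch relies on
    // when "kafka.topic.wildcard.match" is true.
    import java.util.regex.Pattern;

    public class WildcardTopicMatchSketch {
        public static void main(String[] args) {
            Pattern topicPattern = Pattern.compile("^test.*$");              // the pattern used in setUp()
            System.out.println(topicPattern.matcher("testing1").matches());  // true  -> discovered
            System.out.println(topicPattern.matcher("testing3").matches());  // true  -> discovered
            System.out.println(topicPattern.matcher("other").matches());     // false -> ignored
        }
    }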

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/test/storm/kafka/ExponentialBackoffMsgRetryManagerTest.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/test/storm/kafka/ExponentialBackoffMsgRetryManagerTest.java b/external/storm-kafka/src/test/storm/kafka/ExponentialBackoffMsgRetryManagerTest.java
deleted file mode 100644
index da23718..0000000
--- a/external/storm-kafka/src/test/storm/kafka/ExponentialBackoffMsgRetryManagerTest.java
+++ /dev/null
@@ -1,235 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import org.junit.Test;
-
-public class ExponentialBackoffMsgRetryManagerTest {
-
-    private static final Long TEST_OFFSET = 101L;
-    private static final Long TEST_OFFSET2 = 102L;
-    private static final Long TEST_OFFSET3 = 105L;
-    private static final Long TEST_NEW_OFFSET = 103L;
-
-    @Test
-    public void testImmediateRetry() throws Exception {
-        ExponentialBackoffMsgRetryManager manager = new ExponentialBackoffMsgRetryManager(0, 0d, 0);
-        manager.failed(TEST_OFFSET);
-        Long next = manager.nextFailedMessageToRetry();
-        assertEquals("expect test offset next available for retry", TEST_OFFSET, next);
-        assertTrue("message should be ready for retry immediately", manager.shouldRetryMsg(TEST_OFFSET));
-
-        manager.retryStarted(TEST_OFFSET);
-
-        manager.failed(TEST_OFFSET);
-        next = manager.nextFailedMessageToRetry();
-        assertEquals("expect test offset next available for retry", TEST_OFFSET, next);
-        assertTrue("message should be ready for retry immediately", manager.shouldRetryMsg(TEST_OFFSET));
-    }
-
-    @Test
-    public void testSingleDelay() throws Exception {
-        ExponentialBackoffMsgRetryManager manager = new ExponentialBackoffMsgRetryManager(100, 1d, 1000);
-        manager.failed(TEST_OFFSET);
-        Thread.sleep(5);
-        Long next = manager.nextFailedMessageToRetry();
-        assertNull("expect no message ready for retry yet", next);
-        assertFalse("message should not be ready for retry yet", manager.shouldRetryMsg(TEST_OFFSET));
-
-        Thread.sleep(100);
-        next = manager.nextFailedMessageToRetry();
-        assertEquals("expect test offset next available for retry", TEST_OFFSET, next);
-        assertTrue("message should be ready for retry", manager.shouldRetryMsg(TEST_OFFSET));
-    }
-
-    @Test
-    public void testExponentialBackoff() throws Exception {
-        final long initial = 10;
-        final double mult = 2d;
-        ExponentialBackoffMsgRetryManager manager = new ExponentialBackoffMsgRetryManager(initial, mult, initial * 10);
-
-        long expectedWaitTime = initial;
-        for (long i = 0L; i < 3L; ++i) {
-            manager.failed(TEST_OFFSET);
-
-            Thread.sleep((expectedWaitTime + 1L) / 2L);
-            assertFalse("message should not be ready for retry yet", manager.shouldRetryMsg(TEST_OFFSET));
-
-            Thread.sleep((expectedWaitTime + 1L) / 2L);
-            Long next = manager.nextFailedMessageToRetry();
-            assertEquals("expect test offset next available for retry", TEST_OFFSET, next);
-            assertTrue("message should be ready for retry", manager.shouldRetryMsg(TEST_OFFSET));
-
-            manager.retryStarted(TEST_OFFSET);
-            expectedWaitTime *= mult;
-        }
-    }
-
-    @Test
-    public void testRetryOrder() throws Exception {
-        final long initial = 10;
-        final double mult = 2d;
-        final long max = 20;
-        ExponentialBackoffMsgRetryManager manager = new ExponentialBackoffMsgRetryManager(initial, mult, max);
-
-        manager.failed(TEST_OFFSET);
-        Thread.sleep(initial);
-
-        manager.retryStarted(TEST_OFFSET);
-        manager.failed(TEST_OFFSET);
-        manager.failed(TEST_OFFSET2);
-
-        // although TEST_OFFSET failed first, its retry delay time is longer because this is the second retry
-        // so TEST_OFFSET2 should come first
-
-        Thread.sleep(initial * 2);
-        assertTrue("message "+TEST_OFFSET+"should be ready for retry", manager.shouldRetryMsg(TEST_OFFSET));
-        assertTrue("message "+TEST_OFFSET2+"should be ready for retry", manager.shouldRetryMsg(TEST_OFFSET2));
-
-        Long next = manager.nextFailedMessageToRetry();
-        assertEquals("expect first message to retry is "+TEST_OFFSET2, TEST_OFFSET2, next);
-
-        Thread.sleep(initial);
-
-        // haven't retried yet, so first should still be TEST_OFFSET2
-        next = manager.nextFailedMessageToRetry();
-        assertEquals("expect first message to retry is "+TEST_OFFSET2, TEST_OFFSET2, next);
-        manager.retryStarted(next);
-
-        // now it should be TEST_OFFSET
-        next = manager.nextFailedMessageToRetry();
-        assertEquals("expect message to retry is now "+TEST_OFFSET, TEST_OFFSET, next);
-        manager.retryStarted(next);
-
-        // now none left
-        next = manager.nextFailedMessageToRetry();
-        assertNull("expect no message to retry now", next);
-    }
-
-    @Test
-    public void testQueriesAfterRetriedAlready() throws Exception {
-        ExponentialBackoffMsgRetryManager manager = new ExponentialBackoffMsgRetryManager(0, 0d, 0);
-        manager.failed(TEST_OFFSET);
-        Long next = manager.nextFailedMessageToRetry();
-        assertEquals("expect test offset next available for retry", TEST_OFFSET, next);
-        assertTrue("message should be ready for retry immediately", manager.shouldRetryMsg(TEST_OFFSET));
-
-        manager.retryStarted(TEST_OFFSET);
-        next = manager.nextFailedMessageToRetry();
-        assertNull("expect no message ready after retried", next);
-        assertFalse("message should not be ready after retried", manager.shouldRetryMsg(TEST_OFFSET));
-    }
-
-    @Test(expected = IllegalStateException.class)
-    public void testRetryWithoutFail() throws Exception {
-        ExponentialBackoffMsgRetryManager manager = new ExponentialBackoffMsgRetryManager(0, 0d, 0);
-        manager.retryStarted(TEST_OFFSET);
-    }
-
-    @Test(expected = IllegalStateException.class)
-    public void testFailRetryRetry() throws Exception {
-        ExponentialBackoffMsgRetryManager manager = new ExponentialBackoffMsgRetryManager(0, 0d, 0);
-        manager.failed(TEST_OFFSET);
-        try {
-            manager.retryStarted(TEST_OFFSET);
-        } catch (IllegalStateException ise) {
-            fail("IllegalStateException unexpected here: " + ise);
-        }
-
-        assertFalse("message should not be ready for retry", manager.shouldRetryMsg(TEST_OFFSET));
-        manager.retryStarted(TEST_OFFSET);
-    }
-
-    @Test
-    public void testMaxBackoff() throws Exception {
-        final long initial = 100;
-        final double mult = 2d;
-        final long max = 2000;
-        ExponentialBackoffMsgRetryManager manager = new ExponentialBackoffMsgRetryManager(initial, mult, max);
-
-        long expectedWaitTime = initial;
-        for (long i = 0L; i < 4L; ++i) {
-            manager.failed(TEST_OFFSET);
-
-            Thread.sleep((expectedWaitTime + 1L) / 2L);
-            assertFalse("message should not be ready for retry yet", manager.shouldRetryMsg(TEST_OFFSET));
-
-            Thread.sleep((expectedWaitTime + 1L) / 2L);
-            Long next = manager.nextFailedMessageToRetry();
-            assertEquals("expect test offset next available for retry", TEST_OFFSET, next);
-            assertTrue("message should be ready for retry", manager.shouldRetryMsg(TEST_OFFSET));
-
-            manager.retryStarted(TEST_OFFSET);
-            expectedWaitTime = Math.min((long) (expectedWaitTime * mult), max);
-        }
-    }
-
-    @Test
-    public void testFailThenAck() throws Exception {
-        ExponentialBackoffMsgRetryManager manager = new ExponentialBackoffMsgRetryManager(0, 0d, 0);
-        manager.failed(TEST_OFFSET);
-        assertTrue("message should be ready for retry", manager.shouldRetryMsg(TEST_OFFSET));
-
-        manager.acked(TEST_OFFSET);
-
-        Long next = manager.nextFailedMessageToRetry();
-        assertNull("expect no message ready after acked", next);
-        assertFalse("message should not be ready after acked", manager.shouldRetryMsg(TEST_OFFSET));
-    }
-
-    @Test
-    public void testAckThenFail() throws Exception {
-        ExponentialBackoffMsgRetryManager manager = new ExponentialBackoffMsgRetryManager(0, 0d, 0);
-        manager.acked(TEST_OFFSET);
-        assertFalse("message should not be ready after acked", manager.shouldRetryMsg(TEST_OFFSET));
-
-        manager.failed(TEST_OFFSET);
-
-        Long next = manager.nextFailedMessageToRetry();
-        assertEquals("expect test offset next available for retry", TEST_OFFSET, next);
-        assertTrue("message should be ready for retry", manager.shouldRetryMsg(TEST_OFFSET));
-    }
-    
-    @Test
-    public void testClearInvalidMessages() throws Exception {
-        ExponentialBackoffMsgRetryManager manager = new ExponentialBackoffMsgRetryManager(0, 0d, 0);
-        manager.failed(TEST_OFFSET);
-        manager.failed(TEST_OFFSET2);
-        manager.failed(TEST_OFFSET3);
-        
-        assertTrue("message should be ready for retry", manager.shouldRetryMsg(TEST_OFFSET));
-        assertTrue("message should be ready for retry", manager.shouldRetryMsg(TEST_OFFSET2));
-        assertTrue("message should be ready for retry", manager.shouldRetryMsg(TEST_OFFSET3));
-
-        manager.clearInvalidMessages(TEST_NEW_OFFSET);
-
-        Long next = manager.nextFailedMessageToRetry();
-        assertEquals("expect test offset next available for retry", TEST_OFFSET3, next);
-        
-        manager.acked(TEST_OFFSET3);
-        next = manager.nextFailedMessageToRetry();
-        assertNull("expect no message ready after acked", next);
-    }
-
-}
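
testExponentialBackoff and testMaxBackoff together pin down the retry schedule: the first failure waits roughly the initial delay, each further failure of the same offset multiplies the wait by the ratio, and the wait never exceeds the configured maximum. A small sketch of that schedule is below; the class and method names are invented, and it only reproduces the arithmetic the tests step through.

    // delay(n) = min(initial * ratio^n, max) for the n-th consecutive failure, n starting at 0.
    // This mirrors expectedWaitTime in testExponentialBackoff/testMaxBackoff, nothing more.
    final class BackoffScheduleSketch {
        static long delayForFailure(long initialMs, double ratio, long maxMs, int failureCount) {
            double delay = initialMs * Math.pow(ratio, failureCount);
            return Math.min((long) delay, maxMs);
        }

        public static void main(String[] args) {
            // With initial=100ms, ratio=2.0, max=2000ms (the testMaxBackoff values):
            // failures 0..4 wait 100, 200, 400, 800, 1600 ms; later failures are capped at 2000 ms.
            for (int n = 0; n <= 6; n++) {
                System.out.println(n + " -> " + delayForFailure(100, 2.0, 2000, n) + " ms");
            }
        }
    }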

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/test/storm/kafka/KafkaErrorTest.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/test/storm/kafka/KafkaErrorTest.java b/external/storm-kafka/src/test/storm/kafka/KafkaErrorTest.java
deleted file mode 100644
index 9f170db..0000000
--- a/external/storm-kafka/src/test/storm/kafka/KafkaErrorTest.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import org.junit.Test;
-
-import static org.hamcrest.CoreMatchers.equalTo;
-import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.assertThat;
-
-/**
- * Date: 12/01/2014
- * Time: 18:09
- */
-public class KafkaErrorTest {
-
-    @Test
-    public void getError() {
-        assertThat(KafkaError.getError(0), is(equalTo(KafkaError.NO_ERROR)));
-    }
-
-    @Test
-    public void offsetMetaDataTooLarge() {
-        assertThat(KafkaError.getError(12), is(equalTo(KafkaError.OFFSET_METADATA_TOO_LARGE)));
-    }
-
-    @Test
-    public void unknownNegative() {
-        assertThat(KafkaError.getError(-1), is(equalTo(KafkaError.UNKNOWN)));
-    }
-
-    @Test
-    public void unknownPositive() {
-        assertThat(KafkaError.getError(75), is(equalTo(KafkaError.UNKNOWN)));
-    }
-
-    @Test
-    public void unknown() {
-        assertThat(KafkaError.getError(13), is(equalTo(KafkaError.UNKNOWN)));
-    }
-}
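
The assertions above describe an ordinal lookup with a bounds check: code 0 is NO_ERROR, code 12 is OFFSET_METADATA_TOO_LARGE, and anything negative or beyond the known range falls back to UNKNOWN. A sketch of such a lookup follows; only the three constants named in the test come from the source, and the intermediate constant names are assumptions based on Kafka's historical error codes.

    // Illustrative lookup consistent with the test: valid ordinals map to the matching constant,
    // everything else (negative, 13, 75, ...) falls back to UNKNOWN.
    enum KafkaErrorSketch {
        NO_ERROR, OFFSET_OUT_OF_RANGE, INVALID_MESSAGE, UNKNOWN_TOPIC_OR_PARTITION,
        INVALID_FETCH_SIZE, LEADER_NOT_AVAILABLE, NOT_LEADER_FOR_PARTITION, REQUEST_TIMED_OUT,
        BROKER_NOT_AVAILABLE, REPLICA_NOT_AVAILABLE, MESSAGE_SIZE_TOO_LARGE, STALE_CONTROLLER_EPOCH,
        OFFSET_METADATA_TOO_LARGE, UNKNOWN;

        static KafkaErrorSketch getError(int errorCode) {
            if (errorCode < 0 || errorCode >= UNKNOWN.ordinal()) {
                return UNKNOWN;
            }
            return values()[errorCode];
        }
    }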

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/test/storm/kafka/KafkaTestBroker.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/test/storm/kafka/KafkaTestBroker.java b/external/storm-kafka/src/test/storm/kafka/KafkaTestBroker.java
deleted file mode 100644
index 73203d1..0000000
--- a/external/storm-kafka/src/test/storm/kafka/KafkaTestBroker.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import org.apache.curator.framework.CuratorFramework;
-import org.apache.curator.framework.CuratorFrameworkFactory;
-import org.apache.curator.framework.imps.CuratorFrameworkState;
-import org.apache.curator.retry.ExponentialBackoffRetry;
-import org.apache.curator.test.InstanceSpec;
-import org.apache.curator.test.TestingServer;
-
-import kafka.server.KafkaConfig;
-import kafka.server.KafkaServerStartable;
-import org.apache.commons.io.FileUtils;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Properties;
-
-/**
- * Date: 11/01/2014
- * Time: 13:15
- */
-public class KafkaTestBroker {
-
-    private int port;
-    private KafkaServerStartable kafka;
-    private TestingServer server;
-    private CuratorFramework zookeeper;
-    private File logDir;
-
-    public KafkaTestBroker() {
-        try {
-            server = new TestingServer();
-            String zookeeperConnectionString = server.getConnectString();
-            ExponentialBackoffRetry retryPolicy = new ExponentialBackoffRetry(1000, 3);
-            zookeeper = CuratorFrameworkFactory.newClient(zookeeperConnectionString, retryPolicy);
-            zookeeper.start();
-            port = InstanceSpec.getRandomPort();
-            logDir = new File(System.getProperty("java.io.tmpdir"), "kafka/logs/kafka-test-" + port);
-            KafkaConfig config = buildKafkaConfig(zookeeperConnectionString);
-            kafka = new KafkaServerStartable(config);
-            kafka.startup();
-        } catch (Exception ex) {
-            throw new RuntimeException("Could not start test broker", ex);
-        }
-    }
-
-    private kafka.server.KafkaConfig buildKafkaConfig(String zookeeperConnectionString) {
-        Properties p = new Properties();
-        p.setProperty("zookeeper.connect", zookeeperConnectionString);
-        p.setProperty("broker.id", "0");
-        p.setProperty("port", "" + port);
-        p.setProperty("log.dirs", logDir.getAbsolutePath());
-        return new KafkaConfig(p);
-    }
-
-    public String getBrokerConnectionString() {
-        return "localhost:" + port;
-    }
-
-    public int getPort() {
-        return port;
-    }
-    public void shutdown() {
-        kafka.shutdown();
-        if (zookeeper.getState().equals(CuratorFrameworkState.STARTED)) {
-            zookeeper.close();
-        }
-        try {
-            server.close();
-        } catch (IOException e) {
-            e.printStackTrace();
-        }
-        FileUtils.deleteQuietly(logDir);
-    }
-}
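
KafkaTestBroker bundles an in-process ZooKeeper (Curator TestingServer) and a Kafka broker on a random port, so test usage reduces to construct, read the connection details, and shut down, which is exactly how ZkCoordinatorTest and KafkaBoltTest use it. A minimal usage sketch, using only the public methods defined above and assuming it sits in the same package:

    public class KafkaTestBrokerUsageSketch {
        public static void main(String[] args) {
            KafkaTestBroker broker = new KafkaTestBroker();                // starts ZooKeeper + Kafka
            try {
                String bootstrap = broker.getBrokerConnectionString();    // "localhost:<random port>"
                System.out.println("broker at " + bootstrap + ", port " + broker.getPort());
                // ... produce to or consume from the embedded broker here ...
            } finally {
                broker.shutdown();                                         // stops Kafka and ZooKeeper, deletes the log dir
            }
        }
    }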


http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/tools/RankableObjectWithFields.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/tools/RankableObjectWithFields.java b/examples/storm-starter/src/jvm/storm/starter/tools/RankableObjectWithFields.java
deleted file mode 100644
index 9a0ecae..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/tools/RankableObjectWithFields.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter.tools;
-
-import backtype.storm.tuple.Tuple;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-
-import java.io.Serializable;
-import java.util.List;
-
-/**
- * This class wraps an object and its associated count, including any additional data fields.
- * <p/>
- * This class can be used, for instance, to track the number of occurrences of an object in a Storm topology.
- */
-public class RankableObjectWithFields implements Rankable, Serializable {
-
-  private static final long serialVersionUID = -9102878650001058090L;
-  private static final String toStringSeparator = "|";
-
-  private final Object obj;
-  private final long count;
-  private final ImmutableList<Object> fields;
-
-  public RankableObjectWithFields(Object obj, long count, Object... otherFields) {
-    if (obj == null) {
-      throw new IllegalArgumentException("The object must not be null");
-    }
-    if (count < 0) {
-      throw new IllegalArgumentException("The count must be >= 0");
-    }
-    this.obj = obj;
-    this.count = count;
-    fields = ImmutableList.copyOf(otherFields);
-
-  }
-
-  /**
-   * Construct a new instance based on the provided {@link Tuple}.
-   * <p/>
-   * This method expects the object to be ranked in the first field (index 0) of the provided tuple, and the number of
-   * occurrences of the object (its count) in the second field (index 1). Any further fields in the tuple will be
-   * extracted and tracked, too. These fields can be accessed via {@link RankableObjectWithFields#getFields()}.
-   *
-   * @param tuple
-   *
-   * @return new instance based on the provided tuple
-   */
-  public static RankableObjectWithFields from(Tuple tuple) {
-    List<Object> otherFields = Lists.newArrayList(tuple.getValues());
-    Object obj = otherFields.remove(0);
-    Long count = (Long) otherFields.remove(0);
-    return new RankableObjectWithFields(obj, count, otherFields.toArray());
-  }
-
-  public Object getObject() {
-    return obj;
-  }
-
-  public long getCount() {
-    return count;
-  }
-
-  /**
-   * @return an immutable list of any additional data fields of the object (may be empty but will never be null)
-   */
-  public List<Object> getFields() {
-    return fields;
-  }
-
-  @Override
-  public int compareTo(Rankable other) {
-    long delta = this.getCount() - other.getCount();
-    if (delta > 0) {
-      return 1;
-    }
-    else if (delta < 0) {
-      return -1;
-    }
-    else {
-      return 0;
-    }
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (!(o instanceof RankableObjectWithFields)) {
-      return false;
-    }
-    RankableObjectWithFields other = (RankableObjectWithFields) o;
-    return obj.equals(other.obj) && count == other.count;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = 17;
-    int countHash = (int) (count ^ (count >>> 32));
-    result = 31 * result + countHash;
-    result = 31 * result + obj.hashCode();
-    return result;
-  }
-
-  public String toString() {
-    StringBuffer buf = new StringBuffer();
-    buf.append("[");
-    buf.append(obj);
-    buf.append(toStringSeparator);
-    buf.append(count);
-    for (Object field : fields) {
-      buf.append(toStringSeparator);
-      buf.append(field);
-    }
-    buf.append("]");
-    return buf.toString();
-  }
-
-  /**
-   * Note: We do not defensively copy the wrapped object and any accompanying fields.  We do guarantee, however,
-   * to return a defensive (shallow) copy of the List object that is wrapping any accompanying fields.
-   *
-   * @return
-   */
-  @Override
-  public Rankable copy() {
-    List<Object> shallowCopyOfFields = ImmutableList.copyOf(getFields());
-    return new RankableObjectWithFields(getObject(), getCount(), shallowCopyOfFields);
-  }
-
-}
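
The from(Tuple) contract documented above (ranked object in field 0, its count in field 1, any further fields carried along) is easiest to see with the plain constructor, which takes the same values directly. A short usage sketch, with invented values and assuming the sketch sits in the same package as the class:

    import java.util.List;

    public class RankableUsageSketch {
        public static void main(String[] args) {
            // Same layout from(Tuple) expects: the ranked object first, then its count, then extra fields.
            RankableObjectWithFields a = new RankableObjectWithFields("storm", 3L, "extra");
            RankableObjectWithFields b = new RankableObjectWithFields("kafka", 5L);

            System.out.println(a.compareTo(b));    // negative: count 3 ranks below count 5
            List<Object> extras = a.getFields();   // immutable ["extra"]
            System.out.println(extras);
        }
    }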

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/tools/Rankings.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/tools/Rankings.java b/examples/storm-starter/src/jvm/storm/starter/tools/Rankings.java
deleted file mode 100644
index 551ebfb..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/tools/Rankings.java
+++ /dev/null
@@ -1,156 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter.tools;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-
-import java.io.Serializable;
-import java.util.Collections;
-import java.util.List;
-
-public class Rankings implements Serializable {
-
-  private static final long serialVersionUID = -1549827195410578903L;
-  private static final int DEFAULT_COUNT = 10;
-
-  private final int maxSize;
-  private final List<Rankable> rankedItems = Lists.newArrayList();
-
-  public Rankings() {
-    this(DEFAULT_COUNT);
-  }
-
-  public Rankings(int topN) {
-    if (topN < 1) {
-      throw new IllegalArgumentException("topN must be >= 1");
-    }
-    maxSize = topN;
-  }
-
-  /**
-   * Copy constructor.
-   * @param other
-   */
-  public Rankings(Rankings other) {
-    this(other.maxSize());
-    updateWith(other);
-  }
-
-  /**
-   * @return the maximum possible number (size) of ranked objects this instance can hold
-   */
-  public int maxSize() {
-    return maxSize;
-  }
-
-  /**
-   * @return the number (size) of ranked objects this instance is currently holding
-   */
-  public int size() {
-    return rankedItems.size();
-  }
-
-  /**
-   * The returned defensive copy is only "somewhat" defensive.  We do, for instance, return a defensive copy of the
-   * enclosing List instance, and we do try to defensively copy any contained Rankable objects, too.  However, the
-   * contract of {@link storm.starter.tools.Rankable#copy()} does not guarantee that any Object's embedded within
-   * a Rankable will be defensively copied, too.
-   *
-   * @return a somewhat defensive copy of ranked items
-   */
-  public List<Rankable> getRankings() {
-    List<Rankable> copy = Lists.newLinkedList();
-    for (Rankable r: rankedItems) {
-      copy.add(r.copy());
-    }
-    return ImmutableList.copyOf(copy);
-  }
-
-  public void updateWith(Rankings other) {
-    for (Rankable r : other.getRankings()) {
-      updateWith(r);
-    }
-  }
-
-  public void updateWith(Rankable r) {
-    synchronized(rankedItems) {
-      addOrReplace(r);
-      rerank();
-      shrinkRankingsIfNeeded();
-    }
-  }
-
-  private void addOrReplace(Rankable r) {
-    Integer rank = findRankOf(r);
-    if (rank != null) {
-      rankedItems.set(rank, r);
-    }
-    else {
-      rankedItems.add(r);
-    }
-  }
-
-  private Integer findRankOf(Rankable r) {
-    Object tag = r.getObject();
-    for (int rank = 0; rank < rankedItems.size(); rank++) {
-      Object cur = rankedItems.get(rank).getObject();
-      if (cur.equals(tag)) {
-        return rank;
-      }
-    }
-    return null;
-  }
-
-  private void rerank() {
-    Collections.sort(rankedItems);
-    Collections.reverse(rankedItems);
-  }
-
-  private void shrinkRankingsIfNeeded() {
-    if (rankedItems.size() > maxSize) {
-      rankedItems.remove(maxSize);
-    }
-  }
-
-  /**
-   * Removes ranking entries that have a count of zero.
-   */
-  public void pruneZeroCounts() {
-    int i = 0;
-    while (i < rankedItems.size()) {
-      if (rankedItems.get(i).getCount() == 0) {
-        rankedItems.remove(i);
-      }
-      else {
-        i++;
-      }
-    }
-  }
-
-  public String toString() {
-    return rankedItems.toString();
-  }
-
-  /**
-   * Creates a (defensive) copy of itself.
-   */
-  public Rankings copy() {
-    return new Rankings(this);
-  }
-}
\ No newline at end of file
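
Rankings keeps at most topN Rankable items in descending count order, replacing an existing entry when the same object is updated and dropping the lowest entry once the cap is exceeded. A short usage sketch with RankableObjectWithFields; the values are invented and the sketch is assumed to sit in the same package as the two classes:

    public class RankingsUsageSketch {
        public static void main(String[] args) {
            Rankings top3 = new Rankings(3);                              // keep at most the top 3 items
            top3.updateWith(new RankableObjectWithFields("a", 5L));
            top3.updateWith(new RankableObjectWithFields("b", 9L));
            top3.updateWith(new RankableObjectWithFields("c", 2L));
            top3.updateWith(new RankableObjectWithFields("d", 7L));       // "c" falls out of the top 3

            System.out.println(top3.getRankings().size());                // 3
            System.out.println(top3);                                     // [[b|9], [d|7], [a|5]]
        }
    }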

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/tools/SlidingWindowCounter.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/tools/SlidingWindowCounter.java b/examples/storm-starter/src/jvm/storm/starter/tools/SlidingWindowCounter.java
deleted file mode 100644
index 1199c40..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/tools/SlidingWindowCounter.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter.tools;
-
-import java.io.Serializable;
-import java.util.Map;
-
-/**
- * This class counts objects in a sliding window fashion.
- * <p/>
- * It is designed 1) to give multiple "producer" threads write access to the counter, i.e. being able to increment
- * counts of objects, and 2) to give a single "consumer" thread (e.g. {@link PeriodicSlidingWindowCounter}) read access
- * to the counter. Whenever the consumer thread performs a read operation, this class will advance the head slot of the
- * sliding window counter. This means that the consumer thread indirectly controls where writes of the producer threads
- * will go to. Also, by itself this class will not advance the head slot.
- * <p/>
- * A note for analyzing data based on a sliding window count: During the initial <code>windowLengthInSlots</code>
- * iterations, this sliding window counter will always return object counts that are equal to or greater than those in the
- * previous iteration. This is the effect of the counter "loading up" at the very start of its existence. Conceptually,
- * this is the desired behavior.
- * <p/>
- * To give an example, using a counter with 5 slots which for the sake of this example represent 1 minute of time each:
- * <p/>
- * <pre>
- * {@code
- * Sliding window counts of an object X over time
- *
- * Minute (timeline):
- * 1    2   3   4   5   6   7   8
- *
- * Observed counts per minute:
- * 1    1   1   1   0   0   0   0
- *
- * Counts returned by counter:
- * 1    2   3   4   4   3   2   1
- * }
- * </pre>
- * <p/>
- * As you can see in this example, for the first <code>windowLengthInSlots</code> (here: the first five minutes) the
- * counter will always return counts equal to or greater than those in the previous iteration (1, 2, 3, 4, 4). This initial load
- * effect needs to be accounted for whenever you want to perform analyses such as trending topics; otherwise your
- * analysis algorithm might falsely identify the object to be trending as the counter seems to observe continuously
- * increasing counts. Also, note that during the initial load phase <em>every object</em> will exhibit increasing
- * counts.
- * <p/>
- * On a high-level, the counter exhibits the following behavior: If you asked the example counter after two minutes,
- * "how often did you count the object during the past five minutes?", then it should reply "I have counted it 2 times
- * in the past five minutes", implying that it can only account for the last two of those five minutes because the
- * counter was not running before that time.
- *
- * @param <T> The type of those objects we want to count.
- */
-public final class SlidingWindowCounter<T> implements Serializable {
-
-  private static final long serialVersionUID = -2645063988768785810L;
-
-  private SlotBasedCounter<T> objCounter;
-  private int headSlot;
-  private int tailSlot;
-  private int windowLengthInSlots;
-
-  public SlidingWindowCounter(int windowLengthInSlots) {
-    if (windowLengthInSlots < 2) {
-      throw new IllegalArgumentException(
-          "Window length in slots must be at least two (you requested " + windowLengthInSlots + ")");
-    }
-    this.windowLengthInSlots = windowLengthInSlots;
-    this.objCounter = new SlotBasedCounter<T>(this.windowLengthInSlots);
-
-    this.headSlot = 0;
-    this.tailSlot = slotAfter(headSlot);
-  }
-
-  public void incrementCount(T obj) {
-    objCounter.incrementCount(obj, headSlot);
-  }
-
-  /**
-   * Return the current (total) counts of all tracked objects, then advance the window.
-   * <p/>
-   * Whenever this method is called, we consider the counts of the current sliding window to be available to and
-   * successfully processed "upstream" (i.e. by the caller). Knowing this we will start counting any subsequent
-   * objects within the next "chunk" of the sliding window.
-   *
-   * @return The current (total) counts of all tracked objects.
-   */
-  public Map<T, Long> getCountsThenAdvanceWindow() {
-    Map<T, Long> counts = objCounter.getCounts();
-    objCounter.wipeZeros();
-    objCounter.wipeSlot(tailSlot);
-    advanceHead();
-    return counts;
-  }
-
-  private void advanceHead() {
-    headSlot = tailSlot;
-    tailSlot = slotAfter(tailSlot);
-  }
-
-  private int slotAfter(int slot) {
-    return (slot + 1) % windowLengthInSlots;
-  }
-
-}

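The javadoc above walks through a five-slot example whose returned counts are 1, 2, 3, 4, 4, 3, 2, 1. The single-threaded sketch below reproduces that table with the same ring-buffer mechanics (increments go to the head slot, each read sums all slots, wipes the tail slot and advances the window); it illustrates the behaviour only and is not the SlotBasedCounter-backed implementation.

public class SlidingWindowSketch {
  public static void main(String[] args) {
    final int slots = 5;               // window length in slots (one minute per slot in the example)
    long[] window = new long[slots];   // per-slot counts for a single tracked object
    int head = 0;                      // slot currently receiving increments
    int tail = (head + 1) % slots;     // oldest slot; wiped on every read

    int[] observedPerMinute = {1, 1, 1, 1, 0, 0, 0, 0};
    for (int minute = 0; minute < observedPerMinute.length; minute++) {
      window[head] += observedPerMinute[minute];   // producer side: incrementCount()

      long total = 0;                              // consumer side: getCountsThenAdvanceWindow()
      for (long c : window) {
        total += c;
      }
      window[tail] = 0;                            // drop the slot that falls out of the window
      head = tail;
      tail = (tail + 1) % slots;

      System.out.println("minute " + (minute + 1) + ": " + total);
    }
    // prints 1 2 3 4 4 3 2 1, matching the table in the javadoc
  }
}
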
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/tools/SlotBasedCounter.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/tools/SlotBasedCounter.java b/examples/storm-starter/src/jvm/storm/starter/tools/SlotBasedCounter.java
deleted file mode 100644
index 4b2d472..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/tools/SlotBasedCounter.java
+++ /dev/null
@@ -1,118 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter.tools;
-
-import java.io.Serializable;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * This class provides per-slot counts of the occurrences of objects.
- * <p/>
- * It can be used, for instance, as a building block for implementing sliding window counting of objects.
- *
- * @param <T> The type of those objects we want to count.
- */
-public final class SlotBasedCounter<T> implements Serializable {
-
-  private static final long serialVersionUID = 4858185737378394432L;
-
-  private final Map<T, long[]> objToCounts = new HashMap<T, long[]>();
-  private final int numSlots;
-
-  public SlotBasedCounter(int numSlots) {
-    if (numSlots <= 0) {
-      throw new IllegalArgumentException("Number of slots must be greater than zero (you requested " + numSlots + ")");
-    }
-    this.numSlots = numSlots;
-  }
-
-  public void incrementCount(T obj, int slot) {
-    long[] counts = objToCounts.get(obj);
-    if (counts == null) {
-      counts = new long[this.numSlots];
-      objToCounts.put(obj, counts);
-    }
-    counts[slot]++;
-  }
-
-  public long getCount(T obj, int slot) {
-    long[] counts = objToCounts.get(obj);
-    if (counts == null) {
-      return 0;
-    }
-    else {
-      return counts[slot];
-    }
-  }
-
-  public Map<T, Long> getCounts() {
-    Map<T, Long> result = new HashMap<T, Long>();
-    for (T obj : objToCounts.keySet()) {
-      result.put(obj, computeTotalCount(obj));
-    }
-    return result;
-  }
-
-  private long computeTotalCount(T obj) {
-    long[] curr = objToCounts.get(obj);
-    long total = 0;
-    for (long l : curr) {
-      total += l;
-    }
-    return total;
-  }
-
-  /**
-   * Reset the slot count of any tracked objects to zero for the given slot.
-   *
-   * @param slot the slot whose counts should be reset to zero
-   */
-  public void wipeSlot(int slot) {
-    for (T obj : objToCounts.keySet()) {
-      resetSlotCountToZero(obj, slot);
-    }
-  }
-
-  private void resetSlotCountToZero(T obj, int slot) {
-    long[] counts = objToCounts.get(obj);
-    counts[slot] = 0;
-  }
-
-  private boolean shouldBeRemovedFromCounter(T obj) {
-    return computeTotalCount(obj) == 0;
-  }
-
-  /**
-   * Remove any object from the counter whose total count is zero (to free up memory).
-   */
-  public void wipeZeros() {
-    Set<T> objToBeRemoved = new HashSet<T>();
-    for (T obj : objToCounts.keySet()) {
-      if (shouldBeRemovedFromCounter(obj)) {
-        objToBeRemoved.add(obj);
-      }
-    }
-    for (T obj : objToBeRemoved) {
-      objToCounts.remove(obj);
-    }
-  }
-
-}

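SlotBasedCounter keeps one long[] of per-slot counts for each tracked object, and an object's total is simply the sum over its slots. A tiny illustrative sketch of that layout follows; the names are invented for the example and this is not the class above.

import java.util.HashMap;
import java.util.Map;

public class SlotCountSketch {
  public static void main(String[] args) {
    final int numSlots = 3;
    Map<String, long[]> counts = new HashMap<>();

    // increment "apple" twice in slot 0 and once in slot 2
    counts.computeIfAbsent("apple", k -> new long[numSlots])[0] += 2;
    counts.computeIfAbsent("apple", k -> new long[numSlots])[2] += 1;

    long total = 0;
    for (long c : counts.get("apple")) {
      total += c;
    }
    System.out.println("apple total = " + total);   // 3
  }
}
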
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/trident/TridentKafkaWordCount.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/trident/TridentKafkaWordCount.java b/examples/storm-starter/src/jvm/storm/starter/trident/TridentKafkaWordCount.java
deleted file mode 100644
index bd8ecba..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/trident/TridentKafkaWordCount.java
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- * Contains some contributions under the Thrift Software License.
- * Please see doc/old-thrift-license.txt in the Thrift distribution for
- * details.
- */
-package storm.starter.trident;
-
-
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.LocalDRPC;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.spout.SchemeAsMultiScheme;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.tuple.Fields;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import storm.kafka.StringScheme;
-import storm.kafka.ZkHosts;
-import storm.kafka.bolt.KafkaBolt;
-import storm.kafka.bolt.mapper.FieldNameBasedTupleToKafkaMapper;
-import storm.kafka.bolt.selector.DefaultTopicSelector;
-import storm.kafka.trident.TransactionalTridentKafkaSpout;
-import storm.kafka.trident.TridentKafkaConfig;
-import storm.starter.spout.RandomSentenceSpout;
-import storm.trident.Stream;
-import storm.trident.TridentState;
-import storm.trident.TridentTopology;
-import storm.trident.operation.builtin.Count;
-import storm.trident.operation.builtin.FilterNull;
-import storm.trident.operation.builtin.MapGet;
-import storm.trident.testing.MemoryMapState;
-import storm.trident.testing.Split;
-
-import java.util.Properties;
-
-/**
- * A sample word count trident topology using a transactional kafka spout. It has the following components:
- * <ol>
- * <li> {@link KafkaBolt}
- * that receives random sentences from {@link RandomSentenceSpout} and
- * publishes the sentences to a kafka "test" topic.
- * </li>
- * <li> {@link TransactionalTridentKafkaSpout}
- * that consumes sentences from the "test" topic, splits it into words, aggregates
- * and stores the word count in a {@link MemoryMapState}.
- * </li>
- * <li> DRPC query
- * that returns the word counts by querying the trident state (MemoryMapState).
- * </li>
- * </ol>
- * <p>
- *     For more background read the <a href="https://storm.apache.org/documentation/Trident-tutorial.html">trident tutorial</a>,
- *     <a href="https://storm.apache.org/documentation/Trident-state">trident state</a> and
- *     <a href="https://github.com/apache/storm/tree/master/external/storm-kafka"> Storm Kafka </a>.
- * </p>
- */
-public class TridentKafkaWordCount {
-
-    private String zkUrl;
-    private String brokerUrl;
-
-    TridentKafkaWordCount(String zkUrl, String brokerUrl) {
-        this.zkUrl = zkUrl;
-        this.brokerUrl = brokerUrl;
-    }
-
-    /**
-     * Creates a transactional kafka spout that consumes any new data published to "test" topic.
-     * <p/>
-     * For more info on transactional spouts
-     * see "Transactional spouts" section in
-     * <a href="https://storm.apache.org/documentation/Trident-state"> Trident state</a> doc.
-     *
-     * @return a transactional trident kafka spout.
-     */
-    private TransactionalTridentKafkaSpout createKafkaSpout() {
-        ZkHosts hosts = new ZkHosts(zkUrl);
-        TridentKafkaConfig config = new TridentKafkaConfig(hosts, "test");
-        config.scheme = new SchemeAsMultiScheme(new StringScheme());
-
-        // Consume new data from the topic
-        config.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
-        return new TransactionalTridentKafkaSpout(config);
-    }
-
-
-    private Stream addDRPCStream(TridentTopology tridentTopology, TridentState state, LocalDRPC drpc) {
-        return tridentTopology.newDRPCStream("words", drpc)
-                .each(new Fields("args"), new Split(), new Fields("word"))
-                .groupBy(new Fields("word"))
-                .stateQuery(state, new Fields("word"), new MapGet(), new Fields("count"))
-                .each(new Fields("count"), new FilterNull())
-                .project(new Fields("word", "count"));
-    }
-
-    private TridentState addTridentState(TridentTopology tridentTopology) {
-        return tridentTopology.newStream("spout1", createKafkaSpout()).parallelismHint(1)
-                .each(new Fields("str"), new Split(), new Fields("word"))
-                .groupBy(new Fields("word"))
-                .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"))
-                .parallelismHint(1);
-    }
-
-    /**
-     * Creates a trident topology that consumes sentences from the kafka "test" topic using a
-     * {@link TransactionalTridentKafkaSpout}, computes the word count and stores it in a {@link MemoryMapState}.
-     * A DRPC stream is then created to query the word counts.
-     * @param drpc the local DRPC instance used to query the word counts
-     * @return the trident topology
-     */
-    public StormTopology buildConsumerTopology(LocalDRPC drpc) {
-        TridentTopology tridentTopology = new TridentTopology();
-        addDRPCStream(tridentTopology, addTridentState(tridentTopology), drpc);
-        return tridentTopology.build();
-    }
-
-    /**
-     * Return the consumer topology config.
-     *
-     * @return the topology config
-     */
-    public Config getConsumerConfig() {
-        Config conf = new Config();
-        conf.setMaxSpoutPending(20);
-        //  conf.setDebug(true);
-        return conf;
-    }
-
-    /**
-     * A topology that produces random sentences using {@link RandomSentenceSpout} and
-     * publishes the sentences to the kafka "test" topic using a {@link KafkaBolt}.
-     * @param prop the Kafka producer properties for the KafkaBolt
-     * @return the storm topology
-     */
-    public StormTopology buildProducerTopology(Properties prop) {
-        TopologyBuilder builder = new TopologyBuilder();
-        builder.setSpout("spout", new RandomSentenceSpout(), 2);
-        /**
-         * The output field of the RandomSentenceSpout ("word") is provided as the boltMessageField
-         * so that this gets written out as the message in the kafka topic.
-         */
-        KafkaBolt bolt = new KafkaBolt().withProducerProperties(prop)
-                .withTopicSelector(new DefaultTopicSelector("test"))
-                .withTupleToKafkaMapper(new FieldNameBasedTupleToKafkaMapper("key", "word"));
-        builder.setBolt("forwardToKafka", bolt, 1).shuffleGrouping("spout");
-        return builder.createTopology();
-    }
-
-    /**
-     * Returns the Kafka producer properties used by the {@link KafkaBolt} that publishes sentences to the
-     * kafka "test" topic.
-     *
-     * @return the Kafka producer properties
-     */
-    public Properties getProducerConfig() {
-        Properties props = new Properties();
-        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerUrl);
-        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
-        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
-        props.put(ProducerConfig.CLIENT_ID_CONFIG, "storm-kafka-producer");
-        return props;
-    }
-
-    /**
-     * <p>
-     * To run this topology, ensure you have a kafka broker running.
-     * </p>
-     * Create a topic named "test" from the command line, e.g.:
-     * kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test
-     */
-    public static void main(String[] args) throws Exception {
-
-        String zkUrl = "localhost:2181";        // the defaults.
-        String brokerUrl = "localhost:9092";
-
-        if (args.length > 2 || (args.length == 1 && args[0].matches("^-h|--help$"))) {
-            System.out.println("Usage: TridentKafkaWordCount [kafka zookeeper url] [kafka broker url]");
-            System.out.println("   E.g TridentKafkaWordCount [" + zkUrl + "]" + " [" + brokerUrl + "]");
-            System.exit(1);
-        } else if (args.length == 1) {
-            zkUrl = args[0];
-        } else if (args.length == 2) {
-            zkUrl = args[0];
-            brokerUrl = args[1];
-        }
-
-        System.out.println("Using Kafka zookeeper url: " + zkUrl + " broker url: " + brokerUrl);
-
-        TridentKafkaWordCount wordCount = new TridentKafkaWordCount(zkUrl, brokerUrl);
-
-        LocalDRPC drpc = new LocalDRPC();
-        LocalCluster cluster = new LocalCluster();
-
-        // submit the consumer topology.
-        cluster.submitTopology("wordCounter", wordCount.getConsumerConfig(), wordCount.buildConsumerTopology(drpc));
-
-        Config conf = new Config();
-        conf.setMaxSpoutPending(20);
-        // submit the producer topology.
-        cluster.submitTopology("kafkaBolt", conf, wordCount.buildProducerTopology(wordCount.getProducerConfig()));
-
-        // keep querying the word counts for a minute.
-        for (int i = 0; i < 60; i++) {
-            System.out.println("DRPC RESULT: " + drpc.execute("words", "the and apple snow jumped"));
-            Thread.sleep(1000);
-        }
-
-        cluster.killTopology("kafkaBolt");
-        cluster.killTopology("wordCounter");
-        cluster.shutdown();
-    }
-}

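In the producer topology above, FieldNameBasedTupleToKafkaMapper("key", "word") decides what gets written to Kafka: the tuple's "word" field becomes the message and the absent "key" field resolves to a null key. The sketch below approximates that field lookup, assuming a tuple represented as a plain field-to-value map; the class and helper names are hypothetical and this is not the storm-kafka implementation.

import java.util.HashMap;
import java.util.Map;

public class TupleToKafkaMappingSketch {
  // Hypothetical helper: look up a tuple field by name, or null if the field is absent.
  static String fieldOrNull(Map<String, Object> tupleFields, String field) {
    Object v = tupleFields.get(field);
    return v == null ? null : v.toString();
  }

  public static void main(String[] args) {
    Map<String, Object> tuple = new HashMap<>();
    tuple.put("word", "the cow jumped over the moon");   // RandomSentenceSpout's output field

    String kafkaKey = fieldOrNull(tuple, "key");          // null: the spout declares no "key" field
    String kafkaMessage = fieldOrNull(tuple, "word");     // the sentence, published to the "test" topic
    System.out.println("key=" + kafkaKey + ", message=" + kafkaMessage);
  }
}
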
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/trident/TridentReach.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/trident/TridentReach.java b/examples/storm-starter/src/jvm/storm/starter/trident/TridentReach.java
deleted file mode 100644
index 2d87c47..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/trident/TridentReach.java
+++ /dev/null
@@ -1,156 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter.trident;
-
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.LocalDRPC;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.task.IMetricsContext;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
-import storm.trident.TridentState;
-import storm.trident.TridentTopology;
-import storm.trident.operation.BaseFunction;
-import storm.trident.operation.CombinerAggregator;
-import storm.trident.operation.TridentCollector;
-import storm.trident.operation.builtin.MapGet;
-import storm.trident.operation.builtin.Sum;
-import storm.trident.state.ReadOnlyState;
-import storm.trident.state.State;
-import storm.trident.state.StateFactory;
-import storm.trident.state.map.ReadOnlyMapState;
-import storm.trident.tuple.TridentTuple;
-
-import java.util.*;
-
-public class TridentReach {
-  public static Map<String, List<String>> TWEETERS_DB = new HashMap<String, List<String>>() {{
-    put("foo.com/blog/1", Arrays.asList("sally", "bob", "tim", "george", "nathan"));
-    put("engineering.twitter.com/blog/5", Arrays.asList("adam", "david", "sally", "nathan"));
-    put("tech.backtype.com/blog/123", Arrays.asList("tim", "mike", "john"));
-  }};
-
-  public static Map<String, List<String>> FOLLOWERS_DB = new HashMap<String, List<String>>() {{
-    put("sally", Arrays.asList("bob", "tim", "alice", "adam", "jim", "chris", "jai"));
-    put("bob", Arrays.asList("sally", "nathan", "jim", "mary", "david", "vivian"));
-    put("tim", Arrays.asList("alex"));
-    put("nathan", Arrays.asList("sally", "bob", "adam", "harry", "chris", "vivian", "emily", "jordan"));
-    put("adam", Arrays.asList("david", "carissa"));
-    put("mike", Arrays.asList("john", "bob"));
-    put("john", Arrays.asList("alice", "nathan", "jim", "mike", "bob"));
-  }};
-
-  public static class StaticSingleKeyMapState extends ReadOnlyState implements ReadOnlyMapState<Object> {
-    public static class Factory implements StateFactory {
-      Map _map;
-
-      public Factory(Map map) {
-        _map = map;
-      }
-
-      @Override
-      public State makeState(Map conf, IMetricsContext metrics, int partitionIndex, int numPartitions) {
-        return new StaticSingleKeyMapState(_map);
-      }
-
-    }
-
-    Map _map;
-
-    public StaticSingleKeyMapState(Map map) {
-      _map = map;
-    }
-
-
-    @Override
-    public List<Object> multiGet(List<List<Object>> keys) {
-      List<Object> ret = new ArrayList();
-      for (List<Object> key : keys) {
-        Object singleKey = key.get(0);
-        ret.add(_map.get(singleKey));
-      }
-      return ret;
-    }
-
-  }
-
-  public static class One implements CombinerAggregator<Integer> {
-    @Override
-    public Integer init(TridentTuple tuple) {
-      return 1;
-    }
-
-    @Override
-    public Integer combine(Integer val1, Integer val2) {
-      return 1;
-    }
-
-    @Override
-    public Integer zero() {
-      return 1;
-    }
-  }
-
-  public static class ExpandList extends BaseFunction {
-
-    @Override
-    public void execute(TridentTuple tuple, TridentCollector collector) {
-      List l = (List) tuple.getValue(0);
-      if (l != null) {
-        for (Object o : l) {
-          collector.emit(new Values(o));
-        }
-      }
-    }
-
-  }
-
-  public static StormTopology buildTopology(LocalDRPC drpc) {
-    TridentTopology topology = new TridentTopology();
-    TridentState urlToTweeters = topology.newStaticState(new StaticSingleKeyMapState.Factory(TWEETERS_DB));
-    TridentState tweetersToFollowers = topology.newStaticState(new StaticSingleKeyMapState.Factory(FOLLOWERS_DB));
-
-
-    topology.newDRPCStream("reach", drpc).stateQuery(urlToTweeters, new Fields("args"), new MapGet(), new Fields(
-        "tweeters")).each(new Fields("tweeters"), new ExpandList(), new Fields("tweeter")).shuffle().stateQuery(
-        tweetersToFollowers, new Fields("tweeter"), new MapGet(), new Fields("followers")).each(new Fields("followers"),
-        new ExpandList(), new Fields("follower")).groupBy(new Fields("follower")).aggregate(new One(), new Fields(
-        "one")).aggregate(new Fields("one"), new Sum(), new Fields("reach"));
-    return topology.build();
-  }
-
-  public static void main(String[] args) throws Exception {
-    LocalDRPC drpc = new LocalDRPC();
-
-    Config conf = new Config();
-    LocalCluster cluster = new LocalCluster();
-
-    cluster.submitTopology("reach", conf, buildTopology(drpc));
-
-    Thread.sleep(2000);
-
-    System.out.println("REACH: " + drpc.execute("reach", "aaa"));
-    System.out.println("REACH: " + drpc.execute("reach", "foo.com/blog/1"));
-    System.out.println("REACH: " + drpc.execute("reach", "engineering.twitter.com/blog/5"));
-
-
-    cluster.shutdown();
-    drpc.shutdown();
-  }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/trident/TridentWordCount.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/trident/TridentWordCount.java b/examples/storm-starter/src/jvm/storm/starter/trident/TridentWordCount.java
deleted file mode 100644
index e4a2d2e..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/trident/TridentWordCount.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter.trident;
-
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.LocalDRPC;
-import backtype.storm.StormSubmitter;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
-import storm.trident.TridentState;
-import storm.trident.TridentTopology;
-import storm.trident.operation.BaseFunction;
-import storm.trident.operation.TridentCollector;
-import storm.trident.operation.builtin.Count;
-import storm.trident.operation.builtin.FilterNull;
-import storm.trident.operation.builtin.MapGet;
-import storm.trident.operation.builtin.Sum;
-import storm.trident.testing.FixedBatchSpout;
-import storm.trident.testing.MemoryMapState;
-import storm.trident.tuple.TridentTuple;
-
-
-public class TridentWordCount {
-  public static class Split extends BaseFunction {
-    @Override
-    public void execute(TridentTuple tuple, TridentCollector collector) {
-      String sentence = tuple.getString(0);
-      for (String word : sentence.split(" ")) {
-        collector.emit(new Values(word));
-      }
-    }
-  }
-
-  public static StormTopology buildTopology(LocalDRPC drpc) {
-    FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence"), 3, new Values("the cow jumped over the moon"),
-        new Values("the man went to the store and bought some candy"), new Values("four score and seven years ago"),
-        new Values("how many apples can you eat"), new Values("to be or not to be the person"));
-    spout.setCycle(true);
-
-    TridentTopology topology = new TridentTopology();
-    TridentState wordCounts = topology.newStream("spout1", spout).parallelismHint(16).each(new Fields("sentence"),
-        new Split(), new Fields("word")).groupBy(new Fields("word")).persistentAggregate(new MemoryMapState.Factory(),
-        new Count(), new Fields("count")).parallelismHint(16);
-
-    topology.newDRPCStream("words", drpc).each(new Fields("args"), new Split(), new Fields("word")).groupBy(new Fields(
-        "word")).stateQuery(wordCounts, new Fields("word"), new MapGet(), new Fields("count")).each(new Fields("count"),
-        new FilterNull()).aggregate(new Fields("count"), new Sum(), new Fields("sum"));
-    return topology.build();
-  }
-
-  public static void main(String[] args) throws Exception {
-    Config conf = new Config();
-    conf.setMaxSpoutPending(20);
-    if (args.length == 0) {
-      LocalDRPC drpc = new LocalDRPC();
-      LocalCluster cluster = new LocalCluster();
-      cluster.submitTopology("wordCounter", conf, buildTopology(drpc));
-      for (int i = 0; i < 100; i++) {
-        System.out.println("DRPC RESULT: " + drpc.execute("words", "cat the dog jumped"));
-        Thread.sleep(1000);
-      }
-    }
-    else {
-      conf.setNumWorkers(3);
-      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, buildTopology(null));
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/util/StormRunner.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/util/StormRunner.java b/examples/storm-starter/src/jvm/storm/starter/util/StormRunner.java
deleted file mode 100644
index eb25a86..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/util/StormRunner.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter.util;
-
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.StormSubmitter;
-import backtype.storm.generated.AlreadyAliveException;
-import backtype.storm.generated.AuthorizationException;
-import backtype.storm.generated.InvalidTopologyException;
-import backtype.storm.generated.StormTopology;
-
-public final class StormRunner {
-
-  private static final int MILLIS_IN_SEC = 1000;
-
-  private StormRunner() {
-  }
-
-  public static void runTopologyLocally(StormTopology topology, String topologyName, Config conf, int runtimeInSeconds)
-      throws InterruptedException {
-    LocalCluster cluster = new LocalCluster();
-    cluster.submitTopology(topologyName, conf, topology);
-    Thread.sleep((long) runtimeInSeconds * MILLIS_IN_SEC);
-    cluster.killTopology(topologyName);
-    cluster.shutdown();
-  }
-
-  public static void runTopologyRemotely(StormTopology topology, String topologyName, Config conf)
-      throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
-    StormSubmitter.submitTopology(topologyName, conf, topology);
-  }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/test/jvm/org/apache/storm/starter/bolt/IntermediateRankingsBoltTest.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/test/jvm/org/apache/storm/starter/bolt/IntermediateRankingsBoltTest.java b/examples/storm-starter/test/jvm/org/apache/storm/starter/bolt/IntermediateRankingsBoltTest.java
new file mode 100644
index 0000000..23326c5
--- /dev/null
+++ b/examples/storm-starter/test/jvm/org/apache/storm/starter/bolt/IntermediateRankingsBoltTest.java
@@ -0,0 +1,146 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter.bolt;
+
+import org.apache.storm.Config;
+import org.apache.storm.topology.BasicOutputCollector;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.utils.MockTupleHelpers;
+import com.google.common.collect.Lists;
+import org.testng.annotations.DataProvider;
+import org.testng.annotations.Test;
+
+import java.util.Map;
+
+import static org.fest.assertions.api.Assertions.assertThat;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.*;
+
+public class IntermediateRankingsBoltTest {
+
+  private static final String ANY_NON_SYSTEM_COMPONENT_ID = "irrelevant_component_id";
+  private static final String ANY_NON_SYSTEM_STREAM_ID = "irrelevant_stream_id";
+  private static final Object ANY_OBJECT = new Object();
+  private static final int ANY_TOPN = 10;
+  private static final long ANY_COUNT = 42;
+
+  private Tuple mockRankableTuple(Object obj, long count) {
+    Tuple tuple = MockTupleHelpers.mockTuple(ANY_NON_SYSTEM_COMPONENT_ID, ANY_NON_SYSTEM_STREAM_ID);
+    when(tuple.getValues()).thenReturn(Lists.newArrayList(ANY_OBJECT, ANY_COUNT));
+    return tuple;
+  }
+
+  @DataProvider
+  public Object[][] illegalTopN() {
+    return new Object[][]{ { -10 }, { -3 }, { -2 }, { -1 }, { 0 } };
+  }
+
+  @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalTopN")
+  public void negativeOrZeroTopNShouldThrowIAE(int topN) {
+    new IntermediateRankingsBolt(topN);
+  }
+
+  @DataProvider
+  public Object[][] illegalEmitFrequency() {
+    return new Object[][]{ { -10 }, { -3 }, { -2 }, { -1 }, { 0 } };
+  }
+
+  @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalEmitFrequency")
+  public void negativeOrZeroEmitFrequencyShouldThrowIAE(int emitFrequencyInSeconds) {
+    new IntermediateRankingsBolt(ANY_TOPN, emitFrequencyInSeconds);
+  }
+
+  @DataProvider
+  public Object[][] legalTopN() {
+    return new Object[][]{ { 1 }, { 2 }, { 3 }, { 20 } };
+  }
+
+  @Test(dataProvider = "legalTopN")
+  public void positiveTopNShouldBeOk(int topN) {
+    new IntermediateRankingsBolt(topN);
+  }
+
+  @DataProvider
+  public Object[][] legalEmitFrequency() {
+    return new Object[][]{ { 1 }, { 2 }, { 3 }, { 20 } };
+  }
+
+  @Test(dataProvider = "legalEmitFrequency")
+  public void positiveEmitFrequencyShouldBeOk(int emitFrequencyInSeconds) {
+    new IntermediateRankingsBolt(ANY_TOPN, emitFrequencyInSeconds);
+  }
+
+  @Test
+  public void shouldEmitSomethingIfTickTupleIsReceived() {
+    // given
+    Tuple tickTuple = MockTupleHelpers.mockTickTuple();
+    BasicOutputCollector collector = mock(BasicOutputCollector.class);
+    IntermediateRankingsBolt bolt = new IntermediateRankingsBolt();
+
+    // when
+    bolt.execute(tickTuple, collector);
+
+    // then
+    // verifyZeroInteractions(collector);
+    verify(collector).emit(any(Values.class));
+  }
+
+  @Test
+  public void shouldEmitNothingIfNormalTupleIsReceived() {
+    // given
+    Tuple normalTuple = mockRankableTuple(ANY_OBJECT, ANY_COUNT);
+    BasicOutputCollector collector = mock(BasicOutputCollector.class);
+    IntermediateRankingsBolt bolt = new IntermediateRankingsBolt();
+
+    // when
+    bolt.execute(normalTuple, collector);
+
+    // then
+    verifyZeroInteractions(collector);
+  }
+
+  @Test
+  public void shouldDeclareOutputFields() {
+    // given
+    OutputFieldsDeclarer declarer = mock(OutputFieldsDeclarer.class);
+    IntermediateRankingsBolt bolt = new IntermediateRankingsBolt();
+
+    // when
+    bolt.declareOutputFields(declarer);
+
+    // then
+    verify(declarer, times(1)).declare(any(Fields.class));
+  }
+
+  @Test
+  public void shouldSetTickTupleFrequencyInComponentConfigurationToNonZeroValue() {
+    // given
+    IntermediateRankingsBolt bolt = new IntermediateRankingsBolt();
+
+    // when
+    Map<String, Object> componentConfig = bolt.getComponentConfiguration();
+
+    // then
+    assertThat(componentConfig).containsKey(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS);
+    Integer emitFrequencyInSeconds = (Integer) componentConfig.get(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS);
+    assertThat(emitFrequencyInSeconds).isGreaterThan(0);
+  }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/test/jvm/org/apache/storm/starter/bolt/RollingCountBoltTest.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/test/jvm/org/apache/storm/starter/bolt/RollingCountBoltTest.java b/examples/storm-starter/test/jvm/org/apache/storm/starter/bolt/RollingCountBoltTest.java
new file mode 100644
index 0000000..d068e59
--- /dev/null
+++ b/examples/storm-starter/test/jvm/org/apache/storm/starter/bolt/RollingCountBoltTest.java
@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter.bolt;
+
+import org.apache.storm.Config;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.utils.MockTupleHelpers;
+import org.testng.annotations.Test;
+
+import java.util.Map;
+
+import static org.fest.assertions.api.Assertions.assertThat;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.*;
+
+public class RollingCountBoltTest {
+
+  private static final String ANY_NON_SYSTEM_COMPONENT_ID = "irrelevant_component_id";
+  private static final String ANY_NON_SYSTEM_STREAM_ID = "irrelevant_stream_id";
+
+  private Tuple mockNormalTuple(Object obj) {
+    Tuple tuple = MockTupleHelpers.mockTuple(ANY_NON_SYSTEM_COMPONENT_ID, ANY_NON_SYSTEM_STREAM_ID);
+    when(tuple.getValue(0)).thenReturn(obj);
+    return tuple;
+  }
+
+  @SuppressWarnings("rawtypes")
+  @Test
+  public void shouldEmitNothingIfNoObjectHasBeenCountedYetAndTickTupleIsReceived() {
+    // given
+    Tuple tickTuple = MockTupleHelpers.mockTickTuple();
+    RollingCountBolt bolt = new RollingCountBolt();
+    Map conf = mock(Map.class);
+    TopologyContext context = mock(TopologyContext.class);
+    OutputCollector collector = mock(OutputCollector.class);
+    bolt.prepare(conf, context, collector);
+
+    // when
+    bolt.execute(tickTuple);
+
+    // then
+    verifyZeroInteractions(collector);
+  }
+
+  @SuppressWarnings("rawtypes")
+  @Test
+  public void shouldEmitSomethingIfAtLeastOneObjectWasCountedAndTickTupleIsReceived() {
+    // given
+    Tuple normalTuple = mockNormalTuple(new Object());
+    Tuple tickTuple = MockTupleHelpers.mockTickTuple();
+
+    RollingCountBolt bolt = new RollingCountBolt();
+    Map conf = mock(Map.class);
+    TopologyContext context = mock(TopologyContext.class);
+    OutputCollector collector = mock(OutputCollector.class);
+    bolt.prepare(conf, context, collector);
+
+    // when
+    bolt.execute(normalTuple);
+    bolt.execute(tickTuple);
+
+    // then
+    verify(collector).emit(any(Values.class));
+  }
+
+  @Test
+  public void shouldDeclareOutputFields() {
+    // given
+    OutputFieldsDeclarer declarer = mock(OutputFieldsDeclarer.class);
+    RollingCountBolt bolt = new RollingCountBolt();
+
+    // when
+    bolt.declareOutputFields(declarer);
+
+    // then
+    verify(declarer, times(1)).declare(any(Fields.class));
+
+  }
+
+  @Test
+  public void shouldSetTickTupleFrequencyInComponentConfigurationToNonZeroValue() {
+    // given
+    RollingCountBolt bolt = new RollingCountBolt();
+
+    // when
+    Map<String, Object> componentConfig = bolt.getComponentConfiguration();
+
+    // then
+    assertThat(componentConfig).containsKey(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS);
+    Integer emitFrequencyInSeconds = (Integer) componentConfig.get(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS);
+    assertThat(emitFrequencyInSeconds).isGreaterThan(0);
+  }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/test/jvm/org/apache/storm/starter/bolt/TotalRankingsBoltTest.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/test/jvm/org/apache/storm/starter/bolt/TotalRankingsBoltTest.java b/examples/storm-starter/test/jvm/org/apache/storm/starter/bolt/TotalRankingsBoltTest.java
new file mode 100644
index 0000000..c3582d5
--- /dev/null
+++ b/examples/storm-starter/test/jvm/org/apache/storm/starter/bolt/TotalRankingsBoltTest.java
@@ -0,0 +1,147 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter.bolt;
+
+import org.apache.storm.Config;
+import org.apache.storm.topology.BasicOutputCollector;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.utils.MockTupleHelpers;
+import org.testng.annotations.DataProvider;
+import org.testng.annotations.Test;
+import org.apache.storm.starter.tools.Rankings;
+
+import java.util.Map;
+
+import static org.fest.assertions.api.Assertions.assertThat;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.*;
+
+public class TotalRankingsBoltTest {
+
+  private static final String ANY_NON_SYSTEM_COMPONENT_ID = "irrelevant_component_id";
+  private static final String ANY_NON_SYSTEM_STREAM_ID = "irrelevant_stream_id";
+  private static final Object ANY_OBJECT = new Object();
+  private static final int ANY_TOPN = 10;
+  private static final long ANY_COUNT = 42;
+
+  private Tuple mockRankingsTuple(Object obj, long count) {
+    Tuple tuple = MockTupleHelpers.mockTuple(ANY_NON_SYSTEM_COMPONENT_ID, ANY_NON_SYSTEM_STREAM_ID);
+    Rankings rankings = mock(Rankings.class);
+    when(tuple.getValue(0)).thenReturn(rankings);
+    return tuple;
+  }
+
+  @DataProvider
+  public Object[][] illegalTopN() {
+    return new Object[][]{ { -10 }, { -3 }, { -2 }, { -1 }, { 0 } };
+  }
+
+  @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalTopN")
+  public void negativeOrZeroTopNShouldThrowIAE(int topN) {
+    new TotalRankingsBolt(topN);
+  }
+
+  @DataProvider
+  public Object[][] illegalEmitFrequency() {
+    return new Object[][]{ { -10 }, { -3 }, { -2 }, { -1 }, { 0 } };
+  }
+
+  @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalEmitFrequency")
+  public void negativeOrZeroEmitFrequencyShouldThrowIAE(int emitFrequencyInSeconds) {
+    new TotalRankingsBolt(ANY_TOPN, emitFrequencyInSeconds);
+  }
+
+  @DataProvider
+  public Object[][] legalTopN() {
+    return new Object[][]{ { 1 }, { 2 }, { 3 }, { 20 } };
+  }
+
+  @Test(dataProvider = "legalTopN")
+  public void positiveTopNShouldBeOk(int topN) {
+    new TotalRankingsBolt(topN);
+  }
+
+  @DataProvider
+  public Object[][] legalEmitFrequency() {
+    return new Object[][]{ { 1 }, { 2 }, { 3 }, { 20 } };
+  }
+
+  @Test(dataProvider = "legalEmitFrequency")
+  public void positiveEmitFrequencyShouldBeOk(int emitFrequencyInSeconds) {
+    new TotalRankingsBolt(ANY_TOPN, emitFrequencyInSeconds);
+  }
+
+  @Test
+  public void shouldEmitSomethingIfTickTupleIsReceived() {
+    // given
+    Tuple tickTuple = MockTupleHelpers.mockTickTuple();
+    BasicOutputCollector collector = mock(BasicOutputCollector.class);
+    TotalRankingsBolt bolt = new TotalRankingsBolt();
+
+    // when
+    bolt.execute(tickTuple, collector);
+
+    // then
+    // verifyZeroInteractions(collector);
+    verify(collector).emit(any(Values.class));
+  }
+
+  @Test
+  public void shouldEmitNothingIfNormalTupleIsReceived() {
+    // given
+    Tuple normalTuple = mockRankingsTuple(ANY_OBJECT, ANY_COUNT);
+    BasicOutputCollector collector = mock(BasicOutputCollector.class);
+    TotalRankingsBolt bolt = new TotalRankingsBolt();
+
+    // when
+    bolt.execute(normalTuple, collector);
+
+    // then
+    verifyZeroInteractions(collector);
+  }
+
+  @Test
+  public void shouldDeclareOutputFields() {
+    // given
+    OutputFieldsDeclarer declarer = mock(OutputFieldsDeclarer.class);
+    TotalRankingsBolt bolt = new TotalRankingsBolt();
+
+    // when
+    bolt.declareOutputFields(declarer);
+
+    // then
+    verify(declarer, times(1)).declare(any(Fields.class));
+  }
+
+  @Test
+  public void shouldSetTickTupleFrequencyInComponentConfigurationToNonZeroValue() {
+    // given
+    TotalRankingsBolt bolt = new TotalRankingsBolt();
+
+    // when
+    Map<String, Object> componentConfig = bolt.getComponentConfiguration();
+
+    // then
+    assertThat(componentConfig).containsKey(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS);
+    Integer emitFrequencyInSeconds = (Integer) componentConfig.get(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS);
+    assertThat(emitFrequencyInSeconds).isGreaterThan(0);
+  }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/NthLastModifiedTimeTrackerTest.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/NthLastModifiedTimeTrackerTest.java b/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/NthLastModifiedTimeTrackerTest.java
new file mode 100644
index 0000000..a28ea38
--- /dev/null
+++ b/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/NthLastModifiedTimeTrackerTest.java
@@ -0,0 +1,125 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter.tools;
+
+import org.apache.storm.utils.Time;
+import org.testng.annotations.DataProvider;
+import org.testng.annotations.Test;
+
+import static org.fest.assertions.api.Assertions.assertThat;
+
+public class NthLastModifiedTimeTrackerTest {
+
+  private static final int ANY_NUM_TIMES_TO_TRACK = 3;
+  private static final int MILLIS_IN_SEC = 1000;
+
+  @DataProvider
+  public Object[][] illegalNumTimesData() {
+    return new Object[][]{ { -10 }, { -3 }, { -2 }, { -1 }, { 0 } };
+  }
+
+  @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalNumTimesData")
+  public void negativeOrZeroNumTimesToTrackShouldThrowIAE(int numTimesToTrack) {
+    new NthLastModifiedTimeTracker(numTimesToTrack);
+  }
+
+  @DataProvider
+  public Object[][] legalNumTimesData() {
+    return new Object[][]{ { 1 }, { 2 }, { 3 }, { 20 } };
+  }
+
+  @Test(dataProvider = "legalNumTimesData")
+  public void positiveNumTimesToTrackShouldBeOk(int numTimesToTrack) {
+    new NthLastModifiedTimeTracker(numTimesToTrack);
+  }
+
+  @DataProvider
+  public Object[][] whenNotYetMarkedAsModifiedData() {
+    return new Object[][]{ { 0 }, { 1 }, { 2 }, { 3 }, { 4 }, { 5 }, { 8 }, { 10 } };
+  }
+
+  @Test(dataProvider = "whenNotYetMarkedAsModifiedData")
+  public void shouldReturnCorrectModifiedTimeEvenWhenNotYetMarkedAsModified(int secondsToAdvance) {
+    // given
+    Time.startSimulating();
+    NthLastModifiedTimeTracker tracker = new NthLastModifiedTimeTracker(ANY_NUM_TIMES_TO_TRACK);
+
+    // when
+    advanceSimulatedTimeBy(secondsToAdvance);
+    int seconds = tracker.secondsSinceOldestModification();
+
+    // then
+    assertThat(seconds).isEqualTo(secondsToAdvance);
+
+    // cleanup
+    Time.stopSimulating();
+  }
+
+  @DataProvider
+  public Object[][] simulatedTrackerIterations() {
+    return new Object[][]{ { 1, new int[]{ 0, 1 }, new int[]{ 0, 0 } }, { 1, new int[]{ 0, 2 }, new int[]{ 0, 0 } },
+        { 2, new int[]{ 2, 2 }, new int[]{ 2, 2 } }, { 2, new int[]{ 0, 4 }, new int[]{ 0, 4 } },
+        { 1, new int[]{ 1, 1, 1, 1, 1, 1, 1 }, new int[]{ 0, 0, 0, 0, 0, 0, 0 } },
+        { 1, new int[]{ 1, 2, 3, 4, 5, 6, 7 }, new int[]{ 0, 0, 0, 0, 0, 0, 0 } },
+        { 2, new int[]{ 1, 1, 1, 1, 1, 1, 1 }, new int[]{ 1, 1, 1, 1, 1, 1, 1 } },
+        { 2, new int[]{ 2, 2, 2, 2, 2, 2, 2 }, new int[]{ 2, 2, 2, 2, 2, 2, 2 } },
+        { 2, new int[]{ 1, 2, 3, 4, 5, 6, 7 }, new int[]{ 1, 2, 3, 4, 5, 6, 7 } },
+        { 3, new int[]{ 1, 1, 1, 1, 1, 1, 1 }, new int[]{ 1, 2, 2, 2, 2, 2, 2 } },
+        { 3, new int[]{ 1, 2, 3, 4, 5, 6, 7 }, new int[]{ 1, 3, 5, 7, 9, 11, 13 } },
+        { 3, new int[]{ 2, 2, 2, 2, 2, 2, 2 }, new int[]{ 2, 4, 4, 4, 4, 4, 4 } },
+        { 4, new int[]{ 1, 1, 1, 1, 1, 1, 1 }, new int[]{ 1, 2, 3, 3, 3, 3, 3 } },
+        { 4, new int[]{ 1, 2, 3, 4, 5, 6, 7 }, new int[]{ 1, 3, 6, 9, 12, 15, 18 } },
+        { 4, new int[]{ 2, 2, 2, 2, 2, 2, 2 }, new int[]{ 2, 4, 6, 6, 6, 6, 6 } },
+        { 5, new int[]{ 1, 1, 1, 1, 1, 1, 1 }, new int[]{ 1, 2, 3, 4, 4, 4, 4 } },
+        { 5, new int[]{ 1, 2, 3, 4, 5, 6, 7 }, new int[]{ 1, 3, 6, 10, 14, 18, 22 } },
+        { 5, new int[]{ 2, 2, 2, 2, 2, 2, 2 }, new int[]{ 2, 4, 6, 8, 8, 8, 8 } },
+        { 6, new int[]{ 1, 1, 1, 1, 1, 1, 1 }, new int[]{ 1, 2, 3, 4, 5, 5, 5 } },
+        { 6, new int[]{ 1, 2, 3, 4, 5, 6, 7 }, new int[]{ 1, 3, 6, 10, 15, 20, 25 } },
+        { 6, new int[]{ 2, 2, 2, 2, 2, 2, 2 }, new int[]{ 2, 4, 6, 8, 10, 10, 10 } },
+        { 3, new int[]{ 1, 2, 3 }, new int[]{ 1, 3, 5 } } };
+  }
+
+  @Test(dataProvider = "simulatedTrackerIterations")
+  public void shouldReturnCorrectModifiedTimeWhenMarkedAsModified(int numTimesToTrack,
+      int[] secondsToAdvancePerIteration, int[] expLastModifiedTimes) {
+    // given
+    Time.startSimulating();
+    NthLastModifiedTimeTracker tracker = new NthLastModifiedTimeTracker(numTimesToTrack);
+
+    int[] modifiedTimes = new int[expLastModifiedTimes.length];
+
+    // when
+    int i = 0;
+    for (int secondsToAdvance : secondsToAdvancePerIteration) {
+      advanceSimulatedTimeBy(secondsToAdvance);
+      tracker.markAsModified();
+      modifiedTimes[i] = tracker.secondsSinceOldestModification();
+      i++;
+    }
+
+    // then
+    assertThat(modifiedTimes).isEqualTo(expLastModifiedTimes);
+
+    // cleanup
+    Time.stopSimulating();
+  }
+
+  private void advanceSimulatedTimeBy(int seconds) {
+    Time.advanceTime(seconds * MILLIS_IN_SEC);
+  }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/RankableObjectWithFieldsTest.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/RankableObjectWithFieldsTest.java b/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/RankableObjectWithFieldsTest.java
new file mode 100644
index 0000000..9837569
--- /dev/null
+++ b/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/RankableObjectWithFieldsTest.java
@@ -0,0 +1,252 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter.tools;
+
+import org.apache.storm.tuple.Tuple;
+import com.google.common.collect.Lists;
+import org.testng.annotations.DataProvider;
+import org.testng.annotations.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.fest.assertions.api.Assertions.assertThat;
+import static org.mockito.Mockito.*;
+import static org.testng.Assert.assertFalse;
+import static org.testng.Assert.assertTrue;
+
+public class RankableObjectWithFieldsTest {
+
+  private static final Object ANY_OBJECT = new Object();
+  private static final long ANY_COUNT = 271;
+  private static final String ANY_FIELD = "someAdditionalField";
+  private static final int GREATER_THAN = 1;
+  private static final int EQUAL_TO = 0;
+  private static final int SMALLER_THAN = -1;
+
+  @Test(expectedExceptions = IllegalArgumentException.class)
+  public void constructorWithNullObjectAndNoFieldsShouldThrowIAE() {
+    new RankableObjectWithFields(null, ANY_COUNT);
+  }
+
+  @Test(expectedExceptions = IllegalArgumentException.class)
+  public void constructorWithNullObjectAndFieldsShouldThrowIAE() {
+    Object someAdditionalField = new Object();
+    new RankableObjectWithFields(null, ANY_COUNT, someAdditionalField);
+  }
+
+  @Test(expectedExceptions = IllegalArgumentException.class)
+  public void constructorWithNegativeCountAndNoFieldsShouldThrowIAE() {
+    new RankableObjectWithFields(ANY_OBJECT, -1);
+  }
+
+  @Test(expectedExceptions = IllegalArgumentException.class)
+  public void constructorWithNegativeCountAndFieldsShouldThrowIAE() {
+    Object someAdditionalField = new Object();
+    new RankableObjectWithFields(ANY_OBJECT, -1, someAdditionalField);
+  }
+
+  @Test
+  public void shouldBeEqualToItself() {
+    RankableObjectWithFields r = new RankableObjectWithFields(ANY_OBJECT, ANY_COUNT);
+    assertThat(r).isEqualTo(r);
+  }
+
+  @DataProvider
+  public Object[][] otherClassesData() {
+    return new Object[][]{ { new String("foo") }, { new Object() }, { Integer.valueOf(4) }, { Lists.newArrayList(7, 8,
+        9) } };
+  }
+
+  @Test(dataProvider = "otherClassesData")
+  public void shouldNotBeEqualToInstancesOfOtherClasses(Object notARankable) {
+    RankableObjectWithFields r = new RankableObjectWithFields(ANY_OBJECT, ANY_COUNT);
+    assertFalse(r.equals(notARankable), r + " is equal to " + notARankable + " but it should not be");
+  }
+
+  @DataProvider
+  public Object[][] falseDuplicatesData() {
+    return new Object[][]{ { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("foo", 1) },
+        { new RankableObjectWithFields("foo", 1), new RankableObjectWithFields("Foo", 1) },
+        { new RankableObjectWithFields("foo", 1), new RankableObjectWithFields("FOO", 1) },
+        { new RankableObjectWithFields("foo", 1), new RankableObjectWithFields("bar", 1) },
+        { new RankableObjectWithFields("", 0), new RankableObjectWithFields("", 1) }, { new RankableObjectWithFields("",
+        1), new RankableObjectWithFields("bar", 1) } };
+  }
+
+  @Test(dataProvider = "falseDuplicatesData")
+  public void shouldNotBeEqualToFalseDuplicates(RankableObjectWithFields r, RankableObjectWithFields falseDuplicate) {
+    assertFalse(r.equals(falseDuplicate), r + " is equal to " + falseDuplicate + " but it should not be");
+  }
+
+  @Test(dataProvider = "falseDuplicatesData")
+  public void shouldHaveDifferentHashCodeThanFalseDuplicates(RankableObjectWithFields r,
+      RankableObjectWithFields falseDuplicate) {
+    assertThat(r.hashCode()).isNotEqualTo(falseDuplicate.hashCode());
+  }
+
+  @DataProvider
+  public Object[][] trueDuplicatesData() {
+    return new Object[][]{ { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("foo", 0) },
+        { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("foo", 0, "someOtherField") },
+        { new RankableObjectWithFields("foo", 0, "someField"), new RankableObjectWithFields("foo", 0,
+            "someOtherField") } };
+  }
+
+  @Test(dataProvider = "trueDuplicatesData")
+  public void shouldBeEqualToTrueDuplicates(RankableObjectWithFields r, RankableObjectWithFields trueDuplicate) {
+    assertTrue(r.equals(trueDuplicate), r + " is not equal to " + trueDuplicate + " but it should be");
+  }
+
+  @Test(dataProvider = "trueDuplicatesData")
+  public void shouldHaveSameHashCodeAsTrueDuplicates(RankableObjectWithFields r,
+      RankableObjectWithFields trueDuplicate) {
+    assertThat(r.hashCode()).isEqualTo(trueDuplicate.hashCode());
+  }
+
+  @DataProvider
+  public Object[][] compareToData() {
+    return new Object[][]{ { new RankableObjectWithFields("foo", 1000), new RankableObjectWithFields("foo", 0),
+        GREATER_THAN }, { new RankableObjectWithFields("foo", 1), new RankableObjectWithFields("foo", 0),
+        GREATER_THAN }, { new RankableObjectWithFields("foo", 1000), new RankableObjectWithFields("bar", 0),
+        GREATER_THAN }, { new RankableObjectWithFields("foo", 1), new RankableObjectWithFields("bar", 0),
+        GREATER_THAN }, { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("foo", 0), EQUAL_TO },
+        { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("bar", 0), EQUAL_TO },
+        { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("foo", 1000), SMALLER_THAN },
+        { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("foo", 1), SMALLER_THAN },
+        { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("bar", 1), SMALLER_THAN },
+        { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("bar", 1000), SMALLER_THAN }, };
+  }
+
+  @Test(dataProvider = "compareToData")
+  public void verifyCompareTo(RankableObjectWithFields first, RankableObjectWithFields second, int expCompareToValue) {
+    assertThat(first.compareTo(second)).isEqualTo(expCompareToValue);
+  }
+
+  @DataProvider
+  public Object[][] toStringData() {
+    return new Object[][]{ { new String("foo"), 0L }, { new String("BAR"), 8L } };
+  }
+
+  @Test(dataProvider = "toStringData")
+  public void toStringShouldContainStringRepresentationsOfObjectAndCount(Object obj, long count) {
+    // given
+    RankableObjectWithFields r = new RankableObjectWithFields(obj, count);
+
+    // when
+    String strRepresentation = r.toString();
+
+    // then
+    assertThat(strRepresentation).contains(obj.toString()).contains("" + count);
+  }
+
+  @Test
+  public void shouldReturnTheObject() {
+    // given
+    RankableObjectWithFields r = new RankableObjectWithFields(ANY_OBJECT, ANY_COUNT, ANY_FIELD);
+
+    // when
+    Object obj = r.getObject();
+
+    // then
+    assertThat(obj).isEqualTo(ANY_OBJECT);
+  }
+
+  @Test
+  public void shouldReturnTheCount() {
+    // given
+    RankableObjectWithFields r = new RankableObjectWithFields(ANY_OBJECT, ANY_COUNT, ANY_FIELD);
+
+    // when
+    long count = r.getCount();
+
+    // then
+    assertThat(count).isEqualTo(ANY_COUNT);
+  }
+
+  @DataProvider
+  public Object[][] fieldsData() {
+    return new Object[][]{ { ANY_OBJECT, ANY_COUNT, new Object[]{ ANY_FIELD } },
+        { "quux", 42L, new Object[]{ "one", "two", "three" } } };
+  }
+
+  @Test(dataProvider = "fieldsData")
+  public void shouldReturnTheFields(Object obj, long count, Object[] fields) {
+    // given
+    RankableObjectWithFields r = new RankableObjectWithFields(obj, count, fields);
+
+    // when
+    List<Object> actualFields = r.getFields();
+
+    // then
+    assertThat(actualFields).isEqualTo(Lists.newArrayList(fields));
+  }
+
+  @Test(expectedExceptions = UnsupportedOperationException.class)
+  public void fieldsShouldBeImmutable() {
+    // given
+    RankableObjectWithFields r = new RankableObjectWithFields(ANY_OBJECT, ANY_COUNT, ANY_FIELD);
+
+    // when
+    List<Object> fields = r.getFields();
+    // try to modify the list, which should fail
+    fields.remove(0);
+
+    // then (exception)
+  }
+
+  @Test
+  public void shouldCreateRankableObjectFromTuple() {
+    // given
+    Tuple tuple = mock(Tuple.class);
+    List<Object> tupleValues = Lists.newArrayList(ANY_OBJECT, ANY_COUNT, ANY_FIELD);
+    when(tuple.getValues()).thenReturn(tupleValues);
+
+    // when
+    RankableObjectWithFields r = RankableObjectWithFields.from(tuple);
+
+    // then
+    assertThat(r.getObject()).isEqualTo(ANY_OBJECT);
+    assertThat(r.getCount()).isEqualTo(ANY_COUNT);
+    List<Object> fields = new ArrayList<Object>();
+    fields.add(ANY_FIELD);
+    assertThat(r.getFields()).isEqualTo(fields);
+
+  }
+
+  @DataProvider
+  public Object[][] copyData() {
+    return new Object[][]{ { new RankableObjectWithFields("foo", 0) }, { new RankableObjectWithFields("foo", 3,
+        "someOtherField") }, { new RankableObjectWithFields("foo", 0, "someField") } };
+  }
+
+  // TODO: What would be a good test to ensure that RankableObjectWithFields is at least somewhat defensively copied?
+  //       Note that the contract of Rankable#copy() is to return a Rankable, not a RankableObjectWithFields.
+  @Test(dataProvider = "copyData")
+  public void copyShouldReturnCopy(RankableObjectWithFields original) {
+    // given
+
+    // when
+    Rankable copy = original.copy();
+
+    // then
+    assertThat(copy.getObject()).isEqualTo(original.getObject());
+    assertThat(copy.getCount()).isEqualTo(original.getCount());
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/RankingsTest.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/RankingsTest.java b/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/RankingsTest.java
new file mode 100644
index 0000000..245c552
--- /dev/null
+++ b/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/RankingsTest.java
@@ -0,0 +1,368 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter.tools;
+
+import com.google.common.base.Throwables;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import org.jmock.lib.concurrent.Blitzer;
+import org.testng.annotations.DataProvider;
+import org.testng.annotations.Test;
+
+import java.util.List;
+
+import static org.fest.assertions.api.Assertions.assertThat;
+
+public class RankingsTest {
+
+  private static final int ANY_TOPN = 42;
+  private static final Rankable ANY_RANKABLE = new RankableObjectWithFields("someObject", ANY_TOPN);
+  private static final Rankable ZERO = new RankableObjectWithFields("ZERO_COUNT", 0);
+  private static final Rankable A = new RankableObjectWithFields("A", 1);
+  private static final Rankable B = new RankableObjectWithFields("B", 2);
+  private static final Rankable C = new RankableObjectWithFields("C", 3);
+  private static final Rankable D = new RankableObjectWithFields("D", 4);
+  private static final Rankable E = new RankableObjectWithFields("E", 5);
+  private static final Rankable F = new RankableObjectWithFields("F", 6);
+  private static final Rankable G = new RankableObjectWithFields("G", 7);
+  private static final Rankable H = new RankableObjectWithFields("H", 8);
+
+  @DataProvider
+  public Object[][] illegalTopNData() {
+    return new Object[][]{ { 0 }, { -1 }, { -2 }, { -10 } };
+  }
+
+  @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalTopNData")
+  public void constructorWithNegativeOrZeroTopNShouldThrowIAE(int topN) {
+    new Rankings(topN);
+  }
+
+  @DataProvider
+  public Object[][] copyRankingsData() {
+    return new Object[][]{ { 5, Lists.newArrayList(A, B, C) }, { 2, Lists.newArrayList(A, B, C, D) },
+        { 1, Lists.newArrayList() }, { 1, Lists.newArrayList(A) }, { 1, Lists.newArrayList(A, B) } };
+  }
+
+  @Test(dataProvider = "copyRankingsData")
+  public void copyConstructorShouldReturnCopy(int topN, List<Rankable> rankables) {
+    // given
+    Rankings rankings = new Rankings(topN);
+    for (Rankable r : rankables) {
+      rankings.updateWith(r);
+    }
+
+    // when
+    Rankings copy = new Rankings(rankings);
+
+    // then
+    assertThat(copy.maxSize()).isEqualTo(rankings.maxSize());
+    assertThat(copy.getRankings()).isEqualTo(rankings.getRankings());
+  }
+
+  @DataProvider
+  public Object[][] defensiveCopyRankingsData() {
+    return new Object[][]{ { 5, Lists.newArrayList(A, B, C), Lists.newArrayList(D) }, { 2, Lists.newArrayList(A, B, C,
+        D), Lists.newArrayList(E, F) }, { 1, Lists.newArrayList(), Lists.newArrayList(A) }, { 1, Lists.newArrayList(A),
+        Lists.newArrayList(B) }, { 1, Lists.newArrayList(ZERO), Lists.newArrayList(B) }, { 1, Lists.newArrayList(ZERO),
+        Lists.newArrayList() } };
+  }
+
+  @Test(dataProvider = "defensiveCopyRankingsData")
+  public void copyConstructorShouldReturnDefensiveCopy(int topN, List<Rankable> rankables, List<Rankable> changes) {
+    // given
+    Rankings original = new Rankings(topN);
+    for (Rankable r : rankables) {
+      original.updateWith(r);
+    }
+    int expSize = original.size();
+    List<Rankable> expRankings = original.getRankings();
+
+    // when
+    Rankings copy = new Rankings(original);
+    for (Rankable r : changes) {
+      copy.updateWith(r);
+    }
+
+    // then
+    assertThat(original.size()).isEqualTo(expSize);
+    assertThat(original.getRankings()).isEqualTo(expRankings);
+  }
+
+  @DataProvider
+  public Object[][] legalTopNData() {
+    return new Object[][]{ { 1 }, { 2 }, { 1000 }, { 1000000 } };
+  }
+
+  @Test(dataProvider = "legalTopNData")
+  public void constructorWithPositiveTopNShouldBeOk(int topN) {
+    // given/when
+    Rankings rankings = new Rankings(topN);
+
+    // then
+    assertThat(rankings.maxSize()).isEqualTo(topN);
+  }
+
+  @Test
+  public void shouldHaveDefaultConstructor() {
+    new Rankings();
+  }
+
+  @Test
+  public void defaultConstructorShouldSetPositiveTopN() {
+    // given/when
+    Rankings rankings = new Rankings();
+
+    // then
+    assertThat(rankings.maxSize()).isGreaterThan(0);
+  }
+
+  @DataProvider
+  public Object[][] rankingsGrowData() {
+    return new Object[][]{ { 2, Lists.newArrayList(new RankableObjectWithFields("A", 1), new RankableObjectWithFields(
+        "B", 2), new RankableObjectWithFields("C", 3)) }, { 2, Lists.newArrayList(new RankableObjectWithFields("A", 1),
+        new RankableObjectWithFields("B", 2), new RankableObjectWithFields("C", 3), new RankableObjectWithFields("D",
+        4)) } };
+  }
+
+  @Test(dataProvider = "rankingsGrowData")
+  public void sizeOfRankingsShouldNotGrowBeyondTopN(int topN, List<Rankable> rankables) {
+    // sanity check of the provided test data
+    assertThat(rankables.size()).overridingErrorMessage(
+        "The supplied test data is not correct: the number of rankables <%d> should be greater than <%d>",
+        rankables.size(), topN).isGreaterThan(topN);
+
+    // given
+    Rankings rankings = new Rankings(topN);
+
+    // when
+    for (Rankable r : rankables) {
+      rankings.updateWith(r);
+    }
+
+    // then
+    assertThat(rankings.size()).isLessThanOrEqualTo(rankings.maxSize());
+  }
+
+  @DataProvider
+  public Object[][] simulatedRankingsData() {
+    return new Object[][]{ { Lists.newArrayList(A), Lists.newArrayList(A) }, { Lists.newArrayList(B, D, A, C),
+        Lists.newArrayList(D, C, B, A) }, { Lists.newArrayList(B, F, A, C, D, E), Lists.newArrayList(F, E, D, C, B,
+        A) }, { Lists.newArrayList(G, B, F, A, C, D, E, H), Lists.newArrayList(H, G, F, E, D, C, B, A) } };
+  }
+
+  @Test(dataProvider = "simulatedRankingsData")
+  public void shouldCorrectlyRankWhenUpdatedWithRankables(List<Rankable> unsorted, List<Rankable> expSorted) {
+    // given
+    Rankings rankings = new Rankings(unsorted.size());
+
+    // when
+    for (Rankable r : unsorted) {
+      rankings.updateWith(r);
+    }
+
+    // then
+    assertThat(rankings.getRankings()).isEqualTo(expSorted);
+  }
+
+  @Test(dataProvider = "simulatedRankingsData")
+  public void shouldCorrectlyRankWhenEmptyAndUpdatedWithOtherRankings(List<Rankable> unsorted,
+      List<Rankable> expSorted) {
+    // given
+    Rankings rankings = new Rankings(unsorted.size());
+    Rankings otherRankings = new Rankings(rankings.maxSize());
+    for (Rankable r : unsorted) {
+      otherRankings.updateWith(r);
+    }
+
+    // when
+    rankings.updateWith(otherRankings);
+
+    // then
+    assertThat(rankings.getRankings()).isEqualTo(expSorted);
+  }
+
+  @Test(dataProvider = "simulatedRankingsData")
+  public void shouldCorrectlyRankWhenUpdatedWithEmptyOtherRankings(List<Rankable> unsorted, List<Rankable> expSorted) {
+    // given
+    Rankings rankings = new Rankings(unsorted.size());
+    for (Rankable r : unsorted) {
+      rankings.updateWith(r);
+    }
+    Rankings emptyRankings = new Rankings(ANY_TOPN);
+
+    // when
+    rankings.updateWith(emptyRankings);
+
+    // then
+    assertThat(rankings.getRankings()).isEqualTo(expSorted);
+  }
+
+  @DataProvider
+  public Object[][] simulatedRankingsAndOtherRankingsData() {
+    return new Object[][]{ { Lists.newArrayList(A), Lists.newArrayList(A), Lists.newArrayList(A) },
+        { Lists.newArrayList(A, C), Lists.newArrayList(B, D), Lists.newArrayList(D, C, B, A) }, { Lists.newArrayList(B,
+        F, A), Lists.newArrayList(C, D, E), Lists.newArrayList(F, E, D, C, B, A) }, { Lists.newArrayList(G, B, F, A, C),
+        Lists.newArrayList(D, E, H), Lists.newArrayList(H, G, F, E, D, C, B, A) } };
+  }
+
+  @Test(dataProvider = "simulatedRankingsAndOtherRankingsData")
+  public void shouldCorrectlyRankWhenNotEmptyAndUpdatedWithOtherRankings(List<Rankable> unsorted,
+      List<Rankable> unsortedForOtherRankings, List<Rankable> expSorted) {
+    // given
+    Rankings rankings = new Rankings(expSorted.size());
+    for (Rankable r : unsorted) {
+      rankings.updateWith(r);
+    }
+    Rankings otherRankings = new Rankings(unsortedForOtherRankings.size());
+    for (Rankable r : unsortedForOtherRankings) {
+      otherRankings.updateWith(r);
+    }
+
+    // when
+    rankings.updateWith(otherRankings);
+
+    // then
+    assertThat(rankings.getRankings()).isEqualTo(expSorted);
+  }
+
+  @DataProvider
+  public Object[][] duplicatesData() {
+    Rankable A1 = new RankableObjectWithFields("A", 1);
+    Rankable A2 = new RankableObjectWithFields("A", 2);
+    Rankable A3 = new RankableObjectWithFields("A", 3);
+    return new Object[][]{ { Lists.newArrayList(ANY_RANKABLE, ANY_RANKABLE, ANY_RANKABLE) }, { Lists.newArrayList(A1,
+        A2, A3) }, };
+  }
+
+  @Test(dataProvider = "duplicatesData")
+  public void shouldNotRankDuplicateObjectsMoreThanOnce(List<Rankable> duplicates) {
+    // given
+    Rankings rankings = new Rankings(duplicates.size());
+
+    // when
+    for (Rankable r : duplicates) {
+      rankings.updateWith(r);
+    }
+
+    // then
+    assertThat(rankings.size()).isEqualTo(1);
+  }
+
+  @DataProvider
+  public Object[][] removeZeroRankingsData() {
+    return new Object[][]{ { Lists.newArrayList(A, ZERO), Lists.newArrayList(A) }, { Lists.newArrayList(A),
+        Lists.newArrayList(A) }, { Lists.newArrayList(ZERO, A), Lists.newArrayList(A) }, { Lists.newArrayList(ZERO),
+        Lists.newArrayList() }, { Lists.newArrayList(ZERO, new RankableObjectWithFields("ZERO2", 0)),
+        Lists.newArrayList() }, { Lists.newArrayList(B, ZERO, new RankableObjectWithFields("ZERO2", 0), D,
+        new RankableObjectWithFields("ZERO3", 0), new RankableObjectWithFields("ZERO4", 0), C), Lists.newArrayList(D, C,
+        B) }, { Lists.newArrayList(A, ZERO, B), Lists.newArrayList(B, A) } };
+  }
+
+  @Test(dataProvider = "removeZeroRankingsData")
+  public void shouldRemoveZeroCounts(List<Rankable> unsorted, List<Rankable> expSorted) {
+    // given
+    Rankings rankings = new Rankings(unsorted.size());
+    for (Rankable r : unsorted) {
+      rankings.updateWith(r);
+    }
+
+    // when
+    rankings.pruneZeroCounts();
+
+    // then
+    assertThat(rankings.getRankings()).isEqualTo(expSorted);
+  }
+
+  @Test
+  public void updatingWithNewRankablesShouldBeThreadSafe() throws InterruptedException {
+    // given
+    final List<Rankable> entries = ImmutableList.of(A, B, C, D);
+    final Rankings rankings = new Rankings(entries.size());
+
+    // We are capturing exceptions thrown in Blitzer's child threads into this data structure so that we can properly
+    // pass/fail this test.  The reason is that Blitzer doesn't report exceptions, which is a known bug in Blitzer
+    // (JMOCK-263).  See https://github.com/jmock-developers/jmock-library/issues/22 for more information.
+    final List<Exception> exceptions = Lists.newArrayList();
+    Blitzer blitzer = new Blitzer(1000);
+
+    // when
+    blitzer.blitz(new Runnable() {
+      public void run() {
+        for (Rankable r : entries) {
+          try {
+            rankings.updateWith(r);
+          }
+          catch (RuntimeException e) {
+            synchronized(exceptions) {
+              exceptions.add(e);
+            }
+          }
+        }
+      }
+    });
+    blitzer.shutdown();
+
+    // then
+    //
+    if (!exceptions.isEmpty()) {
+      for (Exception e : exceptions) {
+        System.err.println(Throwables.getStackTraceAsString(e));
+      }
+    }
+    assertThat(exceptions).isEmpty();
+  }
+
+  @Test(dataProvider = "copyRankingsData")
+  public void copyShouldReturnCopy(int topN, List<Rankable> rankables) {
+    // given
+    Rankings rankings = new Rankings(topN);
+    for (Rankable r : rankables) {
+      rankings.updateWith(r);
+    }
+
+    // when
+    Rankings copy = rankings.copy();
+
+    // then
+    assertThat(copy.maxSize()).isEqualTo(rankings.maxSize());
+    assertThat(copy.getRankings()).isEqualTo(rankings.getRankings());
+  }
+
+  @Test(dataProvider = "defensiveCopyRankingsData")
+  public void copyShouldReturnDefensiveCopy(int topN, List<Rankable> rankables, List<Rankable> changes) {
+    // given
+    Rankings original = new Rankings(topN);
+    for (Rankable r : rankables) {
+      original.updateWith(r);
+    }
+    int expSize = original.size();
+    List<Rankable> expRankings = original.getRankings();
+
+    // when
+    Rankings copy = original.copy();
+    for (Rankable r : changes) {
+      copy.updateWith(r);
+    }
+    copy.pruneZeroCounts();
+
+    // then
+    assertThat(original.size()).isEqualTo(expSize);
+    assertThat(original.getRankings()).isEqualTo(expRankings);
+  }
+
+}
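
The thread-safety test above collects exceptions from Blitzer's worker threads by hand, because jMock's Blitzer does not report them back to the calling thread (JMOCK-263). A minimal, self-contained sketch of that same pattern, assuming only org.jmock.lib.concurrent.Blitzer and a plain synchronized list standing in for Rankings, might look like this (class name and shared object are illustrative only, not part of the commit):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import org.jmock.lib.concurrent.Blitzer;

public class BlitzerExceptionCaptureSketch {

  public static void main(String[] args) throws InterruptedException {
    // Shared object placed under concurrent load; stands in for Rankings in the real test.
    final List<Integer> shared = Collections.synchronizedList(new ArrayList<Integer>());
    // Exceptions thrown inside Blitzer's worker threads must be captured manually,
    // since Blitzer itself does not propagate them to the thread that called blitz().
    final List<Throwable> errors = Collections.synchronizedList(new ArrayList<Throwable>());

    Blitzer blitzer = new Blitzer(1000);   // run the action 1000 times across Blitzer's thread pool
    blitzer.blitz(new Runnable() {
      public void run() {
        try {
          shared.add(1);                   // the real test calls rankings.updateWith(r) here
        }
        catch (RuntimeException e) {
          errors.add(e);                   // record the failure so the test thread can see it
        }
      }
    });
    blitzer.shutdown();

    // A real test would now assert that no exceptions were captured,
    // e.g. assertThat(errors).isEmpty(), exactly as RankingsTest does above.
    System.out.println("captured errors: " + errors.size());
  }
}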


[04/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/generated/BoltStats.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/generated/BoltStats.java b/storm-core/src/jvm/backtype/storm/generated/BoltStats.java
deleted file mode 100644
index cbadd32..0000000
--- a/storm-core/src/jvm/backtype/storm/generated/BoltStats.java
+++ /dev/null
@@ -1,1390 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Autogenerated by Thrift Compiler (0.9.3)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- *  @generated
- */
-package backtype.storm.generated;
-
-import org.apache.thrift.scheme.IScheme;
-import org.apache.thrift.scheme.SchemeFactory;
-import org.apache.thrift.scheme.StandardScheme;
-
-import org.apache.thrift.scheme.TupleScheme;
-import org.apache.thrift.protocol.TTupleProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.TException;
-import org.apache.thrift.async.AsyncMethodCallback;
-import org.apache.thrift.server.AbstractNonblockingServer.*;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.EnumMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.EnumSet;
-import java.util.Collections;
-import java.util.BitSet;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import javax.annotation.Generated;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
-public class BoltStats implements org.apache.thrift.TBase<BoltStats, BoltStats._Fields>, java.io.Serializable, Cloneable, Comparable<BoltStats> {
-  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("BoltStats");
-
-  private static final org.apache.thrift.protocol.TField ACKED_FIELD_DESC = new org.apache.thrift.protocol.TField("acked", org.apache.thrift.protocol.TType.MAP, (short)1);
-  private static final org.apache.thrift.protocol.TField FAILED_FIELD_DESC = new org.apache.thrift.protocol.TField("failed", org.apache.thrift.protocol.TType.MAP, (short)2);
-  private static final org.apache.thrift.protocol.TField PROCESS_MS_AVG_FIELD_DESC = new org.apache.thrift.protocol.TField("process_ms_avg", org.apache.thrift.protocol.TType.MAP, (short)3);
-  private static final org.apache.thrift.protocol.TField EXECUTED_FIELD_DESC = new org.apache.thrift.protocol.TField("executed", org.apache.thrift.protocol.TType.MAP, (short)4);
-  private static final org.apache.thrift.protocol.TField EXECUTE_MS_AVG_FIELD_DESC = new org.apache.thrift.protocol.TField("execute_ms_avg", org.apache.thrift.protocol.TType.MAP, (short)5);
-
-  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
-  static {
-    schemes.put(StandardScheme.class, new BoltStatsStandardSchemeFactory());
-    schemes.put(TupleScheme.class, new BoltStatsTupleSchemeFactory());
-  }
-
-  private Map<String,Map<GlobalStreamId,Long>> acked; // required
-  private Map<String,Map<GlobalStreamId,Long>> failed; // required
-  private Map<String,Map<GlobalStreamId,Double>> process_ms_avg; // required
-  private Map<String,Map<GlobalStreamId,Long>> executed; // required
-  private Map<String,Map<GlobalStreamId,Double>> execute_ms_avg; // required
-
-  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    ACKED((short)1, "acked"),
-    FAILED((short)2, "failed"),
-    PROCESS_MS_AVG((short)3, "process_ms_avg"),
-    EXECUTED((short)4, "executed"),
-    EXECUTE_MS_AVG((short)5, "execute_ms_avg");
-
-    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
-
-    static {
-      for (_Fields field : EnumSet.allOf(_Fields.class)) {
-        byName.put(field.getFieldName(), field);
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, or null if its not found.
-     */
-    public static _Fields findByThriftId(int fieldId) {
-      switch(fieldId) {
-        case 1: // ACKED
-          return ACKED;
-        case 2: // FAILED
-          return FAILED;
-        case 3: // PROCESS_MS_AVG
-          return PROCESS_MS_AVG;
-        case 4: // EXECUTED
-          return EXECUTED;
-        case 5: // EXECUTE_MS_AVG
-          return EXECUTE_MS_AVG;
-        default:
-          return null;
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, throwing an exception
-     * if it is not found.
-     */
-    public static _Fields findByThriftIdOrThrow(int fieldId) {
-      _Fields fields = findByThriftId(fieldId);
-      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-      return fields;
-    }
-
-    /**
-     * Find the _Fields constant that matches name, or null if its not found.
-     */
-    public static _Fields findByName(String name) {
-      return byName.get(name);
-    }
-
-    private final short _thriftId;
-    private final String _fieldName;
-
-    _Fields(short thriftId, String fieldName) {
-      _thriftId = thriftId;
-      _fieldName = fieldName;
-    }
-
-    public short getThriftFieldId() {
-      return _thriftId;
-    }
-
-    public String getFieldName() {
-      return _fieldName;
-    }
-  }
-
-  // isset id assignments
-  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
-  static {
-    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.ACKED, new org.apache.thrift.meta_data.FieldMetaData("acked", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
-            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
-            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
-                new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GlobalStreamId.class), 
-                new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)))));
-    tmpMap.put(_Fields.FAILED, new org.apache.thrift.meta_data.FieldMetaData("failed", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
-            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
-            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
-                new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GlobalStreamId.class), 
-                new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)))));
-    tmpMap.put(_Fields.PROCESS_MS_AVG, new org.apache.thrift.meta_data.FieldMetaData("process_ms_avg", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
-            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
-            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
-                new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GlobalStreamId.class), 
-                new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)))));
-    tmpMap.put(_Fields.EXECUTED, new org.apache.thrift.meta_data.FieldMetaData("executed", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
-            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
-            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
-                new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GlobalStreamId.class), 
-                new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)))));
-    tmpMap.put(_Fields.EXECUTE_MS_AVG, new org.apache.thrift.meta_data.FieldMetaData("execute_ms_avg", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
-            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
-            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
-                new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GlobalStreamId.class), 
-                new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)))));
-    metaDataMap = Collections.unmodifiableMap(tmpMap);
-    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(BoltStats.class, metaDataMap);
-  }
-
-  public BoltStats() {
-  }
-
-  public BoltStats(
-    Map<String,Map<GlobalStreamId,Long>> acked,
-    Map<String,Map<GlobalStreamId,Long>> failed,
-    Map<String,Map<GlobalStreamId,Double>> process_ms_avg,
-    Map<String,Map<GlobalStreamId,Long>> executed,
-    Map<String,Map<GlobalStreamId,Double>> execute_ms_avg)
-  {
-    this();
-    this.acked = acked;
-    this.failed = failed;
-    this.process_ms_avg = process_ms_avg;
-    this.executed = executed;
-    this.execute_ms_avg = execute_ms_avg;
-  }
-
-  /**
-   * Performs a deep copy on <i>other</i>.
-   */
-  public BoltStats(BoltStats other) {
-    if (other.is_set_acked()) {
-      Map<String,Map<GlobalStreamId,Long>> __this__acked = new HashMap<String,Map<GlobalStreamId,Long>>(other.acked.size());
-      for (Map.Entry<String, Map<GlobalStreamId,Long>> other_element : other.acked.entrySet()) {
-
-        String other_element_key = other_element.getKey();
-        Map<GlobalStreamId,Long> other_element_value = other_element.getValue();
-
-        String __this__acked_copy_key = other_element_key;
-
-        Map<GlobalStreamId,Long> __this__acked_copy_value = new HashMap<GlobalStreamId,Long>(other_element_value.size());
-        for (Map.Entry<GlobalStreamId, Long> other_element_value_element : other_element_value.entrySet()) {
-
-          GlobalStreamId other_element_value_element_key = other_element_value_element.getKey();
-          Long other_element_value_element_value = other_element_value_element.getValue();
-
-          GlobalStreamId __this__acked_copy_value_copy_key = new GlobalStreamId(other_element_value_element_key);
-
-          Long __this__acked_copy_value_copy_value = other_element_value_element_value;
-
-          __this__acked_copy_value.put(__this__acked_copy_value_copy_key, __this__acked_copy_value_copy_value);
-        }
-
-        __this__acked.put(__this__acked_copy_key, __this__acked_copy_value);
-      }
-      this.acked = __this__acked;
-    }
-    if (other.is_set_failed()) {
-      Map<String,Map<GlobalStreamId,Long>> __this__failed = new HashMap<String,Map<GlobalStreamId,Long>>(other.failed.size());
-      for (Map.Entry<String, Map<GlobalStreamId,Long>> other_element : other.failed.entrySet()) {
-
-        String other_element_key = other_element.getKey();
-        Map<GlobalStreamId,Long> other_element_value = other_element.getValue();
-
-        String __this__failed_copy_key = other_element_key;
-
-        Map<GlobalStreamId,Long> __this__failed_copy_value = new HashMap<GlobalStreamId,Long>(other_element_value.size());
-        for (Map.Entry<GlobalStreamId, Long> other_element_value_element : other_element_value.entrySet()) {
-
-          GlobalStreamId other_element_value_element_key = other_element_value_element.getKey();
-          Long other_element_value_element_value = other_element_value_element.getValue();
-
-          GlobalStreamId __this__failed_copy_value_copy_key = new GlobalStreamId(other_element_value_element_key);
-
-          Long __this__failed_copy_value_copy_value = other_element_value_element_value;
-
-          __this__failed_copy_value.put(__this__failed_copy_value_copy_key, __this__failed_copy_value_copy_value);
-        }
-
-        __this__failed.put(__this__failed_copy_key, __this__failed_copy_value);
-      }
-      this.failed = __this__failed;
-    }
-    if (other.is_set_process_ms_avg()) {
-      Map<String,Map<GlobalStreamId,Double>> __this__process_ms_avg = new HashMap<String,Map<GlobalStreamId,Double>>(other.process_ms_avg.size());
-      for (Map.Entry<String, Map<GlobalStreamId,Double>> other_element : other.process_ms_avg.entrySet()) {
-
-        String other_element_key = other_element.getKey();
-        Map<GlobalStreamId,Double> other_element_value = other_element.getValue();
-
-        String __this__process_ms_avg_copy_key = other_element_key;
-
-        Map<GlobalStreamId,Double> __this__process_ms_avg_copy_value = new HashMap<GlobalStreamId,Double>(other_element_value.size());
-        for (Map.Entry<GlobalStreamId, Double> other_element_value_element : other_element_value.entrySet()) {
-
-          GlobalStreamId other_element_value_element_key = other_element_value_element.getKey();
-          Double other_element_value_element_value = other_element_value_element.getValue();
-
-          GlobalStreamId __this__process_ms_avg_copy_value_copy_key = new GlobalStreamId(other_element_value_element_key);
-
-          Double __this__process_ms_avg_copy_value_copy_value = other_element_value_element_value;
-
-          __this__process_ms_avg_copy_value.put(__this__process_ms_avg_copy_value_copy_key, __this__process_ms_avg_copy_value_copy_value);
-        }
-
-        __this__process_ms_avg.put(__this__process_ms_avg_copy_key, __this__process_ms_avg_copy_value);
-      }
-      this.process_ms_avg = __this__process_ms_avg;
-    }
-    if (other.is_set_executed()) {
-      Map<String,Map<GlobalStreamId,Long>> __this__executed = new HashMap<String,Map<GlobalStreamId,Long>>(other.executed.size());
-      for (Map.Entry<String, Map<GlobalStreamId,Long>> other_element : other.executed.entrySet()) {
-
-        String other_element_key = other_element.getKey();
-        Map<GlobalStreamId,Long> other_element_value = other_element.getValue();
-
-        String __this__executed_copy_key = other_element_key;
-
-        Map<GlobalStreamId,Long> __this__executed_copy_value = new HashMap<GlobalStreamId,Long>(other_element_value.size());
-        for (Map.Entry<GlobalStreamId, Long> other_element_value_element : other_element_value.entrySet()) {
-
-          GlobalStreamId other_element_value_element_key = other_element_value_element.getKey();
-          Long other_element_value_element_value = other_element_value_element.getValue();
-
-          GlobalStreamId __this__executed_copy_value_copy_key = new GlobalStreamId(other_element_value_element_key);
-
-          Long __this__executed_copy_value_copy_value = other_element_value_element_value;
-
-          __this__executed_copy_value.put(__this__executed_copy_value_copy_key, __this__executed_copy_value_copy_value);
-        }
-
-        __this__executed.put(__this__executed_copy_key, __this__executed_copy_value);
-      }
-      this.executed = __this__executed;
-    }
-    if (other.is_set_execute_ms_avg()) {
-      Map<String,Map<GlobalStreamId,Double>> __this__execute_ms_avg = new HashMap<String,Map<GlobalStreamId,Double>>(other.execute_ms_avg.size());
-      for (Map.Entry<String, Map<GlobalStreamId,Double>> other_element : other.execute_ms_avg.entrySet()) {
-
-        String other_element_key = other_element.getKey();
-        Map<GlobalStreamId,Double> other_element_value = other_element.getValue();
-
-        String __this__execute_ms_avg_copy_key = other_element_key;
-
-        Map<GlobalStreamId,Double> __this__execute_ms_avg_copy_value = new HashMap<GlobalStreamId,Double>(other_element_value.size());
-        for (Map.Entry<GlobalStreamId, Double> other_element_value_element : other_element_value.entrySet()) {
-
-          GlobalStreamId other_element_value_element_key = other_element_value_element.getKey();
-          Double other_element_value_element_value = other_element_value_element.getValue();
-
-          GlobalStreamId __this__execute_ms_avg_copy_value_copy_key = new GlobalStreamId(other_element_value_element_key);
-
-          Double __this__execute_ms_avg_copy_value_copy_value = other_element_value_element_value;
-
-          __this__execute_ms_avg_copy_value.put(__this__execute_ms_avg_copy_value_copy_key, __this__execute_ms_avg_copy_value_copy_value);
-        }
-
-        __this__execute_ms_avg.put(__this__execute_ms_avg_copy_key, __this__execute_ms_avg_copy_value);
-      }
-      this.execute_ms_avg = __this__execute_ms_avg;
-    }
-  }
-
-  public BoltStats deepCopy() {
-    return new BoltStats(this);
-  }
-
-  @Override
-  public void clear() {
-    this.acked = null;
-    this.failed = null;
-    this.process_ms_avg = null;
-    this.executed = null;
-    this.execute_ms_avg = null;
-  }
-
-  public int get_acked_size() {
-    return (this.acked == null) ? 0 : this.acked.size();
-  }
-
-  public void put_to_acked(String key, Map<GlobalStreamId,Long> val) {
-    if (this.acked == null) {
-      this.acked = new HashMap<String,Map<GlobalStreamId,Long>>();
-    }
-    this.acked.put(key, val);
-  }
-
-  public Map<String,Map<GlobalStreamId,Long>> get_acked() {
-    return this.acked;
-  }
-
-  public void set_acked(Map<String,Map<GlobalStreamId,Long>> acked) {
-    this.acked = acked;
-  }
-
-  public void unset_acked() {
-    this.acked = null;
-  }
-
-  /** Returns true if field acked is set (has been assigned a value) and false otherwise */
-  public boolean is_set_acked() {
-    return this.acked != null;
-  }
-
-  public void set_acked_isSet(boolean value) {
-    if (!value) {
-      this.acked = null;
-    }
-  }
-
-  public int get_failed_size() {
-    return (this.failed == null) ? 0 : this.failed.size();
-  }
-
-  public void put_to_failed(String key, Map<GlobalStreamId,Long> val) {
-    if (this.failed == null) {
-      this.failed = new HashMap<String,Map<GlobalStreamId,Long>>();
-    }
-    this.failed.put(key, val);
-  }
-
-  public Map<String,Map<GlobalStreamId,Long>> get_failed() {
-    return this.failed;
-  }
-
-  public void set_failed(Map<String,Map<GlobalStreamId,Long>> failed) {
-    this.failed = failed;
-  }
-
-  public void unset_failed() {
-    this.failed = null;
-  }
-
-  /** Returns true if field failed is set (has been assigned a value) and false otherwise */
-  public boolean is_set_failed() {
-    return this.failed != null;
-  }
-
-  public void set_failed_isSet(boolean value) {
-    if (!value) {
-      this.failed = null;
-    }
-  }
-
-  public int get_process_ms_avg_size() {
-    return (this.process_ms_avg == null) ? 0 : this.process_ms_avg.size();
-  }
-
-  public void put_to_process_ms_avg(String key, Map<GlobalStreamId,Double> val) {
-    if (this.process_ms_avg == null) {
-      this.process_ms_avg = new HashMap<String,Map<GlobalStreamId,Double>>();
-    }
-    this.process_ms_avg.put(key, val);
-  }
-
-  public Map<String,Map<GlobalStreamId,Double>> get_process_ms_avg() {
-    return this.process_ms_avg;
-  }
-
-  public void set_process_ms_avg(Map<String,Map<GlobalStreamId,Double>> process_ms_avg) {
-    this.process_ms_avg = process_ms_avg;
-  }
-
-  public void unset_process_ms_avg() {
-    this.process_ms_avg = null;
-  }
-
-  /** Returns true if field process_ms_avg is set (has been assigned a value) and false otherwise */
-  public boolean is_set_process_ms_avg() {
-    return this.process_ms_avg != null;
-  }
-
-  public void set_process_ms_avg_isSet(boolean value) {
-    if (!value) {
-      this.process_ms_avg = null;
-    }
-  }
-
-  public int get_executed_size() {
-    return (this.executed == null) ? 0 : this.executed.size();
-  }
-
-  public void put_to_executed(String key, Map<GlobalStreamId,Long> val) {
-    if (this.executed == null) {
-      this.executed = new HashMap<String,Map<GlobalStreamId,Long>>();
-    }
-    this.executed.put(key, val);
-  }
-
-  public Map<String,Map<GlobalStreamId,Long>> get_executed() {
-    return this.executed;
-  }
-
-  public void set_executed(Map<String,Map<GlobalStreamId,Long>> executed) {
-    this.executed = executed;
-  }
-
-  public void unset_executed() {
-    this.executed = null;
-  }
-
-  /** Returns true if field executed is set (has been assigned a value) and false otherwise */
-  public boolean is_set_executed() {
-    return this.executed != null;
-  }
-
-  public void set_executed_isSet(boolean value) {
-    if (!value) {
-      this.executed = null;
-    }
-  }
-
-  public int get_execute_ms_avg_size() {
-    return (this.execute_ms_avg == null) ? 0 : this.execute_ms_avg.size();
-  }
-
-  public void put_to_execute_ms_avg(String key, Map<GlobalStreamId,Double> val) {
-    if (this.execute_ms_avg == null) {
-      this.execute_ms_avg = new HashMap<String,Map<GlobalStreamId,Double>>();
-    }
-    this.execute_ms_avg.put(key, val);
-  }
-
-  public Map<String,Map<GlobalStreamId,Double>> get_execute_ms_avg() {
-    return this.execute_ms_avg;
-  }
-
-  public void set_execute_ms_avg(Map<String,Map<GlobalStreamId,Double>> execute_ms_avg) {
-    this.execute_ms_avg = execute_ms_avg;
-  }
-
-  public void unset_execute_ms_avg() {
-    this.execute_ms_avg = null;
-  }
-
-  /** Returns true if field execute_ms_avg is set (has been assigned a value) and false otherwise */
-  public boolean is_set_execute_ms_avg() {
-    return this.execute_ms_avg != null;
-  }
-
-  public void set_execute_ms_avg_isSet(boolean value) {
-    if (!value) {
-      this.execute_ms_avg = null;
-    }
-  }
-
-  public void setFieldValue(_Fields field, Object value) {
-    switch (field) {
-    case ACKED:
-      if (value == null) {
-        unset_acked();
-      } else {
-        set_acked((Map<String,Map<GlobalStreamId,Long>>)value);
-      }
-      break;
-
-    case FAILED:
-      if (value == null) {
-        unset_failed();
-      } else {
-        set_failed((Map<String,Map<GlobalStreamId,Long>>)value);
-      }
-      break;
-
-    case PROCESS_MS_AVG:
-      if (value == null) {
-        unset_process_ms_avg();
-      } else {
-        set_process_ms_avg((Map<String,Map<GlobalStreamId,Double>>)value);
-      }
-      break;
-
-    case EXECUTED:
-      if (value == null) {
-        unset_executed();
-      } else {
-        set_executed((Map<String,Map<GlobalStreamId,Long>>)value);
-      }
-      break;
-
-    case EXECUTE_MS_AVG:
-      if (value == null) {
-        unset_execute_ms_avg();
-      } else {
-        set_execute_ms_avg((Map<String,Map<GlobalStreamId,Double>>)value);
-      }
-      break;
-
-    }
-  }
-
-  public Object getFieldValue(_Fields field) {
-    switch (field) {
-    case ACKED:
-      return get_acked();
-
-    case FAILED:
-      return get_failed();
-
-    case PROCESS_MS_AVG:
-      return get_process_ms_avg();
-
-    case EXECUTED:
-      return get_executed();
-
-    case EXECUTE_MS_AVG:
-      return get_execute_ms_avg();
-
-    }
-    throw new IllegalStateException();
-  }
-
-  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
-  public boolean isSet(_Fields field) {
-    if (field == null) {
-      throw new IllegalArgumentException();
-    }
-
-    switch (field) {
-    case ACKED:
-      return is_set_acked();
-    case FAILED:
-      return is_set_failed();
-    case PROCESS_MS_AVG:
-      return is_set_process_ms_avg();
-    case EXECUTED:
-      return is_set_executed();
-    case EXECUTE_MS_AVG:
-      return is_set_execute_ms_avg();
-    }
-    throw new IllegalStateException();
-  }
-
-  @Override
-  public boolean equals(Object that) {
-    if (that == null)
-      return false;
-    if (that instanceof BoltStats)
-      return this.equals((BoltStats)that);
-    return false;
-  }
-
-  public boolean equals(BoltStats that) {
-    if (that == null)
-      return false;
-
-    boolean this_present_acked = true && this.is_set_acked();
-    boolean that_present_acked = true && that.is_set_acked();
-    if (this_present_acked || that_present_acked) {
-      if (!(this_present_acked && that_present_acked))
-        return false;
-      if (!this.acked.equals(that.acked))
-        return false;
-    }
-
-    boolean this_present_failed = true && this.is_set_failed();
-    boolean that_present_failed = true && that.is_set_failed();
-    if (this_present_failed || that_present_failed) {
-      if (!(this_present_failed && that_present_failed))
-        return false;
-      if (!this.failed.equals(that.failed))
-        return false;
-    }
-
-    boolean this_present_process_ms_avg = true && this.is_set_process_ms_avg();
-    boolean that_present_process_ms_avg = true && that.is_set_process_ms_avg();
-    if (this_present_process_ms_avg || that_present_process_ms_avg) {
-      if (!(this_present_process_ms_avg && that_present_process_ms_avg))
-        return false;
-      if (!this.process_ms_avg.equals(that.process_ms_avg))
-        return false;
-    }
-
-    boolean this_present_executed = true && this.is_set_executed();
-    boolean that_present_executed = true && that.is_set_executed();
-    if (this_present_executed || that_present_executed) {
-      if (!(this_present_executed && that_present_executed))
-        return false;
-      if (!this.executed.equals(that.executed))
-        return false;
-    }
-
-    boolean this_present_execute_ms_avg = true && this.is_set_execute_ms_avg();
-    boolean that_present_execute_ms_avg = true && that.is_set_execute_ms_avg();
-    if (this_present_execute_ms_avg || that_present_execute_ms_avg) {
-      if (!(this_present_execute_ms_avg && that_present_execute_ms_avg))
-        return false;
-      if (!this.execute_ms_avg.equals(that.execute_ms_avg))
-        return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    List<Object> list = new ArrayList<Object>();
-
-    boolean present_acked = true && (is_set_acked());
-    list.add(present_acked);
-    if (present_acked)
-      list.add(acked);
-
-    boolean present_failed = true && (is_set_failed());
-    list.add(present_failed);
-    if (present_failed)
-      list.add(failed);
-
-    boolean present_process_ms_avg = true && (is_set_process_ms_avg());
-    list.add(present_process_ms_avg);
-    if (present_process_ms_avg)
-      list.add(process_ms_avg);
-
-    boolean present_executed = true && (is_set_executed());
-    list.add(present_executed);
-    if (present_executed)
-      list.add(executed);
-
-    boolean present_execute_ms_avg = true && (is_set_execute_ms_avg());
-    list.add(present_execute_ms_avg);
-    if (present_execute_ms_avg)
-      list.add(execute_ms_avg);
-
-    return list.hashCode();
-  }
-
-  @Override
-  public int compareTo(BoltStats other) {
-    if (!getClass().equals(other.getClass())) {
-      return getClass().getName().compareTo(other.getClass().getName());
-    }
-
-    int lastComparison = 0;
-
-    lastComparison = Boolean.valueOf(is_set_acked()).compareTo(other.is_set_acked());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_acked()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.acked, other.acked);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_failed()).compareTo(other.is_set_failed());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_failed()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.failed, other.failed);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_process_ms_avg()).compareTo(other.is_set_process_ms_avg());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_process_ms_avg()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.process_ms_avg, other.process_ms_avg);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_executed()).compareTo(other.is_set_executed());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_executed()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.executed, other.executed);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_execute_ms_avg()).compareTo(other.is_set_execute_ms_avg());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_execute_ms_avg()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.execute_ms_avg, other.execute_ms_avg);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    return 0;
-  }
-
-  public _Fields fieldForId(int fieldId) {
-    return _Fields.findByThriftId(fieldId);
-  }
-
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
-  }
-
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("BoltStats(");
-    boolean first = true;
-
-    sb.append("acked:");
-    if (this.acked == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.acked);
-    }
-    first = false;
-    if (!first) sb.append(", ");
-    sb.append("failed:");
-    if (this.failed == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.failed);
-    }
-    first = false;
-    if (!first) sb.append(", ");
-    sb.append("process_ms_avg:");
-    if (this.process_ms_avg == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.process_ms_avg);
-    }
-    first = false;
-    if (!first) sb.append(", ");
-    sb.append("executed:");
-    if (this.executed == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.executed);
-    }
-    first = false;
-    if (!first) sb.append(", ");
-    sb.append("execute_ms_avg:");
-    if (this.execute_ms_avg == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.execute_ms_avg);
-    }
-    first = false;
-    sb.append(")");
-    return sb.toString();
-  }
-
-  public void validate() throws org.apache.thrift.TException {
-    // check for required fields
-    if (!is_set_acked()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'acked' is unset! Struct:" + toString());
-    }
-
-    if (!is_set_failed()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'failed' is unset! Struct:" + toString());
-    }
-
-    if (!is_set_process_ms_avg()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'process_ms_avg' is unset! Struct:" + toString());
-    }
-
-    if (!is_set_executed()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'executed' is unset! Struct:" + toString());
-    }
-
-    if (!is_set_execute_ms_avg()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'execute_ms_avg' is unset! Struct:" + toString());
-    }
-
-    // check for sub-struct validity
-  }
-
-  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
-    try {
-      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
-    try {
-      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private static class BoltStatsStandardSchemeFactory implements SchemeFactory {
-    public BoltStatsStandardScheme getScheme() {
-      return new BoltStatsStandardScheme();
-    }
-  }
-
-  private static class BoltStatsStandardScheme extends StandardScheme<BoltStats> {
-
-    public void read(org.apache.thrift.protocol.TProtocol iprot, BoltStats struct) throws org.apache.thrift.TException {
-      org.apache.thrift.protocol.TField schemeField;
-      iprot.readStructBegin();
-      while (true)
-      {
-        schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
-          break;
-        }
-        switch (schemeField.id) {
-          case 1: // ACKED
-            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
-              {
-                org.apache.thrift.protocol.TMap _map116 = iprot.readMapBegin();
-                struct.acked = new HashMap<String,Map<GlobalStreamId,Long>>(2*_map116.size);
-                String _key117;
-                Map<GlobalStreamId,Long> _val118;
-                for (int _i119 = 0; _i119 < _map116.size; ++_i119)
-                {
-                  _key117 = iprot.readString();
-                  {
-                    org.apache.thrift.protocol.TMap _map120 = iprot.readMapBegin();
-                    _val118 = new HashMap<GlobalStreamId,Long>(2*_map120.size);
-                    GlobalStreamId _key121;
-                    long _val122;
-                    for (int _i123 = 0; _i123 < _map120.size; ++_i123)
-                    {
-                      _key121 = new GlobalStreamId();
-                      _key121.read(iprot);
-                      _val122 = iprot.readI64();
-                      _val118.put(_key121, _val122);
-                    }
-                    iprot.readMapEnd();
-                  }
-                  struct.acked.put(_key117, _val118);
-                }
-                iprot.readMapEnd();
-              }
-              struct.set_acked_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 2: // FAILED
-            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
-              {
-                org.apache.thrift.protocol.TMap _map124 = iprot.readMapBegin();
-                struct.failed = new HashMap<String,Map<GlobalStreamId,Long>>(2*_map124.size);
-                String _key125;
-                Map<GlobalStreamId,Long> _val126;
-                for (int _i127 = 0; _i127 < _map124.size; ++_i127)
-                {
-                  _key125 = iprot.readString();
-                  {
-                    org.apache.thrift.protocol.TMap _map128 = iprot.readMapBegin();
-                    _val126 = new HashMap<GlobalStreamId,Long>(2*_map128.size);
-                    GlobalStreamId _key129;
-                    long _val130;
-                    for (int _i131 = 0; _i131 < _map128.size; ++_i131)
-                    {
-                      _key129 = new GlobalStreamId();
-                      _key129.read(iprot);
-                      _val130 = iprot.readI64();
-                      _val126.put(_key129, _val130);
-                    }
-                    iprot.readMapEnd();
-                  }
-                  struct.failed.put(_key125, _val126);
-                }
-                iprot.readMapEnd();
-              }
-              struct.set_failed_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 3: // PROCESS_MS_AVG
-            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
-              {
-                org.apache.thrift.protocol.TMap _map132 = iprot.readMapBegin();
-                struct.process_ms_avg = new HashMap<String,Map<GlobalStreamId,Double>>(2*_map132.size);
-                String _key133;
-                Map<GlobalStreamId,Double> _val134;
-                for (int _i135 = 0; _i135 < _map132.size; ++_i135)
-                {
-                  _key133 = iprot.readString();
-                  {
-                    org.apache.thrift.protocol.TMap _map136 = iprot.readMapBegin();
-                    _val134 = new HashMap<GlobalStreamId,Double>(2*_map136.size);
-                    GlobalStreamId _key137;
-                    double _val138;
-                    for (int _i139 = 0; _i139 < _map136.size; ++_i139)
-                    {
-                      _key137 = new GlobalStreamId();
-                      _key137.read(iprot);
-                      _val138 = iprot.readDouble();
-                      _val134.put(_key137, _val138);
-                    }
-                    iprot.readMapEnd();
-                  }
-                  struct.process_ms_avg.put(_key133, _val134);
-                }
-                iprot.readMapEnd();
-              }
-              struct.set_process_ms_avg_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 4: // EXECUTED
-            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
-              {
-                org.apache.thrift.protocol.TMap _map140 = iprot.readMapBegin();
-                struct.executed = new HashMap<String,Map<GlobalStreamId,Long>>(2*_map140.size);
-                String _key141;
-                Map<GlobalStreamId,Long> _val142;
-                for (int _i143 = 0; _i143 < _map140.size; ++_i143)
-                {
-                  _key141 = iprot.readString();
-                  {
-                    org.apache.thrift.protocol.TMap _map144 = iprot.readMapBegin();
-                    _val142 = new HashMap<GlobalStreamId,Long>(2*_map144.size);
-                    GlobalStreamId _key145;
-                    long _val146;
-                    for (int _i147 = 0; _i147 < _map144.size; ++_i147)
-                    {
-                      _key145 = new GlobalStreamId();
-                      _key145.read(iprot);
-                      _val146 = iprot.readI64();
-                      _val142.put(_key145, _val146);
-                    }
-                    iprot.readMapEnd();
-                  }
-                  struct.executed.put(_key141, _val142);
-                }
-                iprot.readMapEnd();
-              }
-              struct.set_executed_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 5: // EXECUTE_MS_AVG
-            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
-              {
-                org.apache.thrift.protocol.TMap _map148 = iprot.readMapBegin();
-                struct.execute_ms_avg = new HashMap<String,Map<GlobalStreamId,Double>>(2*_map148.size);
-                String _key149;
-                Map<GlobalStreamId,Double> _val150;
-                for (int _i151 = 0; _i151 < _map148.size; ++_i151)
-                {
-                  _key149 = iprot.readString();
-                  {
-                    org.apache.thrift.protocol.TMap _map152 = iprot.readMapBegin();
-                    _val150 = new HashMap<GlobalStreamId,Double>(2*_map152.size);
-                    GlobalStreamId _key153;
-                    double _val154;
-                    for (int _i155 = 0; _i155 < _map152.size; ++_i155)
-                    {
-                      _key153 = new GlobalStreamId();
-                      _key153.read(iprot);
-                      _val154 = iprot.readDouble();
-                      _val150.put(_key153, _val154);
-                    }
-                    iprot.readMapEnd();
-                  }
-                  struct.execute_ms_avg.put(_key149, _val150);
-                }
-                iprot.readMapEnd();
-              }
-              struct.set_execute_ms_avg_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          default:
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-        }
-        iprot.readFieldEnd();
-      }
-      iprot.readStructEnd();
-      struct.validate();
-    }
-
-    public void write(org.apache.thrift.protocol.TProtocol oprot, BoltStats struct) throws org.apache.thrift.TException {
-      struct.validate();
-
-      oprot.writeStructBegin(STRUCT_DESC);
-      if (struct.acked != null) {
-        oprot.writeFieldBegin(ACKED_FIELD_DESC);
-        {
-          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, struct.acked.size()));
-          for (Map.Entry<String, Map<GlobalStreamId,Long>> _iter156 : struct.acked.entrySet())
-          {
-            oprot.writeString(_iter156.getKey());
-            {
-              oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.I64, _iter156.getValue().size()));
-              for (Map.Entry<GlobalStreamId, Long> _iter157 : _iter156.getValue().entrySet())
-              {
-                _iter157.getKey().write(oprot);
-                oprot.writeI64(_iter157.getValue());
-              }
-              oprot.writeMapEnd();
-            }
-          }
-          oprot.writeMapEnd();
-        }
-        oprot.writeFieldEnd();
-      }
-      if (struct.failed != null) {
-        oprot.writeFieldBegin(FAILED_FIELD_DESC);
-        {
-          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, struct.failed.size()));
-          for (Map.Entry<String, Map<GlobalStreamId,Long>> _iter158 : struct.failed.entrySet())
-          {
-            oprot.writeString(_iter158.getKey());
-            {
-              oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.I64, _iter158.getValue().size()));
-              for (Map.Entry<GlobalStreamId, Long> _iter159 : _iter158.getValue().entrySet())
-              {
-                _iter159.getKey().write(oprot);
-                oprot.writeI64(_iter159.getValue());
-              }
-              oprot.writeMapEnd();
-            }
-          }
-          oprot.writeMapEnd();
-        }
-        oprot.writeFieldEnd();
-      }
-      if (struct.process_ms_avg != null) {
-        oprot.writeFieldBegin(PROCESS_MS_AVG_FIELD_DESC);
-        {
-          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, struct.process_ms_avg.size()));
-          for (Map.Entry<String, Map<GlobalStreamId,Double>> _iter160 : struct.process_ms_avg.entrySet())
-          {
-            oprot.writeString(_iter160.getKey());
-            {
-              oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.DOUBLE, _iter160.getValue().size()));
-              for (Map.Entry<GlobalStreamId, Double> _iter161 : _iter160.getValue().entrySet())
-              {
-                _iter161.getKey().write(oprot);
-                oprot.writeDouble(_iter161.getValue());
-              }
-              oprot.writeMapEnd();
-            }
-          }
-          oprot.writeMapEnd();
-        }
-        oprot.writeFieldEnd();
-      }
-      if (struct.executed != null) {
-        oprot.writeFieldBegin(EXECUTED_FIELD_DESC);
-        {
-          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, struct.executed.size()));
-          for (Map.Entry<String, Map<GlobalStreamId,Long>> _iter162 : struct.executed.entrySet())
-          {
-            oprot.writeString(_iter162.getKey());
-            {
-              oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.I64, _iter162.getValue().size()));
-              for (Map.Entry<GlobalStreamId, Long> _iter163 : _iter162.getValue().entrySet())
-              {
-                _iter163.getKey().write(oprot);
-                oprot.writeI64(_iter163.getValue());
-              }
-              oprot.writeMapEnd();
-            }
-          }
-          oprot.writeMapEnd();
-        }
-        oprot.writeFieldEnd();
-      }
-      if (struct.execute_ms_avg != null) {
-        oprot.writeFieldBegin(EXECUTE_MS_AVG_FIELD_DESC);
-        {
-          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, struct.execute_ms_avg.size()));
-          for (Map.Entry<String, Map<GlobalStreamId,Double>> _iter164 : struct.execute_ms_avg.entrySet())
-          {
-            oprot.writeString(_iter164.getKey());
-            {
-              oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.DOUBLE, _iter164.getValue().size()));
-              for (Map.Entry<GlobalStreamId, Double> _iter165 : _iter164.getValue().entrySet())
-              {
-                _iter165.getKey().write(oprot);
-                oprot.writeDouble(_iter165.getValue());
-              }
-              oprot.writeMapEnd();
-            }
-          }
-          oprot.writeMapEnd();
-        }
-        oprot.writeFieldEnd();
-      }
-      oprot.writeFieldStop();
-      oprot.writeStructEnd();
-    }
-
-  }
-
-  private static class BoltStatsTupleSchemeFactory implements SchemeFactory {
-    public BoltStatsTupleScheme getScheme() {
-      return new BoltStatsTupleScheme();
-    }
-  }
-
-  private static class BoltStatsTupleScheme extends TupleScheme<BoltStats> {
-
-    @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, BoltStats struct) throws org.apache.thrift.TException {
-      TTupleProtocol oprot = (TTupleProtocol) prot;
-      {
-        oprot.writeI32(struct.acked.size());
-        for (Map.Entry<String, Map<GlobalStreamId,Long>> _iter166 : struct.acked.entrySet())
-        {
-          oprot.writeString(_iter166.getKey());
-          {
-            oprot.writeI32(_iter166.getValue().size());
-            for (Map.Entry<GlobalStreamId, Long> _iter167 : _iter166.getValue().entrySet())
-            {
-              _iter167.getKey().write(oprot);
-              oprot.writeI64(_iter167.getValue());
-            }
-          }
-        }
-      }
-      {
-        oprot.writeI32(struct.failed.size());
-        for (Map.Entry<String, Map<GlobalStreamId,Long>> _iter168 : struct.failed.entrySet())
-        {
-          oprot.writeString(_iter168.getKey());
-          {
-            oprot.writeI32(_iter168.getValue().size());
-            for (Map.Entry<GlobalStreamId, Long> _iter169 : _iter168.getValue().entrySet())
-            {
-              _iter169.getKey().write(oprot);
-              oprot.writeI64(_iter169.getValue());
-            }
-          }
-        }
-      }
-      {
-        oprot.writeI32(struct.process_ms_avg.size());
-        for (Map.Entry<String, Map<GlobalStreamId,Double>> _iter170 : struct.process_ms_avg.entrySet())
-        {
-          oprot.writeString(_iter170.getKey());
-          {
-            oprot.writeI32(_iter170.getValue().size());
-            for (Map.Entry<GlobalStreamId, Double> _iter171 : _iter170.getValue().entrySet())
-            {
-              _iter171.getKey().write(oprot);
-              oprot.writeDouble(_iter171.getValue());
-            }
-          }
-        }
-      }
-      {
-        oprot.writeI32(struct.executed.size());
-        for (Map.Entry<String, Map<GlobalStreamId,Long>> _iter172 : struct.executed.entrySet())
-        {
-          oprot.writeString(_iter172.getKey());
-          {
-            oprot.writeI32(_iter172.getValue().size());
-            for (Map.Entry<GlobalStreamId, Long> _iter173 : _iter172.getValue().entrySet())
-            {
-              _iter173.getKey().write(oprot);
-              oprot.writeI64(_iter173.getValue());
-            }
-          }
-        }
-      }
-      {
-        oprot.writeI32(struct.execute_ms_avg.size());
-        for (Map.Entry<String, Map<GlobalStreamId,Double>> _iter174 : struct.execute_ms_avg.entrySet())
-        {
-          oprot.writeString(_iter174.getKey());
-          {
-            oprot.writeI32(_iter174.getValue().size());
-            for (Map.Entry<GlobalStreamId, Double> _iter175 : _iter174.getValue().entrySet())
-            {
-              _iter175.getKey().write(oprot);
-              oprot.writeDouble(_iter175.getValue());
-            }
-          }
-        }
-      }
-    }
-
-    @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, BoltStats struct) throws org.apache.thrift.TException {
-      TTupleProtocol iprot = (TTupleProtocol) prot;
-      {
-        org.apache.thrift.protocol.TMap _map176 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32());
-        struct.acked = new HashMap<String,Map<GlobalStreamId,Long>>(2*_map176.size);
-        String _key177;
-        Map<GlobalStreamId,Long> _val178;
-        for (int _i179 = 0; _i179 < _map176.size; ++_i179)
-        {
-          _key177 = iprot.readString();
-          {
-            org.apache.thrift.protocol.TMap _map180 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.I64, iprot.readI32());
-            _val178 = new HashMap<GlobalStreamId,Long>(2*_map180.size);
-            GlobalStreamId _key181;
-            long _val182;
-            for (int _i183 = 0; _i183 < _map180.size; ++_i183)
-            {
-              _key181 = new GlobalStreamId();
-              _key181.read(iprot);
-              _val182 = iprot.readI64();
-              _val178.put(_key181, _val182);
-            }
-          }
-          struct.acked.put(_key177, _val178);
-        }
-      }
-      struct.set_acked_isSet(true);
-      {
-        org.apache.thrift.protocol.TMap _map184 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32());
-        struct.failed = new HashMap<String,Map<GlobalStreamId,Long>>(2*_map184.size);
-        String _key185;
-        Map<GlobalStreamId,Long> _val186;
-        for (int _i187 = 0; _i187 < _map184.size; ++_i187)
-        {
-          _key185 = iprot.readString();
-          {
-            org.apache.thrift.protocol.TMap _map188 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.I64, iprot.readI32());
-            _val186 = new HashMap<GlobalStreamId,Long>(2*_map188.size);
-            GlobalStreamId _key189;
-            long _val190;
-            for (int _i191 = 0; _i191 < _map188.size; ++_i191)
-            {
-              _key189 = new GlobalStreamId();
-              _key189.read(iprot);
-              _val190 = iprot.readI64();
-              _val186.put(_key189, _val190);
-            }
-          }
-          struct.failed.put(_key185, _val186);
-        }
-      }
-      struct.set_failed_isSet(true);
-      {
-        org.apache.thrift.protocol.TMap _map192 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32());
-        struct.process_ms_avg = new HashMap<String,Map<GlobalStreamId,Double>>(2*_map192.size);
-        String _key193;
-        Map<GlobalStreamId,Double> _val194;
-        for (int _i195 = 0; _i195 < _map192.size; ++_i195)
-        {
-          _key193 = iprot.readString();
-          {
-            org.apache.thrift.protocol.TMap _map196 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.DOUBLE, iprot.readI32());
-            _val194 = new HashMap<GlobalStreamId,Double>(2*_map196.size);
-            GlobalStreamId _key197;
-            double _val198;
-            for (int _i199 = 0; _i199 < _map196.size; ++_i199)
-            {
-              _key197 = new GlobalStreamId();
-              _key197.read(iprot);
-              _val198 = iprot.readDouble();
-              _val194.put(_key197, _val198);
-            }
-          }
-          struct.process_ms_avg.put(_key193, _val194);
-        }
-      }
-      struct.set_process_ms_avg_isSet(true);
-      {
-        org.apache.thrift.protocol.TMap _map200 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32());
-        struct.executed = new HashMap<String,Map<GlobalStreamId,Long>>(2*_map200.size);
-        String _key201;
-        Map<GlobalStreamId,Long> _val202;
-        for (int _i203 = 0; _i203 < _map200.size; ++_i203)
-        {
-          _key201 = iprot.readString();
-          {
-            org.apache.thrift.protocol.TMap _map204 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.I64, iprot.readI32());
-            _val202 = new HashMap<GlobalStreamId,Long>(2*_map204.size);
-            GlobalStreamId _key205;
-            long _val206;
-            for (int _i207 = 0; _i207 < _map204.size; ++_i207)
-            {
-              _key205 = new GlobalStreamId();
-              _key205.read(iprot);
-              _val206 = iprot.readI64();
-              _val202.put(_key205, _val206);
-            }
-          }
-          struct.executed.put(_key201, _val202);
-        }
-      }
-      struct.set_executed_isSet(true);
-      {
-        org.apache.thrift.protocol.TMap _map208 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32());
-        struct.execute_ms_avg = new HashMap<String,Map<GlobalStreamId,Double>>(2*_map208.size);
-        String _key209;
-        Map<GlobalStreamId,Double> _val210;
-        for (int _i211 = 0; _i211 < _map208.size; ++_i211)
-        {
-          _key209 = iprot.readString();
-          {
-            org.apache.thrift.protocol.TMap _map212 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.DOUBLE, iprot.readI32());
-            _val210 = new HashMap<GlobalStreamId,Double>(2*_map212.size);
-            GlobalStreamId _key213;
-            double _val214;
-            for (int _i215 = 0; _i215 < _map212.size; ++_i215)
-            {
-              _key213 = new GlobalStreamId();
-              _key213.read(iprot);
-              _val214 = iprot.readDouble();
-              _val210.put(_key213, _val214);
-            }
-          }
-          struct.execute_ms_avg.put(_key209, _val210);
-        }
-      }
-      struct.set_execute_ms_avg_isSet(true);
-    }
-  }
-
-}
-

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/generated/ClusterSummary.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/generated/ClusterSummary.java b/storm-core/src/jvm/backtype/storm/generated/ClusterSummary.java
deleted file mode 100644
index 9c42427..0000000
--- a/storm-core/src/jvm/backtype/storm/generated/ClusterSummary.java
+++ /dev/null
@@ -1,879 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Autogenerated by Thrift Compiler (0.9.3)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- *  @generated
- */
-package backtype.storm.generated;
-
-import org.apache.thrift.scheme.IScheme;
-import org.apache.thrift.scheme.SchemeFactory;
-import org.apache.thrift.scheme.StandardScheme;
-
-import org.apache.thrift.scheme.TupleScheme;
-import org.apache.thrift.protocol.TTupleProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.TException;
-import org.apache.thrift.async.AsyncMethodCallback;
-import org.apache.thrift.server.AbstractNonblockingServer.*;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.EnumMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.EnumSet;
-import java.util.Collections;
-import java.util.BitSet;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import javax.annotation.Generated;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
-public class ClusterSummary implements org.apache.thrift.TBase<ClusterSummary, ClusterSummary._Fields>, java.io.Serializable, Cloneable, Comparable<ClusterSummary> {
-  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ClusterSummary");
-
-  private static final org.apache.thrift.protocol.TField SUPERVISORS_FIELD_DESC = new org.apache.thrift.protocol.TField("supervisors", org.apache.thrift.protocol.TType.LIST, (short)1);
-  private static final org.apache.thrift.protocol.TField NIMBUS_UPTIME_SECS_FIELD_DESC = new org.apache.thrift.protocol.TField("nimbus_uptime_secs", org.apache.thrift.protocol.TType.I32, (short)2);
-  private static final org.apache.thrift.protocol.TField TOPOLOGIES_FIELD_DESC = new org.apache.thrift.protocol.TField("topologies", org.apache.thrift.protocol.TType.LIST, (short)3);
-  private static final org.apache.thrift.protocol.TField NIMBUSES_FIELD_DESC = new org.apache.thrift.protocol.TField("nimbuses", org.apache.thrift.protocol.TType.LIST, (short)4);
-
-  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
-  static {
-    schemes.put(StandardScheme.class, new ClusterSummaryStandardSchemeFactory());
-    schemes.put(TupleScheme.class, new ClusterSummaryTupleSchemeFactory());
-  }
-
-  private List<SupervisorSummary> supervisors; // required
-  private int nimbus_uptime_secs; // optional
-  private List<TopologySummary> topologies; // required
-  private List<NimbusSummary> nimbuses; // required
-
-  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    SUPERVISORS((short)1, "supervisors"),
-    NIMBUS_UPTIME_SECS((short)2, "nimbus_uptime_secs"),
-    TOPOLOGIES((short)3, "topologies"),
-    NIMBUSES((short)4, "nimbuses");
-
-    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
-
-    static {
-      for (_Fields field : EnumSet.allOf(_Fields.class)) {
-        byName.put(field.getFieldName(), field);
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, or null if its not found.
-     */
-    public static _Fields findByThriftId(int fieldId) {
-      switch(fieldId) {
-        case 1: // SUPERVISORS
-          return SUPERVISORS;
-        case 2: // NIMBUS_UPTIME_SECS
-          return NIMBUS_UPTIME_SECS;
-        case 3: // TOPOLOGIES
-          return TOPOLOGIES;
-        case 4: // NIMBUSES
-          return NIMBUSES;
-        default:
-          return null;
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, throwing an exception
-     * if it is not found.
-     */
-    public static _Fields findByThriftIdOrThrow(int fieldId) {
-      _Fields fields = findByThriftId(fieldId);
-      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-      return fields;
-    }
-
-    /**
-     * Find the _Fields constant that matches name, or null if its not found.
-     */
-    public static _Fields findByName(String name) {
-      return byName.get(name);
-    }
-
-    private final short _thriftId;
-    private final String _fieldName;
-
-    _Fields(short thriftId, String fieldName) {
-      _thriftId = thriftId;
-      _fieldName = fieldName;
-    }
-
-    public short getThriftFieldId() {
-      return _thriftId;
-    }
-
-    public String getFieldName() {
-      return _fieldName;
-    }
-  }
-
-  // isset id assignments
-  private static final int __NIMBUS_UPTIME_SECS_ISSET_ID = 0;
-  private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = {_Fields.NIMBUS_UPTIME_SECS};
-  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
-  static {
-    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.SUPERVISORS, new org.apache.thrift.meta_data.FieldMetaData("supervisors", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
-            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SupervisorSummary.class))));
-    tmpMap.put(_Fields.NIMBUS_UPTIME_SECS, new org.apache.thrift.meta_data.FieldMetaData("nimbus_uptime_secs", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
-    tmpMap.put(_Fields.TOPOLOGIES, new org.apache.thrift.meta_data.FieldMetaData("topologies", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
-            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TopologySummary.class))));
-    tmpMap.put(_Fields.NIMBUSES, new org.apache.thrift.meta_data.FieldMetaData("nimbuses", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
-            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, NimbusSummary.class))));
-    metaDataMap = Collections.unmodifiableMap(tmpMap);
-    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ClusterSummary.class, metaDataMap);
-  }
-
-  public ClusterSummary() {
-    this.nimbus_uptime_secs = 0;
-
-  }
-
-  public ClusterSummary(
-    List<SupervisorSummary> supervisors,
-    List<TopologySummary> topologies,
-    List<NimbusSummary> nimbuses)
-  {
-    this();
-    this.supervisors = supervisors;
-    this.topologies = topologies;
-    this.nimbuses = nimbuses;
-  }
-
-  /**
-   * Performs a deep copy on <i>other</i>.
-   */
-  public ClusterSummary(ClusterSummary other) {
-    __isset_bitfield = other.__isset_bitfield;
-    if (other.is_set_supervisors()) {
-      List<SupervisorSummary> __this__supervisors = new ArrayList<SupervisorSummary>(other.supervisors.size());
-      for (SupervisorSummary other_element : other.supervisors) {
-        __this__supervisors.add(new SupervisorSummary(other_element));
-      }
-      this.supervisors = __this__supervisors;
-    }
-    this.nimbus_uptime_secs = other.nimbus_uptime_secs;
-    if (other.is_set_topologies()) {
-      List<TopologySummary> __this__topologies = new ArrayList<TopologySummary>(other.topologies.size());
-      for (TopologySummary other_element : other.topologies) {
-        __this__topologies.add(new TopologySummary(other_element));
-      }
-      this.topologies = __this__topologies;
-    }
-    if (other.is_set_nimbuses()) {
-      List<NimbusSummary> __this__nimbuses = new ArrayList<NimbusSummary>(other.nimbuses.size());
-      for (NimbusSummary other_element : other.nimbuses) {
-        __this__nimbuses.add(new NimbusSummary(other_element));
-      }
-      this.nimbuses = __this__nimbuses;
-    }
-  }
-
-  public ClusterSummary deepCopy() {
-    return new ClusterSummary(this);
-  }
-
-  @Override
-  public void clear() {
-    this.supervisors = null;
-    this.nimbus_uptime_secs = 0;
-
-    this.topologies = null;
-    this.nimbuses = null;
-  }
-
-  public int get_supervisors_size() {
-    return (this.supervisors == null) ? 0 : this.supervisors.size();
-  }
-
-  public java.util.Iterator<SupervisorSummary> get_supervisors_iterator() {
-    return (this.supervisors == null) ? null : this.supervisors.iterator();
-  }
-
-  public void add_to_supervisors(SupervisorSummary elem) {
-    if (this.supervisors == null) {
-      this.supervisors = new ArrayList<SupervisorSummary>();
-    }
-    this.supervisors.add(elem);
-  }
-
-  public List<SupervisorSummary> get_supervisors() {
-    return this.supervisors;
-  }
-
-  public void set_supervisors(List<SupervisorSummary> supervisors) {
-    this.supervisors = supervisors;
-  }
-
-  public void unset_supervisors() {
-    this.supervisors = null;
-  }
-
-  /** Returns true if field supervisors is set (has been assigned a value) and false otherwise */
-  public boolean is_set_supervisors() {
-    return this.supervisors != null;
-  }
-
-  public void set_supervisors_isSet(boolean value) {
-    if (!value) {
-      this.supervisors = null;
-    }
-  }
-
-  public int get_nimbus_uptime_secs() {
-    return this.nimbus_uptime_secs;
-  }
-
-  public void set_nimbus_uptime_secs(int nimbus_uptime_secs) {
-    this.nimbus_uptime_secs = nimbus_uptime_secs;
-    set_nimbus_uptime_secs_isSet(true);
-  }
-
-  public void unset_nimbus_uptime_secs() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NIMBUS_UPTIME_SECS_ISSET_ID);
-  }
-
-  /** Returns true if field nimbus_uptime_secs is set (has been assigned a value) and false otherwise */
-  public boolean is_set_nimbus_uptime_secs() {
-    return EncodingUtils.testBit(__isset_bitfield, __NIMBUS_UPTIME_SECS_ISSET_ID);
-  }
-
-  public void set_nimbus_uptime_secs_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NIMBUS_UPTIME_SECS_ISSET_ID, value);
-  }
-
-  public int get_topologies_size() {
-    return (this.topologies == null) ? 0 : this.topologies.size();
-  }
-
-  public java.util.Iterator<TopologySummary> get_topologies_iterator() {
-    return (this.topologies == null) ? null : this.topologies.iterator();
-  }
-
-  public void add_to_topologies(TopologySummary elem) {
-    if (this.topologies == null) {
-      this.topologies = new ArrayList<TopologySummary>();
-    }
-    this.topologies.add(elem);
-  }
-
-  public List<TopologySummary> get_topologies() {
-    return this.topologies;
-  }
-
-  public void set_topologies(List<TopologySummary> topologies) {
-    this.topologies = topologies;
-  }
-
-  public void unset_topologies() {
-    this.topologies = null;
-  }
-
-  /** Returns true if field topologies is set (has been assigned a value) and false otherwise */
-  public boolean is_set_topologies() {
-    return this.topologies != null;
-  }
-
-  public void set_topologies_isSet(boolean value) {
-    if (!value) {
-      this.topologies = null;
-    }
-  }
-
-  public int get_nimbuses_size() {
-    return (this.nimbuses == null) ? 0 : this.nimbuses.size();
-  }
-
-  public java.util.Iterator<NimbusSummary> get_nimbuses_iterator() {
-    return (this.nimbuses == null) ? null : this.nimbuses.iterator();
-  }
-
-  public void add_to_nimbuses(NimbusSummary elem) {
-    if (this.nimbuses == null) {
-      this.nimbuses = new ArrayList<NimbusSummary>();
-    }
-    this.nimbuses.add(elem);
-  }
-
-  public List<NimbusSummary> get_nimbuses() {
-    return this.nimbuses;
-  }
-
-  public void set_nimbuses(List<NimbusSummary> nimbuses) {
-    this.nimbuses = nimbuses;
-  }
-
-  public void unset_nimbuses() {
-    this.nimbuses = null;
-  }
-
-  /** Returns true if field nimbuses is set (has been assigned a value) and false otherwise */
-  public boolean is_set_nimbuses() {
-    return this.nimbuses != null;
-  }
-
-  public void set_nimbuses_isSet(boolean value) {
-    if (!value) {
-      this.nimbuses = null;
-    }
-  }
-
-  public void setFieldValue(_Fields field, Object value) {
-    switch (field) {
-    case SUPERVISORS:
-      if (value == null) {
-        unset_supervisors();
-      } else {
-        set_supervisors((List<SupervisorSummary>)value);
-      }
-      break;
-
-    case NIMBUS_UPTIME_SECS:
-      if (value == null) {
-        unset_nimbus_uptime_secs();
-      } else {
-        set_nimbus_uptime_secs((Integer)value);
-      }
-      break;
-
-    case TOPOLOGIES:
-      if (value == null) {
-        unset_topologies();
-      } else {
-        set_topologies((List<TopologySummary>)value);
-      }
-      break;
-
-    case NIMBUSES:
-      if (value == null) {
-        unset_nimbuses();
-      } else {
-        set_nimbuses((List<NimbusSummary>)value);
-      }
-      break;
-
-    }
-  }
-
-  public Object getFieldValue(_Fields field) {
-    switch (field) {
-    case SUPERVISORS:
-      return get_supervisors();
-
-    case NIMBUS_UPTIME_SECS:
-      return get_nimbus_uptime_secs();
-
-    case TOPOLOGIES:
-      return get_topologies();
-
-    case NIMBUSES:
-      return get_nimbuses();
-
-    }
-    throw new IllegalStateException();
-  }
-
-  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
-  public boolean isSet(_Fields field) {
-    if (field == null) {
-      throw new IllegalArgumentException();
-    }
-
-    switch (field) {
-    case SUPERVISORS:
-      return is_set_supervisors();
-    case NIMBUS_UPTIME_SECS:
-      return is_set_nimbus_uptime_secs();
-    case TOPOLOGIES:
-      return is_set_topologies();
-    case NIMBUSES:
-      return is_set_nimbuses();
-    }
-    throw new IllegalStateException();
-  }
-
-  @Override
-  public boolean equals(Object that) {
-    if (that == null)
-      return false;
-    if (that instanceof ClusterSummary)
-      return this.equals((ClusterSummary)that);
-    return false;
-  }
-
-  public boolean equals(ClusterSummary that) {
-    if (that == null)
-      return false;
-
-    boolean this_present_supervisors = true && this.is_set_supervisors();
-    boolean that_present_supervisors = true && that.is_set_supervisors();
-    if (this_present_supervisors || that_present_supervisors) {
-      if (!(this_present_supervisors && that_present_supervisors))
-        return false;
-      if (!this.supervisors.equals(that.supervisors))
-        return false;
-    }
-
-    boolean this_present_nimbus_uptime_secs = true && this.is_set_nimbus_uptime_secs();
-    boolean that_present_nimbus_uptime_secs = true && that.is_set_nimbus_uptime_secs();
-    if (this_present_nimbus_uptime_secs || that_present_nimbus_uptime_secs) {
-      if (!(this_present_nimbus_uptime_secs && that_present_nimbus_uptime_secs))
-        return false;
-      if (this.nimbus_uptime_secs != that.nimbus_uptime_secs)
-        return false;
-    }
-
-    boolean this_present_topologies = true && this.is_set_topologies();
-    boolean that_present_topologies = true && that.is_set_topologies();
-    if (this_present_topologies || that_present_topologies) {
-      if (!(this_present_topologies && that_present_topologies))
-        return false;
-      if (!this.topologies.equals(that.topologies))
-        return false;
-    }
-
-    boolean this_present_nimbuses = true && this.is_set_nimbuses();
-    boolean that_present_nimbuses = true && that.is_set_nimbuses();
-    if (this_present_nimbuses || that_present_nimbuses) {
-      if (!(this_present_nimbuses && that_present_nimbuses))
-        return false;
-      if (!this.nimbuses.equals(that.nimbuses))
-        return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    List<Object> list = new ArrayList<Object>();
-
-    boolean present_supervisors = true && (is_set_supervisors());
-    list.add(present_supervisors);
-    if (present_supervisors)
-      list.add(supervisors);
-
-    boolean present_nimbus_uptime_secs = true && (is_set_nimbus_uptime_secs());
-    list.add(present_nimbus_uptime_secs);
-    if (present_nimbus_uptime_secs)
-      list.add(nimbus_uptime_secs);
-
-    boolean present_topologies = true && (is_set_topologies());
-    list.add(present_topologies);
-    if (present_topologies)
-      list.add(topologies);
-
-    boolean present_nimbuses = true && (is_set_nimbuses());
-    list.add(present_nimbuses);
-    if (present_nimbuses)
-      list.add(nimbuses);
-
-    return list.hashCode();
-  }
-
-  @Override
-  public int compareTo(ClusterSummary other) {
-    if (!getClass().equals(other.getClass())) {
-      return getClass().getName().compareTo(other.getClass().getName());
-    }
-
-    int lastComparison = 0;
-
-    lastComparison = Boolean.valueOf(is_set_supervisors()).compareTo(other.is_set_supervisors());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_supervisors()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.supervisors, other.supervisors);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_nimbus_uptime_secs()).compareTo(other.is_set_nimbus_uptime_secs());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_nimbus_uptime_secs()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nimbus_uptime_secs, other.nimbus_uptime_secs);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_topologies()).compareTo(other.is_set_topologies());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_topologies()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.topologies, other.topologies);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_nimbuses()).compareTo(other.is_set_nimbuses());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_nimbuses()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nimbuses, other.nimbuses);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    return 0;
-  }
-
-  public _Fields fieldForId(int fieldId) {
-    return _Fields.findByThriftId(fieldId);
-  }
-
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
-  }
-
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("ClusterSummary(");
-    boolean first = true;
-
-    sb.append("supervisors:");
-    if (this.supervisors == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.supervisors);
-    }
-    first = false;
-    if (is_set_nimbus_uptime_secs()) {
-      if (!first) sb.append(", ");
-      sb.append("nimbus_uptime_secs:");
-      sb.append(this.nimbus_uptime_secs);
-      first = false;
-    }
-    if (!first) sb.append(", ");
-    sb.append("topologies:");
-    if (this.topologies == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.topologies);
-    }
-    first = false;
-    if (!first) sb.append(", ");
-    sb.append("nimbuses:");
-    if (this.nimbuses == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.nimbuses);
-    }
-    first = false;
-    sb.append(")");
-    return sb.toString();
-  }
-
-  public void validate() throws org.apache.thrift.TException {
-    // check for required fields
-    if (!is_set_supervisors()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'supervisors' is unset! Struct:" + toString());
-    }
-
-    if (!is_set_topologies()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'topologies' is unset! Struct:" + toString());
-    }
-
-    if (!is_set_nimbuses()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'nimbuses' is unset! Struct:" + toString());
-    }
-
-    // check for sub-struct validity
-  }
-
-  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
-    try {
-      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
-    try {
-      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
-      __isset_bitfield = 0;
-      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private static class ClusterSummaryStandardSchemeFactory implements SchemeFactory {
-    public ClusterSummaryStandardScheme getScheme() {
-      return new ClusterSummaryStandardScheme();
-    }
-  }
-
-  private static class ClusterSummaryStandardScheme extends StandardScheme<ClusterSummary> {
-
-    public void read(org.apache.thrift.protocol.TProtocol iprot, ClusterSummary struct) throws org.apache.thrift.TException {
-      org.apache.thrift.protocol.TField schemeField;
-      iprot.readStructBegin();
-      while (true)
-      {
-        schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
-          break;
-        }
-        switch (schemeField.id) {
-          case 1: // SUPERVISORS
-            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
-              {
-                org.apache.thrift.protocol.TList _list92 = iprot.readListBegin();
-                struct.supervisors = new ArrayList<SupervisorSummary>(_list92.size);
-                SupervisorSummary _elem93;
-                for (int _i94 = 0; _i94 < _list92.size; ++_i94)
-                {
-                  _elem93 = new SupervisorSummary();
-                  _elem93.read(iprot);
-                  struct.supervisors.add(_elem93);
-                }
-                iprot.readListEnd();
-              }
-              struct.set_supervisors_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 2: // NIMBUS_UPTIME_SECS
-            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
-              struct.nimbus_uptime_secs = iprot.readI32();
-              struct.set_nimbus_uptime_secs_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 3: // TOPOLOGIES
-            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
-              {
-                org.apache.thrift.protocol.TList _list95 = iprot.readListBegin();
-                struct.topologies = new ArrayList<TopologySummary>(_list95.size);
-                TopologySummary _elem96;
-                for (int _i97 = 0; _i97 < _list95.size; ++_i97)
-                {
-                  _elem96 = new TopologySummary();
-                  _elem96.read(iprot);
-                  struct.topologies.add(_elem96);
-                }
-                iprot.readListEnd();
-              }
-              struct.set_topologies_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 4: // NIMBUSES
-            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
-              {
-                org.apache.thrift.protocol.TList _list98 = iprot.readListBegin();
-                struct.nimbuses = new ArrayList<NimbusSummary>(_list98.size);
-                NimbusSummary _elem99;
-                for (int _i100 = 0; _i100 < _list98.size; ++_i100)
-                {
-                  _elem99 = new NimbusSummary();
-                  _elem99.read(iprot);
-                  struct.nimbuses.add(_elem99);
-                }
-                iprot.readListEnd();
-              }
-              struct.set_nimbuses_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          default:
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-        }
-        iprot.readFieldEnd();
-      }
-      iprot.readStructEnd();
-      struct.validate();
-    }
-
-    public void write(org.apache.thrift.protocol.TProtocol oprot, ClusterSummary struct) throws org.apache.thrift.TException {
-      struct.validate();
-
-      oprot.writeStructBegin(STRUCT_DESC);
-      if (struct.supervisors != null) {
-        oprot.writeFieldBegin(SUPERVISORS_FIELD_DESC);
-        {
-          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.supervisors.size()));
-          for (SupervisorSummary _iter101 : struct.supervisors)
-          {
-            _iter101.write(oprot);
-          }
-          oprot.writeListEnd();
-        }
-        oprot.writeFieldEnd();
-      }
-      if (struct.is_set_nimbus_uptime_secs()) {
-        oprot.writeFieldBegin(NIMBUS_UPTIME_SECS_FIELD_DESC);
-        oprot.writeI32(struct.nimbus_uptime_secs);
-        oprot.writeFieldEnd();
-      }
-      if (struct.topologies != null) {
-        oprot.writeFieldBegin(TOPOLOGIES_FIELD_DESC);
-        {
-          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.topologies.size()));
-          for (TopologySummary _iter102 : struct.topologies)
-          {
-            _iter102.write(oprot);
-          }
-          oprot.writeListEnd();
-        }
-        oprot.writeFieldEnd();
-      }
-      if (struct.nimbuses != null) {
-        oprot.writeFieldBegin(NIMBUSES_FIELD_DESC);
-        {
-          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.nimbuses.size()));
-          for (NimbusSummary _iter103 : struct.nimbuses)
-          {
-            _iter103.write(oprot);
-          }
-          oprot.writeListEnd();
-        }
-        oprot.writeFieldEnd();
-      }
-      oprot.writeFieldStop();
-      oprot.writeStructEnd();
-    }
-
-  }
-
-  private static class ClusterSummaryTupleSchemeFactory implements SchemeFactory {
-    public ClusterSummaryTupleScheme getScheme() {
-      return new ClusterSummaryTupleScheme();
-    }
-  }
-
-  private static class ClusterSummaryTupleScheme extends TupleScheme<ClusterSummary> {
-
-    @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, ClusterSummary struct) throws org.apache.thrift.TException {
-      TTupleProtocol oprot = (TTupleProtocol) prot;
-      {
-        oprot.writeI32(struct.supervisors.size());
-        for (SupervisorSummary _iter104 : struct.supervisors)
-        {
-          _iter104.write(oprot);
-        }
-      }
-      {
-        oprot.writeI32(struct.topologies.size());
-        for (TopologySummary _iter105 : struct.topologies)
-        {
-          _iter105.write(oprot);
-        }
-      }
-      {
-        oprot.writeI32(struct.nimbuses.size());
-        for (NimbusSummary _iter106 : struct.nimbuses)
-        {
-          _iter106.write(oprot);
-        }
-      }
-      BitSet optionals = new BitSet();
-      if (struct.is_set_nimbus_uptime_secs()) {
-        optionals.set(0);
-      }
-      oprot.writeBitSet(optionals, 1);
-      if (struct.is_set_nimbus_uptime_secs()) {
-        oprot.writeI32(struct.nimbus_uptime_secs);
-      }
-    }
-
-    @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, ClusterSummary struct) throws org.apache.thrift.TException {
-      TTupleProtocol iprot = (TTupleProtocol) prot;
-      {
-        org.apache.thrift.protocol.TList _list107 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.supervisors = new ArrayList<SupervisorSummary>(_list107.size);
-        SupervisorSummary _elem108;
-        for (int _i109 = 0; _i109 < _list107.size; ++_i109)
-        {
-          _elem108 = new SupervisorSummary();
-          _elem108.read(iprot);
-          struct.supervisors.add(_elem108);
-        }
-      }
-      struct.set_supervisors_isSet(true);
-      {
-        org.apache.thrift.protocol.TList _list110 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.topologies = new ArrayList<TopologySummary>(_list110.size);
-        TopologySummary _elem111;
-        for (int _i112 = 0; _i112 < _list110.size; ++_i112)
-        {
-          _elem111 = new TopologySummary();
-          _elem111.read(iprot);
-          struct.topologies.add(_elem111);
-        }
-      }
-      struct.set_topologies_isSet(true);
-      {
-        org.apache.thrift.protocol.TList _list113 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.nimbuses = new ArrayList<NimbusSummary>(_list113.size);
-        NimbusSummary _elem114;
-        for (int _i115 = 0; _i115 < _list113.size; ++_i115)
-        {
-          _elem114 = new NimbusSummary();
-          _elem114.read(iprot);
-          struct.nimbuses.add(_elem114);
-        }
-      }
-      struct.set_nimbuses_isSet(true);
-      BitSet incoming = iprot.readBitSet(1);
-      if (incoming.get(0)) {
-        struct.nimbus_uptime_secs = iprot.readI32();
-        struct.set_nimbus_uptime_secs_isSet(true);
-      }
-    }
-  }
-
-}
-


[38/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
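(The diff hunks above remove the old backtype.storm.generated classes, while later parts of this commit add their org.apache.storm counterparts. As a rough, hypothetical sketch only, and not code taken from this commit, one way source-level backwards compatibility can be kept after such a package move is a thin deprecated shim in the old package that simply extends the relocated class. The class name ClusterSummaryCompat below and its exact placement are assumptions made purely for illustration.)

    // Hypothetical illustration only -- NOT part of this commit.
    // Assumed shim class in the legacy package; everything is inherited
    // from the relocated, Thrift-generated org.apache.storm class.
    package backtype.storm.generated;

    /** Deprecated alias kept so old imports still compile; delegates to the new class. */
    @Deprecated
    public class ClusterSummaryCompat extends org.apache.storm.generated.ClusterSummary {

        public ClusterSummaryCompat() {
            super();
        }

        public ClusterSummaryCompat(org.apache.storm.generated.ClusterSummary other) {
            super(other);   // reuse the generated deep-copy constructor
        }
    }

(Whether Storm actually ships such shims or relies on another mechanism, for example bytecode rewriting of user jars, is not shown in this part of the commit.)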
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/TridentKafkaEmitter.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/TridentKafkaEmitter.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/TridentKafkaEmitter.java
new file mode 100644
index 0000000..6eddaf5
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/TridentKafkaEmitter.java
@@ -0,0 +1,287 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka.trident;
+
+import org.apache.storm.Config;
+import org.apache.storm.metric.api.CombinedMetric;
+import org.apache.storm.metric.api.MeanReducer;
+import org.apache.storm.metric.api.ReducedMetric;
+import org.apache.storm.task.TopologyContext;
+import com.google.common.collect.ImmutableMap;
+import kafka.javaapi.consumer.SimpleConsumer;
+import kafka.javaapi.message.ByteBufferMessageSet;
+import kafka.message.Message;
+import kafka.message.MessageAndOffset;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.storm.kafka.*;
+import org.apache.storm.kafka.TopicOffsetOutOfRangeException;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.spout.IOpaquePartitionedTridentSpout;
+import org.apache.storm.trident.spout.IPartitionedTridentSpout;
+import org.apache.storm.trident.topology.TransactionAttempt;
+
+import java.util.*;
+
+public class TridentKafkaEmitter {
+
+    public static final Logger LOG = LoggerFactory.getLogger(TridentKafkaEmitter.class);
+
+    private DynamicPartitionConnections _connections;
+    private String _topologyName;
+    private KafkaUtils.KafkaOffsetMetric _kafkaOffsetMetric;
+    private ReducedMetric _kafkaMeanFetchLatencyMetric;
+    private CombinedMetric _kafkaMaxFetchLatencyMetric;
+    private TridentKafkaConfig _config;
+    private String _topologyInstanceId;
+
+    public TridentKafkaEmitter(Map conf, TopologyContext context, TridentKafkaConfig config, String topologyInstanceId) {
+        _config = config;
+        _topologyInstanceId = topologyInstanceId;
+        _connections = new DynamicPartitionConnections(_config, KafkaUtils.makeBrokerReader(conf, _config));
+        _topologyName = (String) conf.get(Config.TOPOLOGY_NAME);
+        _kafkaOffsetMetric = new KafkaUtils.KafkaOffsetMetric(_connections);
+        context.registerMetric("kafkaOffset", _kafkaOffsetMetric, _config.metricsTimeBucketSizeInSecs);
+        _kafkaMeanFetchLatencyMetric = context.registerMetric("kafkaFetchAvg", new MeanReducer(), _config.metricsTimeBucketSizeInSecs);
+        _kafkaMaxFetchLatencyMetric = context.registerMetric("kafkaFetchMax", new MaxMetric(), _config.metricsTimeBucketSizeInSecs);
+    }
+
+
+    private Map failFastEmitNewPartitionBatch(TransactionAttempt attempt, TridentCollector collector, Partition partition, Map lastMeta) {
+        SimpleConsumer consumer = _connections.register(partition);
+        Map ret = doEmitNewPartitionBatch(consumer, partition, collector, lastMeta);
+        _kafkaOffsetMetric.setLatestEmittedOffset(partition, (Long) ret.get("offset"));
+        return ret;
+    }
+
+    private Map emitNewPartitionBatch(TransactionAttempt attempt, TridentCollector collector, Partition partition, Map lastMeta) {
+        try {
+            return failFastEmitNewPartitionBatch(attempt, collector, partition, lastMeta);
+        } catch (FailedFetchException e) {
+            LOG.warn("Failed to fetch from partition " + partition);
+            if (lastMeta == null) {
+                return null;
+            } else {
+                Map ret = new HashMap();
+                ret.put("offset", lastMeta.get("nextOffset"));
+                ret.put("nextOffset", lastMeta.get("nextOffset"));
+                ret.put("partition", partition.partition);
+                ret.put("broker", ImmutableMap.of("host", partition.host.host, "port", partition.host.port));
+                ret.put("topic", partition.topic);
+                ret.put("topology", ImmutableMap.of("name", _topologyName, "id", _topologyInstanceId));
+                return ret;
+            }
+        }
+    }
+
+    private Map doEmitNewPartitionBatch(SimpleConsumer consumer, Partition partition, TridentCollector collector, Map lastMeta) {
+        long offset;
+        if (lastMeta != null) {
+            String lastInstanceId = null;
+            Map lastTopoMeta = (Map) lastMeta.get("topology");
+            if (lastTopoMeta != null) {
+                lastInstanceId = (String) lastTopoMeta.get("id");
+            }
+            if (_config.ignoreZkOffsets && !_topologyInstanceId.equals(lastInstanceId)) {
+                offset = KafkaUtils.getOffset(consumer, partition.topic, partition.partition, _config.startOffsetTime);
+            } else {
+                offset = (Long) lastMeta.get("nextOffset");
+            }
+        } else {
+            offset = KafkaUtils.getOffset(consumer, partition.topic, partition.partition, _config);
+        }
+
+        ByteBufferMessageSet msgs = null;
+        try {
+            msgs = fetchMessages(consumer, partition, offset);
+        } catch (TopicOffsetOutOfRangeException e) {
+            long newOffset = KafkaUtils.getOffset(consumer, partition.topic, partition.partition, kafka.api.OffsetRequest.EarliestTime());
+            LOG.warn("OffsetOutOfRange: Updating offset from offset = " + offset + " to offset = " + newOffset);
+            offset = newOffset;
+            msgs = KafkaUtils.fetchMessages(_config, consumer, partition, offset);
+        }
+
+        long endoffset = offset;
+        for (MessageAndOffset msg : msgs) {
+            emit(collector, msg.message(), partition, msg.offset());
+            endoffset = msg.nextOffset();
+        }
+        Map newMeta = new HashMap();
+        newMeta.put("offset", offset);
+        newMeta.put("nextOffset", endoffset);
+        newMeta.put("instanceId", _topologyInstanceId);
+        newMeta.put("partition", partition.partition);
+        newMeta.put("broker", ImmutableMap.of("host", partition.host.host, "port", partition.host.port));
+        newMeta.put("topic", partition.topic);
+        newMeta.put("topology", ImmutableMap.of("name", _topologyName, "id", _topologyInstanceId));
+        return newMeta;
+    }
+
+    private ByteBufferMessageSet fetchMessages(SimpleConsumer consumer, Partition partition, long offset) {
+        long start = System.nanoTime();
+        ByteBufferMessageSet msgs = null;
+        msgs = KafkaUtils.fetchMessages(_config, consumer, partition, offset);
+        long end = System.nanoTime();
+        long millis = (end - start) / 1000000;
+        _kafkaMeanFetchLatencyMetric.update(millis);
+        _kafkaMaxFetchLatencyMetric.update(millis);
+        return msgs;
+    }
+
+    /**
+     * Re-emit the batch described by the metadata recorded when it was first emitted.
+     *
+     * @param attempt   the transaction attempt being replayed
+     * @param collector collector to emit the recovered tuples to
+     * @param partition the Kafka partition the batch was read from
+     * @param meta      metadata captured when the batch was originally emitted
+     */
+    private void reEmitPartitionBatch(TransactionAttempt attempt, TridentCollector collector, Partition partition, Map meta) {
+        LOG.info("re-emitting batch, attempt " + attempt);
+        String instanceId = (String) meta.get("instanceId");
+        if (!_config.ignoreZkOffsets || instanceId.equals(_topologyInstanceId)) {
+            SimpleConsumer consumer = _connections.register(partition);
+            long offset = (Long) meta.get("offset");
+            long nextOffset = (Long) meta.get("nextOffset");
+            ByteBufferMessageSet msgs = null;
+            msgs = fetchMessages(consumer, partition, offset);
+
+            if(msgs != null) {
+                for (MessageAndOffset msg : msgs) {
+                    if (offset == nextOffset) {
+                        break;
+                    }
+                    if (offset > nextOffset) {
+                        throw new RuntimeException("Error when re-emitting batch. overshot the end offset");
+                    }
+                    emit(collector, msg.message(), partition, msg.offset());
+                    offset = msg.nextOffset();
+                }
+            }
+        }
+    }
+
+    private void emit(TridentCollector collector, Message msg, Partition partition, long offset) {
+        Iterable<List<Object>> values;
+        if (_config.scheme instanceof MessageMetadataSchemeAsMultiScheme) {
+            values = KafkaUtils.generateTuples((MessageMetadataSchemeAsMultiScheme) _config.scheme, msg, partition, offset);
+        } else {
+            values = KafkaUtils.generateTuples(_config, msg, partition.topic);
+        }
+
+        if (values != null) {
+            for (List<Object> value : values) {
+                collector.emit(value);
+            }
+        }
+    }
+
+    private void clear() {
+        _connections.clear();
+    }
+
+    private List<Partition> orderPartitions(List<GlobalPartitionInformation> partitions) {
+        List<Partition> part = new ArrayList<Partition>();
+        for (GlobalPartitionInformation globalPartitionInformation : partitions)
+            part.addAll(globalPartitionInformation.getOrderedPartitions());
+        return part;
+    }
+
+    private void refresh(List<Partition> list) {
+        _connections.clear();
+        _kafkaOffsetMetric.refreshPartitions(new HashSet<Partition>(list));
+    }
+
+
+    public IOpaquePartitionedTridentSpout.Emitter<List<GlobalPartitionInformation>, Partition, Map> asOpaqueEmitter() {
+
+        return new IOpaquePartitionedTridentSpout.Emitter<List<GlobalPartitionInformation>, Partition, Map>() {
+
+            /**
+             * Emit a batch of tuples for a partition/transaction.
+             *
+             * Return the metadata describing this batch that will be used as lastPartitionMeta
+             * for defining the parameters of the next batch.
+             */
+            @Override
+            public Map emitPartitionBatch(TransactionAttempt transactionAttempt, TridentCollector tridentCollector, Partition partition, Map map) {
+                return emitNewPartitionBatch(transactionAttempt, tridentCollector, partition, map);
+            }
+
+            @Override
+            public void refreshPartitions(List<Partition> partitions) {
+                refresh(partitions);
+            }
+
+            @Override
+            public List<Partition> getOrderedPartitions(List<GlobalPartitionInformation> partitionInformation) {
+                return orderPartitions(partitionInformation);
+            }
+
+            @Override
+            public void close() {
+                clear();
+            }
+        };
+    }
+
+    public IPartitionedTridentSpout.Emitter asTransactionalEmitter() {
+        return new IPartitionedTridentSpout.Emitter<List<GlobalPartitionInformation>, Partition, Map>() {
+
+            /**
+             * Emit a batch of tuples for a partition/transaction that's never been emitted before.
+             * Return the metadata that can be used to reconstruct this partition/batch in the future.
+             */
+            @Override
+            public Map emitPartitionBatchNew(TransactionAttempt transactionAttempt, TridentCollector tridentCollector, Partition partition, Map map) {
+                return failFastEmitNewPartitionBatch(transactionAttempt, tridentCollector, partition, map);
+            }
+
+            /**
+             * Emit a batch of tuples for a partition/transaction that has been emitted before, using
+             * the metadata created when it was first emitted.
+             */
+            @Override
+            public void emitPartitionBatch(TransactionAttempt transactionAttempt, TridentCollector tridentCollector, Partition partition, Map map) {
+                reEmitPartitionBatch(transactionAttempt, tridentCollector, partition, map);
+            }
+
+            /**
+             * This method is called when this task is responsible for a new set of partitions. Should be used
+             * to manage things like connections to brokers.
+             */
+            @Override
+            public void refreshPartitions(List<Partition> partitions) {
+                refresh(partitions);
+            }
+
+            @Override
+            public List<Partition> getOrderedPartitions(List<GlobalPartitionInformation> partitionInformation) {
+                return orderPartitions(partitionInformation);
+            }
+
+            @Override
+            public void close() {
+                clear();
+            }
+        };
+
+    }
+
+
+}

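For context, the emitter above is not used directly in a topology; it backs the opaque and transactional Trident Kafka spouts. A minimal sketch of wiring the opaque variant into a Trident topology follows (the ZooKeeper address and topic name below are placeholders, not taken from this patch):

    import org.apache.storm.kafka.StringScheme;
    import org.apache.storm.kafka.ZkHosts;
    import org.apache.storm.kafka.trident.OpaqueTridentKafkaSpout;
    import org.apache.storm.kafka.trident.TridentKafkaConfig;
    import org.apache.storm.spout.SchemeAsMultiScheme;
    import org.apache.storm.trident.TridentTopology;

    public class TridentKafkaSpoutSketch {
        public static TridentTopology build() {
            // Broker metadata is discovered through ZooKeeper; address and topic are placeholders.
            ZkHosts hosts = new ZkHosts("localhost:2181");
            TridentKafkaConfig spoutConfig = new TridentKafkaConfig(hosts, "test-topic");
            spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());

            // The opaque spout delegates batch emission to TridentKafkaEmitter.asOpaqueEmitter().
            OpaqueTridentKafkaSpout spout = new OpaqueTridentKafkaSpout(spoutConfig);

            TridentTopology topology = new TridentTopology();
            topology.newStream("kafka-stream", spout);
            return topology;
        }
    }
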
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/TridentKafkaState.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/TridentKafkaState.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/TridentKafkaState.java
new file mode 100644
index 0000000..5741dc7
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/TridentKafkaState.java
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka.trident;
+
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.topology.FailedException;
+import org.apache.commons.lang.Validate;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.clients.producer.RecordMetadata;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.storm.kafka.trident.mapper.TridentTupleToKafkaMapper;
+import org.apache.storm.kafka.trident.selector.KafkaTopicSelector;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.state.State;
+import org.apache.storm.trident.tuple.TridentTuple;
+
+import java.util.List;
+import java.util.Properties;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+
+public class TridentKafkaState implements State {
+    private static final Logger LOG = LoggerFactory.getLogger(TridentKafkaState.class);
+
+    private KafkaProducer producer;
+    private OutputCollector collector;
+
+    private TridentTupleToKafkaMapper mapper;
+    private KafkaTopicSelector topicSelector;
+
+    public TridentKafkaState withTridentTupleToKafkaMapper(TridentTupleToKafkaMapper mapper) {
+        this.mapper = mapper;
+        return this;
+    }
+
+    public TridentKafkaState withKafkaTopicSelector(KafkaTopicSelector selector) {
+        this.topicSelector = selector;
+        return this;
+    }
+
+    @Override
+    public void beginCommit(Long txid) {
+        LOG.debug("beginCommit is Noop.");
+    }
+
+    @Override
+    public void commit(Long txid) {
+        LOG.debug("commit is Noop.");
+    }
+
+    public void prepare(Properties options) {
+        Validate.notNull(mapper, "mapper can not be null");
+        Validate.notNull(topicSelector, "topicSelector can not be null");
+        producer = new KafkaProducer(options);
+    }
+
+    public void updateState(List<TridentTuple> tuples, TridentCollector collector) {
+        String topic = null;
+        for (TridentTuple tuple : tuples) {
+            try {
+                topic = topicSelector.getTopic(tuple);
+
+                if(topic != null) {
+                    Future<RecordMetadata> result = producer.send(new ProducerRecord(topic,
+                            mapper.getKeyFromTuple(tuple), mapper.getMessageFromTuple(tuple)));
+                    try {
+                        result.get();
+                    } catch (ExecutionException e) {
+                        String errorMsg = "Could not retrieve result for message with key = "
+                                + mapper.getKeyFromTuple(tuple) + " from topic = " + topic;
+                        LOG.error(errorMsg, e);
+                        throw new FailedException(errorMsg, e);
+                    }
+                } else {
+                    LOG.warn("skipping key = " + mapper.getKeyFromTuple(tuple) + ", topic selector returned null.");
+                }
+            } catch (Exception ex) {
+                String errorMsg = "Could not send message with key = " + mapper.getKeyFromTuple(tuple)
+                        + " to topic = " + topic;
+                LOG.warn(errorMsg, ex);
+                throw new FailedException(errorMsg, ex);
+            }
+        }
+    }
+}

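TridentKafkaState.prepare(...) hands its Properties straight to the new KafkaProducer, so the usual producer settings must be supplied. A minimal sketch, assuming String keys and messages and a placeholder broker address:

    import java.util.Properties;

    public class ProducerPropsSketch {
        public static Properties producerProperties() {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092");   // placeholder broker address
            props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            props.put("acks", "1");                              // wait for the partition leader to acknowledge each send
            return props;
        }
    }
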
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/TridentKafkaStateFactory.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/TridentKafkaStateFactory.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/TridentKafkaStateFactory.java
new file mode 100644
index 0000000..f564510
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/TridentKafkaStateFactory.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka.trident;
+
+import org.apache.storm.task.IMetricsContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.storm.kafka.trident.mapper.TridentTupleToKafkaMapper;
+import org.apache.storm.kafka.trident.selector.KafkaTopicSelector;
+import org.apache.storm.trident.state.State;
+import org.apache.storm.trident.state.StateFactory;
+
+import java.util.Map;
+import java.util.Properties;
+
+public class TridentKafkaStateFactory implements StateFactory {
+
+    private static final Logger LOG = LoggerFactory.getLogger(TridentKafkaStateFactory.class);
+
+    private TridentTupleToKafkaMapper mapper;
+    private KafkaTopicSelector topicSelector;
+    private Properties producerProperties = new Properties();
+
+    public TridentKafkaStateFactory withTridentTupleToKafkaMapper(TridentTupleToKafkaMapper mapper) {
+        this.mapper = mapper;
+        return this;
+    }
+
+    public TridentKafkaStateFactory withKafkaTopicSelector(KafkaTopicSelector selector) {
+        this.topicSelector = selector;
+        return this;
+    }
+
+    public TridentKafkaStateFactory withProducerProperties(Properties props) {
+        this.producerProperties = props;
+        return this;
+    }
+
+    @Override
+    public State makeState(Map conf, IMetricsContext metrics, int partitionIndex, int numPartitions) {
+        LOG.info("makeState(partitonIndex={}, numpartitions={}", partitionIndex, numPartitions);
+        TridentKafkaState state = new TridentKafkaState()
+                .withKafkaTopicSelector(this.topicSelector)
+                .withTridentTupleToKafkaMapper(this.mapper);
+        state.prepare(producerProperties);
+        return state;
+    }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/TridentKafkaUpdater.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/TridentKafkaUpdater.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/TridentKafkaUpdater.java
new file mode 100644
index 0000000..7a905ab
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/TridentKafkaUpdater.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka.trident;
+
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.state.BaseStateUpdater;
+import org.apache.storm.trident.tuple.TridentTuple;
+
+import java.util.List;
+
+public class TridentKafkaUpdater extends BaseStateUpdater<TridentKafkaState> {
+    @Override
+    public void updateState(TridentKafkaState state, List<TridentTuple> tuples, TridentCollector collector) {
+        state.updateState(tuples, collector);
+    }
+}

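Taken together, the factory, state, updater, mapper and selector classes added above form the Trident write path. A minimal sketch of attaching them to a stream whose tuples carry "key" and "message" fields (the field names, topic and broker address are placeholders):

    import java.util.Properties;

    import org.apache.storm.kafka.trident.TridentKafkaStateFactory;
    import org.apache.storm.kafka.trident.TridentKafkaUpdater;
    import org.apache.storm.kafka.trident.mapper.FieldNameBasedTupleToKafkaMapper;
    import org.apache.storm.kafka.trident.selector.DefaultTopicSelector;
    import org.apache.storm.trident.Stream;
    import org.apache.storm.tuple.Fields;

    public class TridentKafkaWriteSketch {
        public static void attachKafkaSink(Stream stream) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092");   // placeholder broker address
            props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

            TridentKafkaStateFactory stateFactory = new TridentKafkaStateFactory()
                    .withProducerProperties(props)
                    .withKafkaTopicSelector(new DefaultTopicSelector("output-topic"))
                    .withTridentTupleToKafkaMapper(
                            new FieldNameBasedTupleToKafkaMapper<String, String>("key", "message"));

            // partitionPersist hands each batch to TridentKafkaUpdater, which delegates to
            // TridentKafkaState.updateState(...).
            stream.partitionPersist(stateFactory, new Fields("key", "message"),
                    new TridentKafkaUpdater(), new Fields());
        }
    }
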
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/ZkBrokerReader.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/ZkBrokerReader.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/ZkBrokerReader.java
new file mode 100644
index 0000000..d26c341
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/ZkBrokerReader.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka.trident;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.storm.kafka.DynamicBrokersReader;
+import org.apache.storm.kafka.ZkHosts;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+
+public class ZkBrokerReader implements IBrokerReader {
+
+	public static final Logger LOG = LoggerFactory.getLogger(ZkBrokerReader.class);
+
+	List<GlobalPartitionInformation> cachedBrokers = new ArrayList<GlobalPartitionInformation>();
+	DynamicBrokersReader reader;
+	long lastRefreshTimeMs;
+
+
+	long refreshMillis;
+
+	public ZkBrokerReader(Map conf, String topic, ZkHosts hosts) {
+		try {
+			reader = new DynamicBrokersReader(conf, hosts.brokerZkStr, hosts.brokerZkPath, topic);
+			cachedBrokers = reader.getBrokerInfo();
+			lastRefreshTimeMs = System.currentTimeMillis();
+			refreshMillis = hosts.refreshFreqSecs * 1000L;
+		} catch (java.net.SocketTimeoutException e) {
+			LOG.warn("Failed to update brokers", e);
+		}
+
+	}
+
+	private void refresh() {
+		long currTime = System.currentTimeMillis();
+		if (currTime > lastRefreshTimeMs + refreshMillis) {
+			try {
+				LOG.info("brokers need refreshing because " + refreshMillis + " ms have elapsed");
+				cachedBrokers = reader.getBrokerInfo();
+				lastRefreshTimeMs = currTime;
+			} catch (java.net.SocketTimeoutException e) {
+				LOG.warn("Failed to update brokers", e);
+			}
+		}
+	}
+	@Override
+	public GlobalPartitionInformation getBrokerForTopic(String topic) {
+		refresh();
+        for(GlobalPartitionInformation partitionInformation : cachedBrokers) {
+            if (partitionInformation.topic.equals(topic)) return partitionInformation;
+        }
+		return null;
+	}
+
+	@Override
+	public List<GlobalPartitionInformation> getAllBrokers() {
+		refresh();
+		return cachedBrokers;
+	}
+
+	@Override
+	public void close() {
+		reader.close();
+	}
+}

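ZkBrokerReader caches broker metadata and only re-reads it from ZooKeeper once refreshFreqSecs (converted to milliseconds in its constructor) has elapsed. A minimal sketch of configuring that interval via ZkHosts (the connect string is a placeholder):

    import org.apache.storm.kafka.ZkHosts;

    public class ZkHostsSketch {
        public static ZkHosts hosts() {
            // Placeholder ZooKeeper connect string; "/brokers" is the default broker path.
            ZkHosts hosts = new ZkHosts("zk1:2181,zk2:2181", "/brokers");
            hosts.refreshFreqSecs = 120;  // ZkBrokerReader refreshes cached brokers at most every 120 s
            return hosts;
        }
    }
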
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/mapper/FieldNameBasedTupleToKafkaMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/mapper/FieldNameBasedTupleToKafkaMapper.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/mapper/FieldNameBasedTupleToKafkaMapper.java
new file mode 100644
index 0000000..2d04971
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/mapper/FieldNameBasedTupleToKafkaMapper.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka.trident.mapper;
+
+import org.apache.storm.trident.tuple.TridentTuple;
+
+public class FieldNameBasedTupleToKafkaMapper<K, V> implements TridentTupleToKafkaMapper {
+
+    public final String keyFieldName;
+    public final String msgFieldName;
+
+    public FieldNameBasedTupleToKafkaMapper(String keyFieldName, String msgFieldName) {
+        this.keyFieldName = keyFieldName;
+        this.msgFieldName = msgFieldName;
+    }
+
+    @Override
+    public K getKeyFromTuple(TridentTuple tuple) {
+        return (K) tuple.getValueByField(keyFieldName);
+    }
+
+    @Override
+    public V getMessageFromTuple(TridentTuple tuple) {
+        return (V) tuple.getValueByField(msgFieldName);
+    }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/mapper/TridentTupleToKafkaMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/mapper/TridentTupleToKafkaMapper.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/mapper/TridentTupleToKafkaMapper.java
new file mode 100644
index 0000000..28c6c89
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/mapper/TridentTupleToKafkaMapper.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka.trident.mapper;
+
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.trident.tuple.TridentTuple;
+
+import java.io.Serializable;
+
+public interface TridentTupleToKafkaMapper<K,V>  extends Serializable {
+    K getKeyFromTuple(TridentTuple tuple);
+    V getMessageFromTuple(TridentTuple tuple);
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/selector/DefaultTopicSelector.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/selector/DefaultTopicSelector.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/selector/DefaultTopicSelector.java
new file mode 100644
index 0000000..7ae49a3
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/selector/DefaultTopicSelector.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka.trident.selector;
+
+import org.apache.storm.trident.tuple.TridentTuple;
+
+public class DefaultTopicSelector implements KafkaTopicSelector {
+
+    private final String topicName;
+
+    public DefaultTopicSelector(final String topicName) {
+        this.topicName = topicName;
+    }
+
+    @Override
+    public String getTopic(TridentTuple tuple) {
+        return topicName;
+    }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/selector/KafkaTopicSelector.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/selector/KafkaTopicSelector.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/selector/KafkaTopicSelector.java
new file mode 100644
index 0000000..012a6c7
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/selector/KafkaTopicSelector.java
@@ -0,0 +1,26 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka.trident.selector;
+
+import org.apache.storm.trident.tuple.TridentTuple;
+
+import java.io.Serializable;
+
+public interface KafkaTopicSelector extends Serializable {
+    String getTopic(TridentTuple tuple);
+}

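DefaultTopicSelector always returns a single fixed topic; routing per tuple only requires a custom KafkaTopicSelector. A hypothetical sketch that reads the target topic from a tuple field and falls back to a default:

    import org.apache.storm.kafka.trident.selector.KafkaTopicSelector;
    import org.apache.storm.trident.tuple.TridentTuple;

    // Hypothetical selector (not part of this patch): routes each tuple to the topic named
    // in the given field, or to a default topic when that value is null.
    public class FieldBasedTopicSelector implements KafkaTopicSelector {
        private final String fieldName;
        private final String defaultTopic;

        public FieldBasedTopicSelector(String fieldName, String defaultTopic) {
            this.fieldName = fieldName;
            this.defaultTopic = defaultTopic;
        }

        @Override
        public String getTopic(TridentTuple tuple) {
            Object topic = tuple.getValueByField(fieldName);
            return topic != null ? topic.toString() : defaultTopic;
        }
    }
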
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/Broker.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/Broker.java b/external/storm-kafka/src/jvm/storm/kafka/Broker.java
deleted file mode 100644
index 513ab22..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/Broker.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import com.google.common.base.Objects;
-
-import java.io.Serializable;
-
-public class Broker implements Serializable, Comparable<Broker> {
-    public String host;
-    public int port;
-
-    // for kryo compatibility
-    private Broker() {
-	
-    }
-    
-    public Broker(String host, int port) {
-        this.host = host;
-        this.port = port;
-    }
-
-    public Broker(String host) {
-        this(host, 9092);
-    }
-
-    @Override
-    public int hashCode() {
-        return Objects.hashCode(host, port);
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-        if (this == obj) {
-            return true;
-        }
-        if (obj == null || getClass() != obj.getClass()) {
-            return false;
-        }
-        final Broker other = (Broker) obj;
-        return Objects.equal(this.host, other.host) && Objects.equal(this.port, other.port);
-    }
-
-    @Override
-    public String toString() {
-        return host + ":" + port;
-    }
-
-    public static Broker fromString(String host) {
-        Broker hp;
-        String[] spec = host.split(":");
-        if (spec.length == 1) {
-            hp = new Broker(spec[0]);
-        } else if (spec.length == 2) {
-            hp = new Broker(spec[0], Integer.parseInt(spec[1]));
-        } else {
-            throw new IllegalArgumentException("Invalid host specification: " + host);
-        }
-        return hp;
-    }
-
-
-    @Override
-    public int compareTo(Broker o) {
-        if (this.host.equals(o.host)) {
-            return this.port - o.port;
-        } else {
-            return this.host.compareTo(o.host);
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/BrokerHosts.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/BrokerHosts.java b/external/storm-kafka/src/jvm/storm/kafka/BrokerHosts.java
deleted file mode 100644
index 1a06fc5..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/BrokerHosts.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import java.io.Serializable;
-
-
-public interface BrokerHosts extends Serializable {
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/ByteBufferSerializer.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/ByteBufferSerializer.java b/external/storm-kafka/src/jvm/storm/kafka/ByteBufferSerializer.java
deleted file mode 100644
index 1a7238e..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/ByteBufferSerializer.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import backtype.storm.utils.Utils;
-import org.apache.kafka.common.serialization.Serializer;
-
-import java.nio.ByteBuffer;
-import java.util.Map;
-
-public class ByteBufferSerializer implements Serializer<ByteBuffer> {
-  @Override
-  public void configure(Map<String, ?> map, boolean b) {
-
-  }
-
-  @Override
-  public void close() {
-
-  }
-
-  @Override
-  public byte[] serialize(String s, ByteBuffer b) {
-    return Utils.toByteArray(b);
-  }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/DynamicBrokersReader.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/DynamicBrokersReader.java b/external/storm-kafka/src/jvm/storm/kafka/DynamicBrokersReader.java
deleted file mode 100644
index d0f6724..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/DynamicBrokersReader.java
+++ /dev/null
@@ -1,213 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import backtype.storm.Config;
-import backtype.storm.utils.Utils;
-import com.google.common.base.Preconditions;
-import org.apache.curator.framework.CuratorFramework;
-import org.apache.curator.framework.CuratorFrameworkFactory;
-import org.apache.curator.retry.RetryNTimes;
-import org.json.simple.JSONValue;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import storm.kafka.trident.GlobalPartitionInformation;
-
-import java.io.UnsupportedEncodingException;
-import java.net.SocketTimeoutException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-
-public class DynamicBrokersReader {
-
-    public static final Logger LOG = LoggerFactory.getLogger(DynamicBrokersReader.class);
-
-    private CuratorFramework _curator;
-    private String _zkPath;
-    private String _topic;
-    private Boolean _isWildcardTopic;
-
-    public DynamicBrokersReader(Map conf, String zkStr, String zkPath, String topic) {
-        // Check required parameters
-        Preconditions.checkNotNull(conf, "conf cannot be null");
-
-        validateConfig(conf);
-
-        Preconditions.checkNotNull(zkStr,"zkString cannot be null");
-        Preconditions.checkNotNull(zkPath, "zkPath cannot be null");
-        Preconditions.checkNotNull(topic, "topic cannot be null");
-
-        _zkPath = zkPath;
-        _topic = topic;
-        _isWildcardTopic = Utils.getBoolean(conf.get("kafka.topic.wildcard.match"), false);
-        try {
-            _curator = CuratorFrameworkFactory.newClient(
-                    zkStr,
-                    Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT)),
-                    Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_CONNECTION_TIMEOUT)),
-                    new RetryNTimes(Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_TIMES)),
-                            Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL))));
-            _curator.start();
-        } catch (Exception ex) {
-            LOG.error("Couldn't connect to zookeeper", ex);
-            throw new RuntimeException(ex);
-        }
-    }
-
-    /**
-     * Get all partitions with their current leaders
-     */
-    public List<GlobalPartitionInformation> getBrokerInfo() throws SocketTimeoutException {
-      List<String> topics =  getTopics();
-      List<GlobalPartitionInformation> partitions =  new ArrayList<GlobalPartitionInformation>();
-
-      for (String topic : topics) {
-          GlobalPartitionInformation globalPartitionInformation = new GlobalPartitionInformation(topic, this._isWildcardTopic);
-          try {
-              int numPartitionsForTopic = getNumPartitions(topic);
-              String brokerInfoPath = brokerPath();
-              for (int partition = 0; partition < numPartitionsForTopic; partition++) {
-                  int leader = getLeaderFor(topic,partition);
-                  String path = brokerInfoPath + "/" + leader;
-                  try {
-                      byte[] brokerData = _curator.getData().forPath(path);
-                      Broker hp = getBrokerHost(brokerData);
-                      globalPartitionInformation.addPartition(partition, hp);
-                  } catch (org.apache.zookeeper.KeeperException.NoNodeException e) {
-                      LOG.error("Node {} does not exist ", path);
-                  }
-              }
-          } catch (SocketTimeoutException e) {
-              throw e;
-          } catch (Exception e) {
-              throw new RuntimeException(e);
-          }
-          LOG.info("Read partition info from zookeeper: " + globalPartitionInformation);
-          partitions.add(globalPartitionInformation);
-      }
-        return partitions;
-    }
-
-    private int getNumPartitions(String topic) {
-        try {
-            String topicBrokersPath = partitionPath(topic);
-            List<String> children = _curator.getChildren().forPath(topicBrokersPath);
-            return children.size();
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    private List<String> getTopics() {
-        List<String> topics = new ArrayList<String>();
-        if (!_isWildcardTopic) {
-            topics.add(_topic);
-            return topics;
-        } else {
-            try {
-                List<String> children = _curator.getChildren().forPath(topicsPath());
-                for (String t : children) {
-                    if (t.matches(_topic)) {
-                        LOG.info(String.format("Found matching topic %s", t));
-                        topics.add(t);
-                    }
-                }
-                return topics;
-            } catch (Exception e) {
-                throw new RuntimeException(e);
-            }
-        }
-    }
-
-    public String topicsPath () {
-        return _zkPath + "/topics";
-    }
-    public String partitionPath(String topic) {
-        return topicsPath() + "/" + topic + "/partitions";
-    }
-
-    public String brokerPath() {
-        return _zkPath + "/ids";
-    }
-
-
-
-    /**
-     * get /brokers/topics/distributedTopic/partitions/1/state
-     * { "controller_epoch":4, "isr":[ 1, 0 ], "leader":1, "leader_epoch":1, "version":1 }
-     * @param topic
-     * @param partition
-     * @return
-     */
-    private int getLeaderFor(String topic, long partition) {
-        try {
-            String topicBrokersPath = partitionPath(topic);
-            byte[] hostPortData = _curator.getData().forPath(topicBrokersPath + "/" + partition + "/state");
-            Map<Object, Object> value = (Map<Object, Object>) JSONValue.parse(new String(hostPortData, "UTF-8"));
-            Integer leader = ((Number) value.get("leader")).intValue();
-            if (leader == -1) {
-                throw new RuntimeException("No leader found for partition " + partition);
-            }
-            return leader;
-        } catch (RuntimeException e) {
-            throw e;
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    public void close() {
-        _curator.close();
-    }
-
-    /**
-     * [zk: localhost:2181(CONNECTED) 56] get /brokers/ids/0
-     * { "host":"localhost", "jmx_port":9999, "port":9092, "version":1 }
-     *
-     * @param contents
-     * @return
-     */
-    private Broker getBrokerHost(byte[] contents) {
-        try {
-            Map<Object, Object> value = (Map<Object, Object>) JSONValue.parse(new String(contents, "UTF-8"));
-            String host = (String) value.get("host");
-            Integer port = ((Long) value.get("port")).intValue();
-            return new Broker(host, port);
-        } catch (UnsupportedEncodingException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    /**
-     * Validate required parameters in the input configuration Map
-     * @param conf
-     */
-    private void validateConfig(final Map conf) {
-        Preconditions.checkNotNull(conf.get(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT),
-                "%s cannot be null", Config.STORM_ZOOKEEPER_SESSION_TIMEOUT);
-        Preconditions.checkNotNull(conf.get(Config.STORM_ZOOKEEPER_CONNECTION_TIMEOUT),
-                "%s cannot be null", Config.STORM_ZOOKEEPER_CONNECTION_TIMEOUT);
-        Preconditions.checkNotNull(conf.get(Config.STORM_ZOOKEEPER_RETRY_TIMES),
-                "%s cannot be null", Config.STORM_ZOOKEEPER_RETRY_TIMES);
-        Preconditions.checkNotNull(conf.get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL),
-                "%s cannot be null", Config.STORM_ZOOKEEPER_RETRY_INTERVAL);
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/DynamicPartitionConnections.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/DynamicPartitionConnections.java b/external/storm-kafka/src/jvm/storm/kafka/DynamicPartitionConnections.java
deleted file mode 100644
index e237a7a..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/DynamicPartitionConnections.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import kafka.javaapi.consumer.SimpleConsumer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import storm.kafka.trident.IBrokerReader;
-
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-
-public class DynamicPartitionConnections {
-
-    public static final Logger LOG = LoggerFactory.getLogger(DynamicPartitionConnections.class);
-
-    static class ConnectionInfo {
-        SimpleConsumer consumer;
-        Set<String> partitions = new HashSet<String>();
-
-        public ConnectionInfo(SimpleConsumer consumer) {
-            this.consumer = consumer;
-        }
-    }
-
-    Map<Broker, ConnectionInfo> _connections = new HashMap();
-    KafkaConfig _config;
-    IBrokerReader _reader;
-
-    public DynamicPartitionConnections(KafkaConfig config, IBrokerReader brokerReader) {
-        _config = config;
-        _reader = brokerReader;
-    }
-
-    public SimpleConsumer register(Partition partition) {
-        Broker broker = _reader.getBrokerForTopic(partition.topic).getBrokerFor(partition.partition);
-        return register(broker, partition.topic, partition.partition);
-    }
-
-    public SimpleConsumer register(Broker host, String topic, int partition) {
-        if (!_connections.containsKey(host)) {
-            _connections.put(host, new ConnectionInfo(new SimpleConsumer(host.host, host.port, _config.socketTimeoutMs, _config.bufferSizeBytes, _config.clientId)));
-        }
-        ConnectionInfo info = _connections.get(host);
-        info.partitions.add(getHashKey(topic,partition));
-        return info.consumer;
-    }
-
-    public SimpleConsumer getConnection(Partition partition) {
-        ConnectionInfo info = _connections.get(partition.host);
-        if (info != null) {
-            return info.consumer;
-        }
-        return null;
-    }
-
-    public void unregister(Broker port, String topic, int partition) {
-        ConnectionInfo info = _connections.get(port);
-        info.partitions.remove(getHashKey(topic,partition));
-        if (info.partitions.isEmpty()) {
-            info.consumer.close();
-            _connections.remove(port);
-        }
-    }
-
-    public void unregister(Partition partition) {
-        unregister(partition.host, partition.topic, partition.partition);
-    }
-
-    public void clear() {
-        for (ConnectionInfo info : _connections.values()) {
-            info.consumer.close();
-        }
-        _connections.clear();
-    }
-
-    private String getHashKey(String topic, int partition) {
-        return topic + "_" + partition;
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/ExponentialBackoffMsgRetryManager.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/ExponentialBackoffMsgRetryManager.java b/external/storm-kafka/src/jvm/storm/kafka/ExponentialBackoffMsgRetryManager.java
deleted file mode 100644
index 5664f12..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/ExponentialBackoffMsgRetryManager.java
+++ /dev/null
@@ -1,184 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import java.util.Comparator;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.PriorityQueue;
-import java.util.Queue;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-
-public class ExponentialBackoffMsgRetryManager implements FailedMsgRetryManager {
-
-    private final long retryInitialDelayMs;
-    private final double retryDelayMultiplier;
-    private final long retryDelayMaxMs;
-
-    private Queue<MessageRetryRecord> waiting = new PriorityQueue<MessageRetryRecord>(11, new RetryTimeComparator());
-    private Map<Long,MessageRetryRecord> records = new ConcurrentHashMap<Long,MessageRetryRecord>();
-
-    public ExponentialBackoffMsgRetryManager(long retryInitialDelayMs, double retryDelayMultiplier, long retryDelayMaxMs) {
-        this.retryInitialDelayMs = retryInitialDelayMs;
-        this.retryDelayMultiplier = retryDelayMultiplier;
-        this.retryDelayMaxMs = retryDelayMaxMs;
-    }
-
-    @Override
-    public void failed(Long offset) {
-        MessageRetryRecord oldRecord = this.records.get(offset);
-        MessageRetryRecord newRecord = oldRecord == null ?
-                                       new MessageRetryRecord(offset) :
-                                       oldRecord.createNextRetryRecord();
-        this.records.put(offset, newRecord);
-        this.waiting.add(newRecord);
-    }
-
-    @Override
-    public void acked(Long offset) {
-        MessageRetryRecord record = this.records.remove(offset);
-        if (record != null) {
-            this.waiting.remove(record);
-        }
-    }
-
-    @Override
-    public void retryStarted(Long offset) {
-        MessageRetryRecord record = this.records.get(offset);
-        if (record == null || !this.waiting.contains(record)) {
-            throw new IllegalStateException("cannot retry a message that has not failed");
-        } else {
-            this.waiting.remove(record);
-        }
-    }
-
-    @Override
-    public Long nextFailedMessageToRetry() {
-        if (this.waiting.size() > 0) {
-            MessageRetryRecord first = this.waiting.peek();
-            if (System.currentTimeMillis() >= first.retryTimeUTC) {
-                if (this.records.containsKey(first.offset)) {
-                    return first.offset;
-                } else {
-                    // defensive programming - should be impossible
-                    this.waiting.remove(first);
-                    return nextFailedMessageToRetry();
-                }
-            }
-        }
-        return null;
-    }
-
-    @Override
-    public boolean shouldRetryMsg(Long offset) {
-        MessageRetryRecord record = this.records.get(offset);
-        return record != null &&
-                this.waiting.contains(record) &&
-                System.currentTimeMillis() >= record.retryTimeUTC;
-    }
-
-    @Override
-    public Set<Long> clearInvalidMessages(Long kafkaOffset) {
-        Set<Long> invalidOffsets = new HashSet<Long>(); 
-        for(Long offset : records.keySet()){
-            if(offset < kafkaOffset){
-                MessageRetryRecord record = this.records.remove(offset);
-                if (record != null) {
-                    this.waiting.remove(record);
-                    invalidOffsets.add(offset);
-                }
-            }
-        }
-        return invalidOffsets;
-    }
-
-    /**
-     * A MessageRetryRecord holds the data of how many times a message has
-     * failed and been retried, and when the last failure occurred.  It can
-     * determine whether it is ready to be retried by employing an exponential
-     * back-off calculation using config values stored in SpoutConfig:
-     * <ul>
-     *  <li>retryInitialDelayMs - time to delay before the first retry</li>
-     *  <li>retryDelayMultiplier - multiplier by which to increase the delay for each subsequent retry</li>
-     *  <li>retryDelayMaxMs - maximum retry delay (once this delay time is reached, subsequent retries will
-     *                        delay for this amount of time every time)
-     *  </li>
-     * </ul>
-     */
-    private class MessageRetryRecord {
-        private final long offset;
-        private final int retryNum;
-        private final long retryTimeUTC;
-
-        public MessageRetryRecord(long offset) {
-            this(offset, 1);
-        }
-
-        private MessageRetryRecord(long offset, int retryNum) {
-            this.offset = offset;
-            this.retryNum = retryNum;
-            this.retryTimeUTC = System.currentTimeMillis() + calculateRetryDelay();
-        }
-
-        /**
-         * Create a MessageRetryRecord for the next retry that should occur after this one.
-         * @return MessageRetryRecord with the next retry time, or null to indicate that another
-         *         retry should not be performed.  The latter case can happen if we are about to
-         *         run into the backtype.storm.Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS in the Storm
-         *         configuration.
-         */
-        public MessageRetryRecord createNextRetryRecord() {
-            return new MessageRetryRecord(this.offset, this.retryNum + 1);
-        }
-
-        private long calculateRetryDelay() {
-            double delayMultiplier = Math.pow(retryDelayMultiplier, this.retryNum - 1);
-            double delay = retryInitialDelayMs * delayMultiplier;
-            Long maxLong = Long.MAX_VALUE;
-            long delayThisRetryMs = delay >= maxLong.doubleValue()
-                                    ?  maxLong
-                                    : (long) delay;
-            return Math.min(delayThisRetryMs, retryDelayMaxMs);
-        }
-
-        @Override
-        public boolean equals(Object other) {
-            return (other instanceof MessageRetryRecord
-                    && this.offset == ((MessageRetryRecord) other).offset);
-        }
-
-        @Override
-        public int hashCode() {
-            return Long.valueOf(this.offset).hashCode();
-        }
-    }
-
-    private static class RetryTimeComparator implements Comparator<MessageRetryRecord> {
-
-        @Override
-        public int compare(MessageRetryRecord record1, MessageRetryRecord record2) {
-            return Long.valueOf(record1.retryTimeUTC).compareTo(Long.valueOf(record2.retryTimeUTC));
-        }
-
-        @Override
-        public boolean equals(Object obj) {
-            return false;
-        }
-    }
-}
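
For reference, the back-off described in the MessageRetryRecord Javadoc above reduces to
delay(n) = min(retryInitialDelayMs * retryDelayMultiplier^(n - 1), retryDelayMaxMs) for the
n-th retry. A minimal standalone sketch of that calculation (the three constants below are
illustrative placeholders, not defaults from SpoutConfig):

    public class BackoffSketch {
        // Illustrative values only; the real ones come from SpoutConfig.
        static final long RETRY_INITIAL_DELAY_MS = 100;
        static final double RETRY_DELAY_MULTIPLIER = 2.0;
        static final long RETRY_DELAY_MAX_MS = 60_000;

        static long delayForRetry(int retryNum) {
            double delay = RETRY_INITIAL_DELAY_MS * Math.pow(RETRY_DELAY_MULTIPLIER, retryNum - 1);
            long capped = delay >= (double) Long.MAX_VALUE ? Long.MAX_VALUE : (long) delay;
            return Math.min(capped, RETRY_DELAY_MAX_MS);
        }

        public static void main(String[] args) {
            for (int n = 1; n <= 12; n++) {
                System.out.println("retry " + n + " -> " + delayForRetry(n) + " ms");
            }
        }
    }

With these placeholder values the delay doubles each retry (100 ms, 200 ms, 400 ms, ...) until
it is clamped at 60 s from the 11th retry onward.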

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/FailedFetchException.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/FailedFetchException.java b/external/storm-kafka/src/jvm/storm/kafka/FailedFetchException.java
deleted file mode 100644
index 011240e..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/FailedFetchException.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-public class FailedFetchException extends RuntimeException {
-
-    public FailedFetchException(String message) {
-        super(message);
-    }
-
-    public FailedFetchException(Exception e) {
-        super(e);
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/FailedMsgRetryManager.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/FailedMsgRetryManager.java b/external/storm-kafka/src/jvm/storm/kafka/FailedMsgRetryManager.java
deleted file mode 100644
index 30c9a24..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/FailedMsgRetryManager.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import java.util.Set;
-
-public interface FailedMsgRetryManager {
-    public void failed(Long offset);
-    public void acked(Long offset);
-    public void retryStarted(Long offset);
-    public Long nextFailedMessageToRetry();
-    public boolean shouldRetryMsg(Long offset);
-    public Set<Long> clearInvalidMessages(Long kafkaOffset);
-}
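
The interface above captures the retry lifecycle the spout drives: failed() when a tuple fails,
shouldRetryMsg()/nextFailedMessageToRetry() while deciding what to re-emit, retryStarted() once
a retry is emitted, acked() on success, and clearInvalidMessages() when offsets fall off the
Kafka log. A deliberately simplified, hypothetical implementation that retries every failed
offset immediately, with no back-off at all, shows the contract in miniature; it assumes it
sits next to the interface in the storm.kafka package:

    package storm.kafka;

    import java.util.HashSet;
    import java.util.Set;
    import java.util.TreeSet;

    public class ImmediateRetryManager implements FailedMsgRetryManager {
        private final TreeSet<Long> failed = new TreeSet<Long>();

        public void failed(Long offset)       { failed.add(offset); }
        public void acked(Long offset)        { failed.remove(offset); }
        public void retryStarted(Long offset) { failed.remove(offset); }

        public Long nextFailedMessageToRetry() {
            return failed.isEmpty() ? null : failed.first();
        }

        public boolean shouldRetryMsg(Long offset) {
            return failed.contains(offset);
        }

        public Set<Long> clearInvalidMessages(Long kafkaOffset) {
            // Offsets below the earliest offset Kafka still holds can never be refetched.
            Set<Long> cleared = new HashSet<Long>(failed.headSet(kafkaOffset));
            failed.removeAll(cleared);
            return cleared;
        }
    }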

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/IntSerializer.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/IntSerializer.java b/external/storm-kafka/src/jvm/storm/kafka/IntSerializer.java
deleted file mode 100644
index 07cbd26..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/IntSerializer.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import org.apache.kafka.common.serialization.Serializer;
-
-import java.nio.ByteBuffer;
-import java.nio.IntBuffer;
-import java.util.Map;
-
-public class IntSerializer implements Serializer<Integer> {
-  @Override
-  public void configure(Map<String, ?> map, boolean b) {
-  }
-
-  @Override
-  public byte[] serialize(String topic, Integer val) {
-    byte[] r = new byte[4];
-    IntBuffer b = ByteBuffer.wrap(r).asIntBuffer();
-    b.put(val);
-    return r;
-  }
-
-  @Override
-  public void close() {
-  }
-}
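
As a quick sanity check of the serializer above: ByteBuffer (and the IntBuffer view over it)
defaults to big-endian order, so an int serializes to its four bytes most-significant first.
A throwaway sketch, assumed to live alongside IntSerializer in the storm.kafka package and not
part of the original sources:

    import java.util.Arrays;

    public class IntSerializerSketch {
        public static void main(String[] args) {
            byte[] bytes = new IntSerializer().serialize("any-topic", 0x01020304);
            System.out.println(Arrays.toString(bytes));  // prints [1, 2, 3, 4]
        }
    }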

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/KafkaConfig.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/KafkaConfig.java b/external/storm-kafka/src/jvm/storm/kafka/KafkaConfig.java
deleted file mode 100644
index 49c7526..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/KafkaConfig.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import backtype.storm.spout.MultiScheme;
-import backtype.storm.spout.RawMultiScheme;
-
-import java.io.Serializable;
-
-public class KafkaConfig implements Serializable {
-    private static final long serialVersionUID = 5276718734571623855L;
-    
-    public final BrokerHosts hosts;
-    public final String topic;
-    public final String clientId;
-
-    public int fetchSizeBytes = 1024 * 1024;
-    public int socketTimeoutMs = 10000;
-    public int fetchMaxWait = 10000;
-    public int bufferSizeBytes = 1024 * 1024;
-    public MultiScheme scheme = new RawMultiScheme();
-    public boolean ignoreZkOffsets = false;
-    public long startOffsetTime = kafka.api.OffsetRequest.EarliestTime();
-    public long maxOffsetBehind = Long.MAX_VALUE;
-    public boolean useStartOffsetTimeIfOffsetOutOfRange = true;
-    public int metricsTimeBucketSizeInSecs = 60;
-
-    public KafkaConfig(BrokerHosts hosts, String topic) {
-        this(hosts, topic, kafka.api.OffsetRequest.DefaultClientId());
-    }
-
-    public KafkaConfig(BrokerHosts hosts, String topic, String clientId) {
-        this.hosts = hosts;
-        this.topic = topic;
-        this.clientId = clientId;
-    }
-
-}
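
KafkaConfig is typically built with a ZkHosts (the BrokerHosts implementation from the same
package) and then tuned by overriding the public fields shown above. A small sketch; the
ZooKeeper address and topic name are placeholders:

    import storm.kafka.KafkaConfig;
    import storm.kafka.ZkHosts;

    public class KafkaConfigSketch {
        static KafkaConfig latestReadingConfig() {
            KafkaConfig config = new KafkaConfig(new ZkHosts("zk1.example.com:2181"), "events");
            config.fetchSizeBytes = 2 * 1024 * 1024;                       // raise the 1 MB default
            config.startOffsetTime = kafka.api.OffsetRequest.LatestTime(); // start from new messages
                                                                           // instead of the earliest default
            return config;
        }
    }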

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/KafkaError.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/KafkaError.java b/external/storm-kafka/src/jvm/storm/kafka/KafkaError.java
deleted file mode 100644
index 634af85..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/KafkaError.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-public enum KafkaError {
-    NO_ERROR,
-    OFFSET_OUT_OF_RANGE,
-    INVALID_MESSAGE,
-    UNKNOWN_TOPIC_OR_PARTITION,
-    INVALID_FETCH_SIZE,
-    LEADER_NOT_AVAILABLE,
-    NOT_LEADER_FOR_PARTITION,
-    REQUEST_TIMED_OUT,
-    BROKER_NOT_AVAILABLE,
-    REPLICA_NOT_AVAILABLE,
-    MESSAGE_SIZE_TOO_LARGE,
-    STALE_CONTROLLER_EPOCH,
-    OFFSET_METADATA_TOO_LARGE,
-    UNKNOWN;
-
-    public static KafkaError getError(int errorCode) {
-        if (errorCode < 0 || errorCode >= UNKNOWN.ordinal()) {
-            return UNKNOWN;
-        } else {
-            return values()[errorCode];
-        }
-    }
-}
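
getError() maps a Kafka protocol error code directly onto the enum ordinal, so the constants
above must stay in wire order; negative codes and codes beyond the known range collapse to
UNKNOWN. A tiny sketch, assumed to sit in the same storm.kafka package:

    public class KafkaErrorSketch {
        public static void main(String[] args) {
            System.out.println(KafkaError.getError(0));   // NO_ERROR
            System.out.println(KafkaError.getError(1));   // OFFSET_OUT_OF_RANGE
            System.out.println(KafkaError.getError(99));  // UNKNOWN (beyond the known range)
            System.out.println(KafkaError.getError(-1));  // UNKNOWN (negative codes too)
        }
    }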

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/KafkaSpout.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/KafkaSpout.java b/external/storm-kafka/src/jvm/storm/kafka/KafkaSpout.java
deleted file mode 100644
index 8169014..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/KafkaSpout.java
+++ /dev/null
@@ -1,198 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p/>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import backtype.storm.Config;
-import backtype.storm.metric.api.IMetric;
-import backtype.storm.spout.SpoutOutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseRichSpout;
-import com.google.common.base.Strings;
-import kafka.message.Message;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import storm.kafka.PartitionManager.KafkaMessageId;
-
-import java.util.*;
-
-// TODO: need to add blacklisting
-// TODO: need to make a best effort to not re-emit messages if don't have to
-public class KafkaSpout extends BaseRichSpout {
-    static enum EmitState {
-        EMITTED_MORE_LEFT,
-        EMITTED_END,
-        NO_EMITTED
-    }
-
-    public static final Logger LOG = LoggerFactory.getLogger(KafkaSpout.class);
-
-    SpoutConfig _spoutConfig;
-    SpoutOutputCollector _collector;
-    PartitionCoordinator _coordinator;
-    DynamicPartitionConnections _connections;
-    ZkState _state;
-
-    long _lastUpdateMs = 0;
-
-    int _currPartitionIndex = 0;
-
-    public KafkaSpout(SpoutConfig spoutConf) {
-        _spoutConfig = spoutConf;
-    }
-
-    @Override
-    public void open(Map conf, final TopologyContext context, final SpoutOutputCollector collector) {
-        _collector = collector;
-        String topologyInstanceId = context.getStormId();
-        Map stateConf = new HashMap(conf);
-        List<String> zkServers = _spoutConfig.zkServers;
-        if (zkServers == null) {
-            zkServers = (List<String>) conf.get(Config.STORM_ZOOKEEPER_SERVERS);
-        }
-        Integer zkPort = _spoutConfig.zkPort;
-        if (zkPort == null) {
-            zkPort = ((Number) conf.get(Config.STORM_ZOOKEEPER_PORT)).intValue();
-        }
-        stateConf.put(Config.TRANSACTIONAL_ZOOKEEPER_SERVERS, zkServers);
-        stateConf.put(Config.TRANSACTIONAL_ZOOKEEPER_PORT, zkPort);
-        stateConf.put(Config.TRANSACTIONAL_ZOOKEEPER_ROOT, _spoutConfig.zkRoot);
-        _state = new ZkState(stateConf);
-
-        _connections = new DynamicPartitionConnections(_spoutConfig, KafkaUtils.makeBrokerReader(conf, _spoutConfig));
-
-        // using TransactionalState like this is a hack
-        int totalTasks = context.getComponentTasks(context.getThisComponentId()).size();
-        if (_spoutConfig.hosts instanceof StaticHosts) {
-            _coordinator = new StaticCoordinator(_connections, conf,
-                    _spoutConfig, _state, context.getThisTaskIndex(),
-                    totalTasks, topologyInstanceId);
-        } else {
-            _coordinator = new ZkCoordinator(_connections, conf,
-                    _spoutConfig, _state, context.getThisTaskIndex(),
-                    totalTasks, topologyInstanceId);
-        }
-
-        context.registerMetric("kafkaOffset", new IMetric() {
-            KafkaUtils.KafkaOffsetMetric _kafkaOffsetMetric = new KafkaUtils.KafkaOffsetMetric(_connections);
-
-            @Override
-            public Object getValueAndReset() {
-                List<PartitionManager> pms = _coordinator.getMyManagedPartitions();
-                Set<Partition> latestPartitions = new HashSet();
-                for (PartitionManager pm : pms) {
-                    latestPartitions.add(pm.getPartition());
-                }
-                _kafkaOffsetMetric.refreshPartitions(latestPartitions);
-                for (PartitionManager pm : pms) {
-                    _kafkaOffsetMetric.setLatestEmittedOffset(pm.getPartition(), pm.lastCompletedOffset());
-                }
-                return _kafkaOffsetMetric.getValueAndReset();
-            }
-        }, _spoutConfig.metricsTimeBucketSizeInSecs);
-
-        context.registerMetric("kafkaPartition", new IMetric() {
-            @Override
-            public Object getValueAndReset() {
-                List<PartitionManager> pms = _coordinator.getMyManagedPartitions();
-                Map concatMetricsDataMaps = new HashMap();
-                for (PartitionManager pm : pms) {
-                    concatMetricsDataMaps.putAll(pm.getMetricsDataMap());
-                }
-                return concatMetricsDataMaps;
-            }
-        }, _spoutConfig.metricsTimeBucketSizeInSecs);
-    }
-
-    @Override
-    public void close() {
-        _state.close();
-    }
-
-    @Override
-    public void nextTuple() {
-        List<PartitionManager> managers = _coordinator.getMyManagedPartitions();
-        for (int i = 0; i < managers.size(); i++) {
-
-            try {
-                // in case the number of managers decreased
-                _currPartitionIndex = _currPartitionIndex % managers.size();
-                EmitState state = managers.get(_currPartitionIndex).next(_collector);
-                if (state != EmitState.EMITTED_MORE_LEFT) {
-                    _currPartitionIndex = (_currPartitionIndex + 1) % managers.size();
-                }
-                if (state != EmitState.NO_EMITTED) {
-                    break;
-                }
-            } catch (FailedFetchException e) {
-                LOG.warn("Fetch failed", e);
-                _coordinator.refresh();
-            }
-        }
-
-        long diffWithNow = System.currentTimeMillis() - _lastUpdateMs;
-
-        /*
-             Since System.currentTimeMillis() depends on the system clock, also check for
-             a negative diffWithNow in case the clock was adjusted externally.
-         */
-        if (diffWithNow > _spoutConfig.stateUpdateIntervalMs || diffWithNow < 0) {
-            commit();
-        }
-    }
-
-    @Override
-    public void ack(Object msgId) {
-        KafkaMessageId id = (KafkaMessageId) msgId;
-        PartitionManager m = _coordinator.getManager(id.partition);
-        if (m != null) {
-            m.ack(id.offset);
-        }
-    }
-
-    @Override
-    public void fail(Object msgId) {
-        KafkaMessageId id = (KafkaMessageId) msgId;
-        PartitionManager m = _coordinator.getManager(id.partition);
-        if (m != null) {
-            m.fail(id.offset);
-        }
-    }
-
-    @Override
-    public void deactivate() {
-        commit();
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-       if (!Strings.isNullOrEmpty(_spoutConfig.outputStreamId)) {
-            declarer.declareStream(_spoutConfig.outputStreamId, _spoutConfig.scheme.getOutputFields());
-        } else {
-            declarer.declare(_spoutConfig.scheme.getOutputFields());
-        }
-    }
-
-    private void commit() {
-        _lastUpdateMs = System.currentTimeMillis();
-        for (PartitionManager manager : _coordinator.getMyManagedPartitions()) {
-            manager.commit();
-        }
-    }
-
-}
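
For orientation, this spout was typically wired into a topology roughly as follows. SpoutConfig
(the KafkaConfig subclass the spout reads its zkRoot, stateUpdateIntervalMs and outputStreamId
from) and ZkHosts come from the same storm-kafka module; the constructor arguments below are
placeholders, and the imports use the pre-migration backtype.storm names to match the file above:

    import backtype.storm.Config;
    import backtype.storm.LocalCluster;
    import backtype.storm.topology.TopologyBuilder;
    import storm.kafka.KafkaSpout;
    import storm.kafka.SpoutConfig;
    import storm.kafka.ZkHosts;

    public class KafkaSpoutWiringSketch {
        public static void main(String[] args) throws Exception {
            SpoutConfig spoutConfig = new SpoutConfig(
                    new ZkHosts("zk1.example.com:2181"),  // ZooKeeper ensemble used by the brokers
                    "events",                             // Kafka topic
                    "/kafka-spout",                       // ZK root for offset storage
                    "demo-client");                       // consumer id

            TopologyBuilder builder = new TopologyBuilder();
            builder.setSpout("kafka-spout", new KafkaSpout(spoutConfig), 2);
            // ... bolts consuming the spout's output stream would be declared here ...

            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("kafka-demo", new Config(), builder.createTopology());
        }
    }

After this commit the core classes referenced here live under org.apache.storm rather than
backtype.storm, as the rename hunks below show.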


[41/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hbase/src/test/java/org/apache/storm/hbase/topology/WordSpout.java
----------------------------------------------------------------------
diff --git a/external/storm-hbase/src/test/java/org/apache/storm/hbase/topology/WordSpout.java b/external/storm-hbase/src/test/java/org/apache/storm/hbase/topology/WordSpout.java
index 799438c..c5fc490 100644
--- a/external/storm-hbase/src/test/java/org/apache/storm/hbase/topology/WordSpout.java
+++ b/external/storm-hbase/src/test/java/org/apache/storm/hbase/topology/WordSpout.java
@@ -17,12 +17,12 @@
  */
 package org.apache.storm.hbase.topology;
 
-import backtype.storm.spout.SpoutOutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.IRichSpout;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
+import org.apache.storm.spout.SpoutOutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.IRichSpout;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
 
 import java.util.Map;
 import java.util.Random;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hbase/src/test/java/org/apache/storm/hbase/trident/PrintFunction.java
----------------------------------------------------------------------
diff --git a/external/storm-hbase/src/test/java/org/apache/storm/hbase/trident/PrintFunction.java b/external/storm-hbase/src/test/java/org/apache/storm/hbase/trident/PrintFunction.java
index 91e0a7a..cdc7690 100644
--- a/external/storm-hbase/src/test/java/org/apache/storm/hbase/trident/PrintFunction.java
+++ b/external/storm-hbase/src/test/java/org/apache/storm/hbase/trident/PrintFunction.java
@@ -19,9 +19,9 @@ package org.apache.storm.hbase.trident;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import storm.trident.operation.BaseFunction;
-import storm.trident.operation.TridentCollector;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.operation.BaseFunction;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.util.Random;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hbase/src/test/java/org/apache/storm/hbase/trident/WordCountTrident.java
----------------------------------------------------------------------
diff --git a/external/storm-hbase/src/test/java/org/apache/storm/hbase/trident/WordCountTrident.java b/external/storm-hbase/src/test/java/org/apache/storm/hbase/trident/WordCountTrident.java
index 69d067d..b2f0ce8 100644
--- a/external/storm-hbase/src/test/java/org/apache/storm/hbase/trident/WordCountTrident.java
+++ b/external/storm-hbase/src/test/java/org/apache/storm/hbase/trident/WordCountTrident.java
@@ -17,12 +17,12 @@
  */
 package org.apache.storm.hbase.trident;
 
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.StormSubmitter;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.storm.hbase.bolt.mapper.HBaseProjectionCriteria;
 import org.apache.storm.hbase.bolt.mapper.HBaseValueMapper;
@@ -33,11 +33,11 @@ import org.apache.storm.hbase.trident.state.HBaseQuery;
 import org.apache.storm.hbase.trident.state.HBaseState;
 import org.apache.storm.hbase.trident.state.HBaseStateFactory;
 import org.apache.storm.hbase.trident.state.HBaseUpdater;
-import storm.trident.Stream;
-import storm.trident.TridentState;
-import storm.trident.TridentTopology;
-import storm.trident.state.StateFactory;
-import storm.trident.testing.FixedBatchSpout;
+import org.apache.storm.trident.Stream;
+import org.apache.storm.trident.TridentState;
+import org.apache.storm.trident.TridentTopology;
+import org.apache.storm.trident.state.StateFactory;
+import org.apache.storm.trident.testing.FixedBatchSpout;
 
 public class WordCountTrident {
     public static StormTopology buildTopology(String hbaseRoot){

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/blobstore/HdfsBlobStore.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/blobstore/HdfsBlobStore.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/blobstore/HdfsBlobStore.java
index fbc277e..dadabe8 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/blobstore/HdfsBlobStore.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/blobstore/HdfsBlobStore.java
@@ -17,19 +17,19 @@
  */
 package org.apache.storm.hdfs.blobstore;
 
-import backtype.storm.Config;
-import backtype.storm.blobstore.AtomicOutputStream;
-import backtype.storm.blobstore.BlobStore;
-import backtype.storm.blobstore.BlobStoreAclHandler;
-import backtype.storm.blobstore.BlobStoreFile;
-import backtype.storm.blobstore.InputStreamWithMeta;
-import backtype.storm.generated.AuthorizationException;
-import backtype.storm.generated.KeyNotFoundException;
-import backtype.storm.generated.KeyAlreadyExistsException;
-import backtype.storm.generated.ReadableBlobMeta;
-import backtype.storm.generated.SettableBlobMeta;
-import backtype.storm.nimbus.NimbusInfo;
-import backtype.storm.utils.Utils;
+import org.apache.storm.Config;
+import org.apache.storm.blobstore.AtomicOutputStream;
+import org.apache.storm.blobstore.BlobStore;
+import org.apache.storm.blobstore.BlobStoreAclHandler;
+import org.apache.storm.blobstore.BlobStoreFile;
+import org.apache.storm.blobstore.InputStreamWithMeta;
+import org.apache.storm.generated.AuthorizationException;
+import org.apache.storm.generated.KeyNotFoundException;
+import org.apache.storm.generated.KeyAlreadyExistsException;
+import org.apache.storm.generated.ReadableBlobMeta;
+import org.apache.storm.generated.SettableBlobMeta;
+import org.apache.storm.nimbus.NimbusInfo;
+import org.apache.storm.utils.Utils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -46,9 +46,9 @@ import java.security.PrivilegedAction;
 import java.util.Iterator;
 import java.util.Map;
 
-import static backtype.storm.blobstore.BlobStoreAclHandler.ADMIN;
-import static backtype.storm.blobstore.BlobStoreAclHandler.READ;
-import static backtype.storm.blobstore.BlobStoreAclHandler.WRITE;
+import static org.apache.storm.blobstore.BlobStoreAclHandler.ADMIN;
+import static org.apache.storm.blobstore.BlobStoreAclHandler.READ;
+import static org.apache.storm.blobstore.BlobStoreAclHandler.WRITE;
 
 /**
  * Provides a HDFS file system backed blob store implementation.

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/blobstore/HdfsBlobStoreFile.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/blobstore/HdfsBlobStoreFile.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/blobstore/HdfsBlobStoreFile.java
index 93b56c1..0192d94 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/blobstore/HdfsBlobStoreFile.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/blobstore/HdfsBlobStoreFile.java
@@ -17,8 +17,8 @@
  */
 package org.apache.storm.hdfs.blobstore;
 
-import backtype.storm.blobstore.BlobStoreFile;
-import backtype.storm.generated.SettableBlobMeta;
+import org.apache.storm.blobstore.BlobStoreFile;
+import org.apache.storm.generated.SettableBlobMeta;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileSystem;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/blobstore/HdfsBlobStoreImpl.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/blobstore/HdfsBlobStoreImpl.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/blobstore/HdfsBlobStoreImpl.java
index e434752..a4c88ce 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/blobstore/HdfsBlobStoreImpl.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/blobstore/HdfsBlobStoreImpl.java
@@ -17,9 +17,9 @@
  */
 package org.apache.storm.hdfs.blobstore;
 
-import backtype.storm.Config;
-import backtype.storm.blobstore.BlobStoreFile;
-import backtype.storm.utils.Utils;
+import org.apache.storm.Config;
+import org.apache.storm.blobstore.BlobStoreFile;
+import org.apache.storm.utils.Utils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/blobstore/HdfsClientBlobStore.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/blobstore/HdfsClientBlobStore.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/blobstore/HdfsClientBlobStore.java
index 18c97da..1fc80d0 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/blobstore/HdfsClientBlobStore.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/blobstore/HdfsClientBlobStore.java
@@ -17,15 +17,15 @@
  */
 package org.apache.storm.hdfs.blobstore;
 
-import backtype.storm.blobstore.AtomicOutputStream;
-import backtype.storm.blobstore.ClientBlobStore;
-import backtype.storm.blobstore.InputStreamWithMeta;
-import backtype.storm.generated.AuthorizationException;
-import backtype.storm.generated.ReadableBlobMeta;
-import backtype.storm.generated.SettableBlobMeta;
-import backtype.storm.generated.KeyAlreadyExistsException;
-import backtype.storm.generated.KeyNotFoundException;
-import backtype.storm.utils.NimbusClient;
+import org.apache.storm.blobstore.AtomicOutputStream;
+import org.apache.storm.blobstore.ClientBlobStore;
+import org.apache.storm.blobstore.InputStreamWithMeta;
+import org.apache.storm.generated.AuthorizationException;
+import org.apache.storm.generated.ReadableBlobMeta;
+import org.apache.storm.generated.SettableBlobMeta;
+import org.apache.storm.generated.KeyAlreadyExistsException;
+import org.apache.storm.generated.KeyNotFoundException;
+import org.apache.storm.utils.NimbusClient;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/AbstractHdfsBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/AbstractHdfsBolt.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/AbstractHdfsBolt.java
index d2e65de..dbe986e 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/AbstractHdfsBolt.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/AbstractHdfsBolt.java
@@ -17,14 +17,14 @@
  */
 package org.apache.storm.hdfs.bolt;
 
-import backtype.storm.Config;
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseRichBolt;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.utils.TupleUtils;
-import backtype.storm.utils.Utils;
+import org.apache.storm.Config;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseRichBolt;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.utils.TupleUtils;
+import org.apache.storm.utils.Utils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/AvroGenericRecordBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/AvroGenericRecordBolt.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/AvroGenericRecordBolt.java
index d2bfba8..c817c98 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/AvroGenericRecordBolt.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/AvroGenericRecordBolt.java
@@ -17,11 +17,11 @@
  */
 package org.apache.storm.hdfs.bolt;
 
-import backtype.storm.Config;
-import backtype.storm.Constants;
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.Config;
+import org.apache.storm.Constants;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.tuple.Tuple;
 import org.apache.avro.Schema;
 import org.apache.avro.file.DataFileWriter;
 import org.apache.avro.io.DatumWriter;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/HdfsBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/HdfsBolt.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/HdfsBolt.java
index 101aa57..b351adc 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/HdfsBolt.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/HdfsBolt.java
@@ -17,9 +17,9 @@
  */
 package org.apache.storm.hdfs.bolt;
 
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.tuple.Tuple;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/SequenceFileBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/SequenceFileBolt.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/SequenceFileBolt.java
index fcd0d29..2a266c1 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/SequenceFileBolt.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/SequenceFileBolt.java
@@ -17,9 +17,9 @@
  */
 package org.apache.storm.hdfs.bolt;
 
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.tuple.Tuple;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.SequenceFile;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/DefaultFileNameFormat.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/DefaultFileNameFormat.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/DefaultFileNameFormat.java
index e046f90..3bc9904 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/DefaultFileNameFormat.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/DefaultFileNameFormat.java
@@ -17,7 +17,7 @@
  */
 package org.apache.storm.hdfs.bolt.format;
 
-import backtype.storm.task.TopologyContext;
+import org.apache.storm.task.TopologyContext;
 
 import java.util.Map;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/DefaultSequenceFormat.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/DefaultSequenceFormat.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/DefaultSequenceFormat.java
index 115b1ee..ab07d43 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/DefaultSequenceFormat.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/DefaultSequenceFormat.java
@@ -17,7 +17,7 @@
  */
 package org.apache.storm.hdfs.bolt.format;
 
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.tuple.Tuple;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/DelimitedRecordFormat.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/DelimitedRecordFormat.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/DelimitedRecordFormat.java
index 2bd3abb..0c6e3f0 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/DelimitedRecordFormat.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/DelimitedRecordFormat.java
@@ -17,8 +17,8 @@
  */
 package org.apache.storm.hdfs.bolt.format;
 
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
 
 /**
  * RecordFormat implementation that uses field and record delimiters.

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/FileNameFormat.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/FileNameFormat.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/FileNameFormat.java
index 8344d78..90e99cb 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/FileNameFormat.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/FileNameFormat.java
@@ -17,7 +17,7 @@
  */
 package org.apache.storm.hdfs.bolt.format;
 
-import backtype.storm.task.TopologyContext;
+import org.apache.storm.task.TopologyContext;
 
 import java.io.Serializable;
 import java.util.Map;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/RecordFormat.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/RecordFormat.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/RecordFormat.java
index 3a26a61..fe48f05 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/RecordFormat.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/RecordFormat.java
@@ -18,7 +18,7 @@
 package org.apache.storm.hdfs.bolt.format;
 
 
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.tuple.Tuple;
 
 import java.io.Serializable;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/SequenceFormat.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/SequenceFormat.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/SequenceFormat.java
index cdadcc1..fcb7f45 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/SequenceFormat.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/format/SequenceFormat.java
@@ -17,7 +17,7 @@
  */
 package org.apache.storm.hdfs.bolt.format;
 
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.tuple.Tuple;
 
 import java.io.Serializable;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/FileRotationPolicy.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/FileRotationPolicy.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/FileRotationPolicy.java
index 658579e..90ef772 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/FileRotationPolicy.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/FileRotationPolicy.java
@@ -18,7 +18,7 @@
 package org.apache.storm.hdfs.bolt.rotation;
 
 
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.tuple.Tuple;
 
 import java.io.Serializable;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/FileSizeRotationPolicy.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/FileSizeRotationPolicy.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/FileSizeRotationPolicy.java
index 70d170f..f0df921 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/FileSizeRotationPolicy.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/FileSizeRotationPolicy.java
@@ -18,7 +18,7 @@
 package org.apache.storm.hdfs.bolt.rotation;
 
 
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.tuple.Tuple;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/NoRotationPolicy.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/NoRotationPolicy.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/NoRotationPolicy.java
index 884872e..14fa496 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/NoRotationPolicy.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/NoRotationPolicy.java
@@ -17,7 +17,7 @@
  */
 package org.apache.storm.hdfs.bolt.rotation;
 
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.tuple.Tuple;
 
 /**
  * File rotation policy that will never rotate...

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/TimedRotationPolicy.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/TimedRotationPolicy.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/TimedRotationPolicy.java
index 6c21f0d..84762a0 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/TimedRotationPolicy.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/TimedRotationPolicy.java
@@ -17,7 +17,7 @@
  */
 package org.apache.storm.hdfs.bolt.rotation;
 
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.tuple.Tuple;
 
 public class TimedRotationPolicy implements FileRotationPolicy {
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/sync/CountSyncPolicy.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/sync/CountSyncPolicy.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/sync/CountSyncPolicy.java
index 2a69e4f..9f31d58 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/sync/CountSyncPolicy.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/sync/CountSyncPolicy.java
@@ -18,7 +18,7 @@
 package org.apache.storm.hdfs.bolt.sync;
 
 
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.tuple.Tuple;
 
 /**
  * SyncPolicy implementation that will trigger a

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/sync/SyncPolicy.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/sync/SyncPolicy.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/sync/SyncPolicy.java
index 205f13f..19c0e25 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/sync/SyncPolicy.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/sync/SyncPolicy.java
@@ -17,7 +17,7 @@
  */
 package org.apache.storm.hdfs.bolt.sync;
 
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.tuple.Tuple;
 
 import java.io.Serializable;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/security/AutoHDFS.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/security/AutoHDFS.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/security/AutoHDFS.java
index faa21ac..ff3f9cc 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/security/AutoHDFS.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/security/AutoHDFS.java
@@ -18,10 +18,10 @@
 
 package org.apache.storm.hdfs.common.security;
 
-import backtype.storm.Config;
-import backtype.storm.security.INimbusCredentialPlugin;
-import backtype.storm.security.auth.IAutoCredentials;
-import backtype.storm.security.auth.ICredentialsRenewer;
+import org.apache.storm.Config;
+import org.apache.storm.security.INimbusCredentialPlugin;
+import org.apache.storm.security.auth.IAutoCredentials;
+import org.apache.storm.security.auth.ICredentialsRenewer;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.security.Credentials;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/security/HdfsSecurityUtil.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/security/HdfsSecurityUtil.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/security/HdfsSecurityUtil.java
index 86e7e3d..993214a 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/security/HdfsSecurityUtil.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/security/HdfsSecurityUtil.java
@@ -17,7 +17,7 @@
  */
 package org.apache.storm.hdfs.common.security;
 
-import backtype.storm.security.auth.kerberos.AutoTGT;
+import org.apache.storm.security.auth.kerberos.AutoTGT;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.SecurityUtil;
@@ -29,7 +29,7 @@ import java.io.IOException;
 import java.util.List;
 import java.util.Map;
 
-import static backtype.storm.Config.TOPOLOGY_AUTO_CREDENTIALS;
+import static org.apache.storm.Config.TOPOLOGY_AUTO_CREDENTIALS;
 
 /**
  * This class provides util methods for storm-hdfs connector communicating

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/HdfsState.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/HdfsState.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/HdfsState.java
index 4448868..6c4acca 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/HdfsState.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/HdfsState.java
@@ -17,9 +17,9 @@
  */
 package org.apache.storm.hdfs.trident;
 
-import backtype.storm.Config;
-import backtype.storm.task.IMetricsContext;
-import backtype.storm.topology.FailedException;
+import org.apache.storm.Config;
+import org.apache.storm.task.IMetricsContext;
+import org.apache.storm.topology.FailedException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -39,9 +39,9 @@ import org.apache.storm.hdfs.trident.rotation.FileSizeRotationPolicy;
 import org.apache.storm.hdfs.trident.rotation.TimedRotationPolicy;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import storm.trident.operation.TridentCollector;
-import storm.trident.state.State;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.state.State;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.io.BufferedReader;
 import java.io.BufferedWriter;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/HdfsStateFactory.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/HdfsStateFactory.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/HdfsStateFactory.java
index 3bc5c9e..3f4400b 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/HdfsStateFactory.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/HdfsStateFactory.java
@@ -17,11 +17,11 @@
  */
 package org.apache.storm.hdfs.trident;
 
-import backtype.storm.task.IMetricsContext;
+import org.apache.storm.task.IMetricsContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import storm.trident.state.State;
-import storm.trident.state.StateFactory;
+import org.apache.storm.trident.state.State;
+import org.apache.storm.trident.state.StateFactory;
 
 import java.util.Map;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/HdfsUpdater.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/HdfsUpdater.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/HdfsUpdater.java
index 1255d05..c603334 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/HdfsUpdater.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/HdfsUpdater.java
@@ -17,9 +17,9 @@
  */
 package org.apache.storm.hdfs.trident;
 
-import storm.trident.operation.TridentCollector;
-import storm.trident.state.BaseStateUpdater;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.state.BaseStateUpdater;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/DefaultSequenceFormat.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/DefaultSequenceFormat.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/DefaultSequenceFormat.java
index 7490e34..1336144 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/DefaultSequenceFormat.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/DefaultSequenceFormat.java
@@ -20,7 +20,7 @@ package org.apache.storm.hdfs.trident.format;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 /**
  * Basic <code>SequenceFormat</code> implementation that uses

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/DelimitedRecordFormat.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/DelimitedRecordFormat.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/DelimitedRecordFormat.java
index 577d385..a08664d 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/DelimitedRecordFormat.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/DelimitedRecordFormat.java
@@ -17,8 +17,8 @@
  */
 package org.apache.storm.hdfs.trident.format;
 
-import backtype.storm.tuple.Fields;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 /**
  * RecordFormat implementation that uses field and record delimiters.

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/RecordFormat.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/RecordFormat.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/RecordFormat.java
index 2af29a8..76179d9 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/RecordFormat.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/RecordFormat.java
@@ -18,7 +18,7 @@
 package org.apache.storm.hdfs.trident.format;
 
 
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.io.Serializable;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/SequenceFormat.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/SequenceFormat.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/SequenceFormat.java
index a23d23f..b4d6c5c 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/SequenceFormat.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/format/SequenceFormat.java
@@ -18,7 +18,7 @@
 package org.apache.storm.hdfs.trident.format;
 
 import org.apache.hadoop.io.Writable;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.io.Serializable;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/FileRotationPolicy.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/FileRotationPolicy.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/FileRotationPolicy.java
index f429221..c69f312 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/FileRotationPolicy.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/FileRotationPolicy.java
@@ -17,7 +17,7 @@
  */
 package org.apache.storm.hdfs.trident.rotation;
 
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.io.Serializable;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/FileSizeRotationPolicy.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/FileSizeRotationPolicy.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/FileSizeRotationPolicy.java
index fad6455..70865b2 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/FileSizeRotationPolicy.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/FileSizeRotationPolicy.java
@@ -20,7 +20,7 @@ package org.apache.storm.hdfs.trident.rotation;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 /**
  * File rotation policy that will rotate files when a certain
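
The rotation policies follow the same package move; a minimal sketch of selecting a size-based policy under the migrated package, where the 5 MB threshold is an assumed example value.

import org.apache.storm.hdfs.trident.rotation.FileRotationPolicy;
import org.apache.storm.hdfs.trident.rotation.FileSizeRotationPolicy;
import org.apache.storm.hdfs.trident.rotation.FileSizeRotationPolicy.Units;

public class RotationPolicySketch {
    // Rotate the current output file once roughly 5 MB has been written to it.
    public static FileRotationPolicy fiveMegabytes() {
        return new FileSizeRotationPolicy(5.0f, Units.MB);
    }
}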

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/NoRotationPolicy.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/NoRotationPolicy.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/NoRotationPolicy.java
index 8117f95..4c65acd 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/NoRotationPolicy.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/NoRotationPolicy.java
@@ -17,7 +17,7 @@
  */
 package org.apache.storm.hdfs.trident.rotation;
 
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 /**
  * File rotation policy that will never rotate...

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/TimedRotationPolicy.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/TimedRotationPolicy.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/TimedRotationPolicy.java
index f8cfe44..7278c9a 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/TimedRotationPolicy.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/TimedRotationPolicy.java
@@ -17,7 +17,7 @@
  */
 package org.apache.storm.hdfs.trident.rotation;
 
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.util.Timer;
 import java.util.TimerTask;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/sync/CountSyncPolicy.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/sync/CountSyncPolicy.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/sync/CountSyncPolicy.java
index 3100fd1..3b4e32e 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/sync/CountSyncPolicy.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/sync/CountSyncPolicy.java
@@ -18,7 +18,7 @@
 package org.apache.storm.hdfs.trident.sync;
 
 
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 /**
  * SyncPolicy implementation that will trigger a
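
Likewise for the sync policies; a small sketch of a count-based sync under the new package, where the batch size of 1000 tuples is an assumption for illustration.

import org.apache.storm.hdfs.trident.sync.CountSyncPolicy;
import org.apache.storm.hdfs.trident.sync.SyncPolicy;

public class SyncPolicySketch {
    // Ask the HDFS writer to sync/flush after every 1000 tuples written.
    public static SyncPolicy everyThousandTuples() {
        return new CountSyncPolicy(1000);
    }
}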

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/sync/SyncPolicy.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/sync/SyncPolicy.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/sync/SyncPolicy.java
index db70369..d87da9c 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/sync/SyncPolicy.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/sync/SyncPolicy.java
@@ -17,7 +17,7 @@
  */
 package org.apache.storm.hdfs.trident.sync;
 
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.io.Serializable;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/blobstore/BlobStoreTest.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/blobstore/BlobStoreTest.java b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/blobstore/BlobStoreTest.java
index 2a0a15a..6e03bcf 100644
--- a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/blobstore/BlobStoreTest.java
+++ b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/blobstore/BlobStoreTest.java
@@ -17,21 +17,21 @@
  */
 package org.apache.storm.hdfs.blobstore;
 
-import backtype.storm.Config;
-import backtype.storm.blobstore.AtomicOutputStream;
-import backtype.storm.blobstore.BlobStore;
-import backtype.storm.blobstore.BlobStoreAclHandler;
-import backtype.storm.generated.AccessControl;
-import backtype.storm.generated.AuthorizationException;
-import backtype.storm.generated.KeyAlreadyExistsException;
-import backtype.storm.generated.KeyNotFoundException;
-import backtype.storm.generated.ReadableBlobMeta;
-import backtype.storm.generated.SettableBlobMeta;
-import backtype.storm.generated.AccessControlType;
-
-import backtype.storm.security.auth.NimbusPrincipal;
-import backtype.storm.security.auth.SingleUserPrincipal;
-import backtype.storm.utils.Utils;
+import org.apache.storm.Config;
+import org.apache.storm.blobstore.AtomicOutputStream;
+import org.apache.storm.blobstore.BlobStore;
+import org.apache.storm.blobstore.BlobStoreAclHandler;
+import org.apache.storm.generated.AccessControl;
+import org.apache.storm.generated.AuthorizationException;
+import org.apache.storm.generated.KeyAlreadyExistsException;
+import org.apache.storm.generated.KeyNotFoundException;
+import org.apache.storm.generated.ReadableBlobMeta;
+import org.apache.storm.generated.SettableBlobMeta;
+import org.apache.storm.generated.AccessControlType;
+
+import org.apache.storm.security.auth.NimbusPrincipal;
+import org.apache.storm.security.auth.SingleUserPrincipal;
+import org.apache.storm.utils.Utils;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -172,7 +172,7 @@ public class BlobStoreTest {
     }
     Map conf = new HashMap();
     conf.put(Config.BLOBSTORE_DIR, dirName);
-    conf.put(Config.STORM_PRINCIPAL_TO_LOCAL_PLUGIN,"backtype.storm.security.auth.DefaultPrincipalToLocal");
+    conf.put(Config.STORM_PRINCIPAL_TO_LOCAL_PLUGIN,"org.apache.storm.security.auth.DefaultPrincipalToLocal");
     conf.put(Config.STORM_BLOBSTORE_REPLICATION_FACTOR, 3);
     HdfsBlobStore store = new HdfsBlobStore();
     store.prepareInternal(conf, null, dfscluster.getConfiguration(0));

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/blobstore/HdfsBlobStoreImplTest.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/blobstore/HdfsBlobStoreImplTest.java b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/blobstore/HdfsBlobStoreImplTest.java
index e7bcfc0..c49c44b 100644
--- a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/blobstore/HdfsBlobStoreImplTest.java
+++ b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/blobstore/HdfsBlobStoreImplTest.java
@@ -17,8 +17,8 @@
  */
 package org.apache.storm.hdfs.blobstore;
 
-import backtype.storm.blobstore.BlobStoreFile;
-import backtype.storm.generated.SettableBlobMeta;
+import org.apache.storm.blobstore.BlobStoreFile;
+import org.apache.storm.generated.SettableBlobMeta;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -229,4 +229,4 @@ public class HdfsBlobStoreImplTest {
         ios.close();
         assertEquals(testString.getBytes(Charset.forName("UTF-8")).length, pfile.getFileLength());
     }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/AvroGenericRecordBoltTest.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/AvroGenericRecordBoltTest.java b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/AvroGenericRecordBoltTest.java
index 37ba27a..8ff05bc 100644
--- a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/AvroGenericRecordBoltTest.java
+++ b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/AvroGenericRecordBoltTest.java
@@ -17,15 +17,15 @@
  */
 package org.apache.storm.hdfs.bolt;
 
-import backtype.storm.Config;
-import backtype.storm.task.GeneralTopologyContext;
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.TupleImpl;
-import backtype.storm.tuple.Values;
+import org.apache.storm.Config;
+import org.apache.storm.task.GeneralTopologyContext;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.TupleImpl;
+import org.apache.storm.tuple.Values;
 import org.apache.avro.Schema;
 import org.apache.avro.generic.GenericData;
 import org.apache.avro.generic.GenericDatumReader;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/HdfsFileTopology.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/HdfsFileTopology.java b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/HdfsFileTopology.java
index 32f1f2d..7409cc4 100644
--- a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/HdfsFileTopology.java
+++ b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/HdfsFileTopology.java
@@ -17,19 +17,19 @@
  */
 package org.apache.storm.hdfs.bolt;
 
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.StormSubmitter;
-import backtype.storm.spout.SpoutOutputCollector;
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.topology.base.BaseRichBolt;
-import backtype.storm.topology.base.BaseRichSpout;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.spout.SpoutOutputCollector;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.topology.base.BaseRichBolt;
+import org.apache.storm.topology.base.BaseRichSpout;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
 import org.apache.storm.hdfs.bolt.format.DefaultFileNameFormat;
 import org.apache.storm.hdfs.bolt.format.DelimitedRecordFormat;
 import org.apache.storm.hdfs.bolt.format.FileNameFormat;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/SequenceFileTopology.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/SequenceFileTopology.java b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/SequenceFileTopology.java
index 2351cd3..1c10f10 100644
--- a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/SequenceFileTopology.java
+++ b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/SequenceFileTopology.java
@@ -17,19 +17,19 @@
  */
 package org.apache.storm.hdfs.bolt;
 
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.StormSubmitter;
-import backtype.storm.spout.SpoutOutputCollector;
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.topology.base.BaseRichBolt;
-import backtype.storm.topology.base.BaseRichSpout;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.spout.SpoutOutputCollector;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.topology.base.BaseRichBolt;
+import org.apache.storm.topology.base.BaseRichSpout;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
 import org.apache.storm.hdfs.bolt.format.*;
 import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy;
 import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/TestHdfsBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/TestHdfsBolt.java b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/TestHdfsBolt.java
index 2f2014c..ecbad8a 100644
--- a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/TestHdfsBolt.java
+++ b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/TestHdfsBolt.java
@@ -17,16 +17,16 @@
  */
 package org.apache.storm.hdfs.bolt;
 
-import backtype.storm.Config;
-import backtype.storm.task.GeneralTopologyContext;
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.TupleImpl;
-import backtype.storm.tuple.Values;
-import backtype.storm.utils.MockTupleHelpers;
+import org.apache.storm.Config;
+import org.apache.storm.task.GeneralTopologyContext;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.TupleImpl;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.utils.MockTupleHelpers;
 import org.apache.storm.hdfs.bolt.format.DefaultFileNameFormat;
 import org.apache.storm.hdfs.bolt.format.DelimitedRecordFormat;
 import org.apache.storm.hdfs.bolt.format.FileNameFormat;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/TestSequenceFileBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/TestSequenceFileBolt.java b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/TestSequenceFileBolt.java
index 5c760ef..870d4ca 100644
--- a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/TestSequenceFileBolt.java
+++ b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/bolt/TestSequenceFileBolt.java
@@ -17,16 +17,16 @@
  */
 package org.apache.storm.hdfs.bolt;
 
-import backtype.storm.Config;
-import backtype.storm.Constants;
-import backtype.storm.task.GeneralTopologyContext;
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.TupleImpl;
-import backtype.storm.tuple.Values;
+import org.apache.storm.Config;
+import org.apache.storm.Constants;
+import org.apache.storm.task.GeneralTopologyContext;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.TupleImpl;
+import org.apache.storm.tuple.Values;
 import org.apache.storm.hdfs.bolt.format.DefaultFileNameFormat;
 import org.apache.storm.hdfs.bolt.format.FileNameFormat;
 import org.apache.storm.hdfs.bolt.format.SequenceFormat;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/trident/FixedBatchSpout.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/trident/FixedBatchSpout.java b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/trident/FixedBatchSpout.java
index 1c9a336..76cc2aa 100644
--- a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/trident/FixedBatchSpout.java
+++ b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/trident/FixedBatchSpout.java
@@ -17,11 +17,11 @@
  */
 package org.apache.storm.hdfs.trident;
 
-import backtype.storm.Config;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.tuple.Fields;
-import storm.trident.operation.TridentCollector;
-import storm.trident.spout.IBatchSpout;
+import org.apache.storm.Config;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.spout.IBatchSpout;
 
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -94,4 +94,4 @@ public class FixedBatchSpout implements IBatchSpout {
     public Fields getOutputFields() {
         return fields;
     }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/trident/HdfsStateTest.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/trident/HdfsStateTest.java b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/trident/HdfsStateTest.java
index 51869d2..4480441 100644
--- a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/trident/HdfsStateTest.java
+++ b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/trident/HdfsStateTest.java
@@ -17,8 +17,8 @@
  */
 package org.apache.storm.hdfs.trident;
 
-import backtype.storm.Config;
-import backtype.storm.tuple.Fields;
+import org.apache.storm.Config;
+import org.apache.storm.tuple.Fields;
 import org.apache.commons.io.FileUtils;
 import org.apache.storm.hdfs.trident.format.DelimitedRecordFormat;
 import org.apache.storm.hdfs.trident.format.FileNameFormat;
@@ -28,7 +28,7 @@ import org.apache.storm.hdfs.trident.rotation.FileSizeRotationPolicy;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.io.File;
 import java.io.IOException;
@@ -220,4 +220,4 @@ public class HdfsStateTest {
         Assert.assertNotEquals(preReplayCount, lines.size());
         Assert.assertEquals(expectedTupleCount, lines.size());
     }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/trident/TridentFileTopology.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/trident/TridentFileTopology.java b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/trident/TridentFileTopology.java
index b0a22df..b66eced 100644
--- a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/trident/TridentFileTopology.java
+++ b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/trident/TridentFileTopology.java
@@ -17,23 +17,23 @@
  */
 package org.apache.storm.hdfs.trident;
 
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.StormSubmitter;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
 import org.apache.storm.hdfs.common.rotation.MoveFileAction;
 import org.apache.storm.hdfs.trident.format.*;
 import org.apache.storm.hdfs.trident.rotation.FileRotationPolicy;
 import org.apache.storm.hdfs.trident.rotation.FileSizeRotationPolicy;
-import storm.trident.Stream;
-import storm.trident.TridentState;
-import storm.trident.TridentTopology;
-import storm.trident.operation.BaseFunction;
-import storm.trident.operation.TridentCollector;
-import storm.trident.state.StateFactory;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.Stream;
+import org.apache.storm.trident.TridentState;
+import org.apache.storm.trident.TridentTopology;
+import org.apache.storm.trident.operation.BaseFunction;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.state.StateFactory;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 public class TridentFileTopology {
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/trident/TridentSequenceTopology.java
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/trident/TridentSequenceTopology.java b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/trident/TridentSequenceTopology.java
index 711811e..48f1950 100644
--- a/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/trident/TridentSequenceTopology.java
+++ b/external/storm-hdfs/src/test/java/org/apache/storm/hdfs/trident/TridentSequenceTopology.java
@@ -17,23 +17,23 @@
  */
 package org.apache.storm.hdfs.trident;
 
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.StormSubmitter;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
 import org.apache.storm.hdfs.common.rotation.MoveFileAction;
 import org.apache.storm.hdfs.trident.format.*;
 import org.apache.storm.hdfs.trident.rotation.FileRotationPolicy;
 import org.apache.storm.hdfs.trident.rotation.FileSizeRotationPolicy;
-import storm.trident.Stream;
-import storm.trident.TridentState;
-import storm.trident.TridentTopology;
-import storm.trident.operation.BaseFunction;
-import storm.trident.operation.TridentCollector;
-import storm.trident.state.StateFactory;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.Stream;
+import org.apache.storm.trident.TridentState;
+import org.apache.storm.trident.TridentTopology;
+import org.apache.storm.trident.operation.BaseFunction;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.state.StateFactory;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 public class TridentSequenceTopology {
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hive/src/main/java/org/apache/storm/hive/bolt/HiveBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-hive/src/main/java/org/apache/storm/hive/bolt/HiveBolt.java b/external/storm-hive/src/main/java/org/apache/storm/hive/bolt/HiveBolt.java
index 35407e3..8b3362b 100644
--- a/external/storm-hive/src/main/java/org/apache/storm/hive/bolt/HiveBolt.java
+++ b/external/storm-hive/src/main/java/org/apache/storm/hive/bolt/HiveBolt.java
@@ -18,13 +18,13 @@
 
 package org.apache.storm.hive.bolt;
 
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.topology.base.BaseRichBolt;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.utils.TupleUtils;
-import backtype.storm.Config;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.topology.base.BaseRichBolt;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.utils.TupleUtils;
+import org.apache.storm.Config;
 import org.apache.storm.hive.common.HiveWriter;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hive.hcatalog.streaming.*;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hive/src/main/java/org/apache/storm/hive/bolt/mapper/DelimitedRecordHiveMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-hive/src/main/java/org/apache/storm/hive/bolt/mapper/DelimitedRecordHiveMapper.java b/external/storm-hive/src/main/java/org/apache/storm/hive/bolt/mapper/DelimitedRecordHiveMapper.java
index d516795..958080f 100644
--- a/external/storm-hive/src/main/java/org/apache/storm/hive/bolt/mapper/DelimitedRecordHiveMapper.java
+++ b/external/storm-hive/src/main/java/org/apache/storm/hive/bolt/mapper/DelimitedRecordHiveMapper.java
@@ -18,9 +18,9 @@
 package org.apache.storm.hive.bolt.mapper;
 
 
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.trident.tuple.TridentTuple;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hive.hcatalog.streaming.DelimitedInputWriter;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hive/src/main/java/org/apache/storm/hive/bolt/mapper/HiveMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-hive/src/main/java/org/apache/storm/hive/bolt/mapper/HiveMapper.java b/external/storm-hive/src/main/java/org/apache/storm/hive/bolt/mapper/HiveMapper.java
index a3b5531..db45af2 100644
--- a/external/storm-hive/src/main/java/org/apache/storm/hive/bolt/mapper/HiveMapper.java
+++ b/external/storm-hive/src/main/java/org/apache/storm/hive/bolt/mapper/HiveMapper.java
@@ -18,8 +18,8 @@
 package org.apache.storm.hive.bolt.mapper;
 
 
-import backtype.storm.tuple.Tuple;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.trident.tuple.TridentTuple;
 import java.util.List;
 import org.apache.hive.hcatalog.streaming.HiveEndPoint;
 import org.apache.hive.hcatalog.streaming.RecordWriter;
@@ -30,7 +30,7 @@ import java.io.Serializable;
 import java.io.IOException;
 
 /**
- * Maps a <code>backtype.storm.tuple.Tupe</code> object
+ * Maps a <code>org.apache.storm.tuple.Tupe</code> object
  * to a row in an Hive table.
  */
 public interface HiveMapper extends Serializable {
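
Since HiveMapper now maps an org.apache.storm.tuple.Tuple to a Hive row, a minimal sketch of a mapper configuration using the migrated imports; the column and partition field names are hypothetical.

import org.apache.storm.hive.bolt.mapper.DelimitedRecordHiveMapper;
import org.apache.storm.tuple.Fields;

public class HiveMapperSketch {
    // Map the tuple fields "id" and "name" to Hive columns, partitioned by "city".
    public static DelimitedRecordHiveMapper mapper() {
        return new DelimitedRecordHiveMapper()
                .withColumnFields(new Fields("id", "name"))
                .withPartitionFields(new Fields("city"));
    }
}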

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hive/src/main/java/org/apache/storm/hive/bolt/mapper/JsonRecordHiveMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-hive/src/main/java/org/apache/storm/hive/bolt/mapper/JsonRecordHiveMapper.java b/external/storm-hive/src/main/java/org/apache/storm/hive/bolt/mapper/JsonRecordHiveMapper.java
index ce3e475..a391fc6 100644
--- a/external/storm-hive/src/main/java/org/apache/storm/hive/bolt/mapper/JsonRecordHiveMapper.java
+++ b/external/storm-hive/src/main/java/org/apache/storm/hive/bolt/mapper/JsonRecordHiveMapper.java
@@ -18,9 +18,9 @@
 package org.apache.storm.hive.bolt.mapper;
 
 
-import backtype.storm.tuple.Fields;
-import storm.trident.tuple.TridentTuple;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.trident.tuple.TridentTuple;
+import org.apache.storm.tuple.Tuple;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hive/src/main/java/org/apache/storm/hive/common/HiveWriter.java
----------------------------------------------------------------------
diff --git a/external/storm-hive/src/main/java/org/apache/storm/hive/common/HiveWriter.java b/external/storm-hive/src/main/java/org/apache/storm/hive/common/HiveWriter.java
index 7fc33f2..233fec0 100644
--- a/external/storm-hive/src/main/java/org/apache/storm/hive/common/HiveWriter.java
+++ b/external/storm-hive/src/main/java/org/apache/storm/hive/common/HiveWriter.java
@@ -31,7 +31,7 @@ import java.util.concurrent.TimeoutException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hive.hcatalog.streaming.*;
 import org.apache.storm.hive.bolt.mapper.HiveMapper;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.tuple.Tuple;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hive/src/main/java/org/apache/storm/hive/trident/HiveState.java
----------------------------------------------------------------------
diff --git a/external/storm-hive/src/main/java/org/apache/storm/hive/trident/HiveState.java b/external/storm-hive/src/main/java/org/apache/storm/hive/trident/HiveState.java
index 6050aa8..dd296e4 100644
--- a/external/storm-hive/src/main/java/org/apache/storm/hive/trident/HiveState.java
+++ b/external/storm-hive/src/main/java/org/apache/storm/hive/trident/HiveState.java
@@ -17,11 +17,11 @@
  */
 package org.apache.storm.hive.trident;
 
-import storm.trident.operation.TridentCollector;
-import storm.trident.state.State;
-import storm.trident.tuple.TridentTuple;
-import backtype.storm.task.IMetricsContext;
-import backtype.storm.topology.FailedException;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.state.State;
+import org.apache.storm.trident.tuple.TridentTuple;
+import org.apache.storm.task.IMetricsContext;
+import org.apache.storm.topology.FailedException;
 import org.apache.storm.hive.common.HiveWriter;
 import org.apache.storm.hive.common.HiveWriter;
 import org.apache.hive.hcatalog.streaming.*;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hive/src/main/java/org/apache/storm/hive/trident/HiveStateFactory.java
----------------------------------------------------------------------
diff --git a/external/storm-hive/src/main/java/org/apache/storm/hive/trident/HiveStateFactory.java b/external/storm-hive/src/main/java/org/apache/storm/hive/trident/HiveStateFactory.java
index 982ce03..7e0e1f2 100644
--- a/external/storm-hive/src/main/java/org/apache/storm/hive/trident/HiveStateFactory.java
+++ b/external/storm-hive/src/main/java/org/apache/storm/hive/trident/HiveStateFactory.java
@@ -17,11 +17,11 @@
  */
 package org.apache.storm.hive.trident;
 
-import backtype.storm.task.IMetricsContext;
+import org.apache.storm.task.IMetricsContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import storm.trident.state.State;
-import storm.trident.state.StateFactory;
+import org.apache.storm.trident.state.State;
+import org.apache.storm.trident.state.StateFactory;
 import org.apache.storm.hive.common.HiveOptions;
 
 import java.util.Map;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hive/src/main/java/org/apache/storm/hive/trident/HiveUpdater.java
----------------------------------------------------------------------
diff --git a/external/storm-hive/src/main/java/org/apache/storm/hive/trident/HiveUpdater.java b/external/storm-hive/src/main/java/org/apache/storm/hive/trident/HiveUpdater.java
index f4c2a9a..82cfc15 100644
--- a/external/storm-hive/src/main/java/org/apache/storm/hive/trident/HiveUpdater.java
+++ b/external/storm-hive/src/main/java/org/apache/storm/hive/trident/HiveUpdater.java
@@ -17,9 +17,9 @@
  */
 package org.apache.storm.hive.trident;
 
-import storm.trident.operation.TridentCollector;
-import storm.trident.state.BaseStateUpdater;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.state.BaseStateUpdater;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hive/src/test/java/org/apache/storm/hive/bolt/HiveTopology.java
----------------------------------------------------------------------
diff --git a/external/storm-hive/src/test/java/org/apache/storm/hive/bolt/HiveTopology.java b/external/storm-hive/src/test/java/org/apache/storm/hive/bolt/HiveTopology.java
index e9ecbd0..1132587 100644
--- a/external/storm-hive/src/test/java/org/apache/storm/hive/bolt/HiveTopology.java
+++ b/external/storm-hive/src/test/java/org/apache/storm/hive/bolt/HiveTopology.java
@@ -18,16 +18,16 @@
 
 package org.apache.storm.hive.bolt;
 
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.StormSubmitter;
-import backtype.storm.spout.SpoutOutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.topology.base.BaseRichSpout;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.spout.SpoutOutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.topology.base.BaseRichSpout;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
 
 import org.apache.storm.hive.bolt.mapper.DelimitedRecordHiveMapper;
 import org.apache.storm.hive.common.HiveOptions;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hive/src/test/java/org/apache/storm/hive/bolt/HiveTopologyPartitioned.java
----------------------------------------------------------------------
diff --git a/external/storm-hive/src/test/java/org/apache/storm/hive/bolt/HiveTopologyPartitioned.java b/external/storm-hive/src/test/java/org/apache/storm/hive/bolt/HiveTopologyPartitioned.java
index c3197c2..a52c490 100644
--- a/external/storm-hive/src/test/java/org/apache/storm/hive/bolt/HiveTopologyPartitioned.java
+++ b/external/storm-hive/src/test/java/org/apache/storm/hive/bolt/HiveTopologyPartitioned.java
@@ -18,17 +18,17 @@
 
 package org.apache.storm.hive.bolt;
 
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.StormSubmitter;
-import backtype.storm.spout.SpoutOutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.topology.base.BaseRichSpout;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
-import backtype.storm.utils.Utils;
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.spout.SpoutOutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.topology.base.BaseRichSpout;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.utils.Utils;
 
 import org.apache.storm.hive.bolt.mapper.DelimitedRecordHiveMapper;
 import org.apache.storm.hive.common.HiveOptions;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hive/src/test/java/org/apache/storm/hive/bolt/TestHiveBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-hive/src/test/java/org/apache/storm/hive/bolt/TestHiveBolt.java b/external/storm-hive/src/test/java/org/apache/storm/hive/bolt/TestHiveBolt.java
index 159beda..ead2c8d 100644
--- a/external/storm-hive/src/test/java/org/apache/storm/hive/bolt/TestHiveBolt.java
+++ b/external/storm-hive/src/test/java/org/apache/storm/hive/bolt/TestHiveBolt.java
@@ -18,15 +18,15 @@
 
 package org.apache.storm.hive.bolt;
 
-import backtype.storm.Config;
-import backtype.storm.task.GeneralTopologyContext;
-import backtype.storm.task.OutputCollector;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.TupleImpl;
-import backtype.storm.tuple.Values;
-import backtype.storm.utils.MockTupleHelpers;
+import org.apache.storm.Config;
+import org.apache.storm.task.GeneralTopologyContext;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.TupleImpl;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.utils.MockTupleHelpers;
 
 import org.apache.storm.hive.common.HiveOptions;
 import org.apache.storm.hive.bolt.mapper.DelimitedRecordHiveMapper;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hive/src/test/java/org/apache/storm/hive/common/TestHiveWriter.java
----------------------------------------------------------------------
diff --git a/external/storm-hive/src/test/java/org/apache/storm/hive/common/TestHiveWriter.java b/external/storm-hive/src/test/java/org/apache/storm/hive/common/TestHiveWriter.java
index 63b1949..952b0fb 100644
--- a/external/storm-hive/src/test/java/org/apache/storm/hive/common/TestHiveWriter.java
+++ b/external/storm-hive/src/test/java/org/apache/storm/hive/common/TestHiveWriter.java
@@ -36,13 +36,13 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
 
-import backtype.storm.Config;
-import backtype.storm.task.GeneralTopologyContext;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.TupleImpl;
-import backtype.storm.tuple.Values;
+import org.apache.storm.Config;
+import org.apache.storm.task.GeneralTopologyContext;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.TupleImpl;
+import org.apache.storm.tuple.Values;
 import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
 
 import java.io.IOException;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-hive/src/test/java/org/apache/storm/hive/trident/TridentHiveTopology.java
----------------------------------------------------------------------
diff --git a/external/storm-hive/src/test/java/org/apache/storm/hive/trident/TridentHiveTopology.java b/external/storm-hive/src/test/java/org/apache/storm/hive/trident/TridentHiveTopology.java
index a208502..10921bc 100644
--- a/external/storm-hive/src/test/java/org/apache/storm/hive/trident/TridentHiveTopology.java
+++ b/external/storm-hive/src/test/java/org/apache/storm/hive/trident/TridentHiveTopology.java
@@ -22,25 +22,25 @@ package org.apache.storm.hive.trident;
 import org.apache.storm.hive.bolt.mapper.DelimitedRecordHiveMapper;
 import org.apache.storm.hive.common.HiveOptions;
 
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.StormSubmitter;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
-import backtype.storm.task.TopologyContext;
-import storm.trident.operation.TridentCollector;
-import storm.trident.spout.IBatchSpout;
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.spout.IBatchSpout;
 
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import storm.trident.Stream;
-import storm.trident.TridentState;
-import storm.trident.TridentTopology;
-import storm.trident.state.StateFactory;
+import org.apache.storm.trident.Stream;
+import org.apache.storm.trident.TridentState;
+import org.apache.storm.trident.TridentTopology;
+import org.apache.storm.trident.state.StateFactory;
 
 
 public class TridentHiveTopology {

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/bolt/AbstractJdbcBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/bolt/AbstractJdbcBolt.java b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/bolt/AbstractJdbcBolt.java
index 0c0cca6..a09c73c 100644
--- a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/bolt/AbstractJdbcBolt.java
+++ b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/bolt/AbstractJdbcBolt.java
@@ -17,10 +17,10 @@
  */
 package org.apache.storm.jdbc.bolt;
 
-import backtype.storm.Config;
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.base.BaseRichBolt;
+import org.apache.storm.Config;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.base.BaseRichBolt;
 import org.apache.commons.lang.Validate;
 import org.apache.storm.jdbc.common.ConnectionProvider;
 import org.apache.storm.jdbc.common.JdbcClient;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/bolt/JdbcInsertBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/bolt/JdbcInsertBolt.java b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/bolt/JdbcInsertBolt.java
index c3328f1..b93a1eb 100644
--- a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/bolt/JdbcInsertBolt.java
+++ b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/bolt/JdbcInsertBolt.java
@@ -16,10 +16,10 @@
  * limitations under the License.
  */
 package org.apache.storm.jdbc.bolt;
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Tuple;
 import org.apache.commons.lang.Validate;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.storm.jdbc.common.Column;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/bolt/JdbcLookupBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/bolt/JdbcLookupBolt.java b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/bolt/JdbcLookupBolt.java
index b1dadb7..7224786 100644
--- a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/bolt/JdbcLookupBolt.java
+++ b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/bolt/JdbcLookupBolt.java
@@ -17,9 +17,9 @@
  */
 package org.apache.storm.jdbc.bolt;
 
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
 import org.apache.commons.lang.Validate;
 import org.apache.storm.jdbc.common.Column;
 import org.apache.storm.jdbc.common.ConnectionProvider;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/mapper/JdbcLookupMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/mapper/JdbcLookupMapper.java b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/mapper/JdbcLookupMapper.java
index 0660a4c..462bee9 100644
--- a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/mapper/JdbcLookupMapper.java
+++ b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/mapper/JdbcLookupMapper.java
@@ -17,9 +17,9 @@
  */
 package org.apache.storm.jdbc.mapper;
 
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.ITuple;
-import backtype.storm.tuple.Values;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.ITuple;
+import org.apache.storm.tuple.Values;
 import org.apache.storm.jdbc.common.Column;
 
 import java.util.List;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/mapper/JdbcMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/mapper/JdbcMapper.java b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/mapper/JdbcMapper.java
index c8c80bc..b18af32 100644
--- a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/mapper/JdbcMapper.java
+++ b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/mapper/JdbcMapper.java
@@ -17,7 +17,7 @@
  */
 package org.apache.storm.jdbc.mapper;
 
-import backtype.storm.tuple.ITuple;
+import org.apache.storm.tuple.ITuple;
 import org.apache.storm.jdbc.common.Column;
 
 import java.io.Serializable;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/mapper/SimpleJdbcLookupMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/mapper/SimpleJdbcLookupMapper.java b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/mapper/SimpleJdbcLookupMapper.java
index b267bd1..7fadbe9 100644
--- a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/mapper/SimpleJdbcLookupMapper.java
+++ b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/mapper/SimpleJdbcLookupMapper.java
@@ -18,10 +18,10 @@
 package org.apache.storm.jdbc.mapper;
 
 
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.ITuple;
-import backtype.storm.tuple.Values;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.ITuple;
+import org.apache.storm.tuple.Values;
 import org.apache.commons.lang.Validate;
 import org.apache.storm.jdbc.common.Column;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/mapper/SimpleJdbcMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/mapper/SimpleJdbcMapper.java b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/mapper/SimpleJdbcMapper.java
index 9befb1e..d93c29f 100644
--- a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/mapper/SimpleJdbcMapper.java
+++ b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/mapper/SimpleJdbcMapper.java
@@ -17,7 +17,7 @@
  */
 package org.apache.storm.jdbc.mapper;
 
-import backtype.storm.tuple.ITuple;
+import org.apache.storm.tuple.ITuple;
 import org.apache.commons.lang.Validate;
 import org.apache.storm.jdbc.common.Column;
 import org.apache.storm.jdbc.common.ConnectionProvider;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/trident/state/JdbcQuery.java
----------------------------------------------------------------------
diff --git a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/trident/state/JdbcQuery.java b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/trident/state/JdbcQuery.java
index ad39f4b..8d014cd 100644
--- a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/trident/state/JdbcQuery.java
+++ b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/trident/state/JdbcQuery.java
@@ -17,10 +17,10 @@
  */
 package org.apache.storm.jdbc.trident.state;
 
-import backtype.storm.tuple.Values;
-import storm.trident.operation.TridentCollector;
-import storm.trident.state.BaseQueryFunction;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.state.BaseQueryFunction;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/trident/state/JdbcState.java
----------------------------------------------------------------------
diff --git a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/trident/state/JdbcState.java b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/trident/state/JdbcState.java
index 27c911c..814b20b 100644
--- a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/trident/state/JdbcState.java
+++ b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/trident/state/JdbcState.java
@@ -17,9 +17,9 @@
  */
 package org.apache.storm.jdbc.trident.state;
 
-import backtype.storm.Config;
-import backtype.storm.topology.FailedException;
-import backtype.storm.tuple.Values;
+import org.apache.storm.Config;
+import org.apache.storm.topology.FailedException;
+import org.apache.storm.tuple.Values;
 import com.google.common.collect.Lists;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.storm.jdbc.common.Column;
@@ -29,9 +29,9 @@ import org.apache.storm.jdbc.mapper.JdbcMapper;
 import org.apache.storm.jdbc.mapper.JdbcLookupMapper;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import storm.trident.operation.TridentCollector;
-import storm.trident.state.State;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.state.State;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.io.Serializable;
 import java.util.ArrayList;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/trident/state/JdbcStateFactory.java
----------------------------------------------------------------------
diff --git a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/trident/state/JdbcStateFactory.java b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/trident/state/JdbcStateFactory.java
index a1bbdef..0295748 100644
--- a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/trident/state/JdbcStateFactory.java
+++ b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/trident/state/JdbcStateFactory.java
@@ -17,9 +17,9 @@
  */
 package org.apache.storm.jdbc.trident.state;
 
-import backtype.storm.task.IMetricsContext;
-import storm.trident.state.State;
-import storm.trident.state.StateFactory;
+import org.apache.storm.task.IMetricsContext;
+import org.apache.storm.trident.state.State;
+import org.apache.storm.trident.state.StateFactory;
 
 import java.util.Map;
 


http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/daemon/nimbus.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/daemon/nimbus.clj b/storm-core/src/clj/backtype/storm/daemon/nimbus.clj
deleted file mode 100644
index b68f844..0000000
--- a/storm-core/src/clj/backtype/storm/daemon/nimbus.clj
+++ /dev/null
@@ -1,2259 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.daemon.nimbus
-  (:import [org.apache.thrift.server THsHaServer THsHaServer$Args])
-  (:import [backtype.storm.generated KeyNotFoundException])
-  (:import [backtype.storm.blobstore LocalFsBlobStore])
-  (:import [org.apache.thrift.protocol TBinaryProtocol TBinaryProtocol$Factory])
-  (:import [org.apache.thrift.exception])
-  (:import [org.apache.thrift.transport TNonblockingServerTransport TNonblockingServerSocket])
-  (:import [org.apache.commons.io FileUtils])
-  (:import [javax.security.auth Subject])
-  (:import [backtype.storm.security.auth NimbusPrincipal])
-  (:import [java.nio ByteBuffer]
-           [java.util Collections List HashMap]
-           [backtype.storm.generated NimbusSummary])
-  (:import [java.nio ByteBuffer]
-           [java.util Collections List HashMap ArrayList Iterator])
-  (:import [backtype.storm.blobstore AtomicOutputStream BlobStoreAclHandler
-            InputStreamWithMeta KeyFilter KeySequenceNumber BlobSynchronizer])
-  (:import [java.io File FileOutputStream FileInputStream])
-  (:import [java.net InetAddress ServerSocket BindException])
-  (:import [java.nio.channels Channels WritableByteChannel])
-  (:import [backtype.storm.security.auth ThriftServer ThriftConnectionType ReqContext AuthUtils])
-  (:use [backtype.storm.scheduler.DefaultScheduler])
-  (:import [backtype.storm.scheduler INimbus SupervisorDetails WorkerSlot TopologyDetails
-            Cluster Topologies SchedulerAssignment SchedulerAssignmentImpl DefaultScheduler ExecutorDetails])
-  (:import [backtype.storm.nimbus NimbusInfo])
-  (:import [backtype.storm.utils TimeCacheMap TimeCacheMap$ExpiredCallback Utils TupleUtils ThriftTopologyUtils
-            BufferFileInputStream BufferInputStream])
-  (:import [backtype.storm.generated NotAliveException AlreadyAliveException StormTopology ErrorInfo
-            ExecutorInfo InvalidTopologyException Nimbus$Iface Nimbus$Processor SubmitOptions TopologyInitialStatus
-            KillOptions RebalanceOptions ClusterSummary SupervisorSummary TopologySummary TopologyInfo TopologyHistoryInfo
-            ExecutorSummary AuthorizationException GetInfoOptions NumErrorsChoice SettableBlobMeta ReadableBlobMeta
-            BeginDownloadResult ListBlobsResult ComponentPageInfo TopologyPageInfo LogConfig LogLevel LogLevelAction
-            ProfileRequest ProfileAction NodeInfo])
-  (:import [backtype.storm.daemon Shutdownable])
-  (:import [backtype.storm.cluster ClusterStateContext DaemonType])
-  (:use [backtype.storm util config log timer zookeeper local-state])
-  (:require [backtype.storm [cluster :as cluster]
-                            [converter :as converter]
-                            [stats :as stats]])
-  (:require [clojure.set :as set])
-  (:import [backtype.storm.daemon.common StormBase Assignment])
-  (:use [backtype.storm.daemon common])
-  (:use [backtype.storm config])
-  (:import [org.apache.zookeeper data.ACL ZooDefs$Ids ZooDefs$Perms])
-  (:import [backtype.storm.utils VersionInfo])
-  (:require [clj-time.core :as time])
-  (:require [clj-time.coerce :as coerce])
-  (:require [metrics.meters :refer [defmeter mark!]])
-  (:require [metrics.gauges :refer [defgauge]])
-  (:gen-class
-    :methods [^{:static true} [launch [backtype.storm.scheduler.INimbus] void]]))
-
-(defmeter nimbus:num-submitTopologyWithOpts-calls)
-(defmeter nimbus:num-submitTopology-calls)
-(defmeter nimbus:num-killTopologyWithOpts-calls)
-(defmeter nimbus:num-killTopology-calls)
-(defmeter nimbus:num-rebalance-calls)
-(defmeter nimbus:num-activate-calls)
-(defmeter nimbus:num-deactivate-calls)
-(defmeter nimbus:num-debug-calls)
-(defmeter nimbus:num-setWorkerProfiler-calls)
-(defmeter nimbus:num-getComponentPendingProfileActions-calls)
-(defmeter nimbus:num-setLogConfig-calls)
-(defmeter nimbus:num-uploadNewCredentials-calls)
-(defmeter nimbus:num-beginFileUpload-calls)
-(defmeter nimbus:num-uploadChunk-calls)
-(defmeter nimbus:num-finishFileUpload-calls)
-(defmeter nimbus:num-beginFileDownload-calls)
-(defmeter nimbus:num-downloadChunk-calls)
-(defmeter nimbus:num-getNimbusConf-calls)
-(defmeter nimbus:num-getLogConfig-calls)
-(defmeter nimbus:num-getTopologyConf-calls)
-(defmeter nimbus:num-getTopology-calls)
-(defmeter nimbus:num-getUserTopology-calls)
-(defmeter nimbus:num-getClusterInfo-calls)
-(defmeter nimbus:num-getTopologyInfoWithOpts-calls)
-(defmeter nimbus:num-getTopologyInfo-calls)
-(defmeter nimbus:num-getTopologyPageInfo-calls)
-(defmeter nimbus:num-getComponentPageInfo-calls)
-(defmeter nimbus:num-shutdown-calls)
-
-(def STORM-VERSION (VersionInfo/getVersion))
-
-(defn file-cache-map [conf]
-  (TimeCacheMap.
-   (int (conf NIMBUS-FILE-COPY-EXPIRATION-SECS))
-   (reify TimeCacheMap$ExpiredCallback
-          (expire [this id stream]
-                  (.close stream)
-                  ))
-   ))
-
-(defn mk-scheduler [conf inimbus]
-  (let [forced-scheduler (.getForcedScheduler inimbus)
-        scheduler (cond
-                    forced-scheduler
-                    (do (log-message "Using forced scheduler from INimbus " (class forced-scheduler))
-                        forced-scheduler)
-
-                    (conf STORM-SCHEDULER)
-                    (do (log-message "Using custom scheduler: " (conf STORM-SCHEDULER))
-                        (-> (conf STORM-SCHEDULER) new-instance))
-
-                    :else
-                    (do (log-message "Using default scheduler")
-                        (DefaultScheduler.)))]
-    (.prepare scheduler conf)
-    scheduler
-    ))
-
-(defmulti blob-sync cluster-mode)
-
-(defnk is-leader [nimbus :throw-exception true]
-  (let [leader-elector (:leader-elector nimbus)]
-    (if (.isLeader leader-elector) true
-      (if throw-exception
-        (let [leader-address (.getLeader leader-elector)]
-          (throw (RuntimeException. (str "not a leader, current leader is " leader-address))))))))
-
-(def NIMBUS-ZK-ACLS
-  [(first ZooDefs$Ids/CREATOR_ALL_ACL)
-   (ACL. (bit-or ZooDefs$Perms/READ ZooDefs$Perms/CREATE) ZooDefs$Ids/ANYONE_ID_UNSAFE)])
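-;; i.e. the creator (the nimbus principal) keeps full control of these znodes,
-;; while everyone else may only read them and create children.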
-
-(defn mk-blob-cache-map
-  "Constructs a TimeCacheMap instance with a blob store timeout whose
-  expiration callback invokes cancel on the value held by an expired entry when
-  that value is an AtomicOutputStream and calls close otherwise."
-  [conf]
-  (TimeCacheMap.
-    (int (conf NIMBUS-BLOBSTORE-EXPIRATION-SECS))
-    (reify TimeCacheMap$ExpiredCallback
-      (expire [this id stream]
-        (if (instance? AtomicOutputStream stream)
-          (.cancel stream)
-          (.close stream))))))
-
-(defn mk-bloblist-cache-map
-  "Constructs a TimeCacheMap instance with a blobstore timeout and no callback
-  function."
-  [conf]
-  (TimeCacheMap. (int (conf NIMBUS-BLOBSTORE-EXPIRATION-SECS))))
-
-(defn create-tology-action-notifier [conf]
-  (when-not (clojure.string/blank? (conf NIMBUS-TOPOLOGY-ACTION-NOTIFIER-PLUGIN))
-    (let [instance (new-instance (conf NIMBUS-TOPOLOGY-ACTION-NOTIFIER-PLUGIN))]
-      (try
-        (.prepare instance conf)
-        instance
-        (catch Exception e
-          (log-warn-error e "Ignoring exception; could not initialize " (conf NIMBUS-TOPOLOGY-ACTION-NOTIFIER-PLUGIN)))))))
-
-(defn nimbus-data [conf inimbus]
-  (let [forced-scheduler (.getForcedScheduler inimbus)]
-    {:conf conf
-     :nimbus-host-port-info (NimbusInfo/fromConf conf)
-     :inimbus inimbus
-     :authorization-handler (mk-authorization-handler (conf NIMBUS-AUTHORIZER) conf)
-     :impersonation-authorization-handler (mk-authorization-handler (conf NIMBUS-IMPERSONATION-AUTHORIZER) conf)
-     :submitted-count (atom 0)
-     :storm-cluster-state (cluster/mk-storm-cluster-state conf :acls (when
-                                                                       (Utils/isZkAuthenticationConfiguredStormServer
-                                                                         conf)
-                                                                       NIMBUS-ZK-ACLS)
-                                                          :context (ClusterStateContext. DaemonType/NIMBUS))
-     :submit-lock (Object.)
-     :cred-update-lock (Object.)
-     :log-update-lock (Object.)
-     :heartbeats-cache (atom {})
-     :downloaders (file-cache-map conf)
-     :uploaders (file-cache-map conf)
-     :blob-store (Utils/getNimbusBlobStore conf (NimbusInfo/fromConf conf))
-     :blob-downloaders (mk-blob-cache-map conf)
-     :blob-uploaders (mk-blob-cache-map conf)
-     :blob-listers (mk-bloblist-cache-map conf)
-     :uptime (uptime-computer)
-     :validator (new-instance (conf NIMBUS-TOPOLOGY-VALIDATOR))
-     :timer (mk-timer :kill-fn (fn [t]
-                                 (log-error t "Error when processing event")
-                                 (exit-process! 20 "Error when processing an event")
-                                 ))
-     :scheduler (mk-scheduler conf inimbus)
-     :leader-elector (zk-leader-elector conf)
-     :id->sched-status (atom {})
-     :node-id->resources (atom {}) ;;resources of supervisors
-     :id->resources (atom {}) ;;resources of topologies
-     :cred-renewers (AuthUtils/GetCredentialRenewers conf)
-     :topology-history-lock (Object.)
-     :topo-history-state (nimbus-topo-history-state conf)
-     :nimbus-autocred-plugins (AuthUtils/getNimbusAutoCredPlugins conf)
-     :nimbus-topology-action-notifier (create-tology-action-notifier conf)
-     }))
-
-(defn inbox [nimbus]
-  (master-inbox (:conf nimbus)))
-
-(defn- get-subject
-  []
-  (let [req (ReqContext/context)]
-    (.subject req)))
-
-(defn- read-storm-conf [conf storm-id blob-store]
-  (clojurify-structure
-    (Utils/fromCompressedJsonConf
-      (.readBlob blob-store (master-stormconf-key storm-id) (get-subject)))))
-
-(declare delay-event)
-(declare mk-assignments)
-
-(defn get-nimbus-subject
-  []
-  (let [subject (Subject.)
-        principal (NimbusPrincipal.)
-        principals (.getPrincipals subject)]
-    (.add principals principal)
-    subject))
-
-(def nimbus-subject
-  (get-nimbus-subject))
-
-(defn- get-key-list-from-id
-  [conf id]
-  (log-debug "set keys id = " id "set = " #{(master-stormcode-key id) (master-stormjar-key id) (master-stormconf-key id)})
-  (if (local-mode? conf)
-    [(master-stormcode-key id) (master-stormconf-key id)]
-    [(master-stormcode-key id) (master-stormjar-key id) (master-stormconf-key id)]))
-
-(defn kill-transition [nimbus storm-id]
-  (fn [kill-time]
-    (let [delay (if kill-time
-                  kill-time
-                  (get (read-storm-conf (:conf nimbus) storm-id (:blob-store nimbus))
-                       TOPOLOGY-MESSAGE-TIMEOUT-SECS))]
-      (delay-event nimbus
-                   storm-id
-                   delay
-                   :remove)
-      {
-        :status {:type :killed}
-        :topology-action-options {:delay-secs delay :action :kill}})
-    ))
-
-(defn rebalance-transition [nimbus storm-id status]
-  (fn [time num-workers executor-overrides]
-    (let [delay (if time
-                  time
-                  (get (read-storm-conf (:conf nimbus) storm-id (:blob-store nimbus))
-                       TOPOLOGY-MESSAGE-TIMEOUT-SECS))]
-      (delay-event nimbus
-                   storm-id
-                   delay
-                   :do-rebalance)
-      {:status {:type :rebalancing}
-       :prev-status status
-       :topology-action-options (-> {:delay-secs delay :action :rebalance}
-                                  (assoc-non-nil :num-workers num-workers)
-                                  (assoc-non-nil :component->executors executor-overrides))
-       })))
-
-(defn do-rebalance [nimbus storm-id status storm-base]
-  (let [rebalance-options (:topology-action-options storm-base)]
-    (.update-storm! (:storm-cluster-state nimbus)
-      storm-id
-        (-> {:topology-action-options nil}
-          (assoc-non-nil :component->executors (:component->executors rebalance-options))
-          (assoc-non-nil :num-workers (:num-workers rebalance-options)))))
-  (mk-assignments nimbus :scratch-topology-id storm-id))
-
-(defn state-transitions [nimbus storm-id status storm-base]
-  {:active {:inactivate :inactive
-            :activate nil
-            :rebalance (rebalance-transition nimbus storm-id status)
-            :kill (kill-transition nimbus storm-id)
-            }
-   :inactive {:activate :active
-              :inactivate nil
-              :rebalance (rebalance-transition nimbus storm-id status)
-              :kill (kill-transition nimbus storm-id)
-              }
-   :killed {:startup (fn [] (delay-event nimbus
-                                         storm-id
-                                         (-> storm-base
-                                             :topology-action-options
-                                             :delay-secs)
-                                         :remove)
-                             nil)
-            :kill (kill-transition nimbus storm-id)
-            :remove (fn []
-                      (log-message "Killing topology: " storm-id)
-                      (.remove-storm! (:storm-cluster-state nimbus)
-                                      storm-id)
-                      (when (instance? LocalFsBlobStore (:blob-store nimbus))
-                        (doseq [blob-key (get-key-list-from-id (:conf nimbus) storm-id)]
-                          (.remove-blobstore-key! (:storm-cluster-state nimbus) blob-key)
-                          (.remove-key-version! (:storm-cluster-state nimbus) blob-key)))
-                      nil)
-            }
-   :rebalancing {:startup (fn [] (delay-event nimbus
-                                              storm-id
-                                              (-> storm-base
-                                                  :topology-action-options
-                                                  :delay-secs)
-                                              :do-rebalance)
-                                 nil)
-                 :kill (kill-transition nimbus storm-id)
-                 :do-rebalance (fn []
-                                 (do-rebalance nimbus storm-id status storm-base)
-                                 (:type (:prev-status storm-base)))
-                 }})
-
-(defn transition!
-  ([nimbus storm-id event]
-     (transition! nimbus storm-id event false))
-  ([nimbus storm-id event error-on-no-transition?]
-    (is-leader nimbus)
-    (locking (:submit-lock nimbus)
-       (let [system-events #{:startup}
-             [event & event-args] (if (keyword? event) [event] event)
-             storm-base (-> nimbus :storm-cluster-state  (.storm-base storm-id nil))
-             status (:status storm-base)]
-         ;; handles the case where event was scheduled but topology has been removed
-         (if-not status
-           (log-message "Cannot apply event " event " to " storm-id " because topology no longer exists")
-           (let [get-event (fn [m e]
-                             (if (contains? m e)
-                               (m e)
-                               (let [msg (str "No transition for event: " event
-                                              ", status: " status,
-                                              " storm-id: " storm-id)]
-                                 (if error-on-no-transition?
-                                   (throw-runtime msg)
-                                   (do (when-not (contains? system-events event)
-                                         (log-message msg))
-                                       nil))
-                                 )))
-                 transition (-> (state-transitions nimbus storm-id status storm-base)
-                                (get (:type status))
-                                (get-event event))
-                 transition (if (or (nil? transition)
-                                    (keyword? transition))
-                              (fn [] transition)
-                              transition)
-                 storm-base-updates (apply transition event-args)
-                 storm-base-updates (if (keyword? storm-base-updates) ;if it's just a keyword, that just indicates the new status
-                                      {:status {:type storm-base-updates}}
-                                      storm-base-updates)]
-
-             (when storm-base-updates
-               (.update-storm! (:storm-cluster-state nimbus) storm-id storm-base-updates)))))
-       )))
-
-(defn transition-name! [nimbus storm-name event & args]
-  (let [storm-id (get-storm-id (:storm-cluster-state nimbus) storm-name)]
-    (when-not storm-id
-      (throw (NotAliveException. storm-name)))
-    (apply transition! nimbus storm-id event args)))
-
-(defn delay-event [nimbus storm-id delay-secs event]
-  (log-message "Delaying event " event " for " delay-secs " secs for " storm-id)
-  (schedule (:timer nimbus)
-            delay-secs
-            #(transition! nimbus storm-id event false)
-            ))
-
-;; Delayed transitions by status (see the usage sketch below):
-;; killed -> wait the kill time, then shut down
-;; active -> reassign in X secs
-;; inactive -> nothing
-;; rebalance -> wait X seconds, then rebalance
-;; swap... (need to handle kill during swap, etc.)
-;; event transitions are delayed by the timer... should anything else that comes
-;; through (e.g. a kill) override the delayed transition, or should other
-;; transitions simply be disabled while one is pending?
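-
-;; Usage sketch for the transition machinery above (assumes a `nimbus` map as
-;; built by `nimbus-data` and a running topology; the ids and values are
-;; illustrative only):
-(comment
-  ;; schedule a kill with a 10 second delay, erroring if no transition applies
-  (transition! nimbus "wordcount-1-1450000000" [:kill 10] true)
-  ;; rebalance to 4 workers after 30 secs, overriding the "spout" parallelism to 8
-  (transition-name! nimbus "wordcount" [:rebalance 30 4 {"spout" 8}] true))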
-
-
-(defmulti setup-jar cluster-mode)
-(defmulti clean-inbox cluster-mode)
-
-;; swapping design
-;; -- need 2 ports per worker (swap port and regular port)
-;; -- topology that swaps in can use all the existing topologies swap ports, + unused worker slots
-;; -- how to define worker resources? port range + number of workers?
-
-
-;; Monitoring (or by checking when nodes go down or heartbeats aren't received):
-;; 1. read assignment
-;; 2. see which executors/nodes are up
-;; 3. make new assignment to fix any problems
-;; 4. if a storm exists but is not taken down fully, ensure that storm takedown is launched (step by step remove executors and finally remove assignments)
-
-(defn- assigned-slots
-  "Returns a map from node-id to a set of ports"
-  [storm-cluster-state]
-
-  (let [assignments (.assignments storm-cluster-state nil)]
-    (defaulted
-      (apply merge-with set/union
-             (for [a assignments
-                   [_ [node port]] (-> (.assignment-info storm-cluster-state a nil) :executor->node+port)]
-               {node #{port}}
-               ))
-      {})
-    ))
-
-(defn- all-supervisor-info
-  ([storm-cluster-state] (all-supervisor-info storm-cluster-state nil))
-  ([storm-cluster-state callback]
-     (let [supervisor-ids (.supervisors storm-cluster-state callback)]
-       (into {}
-             (mapcat
-              (fn [id]
-                (if-let [info (.supervisor-info storm-cluster-state id)]
-                  [[id info]]
-                  ))
-              supervisor-ids))
-       )))
-
-(defn- all-scheduling-slots
-  [nimbus topologies missing-assignment-topologies]
-  (let [storm-cluster-state (:storm-cluster-state nimbus)
-        ^INimbus inimbus (:inimbus nimbus)
-
-        supervisor-infos (all-supervisor-info storm-cluster-state nil)
-
-        supervisor-details (dofor [[id info] supervisor-infos]
-                             (SupervisorDetails. id (:meta info) (:resources-map info)))
-
-        ret (.allSlotsAvailableForScheduling inimbus
-                     supervisor-details
-                     topologies
-                     (set missing-assignment-topologies)
-                     )
-        ]
-    (dofor [^WorkerSlot slot ret]
-      [(.getNodeId slot) (.getPort slot)]
-      )))
-
-(defn- get-version-for-key [key nimbus-host-port-info conf]
-  (let [version (KeySequenceNumber. key nimbus-host-port-info)]
-    (.getKeySequenceNumber version conf)))
-
-(defn get-key-seq-from-blob-store [blob-store]
-  (let [key-iter (.listKeys blob-store)]
-    (iterator-seq key-iter)))
-
-(defn- setup-storm-code [nimbus conf storm-id tmp-jar-location storm-conf topology]
-  (let [subject (get-subject)
-        storm-cluster-state (:storm-cluster-state nimbus)
-        blob-store (:blob-store nimbus)
-        jar-key (master-stormjar-key storm-id)
-        code-key (master-stormcode-key storm-id)
-        conf-key (master-stormconf-key storm-id)
-        nimbus-host-port-info (:nimbus-host-port-info nimbus)]
-    (when tmp-jar-location ;;in local mode there is no jar
-      (.createBlob blob-store jar-key (FileInputStream. tmp-jar-location) (SettableBlobMeta. BlobStoreAclHandler/DEFAULT) subject)
-      (if (instance? LocalFsBlobStore blob-store)
-        (.setup-blobstore! storm-cluster-state jar-key nimbus-host-port-info (get-version-for-key jar-key nimbus-host-port-info conf))))
-    (.createBlob blob-store conf-key (Utils/toCompressedJsonConf storm-conf) (SettableBlobMeta. BlobStoreAclHandler/DEFAULT) subject)
-    (if (instance? LocalFsBlobStore blob-store)
-      (.setup-blobstore! storm-cluster-state conf-key nimbus-host-port-info (get-version-for-key conf-key nimbus-host-port-info conf)))
-    (.createBlob blob-store code-key (Utils/serialize topology) (SettableBlobMeta. BlobStoreAclHandler/DEFAULT) subject)
-    (if (instance? LocalFsBlobStore blob-store)
-      (.setup-blobstore! storm-cluster-state code-key nimbus-host-port-info (get-version-for-key code-key nimbus-host-port-info conf)))))
-
-(defn- read-storm-topology [storm-id blob-store]
-  (Utils/deserialize
-    (.readBlob blob-store (master-stormcode-key storm-id) (get-subject)) StormTopology))
-
-(defn get-blob-replication-count
-  [blob-key nimbus]
-  (if (:blob-store nimbus)
-        (-> (:blob-store nimbus)
-          (.getBlobReplication  blob-key nimbus-subject))))
-
-(defn- wait-for-desired-code-replication [nimbus conf storm-id]
-  (let [min-replication-count (conf TOPOLOGY-MIN-REPLICATION-COUNT)
-        max-replication-wait-time (conf TOPOLOGY-MAX-REPLICATION-WAIT-TIME-SEC)
-        current-replication-count-jar (if (not (local-mode? conf))
-                                        (atom (get-blob-replication-count (master-stormjar-key storm-id) nimbus))
-                                        (atom min-replication-count))
-        current-replication-count-code (atom (get-blob-replication-count (master-stormcode-key storm-id) nimbus))
-        current-replication-count-conf (atom (get-blob-replication-count (master-stormconf-key storm-id) nimbus))
-        total-wait-time (atom 0)]
-    (if (:blob-store nimbus)
-      (while (and
-               (or (> min-replication-count @current-replication-count-jar)
-                   (> min-replication-count @current-replication-count-code)
-                   (> min-replication-count @current-replication-count-conf))
-               (or (neg? max-replication-wait-time)
-                   (< @total-wait-time max-replication-wait-time)))
-        (sleep-secs 1)
-        (log-debug "waiting for desired replication to be achieved. "
-          "min-replication-count = " min-replication-count " max-replication-wait-time = " max-replication-wait-time
-          (if (not (local-mode? conf))
-            (str " current-replication-count for jar key = " @current-replication-count-jar))
-          " current-replication-count for code key = " @current-replication-count-code
-          " current-replication-count for conf key = " @current-replication-count-conf
-          " total-wait-time = " @total-wait-time)
-        (swap! total-wait-time inc)
-        (if (not (local-mode? conf))
-          (reset! current-replication-count-conf  (get-blob-replication-count (master-stormconf-key storm-id) nimbus)))
-        (reset! current-replication-count-code  (get-blob-replication-count (master-stormcode-key storm-id) nimbus))
-        (reset! current-replication-count-jar  (get-blob-replication-count (master-stormjar-key storm-id) nimbus))))
-    (if (and (<= min-replication-count @current-replication-count-conf)
-             (<= min-replication-count @current-replication-count-code)
-             (<= min-replication-count @current-replication-count-jar))
-      (log-message "desired replication count " min-replication-count " achieved, "
-        "current-replication-count for conf key = " @current-replication-count-conf ", "
-        "current-replication-count for code key = " @current-replication-count-code ", "
-        "current-replication-count for jar key = " @current-replication-count-jar)
-      (log-message "desired replication count of " min-replication-count " not achieved, but we have hit the max wait time "
-        max-replication-wait-time " so moving on with replication count for conf key = " @current-replication-count-conf
-        " for code key = " @current-replication-count-code " for jar key = " @current-replication-count-jar))))
-
-(defn- read-storm-topology-as-nimbus [storm-id blob-store]
-  (Utils/deserialize
-    (.readBlob blob-store (master-stormcode-key storm-id) nimbus-subject) StormTopology))
-
-(declare compute-executor->component)
-
-(defn read-storm-conf-as-nimbus [storm-id blob-store]
-  (clojurify-structure
-    (Utils/fromCompressedJsonConf
-      (.readBlob blob-store (master-stormconf-key storm-id) nimbus-subject))))
-
-(defn read-topology-details [nimbus storm-id]
-  (let [blob-store (:blob-store nimbus)
-        storm-base (or
-                     (.storm-base (:storm-cluster-state nimbus) storm-id nil)
-                     (throw (NotAliveException. storm-id)))
-        topology-conf (read-storm-conf-as-nimbus storm-id blob-store)
-        topology (read-storm-topology-as-nimbus storm-id blob-store)
-        executor->component (->> (compute-executor->component nimbus storm-id)
-                                 (map-key (fn [[start-task end-task]]
-                                            (ExecutorDetails. (int start-task) (int end-task)))))]
-    (TopologyDetails. storm-id
-                      topology-conf
-                      topology
-                      (:num-workers storm-base)
-                      executor->component
-                      (:launch-time-secs storm-base))))
-
-;; Does not assume that clocks are synchronized. Executor heartbeat is only used so that
-;; nimbus knows when it's received a new heartbeat. All timing is done by nimbus and
-;; tracked through heartbeat-cache
-(defn- update-executor-cache [curr hb timeout]
-  (let [reported-time (:time-secs hb)
-        {last-nimbus-time :nimbus-time
-         last-reported-time :executor-reported-time} curr
-        reported-time (cond reported-time reported-time
-                            last-reported-time last-reported-time
-                            :else 0)
-        nimbus-time (if (or (not last-nimbus-time)
-                        (not= last-reported-time reported-time))
-                      (current-time-secs)
-                      last-nimbus-time
-                      )]
-      {:is-timed-out (and
-                       nimbus-time
-                       (>= (time-delta nimbus-time) timeout))
-       :nimbus-time nimbus-time
-       :executor-reported-time reported-time
-       :heartbeat hb}))
-
-(defn update-heartbeat-cache [cache executor-beats all-executors timeout]
-  (let [cache (select-keys cache all-executors)]
-    (into {}
-      (for [executor all-executors :let [curr (cache executor)]]
-        [executor
-         (update-executor-cache curr (get executor-beats executor) timeout)]
-         ))))
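-
-;; Sketch of a heartbeat-cache entry as built by update-executor-cache above.
-;; Keys are executor [start-task end-task] ids; the values shown are illustrative:
-(comment
-  {[1 1] {:is-timed-out false
-          :nimbus-time 1450000123           ;; last time nimbus saw the reported time change
-          :executor-reported-time 456       ;; executor's own :time-secs, only compared for change
-          :heartbeat executor-heartbeat-map}}) ;; placeholder for the raw beat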
-
-(defn update-heartbeats! [nimbus storm-id all-executors existing-assignment]
-  (log-debug "Updating heartbeats for " storm-id " " (pr-str all-executors))
-  (let [storm-cluster-state (:storm-cluster-state nimbus)
-        executor-beats (.executor-beats storm-cluster-state storm-id (:executor->node+port existing-assignment))
-        cache (update-heartbeat-cache (@(:heartbeats-cache nimbus) storm-id)
-                                      executor-beats
-                                      all-executors
-                                      ((:conf nimbus) NIMBUS-TASK-TIMEOUT-SECS))]
-      (swap! (:heartbeats-cache nimbus) assoc storm-id cache)))
-
-(defn- update-all-heartbeats!
-  "Updates the heartbeats for all executors of all the given topologies."
-  [nimbus existing-assignments topology->executors]
-  (doseq [[tid assignment] existing-assignments
-          :let [all-executors (topology->executors tid)]]
-    (update-heartbeats! nimbus tid all-executors assignment)))
-
-(defn- alive-executors
-  [nimbus ^TopologyDetails topology-details all-executors existing-assignment]
-  (log-debug "Computing alive executors for " (.getId topology-details) "\n"
-             "Executors: " (pr-str all-executors) "\n"
-             "Assignment: " (pr-str existing-assignment) "\n"
-             "Heartbeat cache: " (pr-str (@(:heartbeats-cache nimbus) (.getId topology-details)))
-             )
-  ;; TODO: need to consider all executors associated with a dead executor (in same slot) dead as well,
-  ;; don't just rely on heartbeat being the same
-  (let [conf (:conf nimbus)
-        storm-id (.getId topology-details)
-        executor-start-times (:executor->start-time-secs existing-assignment)
-        heartbeats-cache (@(:heartbeats-cache nimbus) storm-id)]
-    (->> all-executors
-        (filter (fn [executor]
-          (let [start-time (get executor-start-times executor)
-                is-timed-out (-> heartbeats-cache (get executor) :is-timed-out)]
-            (if (and start-time
-                   (or
-                    (< (time-delta start-time)
-                       (conf NIMBUS-TASK-LAUNCH-SECS))
-                    (not is-timed-out)
-                    ))
-              true
-              (do
-                (log-message "Executor " storm-id ":" executor " not alive")
-                false))
-            )))
-        doall)))
-
-
-(defn- to-executor-id [task-ids]
-  [(first task-ids) (last task-ids)])
-
-(defn- compute-executors [nimbus storm-id]
-  (let [conf (:conf nimbus)
-        blob-store (:blob-store nimbus)
-        storm-base (.storm-base (:storm-cluster-state nimbus) storm-id nil)
-        component->executors (:component->executors storm-base)
-        storm-conf (read-storm-conf-as-nimbus storm-id blob-store)
-        topology (read-storm-topology-as-nimbus storm-id blob-store)
-        task->component (storm-task-info topology storm-conf)]
-    (->> task->component
-         reverse-map
-         (map-val sort)
-         (join-maps component->executors)
-         (map-val (partial apply partition-fixed))
-         (mapcat second)
-         (map to-executor-id)
-         )))
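-
-;; Illustrative: a component whose (sorted) task ids are 1-6, and for which
-;; :component->executors requests 2 executors, is packed into [first-task last-task]
-;; ranges like the following:
-(comment
-  (compute-executors nimbus "wordcount-1-1450000000")
-  ;; => ([1 3] [4 6] ...)
-  )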
-
-(defn- compute-executor->component [nimbus storm-id]
-  (let [conf (:conf nimbus)
-        blob-store (:blob-store nimbus)
-        executors (compute-executors nimbus storm-id)
-        topology (read-storm-topology-as-nimbus storm-id blob-store)
-        storm-conf (read-storm-conf-as-nimbus storm-id blob-store)
-        task->component (storm-task-info topology storm-conf)
-        executor->component (into {} (for [executor executors
-                                           :let [start-task (first executor)
-                                                 component (task->component start-task)]]
-                                       {executor component}))]
-        executor->component))
-
-(defn- compute-topology->executors
-  "Computes a topology-id -> executors map."
-  [nimbus storm-ids]
-  (into {} (for [tid storm-ids]
-             {tid (set (compute-executors nimbus tid))})))
-
-(defn- compute-topology->alive-executors
-  "Computes a topology-id -> alive executors map."
-  [nimbus existing-assignments topologies topology->executors scratch-topology-id]
-  (into {} (for [[tid assignment] existing-assignments
-                 :let [topology-details (.getById topologies tid)
-                       all-executors (topology->executors tid)
-                       alive-executors (if (and scratch-topology-id (= scratch-topology-id tid))
-                                         all-executors
-                                         (set (alive-executors nimbus topology-details all-executors assignment)))]]
-             {tid alive-executors})))
-
-(defn- compute-supervisor->dead-ports [nimbus existing-assignments topology->executors topology->alive-executors]
-  (let [dead-slots (into [] (for [[tid assignment] existing-assignments
-                                  :let [all-executors (topology->executors tid)
-                                        alive-executors (topology->alive-executors tid)
-                                        dead-executors (set/difference all-executors alive-executors)
-                                        dead-slots (->> (:executor->node+port assignment)
-                                                        (filter #(contains? dead-executors (first %)))
-                                                        vals)]]
-                              dead-slots))
-        supervisor->dead-ports (->> dead-slots
-                                    (apply concat)
-                                    (map (fn [[sid port]] {sid #{port}}))
-                                    (apply (partial merge-with set/union)))]
-    (or supervisor->dead-ports {})))
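-;; Illustrative: if executor [3 3] has timed out while assigned to ["node-1" 6701],
-;; the result contains {"node-1" #{6701}}, freeing that slot for the next scheduling round.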
-
-(defn- compute-topology->scheduler-assignment
-  "Converts assignment information in ZK to SchedulerAssignment, so it can be used by the scheduler API."
-  [nimbus existing-assignments topology->alive-executors]
-  (into {} (for [[tid assignment] existing-assignments
-                 :let [alive-executors (topology->alive-executors tid)
-                       executor->node+port (:executor->node+port assignment)
-                       worker->resources (:worker->resources assignment)
-                       ;; making a map from node+port to WorkerSlot with allocated resources
-                       node+port->slot (into {} (for [[[node port] [mem-on-heap mem-off-heap cpu]] worker->resources]
-                                                  {[node port]
-                                                   (doto (WorkerSlot. node port)
-                                                     (.allocateResource
-                                                       mem-on-heap
-                                                       mem-off-heap
-                                                       cpu))}))
-                       executor->slot (into {} (for [[executor [node port]] executor->node+port]
-                                                 ;; filter out the dead executors
-                                                 (if (contains? alive-executors executor)
-                                                   {(ExecutorDetails. (first executor)
-                                                                      (second executor))
-                                                    (get node+port->slot [node port])}
-                                                   {})))]]
-             {tid (SchedulerAssignmentImpl. tid executor->slot)})))
-
-(defn- read-all-supervisor-details
-  "Returns a map: {supervisor-id SupervisorDetails}"
-  [nimbus all-scheduling-slots supervisor->dead-ports]
-  (let [storm-cluster-state (:storm-cluster-state nimbus)
-        supervisor-infos (all-supervisor-info storm-cluster-state)
-        nonexistent-supervisor-slots (apply dissoc all-scheduling-slots (keys supervisor-infos))
-        all-supervisor-details (into {} (for [[sid supervisor-info] supervisor-infos
-                                              :let [hostname (:hostname supervisor-info)
-                                                    scheduler-meta (:scheduler-meta supervisor-info)
-                                                    dead-ports (supervisor->dead-ports sid)
-                                                    ;; hide the dead-ports from the all-ports
-                                                    ;; these dead-ports can be reused in next round of assignments
-                                                    all-ports (-> (get all-scheduling-slots sid)
-                                                                  (set/difference dead-ports)
-                                                                  ((fn [ports] (map int ports))))
-                                                    supervisor-details (SupervisorDetails. sid hostname scheduler-meta all-ports (:resources-map supervisor-info))
-                                                    ]]
-                                          {sid supervisor-details}))]
-    (merge all-supervisor-details
-           (into {}
-              (for [[sid ports] nonexistent-supervisor-slots]
-                [sid (SupervisorDetails. sid nil ports)]))
-           )))
-
-(defn- compute-topology->executor->node+port
-  "Converts {topology-id -> SchedulerAssignment} to
-   {topology-id -> {executor [node port]}}."
-  [scheduler-assignments]
-  (map-val (fn [^SchedulerAssignment assignment]
-             (->> assignment
-                  .getExecutorToSlot
-                  (#(into {} (for [[^ExecutorDetails executor ^WorkerSlot slot] %]
-                              {[(.getStartTask executor) (.getEndTask executor)]
-                               [(.getNodeId slot) (.getPort slot)]})))))
-           scheduler-assignments))
-
-;; NEW NOTES
-;; only assign to supervisors who are there and haven't timed out
-;; need to reassign workers with executors that have timed out (will this make it brittle?)
-;; need to read in the topology and storm-conf from disk
-;; if no slots available and no slots used by this storm, just skip and do nothing
-;; otherwise, package rest of executors into available slots (up to how much it needs)
-
-;; in the future could allocate executors intelligently (so that "close" tasks reside on same machine)
-
-;; TODO: slots that have dead executor should be reused as long as supervisor is active
-
-
-;; (defn- assigned-slots-from-scheduler-assignments [topology->assignment]
-;;   (->> topology->assignment
-;;        vals
-;;        (map (fn [^SchedulerAssignment a] (.getExecutorToSlot a)))
-;;        (mapcat vals)
-;;        (map (fn [^WorkerSlot s] {(.getNodeId s) #{(.getPort s)}}))
-;;        (apply merge-with set/union)
-;;        ))
-
-(defn num-used-workers [^SchedulerAssignment scheduler-assignment]
-  (if scheduler-assignment
-    (count (.getSlots scheduler-assignment))
-    0 ))
-
-(defn convert-assignments-to-worker->resources
-  "Converts {topology-id -> SchedulerAssignment} to
-   {topology-id -> {[node port] [mem-on-heap mem-off-heap cpu]}}.
-   Make sure this can deal with other, non-RAS schedulers;
-   later we may further support map-for-any-resources."
-  [new-scheduler-assignments]
-  (map-val (fn [^SchedulerAssignment assignment]
-             (->> assignment
-                  .getExecutorToSlot
-                  .values
-                  (#(into {} (for [^WorkerSlot slot %]
-                              {[(.getNodeId slot) (.getPort slot)]
-                               [(.getAllocatedMemOnHeap slot) (.getAllocatedMemOffHeap slot) (.getAllocatedCpu slot)]
-                               })))))
-           new-scheduler-assignments))
-
-(defn compute-new-topology->executor->node+port [new-scheduler-assignments existing-assignments]
-  (let [new-topology->executor->node+port (compute-topology->executor->node+port new-scheduler-assignments)]
-    ;; print some useful information.
-    (doseq [[topology-id executor->node+port] new-topology->executor->node+port
-            :let [old-executor->node+port (-> topology-id
-                                              existing-assignments
-                                              :executor->node+port)
-                  reassignment (filter (fn [[executor node+port]]
-                                         (and (contains? old-executor->node+port executor)
-                                              (not (= node+port (old-executor->node+port executor)))))
-                                       executor->node+port)]]
-      (when-not (empty? reassignment)
-        (let [new-slots-cnt (count (set (vals executor->node+port)))
-              reassign-executors (keys reassignment)]
-          (log-message "Reassigning " topology-id " to " new-slots-cnt " slots")
-          (log-message "Reassign executors: " (vec reassign-executors)))))
-
-    new-topology->executor->node+port))
-
-;; public so it can be mocked out
-(defn compute-new-scheduler-assignments [nimbus existing-assignments topologies scratch-topology-id]
-  (let [conf (:conf nimbus)
-        storm-cluster-state (:storm-cluster-state nimbus)
-        topology->executors (compute-topology->executors nimbus (keys existing-assignments))
-        ;; update the executors heartbeats first.
-        _ (update-all-heartbeats! nimbus existing-assignments topology->executors)
-        topology->alive-executors (compute-topology->alive-executors nimbus
-                                                                     existing-assignments
-                                                                     topologies
-                                                                     topology->executors
-                                                                     scratch-topology-id)
-        supervisor->dead-ports (compute-supervisor->dead-ports nimbus
-                                                               existing-assignments
-                                                               topology->executors
-                                                               topology->alive-executors)
-        topology->scheduler-assignment (compute-topology->scheduler-assignment nimbus
-                                                                               existing-assignments
-                                                                               topology->alive-executors)
-
-        missing-assignment-topologies (->> topologies
-                                           .getTopologies
-                                           (map (memfn getId))
-                                           (filter (fn [t]
-                                                     (let [alle (get topology->executors t)
-                                                           alivee (get topology->alive-executors t)]
-                                                       (or (empty? alle)
-                                                           (not= alle alivee)
-                                                           (< (-> topology->scheduler-assignment
-                                                                  (get t)
-                                                                  num-used-workers )
-                                                              (-> topologies (.getById t) .getNumWorkers)))))))
-        all-scheduling-slots (->> (all-scheduling-slots nimbus topologies missing-assignment-topologies)
-                                  (map (fn [[node-id port]] {node-id #{port}}))
-                                  (apply merge-with set/union))
-
-        supervisors (read-all-supervisor-details nimbus all-scheduling-slots supervisor->dead-ports)
-        cluster (Cluster. (:inimbus nimbus) supervisors topology->scheduler-assignment conf)
-        _ (.setStatusMap cluster (deref (:id->sched-status nimbus)))
-        ;; call scheduler.schedule to schedule all the topologies
-        ;; the new assignments for all the topologies are in the cluster object.
-        _ (.schedule (:scheduler nimbus) topologies cluster)
-        _ (.setResourcesMap cluster @(:id->resources nimbus))
-        _ (if-not (conf SCHEDULER-DISPLAY-RESOURCE) (.updateAssignedMemoryForTopologyAndSupervisor cluster topologies))
-        ;;merge with existing statuses
-        _ (reset! (:id->sched-status nimbus) (merge (deref (:id->sched-status nimbus)) (.getStatusMap cluster)))
-        _ (reset! (:node-id->resources nimbus) (.getSupervisorsResourcesMap cluster))
-        _ (reset! (:id->resources nimbus) (.getResourcesMap cluster))]
-    (.getAssignments cluster)))
-
-(defn changed-executors [executor->node+port new-executor->node+port]
-  (let [executor->node+port (if executor->node+port (sort executor->node+port) nil)
-        new-executor->node+port (if new-executor->node+port (sort new-executor->node+port) nil)
-        slot-assigned (reverse-map executor->node+port)
-        new-slot-assigned (reverse-map new-executor->node+port)
-        brand-new-slots (map-diff slot-assigned new-slot-assigned)]
-    (apply concat (vals brand-new-slots))
-    ))
-
-(defn newly-added-slots [existing-assignment new-assignment]
-  (let [old-slots (-> (:executor->node+port existing-assignment)
-                      vals
-                      set)
-        new-slots (-> (:executor->node+port new-assignment)
-                      vals
-                      set)]
-    (set/difference new-slots old-slots)))
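-
-;; Illustrative: with an existing assignment that only used ["node-1" 6700] and a
-;; new assignment that also uses ["node-2" 6701], the newly added slots are:
-(comment
-  (newly-added-slots {:executor->node+port {[1 1] ["node-1" 6700]}}
-                     {:executor->node+port {[1 1] ["node-1" 6700]
-                                            [2 2] ["node-2" 6701]}})
-  ;; => #{["node-2" 6701]}
-  )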
-
-
-(defn basic-supervisor-details-map [storm-cluster-state]
-  (let [infos (all-supervisor-info storm-cluster-state)]
-    (->> infos
-         (map (fn [[id info]]
-                 [id (SupervisorDetails. id (:hostname info) (:scheduler-meta info) nil (:resources-map info))]))
-         (into {}))))
-
-(defn- to-worker-slot [[node port]]
-  (WorkerSlot. node port))
-
-;; get existing assignment (just the executor->node+port map) -> default to {}
-;; filter out entries which have an executor timeout
-;; figure out the available slots on the cluster. add to that the used valid slots to get the total slots. figure out how many executors should be in each slot (e.g., 4, 4, 4, 5)
-;; only keep existing slots that satisfy one of those counts. for the rest, reassign them across the remaining slots
-;; edge case for slots with no executor timeout but with a supervisor timeout... just treat these as valid slots that can be reassigned to. worst comes to worst, the executor will time out and won't be assigned there next time around
-(defnk mk-assignments [nimbus :scratch-topology-id nil]
-  (if (is-leader nimbus :throw-exception false)
-    (let [conf (:conf nimbus)
-        storm-cluster-state (:storm-cluster-state nimbus)
-        ^INimbus inimbus (:inimbus nimbus)
-        ;; read all the topologies
-        topology-ids (.active-storms storm-cluster-state)
-        topologies (into {} (for [tid topology-ids]
-                              {tid (read-topology-details nimbus tid)}))
-        topologies (Topologies. topologies)
-        ;; read all the assignments
-        assigned-topology-ids (.assignments storm-cluster-state nil)
-        existing-assignments (into {} (for [tid assigned-topology-ids]
-                                        ;; for the topology which wants rebalance (specified by the scratch-topology-id)
-                                        ;; we exclude its assignment, meaning that all the slots occupied by its assignment
-                                        ;; will be treated as free slot in the scheduler code.
-                                        (when (or (nil? scratch-topology-id) (not= tid scratch-topology-id))
-                                          {tid (.assignment-info storm-cluster-state tid nil)})))
-        ;; make the new assignments for topologies
-        new-scheduler-assignments (compute-new-scheduler-assignments
-                                       nimbus
-                                       existing-assignments
-                                       topologies
-                                       scratch-topology-id)
-
-        topology->executor->node+port (compute-new-topology->executor->node+port new-scheduler-assignments existing-assignments)
-
-        topology->executor->node+port (merge (into {} (for [id assigned-topology-ids] {id nil})) topology->executor->node+port)
-        new-assigned-worker->resources (convert-assignments-to-worker->resources new-scheduler-assignments)
-        now-secs (current-time-secs)
-
-        basic-supervisor-details-map (basic-supervisor-details-map storm-cluster-state)
-
-        ;; construct the final Assignments by adding start-times etc into it
-        new-assignments (into {} (for [[topology-id executor->node+port] topology->executor->node+port
-                                        :let [existing-assignment (get existing-assignments topology-id)
-                                              all-nodes (->> executor->node+port vals (map first) set)
-                                              node->host (->> all-nodes
-                                                              (mapcat (fn [node]
-                                                                        (if-let [host (.getHostName inimbus basic-supervisor-details-map node)]
-                                                                          [[node host]]
-                                                                          )))
-                                                              (into {}))
-                                              all-node->host (merge (:node->host existing-assignment) node->host)
-                                              reassign-executors (changed-executors (:executor->node+port existing-assignment) executor->node+port)
-                                              start-times (merge (:executor->start-time-secs existing-assignment)
-                                                                (into {}
-                                                                      (for [id reassign-executors]
-                                                                        [id now-secs]
-                                                                        )))
-                                              worker->resources (get new-assigned-worker->resources topology-id)]]
-                                   {topology-id (Assignment.
-                                                 (conf STORM-LOCAL-DIR)
-                                                 (select-keys all-node->host all-nodes)
-                                                 executor->node+port
-                                                 start-times
-                                                 worker->resources)}))]
-
-    ;; tasks figure out what tasks to talk to by looking at topology at runtime
-    ;; only log/set when there's been a change to the assignment
-    (doseq [[topology-id assignment] new-assignments
-            :let [existing-assignment (get existing-assignments topology-id)
-                  topology-details (.getById topologies topology-id)]]
-      (if (= existing-assignment assignment)
-        (log-debug "Assignment for " topology-id " hasn't changed")
-        (do
-          (log-message "Setting new assignment for topology id " topology-id ": " (pr-str assignment))
-          (.set-assignment! storm-cluster-state topology-id assignment)
-          )))
-    (->> new-assignments
-          (map (fn [[topology-id assignment]]
-            (let [existing-assignment (get existing-assignments topology-id)]
-              [topology-id (map to-worker-slot (newly-added-slots existing-assignment assignment))]
-              )))
-          (into {})
-          (.assignSlots inimbus topologies)))
-    (log-message "not a leader, skipping assignments")))
-
-(defn notify-topology-action-listener [nimbus storm-id action]
-  (let [topology-action-notifier (:nimbus-topology-action-notifier nimbus)]
-    (when (not-nil? topology-action-notifier)
-      (try (.notify topology-action-notifier storm-id action)
-        (catch Exception e
-        (log-warn-error e "Ignoring exception from topology action notifier for storm-id " storm-id))))))
-
-(defn- start-storm [nimbus storm-name storm-id topology-initial-status]
-  {:pre [(#{:active :inactive} topology-initial-status)]}
-  (let [storm-cluster-state (:storm-cluster-state nimbus)
-        conf (:conf nimbus)
-        blob-store (:blob-store nimbus)
-        storm-conf (read-storm-conf conf storm-id blob-store)
-        topology (system-topology! storm-conf (read-storm-topology storm-id blob-store))
-        num-executors (->> (all-components topology) (map-val num-start-executors))]
-    (log-message "Activating " storm-name ": " storm-id)
-    (.activate-storm! storm-cluster-state
-                      storm-id
-                      (StormBase. storm-name
-                                  (current-time-secs)
-                                  {:type topology-initial-status}
-                                  (storm-conf TOPOLOGY-WORKERS)
-                                  num-executors
-                                  (storm-conf TOPOLOGY-SUBMITTER-USER)
-                                  nil
-                                  nil
-                                  {}))
-    (notify-topology-action-listener nimbus storm-name "activate")))
-
-;; Master:
-;; job submit:
-;; 1. read which nodes are available
-;; 2. set assignments
-;; 3. start storm - necessary in case master goes down, when goes back up can remember to take down the storm (2 states: on or off)
-
-(defn storm-active? [storm-cluster-state storm-name]
-  (not-nil? (get-storm-id storm-cluster-state storm-name)))
-
-(defn check-storm-active! [nimbus storm-name active?]
-  (if (= (not active?)
-         (storm-active? (:storm-cluster-state nimbus)
-                        storm-name))
-    (if active?
-      (throw (NotAliveException. (str storm-name " is not alive")))
-      (throw (AlreadyAliveException. (str storm-name " is already active"))))
-    ))
-
-(defn check-authorization!
-  ([nimbus storm-name storm-conf operation context]
-     (let [aclHandler (:authorization-handler nimbus)
-           impersonation-authorizer (:impersonation-authorization-handler nimbus)
-           ctx (or context (ReqContext/context))
-           check-conf (if storm-conf storm-conf (if storm-name {TOPOLOGY-NAME storm-name}))]
-       (log-thrift-access (.requestID ctx) (.remoteAddress ctx) (.principal ctx) operation)
-       (if (.isImpersonating ctx)
-         (do
-          (log-warn "principal: " (.realPrincipal ctx) " is trying to impersonate principal: " (.principal ctx))
-          (if impersonation-authorizer
-           (if-not (.permit impersonation-authorizer ctx operation check-conf)
-             (throw (AuthorizationException. (str "principal " (.realPrincipal ctx) " is not authorized to impersonate principal "
-                                                  (.principal ctx) " from host " (.remoteAddress ctx)
-                                                  ". Please see SECURITY.MD to learn how to configure impersonation ACLs."))))
-           (log-warn "impersonation attempt but " NIMBUS-IMPERSONATION-AUTHORIZER " has no authorizer configured. "
-                     "Potential security risk; please see SECURITY.MD to learn how to configure an impersonation authorizer."))))
-
-       (if aclHandler
-         (if-not (.permit aclHandler ctx operation check-conf)
-           (throw (AuthorizationException. (str operation (if storm-name (str " on topology " storm-name)) " is not authorized")))
-           ))))
-  ([nimbus storm-name storm-conf operation]
-     (check-authorization! nimbus storm-name storm-conf operation (ReqContext/context))))
-
-(defn code-ids [blob-store]
-  (let [to-id (reify KeyFilter
-                (filter [this key] (get-id-from-blob-key key)))]
-    (set (.filterAndListKeys blob-store to-id))))
-
-(defn cleanup-storm-ids [conf storm-cluster-state blob-store]
-  (let [heartbeat-ids (set (.heartbeat-storms storm-cluster-state))
-        error-ids (set (.error-topologies storm-cluster-state))
-        code-ids (code-ids blob-store)
-        assigned-ids (set (.active-storms storm-cluster-state))]
-    (set/difference (set/union heartbeat-ids error-ids code-ids) assigned-ids)
-    ))
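-;; Illustrative: with heartbeat ids #{"a" "b"}, error ids #{"b"}, code ids #{"a" "c"}
-;; and assigned ids #{"a"}, the ids to clean up are #{"b" "c"}.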
-
-(defn extract-status-str [base]
-  (let [t (-> base :status :type)]
-    (.toUpperCase (name t))
-    ))
-
-(defn mapify-serializations [sers]
-  (->> sers
-       (map (fn [e] (if (map? e) e {e nil})))
-       (apply merge)
-       ))
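-
-;; Illustrative: mixed string/map entries collapse into a single map, with bare
-;; class names mapped to nil (i.e. use Kryo's default serializer for them):
-(comment
-  (mapify-serializations ["com.example.Foo" {"com.example.Bar" "com.example.BarSerializer"}])
-  ;; => {"com.example.Foo" nil, "com.example.Bar" "com.example.BarSerializer"}
-  )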
-
-(defn- component-parallelism [storm-conf component]
-  (let [storm-conf (merge storm-conf (component-conf component))
-        num-tasks (or (storm-conf TOPOLOGY-TASKS) (num-start-executors component))
-        max-parallelism (storm-conf TOPOLOGY-MAX-TASK-PARALLELISM)
-        ]
-    (if max-parallelism
-      (min max-parallelism num-tasks)
-      num-tasks)))
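-;; Illustrative: a component whose merged conf sets TOPOLOGY-TASKS to 10 under a
-;; TOPOLOGY-MAX-TASK-PARALLELISM of 4 ends up with 4 tasks; with no cap it keeps all 10.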
-
-(defn normalize-topology [storm-conf ^StormTopology topology]
-  (let [ret (.deepCopy topology)]
-    (doseq [[_ component] (all-components ret)]
-      (.set_json_conf
-        (.get_common component)
-        (->> {TOPOLOGY-TASKS (component-parallelism storm-conf component)}
-             (merge (component-conf component))
-             to-json )))
-    ret ))
-
-(defn normalize-conf [conf storm-conf ^StormTopology topology]
-  ;; ensure that serializations are same for all tasks no matter what's on
-  ;; the supervisors. this also allows you to declare the serializations as a sequence
-  (let [component-confs (map
-                         #(-> (ThriftTopologyUtils/getComponentCommon topology %)
-                              .get_json_conf
-                              from-json)
-                         (ThriftTopologyUtils/getComponentIds topology))
-        total-conf (merge conf storm-conf)
-
-        get-merged-conf-val (fn [k merge-fn]
-                              (merge-fn
-                               (concat
-                                (mapcat #(get % k) component-confs)
-                                (or (get storm-conf k)
-                                    (get conf k)))))]
-    ;; topology level serialization registrations take priority
-    ;; that way, if there's a conflict, a user can force which serialization to use
-    ;; append component conf to storm-conf
-    (merge storm-conf
-           {TOPOLOGY-KRYO-DECORATORS (get-merged-conf-val TOPOLOGY-KRYO-DECORATORS distinct)
-            TOPOLOGY-KRYO-REGISTER (get-merged-conf-val TOPOLOGY-KRYO-REGISTER mapify-serializations)
-            TOPOLOGY-ACKER-EXECUTORS (total-conf TOPOLOGY-ACKER-EXECUTORS)
-            TOPOLOGY-EVENTLOGGER-EXECUTORS (total-conf TOPOLOGY-EVENTLOGGER-EXECUTORS)
-            TOPOLOGY-MAX-TASK-PARALLELISM (total-conf TOPOLOGY-MAX-TASK-PARALLELISM)})))
-
-(defn blob-rm-key [blob-store key storm-cluster-state]
-  (try
-    (.deleteBlob blob-store key nimbus-subject)
-    (if (instance? LocalFsBlobStore blob-store)
-      (.remove-blobstore-key! storm-cluster-state key))
-    (catch Exception e
-      (log-message "Exception while removing blob for key " key ": " e))))
-
-(defn blob-rm-topology-keys [id blob-store storm-cluster-state]
-  (blob-rm-key blob-store (master-stormjar-key id) storm-cluster-state)
-  (blob-rm-key blob-store (master-stormconf-key id) storm-cluster-state)
-  (blob-rm-key blob-store (master-stormcode-key id) storm-cluster-state))
-
-(defn do-cleanup [nimbus]
-  (if (is-leader nimbus :throw-exception false)
-    (let [storm-cluster-state (:storm-cluster-state nimbus)
-          conf (:conf nimbus)
-          submit-lock (:submit-lock nimbus)
-          blob-store (:blob-store nimbus)]
-      (let [to-cleanup-ids (locking submit-lock
-                             (cleanup-storm-ids conf storm-cluster-state blob-store))]
-        (when-not (empty? to-cleanup-ids)
-          (doseq [id to-cleanup-ids]
-            (log-message "Cleaning up " id)
-            (.teardown-heartbeats! storm-cluster-state id)
-            (.teardown-topology-errors! storm-cluster-state id)
-            (rmr (master-stormdist-root conf id))
-            (blob-rm-topology-keys id blob-store storm-cluster-state)
-            (swap! (:heartbeats-cache nimbus) dissoc id)))))
-    (log-message "not a leader, skipping cleanup")))
-
-(defn- file-older-than? [now seconds file]
-  (<= (+ (.lastModified file) (to-millis seconds)) (to-millis now)))
-
-(defn clean-inbox
-  "Deletes jar files in dir-location that are older than seconds."
-  [dir-location seconds]
-  (let [now (current-time-secs)
-        pred #(and (.isFile %) (file-older-than? now seconds %))
-        files (filter pred (file-seq (File. dir-location)))]
-    (doseq [f files]
-      (if (.delete f)
-        (log-message "Cleaning inbox ... deleted: " (.getName f))
-        ;; This should never happen
-        (log-error "Cleaning inbox ... error deleting: " (.getName f))))))
-
-(defn clean-topology-history
-  "Deletes topologies from history older than minutes."
-  [mins nimbus]
-  (locking (:topology-history-lock nimbus)
-    (let [cutoff-age (- (current-time-secs) (* mins 60))
-          topo-history-state (:topo-history-state nimbus)
-          curr-history (vec (ls-topo-hist topo-history-state))
-          new-history (vec (filter (fn [line]
-                                     (> (line :timestamp) cutoff-age)) curr-history))]
-      (ls-topo-hist! topo-history-state new-history))))
-
-(defn cleanup-corrupt-topologies! [nimbus]
-  (let [storm-cluster-state (:storm-cluster-state nimbus)
-        blob-store (:blob-store nimbus)
-        code-ids (set (code-ids blob-store))
-        active-topologies (set (.active-storms storm-cluster-state))
-        corrupt-topologies (set/difference active-topologies code-ids)]
-    (doseq [corrupt corrupt-topologies]
-      (log-message "Corrupt topology " corrupt " has state on zookeeper but doesn't have a local dir on Nimbus. Cleaning up...")
-      (.remove-storm! storm-cluster-state corrupt)
-      (if (instance? LocalFsBlobStore blob-store)
-        (doseq [blob-key (get-key-list-from-id (:conf nimbus) corrupt)]
-          (.remove-blobstore-key! storm-cluster-state blob-key))))))
-
-(defn setup-blobstore [nimbus]
-  "Sets up blobstore state for all current keys."
-  (let [storm-cluster-state (:storm-cluster-state nimbus)
-        blob-store (:blob-store nimbus)
-        local-set-of-keys (set (get-key-seq-from-blob-store blob-store))
-        all-keys (set (.active-keys storm-cluster-state))
-        locally-available-active-keys (set/intersection local-set-of-keys all-keys)
-        keys-to-delete (set/difference local-set-of-keys all-keys)
-        conf (:conf nimbus)
-        nimbus-host-port-info (:nimbus-host-port-info nimbus)]
-    (log-debug "Deleting keys not on the zookeeper" keys-to-delete)
-    (doseq [key keys-to-delete]
-      (.deleteBlob blob-store key nimbus-subject))
-    (log-debug "Creating list of key entries for blobstore inside zookeeper" all-keys "local" locally-available-active-keys)
-    (doseq [key locally-available-active-keys]
-      (.setup-blobstore! storm-cluster-state key (:nimbus-host-port-info nimbus) (get-version-for-key key nimbus-host-port-info conf)))))
-
-(defn- get-errors [storm-cluster-state storm-id component-id]
-  (->> (.errors storm-cluster-state storm-id component-id)
-       (map #(doto (ErrorInfo. (:error %) (:time-secs %))
-                   (.set_host (:host %))
-                   (.set_port (:port %))))))
-
-(defn- thriftify-executor-id [[first-task-id last-task-id]]
-  (ExecutorInfo. (int first-task-id) (int last-task-id)))
-
-(def DISALLOWED-TOPOLOGY-NAME-STRS #{"/" "." ":" "\\"})
-
-(defn validate-topology-name! [name]
-  (if (some #(.contains name %) DISALLOWED-TOPOLOGY-NAME-STRS)
-    (throw (InvalidTopologyException.
-            (str "Topology name cannot contain any of the following: " (pr-str DISALLOWED-TOPOLOGY-NAME-STRS))))
-  (if (clojure.string/blank? name)
-    (throw (InvalidTopologyException.
-            "Topology name cannot be blank")))))
-
-;; We only allow files at <Storm dist root>/<Topology ID>/<File>
-;; to be accessed via Thrift
-;; ex., storm-local/nimbus/stormdist/aa-1-1377104853/stormjar.jar
-(defn check-file-access [conf file-path]
-  (log-debug "check file access:" file-path)
-  (try
-    (if (not= (.getCanonicalFile (File. (master-stormdist-root conf)))
-          (-> (File. file-path) .getCanonicalFile .getParentFile .getParentFile))
-      (throw (AuthorizationException. (str "Invalid file path: " file-path))))
-    (catch Exception e
-      (throw (AuthorizationException. (str "Invalid file path: " file-path))))))
-
-(defn try-read-storm-conf
-  [conf storm-id blob-store]
-  (try-cause
-    (read-storm-conf-as-nimbus storm-id blob-store)
-    (catch KeyNotFoundException e
-      (throw (NotAliveException. (str storm-id))))))
-
-(defn try-read-storm-conf-from-name
-  [conf storm-name nimbus]
-  (let [storm-cluster-state (:storm-cluster-state nimbus)
-        blob-store (:blob-store nimbus)
-        id (get-storm-id storm-cluster-state storm-name)]
-    (try-read-storm-conf conf id blob-store)))
-
-(defn try-read-storm-topology
-  [storm-id blob-store]
-  (try-cause
-    (read-storm-topology-as-nimbus storm-id blob-store)
-    (catch KeyNotFoundException e
-      (throw (NotAliveException. (str storm-id))))))
-
-(defn add-topology-to-history-log
-  [storm-id nimbus topology-conf]
-  (log-message "Adding topo to history log: " storm-id)
-  (locking (:topology-history-lock nimbus)
-    (let [topo-history-state (:topo-history-state nimbus)
-          users (get-topo-logs-users topology-conf)
-          groups (get-topo-logs-groups topology-conf)
-          curr-history (vec (ls-topo-hist topo-history-state))
-          new-history (conj curr-history {:topoid storm-id :timestamp (current-time-secs)
-                                          :users users :groups groups})]
-      (ls-topo-hist! topo-history-state new-history))))
-
-(defn igroup-mapper
-  [storm-conf]
-  (AuthUtils/GetGroupMappingServiceProviderPlugin storm-conf))
-
-(defn user-groups
-  [user storm-conf]
-  (if (clojure.string/blank? user) [] (.getGroups (igroup-mapper storm-conf) user)))
-
-(defn does-users-group-intersect?
-  "Check to see if any of the users groups intersect with the list of groups passed in"
-  [user groups-to-check storm-conf]
-  (let [groups (user-groups user storm-conf)]
-    (> (.size (set/intersection (set groups) (set groups-to-check))) 0)))
-
-(defn read-topology-history
-  [nimbus user admin-users]
-  (let [topo-history-state (:topo-history-state nimbus)
-        curr-history (vec (ls-topo-hist topo-history-state))
-        topo-user-can-access (fn [line user storm-conf]
-                               (if (nil? user)
-                                 (line :topoid)
-                                 (if (or (some #(= % user) admin-users)
-                                       (does-users-group-intersect? user (line :groups) storm-conf)
-                                       (some #(= % user) (line :users)))
-                                   (line :topoid)
-                                   nil)))]
-    (remove nil? (map #(topo-user-can-access % user (:conf nimbus)) curr-history))))
-
-(defn renew-credentials [nimbus]
-  (if (is-leader nimbus :throw-exception false)
-    (let [storm-cluster-state (:storm-cluster-state nimbus)
-          blob-store (:blob-store nimbus)
-          renewers (:cred-renewers nimbus)
-          update-lock (:cred-update-lock nimbus)
-          assigned-ids (set (.active-storms storm-cluster-state))]
-      (when-not (empty? assigned-ids)
-        (doseq [id assigned-ids]
-          (locking update-lock
-            (let [orig-creds (.credentials storm-cluster-state id nil)
-                  topology-conf (try-read-storm-conf (:conf nimbus) id blob-store)]
-              (if orig-creds
-                (let [new-creds (HashMap. orig-creds)]
-                  (doseq [renewer renewers]
-                    (log-message "Renewing Creds For " id " with " renewer)
-                    (.renew renewer new-creds (Collections/unmodifiableMap topology-conf)))
-                  (when-not (= orig-creds new-creds)
-                    (.set-credentials! storm-cluster-state id new-creds topology-conf)
-                    ))))))))
-    (log-message "not a leader skipping , credential renweal.")))
-
-(defn validate-topology-size [topo-conf nimbus-conf topology]
-  (let [workers-count (get topo-conf TOPOLOGY-WORKERS)
-        workers-allowed (get nimbus-conf NIMBUS-SLOTS-PER-TOPOLOGY)
-        num-executors (->> (all-components topology) (map-val num-start-executors))
-        executors-count (reduce + (vals num-executors))
-        executors-allowed (get nimbus-conf NIMBUS-EXECUTORS-PER-TOPOLOGY)]
-    (when (and
-           (not (nil? executors-allowed))
-           (> executors-count executors-allowed))
-      (throw
-       (InvalidTopologyException.
-        (str "Failed to submit topology. Topology requests more than " executors-allowed " executors."))))
-    (when (and
-           (not (nil? workers-allowed))
-           (> workers-count workers-allowed))
-      (throw
-       (InvalidTopologyException.
-        (str "Failed to submit topology. Topology requests more than " workers-allowed " workers."))))))
-
-(defn- set-logger-timeouts [log-config]
-  (let [timeout-secs (.get_reset_log_level_timeout_secs log-config)
-       timeout (time/plus (time/now) (time/secs timeout-secs))]
-   (if (time/after? timeout (time/now))
-     (.set_reset_log_level_timeout_epoch log-config (coerce/to-long timeout))
-     (.unset_reset_log_level_timeout_epoch log-config))))
-
-(defmethod blob-sync :distributed [conf nimbus]
-  (if (not (is-leader nimbus :throw-exception false))
-    (let [storm-cluster-state (:storm-cluster-state nimbus)
-          nimbus-host-port-info (:nimbus-host-port-info nimbus)
-          blob-store-key-set (set (get-key-seq-from-blob-store (:blob-store nimbus)))
-          zk-key-set (set (.blobstore storm-cluster-state (fn [] (blob-sync conf nimbus))))]
-      (log-debug "blob-sync " "blob-store-keys " blob-store-key-set "zookeeper-keys " zk-key-set)
-      (let [sync-blobs (doto
-                          (BlobSynchronizer. (:blob-store nimbus) conf)
-                          (.setNimbusInfo nimbus-host-port-info)
-                          (.setBlobStoreKeySet blob-store-key-set)
-                          (.setZookeeperKeySet zk-key-set))]
-        (.syncBlobs sync-blobs)))))
-
-(defmethod blob-sync :local [conf nimbus]
-  nil)
-
-(defserverfn service-handler [conf inimbus]
-  (.prepare inimbus conf (master-inimbus-dir conf))
-  (log-message "Starting Nimbus with conf " conf)
-  (let [nimbus (nimbus-data conf inimbus)
-        blob-store (:blob-store nimbus)
-        principal-to-local (AuthUtils/GetPrincipalToLocalPlugin conf)
-        admin-users (or (.get conf NIMBUS-ADMINS) [])
-        get-common-topo-info
-          (fn [^String storm-id operation]
-            (let [storm-cluster-state (:storm-cluster-state nimbus)
-                  topology-conf (try-read-storm-conf conf storm-id blob-store)
-                  storm-name (topology-conf TOPOLOGY-NAME)
-                  _ (check-authorization! nimbus
-                                          storm-name
-                                          topology-conf
-                                          operation)
-                  topology (try-read-storm-topology storm-id blob-store)
-                  task->component (storm-task-info topology topology-conf)
-                  base (.storm-base storm-cluster-state storm-id nil)
-                  launch-time-secs (if base (:launch-time-secs base)
-                                     (throw
-                                       (NotAliveException. (str storm-id))))
-                  assignment (.assignment-info storm-cluster-state storm-id nil)
-                  beats (map-val :heartbeat (get @(:heartbeats-cache nimbus)
-                                                 storm-id))
-                  all-components (set (vals task->component))]
-              {:storm-name storm-name
-               :storm-cluster-state storm-cluster-state
-               :all-components all-components
-               :launch-time-secs launch-time-secs
-               :assignment assignment
-               :beats beats
-               :topology topology
-               :task->component task->component
-               :base base}))
-        get-last-error (fn [storm-cluster-state storm-id component-id]
-                         (if-let [e (.last-error storm-cluster-state
-                                                 storm-id
-                                                 component-id)]
-                           (doto (ErrorInfo. (:error e) (:time-secs e))
-                             (.set_host (:host e))
-                             (.set_port (:port e)))))]
-    (.prepare ^backtype.storm.nimbus.ITopologyValidator (:validator nimbus) conf)
-
-    ;add to nimbuses
-    (.add-nimbus-host! (:storm-cluster-state nimbus) (.toHostPortString (:nimbus-host-port-info nimbus))
-      (NimbusSummary.
-        (.getHost (:nimbus-host-port-info nimbus))
-        (.getPort (:nimbus-host-port-info nimbus))
-        (current-time-secs)
-        false ;is-leader
-        STORM-VERSION))
-
-    (.addToLeaderLockQueue (:leader-elector nimbus))
-    (cleanup-corrupt-topologies! nimbus)
-    (when (instance? LocalFsBlobStore blob-store)
-      ;register call back for blob-store
-      (.blobstore (:storm-cluster-state nimbus) (fn [] (blob-sync conf nimbus)))
-      (setup-blobstore nimbus))
-
-    (when (is-leader nimbus :throw-exception false)
-      (doseq [storm-id (.active-storms (:storm-cluster-state nimbus))]
-        (transition! nimbus storm-id :startup)))
-    (schedule-recurring (:timer nimbus)
-                        0
-                        (conf NIMBUS-MONITOR-FREQ-SECS)
-                        (fn []
-                          (when-not (conf NIMBUS-DO-NOT-REASSIGN)
-                            (locking (:submit-lock nimbus)
-                              (mk-assignments nimbus)))
-                          (do-cleanup nimbus)))
-    ;; Schedule Nimbus inbox cleaner
-    (schedule-recurring (:timer nimbus)
-                        0
-                        (conf NIMBUS-CLEANUP-INBOX-FREQ-SECS)
-                        (fn []
-                          (clean-inbox (inbox nimbus) (conf NIMBUS-INBOX-JAR-EXPIRATION-SECS))))
-    ;; Schedule nimbus code sync thread to sync code from other nimbuses.
-    (if (instance? LocalFsBlobStore blob-store)
-      (schedule-recurring (:timer nimbus)
-                          0
-                          (conf NIMBUS-CODE-SYNC-FREQ-SECS)
-                          (fn []
-                            (blob-sync conf nimbus))))
-    ;; Schedule topology history cleaner
-    (when-let [interval (conf LOGVIEWER-CLEANUP-INTERVAL-SECS)]
-      (schedule-recurring (:timer nimbus)
-        0
-        (conf LOGVIEWER-CLEANUP-INTERVAL-SECS)
-        (fn []
-          (clean-topology-history (conf LOGVIEWER-CLEANUP-AGE-MINS) nimbus))))
-    (schedule-recurring (:timer nimbus)
-                        0
-                        (conf NIMBUS-CREDENTIAL-RENEW-FREQ-SECS)
-                        (fn []
-                          (renew-credentials nimbus)))
-
-    (defgauge nimbus:num-supervisors
-      (fn [] (.size (.supervisors (:storm-cluster-state nimbus) nil))))
-
-    (start-metrics-reporters)
-
-    (reify Nimbus$Iface
-      (^void submitTopologyWithOpts
-        [this ^String storm-name ^String uploadedJarLocation ^String serializedConf ^StormTopology topology
-         ^SubmitOptions submitOptions]
-        (try
-          (mark! nimbus:num-submitTopologyWithOpts-calls)
-          (is-leader nimbus)
-          (assert (not-nil? submitOptions))
-          (validate-topology-name! storm-name)
-          (check-authorization! nimbus storm-name nil "submitTopology")
-          (check-storm-active! nimbus storm-name false)
-          (let [topo-conf (from-json serializedConf)]
-            (try
-              (validate-configs-with-schemas topo-conf)
-              (catch IllegalArgumentException ex
-                (throw (InvalidTopologyException. (.getMessage ex)))))
-            (.validate ^backtype.storm.nimbus.ITopologyValidator (:validator nimbus)
-                       storm-name
-                       topo-conf
-                       topology))
-          (swap! (:submitted-count nimbus) inc)
-          (let [storm-id (str storm-name "-" @(:submitted-count nimbus) "-" (current-time-secs))
-                credentials (.get_creds submitOptions)
-                credentials (when credentials (.get_creds credentials))
-                topo-conf (from-json serializedConf)
-                storm-conf-submitted (normalize-conf
-                            conf
-                            (-> topo-conf
-                              (assoc STORM-ID storm-id)
-                              (assoc TOPOLOGY-NAME storm-name))
-                            topology)
-                req (ReqContext/context)
-                principal (.principal req)
-                submitter-principal (if principal (.toString principal))
-                submitter-user (.toLocal principal-to-local principal)
-                system-user (System/getProperty "user.name")
-                topo-acl (distinct (remove nil? (conj (.get storm-conf-submitted TOPOLOGY-USERS) submitter-principal, submitter-user)))
-                storm-conf (-> storm-conf-submitted
-                               (assoc TOPOLOGY-SUBMITTER-PRINCIPAL (if submitter-principal submitter-principal ""))
-                               (assoc TOPOLOGY-SUBMITTER-USER (if submitter-user submitter-user system-user)) ;Don't let the user set who we launch as
-                               (assoc TOPOLOGY-USERS topo-acl)
-                               (assoc STORM-ZOOKEEPER-SUPERACL (.get conf STORM-ZOOKEEPER-SUPERACL)))
-                storm-conf (if (Utils/isZkAuthenticationConfiguredStormServer conf)
-                                storm-conf
-                                (dissoc storm-conf STORM-ZOOKEEPER-TOPOLOGY-AUTH-SCHEME STORM-ZOOKEEPER-TOPOLOGY-AUTH-PAYLOAD))
-                total-storm-conf (merge conf storm-conf)
-                topology (normalize-topology total-storm-conf topology)
-                storm-cluster-state (:storm-cluster-state nimbus)]
-            (when credentials (doseq [nimbus-autocred-plugin (:nimbus-autocred-plugins nimbus)]
-              (.populateCredentials nimbus-autocred-plugin credentials (Collections/unmodifiableMap storm-conf))))
-            (if (and (conf SUPERVISOR-RUN-WORKER-AS-USER) (or (nil? submitter-user) (.isEmpty (.trim submitter-user))))
-              (throw (AuthorizationException. "Could not determine the user to run this topology as.")))
-            (system-topology! total-storm-conf topology) ;; this validates the structure of the topology
-            (validate-topology-size topo-conf conf topology)
-            (when (and (Utils/isZkAuthenticationConfiguredStormServer conf)
-                       (not (Utils/isZkAuthenticationConfiguredTopology storm-conf)))
-                (throw (IllegalArgumentException. "The cluster is configured for zookeeper authentication, but no payload was provided.")))
-            (log-message "Received topology submission for "
-                         storm-name
-                         " with conf "
-                         (redact-value storm-conf STORM-ZOOKEEPER-TOPOLOGY-AUTH-PAYLOAD))
-            ;; lock protects against multiple topologies being submitted at once and
-            ;; cleanup thread killing topology in b/w assignment and starting the topology
-            (locking (:submit-lock nimbus)
-              (check-storm-active! nimbus storm-name false)
-              ;;cred-update-lock is not needed here because creds are being added for the first time.
-              (.set-credentials! storm-cluster-state storm-id credentials storm-conf)
-              (log-message "uploadedJar " uploadedJarLocation)
-              (setup-storm-code nimbus conf storm-id uploadedJarLocation total-storm-conf topology)
-              (wait-for-desired-code-replication nimbus total-storm-conf storm-id)
-              (.setup-heartbeats! storm-cluster-state storm-id)
-              (.setup-backpressure! storm-cluster-state storm-id)
-              (notify-topology-action-listener nimbus storm-name "submitTopology")
-              (let [thrift-status->kw-status {TopologyInitialStatus/INACTIVE :inactive
-                                              TopologyInitialStatus/ACTIVE :active}]
-                (start-storm nimbus storm-name storm-id (thrift-status->kw-status (.get_initial_status submitOptions))))))
-          (catch Throwable e
-            (log-warn-error e "Topology submission exception. (topology name='" storm-name "')")
-            (throw e))))
-
-      (^void submitTopology
-        [this ^String storm-name ^String uploadedJarLocation ^String serializedConf ^StormTopology topology]
-        (mark! nimbus:num-submitTopology-calls)
-        (.submitTopologyWithOpts this storm-name uploadedJarLocation serializedConf topology
-                                 (SubmitOptions. TopologyInitialStatus/ACTIVE)))
-
-      (^void killTopology [this ^String name]
-        (mark! nimbus:num-killTopology-calls)
-        (.killTopologyWithOpts this name (KillOptions.)))
-
-      (^void killTopologyWithOpts [this ^String storm-name ^KillOptions options]
-        (mark! nimbus:num-killTopologyWithOpts-calls)
-        (check-storm-active! nimbus storm-name true)
-        (let [topology-conf (try-read-storm-conf-from-name conf storm-name nimbus)
-              operation "killTopology"]
-          (check-authorization! nimbus storm-name topology-conf operation)
-          (let [wait-amt (if (.is_set_wait_secs options)
-                           (.get_wait_secs options)
-                           )]
-            (transition-name! nimbus storm-name [:kill wait-amt] true)
-            (notify-topology-action-listener nimbus storm-name operation))
-          (add-topology-to-history-log (get-storm-id (:storm-cluster-state nimbus) storm-name)
-            nimbus topology-conf)))
-
-      (^void rebalance [this ^String storm-name ^RebalanceOptions options]
-        (mark! nimbus:num-rebalance-calls)
-        (check-storm-active! nimbus storm-name true)
-        (let [topology-conf (try-read-storm-conf-from-name conf storm-name nimbus)
-              operation "rebalance"]
-          (check-authorization! nimbus storm-name topology-conf operation)
-          (let [wait-amt (if (.is_set_wait_secs options)
-                           (.get_wait_secs options))
-                num-workers (if (.is_set_num_workers options)
-                              (.get_num_workers options))
-                executor-overrides (if (.is_set_num_executors options)
-                                     (.get_num_executors options)
-                                     {})]
-            (doseq [[c num-executors] executor-overrides]
-              (when (<= num-executors 0)
-                (throw (InvalidTopologyException. "Number of executors must be greater than 0"))
-                ))
-            (transition-name! nimbus storm-name [:rebalance wait-amt num-workers executor-overrides] true)
-
-            (notify-topology-action-listener nimbus storm-name operation))))
-
-      (activate [this storm-name]
-        (mark! nimbus:num-activate-calls)
-        (let [topology-conf (try-read-storm-conf-from-name conf storm-name nimbus)
-              operation "activate"]
-          (check-authorization! nimbus storm-name topology-conf operation)
-          (transition-name! nimbus storm-name :activate true)
-          (notify-topology-action-listener nimbus storm-name operation)))
-
-      (deactivate [this storm-name]
-        (mark! nimbus:num-deactivate-calls)
-        (let [topology-conf (try-read-storm-conf-from-name conf storm-name nimbus)
-              operation "deactivate"]
-          (check-authorization! nimbus storm-name topology-conf operation)
-          (transition-name! nimbus storm-name :inactivate true)
-          (notify-topology-action-listener nimbus storm-name operation)))
-
-      (debug [this storm-name component-id enable? samplingPct]
-        (mark! nimbus:num-debug-calls)
-        (let [storm-cluster-state (:storm-cluster-state nimbus)
-              storm-id (get-storm-id storm-cluster-state storm-name)
-              topology-conf (try-read-storm-conf conf storm-id blob-store)
-              ;; make sure samplingPct is within bounds.
-              spct (Math/max (Math/min samplingPct 100.0) 0.0)
-              ;; while disabling we retain the sampling pct.
-              debug-options (if enable? {:enable enable? :samplingpct spct} {:enable enable?})
-              storm-base-updates (assoc {} :component->debug (if (empty? component-id)
-                                                               {storm-id debug-options}
-                                                               {component-id debug-options}))]
-          (check-authorization! nimbus storm-name topology-conf "debug")
-          (when-not storm-id
-            (throw (NotAliveException. storm-name)))
-          (log-message "Nimbus setting debug to " enable? " for storm-name '" storm-name "' storm-id '" storm-id "' sampling pct '" spct "'"
-            (if (not (clojure.string/blank? component-id)) (str " component-id '" component-id "'")))
-          (locking (:submit-lock nimbus)
-            (.update-storm! storm-cluster-state storm-id storm-base-updates))))
-
-      (^void setWorkerProfiler
-        [this ^String id ^ProfileRequest profileRequest]
-        (mark! nimbus:num-setWorkerProfiler-calls)
-        (let [topology-conf (try-read-storm-conf conf id (:blob-store nimbus))
-              storm-name (topology-conf TOPOLOGY-NAME)
-              _ (check-authorization! nimbus storm-name topology-conf "setWorkerProfiler")
-              storm-cluster-state (:storm-cluster-state nimbus)]
-          (.set-worker-profile-request storm-cluster-state id profileRequest)))
-
-      (^List getComponentPendingProfileActions
-        [this ^String id ^String component_id ^ProfileAction action]
-        (mark! nimbus:num-getComponentPendingProfileActions-calls)
-        (let [info (get-common-topo-info id "getComponentPendingProfileActions")
-              storm-cluster-state (:storm-cluster-state info)
-              task->component (:task->component info)
-              {:keys [executor->node+port node->host]} (:assignment info)
-              executor->host+port (map-val (fn [[node port]]
-                                             [(node->host node) port])
-                                    executor->node+port)
-              nodeinfos (stats/extract-nodeinfos-from-hb-for-comp executor->host+port task->component false component_id)
-              all-pending-actions-for-topology (.get-topology-profile-requests storm-cluster-state id true)
-              latest-profile-actions (remove nil? (map (fn [nodeInfo]
-                                                         (->> all-pending-actions-for-topology
-                                                              (filter #(and (= (:host nodeInfo) (.get_node (.get_nodeInfo %)))
-                                                                         (= (:port nodeInfo) (first (.get_port (.get_nodeInfo  %))))))
-                                                              (filter #(= action (.get_action %)))
-                                                              (sort-by #(.get_time_stamp %) >)
-                                                              first))
-                                                    nodeinfos))]
-          (log-message "Latest profile actions for topology " id " component " component_id " " (pr-str latest-profile-actions))
-          latest-profile-actions))
-
-      (^void setLogConfig [this ^String id ^LogConfig log-config-msg]
-        (mark! nimbus:num-setLogConfig-calls)
-        (let [topology-conf (try-read-storm-conf conf id (:blob-store nimbus))
-              storm-name (topology-conf TOPOLOGY-NAME)
-              _ (check-authorization! nimbus storm-name topology-conf "setLogConfig")
-              storm-cluster-state (:storm-cluster-state nimbus)
-              merged-log-co

<TRUNCATED>

[49/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/ReachTopology.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/ReachTopology.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/ReachTopology.java
new file mode 100644
index 0000000..13b2121
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/ReachTopology.java
@@ -0,0 +1,196 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter;
+
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.LocalDRPC;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.coordination.BatchOutputCollector;
+import org.apache.storm.drpc.LinearDRPCTopologyBuilder;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.BasicOutputCollector;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseBasicBolt;
+import org.apache.storm.topology.base.BaseBatchBolt;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+
+import java.util.*;
+
+/**
+ * This is a good example of doing complex Distributed RPC on top of Storm. This program creates a topology that can
+ * compute the reach for any URL on Twitter in realtime by parallelizing the whole computation.
+ * <p/>
+ * Reach is the number of unique people exposed to a URL on Twitter. To compute reach, you have to get all the people
+ * who tweeted the URL, get all the followers of all those people, unique that set of followers, and then count the
+ * unique set. It's an intense computation that can involve thousands of database calls and tens of millions of follower
+ * records.
+ * <p/>
+ * This Storm topology does every piece of that computation in parallel, turning what would be a computation that takes
+ * minutes on a single machine into one that takes just a couple seconds.
+ * <p/>
+ * For the purposes of demonstration, this topology replaces the use of actual DBs with in-memory hashmaps.
+ *
+ * @see <a href="http://storm.apache.org/documentation/Distributed-RPC.html">Distributed RPC</a>
+ */
+public class ReachTopology {
+  public static Map<String, List<String>> TWEETERS_DB = new HashMap<String, List<String>>() {{
+    put("foo.com/blog/1", Arrays.asList("sally", "bob", "tim", "george", "nathan"));
+    put("engineering.twitter.com/blog/5", Arrays.asList("adam", "david", "sally", "nathan"));
+    put("tech.backtype.com/blog/123", Arrays.asList("tim", "mike", "john"));
+  }};
+
+  public static Map<String, List<String>> FOLLOWERS_DB = new HashMap<String, List<String>>() {{
+    put("sally", Arrays.asList("bob", "tim", "alice", "adam", "jim", "chris", "jai"));
+    put("bob", Arrays.asList("sally", "nathan", "jim", "mary", "david", "vivian"));
+    put("tim", Arrays.asList("alex"));
+    put("nathan", Arrays.asList("sally", "bob", "adam", "harry", "chris", "vivian", "emily", "jordan"));
+    put("adam", Arrays.asList("david", "carissa"));
+    put("mike", Arrays.asList("john", "bob"));
+    put("john", Arrays.asList("alice", "nathan", "jim", "mike", "bob"));
+  }};
+
+  public static class GetTweeters extends BaseBasicBolt {
+    @Override
+    public void execute(Tuple tuple, BasicOutputCollector collector) {
+      Object id = tuple.getValue(0);
+      String url = tuple.getString(1);
+      List<String> tweeters = TWEETERS_DB.get(url);
+      if (tweeters != null) {
+        for (String tweeter : tweeters) {
+          collector.emit(new Values(id, tweeter));
+        }
+      }
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+      declarer.declare(new Fields("id", "tweeter"));
+    }
+  }
+
+  public static class GetFollowers extends BaseBasicBolt {
+    @Override
+    public void execute(Tuple tuple, BasicOutputCollector collector) {
+      Object id = tuple.getValue(0);
+      String tweeter = tuple.getString(1);
+      List<String> followers = FOLLOWERS_DB.get(tweeter);
+      if (followers != null) {
+        for (String follower : followers) {
+          collector.emit(new Values(id, follower));
+        }
+      }
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+      declarer.declare(new Fields("id", "follower"));
+    }
+  }
+
+  public static class PartialUniquer extends BaseBatchBolt {
+    BatchOutputCollector _collector;
+    Object _id;
+    Set<String> _followers = new HashSet<String>();
+
+    @Override
+    public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, Object id) {
+      _collector = collector;
+      _id = id;
+    }
+
+    @Override
+    public void execute(Tuple tuple) {
+      _followers.add(tuple.getString(1));
+    }
+
+    @Override
+    public void finishBatch() {
+      _collector.emit(new Values(_id, _followers.size()));
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+      declarer.declare(new Fields("id", "partial-count"));
+    }
+  }
+
+  public static class CountAggregator extends BaseBatchBolt {
+    BatchOutputCollector _collector;
+    Object _id;
+    int _count = 0;
+
+    @Override
+    public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, Object id) {
+      _collector = collector;
+      _id = id;
+    }
+
+    @Override
+    public void execute(Tuple tuple) {
+      _count += tuple.getInteger(1);
+    }
+
+    @Override
+    public void finishBatch() {
+      _collector.emit(new Values(_id, _count));
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+      declarer.declare(new Fields("id", "reach"));
+    }
+  }
+
+  public static LinearDRPCTopologyBuilder construct() {
+    LinearDRPCTopologyBuilder builder = new LinearDRPCTopologyBuilder("reach");
+    builder.addBolt(new GetTweeters(), 4);
+    builder.addBolt(new GetFollowers(), 12).shuffleGrouping();
+    builder.addBolt(new PartialUniquer(), 6).fieldsGrouping(new Fields("id", "follower"));
+    builder.addBolt(new CountAggregator(), 3).fieldsGrouping(new Fields("id"));
+    return builder;
+  }
+
+  public static void main(String[] args) throws Exception {
+    LinearDRPCTopologyBuilder builder = construct();
+
+
+    Config conf = new Config();
+
+    if (args == null || args.length == 0) {
+      conf.setMaxTaskParallelism(3);
+      LocalDRPC drpc = new LocalDRPC();
+      LocalCluster cluster = new LocalCluster();
+      cluster.submitTopology("reach-drpc", conf, builder.createLocalTopology(drpc));
+
+      String[] urlsToTry = new String[]{ "foo.com/blog/1", "engineering.twitter.com/blog/5", "notaurl.com" };
+      for (String url : urlsToTry) {
+        System.out.println("Reach of " + url + ": " + drpc.execute("reach", url));
+      }
+
+      cluster.shutdown();
+      drpc.shutdown();
+    }
+    else {
+      conf.setNumWorkers(6);
+      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createRemoteTopology());
+    }
+  }
+}
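
For comparison, here is a minimal single-machine sketch of the same reach computation the topology above parallelizes, driven directly off in-memory maps shaped like TWEETERS_DB and FOLLOWERS_DB. It is illustrative only and not part of this commit; in the topology each of these steps is sharded across many bolt tasks instead of running in one loop.

    import java.util.*;

    public class ReachSketch {
        // Illustrative stand-ins for the demo maps in ReachTopology.
        static Map<String, List<String>> tweeters = new HashMap<>();
        static Map<String, List<String>> followers = new HashMap<>();

        // Reach = size of the de-duplicated set of followers of everyone who tweeted the URL.
        static int reach(String url) {
            Set<String> uniqueFollowers = new HashSet<>();
            for (String tweeter : tweeters.getOrDefault(url, Collections.<String>emptyList())) {
                uniqueFollowers.addAll(followers.getOrDefault(tweeter, Collections.<String>emptyList()));
            }
            return uniqueFollowers.size();
        }

        public static void main(String[] args) {
            tweeters.put("foo.com/blog/1", Arrays.asList("sally", "bob"));
            followers.put("sally", Arrays.asList("bob", "tim", "alice"));
            followers.put("bob", Arrays.asList("sally", "tim"));
            System.out.println("Reach of foo.com/blog/1: " + reach("foo.com/blog/1")); // prints 4
        }
    }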

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/ResourceAwareExampleTopology.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/ResourceAwareExampleTopology.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/ResourceAwareExampleTopology.java
new file mode 100644
index 0000000..d4aa304
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/ResourceAwareExampleTopology.java
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter;
+
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.testing.TestWordSpout;
+import org.apache.storm.topology.BoltDeclarer;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.SpoutDeclarer;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.topology.base.BaseRichBolt;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.utils.Utils;
+
+import java.util.Map;
+
+public class ResourceAwareExampleTopology {
+  public static class ExclamationBolt extends BaseRichBolt {
+    OutputCollector _collector;
+
+    @Override
+    public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
+      _collector = collector;
+    }
+
+    @Override
+    public void execute(Tuple tuple) {
+      _collector.emit(tuple, new Values(tuple.getString(0) + "!!!"));
+      _collector.ack(tuple);
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+      declarer.declare(new Fields("word"));
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    TopologyBuilder builder = new TopologyBuilder();
+
+    SpoutDeclarer spout =  builder.setSpout("word", new TestWordSpout(), 10);
+    //set cpu requirement
+    spout.setCPULoad(20);
+    //set onheap and offheap memory requirement
+    spout.setMemoryLoad(64, 16);
+
+    BoltDeclarer bolt1 = builder.setBolt("exclaim1", new ExclamationBolt(), 3).shuffleGrouping("word");
+    //sets cpu requirement.  Not necessary to set both CPU and memory.
+    //For requirements not set, a default value will be used
+    bolt1.setCPULoad(15);
+
+    BoltDeclarer bolt2 = builder.setBolt("exclaim2", new ExclamationBolt(), 2).shuffleGrouping("exclaim1");
+    bolt2.setMemoryLoad(100);
+
+    Config conf = new Config();
+    conf.setDebug(true);
+
+    /**
+     * Use to limit the maximum amount of memory (in MB) allocated to one worker process.
+     * Can be used to spread executors to multiple workers
+     */
+    conf.setTopologyWorkerMaxHeapSize(1024.0);
+
+    //Topology priority: 0 is the highest priority, and priority decreases as the number increases.
+    //Recommended range is 0-29, but no hard limit is set.
+    conf.setTopologyPriority(29);
+
+    // Set strategy to schedule topology. If not specified, default to org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy
+    conf.setTopologyStrategy(org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy.class);
+
+    if (args != null && args.length > 0) {
+      conf.setNumWorkers(3);
+
+      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
+    }
+    else {
+
+      LocalCluster cluster = new LocalCluster();
+      cluster.submitTopology("test", conf, builder.createTopology());
+      Utils.sleep(10000);
+      cluster.killTopology("test");
+      cluster.shutdown();
+    }
+  }
+}
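
A rough back-of-the-envelope sketch (not part of this commit) of what the resource hints above add up to, assuming the worker max-heap cap constrains the summed on-heap requests of the executors packed into one worker; actual placement is up to the resource-aware scheduler and the defaults used for anything left unset.

    public class ResourceMathSketch {
        public static void main(String[] args) {
            // Values taken from ResourceAwareExampleTopology above.
            int spoutExecutors = 10;
            double spoutOnHeapMb = 64.0;
            double spoutOffHeapMb = 16.0;
            double workerMaxHeapMb = 1024.0;

            // Total memory the spout component requests across all of its executors.
            double spoutTotalMb = spoutExecutors * (spoutOnHeapMb + spoutOffHeapMb);
            System.out.println("Spout total memory request: " + spoutTotalMb + " MB"); // 800.0 MB

            // Upper bound on how many spout executors could share one worker,
            // if only the on-heap requests count against the worker heap cap.
            int maxSpoutExecutorsPerWorker = (int) (workerMaxHeapMb / spoutOnHeapMb);
            System.out.println("Max spout executors per worker: " + maxSpoutExecutorsPerWorker); // 16
        }
    }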

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/RollingTopWords.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/RollingTopWords.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/RollingTopWords.java
new file mode 100644
index 0000000..b5ee161
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/RollingTopWords.java
@@ -0,0 +1,130 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter;
+
+import org.apache.storm.Config;
+import org.apache.storm.testing.TestWordSpout;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.tuple.Fields;
+import org.apache.log4j.Logger;
+import org.apache.storm.starter.bolt.IntermediateRankingsBolt;
+import org.apache.storm.starter.bolt.RollingCountBolt;
+import org.apache.storm.starter.bolt.TotalRankingsBolt;
+import org.apache.storm.starter.util.StormRunner;
+
+/**
+ * This topology does a continuous computation of the top N words that the topology has seen in terms of cardinality.
+ * The top N computation is done in a completely scalable way, and a similar approach could be used to compute things
+ * like trending topics or trending images on Twitter.
+ */
+public class RollingTopWords {
+
+  private static final Logger LOG = Logger.getLogger(RollingTopWords.class);
+  private static final int DEFAULT_RUNTIME_IN_SECONDS = 60;
+  private static final int TOP_N = 5;
+
+  private final TopologyBuilder builder;
+  private final String topologyName;
+  private final Config topologyConfig;
+  private final int runtimeInSeconds;
+
+  public RollingTopWords(String topologyName) throws InterruptedException {
+    builder = new TopologyBuilder();
+    this.topologyName = topologyName;
+    topologyConfig = createTopologyConfiguration();
+    runtimeInSeconds = DEFAULT_RUNTIME_IN_SECONDS;
+
+    wireTopology();
+  }
+
+  private static Config createTopologyConfiguration() {
+    Config conf = new Config();
+    conf.setDebug(true);
+    return conf;
+  }
+
+  private void wireTopology() throws InterruptedException {
+    String spoutId = "wordGenerator";
+    String counterId = "counter";
+    String intermediateRankerId = "intermediateRanker";
+    String totalRankerId = "finalRanker";
+    builder.setSpout(spoutId, new TestWordSpout(), 5);
+    builder.setBolt(counterId, new RollingCountBolt(9, 3), 4).fieldsGrouping(spoutId, new Fields("word"));
+    builder.setBolt(intermediateRankerId, new IntermediateRankingsBolt(TOP_N), 4).fieldsGrouping(counterId, new Fields(
+        "obj"));
+    builder.setBolt(totalRankerId, new TotalRankingsBolt(TOP_N)).globalGrouping(intermediateRankerId);
+  }
+
+  public void runLocally() throws InterruptedException {
+    StormRunner.runTopologyLocally(builder.createTopology(), topologyName, topologyConfig, runtimeInSeconds);
+  }
+
+  public void runRemotely() throws Exception {
+    StormRunner.runTopologyRemotely(builder.createTopology(), topologyName, topologyConfig);
+  }
+
+  /**
+   * Submits (runs) the topology.
+   *
+   * Usage: "RollingTopWords [topology-name] [local|remote]"
+   *
+   * By default, the topology is run locally under the name "slidingWindowCounts".
+   *
+   * Examples:
+   *
+   * ```
+   *
+   * # Runs in local mode (LocalCluster), with topology name "slidingWindowCounts"
+   * $ storm jar storm-starter-jar-with-dependencies.jar org.apache.storm.starter.RollingTopWords
+   *
+   * # Runs in local mode (LocalCluster), with topology name "foobar"
+   * $ storm jar storm-starter-jar-with-dependencies.jar org.apache.storm.starter.RollingTopWords foobar
+   *
+   * # Runs in local mode (LocalCluster), with topology name "foobar"
+   * $ storm jar storm-starter-jar-with-dependencies.jar org.apache.storm.starter.RollingTopWords foobar local
+   *
+   * # Runs in remote/cluster mode, with topology name "production-topology"
+   * $ storm jar storm-starter-jar-with-dependencies.jar org.apache.storm.starter.RollingTopWords production-topology remote
+   * ```
+   *
+   * @param args First positional argument (optional) is topology name, second positional argument (optional) defines
+   *             whether to run the topology locally ("local") or remotely, i.e. on a real cluster ("remote").
+   * @throws Exception
+   */
+  public static void main(String[] args) throws Exception {
+    String topologyName = "slidingWindowCounts";
+    if (args.length >= 1) {
+      topologyName = args[0];
+    }
+    boolean runLocally = true;
+    if (args.length >= 2 && args[1].equalsIgnoreCase("remote")) {
+      runLocally = false;
+    }
+
+    LOG.info("Topology name: " + topologyName);
+    RollingTopWords rtw = new RollingTopWords(topologyName);
+    if (runLocally) {
+      LOG.info("Running in local mode");
+      rtw.runLocally();
+    }
+    else {
+      LOG.info("Running in remote (cluster) mode");
+      rtw.runRemotely();
+    }
+  }
+}
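
The rolling counts behind this ranking come from RollingCountBolt(9, 3), whose two arguments are, on my reading of storm-starter, the window length and emit frequency in seconds. Below is a small standalone sketch of that slot-based sliding count; the class and method names are illustrative only and not part of this commit.

    import java.util.HashMap;
    import java.util.Map;

    // Sliding counts kept in fixed-size slots: a 9-second window emitted every
    // 3 seconds needs 9 / 3 = 3 slots, and the oldest slot is recycled on each emit.
    public class SlidingCountSketch {
        private final int numSlots;
        private final Map<String, long[]> slotCounts = new HashMap<String, long[]>();
        private int currentSlot = 0;

        public SlidingCountSketch(int windowLengthSecs, int emitFrequencySecs) {
            this.numSlots = windowLengthSecs / emitFrequencySecs;
        }

        public void count(String word) {
            long[] slots = slotCounts.get(word);
            if (slots == null) {
                slots = new long[numSlots];
                slotCounts.put(word, slots);
            }
            slots[currentSlot]++;
        }

        // Called every emitFrequencySecs: report window totals, then wipe the slot being reused.
        public Map<String, Long> emitAndAdvance() {
            Map<String, Long> totals = new HashMap<String, Long>();
            for (Map.Entry<String, long[]> entry : slotCounts.entrySet()) {
                long sum = 0;
                for (long c : entry.getValue()) {
                    sum += c;
                }
                totals.put(entry.getKey(), sum);
            }
            currentSlot = (currentSlot + 1) % numSlots;
            for (long[] slots : slotCounts.values()) {
                slots[currentSlot] = 0;
            }
            return totals;
        }

        public static void main(String[] args) {
            SlidingCountSketch counter = new SlidingCountSketch(9, 3);
            counter.count("storm");
            counter.count("storm");
            counter.count("apache");
            System.out.println(counter.emitAndAdvance()); // {storm=2, apache=1} (map order may vary)
        }
    }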

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/SingleJoinExample.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/SingleJoinExample.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/SingleJoinExample.java
new file mode 100644
index 0000000..b153372
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/SingleJoinExample.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter;
+
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.testing.FeederSpout;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.utils.Utils;
+import org.apache.storm.starter.bolt.SingleJoinBolt;
+
+public class SingleJoinExample {
+  public static void main(String[] args) {
+    FeederSpout genderSpout = new FeederSpout(new Fields("id", "gender"));
+    FeederSpout ageSpout = new FeederSpout(new Fields("id", "age"));
+
+    TopologyBuilder builder = new TopologyBuilder();
+    builder.setSpout("gender", genderSpout);
+    builder.setSpout("age", ageSpout);
+    builder.setBolt("join", new SingleJoinBolt(new Fields("gender", "age"))).fieldsGrouping("gender", new Fields("id"))
+        .fieldsGrouping("age", new Fields("id"));
+
+    Config conf = new Config();
+    conf.setDebug(true);
+
+    LocalCluster cluster = new LocalCluster();
+    cluster.submitTopology("join-example", conf, builder.createTopology());
+
+    for (int i = 0; i < 10; i++) {
+      String gender;
+      if (i % 2 == 0) {
+        gender = "male";
+      }
+      else {
+        gender = "female";
+      }
+      genderSpout.feed(new Values(i, gender));
+    }
+
+    for (int i = 9; i >= 0; i--) {
+      ageSpout.feed(new Values(i, i + 20));
+    }
+
+    Utils.sleep(2000);
+    cluster.shutdown();
+  }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/SkewedRollingTopWords.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/SkewedRollingTopWords.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/SkewedRollingTopWords.java
new file mode 100644
index 0000000..3addc15
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/SkewedRollingTopWords.java
@@ -0,0 +1,132 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter;
+
+import org.apache.storm.Config;
+import org.apache.storm.testing.TestWordSpout;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.tuple.Fields;
+import org.apache.log4j.Logger;
+import org.apache.storm.starter.bolt.IntermediateRankingsBolt;
+import org.apache.storm.starter.bolt.RollingCountBolt;
+import org.apache.storm.starter.bolt.RollingCountAggBolt;
+import org.apache.storm.starter.bolt.TotalRankingsBolt;
+import org.apache.storm.starter.util.StormRunner;
+
+/**
+ * This topology does a continuous computation of the top N words that the topology has seen in terms of cardinality.
+ * The top N computation is done in a completely scalable way, and a similar approach could be used to compute things
+ * like trending topics or trending images on Twitter. It takes an approach that assumes that some words will be much
+ * more common than other words, and uses partialKeyGrouping to better balance the skewed load.
+ */
+public class SkewedRollingTopWords {
+  private static final Logger LOG = Logger.getLogger(SkewedRollingTopWords.class);
+  private static final int DEFAULT_RUNTIME_IN_SECONDS = 60;
+  private static final int TOP_N = 5;
+
+  private final TopologyBuilder builder;
+  private final String topologyName;
+  private final Config topologyConfig;
+  private final int runtimeInSeconds;
+
+  public SkewedRollingTopWords(String topologyName) throws InterruptedException {
+    builder = new TopologyBuilder();
+    this.topologyName = topologyName;
+    topologyConfig = createTopologyConfiguration();
+    runtimeInSeconds = DEFAULT_RUNTIME_IN_SECONDS;
+
+    wireTopology();
+  }
+
+  private static Config createTopologyConfiguration() {
+    Config conf = new Config();
+    conf.setDebug(true);
+    return conf;
+  }
+
+  private void wireTopology() throws InterruptedException {
+    String spoutId = "wordGenerator";
+    String counterId = "counter";
+    String aggId = "aggregator";
+    String intermediateRankerId = "intermediateRanker";
+    String totalRankerId = "finalRanker";
+    builder.setSpout(spoutId, new TestWordSpout(), 5);
+    builder.setBolt(counterId, new RollingCountBolt(9, 3), 4).partialKeyGrouping(spoutId, new Fields("word"));
+    builder.setBolt(aggId, new RollingCountAggBolt(), 4).fieldsGrouping(counterId, new Fields("obj"));
+    builder.setBolt(intermediateRankerId, new IntermediateRankingsBolt(TOP_N), 4).fieldsGrouping(aggId, new Fields("obj"));
+    builder.setBolt(totalRankerId, new TotalRankingsBolt(TOP_N)).globalGrouping(intermediateRankerId);
+  }
+
+  public void runLocally() throws InterruptedException {
+    StormRunner.runTopologyLocally(builder.createTopology(), topologyName, topologyConfig, runtimeInSeconds);
+  }
+
+  public void runRemotely() throws Exception {
+    StormRunner.runTopologyRemotely(builder.createTopology(), topologyName, topologyConfig);
+  }
+
+  /**
+   * Submits (runs) the topology.
+   *
+   * Usage: "RollingTopWords [topology-name] [local|remote]"
+   *
+   * By default, the topology is run locally under the name "slidingWindowCounts".
+   *
+   * Examples:
+   *
+   * ```
+   *
+   * # Runs in local mode (LocalCluster), with topology name "slidingWindowCounts"
+   * $ storm jar storm-starter-jar-with-dependencies.jar org.apache.storm.starter.SkewedRollingTopWords
+   *
+   * # Runs in local mode (LocalCluster), with topology name "foobar"
+   * $ storm jar storm-starter-jar-with-dependencies.jar org.apache.storm.starter.SkewedRollingTopWords foobar
+   *
+   * # Runs in local mode (LocalCluster), with topology name "foobar"
+   * $ storm jar storm-starter-jar-with-dependencies.jar org.apache.storm.starter.SkewedRollingTopWords foobar local
+   *
+   * # Runs in remote/cluster mode, with topology name "production-topology"
+   * $ storm jar storm-starter-jar-with-dependencies.jar org.apache.storm.starter.SkewedRollingTopWords production-topology remote
+   * ```
+   *
+   * @param args First positional argument (optional) is topology name, second positional argument (optional) defines
+   *             whether to run the topology locally ("local") or remotely, i.e. on a real cluster ("remote").
+   * @throws Exception
+   */
+  public static void main(String[] args) throws Exception {
+    String topologyName = "slidingWindowCounts";
+    if (args.length >= 1) {
+      topologyName = args[0];
+    }
+    boolean runLocally = true;
+    if (args.length >= 2 && args[1].equalsIgnoreCase("remote")) {
+      runLocally = false;
+    }
+
+    LOG.info("Topology name: " + topologyName);
+    SkewedRollingTopWords rtw = new SkewedRollingTopWords(topologyName);
+    if (runLocally) {
+      LOG.info("Running in local mode");
+      rtw.runLocally();
+    }
+    else {
+      LOG.info("Running in remote (cluster) mode");
+      rtw.runRemotely();
+    }
+  }
+}
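
A minimal sketch (illustrative names only, not part of this commit) of why the extra RollingCountAggBolt stage is needed here: partial key grouping may split a hot word between two counter tasks, so correct per-word totals only appear after the downstream fields-grouped aggregation. Real partial key grouping routes each tuple to the less-loaded of two candidate tasks; for simplicity the sketch just alternates between them.

    import java.util.HashMap;
    import java.util.Map;

    public class PartialKeyGroupingSketch {
        public static void main(String[] args) {
            // Partial counts held by two "counter task" instances.
            Map<String, Integer> counterTaskA = new HashMap<String, Integer>();
            Map<String, Integer> counterTaskB = new HashMap<String, Integer>();

            // A skewed stream: "the" dominates.
            String[] words = {"the", "the", "the", "storm", "the", "the", "apache"};
            boolean sendToA = true;
            for (String word : words) {
                Map<String, Integer> target = sendToA ? counterTaskA : counterTaskB;
                Integer current = target.get(word);
                target.put(word, current == null ? 1 : current + 1);
                sendToA = !sendToA; // alternate between the two candidate tasks
            }
            System.out.println("task A partial counts: " + counterTaskA);
            System.out.println("task B partial counts: " + counterTaskB);

            // The aggregator bolt (fields-grouped on the word) restores the true totals.
            Map<String, Integer> aggregated = new HashMap<String, Integer>(counterTaskA);
            for (Map.Entry<String, Integer> entry : counterTaskB.entrySet()) {
                Integer current = aggregated.get(entry.getKey());
                aggregated.put(entry.getKey(), current == null ? entry.getValue() : current + entry.getValue());
            }
            System.out.println("aggregated counts: " + aggregated); // {the=5, storm=1, apache=1}
        }
    }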

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/SlidingTupleTsTopology.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/SlidingTupleTsTopology.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/SlidingTupleTsTopology.java
new file mode 100644
index 0000000..90744f2
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/SlidingTupleTsTopology.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter;
+
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.topology.base.BaseWindowedBolt;
+import org.apache.storm.utils.Utils;
+import org.apache.storm.starter.bolt.PrinterBolt;
+import org.apache.storm.starter.bolt.SlidingWindowSumBolt;
+import org.apache.storm.starter.spout.RandomIntegerSpout;
+
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.storm.topology.base.BaseWindowedBolt.Duration;
+
+/**
+ * Windowing based on tuple timestamp (i.e. the time when the tuple is generated
+ * rather than when it is processed).
+ */
+public class SlidingTupleTsTopology {
+    public static void main(String[] args) throws Exception {
+        TopologyBuilder builder = new TopologyBuilder();
+        BaseWindowedBolt bolt = new SlidingWindowSumBolt()
+                .withWindow(new Duration(5, TimeUnit.SECONDS), new Duration(3, TimeUnit.SECONDS))
+                .withTimestampField("ts")
+                .withLag(new Duration(5, TimeUnit.SECONDS));
+        builder.setSpout("integer", new RandomIntegerSpout(), 1);
+        builder.setBolt("slidingsum", bolt, 1).shuffleGrouping("integer");
+        builder.setBolt("printer", new PrinterBolt(), 1).shuffleGrouping("slidingsum");
+        Config conf = new Config();
+        conf.setDebug(true);
+
+        if (args != null && args.length > 0) {
+            conf.setNumWorkers(1);
+            StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
+        } else {
+            LocalCluster cluster = new LocalCluster();
+            cluster.submitTopology("test", conf, builder.createTopology());
+            Utils.sleep(40000);
+            cluster.killTopology("test");
+            cluster.shutdown();
+        }
+    }
+}
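
The topology above assumes that the upstream spout emits a field named "ts" carrying each
tuple's generation time in milliseconds; withTimestampField("ts") reads that field, and
withLag(...) tells Storm how late such timestamps may arrive. In storm-starter the bundled
RandomIntegerSpout plays that role. A minimal sketch of a spout satisfying this assumption
(the class name is hypothetical, for illustration only) could look like this:

    import java.util.Map;
    import java.util.Random;

    import org.apache.storm.spout.SpoutOutputCollector;
    import org.apache.storm.task.TopologyContext;
    import org.apache.storm.topology.OutputFieldsDeclarer;
    import org.apache.storm.topology.base.BaseRichSpout;
    import org.apache.storm.tuple.Fields;
    import org.apache.storm.tuple.Values;
    import org.apache.storm.utils.Utils;

    // Hypothetical example: emits a random integer plus the wall-clock time at which
    // the tuple was generated, in a field named "ts".
    public class TimestampedIntegerSpout extends BaseRichSpout {
        private SpoutOutputCollector collector;
        private Random rand;

        @Override
        public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
            this.collector = collector;
            this.rand = new Random();
        }

        @Override
        public void nextTuple() {
            Utils.sleep(100);
            // "ts" is the field that withTimestampField("ts") reads downstream.
            collector.emit(new Values(rand.nextInt(100), System.currentTimeMillis()));
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            declarer.declare(new Fields("value", "ts"));
        }
    }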

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/SlidingWindowTopology.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/SlidingWindowTopology.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/SlidingWindowTopology.java
new file mode 100644
index 0000000..cedcec5
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/SlidingWindowTopology.java
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter;
+
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.topology.base.BaseWindowedBolt;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.utils.Utils;
+import org.apache.storm.windowing.TupleWindow;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.storm.starter.bolt.PrinterBolt;
+import org.apache.storm.starter.bolt.SlidingWindowSumBolt;
+import org.apache.storm.starter.spout.RandomIntegerSpout;
+
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.storm.topology.base.BaseWindowedBolt.Count;
+
+/**
+ * A sample topology that demonstrates the usage of {@link org.apache.storm.topology.IWindowedBolt}
+ * to calculate a sliding window sum.
+ */
+public class SlidingWindowTopology {
+
+    private static final Logger LOG = LoggerFactory.getLogger(SlidingWindowTopology.class);
+
+    /*
+     * Computes tumbling window average
+     */
+    private static class TumblingWindowAvgBolt extends BaseWindowedBolt {
+        private OutputCollector collector;
+
+        @Override
+        public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
+            this.collector = collector;
+        }
+
+        @Override
+        public void execute(TupleWindow inputWindow) {
+            int sum = 0;
+            List<Tuple> tuplesInWindow = inputWindow.get();
+            LOG.debug("Events in current window: " + tuplesInWindow.size());
+            if (tuplesInWindow.size() > 0) {
+                /*
+                * Since this is a tumbling window calculation,
+                * we use all the tuples in the window to compute the avg.
+                */
+                for (Tuple tuple : tuplesInWindow) {
+                    sum += (int) tuple.getValue(0);
+                }
+                collector.emit(new Values(sum / tuplesInWindow.size()));
+            }
+        }
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+            declarer.declare(new Fields("avg"));
+        }
+    }
+
+
+    public static void main(String[] args) throws Exception {
+        TopologyBuilder builder = new TopologyBuilder();
+        builder.setSpout("integer", new RandomIntegerSpout(), 1);
+        builder.setBolt("slidingsum", new SlidingWindowSumBolt().withWindow(new Count(30), new Count(10)), 1)
+                .shuffleGrouping("integer");
+        builder.setBolt("tumblingavg", new TumblingWindowAvgBolt().withTumblingWindow(new Count(3)), 1)
+                .shuffleGrouping("slidingsum");
+        builder.setBolt("printer", new PrinterBolt(), 1).shuffleGrouping("tumblingavg");
+        Config conf = new Config();
+        conf.setDebug(true);
+        if (args != null && args.length > 0) {
+            conf.setNumWorkers(1);
+            StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
+        } else {
+            LocalCluster cluster = new LocalCluster();
+            cluster.submitTopology("test", conf, builder.createTopology());
+            Utils.sleep(40000);
+            cluster.killTopology("test");
+            cluster.shutdown();
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/ThroughputVsLatency.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/ThroughputVsLatency.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/ThroughputVsLatency.java
new file mode 100644
index 0000000..8ee48c9
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/ThroughputVsLatency.java
@@ -0,0 +1,432 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter;
+
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.metric.HttpForwardingMetricsServer;
+import org.apache.storm.metric.HttpForwardingMetricsConsumer;
+import org.apache.storm.metric.api.IMetric;
+import org.apache.storm.metric.api.IMetricsConsumer.TaskInfo;
+import org.apache.storm.metric.api.IMetricsConsumer.DataPoint;
+import org.apache.storm.generated.*;
+import org.apache.storm.spout.SpoutOutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.BasicOutputCollector;
+import org.apache.storm.topology.IRichBolt;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.topology.base.BaseBasicBolt;
+import org.apache.storm.topology.base.BaseRichSpout;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.utils.NimbusClient;
+import org.apache.storm.utils.Utils;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.storm.metrics.hdrhistogram.HistogramMetric;
+import org.HdrHistogram.Histogram;
+
+/**
+ * A WordCount topology in which the spout emits at a predefined rate and
+ * accurate end-to-end latency statistics are collected.
+ */
+public class ThroughputVsLatency {
+  private static class SentWithTime {
+    public final String sentence;
+    public final long time;
+
+    SentWithTime(String sentence, long time) {
+        this.sentence = sentence;
+        this.time = time;
+    }
+  }
+
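+  /**
+   * Small facade that routes cluster operations either to an in-process LocalCluster
+   * or to a remote Nimbus client, depending on the "run.local" entry in the config.
+   */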
+  public static class C {
+    LocalCluster _local = null;
+    Nimbus.Client _client = null;
+
+    public C(Map conf) {
+      Map clusterConf = Utils.readStormConfig();
+      if (conf != null) {
+        clusterConf.putAll(conf);
+      }
+      Boolean isLocal = (Boolean)clusterConf.get("run.local");
+      if (isLocal != null && isLocal) {
+        _local = new LocalCluster();
+      } else {
+        _client = NimbusClient.getConfiguredClient(clusterConf).getClient();
+      }
+    }
+
+    public ClusterSummary getClusterInfo() throws Exception {
+      if (_local != null) {
+        return _local.getClusterInfo();
+      } else {
+        return _client.getClusterInfo();
+      }
+    }
+
+    public TopologyInfo getTopologyInfo(String id) throws Exception {
+      if (_local != null) {
+        return _local.getTopologyInfo(id);
+      } else {
+        return _client.getTopologyInfo(id);
+      }
+    }
+
+    public void killTopologyWithOpts(String name, KillOptions opts) throws Exception {
+      if (_local != null) {
+        _local.killTopologyWithOpts(name, opts);
+      } else {
+        _client.killTopologyWithOpts(name, opts);
+      }
+    }
+
+    public void submitTopology(String name, Map stormConf, StormTopology topology) throws Exception {
+      if (_local != null) {
+        _local.submitTopology(name, stormConf, topology);
+      } else {
+        StormSubmitter.submitTopology(name, stormConf, topology);
+      }
+    }
+
+    public boolean isLocal() {
+      return _local != null;
+    }
+  }
+
+  public static class FastRandomSentenceSpout extends BaseRichSpout {
+    static final String[] SENTENCES = new String[]{ "the cow jumped over the moon", "an apple a day keeps the doctor away",
+          "four score and seven years ago", "snow white and the seven dwarfs", "i am at two with nature" };
+
+    SpoutOutputCollector _collector;
+    long _periodNano;
+    long _emitAmount;
+    Random _rand;
+    long _nextEmitTime;
+    long _emitsLeft;
+    HistogramMetric _histo;
+
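+    /**
+     * Derives a simple emission schedule from the target rate: every _periodNano
+     * nanoseconds the spout may emit _emitAmount tuples, so the long-run average
+     * stays close to ratePerSecond.
+     */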
+    public FastRandomSentenceSpout(long ratePerSecond) {
+        if (ratePerSecond > 0) {
+            _periodNano = Math.max(1, 1000000000/ratePerSecond);
+            _emitAmount = Math.max(1, (long)((ratePerSecond / 1000000000.0) * _periodNano));
+        } else {
+            _periodNano = Long.MAX_VALUE - 1;
+            _emitAmount = 1;
+        }
+    }
+
+    @Override
+    public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
+      _collector = collector;
+      _rand = ThreadLocalRandom.current();
+      _nextEmitTime = System.nanoTime();
+      _emitsLeft = _emitAmount;
+      _histo = new HistogramMetric(3600000000000L, 3);
+      context.registerMetric("comp-lat-histo", _histo, 10); //Update every 10 seconds, so we are not too far behind
+    }
+
+    @Override
+    public void nextTuple() {
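+      // Refill the per-period emit budget once the current period has elapsed, then
+      // drain it one tuple per call so the spout approximates the configured rate.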
+      if (_emitsLeft <= 0 && _nextEmitTime <= System.nanoTime()) {
+          _emitsLeft = _emitAmount;
+          _nextEmitTime = _nextEmitTime + _periodNano;
+      }
+
+      if (_emitsLeft > 0) {
+          String sentence = SENTENCES[_rand.nextInt(SENTENCES.length)];
+          _collector.emit(new Values(sentence), new SentWithTime(sentence, _nextEmitTime - _periodNano));
+          _emitsLeft--;
+      }
+    }
+
+    @Override
+    public void ack(Object id) {
+      long end = System.nanoTime();
+      SentWithTime st = (SentWithTime)id;
+      _histo.recordValue(end-st.time);
+    }
+
+    @Override
+    public void fail(Object id) {
+      SentWithTime st = (SentWithTime)id;
+      _collector.emit(new Values(st.sentence), id);
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+      declarer.declare(new Fields("sentence"));
+    }
+  }
+
+  public static class SplitSentence extends BaseBasicBolt {
+    @Override
+    public void execute(Tuple tuple, BasicOutputCollector collector) {
+      String sentence = tuple.getString(0);
+      for (String word: sentence.split("\\s+")) {
+          collector.emit(new Values(word, 1));
+      }
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+      declarer.declare(new Fields("word", "count"));
+    }
+  }
+
+  public static class WordCount extends BaseBasicBolt {
+    Map<String, Integer> counts = new HashMap<String, Integer>();
+
+    @Override
+    public void execute(Tuple tuple, BasicOutputCollector collector) {
+      String word = tuple.getString(0);
+      Integer count = counts.get(word);
+      if (count == null)
+        count = 0;
+      count++;
+      counts.put(word, count);
+      collector.emit(new Values(word, count));
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+      declarer.declare(new Fields("word", "count"));
+    }
+  }
+
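+  /**
+   * Holds the most recent memory reading reported by a worker; readings older than
+   * 20 seconds are considered stale and ignored when summing cluster memory usage.
+   */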
+  private static class MemMeasure {
+    private long _mem = 0;
+    private long _time = 0;
+
+    public synchronized void update(long mem) {
+        _mem = mem;
+        _time = System.currentTimeMillis();
+    }
+
+    public synchronized long get() {
+        return isExpired() ? 0l : _mem;
+    }
+
+    public synchronized boolean isExpired() {
+        return (System.currentTimeMillis() - _time) >= 20000;
+    }
+  }
+
+  private static final Histogram _histo = new Histogram(3600000000000L, 3);
+  private static final AtomicLong _systemCPU = new AtomicLong(0);
+  private static final AtomicLong _userCPU = new AtomicLong(0);
+  private static final AtomicLong _gcCount = new AtomicLong(0);
+  private static final AtomicLong _gcMs = new AtomicLong(0);
+  private static final ConcurrentHashMap<String, MemMeasure> _memoryBytes = new ConcurrentHashMap<String, MemMeasure>();
+
+  private static long readMemory() {
+    long total = 0;
+    for (MemMeasure mem: _memoryBytes.values()) {
+      total += mem.get();
+    }
+    return total;
+  }
+
+  private static long _prev_acked = 0;
+  private static long _prev_uptime = 0;
+
+  public static void printMetrics(C client, String name) throws Exception {
+    ClusterSummary summary = client.getClusterInfo();
+    String id = null;
+    for (TopologySummary ts: summary.get_topologies()) {
+      if (name.equals(ts.get_name())) {
+        id = ts.get_id();
+      }
+    }
+    if (id == null) {
+      throw new Exception("Could not find a topology named "+name);
+    }
+    TopologyInfo info = client.getTopologyInfo(id);
+    int uptime = info.get_uptime_secs();
+    long acked = 0;
+    long failed = 0;
+    for (ExecutorSummary exec: info.get_executors()) {
+      if ("spout".equals(exec.get_component_id())) {
+        SpoutStats stats = exec.get_stats().get_specific().get_spout();
+        Map<String, Long> failedMap = stats.get_failed().get(":all-time");
+        Map<String, Long> ackedMap = stats.get_acked().get(":all-time");
+        if (ackedMap != null) {
+          for (String key: ackedMap.keySet()) {
+            if (failedMap != null) {
+              Long tmp = failedMap.get(key);
+              if (tmp != null) {
+                  failed += tmp;
+              }
+            }
+            long ackVal = ackedMap.get(key);
+            acked += ackVal;
+          }
+        }
+      }
+    }
+    long ackedThisTime = acked - _prev_acked;
+    long thisTime = uptime - _prev_uptime;
+    long nnpct, nnnpct, min, max;
+    double mean, stddev;
+    synchronized(_histo) {
+      nnpct = _histo.getValueAtPercentile(99.0);
+      nnnpct = _histo.getValueAtPercentile(99.9);
+      min = _histo.getMinValue();
+      max = _histo.getMaxValue();
+      mean = _histo.getMean();
+      stddev = _histo.getStdDeviation();
+      _histo.reset();
+    }
+    long user = _userCPU.getAndSet(0);
+    long sys = _systemCPU.getAndSet(0);
+    long gc = _gcMs.getAndSet(0);
+    double memMB = readMemory() / (1024.0 * 1024.0);
+    System.out.printf("uptime: %,4d acked: %,9d acked/sec: %,10.2f failed: %,8d " +
+                      "99%%: %,15d 99.9%%: %,15d min: %,15d max: %,15d mean: %,15.2f " +
+                      "stddev: %,15.2f user: %,10d sys: %,10d gc: %,10d mem: %,10.2f\n",
+                       uptime, ackedThisTime, (((double)ackedThisTime)/thisTime), failed, nnpct, nnnpct,
+                       min, max, mean, stddev, user, sys, gc, memMB);
+    _prev_uptime = uptime;
+    _prev_acked = acked;
+  }
+
+  public static void kill(C client, String name) throws Exception {
+    KillOptions opts = new KillOptions();
+    opts.set_wait_secs(0);
+    client.killTopologyWithOpts(name, opts);
+  }
+
+  public static void main(String[] args) throws Exception {
+    long ratePerSecond = 500;
+    if (args != null && args.length > 0) {
+        ratePerSecond = Long.valueOf(args[0]);
+    }
+
+    int parallelism = 4;
+    if (args != null && args.length > 1) {
+        parallelism = Integer.valueOf(args[1]);
+    }
+
+    int numMins = 5;
+    if (args != null && args.length > 2) {
+        numMins = Integer.valueOf(args[2]);
+    }
+
+    String name = "wc-test";
+    if (args != null && args.length > 3) {
+        name = args[3];
+    }
+
+    Config conf = new Config();
+    HttpForwardingMetricsServer metricServer = new HttpForwardingMetricsServer(conf) {
+        @Override
+        public void handle(TaskInfo taskInfo, Collection<DataPoint> dataPoints) {
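+            // Fold each worker's reported data points into the static aggregates
+            // (latency histogram, CPU ms, GC count/ms, memory) that printMetrics() reads.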
+            String worker = taskInfo.srcWorkerHost + ":" + taskInfo.srcWorkerPort;
+            for (DataPoint dp: dataPoints) {
+                if ("comp-lat-histo".equals(dp.name) && dp.value instanceof Histogram) {
+                    synchronized(_histo) {
+                        _histo.add((Histogram)dp.value);
+                    }
+                } else if ("CPU".equals(dp.name) && dp.value instanceof Map) {
+                   Map<Object, Object> m = (Map<Object, Object>)dp.value;
+                   Object sys = m.get("sys-ms");
+                   if (sys instanceof Number) {
+                       _systemCPU.getAndAdd(((Number)sys).longValue());
+                   }
+                   Object user = m.get("user-ms");
+                   if (user instanceof Number) {
+                       _userCPU.getAndAdd(((Number)user).longValue());
+                   }
+                } else if (dp.name.startsWith("GC/") && dp.value instanceof Map) {
+                   Map<Object, Object> m = (Map<Object, Object>)dp.value;
+                   Object count = m.get("count");
+                   if (count instanceof Number) {
+                       _gcCount.getAndAdd(((Number)count).longValue());
+                   }
+                   Object time = m.get("timeMs");
+                   if (time instanceof Number) {
+                       _gcMs.getAndAdd(((Number)time).longValue());
+                   }
+                } else if (dp.name.startsWith("memory/") && dp.value instanceof Map) {
+                   Map<Object, Object> m = (Map<Object, Object>)dp.value;
+                   Object val = m.get("usedBytes");
+                   if (val instanceof Number) {
+                       MemMeasure mm = _memoryBytes.get(worker);
+                       if (mm == null) {
+                         mm = new MemMeasure();
+                         MemMeasure tmp = _memoryBytes.putIfAbsent(worker, mm);
+                         mm = tmp == null ? mm : tmp; 
+                       }
+                       mm.update(((Number)val).longValue());
+                   }
+                }
+            }
+        }
+    };
+
+    metricServer.serve();
+    String url = metricServer.getUrl();
+
+    C cluster = new C(conf);
+    conf.setNumWorkers(parallelism);
+    conf.registerMetricsConsumer(org.apache.storm.metric.LoggingMetricsConsumer.class);
+    conf.registerMetricsConsumer(org.apache.storm.metric.HttpForwardingMetricsConsumer.class, url, 1);
+    Map<String, String> workerMetrics = new HashMap<String, String>();
+    if (!cluster.isLocal()) {
+      //sigar uses JNI and does not work in local mode
+      workerMetrics.put("CPU", "org.apache.storm.metrics.sigar.CPUMetric");
+    }
+    conf.put(Config.TOPOLOGY_WORKER_METRICS, workerMetrics);
+    conf.put(Config.TOPOLOGY_BUILTIN_METRICS_BUCKET_SIZE_SECS, 10);
+    conf.put(Config.TOPOLOGY_WORKER_GC_CHILDOPTS,
+      "-XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:NewSize=128m -XX:CMSInitiatingOccupancyFraction=70 -XX:-CMSConcurrentMTEnabled");
+    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-Xmx2g");
+
+    TopologyBuilder builder = new TopologyBuilder();
+
+    int numEach = 4 * parallelism;
+    builder.setSpout("spout", new FastRandomSentenceSpout(ratePerSecond/numEach), numEach);
+
+    builder.setBolt("split", new SplitSentence(), numEach).shuffleGrouping("spout");
+    builder.setBolt("count", new WordCount(), numEach).fieldsGrouping("split", new Fields("word"));
+
+    try {
+        cluster.submitTopology(name, conf, builder.createTopology());
+
+        for (int i = 0; i < numMins * 2; i++) {
+            Thread.sleep(30 * 1000);
+            printMetrics(cluster, name);
+        }
+    } finally {
+        kill(cluster, name);
+    }
+    System.exit(0);
+  }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/TransactionalGlobalCount.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/TransactionalGlobalCount.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/TransactionalGlobalCount.java
new file mode 100644
index 0000000..312f83e
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/TransactionalGlobalCount.java
@@ -0,0 +1,174 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter;
+
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.coordination.BatchOutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.testing.MemoryTransactionalSpout;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseBatchBolt;
+import org.apache.storm.topology.base.BaseTransactionalBolt;
+import org.apache.storm.transactional.ICommitter;
+import org.apache.storm.transactional.TransactionAttempt;
+import org.apache.storm.transactional.TransactionalTopologyBuilder;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * This is a basic example of a transactional topology. It keeps a count of the number of tuples seen so far in a
+ * database. The source of data and the databases are mocked out as in-memory maps for demonstration purposes.
+ *
+ * @see <a href="http://storm.apache.org/documentation/Transactional-topologies.html">Transactional topologies</a>
+ */
+public class TransactionalGlobalCount {
+  public static final int PARTITION_TAKE_PER_BATCH = 3;
+  public static final Map<Integer, List<List<Object>>> DATA = new HashMap<Integer, List<List<Object>>>() {{
+    put(0, new ArrayList<List<Object>>() {{
+      add(new Values("cat"));
+      add(new Values("dog"));
+      add(new Values("chicken"));
+      add(new Values("cat"));
+      add(new Values("dog"));
+      add(new Values("apple"));
+    }});
+    put(1, new ArrayList<List<Object>>() {{
+      add(new Values("cat"));
+      add(new Values("dog"));
+      add(new Values("apple"));
+      add(new Values("banana"));
+    }});
+    put(2, new ArrayList<List<Object>>() {{
+      add(new Values("cat"));
+      add(new Values("cat"));
+      add(new Values("cat"));
+      add(new Values("cat"));
+      add(new Values("cat"));
+      add(new Values("dog"));
+      add(new Values("dog"));
+      add(new Values("dog"));
+      add(new Values("dog"));
+    }});
+  }};
+
+  public static class Value {
+    int count = 0;
+    BigInteger txid;
+  }
+
+  public static Map<String, Value> DATABASE = new HashMap<String, Value>();
+  public static final String GLOBAL_COUNT_KEY = "GLOBAL-COUNT";
+
+  public static class BatchCount extends BaseBatchBolt {
+    Object _id;
+    BatchOutputCollector _collector;
+
+    int _count = 0;
+
+    @Override
+    public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, Object id) {
+      _collector = collector;
+      _id = id;
+    }
+
+    @Override
+    public void execute(Tuple tuple) {
+      _count++;
+    }
+
+    @Override
+    public void finishBatch() {
+      _collector.emit(new Values(_id, _count));
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+      declarer.declare(new Fields("id", "count"));
+    }
+  }
+
+  public static class UpdateGlobalCount extends BaseTransactionalBolt implements ICommitter {
+    TransactionAttempt _attempt;
+    BatchOutputCollector _collector;
+
+    int _sum = 0;
+
+    @Override
+    public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, TransactionAttempt attempt) {
+      _collector = collector;
+      _attempt = attempt;
+    }
+
+    @Override
+    public void execute(Tuple tuple) {
+      _sum += tuple.getInteger(1);
+    }
+
+    @Override
+    public void finishBatch() {
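+      // Idempotent commit: apply this batch's sum only if the stored txid differs from
+      // the current transaction, so a replayed batch leaves the database unchanged.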
+      Value val = DATABASE.get(GLOBAL_COUNT_KEY);
+      Value newval;
+      if (val == null || !val.txid.equals(_attempt.getTransactionId())) {
+        newval = new Value();
+        newval.txid = _attempt.getTransactionId();
+        if (val == null) {
+          newval.count = _sum;
+        }
+        else {
+          newval.count = _sum + val.count;
+        }
+        DATABASE.put(GLOBAL_COUNT_KEY, newval);
+      }
+      else {
+        newval = val;
+      }
+      _collector.emit(new Values(_attempt, newval.count));
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+      declarer.declare(new Fields("id", "sum"));
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
+    TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("global-count", "spout", spout, 3);
+    builder.setBolt("partial-count", new BatchCount(), 5).noneGrouping("spout");
+    builder.setBolt("sum", new UpdateGlobalCount()).globalGrouping("partial-count");
+
+    LocalCluster cluster = new LocalCluster();
+
+    Config config = new Config();
+    config.setDebug(true);
+    config.setMaxSpoutPending(3);
+
+    cluster.submitTopology("global-count-topology", config, builder.buildTopology());
+
+    Thread.sleep(3000);
+    cluster.shutdown();
+  }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/TransactionalWords.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/TransactionalWords.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/TransactionalWords.java
new file mode 100644
index 0000000..64689b0
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/TransactionalWords.java
@@ -0,0 +1,246 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter;
+
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.coordination.BatchOutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.testing.MemoryTransactionalSpout;
+import org.apache.storm.topology.BasicOutputCollector;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseBasicBolt;
+import org.apache.storm.topology.base.BaseTransactionalBolt;
+import org.apache.storm.transactional.ICommitter;
+import org.apache.storm.transactional.TransactionAttempt;
+import org.apache.storm.transactional.TransactionalTopologyBuilder;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * This class defines a more involved transactional topology than TransactionalGlobalCount. This topology processes a
+ * stream of words and produces two outputs:
+ * <p/>
+ * 1. A count for each word (stored in a database).
+ * <p/>
+ * 2. The number of words for every bucket of 10 counts, i.e. it stores in the database how many words have appeared
+ * 0-9 times, how many have appeared 10-19 times, and so on.
+ * <p/>
+ * A batch of words can cause the bucket counts to decrement for some buckets and increment for others as words move
+ * between buckets as their counts accumulate.
+ */
+public class TransactionalWords {
+  public static class CountValue {
+    Integer prev_count = null;
+    int count = 0;
+    BigInteger txid = null;
+  }
+
+  public static class BucketValue {
+    int count = 0;
+    BigInteger txid;
+  }
+
+  public static final int BUCKET_SIZE = 10;
+
+  public static Map<String, CountValue> COUNT_DATABASE = new HashMap<String, CountValue>();
+  public static Map<Integer, BucketValue> BUCKET_DATABASE = new HashMap<Integer, BucketValue>();
+
+
+  public static final int PARTITION_TAKE_PER_BATCH = 3;
+
+  public static final Map<Integer, List<List<Object>>> DATA = new HashMap<Integer, List<List<Object>>>() {{
+    put(0, new ArrayList<List<Object>>() {{
+      add(new Values("cat"));
+      add(new Values("dog"));
+      add(new Values("chicken"));
+      add(new Values("cat"));
+      add(new Values("dog"));
+      add(new Values("apple"));
+    }});
+    put(1, new ArrayList<List<Object>>() {{
+      add(new Values("cat"));
+      add(new Values("dog"));
+      add(new Values("apple"));
+      add(new Values("banana"));
+    }});
+    put(2, new ArrayList<List<Object>>() {{
+      add(new Values("cat"));
+      add(new Values("cat"));
+      add(new Values("cat"));
+      add(new Values("cat"));
+      add(new Values("cat"));
+      add(new Values("dog"));
+      add(new Values("dog"));
+      add(new Values("dog"));
+      add(new Values("dog"));
+    }});
+  }};
+
+  public static class KeyedCountUpdater extends BaseTransactionalBolt implements ICommitter {
+    Map<String, Integer> _counts = new HashMap<String, Integer>();
+    BatchOutputCollector _collector;
+    TransactionAttempt _id;
+
+    int _count = 0;
+
+    @Override
+    public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, TransactionAttempt id) {
+      _collector = collector;
+      _id = id;
+    }
+
+    @Override
+    public void execute(Tuple tuple) {
+      String key = tuple.getString(1);
+      Integer curr = _counts.get(key);
+      if (curr == null)
+        curr = 0;
+      _counts.put(key, curr + 1);
+    }
+
+    @Override
+    public void finishBatch() {
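+      // Per-key idempotent update: the stored txid records whether this transaction
+      // has already been applied to a given key's count.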
+      for (String key : _counts.keySet()) {
+        CountValue val = COUNT_DATABASE.get(key);
+        CountValue newVal;
+        if (val == null || !val.txid.equals(_id.getTransactionId())) {
+          newVal = new CountValue();
+          newVal.txid = _id.getTransactionId();
+          if (val != null) {
+            newVal.prev_count = val.count;
+            newVal.count = val.count;
+          }
+          newVal.count = newVal.count + _counts.get(key);
+          COUNT_DATABASE.put(key, newVal);
+        }
+        else {
+          newVal = val;
+        }
+        _collector.emit(new Values(_id, key, newVal.count, newVal.prev_count));
+      }
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+      declarer.declare(new Fields("id", "key", "count", "prev-count"));
+    }
+  }
+
+  public static class Bucketize extends BaseBasicBolt {
+    @Override
+    public void execute(Tuple tuple, BasicOutputCollector collector) {
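+      // A word's bucket is count / BUCKET_SIZE; when the count crosses a bucket
+      // boundary, emit +1 for the new bucket and -1 for the old one so downstream
+      // bucket totals can be adjusted incrementally.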
+      TransactionAttempt attempt = (TransactionAttempt) tuple.getValue(0);
+      int curr = tuple.getInteger(2);
+      Integer prev = tuple.getInteger(3);
+
+      int currBucket = curr / BUCKET_SIZE;
+      Integer prevBucket = null;
+      if (prev != null) {
+        prevBucket = prev / BUCKET_SIZE;
+      }
+
+      if (prevBucket == null) {
+        collector.emit(new Values(attempt, currBucket, 1));
+      }
+      else if (currBucket != prevBucket) {
+        collector.emit(new Values(attempt, currBucket, 1));
+        collector.emit(new Values(attempt, prevBucket, -1));
+      }
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+      declarer.declare(new Fields("attempt", "bucket", "delta"));
+    }
+  }
+
+  public static class BucketCountUpdater extends BaseTransactionalBolt {
+    Map<Integer, Integer> _accum = new HashMap<Integer, Integer>();
+    BatchOutputCollector _collector;
+    TransactionAttempt _attempt;
+
+    int _count = 0;
+
+    @Override
+    public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, TransactionAttempt attempt) {
+      _collector = collector;
+      _attempt = attempt;
+    }
+
+    @Override
+    public void execute(Tuple tuple) {
+      Integer bucket = tuple.getInteger(1);
+      Integer delta = tuple.getInteger(2);
+      Integer curr = _accum.get(bucket);
+      if (curr == null)
+        curr = 0;
+      _accum.put(bucket, curr + delta);
+    }
+
+    @Override
+    public void finishBatch() {
+      for (Integer bucket : _accum.keySet()) {
+        BucketValue currVal = BUCKET_DATABASE.get(bucket);
+        BucketValue newVal;
+        if (currVal == null || !currVal.txid.equals(_attempt.getTransactionId())) {
+          newVal = new BucketValue();
+          newVal.txid = _attempt.getTransactionId();
+          newVal.count = _accum.get(bucket);
+          if (currVal != null)
+            newVal.count += currVal.count;
+          BUCKET_DATABASE.put(bucket, newVal);
+        }
+        else {
+          newVal = currVal;
+        }
+        _collector.emit(new Values(_attempt, bucket, newVal.count));
+      }
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+      declarer.declare(new Fields("id", "bucket", "count"));
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
+    TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("top-n-words", "spout", spout, 2);
+    builder.setBolt("count", new KeyedCountUpdater(), 5).fieldsGrouping("spout", new Fields("word"));
+    builder.setBolt("bucketize", new Bucketize()).noneGrouping("count");
+    builder.setBolt("buckets", new BucketCountUpdater(), 5).fieldsGrouping("bucketize", new Fields("bucket"));
+
+
+    LocalCluster cluster = new LocalCluster();
+
+    Config config = new Config();
+    config.setDebug(true);
+    config.setMaxSpoutPending(3);
+
+    cluster.submitTopology("top-n-topology", config, builder.buildTopology());
+
+    Thread.sleep(3000);
+    cluster.shutdown();
+  }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/WordCountTopology.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/WordCountTopology.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/WordCountTopology.java
new file mode 100644
index 0000000..e4a5711
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/WordCountTopology.java
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter;
+
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.task.ShellBolt;
+import org.apache.storm.topology.BasicOutputCollector;
+import org.apache.storm.topology.IRichBolt;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.topology.base.BaseBasicBolt;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.starter.spout.RandomSentenceSpout;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * This topology demonstrates Storm's stream groupings and multilang capabilities.
+ */
+public class WordCountTopology {
+  public static class SplitSentence extends ShellBolt implements IRichBolt {
+
+    public SplitSentence() {
+      super("python", "splitsentence.py");
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+      declarer.declare(new Fields("word"));
+    }
+
+    @Override
+    public Map<String, Object> getComponentConfiguration() {
+      return null;
+    }
+  }
+
+  public static class WordCount extends BaseBasicBolt {
+    Map<String, Integer> counts = new HashMap<String, Integer>();
+
+    @Override
+    public void execute(Tuple tuple, BasicOutputCollector collector) {
+      String word = tuple.getString(0);
+      Integer count = counts.get(word);
+      if (count == null)
+        count = 0;
+      count++;
+      counts.put(word, count);
+      collector.emit(new Values(word, count));
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+      declarer.declare(new Fields("word", "count"));
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+
+    TopologyBuilder builder = new TopologyBuilder();
+
+    builder.setSpout("spout", new RandomSentenceSpout(), 5);
+
+    builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
+    builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));
+
+    Config conf = new Config();
+    conf.setDebug(true);
+
+    if (args != null && args.length > 0) {
+      conf.setNumWorkers(3);
+
+      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
+    }
+    else {
+      conf.setMaxTaskParallelism(3);
+
+      LocalCluster cluster = new LocalCluster();
+      cluster.submitTopology("word-count", conf, builder.createTopology());
+
+      Thread.sleep(10000);
+
+      cluster.shutdown();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/WordCountTopologyNode.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/WordCountTopologyNode.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/WordCountTopologyNode.java
new file mode 100644
index 0000000..431b9d8
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/WordCountTopologyNode.java
@@ -0,0 +1,121 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter;
+
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.spout.ShellSpout;
+import org.apache.storm.task.ShellBolt;
+import org.apache.storm.topology.*;
+import org.apache.storm.topology.base.BaseBasicBolt;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * This topology demonstrates Storm's stream groupings and multilang capabilities.
+ */
+public class WordCountTopologyNode {
+  public static class SplitSentence extends ShellBolt implements IRichBolt {
+
+    public SplitSentence() {
+      super("node", "splitsentence.js");
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+      declarer.declare(new Fields("word"));
+    }
+
+    @Override
+    public Map<String, Object> getComponentConfiguration() {
+      return null;
+    }
+  }
+
+    public static class RandomSentence extends ShellSpout implements IRichSpout {
+
+        public RandomSentence() {
+            super("node", "randomsentence.js");
+        }
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+            declarer.declare(new Fields("word"));
+        }
+
+        @Override
+        public Map<String, Object> getComponentConfiguration() {
+            return null;
+        }
+    }
+
+  public static class WordCount extends BaseBasicBolt {
+    Map<String, Integer> counts = new HashMap<String, Integer>();
+
+    @Override
+    public void execute(Tuple tuple, BasicOutputCollector collector) {
+      String word = tuple.getString(0);
+      Integer count = counts.get(word);
+      if (count == null)
+        count = 0;
+      count++;
+      counts.put(word, count);
+      collector.emit(new Values(word, count));
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+      declarer.declare(new Fields("word", "count"));
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+
+    TopologyBuilder builder = new TopologyBuilder();
+
+    builder.setSpout("spout", new RandomSentence(), 5);
+
+    builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
+    builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));
+
+    Config conf = new Config();
+    conf.setDebug(true);
+
+
+    if (args != null && args.length > 0) {
+      conf.setNumWorkers(3);
+
+      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
+    }
+    else {
+      conf.setMaxTaskParallelism(3);
+
+      LocalCluster cluster = new LocalCluster();
+      cluster.submitTopology("word-count", conf, builder.createTopology());
+
+      Thread.sleep(10000);
+
+      cluster.shutdown();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/AbstractRankerBolt.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/AbstractRankerBolt.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/AbstractRankerBolt.java
new file mode 100644
index 0000000..9cf9e79
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/AbstractRankerBolt.java
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter.bolt;
+
+import org.apache.storm.Config;
+import org.apache.storm.topology.BasicOutputCollector;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseBasicBolt;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.utils.TupleUtils;
+import org.apache.log4j.Logger;
+import org.apache.storm.starter.tools.Rankings;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * This abstract bolt provides the basic behavior of bolts that rank objects according to their count.
+ * <p/>
+ * It uses a template method design pattern for {@link AbstractRankerBolt#execute(Tuple, BasicOutputCollector)} to allow
+ * actual bolt implementations to specify how incoming tuples are processed, i.e. how the objects embedded within those
+ * tuples are retrieved and counted.
+ */
+public abstract class AbstractRankerBolt extends BaseBasicBolt {
+
+  private static final long serialVersionUID = 4931640198501530202L;
+  private static final int DEFAULT_EMIT_FREQUENCY_IN_SECONDS = 2;
+  private static final int DEFAULT_COUNT = 10;
+
+  private final int emitFrequencyInSeconds;
+  private final int count;
+  private final Rankings rankings;
+
+  public AbstractRankerBolt() {
+    this(DEFAULT_COUNT, DEFAULT_EMIT_FREQUENCY_IN_SECONDS);
+  }
+
+  public AbstractRankerBolt(int topN) {
+    this(topN, DEFAULT_EMIT_FREQUENCY_IN_SECONDS);
+  }
+
+  public AbstractRankerBolt(int topN, int emitFrequencyInSeconds) {
+    if (topN < 1) {
+      throw new IllegalArgumentException("topN must be >= 1 (you requested " + topN + ")");
+    }
+    if (emitFrequencyInSeconds < 1) {
+      throw new IllegalArgumentException(
+          "The emit frequency must be >= 1 seconds (you requested " + emitFrequencyInSeconds + " seconds)");
+    }
+    count = topN;
+    this.emitFrequencyInSeconds = emitFrequencyInSeconds;
+    rankings = new Rankings(count);
+  }
+
+  protected Rankings getRankings() {
+    return rankings;
+  }
+
+  /**
+   * This method functions as a template method (design pattern).
+   */
+  @Override
+  public final void execute(Tuple tuple, BasicOutputCollector collector) {
+    if (TupleUtils.isTick(tuple)) {
+      getLogger().debug("Received tick tuple, triggering emit of current rankings");
+      emitRankings(collector);
+    }
+    else {
+      updateRankingsWithTuple(tuple);
+    }
+  }
+
+  abstract void updateRankingsWithTuple(Tuple tuple);
+
+  private void emitRankings(BasicOutputCollector collector) {
+    collector.emit(new Values(rankings.copy()));
+    getLogger().debug("Rankings: " + rankings);
+  }
+
+  @Override
+  public void declareOutputFields(OutputFieldsDeclarer declarer) {
+    declarer.declare(new Fields("rankings"));
+  }
+
+  @Override
+  public Map<String, Object> getComponentConfiguration() {
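+    // Ask Storm to send this bolt a tick tuple every emitFrequencyInSeconds;
+    // execute() uses those ticks to trigger emitRankings().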
+    Map<String, Object> conf = new HashMap<String, Object>();
+    conf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, emitFrequencyInSeconds);
+    return conf;
+  }
+
+  abstract Logger getLogger();
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/IntermediateRankingsBolt.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/IntermediateRankingsBolt.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/IntermediateRankingsBolt.java
new file mode 100644
index 0000000..6950bfb
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/IntermediateRankingsBolt.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter.bolt;
+
+import org.apache.storm.tuple.Tuple;
+import org.apache.log4j.Logger;
+import org.apache.storm.starter.tools.Rankable;
+import org.apache.storm.starter.tools.RankableObjectWithFields;
+
+/**
+ * This bolt ranks incoming objects by their count.
+ * <p/>
+ * It assumes that input tuples adhere to the following format: (object, object_count, additionalField1,
+ * additionalField2, ..., additionalFieldN).
+ */
+public final class IntermediateRankingsBolt extends AbstractRankerBolt {
+
+  private static final long serialVersionUID = -1369800530256637409L;
+  private static final Logger LOG = Logger.getLogger(IntermediateRankingsBolt.class);
+
+  public IntermediateRankingsBolt() {
+    super();
+  }
+
+  public IntermediateRankingsBolt(int topN) {
+    super(topN);
+  }
+
+  public IntermediateRankingsBolt(int topN, int emitFrequencyInSeconds) {
+    super(topN, emitFrequencyInSeconds);
+  }
+
+  @Override
+  void updateRankingsWithTuple(Tuple tuple) {
+    Rankable rankable = RankableObjectWithFields.from(tuple);
+    super.getRankings().updateWith(rankable);
+  }
+
+  @Override
+  Logger getLogger() {
+    return LOG;
+  }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/PrinterBolt.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/PrinterBolt.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/PrinterBolt.java
new file mode 100644
index 0000000..993a937
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/PrinterBolt.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter.bolt;
+
+import org.apache.storm.topology.BasicOutputCollector;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseBasicBolt;
+import org.apache.storm.tuple.Tuple;
+
+
+public class PrinterBolt extends BaseBasicBolt {
+
+  @Override
+  public void execute(Tuple tuple, BasicOutputCollector collector) {
+    System.out.println(tuple);
+  }
+
+  @Override
+  public void declareOutputFields(OutputFieldsDeclarer ofd) {
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/RollingCountAggBolt.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/RollingCountAggBolt.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/RollingCountAggBolt.java
new file mode 100644
index 0000000..45300de
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/RollingCountAggBolt.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter.bolt;
+
+import org.apache.storm.Config;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseRichBolt;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+import org.apache.log4j.Logger;
+import org.apache.storm.starter.tools.NthLastModifiedTimeTracker;
+import org.apache.storm.starter.tools.SlidingWindowCounter;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+
+/**
+ * This bolt aggregates counts from multiple upstream bolts.
+ */
+public class RollingCountAggBolt extends BaseRichBolt {
+  private static final long serialVersionUID = 5537727428628598519L;
+  private static final Logger LOG = Logger.getLogger(RollingCountAggBolt.class);
+  //Mapping of key->upstreamBolt->count
+  private Map<Object, Map<Integer, Long>> counts = new HashMap<Object, Map<Integer, Long>>();
+  private OutputCollector collector;
+
+
+  @SuppressWarnings("rawtypes")
+  @Override
+  public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
+    this.collector = collector;
+  }
+
+  @Override
+  public void execute(Tuple tuple) {
+    Object obj = tuple.getValue(0);
+    long count = tuple.getLong(1);
+    int source = tuple.getSourceTask();
+    Map<Integer, Long> subCounts = counts.get(obj);
+    if (subCounts == null) {
+      subCounts = new HashMap<Integer, Long>();
+      counts.put(obj, subCounts);
+    }
+    //Update the current count for this object
+    subCounts.put(source, count);
+    //Output the sum of all the known counts so far for this key
+    long sum = 0;
+    for (Long val: subCounts.values()) {
+      sum += val;
+    }
+    collector.emit(new Values(obj, sum));
+  }
+
+  @Override
+  public void declareOutputFields(OutputFieldsDeclarer declarer) {
+    declarer.declare(new Fields("obj", "count"));
+  }
+}
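
RollingCountAggBolt above keeps, per key, a map from upstream task id to that task's latest partial count, and re-emits the sum of those partial counts on every update. For the sum to be correct, all partial counts for a given key must reach the same aggregator task. A hypothetical wiring sketch (bolt ids, parallelism and the upstream RollingCountBolt settings are illustrative assumptions):

    // Sketch only: several rolling-count shards feed one aggregator task per key.
    builder.setBolt("rollingCount", new RollingCountBolt(9, 3), 4).shuffleGrouping("spout");
    builder.setBolt("aggregator", new RollingCountAggBolt(), 2)
           .fieldsGrouping("rollingCount", new Fields("obj"));

The fieldsGrouping on "obj" is what guarantees that every shard's partial count for the same key lands on the same aggregator task.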


[37/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/KafkaUtils.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/KafkaUtils.java b/external/storm-kafka/src/jvm/storm/kafka/KafkaUtils.java
deleted file mode 100644
index 52cdde8..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/KafkaUtils.java
+++ /dev/null
@@ -1,275 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import backtype.storm.metric.api.IMetric;
-import backtype.storm.utils.Utils;
-import com.google.common.base.Preconditions;
-import kafka.api.FetchRequest;
-import kafka.api.FetchRequestBuilder;
-import kafka.api.PartitionOffsetRequestInfo;
-import kafka.common.TopicAndPartition;
-import kafka.javaapi.FetchResponse;
-import kafka.javaapi.OffsetRequest;
-import kafka.javaapi.consumer.SimpleConsumer;
-import kafka.javaapi.message.ByteBufferMessageSet;
-import kafka.message.Message;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import storm.kafka.trident.GlobalPartitionInformation;
-import storm.kafka.trident.IBrokerReader;
-import storm.kafka.trident.StaticBrokerReader;
-import storm.kafka.trident.ZkBrokerReader;
-
-import java.io.IOException;
-import java.net.ConnectException;
-import java.net.SocketTimeoutException;
-import java.nio.ByteBuffer;
-import java.nio.channels.UnresolvedAddressException;
-import java.util.*;
-
-
-public class KafkaUtils {
-
-    public static final Logger LOG = LoggerFactory.getLogger(KafkaUtils.class);
-    private static final int NO_OFFSET = -5;
-
-
-    public static IBrokerReader makeBrokerReader(Map stormConf, KafkaConfig conf) {
-        if (conf.hosts instanceof StaticHosts) {
-            return new StaticBrokerReader(conf.topic, ((StaticHosts) conf.hosts).getPartitionInformation());
-        } else {
-            return new ZkBrokerReader(stormConf, conf.topic, (ZkHosts) conf.hosts);
-        }
-    }
-
-
-    public static long getOffset(SimpleConsumer consumer, String topic, int partition, KafkaConfig config) {
-        long startOffsetTime = config.startOffsetTime;
-        return getOffset(consumer, topic, partition, startOffsetTime);
-    }
-
-    public static long getOffset(SimpleConsumer consumer, String topic, int partition, long startOffsetTime) {
-        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
-        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
-        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(startOffsetTime, 1));
-        OffsetRequest request = new OffsetRequest(
-                requestInfo, kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
-
-        long[] offsets = consumer.getOffsetsBefore(request).offsets(topic, partition);
-        if (offsets.length > 0) {
-            return offsets[0];
-        } else {
-            return NO_OFFSET;
-        }
-    }
-
-    public static class KafkaOffsetMetric implements IMetric {
-        Map<Partition, Long> _partitionToOffset = new HashMap<Partition, Long>();
-        Set<Partition> _partitions;
-        DynamicPartitionConnections _connections;
-
-        public KafkaOffsetMetric(DynamicPartitionConnections connections) {
-            _connections = connections;
-        }
-
-        public void setLatestEmittedOffset(Partition partition, long offset) {
-            _partitionToOffset.put(partition, offset);
-        }
-
-        private class TopicMetrics {
-            long totalSpoutLag = 0;
-            long totalEarliestTimeOffset = 0;
-            long totalLatestTimeOffset = 0;
-            long totalLatestEmittedOffset = 0;
-        }
-
-        @Override
-        public Object getValueAndReset() {
-            try {
-                HashMap ret = new HashMap();
-                if (_partitions != null && _partitions.size() == _partitionToOffset.size()) {
-                    Map<String,TopicMetrics> topicMetricsMap = new TreeMap<String, TopicMetrics>();
-                    for (Map.Entry<Partition, Long> e : _partitionToOffset.entrySet()) {
-                        Partition partition = e.getKey();
-                        SimpleConsumer consumer = _connections.getConnection(partition);
-                        if (consumer == null) {
-                            LOG.warn("partitionToOffset contains partition not found in _connections. Stale partition data?");
-                            return null;
-                        }
-                        long latestTimeOffset = getOffset(consumer, partition.topic, partition.partition, kafka.api.OffsetRequest.LatestTime());
-                        long earliestTimeOffset = getOffset(consumer, partition.topic, partition.partition, kafka.api.OffsetRequest.EarliestTime());
-                        if (latestTimeOffset == KafkaUtils.NO_OFFSET) {
-                            LOG.warn("No data found in Kafka Partition " + partition.getId());
-                            return null;
-                        }
-                        long latestEmittedOffset = e.getValue();
-                        long spoutLag = latestTimeOffset - latestEmittedOffset;
-                        String topic = partition.topic;
-                        String metricPath = partition.getId();
-                        //Handle the case where Partition Path Id does not contain topic name Partition.getId() == "partition_" + partition
-                        if (!metricPath.startsWith(topic + "/")) {
-                            metricPath = topic + "/" + metricPath;
-                        }
-                        ret.put(metricPath + "/" + "spoutLag", spoutLag);
-                        ret.put(metricPath + "/" + "earliestTimeOffset", earliestTimeOffset);
-                        ret.put(metricPath + "/" + "latestTimeOffset", latestTimeOffset);
-                        ret.put(metricPath + "/" + "latestEmittedOffset", latestEmittedOffset);
-
-                        if (!topicMetricsMap.containsKey(partition.topic)) {
-                            topicMetricsMap.put(partition.topic,new TopicMetrics());
-                        }
-
-                        TopicMetrics topicMetrics = topicMetricsMap.get(partition.topic);
-                        topicMetrics.totalSpoutLag += spoutLag;
-                        topicMetrics.totalEarliestTimeOffset += earliestTimeOffset;
-                        topicMetrics.totalLatestTimeOffset += latestTimeOffset;
-                        topicMetrics.totalLatestEmittedOffset += latestEmittedOffset;
-                    }
-
-                    for(Map.Entry<String, TopicMetrics> e : topicMetricsMap.entrySet()) {
-                        String topic = e.getKey();
-                        TopicMetrics topicMetrics = e.getValue();
-                        ret.put(topic + "/" + "totalSpoutLag", topicMetrics.totalSpoutLag);
-                        ret.put(topic + "/" + "totalEarliestTimeOffset", topicMetrics.totalEarliestTimeOffset);
-                        ret.put(topic + "/" + "totalLatestTimeOffset", topicMetrics.totalLatestTimeOffset);
-                        ret.put(topic + "/" + "totalLatestEmittedOffset", topicMetrics.totalLatestEmittedOffset);
-                    }
-
-                    return ret;
-                } else {
-                    LOG.info("Metrics Tick: Not enough data to calculate spout lag.");
-                }
-            } catch (Throwable t) {
-                LOG.warn("Metrics Tick: Exception when computing kafkaOffset metric.", t);
-            }
-            return null;
-        }
-
-        public void refreshPartitions(Set<Partition> partitions) {
-            _partitions = partitions;
-            Iterator<Partition> it = _partitionToOffset.keySet().iterator();
-            while (it.hasNext()) {
-                if (!partitions.contains(it.next())) {
-                    it.remove();
-                }
-            }
-        }
-    }
-
-    public static ByteBufferMessageSet fetchMessages(KafkaConfig config, SimpleConsumer consumer, Partition partition, long offset)
-            throws TopicOffsetOutOfRangeException, FailedFetchException,RuntimeException {
-        ByteBufferMessageSet msgs = null;
-        String topic = partition.topic;
-        int partitionId = partition.partition;
-        FetchRequestBuilder builder = new FetchRequestBuilder();
-        FetchRequest fetchRequest = builder.addFetch(topic, partitionId, offset, config.fetchSizeBytes).
-                clientId(config.clientId).maxWait(config.fetchMaxWait).build();
-        FetchResponse fetchResponse;
-        try {
-            fetchResponse = consumer.fetch(fetchRequest);
-        } catch (Exception e) {
-            if (e instanceof ConnectException ||
-                    e instanceof SocketTimeoutException ||
-                    e instanceof IOException ||
-                    e instanceof UnresolvedAddressException
-                    ) {
-                LOG.warn("Network error when fetching messages:", e);
-                throw new FailedFetchException(e);
-            } else {
-                throw new RuntimeException(e);
-            }
-        }
-        if (fetchResponse.hasError()) {
-            KafkaError error = KafkaError.getError(fetchResponse.errorCode(topic, partitionId));
-            if (error.equals(KafkaError.OFFSET_OUT_OF_RANGE) && config.useStartOffsetTimeIfOffsetOutOfRange) {
-                String msg = partition + " Got fetch request with offset out of range: [" + offset + "]";
-                LOG.warn(msg);
-                throw new TopicOffsetOutOfRangeException(msg);
-            } else {
-                String message = "Error fetching data from [" + partition + "] for topic [" + topic + "]: [" + error + "]";
-                LOG.error(message);
-                throw new FailedFetchException(message);
-            }
-        } else {
-            msgs = fetchResponse.messageSet(topic, partitionId);
-        }
-        return msgs;
-    }
-
-
-    public static Iterable<List<Object>> generateTuples(KafkaConfig kafkaConfig, Message msg, String topic) {
-        Iterable<List<Object>> tups;
-        ByteBuffer payload = msg.payload();
-        if (payload == null) {
-            return null;
-        }
-        ByteBuffer key = msg.key();
-        if (key != null && kafkaConfig.scheme instanceof KeyValueSchemeAsMultiScheme) {
-            tups = ((KeyValueSchemeAsMultiScheme) kafkaConfig.scheme).deserializeKeyAndValue(key, payload);
-        } else {
-            if (kafkaConfig.scheme instanceof StringMultiSchemeWithTopic) {
-                tups = ((StringMultiSchemeWithTopic)kafkaConfig.scheme).deserializeWithTopic(topic, payload);
-            } else {
-                tups = kafkaConfig.scheme.deserialize(payload);
-            }
-        }
-        return tups;
-    }
-    
-    public static Iterable<List<Object>> generateTuples(MessageMetadataSchemeAsMultiScheme scheme, Message msg, Partition partition, long offset) {
-        ByteBuffer payload = msg.payload();
-        if (payload == null) {
-            return null;
-        }
-        return scheme.deserializeMessageWithMetadata(payload, partition, offset);
-    }
-
-
-    public static List<Partition> calculatePartitionsForTask(List<GlobalPartitionInformation> partitons, int totalTasks, int taskIndex) {
-        Preconditions.checkArgument(taskIndex < totalTasks, "task index must be less that total tasks");
-        List<Partition> taskPartitions = new ArrayList<Partition>();
-        List<Partition> partitions = new ArrayList<Partition>();
-        for(GlobalPartitionInformation partitionInformation : partitons) {
-            partitions.addAll(partitionInformation.getOrderedPartitions());
-        }
-        int numPartitions = partitions.size();
-        if (numPartitions < totalTasks) {
-            LOG.warn("there are more tasks than partitions (tasks: " + totalTasks + "; partitions: " + numPartitions + "), some tasks will be idle");
-        }
-        for (int i = taskIndex; i < numPartitions; i += totalTasks) {
-            Partition taskPartition = partitions.get(i);
-            taskPartitions.add(taskPartition);
-        }
-        logPartitionMapping(totalTasks, taskIndex, taskPartitions);
-        return taskPartitions;
-    }
-
-    private static void logPartitionMapping(int totalTasks, int taskIndex, List<Partition> taskPartitions) {
-        String taskPrefix = taskId(taskIndex, totalTasks);
-        if (taskPartitions.isEmpty()) {
-            LOG.warn(taskPrefix + "no partitions assigned");
-        } else {
-            LOG.info(taskPrefix + "assigned " + taskPartitions);
-        }
-    }
-
-    public static String taskId(int taskIndex, int totalTasks) {
-        return "Task [" + (taskIndex + 1) + "/" + totalTasks + "] ";
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/KeyValueScheme.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/KeyValueScheme.java b/external/storm-kafka/src/jvm/storm/kafka/KeyValueScheme.java
deleted file mode 100644
index 7c0dc6c..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/KeyValueScheme.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import backtype.storm.spout.Scheme;
-
-import java.nio.ByteBuffer;
-import java.util.List;
-
-public interface KeyValueScheme extends Scheme {
-    List<Object> deserializeKeyAndValue(ByteBuffer key, ByteBuffer value);
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/KeyValueSchemeAsMultiScheme.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/KeyValueSchemeAsMultiScheme.java b/external/storm-kafka/src/jvm/storm/kafka/KeyValueSchemeAsMultiScheme.java
deleted file mode 100644
index d27ae7e..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/KeyValueSchemeAsMultiScheme.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import backtype.storm.spout.SchemeAsMultiScheme;
-
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import java.util.List;
-
-public class KeyValueSchemeAsMultiScheme extends SchemeAsMultiScheme {
-
-    public KeyValueSchemeAsMultiScheme(KeyValueScheme scheme) {
-        super(scheme);
-    }
-
-    public Iterable<List<Object>> deserializeKeyAndValue(final ByteBuffer key, final ByteBuffer value) {
-        List<Object> o = ((KeyValueScheme)scheme).deserializeKeyAndValue(key, value);
-        if(o == null) return null;
-        else return Arrays.asList(o);
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/MessageMetadataScheme.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/MessageMetadataScheme.java b/external/storm-kafka/src/jvm/storm/kafka/MessageMetadataScheme.java
deleted file mode 100644
index 62f652f..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/MessageMetadataScheme.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import backtype.storm.spout.Scheme;
-
-import java.nio.ByteBuffer;
-import java.util.List;
-
-public interface MessageMetadataScheme extends Scheme {
-    List<Object> deserializeMessageWithMetadata(ByteBuffer message, Partition partition, long offset);
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/MessageMetadataSchemeAsMultiScheme.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/MessageMetadataSchemeAsMultiScheme.java b/external/storm-kafka/src/jvm/storm/kafka/MessageMetadataSchemeAsMultiScheme.java
deleted file mode 100644
index f23a101..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/MessageMetadataSchemeAsMultiScheme.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import java.util.List;
-
-import backtype.storm.spout.SchemeAsMultiScheme;
-
-public class MessageMetadataSchemeAsMultiScheme extends SchemeAsMultiScheme {
-    private static final long serialVersionUID = -7172403703813625116L;
-
-    public MessageMetadataSchemeAsMultiScheme(MessageMetadataScheme scheme) {
-        super(scheme);
-    }
-
-    public Iterable<List<Object>> deserializeMessageWithMetadata(ByteBuffer message, Partition partition, long offset) {
-        List<Object> o = ((MessageMetadataScheme) scheme).deserializeMessageWithMetadata(message, partition, offset);
-        if (o == null) {
-            return null;
-        } else {
-            return Arrays.asList(o);
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/Partition.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/Partition.java b/external/storm-kafka/src/jvm/storm/kafka/Partition.java
deleted file mode 100644
index 5f683ef..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/Partition.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import com.google.common.base.Objects;
-import storm.trident.spout.ISpoutPartition;
-
-
-public class Partition implements ISpoutPartition {
-
-    public Broker host;
-    public int partition;
-    public String topic;
-
-    //Flag to keep the Partition Path Id backward compatible with Old implementation of Partition.getId() == "partition_" + partition
-    private Boolean bUseTopicNameForPartitionPathId;
-
-    // for kryo compatibility
-    private Partition() {
-	
-    }
-    public Partition(Broker host, String topic, int partition) {
-        this.topic = topic;
-        this.host = host;
-        this.partition = partition;
-        this.bUseTopicNameForPartitionPathId = false;
-    }
-    
-    public Partition(Broker host, String topic, int partition,Boolean bUseTopicNameForPartitionPathId) {
-        this.topic = topic;
-        this.host = host;
-        this.partition = partition;
-        this.bUseTopicNameForPartitionPathId = bUseTopicNameForPartitionPathId;
-    }
-
-    @Override
-    public int hashCode() {
-        return Objects.hashCode(host, topic, partition);
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-        if (this == obj) {
-            return true;
-        }
-        if (obj == null || getClass() != obj.getClass()) {
-            return false;
-        }
-        final Partition other = (Partition) obj;
-        return Objects.equal(this.host, other.host) && Objects.equal(this.topic, other.topic) && Objects.equal(this.partition, other.partition);
-    }
-
-    @Override
-    public String toString() {
-        return "Partition{" +
-                "host=" + host +
-                ", topic=" + topic +
-                ", partition=" + partition +
-                '}';
-    }
-
-    @Override
-    public String getId() {
-        if (bUseTopicNameForPartitionPathId) {
-            return  topic  + "/partition_" + partition;
-        } else {
-            //Keep the Partition Id backward compatible with Old implementation of Partition.getId() == "partition_" + partition
-            return "partition_" + partition;
-        }
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/PartitionCoordinator.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/PartitionCoordinator.java b/external/storm-kafka/src/jvm/storm/kafka/PartitionCoordinator.java
deleted file mode 100644
index 9cfed60..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/PartitionCoordinator.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import java.util.List;
-
-public interface PartitionCoordinator {
-    List<PartitionManager> getMyManagedPartitions();
-
-    PartitionManager getManager(Partition partition);
-
-    void refresh();
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/PartitionManager.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/PartitionManager.java b/external/storm-kafka/src/jvm/storm/kafka/PartitionManager.java
deleted file mode 100644
index ff02e22..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/PartitionManager.java
+++ /dev/null
@@ -1,316 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import backtype.storm.Config;
-import backtype.storm.metric.api.CombinedMetric;
-import backtype.storm.metric.api.CountMetric;
-import backtype.storm.metric.api.MeanReducer;
-import backtype.storm.metric.api.ReducedMetric;
-import backtype.storm.spout.SpoutOutputCollector;
-
-import com.google.common.base.Strings;
-import com.google.common.collect.ImmutableMap;
-
-import kafka.javaapi.consumer.SimpleConsumer;
-import kafka.javaapi.message.ByteBufferMessageSet;
-import kafka.message.MessageAndOffset;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import storm.kafka.KafkaSpout.EmitState;
-import storm.kafka.trident.MaxMetric;
-
-import java.util.*;
-
-public class PartitionManager {
-    public static final Logger LOG = LoggerFactory.getLogger(PartitionManager.class);
-
-    private final CombinedMetric _fetchAPILatencyMax;
-    private final ReducedMetric _fetchAPILatencyMean;
-    private final CountMetric _fetchAPICallCount;
-    private final CountMetric _fetchAPIMessageCount;
-    Long _emittedToOffset;
-    // _pending key = Kafka offset, value = time at which the message was first submitted to the topology
-    private SortedMap<Long,Long> _pending = new TreeMap<Long,Long>();
-    private final FailedMsgRetryManager _failedMsgRetryManager;
-
-    // retryRecords key = Kafka offset, value = retry info for the given message
-    Long _committedTo;
-    LinkedList<MessageAndOffset> _waitingToEmit = new LinkedList<MessageAndOffset>();
-    Partition _partition;
-    SpoutConfig _spoutConfig;
-    String _topologyInstanceId;
-    SimpleConsumer _consumer;
-    DynamicPartitionConnections _connections;
-    ZkState _state;
-    Map _stormConf;
-    long numberFailed, numberAcked;
-    public PartitionManager(DynamicPartitionConnections connections, String topologyInstanceId, ZkState state, Map stormConf, SpoutConfig spoutConfig, Partition id) {
-        _partition = id;
-        _connections = connections;
-        _spoutConfig = spoutConfig;
-        _topologyInstanceId = topologyInstanceId;
-        _consumer = connections.register(id.host, id.topic, id.partition);
-        _state = state;
-        _stormConf = stormConf;
-        numberAcked = numberFailed = 0;
-
-        _failedMsgRetryManager = new ExponentialBackoffMsgRetryManager(_spoutConfig.retryInitialDelayMs,
-                                                                           _spoutConfig.retryDelayMultiplier,
-                                                                           _spoutConfig.retryDelayMaxMs);
-
-        String jsonTopologyId = null;
-        Long jsonOffset = null;
-        String path = committedPath();
-        try {
-            Map<Object, Object> json = _state.readJSON(path);
-            LOG.info("Read partition information from: " + path +  "  --> " + json );
-            if (json != null) {
-                jsonTopologyId = (String) ((Map<Object, Object>) json.get("topology")).get("id");
-                jsonOffset = (Long) json.get("offset");
-            }
-        } catch (Throwable e) {
-            LOG.warn("Error reading and/or parsing at ZkNode: " + path, e);
-        }
-
-        String topic = _partition.topic;
-        Long currentOffset = KafkaUtils.getOffset(_consumer, topic, id.partition, spoutConfig);
-
-        if (jsonTopologyId == null || jsonOffset == null) { // failed to parse JSON?
-            _committedTo = currentOffset;
-            LOG.info("No partition information found, using configuration to determine offset");
-        } else if (!topologyInstanceId.equals(jsonTopologyId) && spoutConfig.ignoreZkOffsets) {
-            _committedTo = KafkaUtils.getOffset(_consumer, topic, id.partition, spoutConfig.startOffsetTime);
-            LOG.info("Topology change detected and ignore zookeeper offsets set to true, using configuration to determine offset");
-        } else {
-            _committedTo = jsonOffset;
-            LOG.info("Read last commit offset from zookeeper: " + _committedTo + "; old topology_id: " + jsonTopologyId + " - new topology_id: " + topologyInstanceId );
-        }
-
-        if (currentOffset - _committedTo > spoutConfig.maxOffsetBehind || _committedTo <= 0) {
-            LOG.info("Last commit offset from zookeeper: " + _committedTo);
-            Long lastCommittedOffset = _committedTo;
-            _committedTo = currentOffset;
-            LOG.info("Commit offset " + lastCommittedOffset + " is more than " +
-                    spoutConfig.maxOffsetBehind + " behind latest offset " + currentOffset + ", resetting to startOffsetTime=" + spoutConfig.startOffsetTime);
-        }
-
-        LOG.info("Starting Kafka " + _consumer.host() + ":" + id.partition + " from offset " + _committedTo);
-        _emittedToOffset = _committedTo;
-
-        _fetchAPILatencyMax = new CombinedMetric(new MaxMetric());
-        _fetchAPILatencyMean = new ReducedMetric(new MeanReducer());
-        _fetchAPICallCount = new CountMetric();
-        _fetchAPIMessageCount = new CountMetric();
-    }
-
-    public Map getMetricsDataMap() {
-        Map ret = new HashMap();
-        ret.put(_partition + "/fetchAPILatencyMax", _fetchAPILatencyMax.getValueAndReset());
-        ret.put(_partition + "/fetchAPILatencyMean", _fetchAPILatencyMean.getValueAndReset());
-        ret.put(_partition + "/fetchAPICallCount", _fetchAPICallCount.getValueAndReset());
-        ret.put(_partition + "/fetchAPIMessageCount", _fetchAPIMessageCount.getValueAndReset());
-        return ret;
-    }
-
-    //returns false if it's reached the end of current batch
-    public EmitState next(SpoutOutputCollector collector) {
-        if (_waitingToEmit.isEmpty()) {
-            fill();
-        }
-        while (true) {
-            MessageAndOffset toEmit = _waitingToEmit.pollFirst();
-            if (toEmit == null) {
-                return EmitState.NO_EMITTED;
-            }
-
-            Iterable<List<Object>> tups;
-            if (_spoutConfig.scheme instanceof MessageMetadataSchemeAsMultiScheme) {
-                tups = KafkaUtils.generateTuples((MessageMetadataSchemeAsMultiScheme) _spoutConfig.scheme, toEmit.message(), _partition, toEmit.offset());
-            } else {
-                tups = KafkaUtils.generateTuples(_spoutConfig, toEmit.message(), _partition.topic);
-            }
-            
-            if ((tups != null) && tups.iterator().hasNext()) {
-               if (!Strings.isNullOrEmpty(_spoutConfig.outputStreamId)) {
-                    for (List<Object> tup : tups) {
-                        collector.emit(_spoutConfig.topic, tup, new KafkaMessageId(_partition, toEmit.offset()));
-                    }
-                } else {
-                    for (List<Object> tup : tups) {
-                        collector.emit(tup, new KafkaMessageId(_partition, toEmit.offset()));
-                    }
-                }
-                break;
-            } else {
-                ack(toEmit.offset());
-            }
-        }
-        if (!_waitingToEmit.isEmpty()) {
-            return EmitState.EMITTED_MORE_LEFT;
-        } else {
-            return EmitState.EMITTED_END;
-        }
-    }
-
-
-    private void fill() {
-        long start = System.nanoTime();
-        Long offset;
-
-        // Are there failed tuples? If so, fetch those first.
-        offset = this._failedMsgRetryManager.nextFailedMessageToRetry();
-        final boolean processingNewTuples = (offset == null);
-        if (processingNewTuples) {
-            offset = _emittedToOffset;
-        }
-
-        ByteBufferMessageSet msgs = null;
-        try {
-            msgs = KafkaUtils.fetchMessages(_spoutConfig, _consumer, _partition, offset);
-        } catch (TopicOffsetOutOfRangeException e) {
-            _emittedToOffset = KafkaUtils.getOffset(_consumer, _partition.topic, _partition.partition, kafka.api.OffsetRequest.EarliestTime());
-            LOG.warn("{} Using new offset: {}", _partition.partition, _emittedToOffset);
-            // fetch failed, so don't update the metrics
-            
-            //fix bug [STORM-643] : remove outdated failed offsets
-            if (!processingNewTuples) {
-                // For the case of EarliestTime it would be better to discard
-                // all the failed offsets, that are earlier than actual EarliestTime
-                // offset, since they are anyway not there.
-                // These calls to broker API will be then saved.
-                Set<Long> omitted = this._failedMsgRetryManager.clearInvalidMessages(_emittedToOffset);
-                
-                LOG.warn("Removing the failed offsets that are out of range: {}", omitted);
-            }
-            
-            return;
-        }
-        long end = System.nanoTime();
-        long millis = (end - start) / 1000000;
-        _fetchAPILatencyMax.update(millis);
-        _fetchAPILatencyMean.update(millis);
-        _fetchAPICallCount.incr();
-        if (msgs != null) {
-            int numMessages = 0;
-
-            for (MessageAndOffset msg : msgs) {
-                final Long cur_offset = msg.offset();
-                if (cur_offset < offset) {
-                    // Skip any old offsets.
-                    continue;
-                }
-                if (processingNewTuples || this._failedMsgRetryManager.shouldRetryMsg(cur_offset)) {
-                    numMessages += 1;
-                    if (!_pending.containsKey(cur_offset)) {
-                        _pending.put(cur_offset, System.currentTimeMillis());
-                    }
-                    _waitingToEmit.add(msg);
-                    _emittedToOffset = Math.max(msg.nextOffset(), _emittedToOffset);
-                    if (_failedMsgRetryManager.shouldRetryMsg(cur_offset)) {
-                        this._failedMsgRetryManager.retryStarted(cur_offset);
-                    }
-                }
-            }
-            _fetchAPIMessageCount.incrBy(numMessages);
-        }
-    }
-
-    public void ack(Long offset) {
-        if (!_pending.isEmpty() && _pending.firstKey() < offset - _spoutConfig.maxOffsetBehind) {
-            // Too many things pending!
-            _pending.headMap(offset - _spoutConfig.maxOffsetBehind).clear();
-        }
-        _pending.remove(offset);
-        this._failedMsgRetryManager.acked(offset);
-        numberAcked++;
-    }
-
-    public void fail(Long offset) {
-        if (offset < _emittedToOffset - _spoutConfig.maxOffsetBehind) {
-            LOG.info(
-                    "Skipping failed tuple at offset=" + offset +
-                            " because it's more than maxOffsetBehind=" + _spoutConfig.maxOffsetBehind +
-                            " behind _emittedToOffset=" + _emittedToOffset
-            );
-        } else {
-            LOG.debug("failing at offset={} with _pending.size()={} pending and _emittedToOffset={}", offset, _pending.size(), _emittedToOffset);
-            numberFailed++;
-            if (numberAcked == 0 && numberFailed > _spoutConfig.maxOffsetBehind) {
-                throw new RuntimeException("Too many tuple failures");
-            }
-
-            this._failedMsgRetryManager.failed(offset);
-        }
-    }
-
-    public void commit() {
-        long lastCompletedOffset = lastCompletedOffset();
-        if (_committedTo != lastCompletedOffset) {
-            LOG.debug("Writing last completed offset ({}) to ZK for {} for topology: {}", lastCompletedOffset, _partition, _topologyInstanceId);
-            Map<Object, Object> data = (Map<Object, Object>) ImmutableMap.builder()
-                    .put("topology", ImmutableMap.of("id", _topologyInstanceId,
-                            "name", _stormConf.get(Config.TOPOLOGY_NAME)))
-                    .put("offset", lastCompletedOffset)
-                    .put("partition", _partition.partition)
-                    .put("broker", ImmutableMap.of("host", _partition.host.host,
-                            "port", _partition.host.port))
-                    .put("topic", _partition.topic).build();
-            _state.writeJSON(committedPath(), data);
-
-            _committedTo = lastCompletedOffset;
-            LOG.debug("Wrote last completed offset ({}) to ZK for {} for topology: {}", lastCompletedOffset, _partition, _topologyInstanceId);
-        } else {
-            LOG.debug("No new offset for {} for topology: {}", _partition, _topologyInstanceId);
-        }
-    }
-
-    private String committedPath() {
-        return _spoutConfig.zkRoot + "/" + _spoutConfig.id + "/" + _partition.getId();
-    }
-
-    public long lastCompletedOffset() {
-        if (_pending.isEmpty()) {
-            return _emittedToOffset;
-        } else {
-            return _pending.firstKey();
-        }
-    }
-
-    public Partition getPartition() {
-        return _partition;
-    }
-
-    public void close() {
-        commit();
-        _connections.unregister(_partition.host, _partition.topic , _partition.partition);
-    }
-
-    static class KafkaMessageId {
-        public Partition partition;
-        public long offset;
-
-
-        public KafkaMessageId(Partition partition, long offset) {
-            this.partition = partition;
-            this.offset = offset;
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/SpoutConfig.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/SpoutConfig.java b/external/storm-kafka/src/jvm/storm/kafka/SpoutConfig.java
deleted file mode 100644
index d125ebb..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/SpoutConfig.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import java.io.Serializable;
-import java.util.List;
-
-
-public class SpoutConfig extends KafkaConfig implements Serializable {
-    public List<String> zkServers = null;
-    public Integer zkPort = null;
-    public String zkRoot = null;
-    public String id = null;
-
-    public String outputStreamId;
-
-    // setting for how often to save the current kafka offset to ZooKeeper
-    public long stateUpdateIntervalMs = 2000;
-
-    // Exponential back-off retry settings.  These are used when retrying messages after a bolt
-    // calls OutputCollector.fail().
-    public long retryInitialDelayMs = 0;
-    public double retryDelayMultiplier = 1.0;
-    public long retryDelayMaxMs = 60 * 1000;
-
-    public SpoutConfig(BrokerHosts hosts, String topic, String zkRoot, String id) {
-        super(hosts, topic);
-        this.zkRoot = zkRoot;
-        this.id = id;
-    }
-}
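
SpoutConfig above layers ZooKeeper bookkeeping (zkServers, zkRoot, id, stateUpdateIntervalMs) and exponential back-off retry settings on top of KafkaConfig; its constructor only needs the broker hosts, topic, ZooKeeper root path and a spout id. A hypothetical construction sketch, assuming these classes now live under org.apache.storm.kafka after this migration and that ZooKeeper is reachable at localhost:2181 (all literal values are illustrative):

    // Sketch only: build a spout config and hand it to a KafkaSpout.
    BrokerHosts hosts = new ZkHosts("localhost:2181");
    SpoutConfig spoutConfig = new SpoutConfig(hosts, "my-topic", "/kafka-spout", "my-spout-id");
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme()); // emit each message as a single string field
    spoutConfig.stateUpdateIntervalMs = 1000;                         // write offsets to ZooKeeper every second (default 2000 ms)
    KafkaSpout spout = new KafkaSpout(spoutConfig);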

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/StaticCoordinator.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/StaticCoordinator.java b/external/storm-kafka/src/jvm/storm/kafka/StaticCoordinator.java
deleted file mode 100644
index 4b20d84..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/StaticCoordinator.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import storm.kafka.trident.GlobalPartitionInformation;
-
-import java.util.*;
-
-
-public class StaticCoordinator implements PartitionCoordinator {
-    Map<Partition, PartitionManager> _managers = new HashMap<Partition, PartitionManager>();
-    List<PartitionManager> _allManagers = new ArrayList();
-
-    public StaticCoordinator(DynamicPartitionConnections connections, Map stormConf, SpoutConfig config, ZkState state, int taskIndex, int totalTasks, String topologyInstanceId) {
-        StaticHosts hosts = (StaticHosts) config.hosts;
-        List<GlobalPartitionInformation> partitions = new ArrayList<GlobalPartitionInformation>();
-        partitions.add(hosts.getPartitionInformation());
-        List<Partition> myPartitions = KafkaUtils.calculatePartitionsForTask(partitions, totalTasks, taskIndex);
-        for (Partition myPartition : myPartitions) {
-            _managers.put(myPartition, new PartitionManager(connections, topologyInstanceId, state, stormConf, config, myPartition));
-        }
-        _allManagers = new ArrayList(_managers.values());
-    }
-
-    @Override
-    public List<PartitionManager> getMyManagedPartitions() {
-        return _allManagers;
-    }
-
-    public PartitionManager getManager(Partition partition) {
-        return _managers.get(partition);
-    }
-
-    @Override
-    public void refresh() { return; }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/StaticHosts.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/StaticHosts.java b/external/storm-kafka/src/jvm/storm/kafka/StaticHosts.java
deleted file mode 100644
index bee7118..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/StaticHosts.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import storm.kafka.trident.GlobalPartitionInformation;
-
-/**
- * Date: 11/05/2013
- * Time: 14:43
- */
-public class StaticHosts implements BrokerHosts {
-
-
-    private GlobalPartitionInformation partitionInformation;
-
-    public StaticHosts(GlobalPartitionInformation partitionInformation) {
-        this.partitionInformation = partitionInformation;
-    }
-
-    public GlobalPartitionInformation getPartitionInformation() {
-        return partitionInformation;
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/StaticPartitionConnections.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/StaticPartitionConnections.java b/external/storm-kafka/src/jvm/storm/kafka/StaticPartitionConnections.java
deleted file mode 100644
index 1353b6c..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/StaticPartitionConnections.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import kafka.javaapi.consumer.SimpleConsumer;
-
-import java.util.HashMap;
-import java.util.Map;
-
-public class StaticPartitionConnections {
-    Map<Integer, SimpleConsumer> _kafka = new HashMap<Integer, SimpleConsumer>();
-    KafkaConfig _config;
-    StaticHosts hosts;
-
-    public StaticPartitionConnections(KafkaConfig conf) {
-        _config = conf;
-        if (!(conf.hosts instanceof StaticHosts)) {
-            throw new RuntimeException("Must configure with static hosts");
-        }
-        this.hosts = (StaticHosts) conf.hosts;
-    }
-
-    public SimpleConsumer getConsumer(int partition) {
-        if (!_kafka.containsKey(partition)) {
-            Broker hp = hosts.getPartitionInformation().getBrokerFor(partition);
-            _kafka.put(partition, new SimpleConsumer(hp.host, hp.port, _config.socketTimeoutMs, _config.bufferSizeBytes, _config.clientId));
-
-        }
-        return _kafka.get(partition);
-    }
-
-    public void close() {
-        for (SimpleConsumer consumer : _kafka.values()) {
-            consumer.close();
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/StringKeyValueScheme.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/StringKeyValueScheme.java b/external/storm-kafka/src/jvm/storm/kafka/StringKeyValueScheme.java
deleted file mode 100644
index 6f6d339..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/StringKeyValueScheme.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import backtype.storm.tuple.Values;
-import com.google.common.collect.ImmutableMap;
-
-import java.nio.ByteBuffer;
-import java.util.List;
-
-public class StringKeyValueScheme extends StringScheme implements KeyValueScheme {
-
-    @Override
-    public List<Object> deserializeKeyAndValue(ByteBuffer key, ByteBuffer value) {
-        if ( key == null ) {
-            return deserialize(value);
-        }
-        String keyString = StringScheme.deserializeString(key);
-        String valueString = StringScheme.deserializeString(value);
-        return new Values(ImmutableMap.of(keyString, valueString));
-    }
-
-}
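
StringKeyValueScheme above turns a Kafka key/value pair into a single-field tuple holding a one-entry map, falling back to plain StringScheme behaviour when the key is null. KafkaUtils.generateTuples only takes the key/value path when the configured scheme is a KeyValueSchemeAsMultiScheme, so the scheme has to be wrapped accordingly. A hypothetical configuration line, reusing the spoutConfig from the earlier sketch:

    // Sketch only: wrap the key/value scheme so the spout emits {key: value} maps.
    spoutConfig.scheme = new KeyValueSchemeAsMultiScheme(new StringKeyValueScheme());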

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/StringMessageAndMetadataScheme.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/StringMessageAndMetadataScheme.java b/external/storm-kafka/src/jvm/storm/kafka/StringMessageAndMetadataScheme.java
deleted file mode 100644
index 1708b97..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/StringMessageAndMetadataScheme.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
-
-import java.nio.ByteBuffer;
-import java.util.List;
-
-public class StringMessageAndMetadataScheme extends StringScheme implements MessageMetadataScheme {
-    private static final long serialVersionUID = -5441841920447947374L;
-
-    public static final String STRING_SCHEME_PARTITION_KEY = "partition";
-    public static final String STRING_SCHEME_OFFSET = "offset";
-
-    @Override
-    public List<Object> deserializeMessageWithMetadata(ByteBuffer message, Partition partition, long offset) {
-        String stringMessage = StringScheme.deserializeString(message);
-        return new Values(stringMessage, partition.partition, offset);
-    }
-
-    @Override
-    public Fields getOutputFields() {
-        return new Fields(STRING_SCHEME_KEY, STRING_SCHEME_PARTITION_KEY, STRING_SCHEME_OFFSET);
-    }
-
-}
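
The scheme above declares the output fields ("str", "partition", "offset"), so a downstream bolt would read them roughly like this (a fragment meant for a bolt's execute method; the processing step is left out):

    // Inside a downstream bolt's execute(Tuple tuple) method:
    String message = tuple.getStringByField(StringScheme.STRING_SCHEME_KEY);                            // "str"
    int partition  = tuple.getIntegerByField(StringMessageAndMetadataScheme.STRING_SCHEME_PARTITION_KEY);
    long offset    = tuple.getLongByField(StringMessageAndMetadataScheme.STRING_SCHEME_OFFSET);
    // ... process the message together with its Kafka coordinates ...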

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/StringMultiSchemeWithTopic.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/StringMultiSchemeWithTopic.java b/external/storm-kafka/src/jvm/storm/kafka/StringMultiSchemeWithTopic.java
deleted file mode 100644
index 1e7f216..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/StringMultiSchemeWithTopic.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import backtype.storm.spout.MultiScheme;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
-import sun.reflect.generics.reflectiveObjects.NotImplementedException;
-
-import java.nio.ByteBuffer;
-import java.util.Collections;
-import java.util.List;
-
-public class StringMultiSchemeWithTopic
-        implements MultiScheme {
-    public static final String STRING_SCHEME_KEY = "str";
-
-    public static final String TOPIC_KEY = "topic";
-
-    @Override
-    public Iterable<List<Object>> deserialize(ByteBuffer bytes) {
-        throw new NotImplementedException();
-    }
-
-    public Iterable<List<Object>> deserializeWithTopic(String topic, ByteBuffer bytes) {
-        List<Object> items = new Values(StringScheme.deserializeString(bytes), topic);
-        return Collections.singletonList(items);
-    }
-
-    public Fields getOutputFields() {
-        return new Fields(STRING_SCHEME_KEY, TOPIC_KEY);
-    }
-}
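
Note that the plain deserialize(ByteBuffer) above just throws NotImplementedException; only the topic-aware path is usable. A quick sketch of that path (topic name and payload are made up; imagine it inside a main method):

    StringMultiSchemeWithTopic scheme = new StringMultiSchemeWithTopic();
    Iterable<List<Object>> tuples = scheme.deserializeWithTopic(
            "events", ByteBuffer.wrap("hello".getBytes(StandardCharsets.UTF_8)));
    for (List<Object> tuple : tuples) {
        System.out.println(tuple);   // [hello, events] -> fields ("str", "topic")
    }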

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/StringScheme.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/StringScheme.java b/external/storm-kafka/src/jvm/storm/kafka/StringScheme.java
deleted file mode 100644
index 1071e60..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/StringScheme.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import backtype.storm.spout.Scheme;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
-import backtype.storm.utils.Utils;
-
-import java.nio.ByteBuffer;
-import java.nio.charset.Charset;
-import java.nio.charset.StandardCharsets;
-import java.util.List;
-
-public class StringScheme implements Scheme {
-    private static final Charset UTF8_CHARSET = StandardCharsets.UTF_8;
-    public static final String STRING_SCHEME_KEY = "str";
-
-    public List<Object> deserialize(ByteBuffer bytes) {
-        return new Values(deserializeString(bytes));
-    }
-
-    public static String deserializeString(ByteBuffer string) {
-        if (string.hasArray()) {
-            int base = string.arrayOffset();
-            return new String(string.array(), base + string.position(), string.remaining(), UTF8_CHARSET);
-        } else {
-            return new String(Utils.toByteArray(string), UTF8_CHARSET);
-        }
-    }
-
-    public Fields getOutputFields() {
-        return new Fields(STRING_SCHEME_KEY);
-    }
-}
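
deserializeString above handles two cases: a heap-backed buffer is decoded straight from its backing array, while a direct buffer is first copied out with Utils.toByteArray. A small self-contained sketch (the demo class name and payload are made up):

    import storm.kafka.StringScheme;

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;

    public class StringSchemeDemo {
        public static void main(String[] args) {
            byte[] payload = "hello kafka".getBytes(StandardCharsets.UTF_8);

            // Heap-backed buffer: decoded in place from the backing array.
            System.out.println(StringScheme.deserializeString(ByteBuffer.wrap(payload)));

            // Direct buffer: copied out via Utils.toByteArray, then decoded as UTF-8.
            ByteBuffer direct = ByteBuffer.allocateDirect(payload.length);
            direct.put(payload);
            direct.flip();
            System.out.println(StringScheme.deserializeString(direct));
        }
    }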

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/TopicOffsetOutOfRangeException.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/TopicOffsetOutOfRangeException.java b/external/storm-kafka/src/jvm/storm/kafka/TopicOffsetOutOfRangeException.java
deleted file mode 100644
index 5101a3e..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/TopicOffsetOutOfRangeException.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-public class TopicOffsetOutOfRangeException extends RuntimeException {
-
-    public TopicOffsetOutOfRangeException(String message) {
-        super(message);
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/ZkCoordinator.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/ZkCoordinator.java b/external/storm-kafka/src/jvm/storm/kafka/ZkCoordinator.java
deleted file mode 100644
index 8650e6f..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/ZkCoordinator.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import storm.kafka.trident.GlobalPartitionInformation;
-
-import java.util.*;
-
-import static storm.kafka.KafkaUtils.taskId;
-
-public class ZkCoordinator implements PartitionCoordinator {
-    public static final Logger LOG = LoggerFactory.getLogger(ZkCoordinator.class);
-
-    SpoutConfig _spoutConfig;
-    int _taskIndex;
-    int _totalTasks;
-    String _topologyInstanceId;
-    Map<Partition, PartitionManager> _managers = new HashMap();
-    List<PartitionManager> _cachedList = new ArrayList<PartitionManager>();
-    Long _lastRefreshTime = null;
-    int _refreshFreqMs;
-    DynamicPartitionConnections _connections;
-    DynamicBrokersReader _reader;
-    ZkState _state;
-    Map _stormConf;
-
-    public ZkCoordinator(DynamicPartitionConnections connections, Map stormConf, SpoutConfig spoutConfig, ZkState state, int taskIndex, int totalTasks, String topologyInstanceId) {
-        this(connections, stormConf, spoutConfig, state, taskIndex, totalTasks, topologyInstanceId, buildReader(stormConf, spoutConfig));
-    }
-
-    public ZkCoordinator(DynamicPartitionConnections connections, Map stormConf, SpoutConfig spoutConfig, ZkState state, int taskIndex, int totalTasks, String topologyInstanceId, DynamicBrokersReader reader) {
-        _spoutConfig = spoutConfig;
-        _connections = connections;
-        _taskIndex = taskIndex;
-        _totalTasks = totalTasks;
-        _topologyInstanceId = topologyInstanceId;
-        _stormConf = stormConf;
-        _state = state;
-        ZkHosts brokerConf = (ZkHosts) spoutConfig.hosts;
-        _refreshFreqMs = brokerConf.refreshFreqSecs * 1000;
-        _reader = reader;
-    }
-
-    private static DynamicBrokersReader buildReader(Map stormConf, SpoutConfig spoutConfig) {
-        ZkHosts hosts = (ZkHosts) spoutConfig.hosts;
-        return new DynamicBrokersReader(stormConf, hosts.brokerZkStr, hosts.brokerZkPath, spoutConfig.topic);
-    }
-
-    @Override
-    public List<PartitionManager> getMyManagedPartitions() {
-        if (_lastRefreshTime == null || (System.currentTimeMillis() - _lastRefreshTime) > _refreshFreqMs) {
-            refresh();
-            _lastRefreshTime = System.currentTimeMillis();
-        }
-        return _cachedList;
-    }
-
-    @Override
-    public void refresh() {
-        try {
-            LOG.info(taskId(_taskIndex, _totalTasks) + "Refreshing partition manager connections");
-            List<GlobalPartitionInformation> brokerInfo = _reader.getBrokerInfo();
-            List<Partition> mine = KafkaUtils.calculatePartitionsForTask(brokerInfo, _totalTasks, _taskIndex);
-
-            Set<Partition> curr = _managers.keySet();
-            Set<Partition> newPartitions = new HashSet<Partition>(mine);
-            newPartitions.removeAll(curr);
-
-            Set<Partition> deletedPartitions = new HashSet<Partition>(curr);
-            deletedPartitions.removeAll(mine);
-
-            LOG.info(taskId(_taskIndex, _totalTasks) + "Deleted partition managers: " + deletedPartitions.toString());
-
-            for (Partition id : deletedPartitions) {
-                PartitionManager man = _managers.remove(id);
-                man.close();
-            }
-            LOG.info(taskId(_taskIndex, _totalTasks) + "New partition managers: " + newPartitions.toString());
-
-            for (Partition id : newPartitions) {
-                PartitionManager man = new PartitionManager(_connections, _topologyInstanceId, _state, _stormConf, _spoutConfig, id);
-                _managers.put(id, man);
-            }
-
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-        _cachedList = new ArrayList<PartitionManager>(_managers.values());
-        LOG.info(taskId(_taskIndex, _totalTasks) + "Finished refreshing");
-    }
-
-    @Override
-    public PartitionManager getManager(Partition partition) {
-        return _managers.get(partition);
-    }
-}
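
The interesting part of the class above is refresh(): it diffs the latest assignment from ZooKeeper against the managers it already owns, closing the ones it lost and creating managers for the ones it gained. A toy illustration of that set arithmetic with plain strings (java.util only; partition names are made up):

    Set<String> managed  = new HashSet<>(Arrays.asList("p0", "p1", "p2")); // managers currently owned
    Set<String> assigned = new HashSet<>(Arrays.asList("p1", "p2", "p3")); // latest assignment from ZK

    Set<String> added = new HashSet<>(assigned);
    added.removeAll(managed);     // [p3]  -> create a new PartitionManager for each

    Set<String> removed = new HashSet<>(managed);
    removed.removeAll(assigned);  // [p0]  -> close and drop its PartitionManager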

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/ZkHosts.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/ZkHosts.java b/external/storm-kafka/src/jvm/storm/kafka/ZkHosts.java
deleted file mode 100644
index 4e4327d..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/ZkHosts.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-
-public class ZkHosts implements BrokerHosts {
-    private static final String DEFAULT_ZK_PATH = "/brokers";
-
-    public String brokerZkStr = null;
-    public String brokerZkPath = null; // e.g., /kafka/brokers
-    public int refreshFreqSecs = 60;
-
-    public ZkHosts(String brokerZkStr, String brokerZkPath) {
-        this.brokerZkStr = brokerZkStr;
-        this.brokerZkPath = brokerZkPath;
-    }
-
-    public ZkHosts(String brokerZkStr) {
-        this(brokerZkStr, DEFAULT_ZK_PATH);
-    }
-}
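
Construction of the broker-tracking config above is a one-liner; the single-argument form falls back to the default /brokers path and a 60 second refresh interval (connection strings below are made up):

    ZkHosts defaults = new ZkHosts("zk1:2181,zk2:2181");                    // path /brokers, refresh 60 s
    ZkHosts chrooted = new ZkHosts("zk1:2181,zk2:2181", "/kafka/brokers");  // explicit broker path
    chrooted.refreshFreqSecs = 30;                                          // fields are public and mutable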

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/ZkState.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/ZkState.java b/external/storm-kafka/src/jvm/storm/kafka/ZkState.java
deleted file mode 100644
index e5e67e5..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/ZkState.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka;
-
-import backtype.storm.Config;
-import backtype.storm.utils.Utils;
-import org.apache.curator.framework.CuratorFramework;
-import org.apache.curator.framework.CuratorFrameworkFactory;
-import org.apache.curator.retry.RetryNTimes;
-import org.apache.zookeeper.CreateMode;
-import org.json.simple.JSONValue;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.nio.charset.Charset;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-public class ZkState {
-    public static final Logger LOG = LoggerFactory.getLogger(ZkState.class);
-    CuratorFramework _curator;
-
-    private CuratorFramework newCurator(Map stateConf) throws Exception {
-        Integer port = (Integer) stateConf.get(Config.TRANSACTIONAL_ZOOKEEPER_PORT);
-        String serverPorts = "";
-        for (String server : (List<String>) stateConf.get(Config.TRANSACTIONAL_ZOOKEEPER_SERVERS)) {
-            serverPorts = serverPorts + server + ":" + port + ",";
-        }
-        return CuratorFrameworkFactory.newClient(serverPorts,
-                Utils.getInt(stateConf.get(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT)),
-                Utils.getInt(stateConf.get(Config.STORM_ZOOKEEPER_CONNECTION_TIMEOUT)),
-                new RetryNTimes(Utils.getInt(stateConf.get(Config.STORM_ZOOKEEPER_RETRY_TIMES)),
-                        Utils.getInt(stateConf.get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL))));
-    }
-
-    public CuratorFramework getCurator() {
-        assert _curator != null;
-        return _curator;
-    }
-
-    public ZkState(Map stateConf) {
-        stateConf = new HashMap(stateConf);
-
-        try {
-            _curator = newCurator(stateConf);
-            _curator.start();
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    public void writeJSON(String path, Map<Object, Object> data) {
-        LOG.debug("Writing {} the data {}", path, data.toString());
-        writeBytes(path, JSONValue.toJSONString(data).getBytes(Charset.forName("UTF-8")));
-    }
-
-    public void writeBytes(String path, byte[] bytes) {
-        try {
-            if (_curator.checkExists().forPath(path) == null) {
-                _curator.create()
-                        .creatingParentsIfNeeded()
-                        .withMode(CreateMode.PERSISTENT)
-                        .forPath(path, bytes);
-            } else {
-                _curator.setData().forPath(path, bytes);
-            }
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    public Map<Object, Object> readJSON(String path) {
-        try {
-            byte[] b = readBytes(path);
-            if (b == null) {
-                return null;
-            }
-            return (Map<Object, Object>) JSONValue.parse(new String(b, "UTF-8"));
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    public byte[] readBytes(String path) {
-        try {
-            if (_curator.checkExists().forPath(path) != null) {
-                return _curator.getData().forPath(path);
-            } else {
-                return null;
-            }
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    public void close() {
-        _curator.close();
-        _curator = null;
-    }
-}
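
ZkState above is a thin JSON-over-Curator wrapper. A sketch of writing and reading back a node, assuming a reachable ZooKeeper and that the transactional ZooKeeper settings are filled in (the znode path, server name and offset value are made up):

    import backtype.storm.Config;
    import backtype.storm.utils.Utils;
    import com.google.common.collect.ImmutableMap;
    import storm.kafka.ZkState;

    import java.util.Arrays;
    import java.util.Map;

    public class ZkStateDemo {
        public static void main(String[] args) {
            Map conf = Utils.readStormConfig();   // defaults.yaml merged with any storm.yaml on the classpath
            conf.put(Config.TRANSACTIONAL_ZOOKEEPER_SERVERS, Arrays.asList("zk1"));
            conf.put(Config.TRANSACTIONAL_ZOOKEEPER_PORT, 2181);

            ZkState state = new ZkState(conf);
            state.writeJSON("/demo/offsets/p0", ImmutableMap.<Object, Object>of("offset", 42L));
            System.out.println(state.readJSON("/demo/offsets/p0"));   // {offset=42}
            state.close();
        }
    }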

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/bolt/KafkaBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/bolt/KafkaBolt.java b/external/storm-kafka/src/jvm/storm/kafka/bolt/KafkaBolt.java
deleted file mode 100644
index 1ebe142..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/bolt/KafkaBolt.java
+++ /dev/null
@@ -1,178 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka.bolt;
-
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseRichBolt;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.utils.TupleUtils;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.clients.producer.RecordMetadata;
-import org.apache.kafka.clients.producer.Callback;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import storm.kafka.bolt.mapper.FieldNameBasedTupleToKafkaMapper;
-import storm.kafka.bolt.mapper.TupleToKafkaMapper;
-import storm.kafka.bolt.selector.DefaultTopicSelector;
-import storm.kafka.bolt.selector.KafkaTopicSelector;
-import java.util.concurrent.Future;
-import java.util.concurrent.ExecutionException;
-import java.util.Map;
-import java.util.Properties;
-
-
-/**
- * Bolt implementation that can send Tuple data to Kafka.
- * <p/>
- * It expects the producer configuration and topic in the storm config under
- * 'kafka.broker.properties' and 'topic' respectively.
- * <p/>
- * This bolt uses the 0.8.2 Kafka Producer API and also works for sending
- * tuples to older Kafka versions (e.g. 0.8.1). A usage sketch follows this diff.
- */
-public class KafkaBolt<K, V> extends BaseRichBolt {
-
-    private static final Logger LOG = LoggerFactory.getLogger(KafkaBolt.class);
-
-    public static final String TOPIC = "topic";
-
-    private KafkaProducer<K, V> producer;
-    private OutputCollector collector;
-    private TupleToKafkaMapper<K,V> mapper;
-    private KafkaTopicSelector topicSelector;
-    private Properties boltSpecfiedProperties = new Properties();
-    /**
-     * With the default settings (fireAndForget false, async true), the producer callback
-     * acks the tuple when the send succeeds and fails it when the send errors out.
-     * Setting fireAndForget to true acks each tuple immediately without waiting for Kafka at all;
-     * the "acks" setting in the 0.8.2 Producer API config then has no effect on tuple acking.
-     * Setting async to false switches to synchronous sending.
-     */
-    private boolean fireAndForget = false;
-    private boolean async = true;
-
-    public KafkaBolt() {}
-
-    public KafkaBolt<K,V> withTupleToKafkaMapper(TupleToKafkaMapper<K,V> mapper) {
-        this.mapper = mapper;
-        return this;
-    }
-
-    public KafkaBolt<K,V> withTopicSelector(KafkaTopicSelector selector) {
-        this.topicSelector = selector;
-        return this;
-    }
-
-    public KafkaBolt<K,V> withProducerProperties(Properties producerProperties) {
-        this.boltSpecfiedProperties = producerProperties;
-        return this;
-    }
-
-    @Override
-    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
-        //for backward compatibility.
-        if(mapper == null) {
-            this.mapper = new FieldNameBasedTupleToKafkaMapper<K,V>();
-        }
-
-        //for backward compatibility.
-        if(topicSelector == null) {
-            this.topicSelector = new DefaultTopicSelector((String) stormConf.get(TOPIC));
-        }
-
-        producer = new KafkaProducer<>(boltSpecfiedProperties);
-        this.collector = collector;
-    }
-
-    @Override
-    public void execute(final Tuple input) {
-        if (TupleUtils.isTick(input)) {
-          collector.ack(input);
-          return; // Do not try to send ticks to Kafka
-        }
-        K key = null;
-        V message = null;
-        String topic = null;
-        try {
-            key = mapper.getKeyFromTuple(input);
-            message = mapper.getMessageFromTuple(input);
-            topic = topicSelector.getTopic(input);
-            if (topic != null ) {
-                Callback callback = null;
-
-                if (!fireAndForget && async) {
-                    callback = new Callback() {
-                        @Override
-                        public void onCompletion(RecordMetadata ignored, Exception e) {
-                            synchronized (collector) {
-                                if (e != null) {
-                                    collector.reportError(e);
-                                    collector.fail(input);
-                                } else {
-                                    collector.ack(input);
-                                }
-                            }
-                        }
-                    };
-                }
-                Future<RecordMetadata> result = producer.send(new ProducerRecord<K, V>(topic, key, message), callback);
-                if (!async) {
-                    try {
-                        result.get();
-                        collector.ack(input);
-                    } catch (ExecutionException err) {
-                        collector.reportError(err);
-                        collector.fail(input);
-                    }
-                } else if (fireAndForget) {
-                    collector.ack(input);
-                }
-            } else {
-                LOG.warn("skipping key = " + key + ", topic selector returned null.");
-                collector.ack(input);
-            }
-        } catch (Exception ex) {
-            collector.reportError(ex);
-            collector.fail(input);
-        }
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-
-    }
-
-    @Override
-    public void cleanup() {
-        producer.close();
-    }
-
-    public void setFireAndForget(boolean fireAndForget) {
-        this.fireAndForget = fireAndForget;
-    }
-
-    public void setAsync(boolean async) {
-        this.async = async;
-    }
-}
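
As the javadoc above notes, the bolt needs producer configuration and a topic; both can also be supplied directly on the bolt instance. A hedged wiring sketch, assuming a TopologyBuilder named builder with a spout "events-source" that emits "key" and "message" fields (all names and broker addresses are illustrative):

    Properties props = new Properties();
    props.put("bootstrap.servers", "kafka1:9092");
    props.put("acks", "1");
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

    KafkaBolt<String, String> bolt = new KafkaBolt<String, String>()
            .withProducerProperties(props)
            .withTopicSelector(new DefaultTopicSelector("events"))
            .withTupleToKafkaMapper(new FieldNameBasedTupleToKafkaMapper<String, String>());

    // setAsync(false) blocks on each send; setFireAndForget(true) acks immediately
    // without waiting for Kafka, as described in the comment on those fields above.
    builder.setBolt("kafka-sink", bolt, 2).shuffleGrouping("events-source");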

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/bolt/mapper/FieldNameBasedTupleToKafkaMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/bolt/mapper/FieldNameBasedTupleToKafkaMapper.java b/external/storm-kafka/src/jvm/storm/kafka/bolt/mapper/FieldNameBasedTupleToKafkaMapper.java
deleted file mode 100644
index 936b7e5..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/bolt/mapper/FieldNameBasedTupleToKafkaMapper.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka.bolt.mapper;
-
-import backtype.storm.tuple.Tuple;
-
-public class FieldNameBasedTupleToKafkaMapper<K,V> implements TupleToKafkaMapper<K, V> {
-
-    public static final String BOLT_KEY = "key";
-    public static final String BOLT_MESSAGE = "message";
-    public String boltKeyField;
-    public String boltMessageField;
-
-    public FieldNameBasedTupleToKafkaMapper() {
-        this(BOLT_KEY, BOLT_MESSAGE);
-    }
-
-    public FieldNameBasedTupleToKafkaMapper(String boltKeyField, String boltMessageField) {
-        this.boltKeyField = boltKeyField;
-        this.boltMessageField = boltMessageField;
-    }
-
-    @Override
-    public K getKeyFromTuple(Tuple tuple) {
-        //for backward compatibility, we return null when key is not present.
-        return tuple.contains(boltKeyField) ? (K) tuple.getValueByField(boltKeyField) : null;
-    }
-
-    @Override
-    public V getMessageFromTuple(Tuple tuple) {
-        return (V) tuple.getValueByField(boltMessageField);
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/bolt/mapper/TupleToKafkaMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/bolt/mapper/TupleToKafkaMapper.java b/external/storm-kafka/src/jvm/storm/kafka/bolt/mapper/TupleToKafkaMapper.java
deleted file mode 100644
index d92de7b..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/bolt/mapper/TupleToKafkaMapper.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka.bolt.mapper;
-
-import backtype.storm.tuple.Tuple;
-
-import java.io.Serializable;
-
-/**
- * As the rather verbose name suggests, this interface maps a Storm tuple to a Kafka key and message.
- * @param <K> type of key.
- * @param <V> type of value.
- */
-public interface TupleToKafkaMapper<K,V> extends Serializable {
-    K getKeyFromTuple(Tuple tuple);
-    V getMessageFromTuple(Tuple tuple);
-}
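
When the field-name-based mapper is not enough, the interface above is small enough to implement directly. A sketch of a custom mapper (the "id" and "body" field names are hypothetical):

    import backtype.storm.tuple.Tuple;
    import storm.kafka.bolt.mapper.TupleToKafkaMapper;

    public class UpperCaseMessageMapper implements TupleToKafkaMapper<String, String> {
        @Override
        public String getKeyFromTuple(Tuple tuple) {
            return tuple.getStringByField("id");                    // Kafka key comes from the "id" field
        }

        @Override
        public String getMessageFromTuple(Tuple tuple) {
            return tuple.getStringByField("body").toUpperCase();    // upper-cased "body" field as the message
        }
    }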

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/bolt/selector/DefaultTopicSelector.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/bolt/selector/DefaultTopicSelector.java b/external/storm-kafka/src/jvm/storm/kafka/bolt/selector/DefaultTopicSelector.java
deleted file mode 100644
index 9c87658..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/bolt/selector/DefaultTopicSelector.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka.bolt.selector;
-
-import backtype.storm.tuple.Tuple;
-
-public class DefaultTopicSelector implements KafkaTopicSelector {
-
-    private final String topicName;
-
-    public DefaultTopicSelector(final String topicName) {
-        this.topicName = topicName;
-    }
-
-    @Override
-    public String getTopic(Tuple tuple) {
-        return topicName;
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/bolt/selector/KafkaTopicSelector.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/bolt/selector/KafkaTopicSelector.java b/external/storm-kafka/src/jvm/storm/kafka/bolt/selector/KafkaTopicSelector.java
deleted file mode 100644
index f77fc47..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/bolt/selector/KafkaTopicSelector.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka.bolt.selector;
-
-import backtype.storm.tuple.Tuple;
-
-import java.io.Serializable;
-
-public interface KafkaTopicSelector extends Serializable {
-    String getTopic(Tuple tuple);
-}
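
Beyond the fixed DefaultTopicSelector, a per-tuple selector is just one method. Note from KafkaBolt above that returning null makes the bolt ack and skip the tuple. A sketch using a hypothetical per-tuple "topic" field:

    import backtype.storm.tuple.Tuple;
    import storm.kafka.bolt.selector.KafkaTopicSelector;

    public class FieldTopicSelector implements KafkaTopicSelector {
        @Override
        public String getTopic(Tuple tuple) {
            // Route on the tuple's "topic" field; null tells KafkaBolt to skip (and ack) the tuple.
            return tuple.contains("topic") ? tuple.getStringByField("topic") : null;
        }
    }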

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/trident/Coordinator.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/Coordinator.java b/external/storm-kafka/src/jvm/storm/kafka/trident/Coordinator.java
deleted file mode 100644
index bd786b3..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/Coordinator.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka.trident;
-
-import storm.kafka.KafkaUtils;
-import storm.trident.spout.IOpaquePartitionedTridentSpout;
-import storm.trident.spout.IPartitionedTridentSpout;
-
-import java.util.List;
-import java.util.Map;
-
-class Coordinator implements IPartitionedTridentSpout.Coordinator<List<GlobalPartitionInformation>>, IOpaquePartitionedTridentSpout.Coordinator<List<GlobalPartitionInformation>> {
-
-    private IBrokerReader reader;
-    private TridentKafkaConfig config;
-
-    public Coordinator(Map conf, TridentKafkaConfig tridentKafkaConfig) {
-        config = tridentKafkaConfig;
-        reader = KafkaUtils.makeBrokerReader(conf, config);
-    }
-
-    @Override
-    public void close() {
-        config.coordinator.close();
-    }
-
-    @Override
-    public boolean isReady(long txid) {
-        return config.coordinator.isReady(txid);
-    }
-
-    @Override
-    public List<GlobalPartitionInformation> getPartitionsForBatch() {
-        return reader.getAllBrokers();
-    }
-}


[21/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/config.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/config.clj b/storm-core/src/clj/org/apache/storm/config.clj
new file mode 100644
index 0000000..d65c439
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/config.clj
@@ -0,0 +1,331 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+
+(ns org.apache.storm.config
+  (:import [java.io FileReader File IOException]
+           [org.apache.storm.generated StormTopology])
+  (:import [org.apache.storm Config])
+  (:import [org.apache.storm.utils Utils LocalState])
+  (:import [org.apache.storm.validation ConfigValidation])
+  (:import [org.apache.commons.io FileUtils])
+  (:require [clojure [string :as str]])
+  (:use [org.apache.storm log util]))
+
+(def RESOURCES-SUBDIR "resources")
+(def NIMBUS-DO-NOT-REASSIGN "NIMBUS-DO-NOT-REASSIGN")
+
+(defn- clojure-config-name [name]
+  (.replace (.toUpperCase name) "_" "-"))
+
+; define clojure constants for every configuration parameter
+(doseq [f (seq (.getFields Config))]
+  (let [name (.getName f)
+        new-name (clojure-config-name name)]
+    (eval
+      `(def ~(symbol new-name) (. Config ~(symbol name))))))
+
+(def ALL-CONFIGS
+  (dofor [f (seq (.getFields Config))]
+         (.get f nil)))
+
+
+(defn cluster-mode
+  [conf & args]
+  (keyword (conf STORM-CLUSTER-MODE)))
+
+(defn local-mode?
+  [conf]
+  (let [mode (conf STORM-CLUSTER-MODE)]
+    (condp = mode
+      "local" true
+      "distributed" false
+      (throw (IllegalArgumentException.
+               (str "Illegal cluster mode in conf: " mode))))))
+
+(defn sampling-rate
+  [conf]
+  (->> (conf TOPOLOGY-STATS-SAMPLE-RATE)
+       (/ 1)
+       int))
+
+(defn mk-stats-sampler
+  [conf]
+  (even-sampler (sampling-rate conf)))
+
+(defn read-default-config
+  []
+  (clojurify-structure (Utils/readDefaultConfig)))
+
+(defn validate-configs-with-schemas
+  [conf]
+  (ConfigValidation/validateFields conf))
+
+(defn read-storm-config
+  []
+  (let [conf (clojurify-structure (Utils/readStormConfig))]
+    (validate-configs-with-schemas conf)
+    conf))
+
+(defn read-yaml-config
+  ([name must-exist]
+     (let [conf (clojurify-structure (Utils/findAndReadConfigFile name must-exist))]
+       (validate-configs-with-schemas conf)
+       conf))
+  ([name]
+     (read-yaml-config name true)))
+
+(defn absolute-storm-local-dir [conf]
+  (let [storm-home (System/getProperty "storm.home")
+        path (conf STORM-LOCAL-DIR)]
+    (if path
+      (if (is-absolute-path? path) path (str storm-home file-path-separator path))
+      (str storm-home file-path-separator "storm-local"))))
+
+(def LOG-DIR
+  (.getCanonicalPath
+    (clojure.java.io/file (or (System/getProperty "storm.log.dir")
+                              (get (read-storm-config) "storm.log.dir")
+                              (str (System/getProperty "storm.home") file-path-separator "logs")))))
+
+(defn absolute-healthcheck-dir [conf]
+  (let [storm-home (System/getProperty "storm.home")
+        path (conf STORM-HEALTH-CHECK-DIR)]
+    (if path
+      (if (is-absolute-path? path) path (str storm-home file-path-separator path))
+      (str storm-home file-path-separator "healthchecks"))))
+
+(defn master-local-dir
+  [conf]
+  (let [ret (str (absolute-storm-local-dir conf) file-path-separator "nimbus")]
+    (FileUtils/forceMkdir (File. ret))
+    ret))
+
+(defn master-stormjar-key
+  [topology-id]
+  (str topology-id "-stormjar.jar"))
+
+(defn master-stormcode-key
+  [topology-id]
+  (str topology-id "-stormcode.ser"))
+
+(defn master-stormconf-key
+  [topology-id]
+  (str topology-id "-stormconf.ser"))
+
+(defn master-stormdist-root
+  ([conf]
+   (str (master-local-dir conf) file-path-separator "stormdist"))
+  ([conf storm-id]
+   (str (master-stormdist-root conf) file-path-separator storm-id)))
+
+(defn master-tmp-dir
+  [conf]
+  (let [ret (str (master-local-dir conf) file-path-separator "tmp")]
+    (FileUtils/forceMkdir (File. ret))
+    ret ))
+
+(defn read-supervisor-storm-conf-given-path
+  [conf stormconf-path]
+  (merge conf (clojurify-structure (Utils/fromCompressedJsonConf (FileUtils/readFileToByteArray (File. stormconf-path))))))
+
+(defn master-storm-metafile-path [stormroot ]
+  (str stormroot file-path-separator "storm-code-distributor.meta"))
+
+(defn master-stormjar-path
+  [stormroot]
+  (str stormroot file-path-separator "stormjar.jar"))
+
+(defn master-stormcode-path
+  [stormroot]
+  (str stormroot file-path-separator "stormcode.ser"))
+
+(defn master-stormconf-path
+  [stormroot]
+  (str stormroot file-path-separator "stormconf.ser"))
+
+(defn master-inbox
+  [conf]
+  (let [ret (str (master-local-dir conf) file-path-separator "inbox")]
+    (FileUtils/forceMkdir (File. ret))
+    ret ))
+
+(defn master-inimbus-dir
+  [conf]
+  (str (master-local-dir conf) file-path-separator "inimbus"))
+
+(defn supervisor-local-dir
+  [conf]
+  (let [ret (str (absolute-storm-local-dir conf) file-path-separator "supervisor")]
+    (FileUtils/forceMkdir (File. ret))
+    ret))
+
+(defn supervisor-isupervisor-dir
+  [conf]
+  (str (supervisor-local-dir conf) file-path-separator "isupervisor"))
+
+(defn supervisor-stormdist-root
+  ([conf]
+   (str (supervisor-local-dir conf) file-path-separator "stormdist"))
+  ([conf storm-id]
+   (str (supervisor-stormdist-root conf) file-path-separator (url-encode storm-id))))
+
+(defn supervisor-stormjar-path [stormroot]
+  (str stormroot file-path-separator "stormjar.jar"))
+
+(defn supervisor-storm-metafile-path [stormroot]
+  (str stormroot file-path-separator "storm-code-distributor.meta"))
+
+(defn supervisor-stormcode-path
+  [stormroot]
+  (str stormroot file-path-separator "stormcode.ser"))
+
+(defn supervisor-stormconf-path
+  [stormroot]
+  (str stormroot file-path-separator "stormconf.ser"))
+
+(defn supervisor-tmp-dir
+  [conf]
+  (let [ret (str (supervisor-local-dir conf) file-path-separator "tmp")]
+    (FileUtils/forceMkdir (File. ret))
+    ret ))
+
+(defn supervisor-storm-resources-path
+  [stormroot]
+  (str stormroot file-path-separator RESOURCES-SUBDIR))
+
+(defn ^LocalState supervisor-state
+  [conf]
+  (LocalState. (str (supervisor-local-dir conf) file-path-separator "localstate")))
+
+(defn ^LocalState nimbus-topo-history-state
+  [conf]
+  (LocalState. (str (master-local-dir conf) file-path-separator "history")))
+
+(defn read-supervisor-storm-conf
+  [conf storm-id]
+  (let [stormroot (supervisor-stormdist-root conf storm-id)
+        conf-path (supervisor-stormconf-path stormroot)]
+    (read-supervisor-storm-conf-given-path conf conf-path)))
+
+(defn read-supervisor-topology
+  [conf storm-id]
+  (let [stormroot (supervisor-stormdist-root conf storm-id)
+        topology-path (supervisor-stormcode-path stormroot)]
+    (Utils/deserialize (FileUtils/readFileToByteArray (File. topology-path)) StormTopology)
+    ))
+
+(defn worker-user-root [conf]
+  (str (absolute-storm-local-dir conf) "/workers-users"))
+
+(defn worker-user-file [conf worker-id]
+  (str (worker-user-root conf) "/" worker-id))
+
+(defn get-worker-user [conf worker-id]
+  (log-message "GET worker-user " worker-id)
+  (try
+    (str/trim (slurp (worker-user-file conf worker-id)))
+  (catch IOException e
+    (log-warn-error e "Failed to get worker user for " worker-id ".")
+    nil
+    )))
+
+(defn get-id-from-blob-key
+  [key]
+  (if-let [groups (re-find #"^(.*)((-stormjar\.jar)|(-stormcode\.ser)|(-stormconf\.ser))$" key)]
+    (nth groups 1)))
+
+(defn set-worker-user! [conf worker-id user]
+  (log-message "SET worker-user " worker-id " " user)
+  (let [file (worker-user-file conf worker-id)]
+    (.mkdirs (.getParentFile (File. file)))
+    (spit (worker-user-file conf worker-id) user)))
+
+(defn remove-worker-user! [conf worker-id]
+  (log-message "REMOVE worker-user " worker-id)
+  (.delete (File. (worker-user-file conf worker-id))))
+
+(defn worker-artifacts-root
+  ([conf]
+   (let [workers-artifacts-dir (conf STORM-WORKERS-ARTIFACTS-DIR)]
+     (if workers-artifacts-dir
+       (if (is-absolute-path? workers-artifacts-dir)
+         workers-artifacts-dir
+         (str LOG-DIR file-path-separator workers-artifacts-dir))
+       (str LOG-DIR file-path-separator "workers-artifacts"))))
+  ([conf id]
+   (str (worker-artifacts-root conf) file-path-separator id))
+  ([conf id port]
+   (str (worker-artifacts-root conf id) file-path-separator port)))
+
+(defn worker-artifacts-pid-path
+  [conf id port]
+  (str (worker-artifacts-root conf id port) file-path-separator "worker.pid"))
+
+(defn get-log-metadata-file
+  ([fname]
+    (let [[id port & _] (str/split fname (re-pattern file-path-separator))]
+      (get-log-metadata-file (read-storm-config) id port)))
+  ([conf id port]
+    (clojure.java.io/file (str (worker-artifacts-root conf id) file-path-separator port file-path-separator) "worker.yaml")))
+
+(defn get-worker-dir-from-root
+  [log-root id port]
+  (clojure.java.io/file (str log-root file-path-separator id file-path-separator port)))
+
+(defn worker-root
+  ([conf]
+   (str (absolute-storm-local-dir conf) file-path-separator "workers"))
+  ([conf id]
+   (str (worker-root conf) file-path-separator id)))
+
+(defn worker-pids-root
+  [conf id]
+  (str (worker-root conf id) file-path-separator "pids"))
+
+(defn worker-pid-path
+  [conf id pid]
+  (str (worker-pids-root conf id) file-path-separator pid))
+
+(defn worker-heartbeats-root
+  [conf id]
+  (str (worker-root conf id) file-path-separator "heartbeats"))
+
+;; workers heartbeat here with pid and timestamp
+;; if supervisor stops receiving heartbeat, it kills and restarts the process
+;; in local mode, keep a global map of ids to threads for simulating process management
+(defn ^LocalState worker-state
+  [conf id]
+  (LocalState. (worker-heartbeats-root conf id)))
+
+(defn override-login-config-with-system-property [conf]
+  (if-let [login_conf_file (System/getProperty "java.security.auth.login.config")]
+    (assoc conf "java.security.auth.login.config" login_conf_file)
+    conf))
+
+(defn get-topo-logs-users
+  [topology-conf]
+  (sort (distinct (remove nil?
+                    (concat
+                      (topology-conf LOGS-USERS)
+                      (topology-conf TOPOLOGY-USERS))))))
+
+(defn get-topo-logs-groups
+  [topology-conf]
+  (sort (distinct (remove nil?
+                    (concat
+                      (topology-conf LOGS-GROUPS)
+                      (topology-conf TOPOLOGY-GROUPS))))))
+

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/converter.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/converter.clj b/storm-core/src/clj/org/apache/storm/converter.clj
new file mode 100644
index 0000000..bb2dc87
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/converter.clj
@@ -0,0 +1,277 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.converter
+  (:import [org.apache.storm.generated SupervisorInfo NodeInfo Assignment WorkerResources
+            StormBase TopologyStatus ClusterWorkerHeartbeat ExecutorInfo ErrorInfo Credentials RebalanceOptions KillOptions
+            TopologyActionOptions DebugOptions ProfileRequest])
+  (:use [org.apache.storm util stats log])
+  (:require [org.apache.storm.daemon [common :as common]]))
+
+(defn thriftify-supervisor-info [supervisor-info]
+  (doto (SupervisorInfo.)
+    (.set_time_secs (long (:time-secs supervisor-info)))
+    (.set_hostname (:hostname supervisor-info))
+    (.set_assignment_id (:assignment-id supervisor-info))
+    (.set_used_ports (map long (:used-ports supervisor-info)))
+    (.set_meta (map long (:meta supervisor-info)))
+    (.set_scheduler_meta (:scheduler-meta supervisor-info))
+    (.set_uptime_secs (long (:uptime-secs supervisor-info)))
+    (.set_version (:version supervisor-info))
+    (.set_resources_map (:resources-map supervisor-info))
+    ))
+
+(defn clojurify-supervisor-info [^SupervisorInfo supervisor-info]
+  (if supervisor-info
+    (org.apache.storm.daemon.common.SupervisorInfo.
+      (.get_time_secs supervisor-info)
+      (.get_hostname supervisor-info)
+      (.get_assignment_id supervisor-info)
+      (if (.get_used_ports supervisor-info) (into [] (.get_used_ports supervisor-info)))
+      (if (.get_meta supervisor-info) (into [] (.get_meta supervisor-info)))
+      (if (.get_scheduler_meta supervisor-info) (into {} (.get_scheduler_meta supervisor-info)))
+      (.get_uptime_secs supervisor-info)
+      (.get_version supervisor-info)
+      (if-let [res-map (.get_resources_map supervisor-info)] (into {} res-map)))))
+
+(defn thriftify-assignment [assignment]
+  (let [thrift-assignment (doto (Assignment.)
+                            (.set_master_code_dir (:master-code-dir assignment))
+                            (.set_node_host (:node->host assignment))
+                            (.set_executor_node_port (into {}
+                                                           (map (fn [[k v]]
+                                                                  [(map long k)
+                                                                   (NodeInfo. (first v) (set (map long (rest v))))])
+                                                                (:executor->node+port assignment))))
+                            (.set_executor_start_time_secs
+                              (into {}
+                                    (map (fn [[k v]]
+                                           [(map long k) (long v)])
+                                         (:executor->start-time-secs assignment)))))]
+    (if (:worker->resources assignment)
+      (.set_worker_resources thrift-assignment (into {} (map
+                                                          (fn [[node+port resources]]
+                                                            [(NodeInfo. (first node+port) (set (map long (rest node+port))))
+                                                             (doto (WorkerResources.)
+                                                               (.set_mem_on_heap (first resources))
+                                                               (.set_mem_off_heap (second resources))
+                                                               (.set_cpu (last resources)))])
+                                                          (:worker->resources assignment)))))
+    thrift-assignment))
+
+(defn clojurify-executor->node_port [executor->node_port]
+  (into {}
+    (map-val
+      (fn [nodeInfo]
+        (concat [(.get_node nodeInfo)] (.get_port nodeInfo))) ;nodeInfo should be converted to [node,port1,port2..]
+      (map-key
+        (fn [list-of-executors]
+          (into [] list-of-executors)) ; list of executors must be converted to clojure vector to ensure it is sortable.
+        executor->node_port))))
+
+(defn clojurify-worker->resources
+  "convert worker info to be [node, port]
+   convert resources to be [mem_on_heap mem_off_heap cpu]"
+  [worker->resources]
+  (into {} (map
+             (fn [[nodeInfo resources]]
+               [(concat [(.get_node nodeInfo)] (.get_port nodeInfo))
+                [(.get_mem_on_heap resources) (.get_mem_off_heap resources) (.get_cpu resources)]])
+             worker->resources)))
+
+(defn clojurify-assignment [^Assignment assignment]
+  (if assignment
+    (org.apache.storm.daemon.common.Assignment.
+      (.get_master_code_dir assignment)
+      (into {} (.get_node_host assignment))
+      (clojurify-executor->node_port (into {} (.get_executor_node_port assignment)))
+      (map-key (fn [executor] (into [] executor))
+        (into {} (.get_executor_start_time_secs assignment)))
+      (clojurify-worker->resources (into {} (.get_worker_resources assignment))))))
+
+(defn convert-to-symbol-from-status [status]
+  (condp = status
+    TopologyStatus/ACTIVE {:type :active}
+    TopologyStatus/INACTIVE {:type :inactive}
+    TopologyStatus/REBALANCING {:type :rebalancing}
+    TopologyStatus/KILLED {:type :killed}
+    nil))
+
+(defn- convert-to-status-from-symbol [status]
+  (if status
+    (condp = (:type status)
+      :active TopologyStatus/ACTIVE
+      :inactive TopologyStatus/INACTIVE
+      :rebalancing TopologyStatus/REBALANCING
+      :killed TopologyStatus/KILLED
+      nil)))
+
+(defn clojurify-rebalance-options [^RebalanceOptions rebalance-options]
+  (-> {:action :rebalance}
+    (assoc-non-nil :delay-secs (if (.is_set_wait_secs rebalance-options) (.get_wait_secs rebalance-options)))
+    (assoc-non-nil :num-workers (if (.is_set_num_workers rebalance-options) (.get_num_workers rebalance-options)))
+    (assoc-non-nil :component->executors (if (.is_set_num_executors rebalance-options) (into {} (.get_num_executors rebalance-options))))))
+
+(defn thriftify-rebalance-options [rebalance-options]
+  (if rebalance-options
+    (let [thrift-rebalance-options (RebalanceOptions.)]
+      (if (:delay-secs rebalance-options)
+        (.set_wait_secs thrift-rebalance-options (int (:delay-secs rebalance-options))))
+      (if (:num-workers rebalance-options)
+        (.set_num_workers thrift-rebalance-options (int (:num-workers rebalance-options))))
+      (if (:component->executors rebalance-options)
+        (.set_num_executors thrift-rebalance-options (map-val int (:component->executors rebalance-options))))
+      thrift-rebalance-options)))
+
+(defn clojurify-kill-options [^KillOptions kill-options]
+  (-> {:action :kill}
+    (assoc-non-nil :delay-secs (if (.is_set_wait_secs kill-options) (.get_wait_secs kill-options)))))
+
+(defn thriftify-kill-options [kill-options]
+  (if kill-options
+    (let [thrift-kill-options (KillOptions.)]
+      (if (:delay-secs kill-options)
+        (.set_wait_secs thrift-kill-options (int (:delay-secs kill-options))))
+      thrift-kill-options)))
+
+(defn thriftify-topology-action-options [storm-base]
+  (if (:topology-action-options storm-base)
+    (let [ topology-action-options (:topology-action-options storm-base)
+           action (:action topology-action-options)
+           thrift-topology-action-options (TopologyActionOptions.)]
+      (if (= action :kill)
+        (.set_kill_options thrift-topology-action-options (thriftify-kill-options topology-action-options)))
+      (if (= action :rebalance)
+        (.set_rebalance_options thrift-topology-action-options (thriftify-rebalance-options topology-action-options)))
+      thrift-topology-action-options)))
+
+(defn clojurify-topology-action-options [^TopologyActionOptions topology-action-options]
+  (if topology-action-options
+    (or (and (.is_set_kill_options topology-action-options)
+             (clojurify-kill-options
+               (.get_kill_options topology-action-options)))
+        (and (.is_set_rebalance_options topology-action-options)
+             (clojurify-rebalance-options
+               (.get_rebalance_options topology-action-options))))))
+
+(defn clojurify-debugoptions [^DebugOptions options]
+  (if options
+    {
+      :enable (.is_enable options)
+      :samplingpct (.get_samplingpct options)
+      }
+    ))
+
+(defn thriftify-debugoptions [options]
+  (doto (DebugOptions.)
+    (.set_enable (get options :enable false))
+    (.set_samplingpct (get options :samplingpct 10))))
+
+(defn thriftify-storm-base [storm-base]
+  (doto (StormBase.)
+    (.set_name (:storm-name storm-base))
+    (.set_launch_time_secs (int (:launch-time-secs storm-base)))
+    (.set_status (convert-to-status-from-symbol (:status storm-base)))
+    (.set_num_workers (int (:num-workers storm-base)))
+    (.set_component_executors (map-val int (:component->executors storm-base)))
+    (.set_owner (:owner storm-base))
+    (.set_topology_action_options (thriftify-topology-action-options storm-base))
+    (.set_prev_status (convert-to-status-from-symbol (:prev-status storm-base)))
+    (.set_component_debug (map-val thriftify-debugoptions (:component->debug storm-base)))))
+
+(defn clojurify-storm-base [^StormBase storm-base]
+  (if storm-base
+    (org.apache.storm.daemon.common.StormBase.
+      (.get_name storm-base)
+      (.get_launch_time_secs storm-base)
+      (convert-to-symbol-from-status (.get_status storm-base))
+      (.get_num_workers storm-base)
+      (into {} (.get_component_executors storm-base))
+      (.get_owner storm-base)
+      (clojurify-topology-action-options (.get_topology_action_options storm-base))
+      (convert-to-symbol-from-status (.get_prev_status storm-base))
+      (map-val clojurify-debugoptions (.get_component_debug storm-base)))))
+
+(defn thriftify-stats [stats]
+  (if stats
+    (map-val thriftify-executor-stats
+      (map-key #(ExecutorInfo. (int (first %1)) (int (last %1)))
+        stats))
+    {}))
+
+(defn clojurify-stats [stats]
+  (if stats
+    (map-val clojurify-executor-stats
+      (map-key (fn [x] (list (.get_task_start x) (.get_task_end x)))
+        stats))
+    {}))
+
+(defn clojurify-zk-worker-hb [^ClusterWorkerHeartbeat worker-hb]
+  (if worker-hb
+    {:storm-id (.get_storm_id worker-hb)
+     :executor-stats (clojurify-stats (into {} (.get_executor_stats worker-hb)))
+     :uptime (.get_uptime_secs worker-hb)
+     :time-secs (.get_time_secs worker-hb)
+     }
+    {}))
+
+(defn thriftify-zk-worker-hb [worker-hb]
+  (if (not-empty (filter second (:executor-stats worker-hb)))
+    (doto (ClusterWorkerHeartbeat.)
+      (.set_uptime_secs (:uptime worker-hb))
+      (.set_storm_id (:storm-id worker-hb))
+      (.set_executor_stats (thriftify-stats (filter second (:executor-stats worker-hb))))
+      (.set_time_secs (:time-secs worker-hb)))))
+
+(defn clojurify-error [^ErrorInfo error]
+  (if error
+    {
+      :error (.get_error error)
+      :time-secs (.get_error_time_secs error)
+      :host (.get_host error)
+      :port (.get_port error)
+      }
+    ))
+
+(defn thriftify-error [error]
+  (doto (ErrorInfo. (:error error) (:time-secs error))
+    (.set_host (:host error))
+    (.set_port (:port error))))
+
+(defn clojurify-profile-request
+  [^ProfileRequest request]
+  (when request
+    {:host (.get_node (.get_nodeInfo request))
+     :port (first (.get_port (.get_nodeInfo request)))
+     :action     (.get_action request)
+     :timestamp  (.get_time_stamp request)}))
+
+(defn thriftify-profile-request
+  [profile-request]
+  (let [nodeinfo (doto (NodeInfo.)
+                   (.set_node (:host profile-request))
+                   (.set_port (set [(:port profile-request)])))
+        request (ProfileRequest. nodeinfo (:action profile-request))]
+    (.set_time_stamp request (:timestamp profile-request))
+    request))
+
+(defn thriftify-credentials [credentials]
+    (doto (Credentials.)
+      (.set_creds (if credentials credentials {}))))
+
+(defn clojurify-crdentials [^Credentials credentials]
+  (if credentials
+    (into {} (.get_creds credentials))
+    nil
+    ))
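
A minimal REPL sketch (not part of the commit; it assumes the generated KillOptions class and the assoc-non-nil helper used above are on the classpath) of how an options map round-trips through the Thrift struct:

  (-> {:delay-secs 30}
      thriftify-kill-options
      clojurify-kill-options)
  ;; => {:action :kill, :delay-secs 30}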

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/daemon/acker.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/daemon/acker.clj b/storm-core/src/clj/org/apache/storm/daemon/acker.clj
new file mode 100644
index 0000000..7c4d614
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/daemon/acker.clj
@@ -0,0 +1,107 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.daemon.acker
+  (:import [org.apache.storm.task OutputCollector TopologyContext IBolt])
+  (:import [org.apache.storm.tuple Tuple Fields])
+  (:import [org.apache.storm.utils RotatingMap MutableObject])
+  (:import [java.util List Map])
+  (:import [org.apache.storm Constants])
+  (:use [org.apache.storm config util log])
+  (:gen-class
+   :init init
+   :implements [org.apache.storm.task.IBolt]
+   :constructors {[] []}
+   :state state ))
+
+(def ACKER-COMPONENT-ID "__acker")
+(def ACKER-INIT-STREAM-ID "__ack_init")
+(def ACKER-ACK-STREAM-ID "__ack_ack")
+(def ACKER-FAIL-STREAM-ID "__ack_fail")
+
+(defn- update-ack [curr-entry val]
+  (let [old (get curr-entry :val 0)]
+    (assoc curr-entry :val (bit-xor old val))
+    ))
+
+(defn- acker-emit-direct [^OutputCollector collector ^Integer task ^String stream ^List values]
+  (.emitDirect collector task stream values)
+  )
+
+(defn mk-acker-bolt []
+  (let [output-collector (MutableObject.)
+        pending (MutableObject.)]
+    (reify IBolt
+      (^void prepare [this ^Map storm-conf ^TopologyContext context ^OutputCollector collector]
+               (.setObject output-collector collector)
+               (.setObject pending (RotatingMap. 2))
+               )
+      (^void execute [this ^Tuple tuple]
+             (let [^RotatingMap pending (.getObject pending)
+                   stream-id (.getSourceStreamId tuple)]
+               (if (= stream-id Constants/SYSTEM_TICK_STREAM_ID)
+                 (.rotate pending)
+                 (let [id (.getValue tuple 0)
+                       ^OutputCollector output-collector (.getObject output-collector)
+                       curr (.get pending id)
+                       curr (condp = stream-id
+                                ACKER-INIT-STREAM-ID (-> curr
+                                                         (update-ack (.getValue tuple 1))
+                                                         (assoc :spout-task (.getValue tuple 2)))
+                                ACKER-ACK-STREAM-ID (update-ack curr (.getValue tuple 1))
+                                ACKER-FAIL-STREAM-ID (assoc curr :failed true))]
+                   (.put pending id curr)
+                   (when (and curr (:spout-task curr))
+                     (cond (= 0 (:val curr))
+                           (do
+                             (.remove pending id)
+                             (acker-emit-direct output-collector
+                                                (:spout-task curr)
+                                                ACKER-ACK-STREAM-ID
+                                                [id]
+                                                ))
+                           (:failed curr)
+                           (do
+                             (.remove pending id)
+                             (acker-emit-direct output-collector
+                                                (:spout-task curr)
+                                                ACKER-FAIL-STREAM-ID
+                                                [id]
+                                                ))
+                           ))
+                   (.ack output-collector tuple)
+                   ))))
+      (^void cleanup [this]
+        )
+      )))
+
+(defn -init []
+  [[] (container)])
+
+(defn -prepare [this conf context collector]
+  (let [^IBolt ret (mk-acker-bolt)]
+    (container-set! (.state ^org.apache.storm.daemon.acker this) ret)
+    (.prepare ret conf context collector)
+    ))
+
+(defn -execute [this tuple]
+  (let [^IBolt delegate (container-get (.state ^org.apache.storm.daemon.acker this))]
+    (.execute delegate tuple)
+    ))
+
+(defn -cleanup [this]
+  (let [^IBolt delegate (container-get (.state ^org.apache.storm.daemon.acker this))]
+    (.cleanup delegate)
+    ))
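
The acker above tracks each tuple tree with a single XOR accumulator: each edge id is XORed into the entry twice over the life of the tree (once when the edge is created and once when it is acked), so the value returns to zero exactly when the whole tree has completed. A tiny sketch with hypothetical anchor ids (evaluated inside the acker namespace, since update-ack is private):

  (reduce update-ack {} [7 13 7 13])
  ;; => {:val 0}   ; both ids seen twice, so the tree is complete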

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/daemon/builtin_metrics.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/daemon/builtin_metrics.clj b/storm-core/src/clj/org/apache/storm/daemon/builtin_metrics.clj
new file mode 100644
index 0000000..14d0132
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/daemon/builtin_metrics.clj
@@ -0,0 +1,98 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.daemon.builtin-metrics
+  (:import [org.apache.storm.metric.api CountMetric StateMetric IMetric IStatefulObject])
+  (:import [org.apache.storm.metric.internal MultiCountStatAndMetric MultiLatencyStatAndMetric])
+  (:import [org.apache.storm Config])
+  (:use [org.apache.storm.stats]))
+
+(defrecord BuiltinSpoutMetrics [^MultiCountStatAndMetric ack-count
+                                ^MultiLatencyStatAndMetric complete-latency
+                                ^MultiCountStatAndMetric fail-count
+                                ^MultiCountStatAndMetric emit-count
+                                ^MultiCountStatAndMetric transfer-count])
+(defrecord BuiltinBoltMetrics [^MultiCountStatAndMetric ack-count
+                               ^MultiLatencyStatAndMetric process-latency
+                               ^MultiCountStatAndMetric fail-count
+                               ^MultiCountStatAndMetric execute-count
+                               ^MultiLatencyStatAndMetric execute-latency
+                               ^MultiCountStatAndMetric emit-count
+                               ^MultiCountStatAndMetric transfer-count])
+(defrecord SpoutThrottlingMetrics [^CountMetric skipped-max-spout
+                                   ^CountMetric skipped-throttle
+                                   ^CountMetric skipped-inactive])
+
+
+(defn make-data [executor-type stats]
+  (condp = executor-type
+    :spout (BuiltinSpoutMetrics. (stats-acked stats)
+                                 (stats-complete-latencies stats)
+                                 (stats-failed stats)
+                                 (stats-emitted stats)
+                                 (stats-transferred stats))
+    :bolt (BuiltinBoltMetrics. (stats-acked stats)
+                               (stats-process-latencies stats)
+                               (stats-failed stats)
+                               (stats-executed stats)
+                               (stats-execute-latencies stats)
+                               (stats-emitted stats)
+                               (stats-transferred stats))))
+
+(defn make-spout-throttling-data []
+  (SpoutThrottlingMetrics. (CountMetric.)
+                           (CountMetric.)
+                           (CountMetric.)))
+
+(defn register-spout-throttling-metrics [throttling-metrics  storm-conf topology-context]
+  (doseq [[kw imetric] throttling-metrics]
+    (.registerMetric topology-context (str "__" (name kw)) imetric
+                     (int (get storm-conf Config/TOPOLOGY_BUILTIN_METRICS_BUCKET_SIZE_SECS)))))
+
+(defn register-all [builtin-metrics  storm-conf topology-context]
+  (doseq [[kw imetric] builtin-metrics]
+    (.registerMetric topology-context (str "__" (name kw)) imetric
+                     (int (get storm-conf Config/TOPOLOGY_BUILTIN_METRICS_BUCKET_SIZE_SECS)))))
+
+(defn register-iconnection-server-metric [server storm-conf topology-context]
+  (if (instance? IStatefulObject server)
+    (.registerMetric topology-context "__recv-iconnection" (StateMetric. server)
+                     (int (get storm-conf Config/TOPOLOGY_BUILTIN_METRICS_BUCKET_SIZE_SECS)))))
+
+(defn register-iconnection-client-metrics [node+port->socket-ref storm-conf topology-context]
+  (.registerMetric topology-context "__send-iconnection"
+    (reify IMetric
+      (^Object getValueAndReset [this]
+        (into {}
+          (map
+            (fn [[node+port ^IStatefulObject connection]] [node+port (.getState connection)])
+            (filter 
+              (fn [[node+port connection]] (instance? IStatefulObject connection))
+              @node+port->socket-ref)))))
+    (int (get storm-conf Config/TOPOLOGY_BUILTIN_METRICS_BUCKET_SIZE_SECS))))
+ 
+(defn register-queue-metrics [queues storm-conf topology-context]
+  (doseq [[qname q] queues]
+    (.registerMetric topology-context (str "__" (name qname)) (StateMetric. q)
+                     (int (get storm-conf Config/TOPOLOGY_BUILTIN_METRICS_BUCKET_SIZE_SECS)))))
+
+(defn skipped-max-spout! [^SpoutThrottlingMetrics m stats]
+  (-> m .skipped-max-spout (.incrBy (stats-rate stats))))
+
+(defn skipped-throttle! [^SpoutThrottlingMetrics m stats]
+  (-> m .skipped-throttle (.incrBy (stats-rate stats))))
+
+(defn skipped-inactive! [^SpoutThrottlingMetrics m stats]
+  (-> m .skipped-inactive (.incrBy (stats-rate stats))))
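
register-all and register-spout-throttling-metrics derive each metric name from the record field, prefixed with "__", and register it using the topology's builtin-metrics bucket size. A small sketch of the naming (assuming this namespace is loaded):

  (sort (map (fn [[kw _]] (str "__" (name kw)))
             (make-spout-throttling-data)))
  ;; => ("__skipped-inactive" "__skipped-max-spout" "__skipped-throttle")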

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/daemon/common.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/daemon/common.clj b/storm-core/src/clj/org/apache/storm/daemon/common.clj
new file mode 100644
index 0000000..dd761a5
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/daemon/common.clj
@@ -0,0 +1,402 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.daemon.common
+  (:use [org.apache.storm log config util])
+  (:import [org.apache.storm.generated StormTopology
+            InvalidTopologyException GlobalStreamId]
+           [org.apache.storm.utils ThriftTopologyUtils])
+  (:import [org.apache.storm.utils Utils])
+  (:import [org.apache.storm.task WorkerTopologyContext])
+  (:import [org.apache.storm Constants])
+  (:import [org.apache.storm.metric SystemBolt])
+  (:import [org.apache.storm.metric EventLoggerBolt])
+  (:import [org.apache.storm.security.auth IAuthorizer]) 
+  (:import [java.io InterruptedIOException])
+  (:require [clojure.set :as set])  
+  (:require [org.apache.storm.daemon.acker :as acker])
+  (:require [org.apache.storm.thrift :as thrift])
+  (:require [metrics.reporters.jmx :as jmx]))
+
+(defn start-metrics-reporters []
+  (jmx/start (jmx/reporter {})))
+
+(def ACKER-COMPONENT-ID acker/ACKER-COMPONENT-ID)
+(def ACKER-INIT-STREAM-ID acker/ACKER-INIT-STREAM-ID)
+(def ACKER-ACK-STREAM-ID acker/ACKER-ACK-STREAM-ID)
+(def ACKER-FAIL-STREAM-ID acker/ACKER-FAIL-STREAM-ID)
+
+(def SYSTEM-STREAM-ID "__system")
+
+(def EVENTLOGGER-COMPONENT-ID "__eventlogger")
+(def EVENTLOGGER-STREAM-ID "__eventlog")
+
+(def SYSTEM-COMPONENT-ID Constants/SYSTEM_COMPONENT_ID)
+(def SYSTEM-TICK-STREAM-ID Constants/SYSTEM_TICK_STREAM_ID)
+(def METRICS-STREAM-ID Constants/METRICS_STREAM_ID)
+(def METRICS-TICK-STREAM-ID Constants/METRICS_TICK_STREAM_ID)
+(def CREDENTIALS-CHANGED-STREAM-ID Constants/CREDENTIALS_CHANGED_STREAM_ID)
+
+;; the task id is the virtual port
+;; node->host is here so that tasks know who to talk to just from assignment
+;; this avoids the situation where a node goes down and tasks are left without the information they need
+(defrecord Assignment [master-code-dir node->host executor->node+port executor->start-time-secs worker->resources])
+
+
+;; component->executors is a map from spout/bolt id to number of executors for that component
+(defrecord StormBase [storm-name launch-time-secs status num-workers component->executors owner topology-action-options prev-status component->debug])
+
+(defrecord SupervisorInfo [time-secs hostname assignment-id used-ports meta scheduler-meta uptime-secs version resources-map])
+
+(defprotocol DaemonCommon
+  (waiting? [this]))
+
+(defrecord ExecutorStats [^long processed
+                          ^long acked
+                          ^long emitted
+                          ^long transferred
+                          ^long failed])
+
+(defn new-executor-stats []
+  (ExecutorStats. 0 0 0 0 0))
+
+(defn get-storm-id [storm-cluster-state storm-name]
+  (let [active-storms (.active-storms storm-cluster-state)]
+    (find-first
+      #(= storm-name (:storm-name (.storm-base storm-cluster-state % nil)))
+      active-storms)
+    ))
+
+(defn topology-bases [storm-cluster-state]
+  (let [active-topologies (.active-storms storm-cluster-state)]
+    (into {} 
+          (dofor [id active-topologies]
+                 [id (.storm-base storm-cluster-state id nil)]
+                 ))
+    ))
+
+(defn validate-distributed-mode! [conf]
+  (if (local-mode? conf)
+      (throw
+        (IllegalArgumentException. "Cannot start server in local mode!"))))
+
+(defmacro defserverfn [name & body]
+  `(let [exec-fn# (fn ~@body)]
+    (defn ~name [& args#]
+      (try-cause
+        (apply exec-fn# args#)
+      (catch InterruptedIOException e#
+        (throw e#))
+      (catch InterruptedException e#
+        (throw e#))
+      (catch Throwable t#
+        (log-error t# "Error on initialization of server " ~(str name))
+        (exit-process! 13 "Error on initialization")
+        )))))
+
+(defn- validate-ids! [^StormTopology topology]
+  (let [sets (map #(.getFieldValue topology %) thrift/STORM-TOPOLOGY-FIELDS)
+        offending (apply any-intersection sets)]
+    (if-not (empty? offending)
+      (throw (InvalidTopologyException.
+              (str "Duplicate component ids: " offending))))
+    (doseq [f thrift/STORM-TOPOLOGY-FIELDS
+            :let [obj-map (.getFieldValue topology f)]]
+      (if-not (ThriftTopologyUtils/isWorkerHook f)
+        (do
+          (doseq [id (keys obj-map)]
+            (if (Utils/isSystemId id)
+              (throw (InvalidTopologyException.
+                       (str id " is not a valid component id")))))
+          (doseq [obj (vals obj-map)
+                  id (-> obj .get_common .get_streams keys)]
+            (if (Utils/isSystemId id)
+              (throw (InvalidTopologyException.
+                       (str id " is not a valid stream id"))))))))))
+
+(defn all-components [^StormTopology topology]
+  (apply merge {}
+    (for [f thrift/STORM-TOPOLOGY-FIELDS]
+      (if-not (ThriftTopologyUtils/isWorkerHook f)
+        (.getFieldValue topology f)))))
+
+(defn component-conf [component]
+  (->> component
+      .get_common
+      .get_json_conf
+      from-json))
+
+(defn validate-basic! [^StormTopology topology]
+  (validate-ids! topology)
+  (doseq [f thrift/SPOUT-FIELDS
+          obj (->> f (.getFieldValue topology) vals)]
+    (if-not (empty? (-> obj .get_common .get_inputs))
+      (throw (InvalidTopologyException. "May not declare inputs for a spout"))))
+  (doseq [[comp-id comp] (all-components topology)
+          :let [conf (component-conf comp)
+                p (-> comp .get_common thrift/parallelism-hint)]]
+    (when (and (> (conf TOPOLOGY-TASKS) 0)
+               p
+               (<= p 0))
+      (throw (InvalidTopologyException. "Number of executors must be greater than 0 when number of tasks is greater than 0"))
+      )))
+
+(defn validate-structure! [^StormTopology topology]
+  ;; validate that every component subscribes to a component+stream that actually exists in the topology
+  ;; and, if it is a fields grouping, that the corresponding fields exist
+  (let [all-components (all-components topology)]
+    (doseq [[id comp] all-components
+            :let [inputs (.. comp get_common get_inputs)]]
+      (doseq [[global-stream-id grouping] inputs
+              :let [source-component-id (.get_componentId global-stream-id)
+                    source-stream-id    (.get_streamId global-stream-id)]]
+        (if-not (contains? all-components source-component-id)
+          (throw (InvalidTopologyException. (str "Component: [" id "] subscribes from non-existent component [" source-component-id "]")))
+          (let [source-streams (-> all-components (get source-component-id) .get_common .get_streams)]
+            (if-not (contains? source-streams source-stream-id)
+              (throw (InvalidTopologyException. (str "Component: [" id "] subscribes from non-existent stream: [" source-stream-id "] of component [" source-component-id "]")))
+              (if (= :fields (thrift/grouping-type grouping))
+                (let [grouping-fields (set (.get_fields grouping))
+                      source-stream-fields (-> source-streams (get source-stream-id) .get_output_fields set)
+                      diff-fields (set/difference grouping-fields source-stream-fields)]
+                  (when-not (empty? diff-fields)
+                    (throw (InvalidTopologyException. (str "Component: [" id "] subscribes from stream: [" source-stream-id "] of component [" source-component-id "] with non-existent fields: " diff-fields)))))))))))))
+
+(defn acker-inputs [^StormTopology topology]
+  (let [bolt-ids (.. topology get_bolts keySet)
+        spout-ids (.. topology get_spouts keySet)
+        spout-inputs (apply merge
+                            (for [id spout-ids]
+                              {[id ACKER-INIT-STREAM-ID] ["id"]}
+                              ))
+        bolt-inputs (apply merge
+                           (for [id bolt-ids]
+                             {[id ACKER-ACK-STREAM-ID] ["id"]
+                              [id ACKER-FAIL-STREAM-ID] ["id"]}
+                             ))]
+    (merge spout-inputs bolt-inputs)))
+
+;; the event logger receives inputs from all the spouts and bolts
+;; with a fields grouping on component id so that all tuples from a component
+;; go to the same executor and can be viewed via the logviewer.
+(defn eventlogger-inputs [^StormTopology topology]
+  (let [bolt-ids (.. topology get_bolts keySet)
+        spout-ids (.. topology get_spouts keySet)
+        spout-inputs (apply merge
+                       (for [id spout-ids]
+                         {[id EVENTLOGGER-STREAM-ID] ["component-id"]}
+                         ))
+        bolt-inputs (apply merge
+                      (for [id bolt-ids]
+                        {[id EVENTLOGGER-STREAM-ID] ["component-id"]}
+                        ))]
+    (merge spout-inputs bolt-inputs)))
+
+(defn add-acker! [storm-conf ^StormTopology ret]
+  (let [num-executors (if (nil? (storm-conf TOPOLOGY-ACKER-EXECUTORS)) (storm-conf TOPOLOGY-WORKERS) (storm-conf TOPOLOGY-ACKER-EXECUTORS))
+        acker-bolt (thrift/mk-bolt-spec* (acker-inputs ret)
+                                         (new org.apache.storm.daemon.acker)
+                                         {ACKER-ACK-STREAM-ID (thrift/direct-output-fields ["id"])
+                                          ACKER-FAIL-STREAM-ID (thrift/direct-output-fields ["id"])
+                                          }
+                                         :p num-executors
+                                         :conf {TOPOLOGY-TASKS num-executors
+                                                TOPOLOGY-TICK-TUPLE-FREQ-SECS (storm-conf TOPOLOGY-MESSAGE-TIMEOUT-SECS)})]
+    (dofor [[_ bolt] (.get_bolts ret)
+            :let [common (.get_common bolt)]]
+           (do
+             (.put_to_streams common ACKER-ACK-STREAM-ID (thrift/output-fields ["id" "ack-val"]))
+             (.put_to_streams common ACKER-FAIL-STREAM-ID (thrift/output-fields ["id"]))
+             ))
+    (dofor [[_ spout] (.get_spouts ret)
+            :let [common (.get_common spout)
+                  spout-conf (merge
+                               (component-conf spout)
+                               {TOPOLOGY-TICK-TUPLE-FREQ-SECS (storm-conf TOPOLOGY-MESSAGE-TIMEOUT-SECS)})]]
+      (do
+        ;; this sets up tick tuples so that timeouts get triggered
+        (.set_json_conf common (to-json spout-conf))
+        (.put_to_streams common ACKER-INIT-STREAM-ID (thrift/output-fields ["id" "init-val" "spout-task"]))
+        (.put_to_inputs common
+                        (GlobalStreamId. ACKER-COMPONENT-ID ACKER-ACK-STREAM-ID)
+                        (thrift/mk-direct-grouping))
+        (.put_to_inputs common
+                        (GlobalStreamId. ACKER-COMPONENT-ID ACKER-FAIL-STREAM-ID)
+                        (thrift/mk-direct-grouping))
+        ))
+    (.put_to_bolts ret "__acker" acker-bolt)
+    ))
+
+(defn add-metric-streams! [^StormTopology topology]
+  (doseq [[_ component] (all-components topology)
+          :let [common (.get_common component)]]
+    (.put_to_streams common METRICS-STREAM-ID
+                     (thrift/output-fields ["task-info" "data-points"]))))
+
+(defn add-system-streams! [^StormTopology topology]
+  (doseq [[_ component] (all-components topology)
+          :let [common (.get_common component)]]
+    (.put_to_streams common SYSTEM-STREAM-ID (thrift/output-fields ["event"]))))
+
+
+(defn map-occurrences [afn coll]
+  (->> coll
+       (reduce (fn [[counts new-coll] x]
+                 (let [occurs (inc (get counts x 0))]
+                   [(assoc counts x occurs) (cons (afn x occurs) new-coll)]))
+               [{} []])
+       (second)
+       (reverse)))
+
+(defn number-duplicates
+  "(number-duplicates [\"a\", \"b\", \"a\"]) => [\"a\", \"b\", \"a#2\"]"
+  [coll]
+  (map-occurrences (fn [x occurrences] (if (>= occurrences 2) (str x "#" occurrences) x)) coll))
+
+(defn metrics-consumer-register-ids
+  "Generates a list of component ids for each metrics consumer
+   e.g. [\"__metrics_org.mycompany.MyMetricsConsumer\", ..] "
+  [storm-conf]
+  (->> (get storm-conf TOPOLOGY-METRICS-CONSUMER-REGISTER)         
+       (map #(get % "class"))
+       (number-duplicates)
+       (map #(str Constants/METRICS_COMPONENT_ID_PREFIX %))))
+
+(defn metrics-consumer-bolt-specs [storm-conf topology]
+  (let [component-ids-that-emit-metrics (cons SYSTEM-COMPONENT-ID (keys (all-components topology)))
+        inputs (->> (for [comp-id component-ids-that-emit-metrics]
+                      {[comp-id METRICS-STREAM-ID] :shuffle})
+                    (into {}))
+        
+        mk-bolt-spec (fn [class arg p]
+                       (thrift/mk-bolt-spec*
+                        inputs
+                        (org.apache.storm.metric.MetricsConsumerBolt. class arg)
+                        {} :p p :conf {TOPOLOGY-TASKS p}))]
+    
+    (map
+     (fn [component-id register]           
+       [component-id (mk-bolt-spec (get register "class")
+                                   (get register "argument")
+                                   (or (get register "parallelism.hint") 1))])
+     
+     (metrics-consumer-register-ids storm-conf)
+     (get storm-conf TOPOLOGY-METRICS-CONSUMER-REGISTER))))
+
+;; return the fields that event logger bolt expects
+(defn eventlogger-bolt-fields []
+  [(EventLoggerBolt/FIELD_COMPONENT_ID) (EventLoggerBolt/FIELD_MESSAGE_ID)  (EventLoggerBolt/FIELD_TS) (EventLoggerBolt/FIELD_VALUES)]
+  )
+
+(defn add-eventlogger! [storm-conf ^StormTopology ret]
+  (let [num-executors (if (nil? (storm-conf TOPOLOGY-EVENTLOGGER-EXECUTORS)) (storm-conf TOPOLOGY-WORKERS) (storm-conf TOPOLOGY-EVENTLOGGER-EXECUTORS))
+        eventlogger-bolt (thrift/mk-bolt-spec* (eventlogger-inputs ret)
+                     (EventLoggerBolt.)
+                     {}
+                     :p num-executors
+                     :conf {TOPOLOGY-TASKS num-executors
+                            TOPOLOGY-TICK-TUPLE-FREQ-SECS (storm-conf TOPOLOGY-MESSAGE-TIMEOUT-SECS)})]
+
+    (doseq [[_ component] (all-components ret)
+            :let [common (.get_common component)]]
+      (.put_to_streams common EVENTLOGGER-STREAM-ID (thrift/output-fields (eventlogger-bolt-fields))))
+    (.put_to_bolts ret EVENTLOGGER-COMPONENT-ID eventlogger-bolt)
+    ))
+
+(defn add-metric-components! [storm-conf ^StormTopology topology]  
+  (doseq [[comp-id bolt-spec] (metrics-consumer-bolt-specs storm-conf topology)]
+    (.put_to_bolts topology comp-id bolt-spec)))
+
+(defn add-system-components! [conf ^StormTopology topology]
+  (let [system-bolt-spec (thrift/mk-bolt-spec*
+                          {}
+                          (SystemBolt.)
+                          {SYSTEM-TICK-STREAM-ID (thrift/output-fields ["rate_secs"])
+                           METRICS-TICK-STREAM-ID (thrift/output-fields ["interval"])
+                           CREDENTIALS-CHANGED-STREAM-ID (thrift/output-fields ["creds"])}
+                          :p 0
+                          :conf {TOPOLOGY-TASKS 0})]
+    (.put_to_bolts topology SYSTEM-COMPONENT-ID system-bolt-spec)))
+
+(defn system-topology! [storm-conf ^StormTopology topology]
+  (validate-basic! topology)
+  (let [ret (.deepCopy topology)]
+    (add-acker! storm-conf ret)
+    (add-eventlogger! storm-conf ret)
+    (add-metric-components! storm-conf ret)
+    (add-system-components! storm-conf ret)
+    (add-metric-streams! ret)
+    (add-system-streams! ret)
+    (validate-structure! ret)
+    ret
+    ))
+
+(defn has-ackers? [storm-conf]
+  (or (nil? (storm-conf TOPOLOGY-ACKER-EXECUTORS)) (> (storm-conf TOPOLOGY-ACKER-EXECUTORS) 0)))
+
+(defn has-eventloggers? [storm-conf]
+  (or (nil? (storm-conf TOPOLOGY-EVENTLOGGER-EXECUTORS)) (> (storm-conf TOPOLOGY-EVENTLOGGER-EXECUTORS) 0)))
+
+(defn num-start-executors [component]
+  (thrift/parallelism-hint (.get_common component)))
+
+(defn storm-task-info
+  "Returns map from task -> component id"
+  [^StormTopology user-topology storm-conf]
+  (->> (system-topology! storm-conf user-topology)
+       all-components
+       (map-val (comp #(get % TOPOLOGY-TASKS) component-conf))
+       (sort-by first)
+       (mapcat (fn [[c num-tasks]] (repeat num-tasks c)))
+       (map (fn [id comp] [id comp]) (iterate (comp int inc) (int 1)))
+       (into {})
+       ))
+
+(defn executor-id->tasks [[first-task-id last-task-id]]
+  (->> (range first-task-id (inc last-task-id))
+       (map int)))
+
+(defn worker-context [worker]
+  (WorkerTopologyContext. (:system-topology worker)
+                          (:storm-conf worker)
+                          (:task->component worker)
+                          (:component->sorted-tasks worker)
+                          (:component->stream->fields worker)
+                          (:storm-id worker)
+                          (supervisor-storm-resources-path
+                            (supervisor-stormdist-root (:conf worker) (:storm-id worker)))
+                          (worker-pids-root (:conf worker) (:worker-id worker))
+                          (:port worker)
+                          (:task-ids worker)
+                          (:default-shared-resources worker)
+                          (:user-shared-resources worker)
+                          ))
+
+
+(defn to-task->node+port [executor->node+port]
+  (->> executor->node+port
+       (mapcat (fn [[e node+port]] (for [t (executor-id->tasks e)] [t node+port])))
+       (into {})))
+
+(defn mk-authorization-handler [klassname conf]
+  (let [aznClass (if klassname (Class/forName klassname))
+        aznHandler (if aznClass (.newInstance aznClass))] 
+    (if aznHandler (.prepare ^IAuthorizer aznHandler conf))
+    (log-debug "authorization class name:" klassname
+                 " class:" aznClass
+                 " handler:" aznHandler)
+    aznHandler
+  )) 
+

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/daemon/drpc.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/daemon/drpc.clj b/storm-core/src/clj/org/apache/storm/daemon/drpc.clj
new file mode 100644
index 0000000..d6f77c3
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/daemon/drpc.clj
@@ -0,0 +1,274 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+
+(ns org.apache.storm.daemon.drpc
+  (:import [org.apache.storm.security.auth AuthUtils ThriftServer ThriftConnectionType ReqContext])
+  (:import [org.apache.storm.security.auth.authorizer DRPCAuthorizerBase])
+  (:import [org.apache.storm.generated DistributedRPC DistributedRPC$Iface DistributedRPC$Processor
+            DRPCRequest DRPCExecutionException DistributedRPCInvocations DistributedRPCInvocations$Iface
+            DistributedRPCInvocations$Processor])
+  (:import [java.util.concurrent Semaphore ConcurrentLinkedQueue
+            ThreadPoolExecutor ArrayBlockingQueue TimeUnit])
+  (:import [org.apache.storm.daemon Shutdownable])
+  (:import [java.net InetAddress])
+  (:import [org.apache.storm.generated AuthorizationException]
+           [org.apache.storm.utils VersionInfo])
+  (:use [org.apache.storm config log util])
+  (:use [org.apache.storm.daemon common])
+  (:use [org.apache.storm.ui helpers])
+  (:use compojure.core)
+  (:use ring.middleware.reload)
+  (:require [compojure.handler :as handler])
+  (:require [metrics.meters :refer [defmeter mark!]])
+  (:gen-class))
+
+(defmeter drpc:num-execute-http-requests)
+(defmeter drpc:num-execute-calls)
+(defmeter drpc:num-result-calls)
+(defmeter drpc:num-failRequest-calls)
+(defmeter drpc:num-fetchRequest-calls)
+(defmeter drpc:num-shutdown-calls)
+
+(def STORM-VERSION (VersionInfo/getVersion))
+
+(defn timeout-check-secs [] 5)
+
+(defn acquire-queue [queues-atom function]
+  (swap! queues-atom
+    (fn [amap]
+      (if-not (amap function)
+        (assoc amap function (ConcurrentLinkedQueue.))
+        amap)))
+  (@queues-atom function))
+
+(defn check-authorization
+  ([aclHandler mapping operation context]
+    (if (not-nil? context)
+      (log-thrift-access (.requestID context) (.remoteAddress context) (.principal context) operation))
+    (if aclHandler
+      (let [context (or context (ReqContext/context))]
+        (if-not (.permit aclHandler context operation mapping)
+          (let [principal (.principal context)
+                user (if principal (.getName principal) "unknown")]
+              (throw (AuthorizationException.
+                       (str "DRPC request '" operation "' for '"
+                            user "' user is not authorized"))))))))
+  ([aclHandler mapping operation]
+    (check-authorization aclHandler mapping operation (ReqContext/context))))
+
+;; TODO: change this to use TimeCacheMap
+(defn service-handler [conf]
+  (let [drpc-acl-handler (mk-authorization-handler (conf DRPC-AUTHORIZER) conf)
+        ctr (atom 0)
+        id->sem (atom {})
+        id->result (atom {})
+        id->start (atom {})
+        id->function (atom {})
+        id->request (atom {})
+        request-queues (atom {})
+        cleanup (fn [id] (swap! id->sem dissoc id)
+                  (swap! id->result dissoc id)
+                  (swap! id->function dissoc id)
+                  (swap! id->request dissoc id)
+                  (swap! id->start dissoc id))
+        my-ip (.getHostAddress (InetAddress/getLocalHost))
+        clear-thread (async-loop
+                       (fn []
+                         (doseq [[id start] @id->start]
+                           (when (> (time-delta start) (conf DRPC-REQUEST-TIMEOUT-SECS))
+                             (when-let [sem (@id->sem id)]
+                               (.remove (acquire-queue request-queues (@id->function id)) (@id->request id))
+                               (log-warn "Timeout DRPC request id: " id " start at " start)
+                               (.release sem))
+                             (cleanup id)))
+                         (timeout-check-secs)))]
+    (reify DistributedRPC$Iface
+      (^String execute
+        [this ^String function ^String args]
+        (mark! drpc:num-execute-calls)
+        (log-debug "Received DRPC request for " function " (" args ") at " (System/currentTimeMillis))
+        (check-authorization drpc-acl-handler
+                             {DRPCAuthorizerBase/FUNCTION_NAME function}
+                             "execute")
+        (let [id (str (swap! ctr (fn [v] (mod (inc v) 1000000000))))
+              ^Semaphore sem (Semaphore. 0)
+              req (DRPCRequest. args id)
+              ^ConcurrentLinkedQueue queue (acquire-queue request-queues function)]
+          (swap! id->start assoc id (current-time-secs))
+          (swap! id->sem assoc id sem)
+          (swap! id->function assoc id function)
+          (swap! id->request assoc id req)
+          (.add queue req)
+          (log-debug "Waiting for DRPC result for " function " " args " at " (System/currentTimeMillis))
+          (.acquire sem)
+          (log-debug "Acquired DRPC result for " function " " args " at " (System/currentTimeMillis))
+          (let [result (@id->result id)]
+            (cleanup id)
+            (log-debug "Returning DRPC result for " function " " args " at " (System/currentTimeMillis))
+            (if (instance? DRPCExecutionException result)
+              (throw result)
+              (if (nil? result)
+                (throw (DRPCExecutionException. "Request timed out"))
+                result)))))
+
+      DistributedRPCInvocations$Iface
+
+      (^void result
+        [this ^String id ^String result]
+        (mark! drpc:num-result-calls)
+        (when-let [func (@id->function id)]
+          (check-authorization drpc-acl-handler
+                               {DRPCAuthorizerBase/FUNCTION_NAME func}
+                               "result")
+          (let [^Semaphore sem (@id->sem id)]
+            (log-debug "Received result " result " for " id " at " (System/currentTimeMillis))
+            (when sem
+              (swap! id->result assoc id result)
+              (.release sem)
+              ))))
+
+      (^void failRequest
+        [this ^String id]
+        (mark! drpc:num-failRequest-calls)
+        (when-let [func (@id->function id)]
+          (check-authorization drpc-acl-handler
+                               {DRPCAuthorizerBase/FUNCTION_NAME func}
+                               "failRequest")
+          (let [^Semaphore sem (@id->sem id)]
+            (when sem
+              (swap! id->result assoc id (DRPCExecutionException. "Request failed"))
+              (.release sem)))))
+
+      (^DRPCRequest fetchRequest
+        [this ^String func]
+        (mark! drpc:num-fetchRequest-calls)
+        (check-authorization drpc-acl-handler
+                             {DRPCAuthorizerBase/FUNCTION_NAME func}
+                             "fetchRequest")
+        (let [^ConcurrentLinkedQueue queue (acquire-queue request-queues func)
+              ret (.poll queue)]
+          (if ret
+            (do (log-debug "Fetched request for " func " at " (System/currentTimeMillis))
+              ret)
+            (DRPCRequest. "" ""))))
+
+      Shutdownable
+
+      (shutdown
+        [this]
+        (mark! drpc:num-shutdown-calls)
+        (.interrupt clear-thread)))))
+
+(defn handle-request [handler]
+  (fn [request]
+    (handler request)))
+
+(defn populate-context!
+  "Populate the Storm RequestContext from an servlet-request. This should be called in each handler"
+  [http-creds-handler servlet-request]
+    (when http-creds-handler
+      (.populateContext http-creds-handler (ReqContext/context) servlet-request)))
+
+(defn webapp [handler http-creds-handler]
+  (mark! drpc:num-execute-http-requests)
+  (->
+    (routes
+      (POST "/drpc/:func" [:as {:keys [body servlet-request]} func & m]
+        (let [args (slurp body)]
+          (populate-context! http-creds-handler servlet-request)
+          (.execute handler func args)))
+      (POST "/drpc/:func/" [:as {:keys [body servlet-request]} func & m]
+        (let [args (slurp body)]
+          (populate-context! http-creds-handler servlet-request)
+          (.execute handler func args)))
+      (GET "/drpc/:func/:args" [:as {:keys [servlet-request]} func args & m]
+          (populate-context! http-creds-handler servlet-request)
+          (.execute handler func args))
+      (GET "/drpc/:func/" [:as {:keys [servlet-request]} func & m]
+          (populate-context! http-creds-handler servlet-request)
+          (.execute handler func ""))
+      (GET "/drpc/:func" [:as {:keys [servlet-request]} func & m]
+          (populate-context! http-creds-handler servlet-request)
+          (.execute handler func "")))
+    (wrap-reload '[org.apache.storm.daemon.drpc])
+    handle-request))
+
+(defn launch-server!
+  ([]
+    (log-message "Starting drpc server for storm version '" STORM-VERSION "'")
+    (let [conf (read-storm-config)
+          worker-threads (int (conf DRPC-WORKER-THREADS))
+          queue-size (int (conf DRPC-QUEUE-SIZE))
+          drpc-http-port (int (conf DRPC-HTTP-PORT))
+          drpc-port (int (conf DRPC-PORT))
+          drpc-service-handler (service-handler conf)
+          ;; requests and returns need to be on separate thread pools, since calls to
+          ;; "execute" don't unblock until other thrift methods are called. So if
+          ;; 64 threads are calling execute, the server won't accept the result
+          ;; invocations that will unblock those threads
+          handler-server (when (> drpc-port 0)
+                           (ThriftServer. conf
+                             (DistributedRPC$Processor. drpc-service-handler)
+                             ThriftConnectionType/DRPC))
+          invoke-server (ThriftServer. conf
+                          (DistributedRPCInvocations$Processor. drpc-service-handler)
+                          ThriftConnectionType/DRPC_INVOCATIONS)
+          http-creds-handler (AuthUtils/GetDrpcHttpCredentialsPlugin conf)]
+      (add-shutdown-hook-with-force-kill-in-1-sec (fn []
+                                                    (if handler-server (.stop handler-server))
+                                                    (.stop invoke-server)))
+      (log-message "Starting Distributed RPC servers...")
+      (future (.serve invoke-server))
+      (when (> drpc-http-port 0)
+        (let [app (-> (webapp drpc-service-handler http-creds-handler)
+                    requests-middleware)
+              filter-class (conf DRPC-HTTP-FILTER)
+              filter-params (conf DRPC-HTTP-FILTER-PARAMS)
+              filters-confs [{:filter-class filter-class
+                              :filter-params filter-params}]
+              https-port (int (conf DRPC-HTTPS-PORT))
+              https-ks-path (conf DRPC-HTTPS-KEYSTORE-PATH)
+              https-ks-password (conf DRPC-HTTPS-KEYSTORE-PASSWORD)
+              https-ks-type (conf DRPC-HTTPS-KEYSTORE-TYPE)
+              https-key-password (conf DRPC-HTTPS-KEY-PASSWORD)
+              https-ts-path (conf DRPC-HTTPS-TRUSTSTORE-PATH)
+              https-ts-password (conf DRPC-HTTPS-TRUSTSTORE-PASSWORD)
+              https-ts-type (conf DRPC-HTTPS-TRUSTSTORE-TYPE)
+              https-want-client-auth (conf DRPC-HTTPS-WANT-CLIENT-AUTH)
+              https-need-client-auth (conf DRPC-HTTPS-NEED-CLIENT-AUTH)]
+
+          (storm-run-jetty
+           {:port drpc-http-port
+            :configurator (fn [server]
+                            (config-ssl server
+                                        https-port
+                                        https-ks-path
+                                        https-ks-password
+                                        https-ks-type
+                                        https-key-password
+                                        https-ts-path
+                                        https-ts-password
+                                        https-ts-type
+                                        https-need-client-auth
+                                        https-want-client-auth)
+                            (config-filter server app filters-confs))})))
+      (start-metrics-reporters)
+      (when handler-server
+        (.serve handler-server)))))
+
+(defn -main []
+  (setup-default-uncaught-exception-handler)
+  (launch-server!))
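
The execute/result handshake in service-handler reduces to a semaphore per request: execute parks until the invocations side stores a result and releases the semaphore. A stand-alone sketch of that pattern (not the actual handler; names and values are illustrative):

  (import 'java.util.concurrent.Semaphore)

  (defn blocking-execute [compute-result]
    (let [sem    (Semaphore. 0)
          result (atom nil)]
      (future                      ; plays the role of the `result` thrift call
        (reset! result (compute-result))
        (.release sem))
      (.acquire sem)               ; `execute` blocks here, as in the handler above
      @result))

  (blocking-execute (fn [] (Thread/sleep 100) "42"))
  ;; => "42"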


http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/daemon/logviewer.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/daemon/logviewer.clj b/storm-core/src/clj/backtype/storm/daemon/logviewer.clj
deleted file mode 100644
index f17a63d..0000000
--- a/storm-core/src/clj/backtype/storm/daemon/logviewer.clj
+++ /dev/null
@@ -1,1199 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.daemon.logviewer
-  (:use compojure.core)
-  (:use [clojure.set :only [difference intersection]])
-  (:use [clojure.string :only [blank? split]])
-  (:use [hiccup core page-helpers form-helpers])
-  (:use [backtype.storm config util log timer])
-  (:use [backtype.storm.ui helpers])
-  (:import [backtype.storm.utils Utils VersionInfo])
-  (:import [org.slf4j LoggerFactory])
-  (:import [java.util Arrays ArrayList HashSet])
-  (:import [java.util.zip GZIPInputStream])
-  (:import [org.apache.logging.log4j LogManager])
-  (:import [org.apache.logging.log4j.core Appender LoggerContext])
-  (:import [org.apache.logging.log4j.core.appender RollingFileAppender])
-  (:import [java.io BufferedInputStream File FileFilter FileInputStream
-            InputStream InputStreamReader])
-  (:import [java.nio.file Files Path Paths DirectoryStream])
-  (:import [java.nio ByteBuffer])
-  (:import [backtype.storm.utils Utils])
-  (:import [backtype.storm.daemon DirectoryCleaner])
-  (:import [org.yaml.snakeyaml Yaml]
-           [org.yaml.snakeyaml.constructor SafeConstructor])
-  (:import [backtype.storm.ui InvalidRequestException]
-           [backtype.storm.security.auth AuthUtils])
-  (:require [backtype.storm.daemon common [supervisor :as supervisor]])
-  (:require [compojure.route :as route]
-            [compojure.handler :as handler]
-            [ring.middleware.keyword-params]
-            [ring.util.codec :as codec]
-            [ring.util.response :as resp]
-            [clojure.string :as string])
-  (:require [metrics.meters :refer [defmeter mark!]])
-  (:use [backtype.storm.daemon.common :only [start-metrics-reporters]])
-  (:gen-class))
-
-(def ^:dynamic *STORM-CONF* (read-storm-config))
-(def STORM-VERSION (VersionInfo/getVersion))
-
-(defmeter logviewer:num-log-page-http-requests)
-(defmeter logviewer:num-daemonlog-page-http-requests)
-(defmeter logviewer:num-download-log-file-http-requests)
-(defmeter logviewer:num-download-log-daemon-file-http-requests)
-(defmeter logviewer:num-list-logs-http-requests)
-
-(defn cleanup-cutoff-age-millis [conf now-millis]
-  (- now-millis (* (conf LOGVIEWER-CLEANUP-AGE-MINS) 60 1000)))
-
-(defn get-stream-for-dir
-  [^File f]
-  (try (Files/newDirectoryStream (.toPath f))
-    (catch Exception ex (log-error ex) nil)))
-
-(defn- last-modifiedtime-worker-logdir
-  "Return the last modified time for all log files in a worker's log dir.
-  Using stream rather than File.listFiles is to avoid large mem usage
-  when a directory has too many files"
-  [^File log-dir]
-  (let [^DirectoryStream stream (get-stream-for-dir log-dir)
-        dir-modified (.lastModified log-dir)
-        last-modified (try (reduce
-                        (fn [maximum path]
-                          (let [curr (.lastModified (.toFile path))]
-                            (if (> curr maximum)
-                              curr
-                              maximum)))
-                        dir-modified
-                        stream)
-                        (catch Exception ex
-                          (log-error ex) dir-modified)
-                        (finally
-                          (if (instance? DirectoryStream stream)
-                            (.close stream))))]
-    last-modified))
-
-(defn get-size-for-logdir
-  "Return the sum of lengths for all log files in a worker's log dir.
-   Using stream rather than File.listFiles is to avoid large mem usage
-   when a directory has too many files"
-  [log-dir]
-  (let [^DirectoryStream stream (get-stream-for-dir log-dir)]
-    (reduce
-      (fn [sum path]
-        (let [size (.length (.toFile path))]
-          (+ sum size)))
-      0
-      stream)))
-
-(defn mk-FileFilter-for-log-cleanup [conf now-millis]
-  (let [cutoff-age-millis (cleanup-cutoff-age-millis conf now-millis)]
-    (reify FileFilter (^boolean accept [this ^File file]
-                        (boolean (and
-                                   (not (.isFile file))
-                                   (<= (last-modifiedtime-worker-logdir file) cutoff-age-millis)))))))
-
-(defn select-dirs-for-cleanup [conf now-millis root-dir]
-  (let [file-filter (mk-FileFilter-for-log-cleanup conf now-millis)]
-    (reduce clojure.set/union
-            (sorted-set)
-            (for [^File topo-dir (.listFiles (File. root-dir))]
-              (into [] (.listFiles topo-dir file-filter))))))
-
-(defn get-topo-port-workerlog
-  "Return the path of the worker log with the format of topoId/port/worker.log.*"
-  [^File file]
-  (clojure.string/join file-path-separator
-                       (take-last 3
-                                  (split (.getCanonicalPath file) (re-pattern file-path-separator)))))
-
-(defn get-metadata-file-for-log-root-name [root-name root-dir]
-  (let [metaFile (clojure.java.io/file root-dir "metadata"
-                                       (str root-name ".yaml"))]
-    (if (.exists metaFile)
-      metaFile
-      (do
-        (log-warn "Could not find " (.getCanonicalPath metaFile)
-                  " to clean up for " root-name)
-        nil))))
-
-(defn get-metadata-file-for-wroker-logdir [logdir]
-  (let [metaFile (clojure.java.io/file logdir "worker.yaml")]
-    (if (.exists metaFile)
-      metaFile
-      (do
-        (log-warn "Could not find " (.getCanonicalPath metaFile)
-                  " to clean up for " logdir)
-        nil))))
-
-(defn get-worker-id-from-metadata-file [metaFile]
-  (get (clojure-from-yaml-file metaFile) "worker-id"))
-
-(defn get-topo-owner-from-metadata-file [metaFile]
-  (get (clojure-from-yaml-file metaFile) TOPOLOGY-SUBMITTER-USER))
-
-(defn identify-worker-log-dirs [log-dirs]
-  "return the workerid to worker-log-dir map"
-  (into {} (for [logdir log-dirs
-                 :let [metaFile (get-metadata-file-for-wroker-logdir logdir)]
-                 :when metaFile]
-             {(get-worker-id-from-metadata-file metaFile) logdir})))
-
-(defn get-alive-ids
-  [conf now-secs]
-  (->>
-    (supervisor/read-worker-heartbeats conf)
-    (remove
-      #(or (not (val %))
-           (supervisor/is-worker-hb-timed-out? now-secs
-                                               (val %)
-                                               conf)))
-    keys
-    set))
-
-(defn get-dead-worker-dirs
-  "Return a sorted set of java.io.Files that were written by workers that are
-  now dead"
-  [conf now-secs log-dirs]
-  (if (empty? log-dirs)
-    (sorted-set)
-    (let [alive-ids (get-alive-ids conf now-secs)
-          id->dir (identify-worker-log-dirs log-dirs)]
-      (apply sorted-set
-             (for [[id dir] id->dir
-                   :when (not (contains? alive-ids id))]
-               dir)))))
-
-(defn get-all-worker-dirs [^File root-dir]
-  (reduce clojure.set/union
-          (sorted-set)
-          (for [^File topo-dir (.listFiles root-dir)]
-            (into [] (.listFiles topo-dir)))))
-
-(defn get-alive-worker-dirs
-  "Return a sorted set of java.io.Files that were written by workers that are
-  now active"
-  [conf root-dir]
-  (let [alive-ids (get-alive-ids conf (current-time-secs))
-        log-dirs (get-all-worker-dirs root-dir)
-        id->dir (identify-worker-log-dirs log-dirs)]
-    (apply sorted-set
-           (for [[id dir] id->dir
-                 :when (contains? alive-ids id)]
-             (.getCanonicalPath dir)))))
-
-(defn get-all-logs-for-rootdir [^File log-dir]
-  (reduce concat
-          (for [port-dir (get-all-worker-dirs log-dir)]
-            (into [] (DirectoryCleaner/getFilesForDir port-dir)))))
-
-(defn is-active-log [^File file]
-  (re-find #"\.(log|err|out|current|yaml|pid)$" (.getName file)))
-
-(defn sum-file-size
-  "Given a sequence of Files, sum their sizes."
-  [files]
-  (reduce #(+ %1 (.length %2)) 0 files))
-
-(defn per-workerdir-cleanup!
-  "Delete the oldest files in each overloaded worker log dir"
-  [^File root-dir size ^DirectoryCleaner cleaner]
-  (dofor [worker-dir (get-all-worker-dirs root-dir)]
-    (.deleteOldestWhileTooLarge cleaner (ArrayList. [worker-dir]) size true nil)))
-
-(defn global-log-cleanup!
-  "Delete the oldest files in overloaded worker-artifacts globally"
-  [^File root-dir size ^DirectoryCleaner cleaner]
-  (let [worker-dirs (ArrayList. (get-all-worker-dirs root-dir))
-        alive-worker-dirs (HashSet. (get-alive-worker-dirs *STORM-CONF* root-dir))]
-    (.deleteOldestWhileTooLarge cleaner worker-dirs size false alive-worker-dirs)))
-
-(defn cleanup-empty-topodir!
-  "Delete the topo dir if it contains zero port dirs"
-  [^File dir]
-  (let [topodir (.getParentFile dir)]
-    (if (empty? (.listFiles topodir))
-      (rmr (.getCanonicalPath topodir)))))
-
-(defn cleanup-fn!
-  "Delete old log dirs for which the workers are no longer alive"
-  [log-root-dir]
-  (let [now-secs (current-time-secs)
-        old-log-dirs (select-dirs-for-cleanup *STORM-CONF*
-                                              (* now-secs 1000)
-                                              log-root-dir)
-        total-size (*STORM-CONF* LOGVIEWER-MAX-SUM-WORKER-LOGS-SIZE-MB)
-        per-dir-size (*STORM-CONF* LOGVIEWER-MAX-PER-WORKER-LOGS-SIZE-MB)
-        per-dir-size (min per-dir-size (* total-size 0.5))
-        cleaner (DirectoryCleaner.)
-        dead-worker-dirs (get-dead-worker-dirs *STORM-CONF*
-                                               now-secs
-                                               old-log-dirs)]
-    (log-debug "log cleanup: now=" now-secs
-               " old log dirs " (pr-str (map #(.getName %) old-log-dirs))
-               " dead worker dirs " (pr-str
-                                       (map #(.getName %) dead-worker-dirs)))
-    (dofor [dir dead-worker-dirs]
-           (let [path (.getCanonicalPath dir)]
-             (log-message "Cleaning up: Removing " path)
-             (try (rmr path)
-                  (cleanup-empty-topodir! dir)
-                  (catch Exception ex (log-error ex)))))
-    (per-workerdir-cleanup! (File. log-root-dir) (* per-dir-size (* 1024 1024)) cleaner)
-    (let [size (* total-size (* 1024 1024))]
-      (global-log-cleanup! (File. log-root-dir) size cleaner))))
-
-(defn start-log-cleaner! [conf log-root-dir]
-  (let [interval-secs (conf LOGVIEWER-CLEANUP-INTERVAL-SECS)]
-    (when interval-secs
-      (log-debug "starting log cleanup thread at interval: " interval-secs)
-      (schedule-recurring (mk-timer :thread-name "logviewer-cleanup"
-                                    :kill-fn (fn [t]
-                                               (log-error t "Error when doing logs cleanup")
-                                               (exit-process! 20 "Error when doing log cleanup")))
-                          0 ;; Start immediately.
-                          interval-secs
-                          (fn [] (cleanup-fn! log-root-dir))))))
-
-(defn- skip-bytes
-  "FileInputStream#skip may not work the first time, so ensure it successfully
-  skips the given number of bytes."
-  [^InputStream stream n]
-  (loop [skipped 0]
-    (let [skipped (+ skipped (.skip stream (- n skipped)))]
-      (if (< skipped n) (recur skipped)))))
-
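-;; True when the file name matches the worker log naming pattern (worker.log*).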
-(defn logfile-matches-filter?
-  [log-file-name]
-  (let [regex-string (str "worker.log.*")
-        regex-pattern (re-pattern regex-string)]
-    (not= (re-seq regex-pattern (.toString log-file-name)) nil)))
-
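-;; Reads a page of a (possibly gzipped) log file: the two-argument form returns the
-;; last tail bytes, the three-argument form returns up to length bytes from start.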
-(defn page-file
-  ([path tail]
-    (let [zip-file? (.endsWith path ".gz")
-          flen (if zip-file? (Utils/zipFileSize (clojure.java.io/file path)) (.length (clojure.java.io/file path)))
-          skip (- flen tail)]
-      (page-file path skip tail)))
-  ([path start length]
-    (let [zip-file? (.endsWith path ".gz")
-          flen (if zip-file? (Utils/zipFileSize (clojure.java.io/file path)) (.length (clojure.java.io/file path)))]
-      (with-open [input (if zip-file? (GZIPInputStream. (FileInputStream. path)) (FileInputStream. path))
-                  output (java.io.ByteArrayOutputStream.)]
-        (if (>= start flen)
-          (throw
-            (InvalidRequestException. "Cannot start past the end of the file")))
-        (if (> start 0) (skip-bytes input start))
-        (let [buffer (make-array Byte/TYPE 1024)]
-          (loop []
-            (when (< (.size output) length)
-              (let [size (.read input buffer 0 (min 1024 (- length (.size output))))]
-                (when (pos? size)
-                  (.write output buffer 0 size)
-                  (recur)))))
-        (.toString output))))))
-
-(defn get-log-user-group-whitelist [fname]
-  (let [wl-file (get-log-metadata-file fname)
-        m (clojure-from-yaml-file wl-file)]
-    (if (not-nil? m)
-      (do
-        (let [user-wl (.get m LOGS-USERS)
-              user-wl (if user-wl user-wl [])
-              group-wl (.get m LOGS-GROUPS)
-              group-wl (if group-wl group-wl [])]
-          [user-wl group-wl]))
-        nil)))
-
-(def igroup-mapper (AuthUtils/GetGroupMappingServiceProviderPlugin *STORM-CONF*))
-(defn user-groups
-  [user]
-  (if (blank? user) [] (.getGroups igroup-mapper user)))
-
-(defn authorized-log-user? [user fname conf]
-  (if (or (blank? user) (blank? fname) (nil? (get-log-user-group-whitelist fname)))
-    nil
-    (let [groups (user-groups user)
-          [user-wl group-wl] (get-log-user-group-whitelist fname)
-          logs-users (concat (conf LOGS-USERS)
-                             (conf NIMBUS-ADMINS)
-                             user-wl)
-          logs-groups (concat (conf LOGS-GROUPS)
-                              group-wl)]
-       (or (some #(= % user) logs-users)
-           (< 0 (.size (intersection (set groups) (set logs-groups))))))))
-
-(defn log-root-dir
-  "Given an appender name, as configured, get the parent directory of the appender's log file.
-   Note that if anything goes wrong, this throws a RuntimeException."
-  [appender-name]
-  (let [appender (.getAppender (.getConfiguration (LogManager/getContext)) appender-name)]
-    (if (and appender-name appender (instance? RollingFileAppender appender))
-      (.getParent (File. (.getFileName appender)))
-      (throw
-       (RuntimeException. "Log viewer could not find the configured appender, or the appender is not a RollingFileAppender. Please check that the appender names configured in storm and in log4j agree.")))))
-
-(defnk to-btn-link
-  "Create a link that is formatted like a button"
-  [url text :enabled true]
-  [:a {:href (java.net.URI. url)
-       :class (str "btn btn-default " (if enabled "enabled" "disabled"))} text])
-
-(defn search-file-form [fname]
-  [[:form {:action "logviewer_search.html" :id "search-box"}
-    "Search this file:"
-    [:input {:type "text" :name "search"}]
-    [:input {:type "hidden" :name "file" :value fname}]
-    [:input {:type "submit" :value "Search"}]]])
-
-(defn log-file-selection-form [log-files type]
-  [[:form {:action type :id "list-of-files"}
-    (drop-down "file" log-files)
-    [:input {:type "submit" :value "Switch file"}]]])
-
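-;; Builds the Prev/First/Last/Next paging buttons for stepping through a log file.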
-(defn pager-links [fname start length file-size]
-  (let [prev-start (max 0 (- start length))
-        next-start (if (> file-size 0)
-                     (min (max 0 (- file-size length)) (+ start length))
-                     (+ start length))]
-    [[:div
-      (concat
-          [(to-btn-link (url "/log"
-                          {:file fname
-                           :start (max 0 (- start length))
-                           :length length})
-                          "Prev" :enabled (< prev-start start))]
-          [(to-btn-link (url "/log"
-                           {:file fname
-                            :start 0
-                            :length length}) "First")]
-          [(to-btn-link (url "/log"
-                           {:file fname
-                            :length length})
-                        "Last")]
-          [(to-btn-link (url "/log"
-                          {:file fname
-                           :start (min (max 0 (- file-size length))
-                                       (+ start length))
-                           :length length})
-                        "Next" :enabled (> next-start start))])]]))
-
-(defn- download-link [fname]
-  [[:p (link-to (url-format "/download/%s" fname) "Download Full File")]])
-
-(defn- daemon-download-link [fname]
-  [[:p (link-to (url-format "/daemondownload/%s" fname) "Download Full File")]])
-
-(defn- is-txt-file [fname]
-  (re-find #"\.(log.*|txt|yaml|pid)$" fname))
-
-(def default-bytes-per-page 51200)
-
-(defn log-page [fname start length grep user root-dir]
-  (if (or (blank? (*STORM-CONF* UI-FILTER))
-          (authorized-log-user? user fname *STORM-CONF*))
-    (let [file (.getCanonicalFile (File. root-dir fname))
-          path (.getCanonicalPath file)
-          zip-file? (.endsWith path ".gz")
-          topo-dir (.getParentFile (.getParentFile file))]
-      (if (and (.exists file)
-               (= (.getCanonicalFile (File. root-dir))
-                  (.getParentFile topo-dir)))
-        (let [file-length (if zip-file? (Utils/zipFileSize (clojure.java.io/file path)) (.length (clojure.java.io/file path)))
-              log-files (reduce clojure.set/union
-                          (sorted-set)
-                          (for [^File port-dir (.listFiles topo-dir)]
-                            (into [] (filter #(.isFile %) (DirectoryCleaner/getFilesForDir port-dir))))) ;all types of files included
-              files-str (for [file log-files]
-                          (get-topo-port-workerlog file))
-              reordered-files-str (conj (filter #(not= fname %) files-str) fname)
-               length (if length
-                       (min 10485760 length)
-                       default-bytes-per-page)
-              log-string (escape-html
-                           (if (is-txt-file fname)
-                             (if start
-                               (page-file path start length)
-                               (page-file path length))
-                             "This is a binary file and cannot be displayed. You may download the full file."))
-              start (or start (- file-length length))]
-          (if grep
-            (html [:pre#logContent
-                   (if grep
-                     (->> (.split log-string "\n")
-                          (filter #(.contains % grep))
-                          (string/join "\n"))
-                     log-string)])
-            (let [pager-data (if (is-txt-file fname) (pager-links fname start length file-length) nil)]
-              (html (concat (search-file-form fname)
-                            (log-file-selection-form reordered-files-str "log") ; list all files for this topology
-                            pager-data
-                            (download-link fname)
-                            [[:pre#logContent log-string]]
-                            pager-data)))))
-        (-> (resp/response "Page not found")
-            (resp/status 404))))
-    (if (nil? (get-log-user-group-whitelist fname))
-      (-> (resp/response "Page not found")
-        (resp/status 404))
-      (unauthorized-user-html user))))
-
-(defn daemonlog-page [fname start length grep user root-dir]
-  (let [file (.getCanonicalFile (File. root-dir fname))
-        file-length (.length file)
-        path (.getCanonicalPath file)
-        zip-file? (.endsWith path ".gz")]
-    (if (and (= (.getCanonicalFile (File. root-dir))
-                (.getParentFile file))
-             (.exists file))
-      (let [file-length (if zip-file? (Utils/zipFileSize (clojure.java.io/file path)) (.length (clojure.java.io/file path)))
-            length (if length
-                     (min 10485760 length)
-                     default-bytes-per-page)
-            log-files (into [] (filter #(.isFile %) (.listFiles (File. root-dir)))) ;all types of files included
-            files-str (for [file log-files]
-                        (.getName file))
-            reordered-files-str (conj (filter #(not= fname %) files-str) fname)
-            log-string (escape-html
-                         (if (is-txt-file fname)
-                           (if start
-                             (page-file path start length)
-                             (page-file path length))
-                           "This is a binary file and cannot be displayed. You may download the full file."))
-            start (or start (- file-length length))]
-        (if grep
-          (html [:pre#logContent
-                 (if grep
-                   (->> (.split log-string "\n")
-                        (filter #(.contains % grep))
-                        (string/join "\n"))
-                   log-string)])
-          (let [pager-data (if (is-txt-file fname) (pager-links fname start length file-length) nil)]
-            (html (concat (log-file-selection-form reordered-files-str "daemonlog") ; list all daemon logs
-                          pager-data
-                          (daemon-download-link fname)
-                          [[:pre#logContent log-string]]
-                          pager-data)))))
-      (-> (resp/response "Page not found")
-          (resp/status 404)))))
-
-(defn download-log-file [fname req resp user ^String root-dir]
-  (let [file (.getCanonicalFile (File. root-dir fname))]
-    (if (.exists file)
-      (if (or (blank? (*STORM-CONF* UI-FILTER))
-              (authorized-log-user? user fname *STORM-CONF*))
-        (-> (resp/response file)
-            (resp/content-type "application/octet-stream"))
-        (unauthorized-user-html user))
-      (-> (resp/response "Page not found")
-          (resp/status 404)))))
-
-(def grep-max-search-size 1024)
-(def grep-buf-size 2048)
-(def grep-context-size 128)
-
-(defn logviewer-port
-  []
-  (int (*STORM-CONF* LOGVIEWER-PORT)))
-
-(defn url-to-match-centered-in-log-page
-  [needle fname offset port]
-  (let [host (local-hostname)
-        port (logviewer-port)
-        fname (clojure.string/join file-path-separator (take-last 3 (split fname (re-pattern file-path-separator))))]
-    (url (str "http://" host ":" port "/log")
-      {:file fname
-       :start (max 0
-                (- offset
-                  (int (/ default-bytes-per-page 2))
-                  (int (/ (alength needle) -2)))) ;; negative divisor: adds back half the needle length to center the match
-       :length default-bytes-per-page})))
-
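-;; Builds a single search-match entry: byte offset, before/after context strings,
-;; the matched string, and a logviewer URL centered on the match.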
-(defnk mk-match-data
-  [^bytes needle ^ByteBuffer haystack haystack-offset file-offset fname
-   :before-bytes nil :after-bytes nil]
-  (let [url (url-to-match-centered-in-log-page needle
-              fname
-              file-offset
-              (*STORM-CONF* LOGVIEWER-PORT))
-        haystack-bytes (.array haystack)
-        before-string (if (>= haystack-offset grep-context-size)
-                        (String. haystack-bytes
-                          (- haystack-offset grep-context-size)
-                          grep-context-size
-                          "UTF-8")
-                        (let [num-desired (max 0 (- grep-context-size
-                                                   haystack-offset))
-                              before-size (if before-bytes
-                                            (alength before-bytes)
-                                            0)
-                              num-expected (min before-size num-desired)]
-                          (if (pos? num-expected)
-                            (str (String. before-bytes
-                                   (- before-size num-expected)
-                                   num-expected
-                                   "UTF-8")
-                              (String. haystack-bytes
-                                0
-                                haystack-offset
-                                "UTF-8"))
-                            (String. haystack-bytes
-                              0
-                              haystack-offset
-                              "UTF-8"))))
-        after-string (let [needle-size (alength needle)
-                           after-offset (+ haystack-offset needle-size)
-                           haystack-size (.limit haystack)]
-                       (if (< (+ after-offset grep-context-size) haystack-size)
-                         (String. haystack-bytes
-                           after-offset
-                           grep-context-size
-                           "UTF-8")
-                         (let [num-desired (- grep-context-size
-                                             (- haystack-size after-offset))
-                               after-size (if after-bytes
-                                            (alength after-bytes)
-                                            0)
-                               num-expected (min after-size num-desired)]
-                           (if (pos? num-expected)
-                             (str (String. haystack-bytes
-                                    after-offset
-                                    (- haystack-size after-offset)
-                                    "UTF-8")
-                               (String. after-bytes 0 num-expected "UTF-8"))
-                             (String. haystack-bytes
-                               after-offset
-                               (- haystack-size after-offset)
-                               "UTF-8")))))]
-    {"byteOffset" file-offset
-     "beforeString" before-string
-     "afterString" after-string
-     "matchString" (String. needle "UTF-8")
-     "logviewerURL" url}))
-
-(defn- try-read-ahead!
-  "Tries once to read ahead in the stream to fill the context and resets the
-  stream to its position before the call."
-  [^BufferedInputStream stream haystack offset file-len bytes-read]
-  (let [num-expected (min (- file-len bytes-read)
-                       grep-context-size)
-        after-bytes (byte-array num-expected)]
-    (.mark stream num-expected)
-    ;; Only try reading once.
-    (.read stream after-bytes 0 num-expected)
-    (.reset stream)
-    after-bytes))
-
-(defn offset-of-bytes
-  "Searches a given byte array for a match of a sub-array of bytes.  Returns
-  the offset to the byte that matches, or -1 if no match was found."
-  [^bytes buf ^bytes value init-offset]
-  {:pre [(> (alength value) 0)
-         (not (neg? init-offset))]}
-  (loop [offset init-offset
-         candidate-offset init-offset
-         val-offset 0]
-    (if-not (pos? (- (alength value) val-offset))
-      ;; Found
-      candidate-offset
-      (if (>= offset (alength buf))
-        ;; We ran out of buffer for the search.
-        -1
-        (if (not= (aget value val-offset) (aget buf offset))
-          ;; The match at this candidate offset failed, so start over with the
-          ;; next candidate byte from the buffer.
-          (let [new-offset (inc candidate-offset)]
-            (recur new-offset new-offset 0))
-          ;; So far it matches.  Keep going...
-          (recur (inc offset) candidate-offset (inc val-offset)))))))
-
-(defn- buffer-substring-search!
-  "As the file is read into a buffer, 1/2 the buffer's size at a time, we
-  search the buffer for matches of the substring and return a list of zero or
-  more matches."
-  [file file-len offset-to-buf init-buf-offset stream bytes-skipped
-   bytes-read ^ByteBuffer haystack ^bytes needle initial-matches num-matches
-   ^bytes before-bytes]
-  (loop [buf-offset init-buf-offset
-         matches initial-matches]
-    (let [offset (offset-of-bytes (.array haystack) needle buf-offset)]
-      (if (and (< (count matches) num-matches) (not (neg? offset)))
-        (let [file-offset (+ offset-to-buf offset)
-              bytes-needed-after-match (- (.limit haystack)
-                                         grep-context-size
-                                         (alength needle))
-              before-arg (if (< offset grep-context-size) before-bytes)
-              after-arg (if (> offset bytes-needed-after-match)
-                          (try-read-ahead! stream
-                            haystack
-                            offset
-                            file-len
-                            bytes-read))]
-          (recur (+ offset (alength needle))
-            (conj matches
-              (mk-match-data needle
-                haystack
-                offset
-                file-offset
-                (.getCanonicalPath file)
-                :before-bytes before-arg
-                :after-bytes after-arg))))
-        (let [before-str-to-offset (min (.limit haystack)
-                                     grep-max-search-size)
-              before-str-from-offset (max 0 (- before-str-to-offset
-                                              grep-context-size))
-              new-before-bytes (Arrays/copyOfRange (.array haystack)
-                                 before-str-from-offset
-                                 before-str-to-offset)
-              ;; It's OK if new-byte-offset is negative.  This is normal if
-              ;; we are out of bytes to read from a small file.
-              new-byte-offset (if (>= (count matches) num-matches)
-                                (+ (get (last matches) "byteOffset")
-                                  (alength needle))
-                                (+ bytes-skipped
-                                  bytes-read
-                                  (- grep-max-search-size)))]
-          [matches new-byte-offset new-before-bytes])))))
-
-(defn- mk-grep-response
-  "This response data only includes a next byte offset if there is more of the
-  file to read."
-  [search-bytes offset matches next-byte-offset]
-  (merge {"searchString" (String. search-bytes "UTF-8")
-          "startByteOffset" offset
-          "matches" matches}
-    (and next-byte-offset {"nextByteOffset" next-byte-offset})))
-
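-;; Slides the search window: copies the second half of the buffer over the first,
-;; zeroes the second half, and refills it with fresh bytes from the stream.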
-(defn rotate-grep-buffer!
-  [^ByteBuffer buf ^BufferedInputStream stream total-bytes-read file file-len]
-  (let [buf-arr (.array buf)]
-    ;; Copy the 2nd half of the buffer to the first half.
-    (System/arraycopy buf-arr
-      grep-max-search-size
-      buf-arr
-      0
-      grep-max-search-size)
-    ;; Zero-out the 2nd half to prevent accidental matches.
-    (Arrays/fill buf-arr
-      grep-max-search-size
-      (count buf-arr)
-      (byte 0))
-    ;; Fill the 2nd half with new bytes from the stream.
-    (let [bytes-read (.read stream
-                       buf-arr
-                       grep-max-search-size
-                       (min file-len grep-max-search-size))]
-      (.limit buf (+ grep-max-search-size bytes-read))
-      (swap! total-bytes-read + bytes-read))))
-
-(defnk substring-search
-  "Searches for a substring in a log file, starting at the given offset,
-  returning the given number of matches, surrounded by the given number of
-  context lines.  Other information is included to be useful for progressively
-  searching through a file for display in a UI. The search string must be
-  grep-max-search-size bytes or fewer when encoded as UTF-8."
-  [file ^String search-string :num-matches 10 :start-byte-offset 0]
-  {:pre [(not (empty? search-string))
-         (<= (count (.getBytes search-string "UTF-8")) grep-max-search-size)]}
-  (let [zip-file? (.endsWith (.getName file) ".gz")
-        f-input-stream (FileInputStream. file)
-        gzipped-input-stream (if zip-file?
-                               (GZIPInputStream. f-input-stream)
-                               f-input-stream)
-        stream ^BufferedInputStream (BufferedInputStream.
-                                      gzipped-input-stream)
-        file-len (if zip-file? (Utils/zipFileSize file) (.length file))
-        buf ^ByteBuffer (ByteBuffer/allocate grep-buf-size)
-        buf-arr ^bytes (.array buf)
-        string nil
-        total-bytes-read (atom 0)
-        matches []
-        search-bytes ^bytes (.getBytes search-string "UTF-8")
-        num-matches (or num-matches 10)
-        start-byte-offset (or start-byte-offset 0)]
-    ;; Start at the part of the log file we are interested in.
-    ;; Allow searching when start-byte-offset == file-len so it doesn't blow up on 0-length files
-    (if (> start-byte-offset file-len)
-      (throw
-        (InvalidRequestException. "Cannot search past the end of the file")))
-    (when (> start-byte-offset 0)
-      (skip-bytes stream start-byte-offset))
-    (java.util.Arrays/fill buf-arr (byte 0))
-    (let [bytes-read (.read stream buf-arr 0 (min file-len grep-buf-size))]
-      (.limit buf bytes-read)
-      (swap! total-bytes-read + bytes-read))
-    (loop [initial-matches []
-           init-buf-offset 0
-           byte-offset start-byte-offset
-           before-bytes nil]
-      (let [[matches new-byte-offset new-before-bytes]
-            (buffer-substring-search! file
-              file-len
-              byte-offset
-              init-buf-offset
-              stream
-              start-byte-offset
-              @total-bytes-read
-              buf
-              search-bytes
-              initial-matches
-              num-matches
-              before-bytes)]
-        (if (and (< (count matches) num-matches)
-              (< (+ @total-bytes-read start-byte-offset) file-len))
-          (let [;; The start index is positioned to find any possible
-                ;; occurrence of the search string that did not quite fit in the
-                ;; buffer on the previous read.
-                new-buf-offset (- (min (.limit ^ByteBuffer buf)
-                                    grep-max-search-size)
-                                 (alength search-bytes))]
-            (rotate-grep-buffer! buf stream total-bytes-read file file-len)
-            (when (< @total-bytes-read 0)
-              (throw (InvalidRequestException. "Cannot search past the end of the file")))
-            (recur matches
-              new-buf-offset
-              new-byte-offset
-              new-before-bytes))
-          (mk-grep-response search-bytes
-            start-byte-offset
-            matches
-            (if-not (and (< (count matches) num-matches)
-                      (>= @total-bytes-read file-len))
-              (let [next-byte-offset (+ (get (last matches)
-                                          "byteOffset")
-                                       (alength search-bytes))]
-                (if (> file-len next-byte-offset)
-                  next-byte-offset)))))))))
-
-(defn- try-parse-int-param
-  [nam value]
-  (try
-    (Integer/parseInt value)
-    (catch java.lang.NumberFormatException e
-      (->
-        (str "Could not parse " nam " to an integer")
-        (InvalidRequestException. e)
-        throw))))
-
-(defn search-log-file
-  [fname user ^String root-dir search num-matches offset callback origin]
-  (let [file (.getCanonicalFile (File. root-dir fname))]
-    (if (.exists file)
-      (if (or (blank? (*STORM-CONF* UI-FILTER))
-            (authorized-log-user? user fname *STORM-CONF*))
-        (let [num-matches-int (if num-matches
-                                (try-parse-int-param "num-matches"
-                                  num-matches))
-              offset-int (if offset
-                           (try-parse-int-param "start-byte-offset" offset))]
-          (try
-            (if (and (not (empty? search))
-                  (<= (count (.getBytes search "UTF-8")) grep-max-search-size))
-              (json-response
-                (substring-search file
-                  search
-                  :num-matches num-matches-int
-                  :start-byte-offset offset-int)
-                callback
-                :headers {"Access-Control-Allow-Origin" origin
-                          "Access-Control-Allow-Credentials" "true"})
-              (throw
-                (InvalidRequestException.
-                  (str "Search substring must be between 1 and 1024 UTF-8 "
-                    "bytes in size (inclusive)"))))
-            (catch Exception ex
-              (json-response (exception->json ex) callback :status 500))))
-        (json-response (unauthorized-user-json user) callback :status 401))
-      (json-response {"error" "Not Found"
-                      "errorMessage" "The file was not found on this node."}
-        callback
-        :status 404))))
-
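-;; Walks the given log files in order, collecting up to n matches across files;
-;; returns the starting file offset, the search string, and the per-file matches.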
-(defn find-n-matches [logs n file-offset offset search]
-  (let [logs (drop file-offset logs)
-        wrap-matches-fn (fn [matches]
-                          {"fileOffset" file-offset
-                           "searchString" search
-                           "matches" matches})]
-    (loop [matches []
-           logs logs
-           offset offset
-           file-offset file-offset
-           match-count 0]
-      (if (empty? logs)
-        (wrap-matches-fn matches)
-        (let [these-matches (try
-                              (log-debug "Looking through " (first logs))
-                              (substring-search (first logs)
-                                search
-                                :num-matches (- n match-count)
-                                :start-byte-offset offset)
-                              (catch InvalidRequestException e
-                                (log-error e "Can't search past end of file.")
-                                {}))
-              file-name (get-topo-port-workerlog (first logs))
-              new-matches (conj matches
-                            (merge these-matches
-                              { "fileName" file-name
-                                "port" (first (take-last 2 (split (.getCanonicalPath (first logs)) (re-pattern file-path-separator))))}))
-              new-count (+ match-count (count (these-matches "matches")))]
-          (if (empty? these-matches)
-            (recur matches (rest logs) 0 (+ file-offset 1) match-count)
-            (if (>= new-count n)
-              (wrap-matches-fn new-matches)
-              (recur new-matches (rest logs) 0 (+ file-offset 1) new-count))))))))
-
-(defn logs-for-port
-  "Get the filtered, authorized, sorted log files for a port."
-  [user port-dir]
-  (let [filter-authorized-fn (fn [user logs]
-                               (filter #(or
-                                          (blank? (*STORM-CONF* UI-FILTER))
-                                          (authorized-log-user? user (get-topo-port-workerlog %) *STORM-CONF*)) logs))]
-    (sort #(compare (.lastModified %2) (.lastModified %1))
-      (filter-authorized-fn
-        user
-        (filter #(re-find worker-log-filename-pattern (.getName %)) (DirectoryCleaner/getFilesForDir port-dir))))))
-
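-;; Searches a whole topology's worker logs: all ports or a single port, and only the
-;; most recent log file per port unless search-archived? is set.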
-(defn deep-search-logs-for-topology
-  [topology-id user ^String root-dir search num-matches port file-offset offset search-archived? callback origin]
-  (json-response
-    (if (or (not search) (not (.exists (File. (str root-dir file-path-separator topology-id)))))
-      []
-      (let [file-offset (if file-offset (Integer/parseInt file-offset) 0)
-            offset (if offset (Integer/parseInt offset) 0)
-            num-matches (or (Integer/parseInt num-matches) 1)
-            port-dirs (vec (.listFiles (File. (str root-dir file-path-separator topology-id))))
-            logs-for-port-fn (partial logs-for-port user)]
-        (if (or (not port) (= "*" port))
-          ;; Check for all ports
-          (let [filtered-logs (filter (comp not empty?) (map logs-for-port-fn port-dirs))]
-            (if search-archived?
-              (map #(find-n-matches % num-matches 0 0 search)
-                filtered-logs)
-              (map #(find-n-matches % num-matches 0 0 search)
-                (map (comp vector first) filtered-logs))))
-          ;; Check just the one port
-          (if (not (contains? (into #{} (map str (*STORM-CONF* SUPERVISOR-SLOTS-PORTS))) port))
-            []
-            (let [port-dir (File. (str root-dir file-path-separator topology-id file-path-separator port))]
-              (if (or (not (.exists port-dir)) (empty? (logs-for-port user port-dir)))
-                []
-                (let [filtered-logs (logs-for-port user port-dir)]
-                  (if search-archived?
-                    (find-n-matches filtered-logs num-matches file-offset offset search)
-                    (find-n-matches [(first filtered-logs)] num-matches 0 offset search)))))))))
-    callback
-    :headers {"Access-Control-Allow-Origin" origin
-              "Access-Control-Allow-Credentials" "true"}))
-
-(defn log-template
-  ([body] (log-template body nil nil))
-  ([body fname user]
-    (html4
-     [:head
-      [:title (str (escape-html fname) " - Storm Log Viewer")]
-      (include-css "/css/bootstrap-3.3.1.min.css")
-      (include-css "/css/jquery.dataTables.1.10.4.min.css")
-      (include-css "/css/style.css")
-      ]
-     [:body
-      (concat
-        (when (not (blank? user)) [[:div.ui-user [:p "User: " user]]])
-        [[:div.ui-note [:p "Note: the drop-down list shows at most 1024 files for each worker directory."]]]
-        [[:h3 (escape-html fname)]]
-        (seq body))
-      ])))
-
-(def http-creds-handler (AuthUtils/GetUiHttpCredentialsPlugin *STORM-CONF*))
-
-(defn- parse-long-from-map [m k]
-  (try
-    (Long/parseLong (k m))
-    (catch NumberFormatException ex
-      (throw (InvalidRequestException.
-               (str "Could not make an integer out of the query parameter '"
-                    (name k) "'")
-               ex)))))
-
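-;; Lists worker log file names, optionally narrowed to a single topology and/or port.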
-(defn list-log-files
-  [user topoId port log-root callback origin]
-  (let [file-results
-        (if (nil? topoId)
-          (if (nil? port)
-            (get-all-logs-for-rootdir (File. log-root))
-            (reduce concat
-              (for [topo-dir (.listFiles (File. log-root))]
-                (reduce concat
-                  (for [port-dir (.listFiles topo-dir)]
-                    (if (= (str port) (.getName port-dir))
-                      (into [] (DirectoryCleaner/getFilesForDir port-dir))))))))
-          (if (nil? port)
-            (let [topo-dir (File. (str log-root file-path-separator topoId))]
-              (if (.exists topo-dir)
-                (reduce concat
-                  (for [port-dir (.listFiles topo-dir)]
-                    (into [] (DirectoryCleaner/getFilesForDir port-dir))))
-                []))
-            (let [port-dir (get-worker-dir-from-root log-root topoId port)]
-              (if (.exists port-dir)
-                (into [] (DirectoryCleaner/getFilesForDir port-dir))
-                []))))
-        file-strs (sort (for [file file-results]
-                          (get-topo-port-workerlog file)))]
-    (json-response file-strs
-      callback
-      :headers {"Access-Control-Allow-Origin" origin
-                "Access-Control-Allow-Credentials" "true"})))
-
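-;; Returns the names of profiler dump files (.txt, .jfr, .bin) found in the given directory.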
-(defn get-profiler-dump-files
-  [dir]
-  (filter (comp not nil?)
-        (for [f (DirectoryCleaner/getFilesForDir dir)]
-          (let [name (.getName f)]
-            (if (or
-                  (.endsWith name ".txt")
-                  (.endsWith name ".jfr")
-                  (.endsWith name ".bin"))
-              (.getName f))))))
-
-(defroutes log-routes
-  (GET "/log" [:as req & m]
-    (try
-      (mark! logviewer:num-log-page-http-requests)
-      (let [servlet-request (:servlet-request req)
-            log-root (:log-root req)
-            user (.getUserName http-creds-handler servlet-request)
-            start (if (:start m) (parse-long-from-map m :start))
-            length (if (:length m) (parse-long-from-map m :length))
-            file (url-decode (:file m))]
-        (log-template (log-page file start length (:grep m) user log-root)
-          file user))
-      (catch InvalidRequestException ex
-        (log-error ex)
-        (ring-response-from-exception ex))))
-  (GET "/dumps/:topo-id/:host-port/:filename"
-       [:as {:keys [servlet-request servlet-response log-root]} topo-id host-port filename &m]
-     (let [user (.getUserName http-creds-handler servlet-request)
-           port (second (split host-port #":"))
-           dir (File. (str log-root
-                           file-path-separator
-                           topo-id
-                           file-path-separator
-                           port))
-           file (File. (str log-root
-                            file-path-separator
-                            topo-id
-                            file-path-separator
-                            port
-                            file-path-separator
-                            filename))]
-       (if (and (.exists dir) (.exists file))
-         (if (or (blank? (*STORM-CONF* UI-FILTER))
-               (authorized-log-user? user 
-                                     (str topo-id file-path-separator port file-path-separator "worker.log")
-                                     *STORM-CONF*))
-           (-> (resp/response file)
-               (resp/content-type "application/octet-stream"))
-           (unauthorized-user-html user))
-         (-> (resp/response "Page not found")
-           (resp/status 404)))))
-  (GET "/dumps/:topo-id/:host-port"
-       [:as {:keys [servlet-request servlet-response log-root]} topo-id host-port &m]
-     (let [user (.getUserName http-creds-handler servlet-request)
-           port (second (split host-port #":"))
-           dir (File. (str log-root
-                           file-path-separator
-                           topo-id
-                           file-path-separator
-                           port))]
-       (if (.exists dir)
-         (if (or (blank? (*STORM-CONF* UI-FILTER))
-               (authorized-log-user? user 
-                                     (str topo-id file-path-separator port file-path-separator "worker.log")
-                                     *STORM-CONF*))
-           (html4
-             [:head
-              [:title "File Dumps - Storm Log Viewer"]
-              (include-css "/css/bootstrap-3.3.1.min.css")
-              (include-css "/css/jquery.dataTables.1.10.4.min.css")
-              (include-css "/css/style.css")]
-             [:body
-              [:ul
-               (for [file (get-profiler-dump-files dir)]
-                 [:li
-                  [:a {:href (str "/dumps/" topo-id "/" host-port "/" file)} file ]])]])
-           (unauthorized-user-html user))
-         (-> (resp/response "Page not found")
-           (resp/status 404)))))
-  (GET "/daemonlog" [:as req & m]
-    (try
-      (mark! logviewer:num-daemonlog-page-http-requests)
-      (let [servlet-request (:servlet-request req)
-            daemonlog-root (:daemonlog-root req)
-            user (.getUserName http-creds-handler servlet-request)
-            start (if (:start m) (parse-long-from-map m :start))
-            length (if (:length m) (parse-long-from-map m :length))
-            file (url-decode (:file m))]
-        (log-template (daemonlog-page file start length (:grep m) user daemonlog-root)
-          file user))
-      (catch InvalidRequestException ex
-        (log-error ex)
-        (ring-response-from-exception ex))))
-  (GET "/download/:file" [:as {:keys [servlet-request servlet-response log-root]} file & m]
-    (try
-      (mark! logviewer:num-download-log-file-http-requests)
-      (let [user (.getUserName http-creds-handler servlet-request)]
-        (download-log-file file servlet-request servlet-response user log-root))
-      (catch InvalidRequestException ex
-        (log-error ex)
-        (ring-response-from-exception ex))))
-  (GET "/daemondownload/:file" [:as {:keys [servlet-request servlet-response daemonlog-root]} file & m]
-    (try
-      (mark! logviewer:num-download-log-daemon-file-http-requests)
-      (let [user (.getUserName http-creds-handler servlet-request)]
-        (download-log-file file servlet-request servlet-response user daemonlog-root))
-      (catch InvalidRequestException ex
-        (log-error ex)
-        (ring-response-from-exception ex))))
-  (GET "/search/:file" [:as {:keys [servlet-request servlet-response log-root]} file & m]
-    ;; We do not use servlet-response here, but do not remove it from the
-    ;; :keys list, or this rule could stop working when an authentication
-    ;; filter is configured.
-    (try
-      (let [user (.getUserName http-creds-handler servlet-request)]
-        (search-log-file (url-decode file)
-          user
-          log-root
-          (:search-string m)
-          (:num-matches m)
-          (:start-byte-offset m)
-          (:callback m)
-          (.getHeader servlet-request "Origin")))
-      (catch InvalidRequestException ex
-        (log-error ex)
-        (json-response (exception->json ex) (:callback m) :status 400))))
-  (GET "/deepSearch/:topo-id" [:as {:keys [servlet-request servlet-response log-root]} topo-id & m]
-    ;; We do not use servlet-response here, but do not remove it from the
-    ;; :keys list, or this rule could stop working when an authentication
-    ;; filter is configured.
-    (try
-      (let [user (.getUserName http-creds-handler servlet-request)]
-        (deep-search-logs-for-topology topo-id
-          user
-          log-root
-          (:search-string m)
-          (:num-matches m)
-          (:port m)
-          (:start-file-offset m)
-          (:start-byte-offset m)
-          (:search-archived m)
-          (:callback m)
-          (.getHeader servlet-request "Origin")))
-      (catch InvalidRequestException ex
-        (log-error ex)
-        (json-response (exception->json ex) (:callback m) :status 400))))
-  (GET "/searchLogs" [:as req & m]
-    (try
-      (let [servlet-request (:servlet-request req)
-            user (.getUserName http-creds-handler servlet-request)]
-        (list-log-files user
-          (:topoId m)
-          (:port m)
-          (:log-root req)
-          (:callback m)
-          (.getHeader servlet-request "Origin")))
-      (catch InvalidRequestException ex
-        (log-error ex)
-        (json-response (exception->json ex) (:callback m) :status 400))))
-  (GET "/listLogs" [:as req & m]
-    (try
-      (mark! logviewer:num-list-logs-http-requests)
-      (let [servlet-request (:servlet-request req)
-            user (.getUserName http-creds-handler servlet-request)]
-        (list-log-files user
-          (:topoId m)
-          (:port m)
-          (:log-root req)
-          (:callback m)
-          (.getHeader servlet-request "Origin")))
-      (catch InvalidRequestException ex
-        (log-error ex)
-        (json-response (exception->json ex) (:callback m) :status 400))))
-  (route/resources "/")
-  (route/not-found "Page not found"))
-
-(defn conf-middleware
-  "For passing the storm configuration with each request."
-  [app log-root daemonlog-root]
-  (fn [req]
-    (app (assoc req :log-root log-root :daemonlog-root daemonlog-root))))
-
-(defn start-logviewer! [conf log-root-dir daemonlog-root-dir]
-  (try
-    (let [header-buffer-size (int (.get conf UI-HEADER-BUFFER-BYTES))
-          filter-class (conf UI-FILTER)
-          filter-params (conf UI-FILTER-PARAMS)
-          logapp (handler/api (-> log-routes
-                                requests-middleware))  ;; query params as map
-          middle (conf-middleware logapp log-root-dir daemonlog-root-dir)
-          filters-confs (if (conf UI-FILTER)
-                          [{:filter-class filter-class
-                            :filter-params (or (conf UI-FILTER-PARAMS) {})}]
-                          [])
-          filters-confs (concat filters-confs
-                          [{:filter-class "org.eclipse.jetty.servlets.GzipFilter"
-                            :filter-name "Gzipper"
-                            :filter-params {}}])
-          https-port (int (or (conf LOGVIEWER-HTTPS-PORT) 0))
-          keystore-path (conf LOGVIEWER-HTTPS-KEYSTORE-PATH)
-          keystore-pass (conf LOGVIEWER-HTTPS-KEYSTORE-PASSWORD)
-          keystore-type (conf LOGVIEWER-HTTPS-KEYSTORE-TYPE)
-          key-password (conf LOGVIEWER-HTTPS-KEY-PASSWORD)
-          truststore-path (conf LOGVIEWER-HTTPS-TRUSTSTORE-PATH)
-          truststore-password (conf LOGVIEWER-HTTPS-TRUSTSTORE-PASSWORD)
-          truststore-type (conf LOGVIEWER-HTTPS-TRUSTSTORE-TYPE)
-          want-client-auth (conf LOGVIEWER-HTTPS-WANT-CLIENT-AUTH)
-          need-client-auth (conf LOGVIEWER-HTTPS-NEED-CLIENT-AUTH)]
-      (storm-run-jetty {:port (int (conf LOGVIEWER-PORT))
-                        :configurator (fn [server]
-                                        (config-ssl server
-                                                    https-port
-                                                    keystore-path
-                                                    keystore-pass
-                                                    keystore-type
-                                                    key-password
-                                                    truststore-path
-                                                    truststore-password
-                                                    truststore-type
-                                                    want-client-auth
-                                                    need-client-auth)
-                                        (config-filter server middle filters-confs))}))
-  (catch Exception ex
-    (log-error ex))))
-
-(defn -main []
-  (let [conf (read-storm-config)
-        log-root (worker-artifacts-root conf)
-        daemonlog-root (log-root-dir (conf LOGVIEWER-APPENDER-NAME))]
-    (setup-default-uncaught-exception-handler)
-    (start-log-cleaner! conf log-root)
-    (log-message "Starting logviewer server for storm version '"
-                 STORM-VERSION
-                 "'")
-    (start-logviewer! conf log-root daemonlog-root)
-    (start-metrics-reporters)))


[28/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/daemon/supervisor.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/daemon/supervisor.clj b/storm-core/src/clj/backtype/storm/daemon/supervisor.clj
deleted file mode 100644
index 1ae4356..0000000
--- a/storm-core/src/clj/backtype/storm/daemon/supervisor.clj
+++ /dev/null
@@ -1,1219 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.daemon.supervisor
-  (:import [java.io File IOException FileOutputStream])
-  (:import [backtype.storm.scheduler ISupervisor]
-           [backtype.storm.utils LocalState Time Utils]
-           [backtype.storm.daemon Shutdownable]
-           [backtype.storm Constants]
-           [backtype.storm.cluster ClusterStateContext DaemonType]
-           [java.net JarURLConnection]
-           [java.net URI]
-           [org.apache.commons.io FileUtils])
-  (:use [backtype.storm config util log timer local-state])
-  (:import [backtype.storm.generated AuthorizationException KeyNotFoundException WorkerResources])
-  (:import [backtype.storm.utils NimbusLeaderNotFoundException VersionInfo])
-  (:import [java.nio.file Files StandardCopyOption])
-  (:import [backtype.storm Config])
-  (:import [backtype.storm.generated WorkerResources ProfileAction])
-  (:import [backtype.storm.localizer LocalResource])
-  (:use [backtype.storm.daemon common])
-  (:require [backtype.storm.command [healthcheck :as healthcheck]])
-  (:require [backtype.storm.daemon [worker :as worker]]
-            [backtype.storm [process-simulator :as psim] [cluster :as cluster] [event :as event]]
-            [clojure.set :as set])
-  (:import [org.apache.thrift.transport TTransportException])
-  (:import [org.apache.zookeeper data.ACL ZooDefs$Ids ZooDefs$Perms])
-  (:import [org.yaml.snakeyaml Yaml]
-           [org.yaml.snakeyaml.constructor SafeConstructor])
-  (:require [metrics.gauges :refer [defgauge]])
-  (:require [metrics.meters :refer [defmeter mark!]])
-  (:gen-class
-    :methods [^{:static true} [launch [backtype.storm.scheduler.ISupervisor] void]]))
-
-(defmeter supervisor:num-workers-launched)
-
-(defmulti download-storm-code cluster-mode)
-(defmulti launch-worker (fn [supervisor & _] (cluster-mode (:conf supervisor))))
-
-(def STORM-VERSION (VersionInfo/getVersion))
-
-(defprotocol SupervisorDaemon
-  (get-id [this])
-  (get-conf [this])
-  (shutdown-all-workers [this])
-  )
-
-(defn- assignments-snapshot [storm-cluster-state callback assignment-versions]
-  (let [storm-ids (.assignments storm-cluster-state callback)]
-    (let [new-assignments
-          (->>
-           (dofor [sid storm-ids]
-                  (let [recorded-version (:version (get assignment-versions sid))]
-                    (if-let [assignment-version (.assignment-version storm-cluster-state sid callback)]
-                      (if (= assignment-version recorded-version)
-                        {sid (get assignment-versions sid)}
-                        {sid (.assignment-info-with-version storm-cluster-state sid callback)})
-                      {sid nil})))
-           (apply merge)
-           (filter-val not-nil?))
-          new-profiler-actions
-          (->>
-            (dofor [sid (distinct storm-ids)]
-                   (if-let [topo-profile-actions (.get-topology-profile-requests storm-cluster-state sid false)]
-                      {sid topo-profile-actions}))
-           (apply merge))]
-         
-      {:assignments (into {} (for [[k v] new-assignments] [k (:data v)]))
-       :profiler-actions new-profiler-actions
-       :versions new-assignments})))
-
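-;; For one topology, picks out the executors and worker resources assigned to this
-;; supervisor node, grouped by worker port.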
-(defn- read-my-executors [assignments-snapshot storm-id assignment-id]
-  (let [assignment (get assignments-snapshot storm-id)
-        my-slots-resources (into {}
-                                 (filter (fn [[[node _] _]] (= node assignment-id))
-                                         (:worker->resources assignment)))
-        my-executors (filter (fn [[_ [node _]]] (= node assignment-id))
-                             (:executor->node+port assignment))
-        port-executors (apply merge-with
-                              concat
-                              (for [[executor [_ port]] my-executors]
-                                {port [executor]}
-                                ))]
-    (into {} (for [[port executors] port-executors]
-               ;; need to cast to int b/c it might be a long (due to how yaml parses things)
-               ;; doall is to avoid serialization/deserialization problems with lazy seqs
-               [(Integer. port) (mk-local-assignment storm-id (doall executors) (get my-slots-resources [assignment-id port]))]
-               ))))
-
-(defn- read-assignments
-  "Returns map from port to struct containing :storm-id, :executors and :resources"
-  ([assignments-snapshot assignment-id]
-     (->> (dofor [sid (keys assignments-snapshot)] (read-my-executors assignments-snapshot sid assignment-id))
-          (apply merge-with (fn [& ignored] (throw-runtime "Should not have multiple topologies assigned to one port")))))
-  ([assignments-snapshot assignment-id existing-assignment retries]
-     (try (let [assignments (read-assignments assignments-snapshot assignment-id)]
-            (reset! retries 0)
-            assignments)
-          (catch RuntimeException e
-            (if (> @retries 2) (throw e) (swap! retries inc))
-            (log-warn (.getMessage e) ": retrying " @retries " of 3")
-            existing-assignment))))
-
-(defn- read-storm-code-locations
-  [assignments-snapshot]
-  (map-val :master-code-dir assignments-snapshot))
-
-(defn- read-downloaded-storm-ids [conf]
-  (map #(url-decode %) (read-dir-contents (supervisor-stormdist-root conf)))
-  )
-
-(defn read-worker-heartbeat [conf id]
-  (let [local-state (worker-state conf id)]
-    (try
-      (ls-worker-heartbeat local-state)
-      (catch Exception e
-        (log-warn e "Failed to read local heartbeat for workerId: " id ". Ignoring exception.")
-        nil))))
-
-
-(defn my-worker-ids [conf]
-  (read-dir-contents (worker-root conf)))
-
-(defn read-worker-heartbeats
-  "Returns map from worker id to heartbeat"
-  [conf]
-  (let [ids (my-worker-ids conf)]
-    (into {}
-      (dofor [id ids]
-        [id (read-worker-heartbeat conf id)]))
-    ))
-
-
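-;; A worker heartbeat matches its local assignment when the storm-id and the
-;; executor set (ignoring the system executor) both agree.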
-(defn matches-an-assignment? [worker-heartbeat assigned-executors]
-  (let [local-assignment (assigned-executors (:port worker-heartbeat))]
-    (and local-assignment
-         (= (:storm-id worker-heartbeat) (:storm-id local-assignment))
-         (= (disj (set (:executors worker-heartbeat)) Constants/SYSTEM_EXECUTOR_ID)
-            (set (:executors local-assignment))))))
-
-(let [dead-workers (atom #{})]
-  (defn get-dead-workers []
-    @dead-workers)
-  (defn add-dead-worker [worker]
-    (swap! dead-workers conj worker))
-  (defn remove-dead-worker [worker]
-    (swap! dead-workers disj worker)))
-
-(defn is-worker-hb-timed-out? [now hb conf]
-  (> (- now (:time-secs hb))
-     (conf SUPERVISOR-WORKER-TIMEOUT-SECS)))
-
-(defn read-allocated-workers
-  "Returns map from worker id to worker heartbeat. if the heartbeat is nil, then the worker is dead (timed out or never wrote heartbeat)"
-  [supervisor assigned-executors now]
-  (let [conf (:conf supervisor)
-        ^LocalState local-state (:local-state supervisor)
-        id->heartbeat (read-worker-heartbeats conf)
-        approved-ids (set (keys (ls-approved-workers local-state)))]
-    (into
-     {}
-     (dofor [[id hb] id->heartbeat]
-            (let [state (cond
-                         (not hb)
-                           :not-started
-                         (or (not (contains? approved-ids id))
-                             (not (matches-an-assignment? hb assigned-executors)))
-                           :disallowed
-                         (or
-                          (when (get (get-dead-workers) id)
-                            (log-message "Worker Process " id " has died!")
-                            true)
-                          (is-worker-hb-timed-out? now hb conf))
-                           :timed-out
-                         true
-                           :valid)]
-              (log-debug "Worker " id " is " state ": " (pr-str hb) " at supervisor time-secs " now)
-              [id [state hb]]
-              ))
-     )))
-
-(defn- wait-for-worker-launch [conf id start-time]
-  (let [state (worker-state conf id)]
-    (loop []
-      (let [hb (ls-worker-heartbeat state)]
-        (when (and
-               (not hb)
-               (<
-                (- (current-time-secs) start-time)
-                (conf SUPERVISOR-WORKER-START-TIMEOUT-SECS)
-                ))
-          (log-message id " still hasn't started")
-          (Time/sleep 500)
-          (recur)
-          )))
-    (when-not (ls-worker-heartbeat state)
-      (log-message "Worker " id " failed to start")
-      )))
-
-(defn- wait-for-workers-launch [conf ids]
-  (let [start-time (current-time-secs)]
-    (doseq [id ids]
-      (wait-for-worker-launch conf id start-time))
-    ))
-
-(defn generate-supervisor-id []
-  (uuid))
-
-(defnk worker-launcher [conf user args :environment {} :log-prefix nil :exit-code-callback nil :directory nil]
-  (let [_ (when (clojure.string/blank? user)
-            (throw (java.lang.IllegalArgumentException.
-                     "User cannot be blank when calling worker-launcher.")))
-        wl-initial (conf SUPERVISOR-WORKER-LAUNCHER)
-        storm-home (System/getProperty "storm.home")
-        wl (if wl-initial wl-initial (str storm-home "/bin/worker-launcher"))
-        command (concat [wl user] args)]
-    (log-message "Running as user:" user " command:" (pr-str command))
-    (launch-process command :environment environment :log-prefix log-prefix :exit-code-callback exit-code-callback :directory directory)
-  ))
-
-(defnk worker-launcher-and-wait [conf user args :environment {} :log-prefix nil]
-  (let [process (worker-launcher conf user args :environment environment)]
-    (if log-prefix
-      (read-and-log-stream log-prefix (.getInputStream process)))
-      (try
-        (.waitFor process)
-      (catch InterruptedException e
-        (log-message log-prefix " interrupted.")))
-      (.exitValue process)))
-
-(defn- rmr-as-user
-  "Launches a process owned by the given user that deletes the given path
-  recursively.  Throws RuntimeException if the directory is not removed."
-  [conf id path]
-  (let [user (Utils/getFileOwner path)]
-    (worker-launcher-and-wait conf
-      user
-      ["rmr" path]
-      :log-prefix (str "rmr " id))
-    (if (exists-file? path)
-      (throw (RuntimeException. (str path " was not deleted"))))))
-
-(defn try-cleanup-worker [conf id]
-  (try
-    (if (.exists (File. (worker-root conf id)))
-      (do
-        (if (conf SUPERVISOR-RUN-WORKER-AS-USER)
-          (rmr-as-user conf id (worker-root conf id))
-          (do
-            (rmr (worker-heartbeats-root conf id))
-            ;; this avoids a race condition with the worker or a subprocess writing the pid around the same time
-            (rmr (worker-pids-root conf id))
-            (rmr (worker-root conf id))))
-        (remove-worker-user! conf id)
-        (remove-dead-worker id)
-      ))
-  (catch IOException e
-    (log-warn-error e "Failed to cleanup worker " id ". Will retry later"))
-  (catch RuntimeException e
-    (log-warn-error e "Failed to cleanup worker " id ". Will retry later")
-    )
-  (catch java.io.FileNotFoundException e (log-message (.getMessage e)))
-    ))
-
-(defn shutdown-worker [supervisor id]
-  (log-message "Shutting down " (:supervisor-id supervisor) ":" id)
-  (let [conf (:conf supervisor)
-        pids (read-dir-contents (worker-pids-root conf id))
-        thread-pid (@(:worker-thread-pids-atom supervisor) id)
-        shutdown-sleep-secs (conf SUPERVISOR-WORKER-SHUTDOWN-SLEEP-SECS)
-        as-user (conf SUPERVISOR-RUN-WORKER-AS-USER)
-        user (get-worker-user conf id)]
-    (when thread-pid
-      (psim/kill-process thread-pid))
-    (doseq [pid pids]
-      (if as-user
-        (worker-launcher-and-wait conf user ["signal" pid "15"] :log-prefix (str "kill -15 " pid))
-        (kill-process-with-sig-term pid)))
-    (when-not (empty? pids)  
-      (log-message "Sleep " shutdown-sleep-secs " seconds for execution of cleanup threads on worker.")
-      (sleep-secs shutdown-sleep-secs))
-    (doseq [pid pids]
-      (if as-user
-        (worker-launcher-and-wait conf user ["signal" pid "9"] :log-prefix (str "kill -9 " pid))
-        (force-kill-process pid))
-      (if as-user
-        (rmr-as-user conf id (worker-pid-path conf id pid))
-        (try
-          (rmpath (worker-pid-path conf id pid))
-          (catch Exception e)))) ;; on windows, the supervisor may still hold the lock on the worker directory
-    (try-cleanup-worker conf id))
-  (log-message "Shut down " (:supervisor-id supervisor) ":" id))
-
-(def SUPERVISOR-ZK-ACLS
-  [(first ZooDefs$Ids/CREATOR_ALL_ACL)
-   (ACL. (bit-or ZooDefs$Perms/READ ZooDefs$Perms/CREATE) ZooDefs$Ids/ANYONE_ID_UNSAFE)])
-
-(defn supervisor-data [conf shared-context ^ISupervisor isupervisor]
-  {:conf conf
-   :shared-context shared-context
-   :isupervisor isupervisor
-   :active (atom true)
-   :uptime (uptime-computer)
-   :version STORM-VERSION
-   :worker-thread-pids-atom (atom {})
-   :storm-cluster-state (cluster/mk-storm-cluster-state conf :acls (when
-                                                                     (Utils/isZkAuthenticationConfiguredStormServer
-                                                                       conf)
-                                                                     SUPERVISOR-ZK-ACLS)
-                                                        :context (ClusterStateContext. DaemonType/SUPERVISOR))
-   :local-state (supervisor-state conf)
-   :supervisor-id (.getSupervisorId isupervisor)
-   :assignment-id (.getAssignmentId isupervisor)
-   :my-hostname (hostname conf)
-   :curr-assignment (atom nil) ;; used for reporting used ports when heartbeating
-   :heartbeat-timer (mk-timer :kill-fn (fn [t]
-                               (log-error t "Error when processing event")
-                               (exit-process! 20 "Error when processing an event")
-                               ))
-   :event-timer (mk-timer :kill-fn (fn [t]
-                                         (log-error t "Error when processing event")
-                                         (exit-process! 20 "Error when processing an event")
-                                         ))
-   :blob-update-timer (mk-timer :kill-fn (defn blob-update-timer
-                                           [t]
-                                           (log-error t "Error when processing event")
-                                           (exit-process! 20 "Error when processing an event"))
-                                :timer-name "blob-update-timer")
-   :localizer (Utils/createLocalizer conf (supervisor-local-dir conf))
-   :assignment-versions (atom {})
-   :sync-retry (atom 0)
-   :download-lock (Object.)
-   :stormid->profiler-actions (atom {})
-   })
-
-(defn required-topo-files-exist?
-  [conf storm-id]
-  (let [stormroot (supervisor-stormdist-root conf storm-id)
-        stormjarpath (supervisor-stormjar-path stormroot)
-        stormcodepath (supervisor-stormcode-path stormroot)
-        stormconfpath (supervisor-stormconf-path stormroot)]
-    (and (every? exists-file? [stormroot stormconfpath stormcodepath])
-         (or (local-mode? conf)
-             (exists-file? stormjarpath)))))
-
-(defn get-worker-assignment-helper-msg
-  [assignment supervisor port id]
-  (str (pr-str assignment) " for this supervisor " (:supervisor-id supervisor) " on port "
-    port " with id " id))
-
-(defn get-valid-new-worker-ids
-  [conf supervisor reassign-executors new-worker-ids]
-  (into {}
-    (remove nil?
-      (dofor [[port assignment] reassign-executors]
-        (let [id (new-worker-ids port)
-              storm-id (:storm-id assignment)
-              ^WorkerResources resources (:resources assignment)
-              mem-onheap (.get_mem_on_heap resources)]
-          ;; This condition checks that the required files exist before launching the worker
-          (if (required-topo-files-exist? conf storm-id)
-            (do
-              (log-message "Launching worker with assignment "
-                (get-worker-assignment-helper-msg assignment supervisor port id))
-              (local-mkdirs (worker-pids-root conf id))
-              (local-mkdirs (worker-heartbeats-root conf id))
-              (launch-worker supervisor
-                (:storm-id assignment)
-                port
-                id
-                mem-onheap)
-              [id port])
-            (do
-              (log-message "Missing topology storm code, so can't launch worker with assignment "
-                (get-worker-assignment-helper-msg assignment supervisor port id))
-              nil)))))))
-
-(defn sync-processes [supervisor]
-  (let [conf (:conf supervisor)
-        ^LocalState local-state (:local-state supervisor)
-        storm-cluster-state (:storm-cluster-state supervisor)
-        assigned-executors (defaulted (ls-local-assignments local-state) {})
-        now (current-time-secs)
-        allocated (read-allocated-workers supervisor assigned-executors now)
-        keepers (filter-val
-                 (fn [[state _]] (= state :valid))
-                 allocated)
-        keep-ports (set (for [[id [_ hb]] keepers] (:port hb)))
-        reassign-executors (select-keys-pred (complement keep-ports) assigned-executors)
-        new-worker-ids (into
-                        {}
-                        (for [port (keys reassign-executors)]
-                          [port (uuid)]))]
-    ;; 1. the workers to kill are those in allocated that are dead or disallowed
-    ;; 2. kill the ones that should be dead
-    ;;     - read pids, kill -9 and individually remove the pid files
-    ;;     - rmr heartbeat dir, rmdir pid dir, rmdir id dir (catch exceptions and log)
-    ;; 3. of the rest, figure out which assignments aren't yet satisfied
-    ;; 4. generate new worker ids, write the new "approved workers" to local state
-    ;; 5. create local dirs for the new worker ids
-    ;; 6. launch the new workers (passing worker-id, port, and supervisor-id)
-    ;; 7. wait for the workers to launch
-
-    (log-debug "Syncing processes")
-    (log-debug "Assigned executors: " assigned-executors)
-    (log-debug "Allocated: " allocated)
-    (doseq [[id [state heartbeat]] allocated]
-      (when (not= :valid state)
-        (log-message
-         "Shutting down and clearing state for id " id
-         ". Current supervisor time: " now
-         ". State: " state
-         ", Heartbeat: " (pr-str heartbeat))
-        (shutdown-worker supervisor id)))
-    (let [valid-new-worker-ids (get-valid-new-worker-ids conf supervisor reassign-executors new-worker-ids)]
-      (ls-approved-workers! local-state
-                        (merge
-                          (select-keys (ls-approved-workers local-state)
-                            (keys keepers))
-                          valid-new-worker-ids))
-      (wait-for-workers-launch conf (keys valid-new-worker-ids)))))
-
-(defn assigned-storm-ids-from-port-assignments [assignment]
-  (->> assignment
-       vals
-       (map :storm-id)
-       set))
-
-(defn shutdown-disallowed-workers [supervisor]
-  (let [conf (:conf supervisor)
-        ^LocalState local-state (:local-state supervisor)
-        assigned-executors (defaulted (ls-local-assignments local-state) {})
-        now (current-time-secs)
-        allocated (read-allocated-workers supervisor assigned-executors now)
-        disallowed (keys (filter-val
-                                  (fn [[state _]] (= state :disallowed))
-                                  allocated))]
-    (log-debug "Allocated workers " allocated)
-    (log-debug "Disallowed workers " disallowed)
-    (doseq [id disallowed]
-      (shutdown-worker supervisor id))
-    ))
-
-(defn get-blob-localname
-  "Given the blob information, gets the localname field if it exists,
-  else returns the default value passed in."
-  [blob-info defaultValue]
-  (or (get blob-info "localname") defaultValue))
-
-(defn should-uncompress-blob?
-  "Given the blob information, returns the value of the uncompress field, handling it being
-  either a string or a boolean value; if it is not specified, returns false."
-  [blob-info]
-  (Boolean. (get blob-info "uncompress")))
-
-(defn remove-blob-references
-  "Remove a reference to a blob when it is no longer needed."
-  [localizer storm-id conf]
-  (let [storm-conf (read-supervisor-storm-conf conf storm-id)
-        blobstore-map (storm-conf TOPOLOGY-BLOBSTORE-MAP)
-        user (storm-conf TOPOLOGY-SUBMITTER-USER)
-        topo-name (storm-conf TOPOLOGY-NAME)]
-    (if blobstore-map
-      (doseq [[k, v] blobstore-map]
-        (.removeBlobReference localizer
-          k
-          user
-          topo-name
-          (should-uncompress-blob? v))))))
-
-(defn blobstore-map-to-localresources
-  "Returns a list of LocalResources based on the blobstore-map passed in."
-  [blobstore-map]
-  (if blobstore-map
-    (for [[k, v] blobstore-map] (LocalResource. k (should-uncompress-blob? v)))
-    ()))
-
-(defn add-blob-references
-  "For each of the downloaded topologies, adds references to the blobs that the topologies are
-  using. This is used to reconstruct the cache on restart."
-  [localizer storm-id conf]
-  (let [storm-conf (read-supervisor-storm-conf conf storm-id)
-        blobstore-map (storm-conf TOPOLOGY-BLOBSTORE-MAP)
-        user (storm-conf TOPOLOGY-SUBMITTER-USER)
-        topo-name (storm-conf TOPOLOGY-NAME)
-        localresources (blobstore-map-to-localresources blobstore-map)]
-    (if blobstore-map
-      (.addReferences localizer localresources user topo-name))))
-
-(defn rm-topo-files
-  [conf storm-id localizer rm-blob-refs?]
-  (let [path (supervisor-stormdist-root conf storm-id)]
-    (try
-      (if rm-blob-refs?
-        (remove-blob-references localizer storm-id conf))
-      (if (conf SUPERVISOR-RUN-WORKER-AS-USER)
-        (rmr-as-user conf storm-id path)
-        (rmr (supervisor-stormdist-root conf storm-id)))
-      (catch Exception e
-        (log-message e (str "Exception removing: " storm-id))))))
-
-(defn verify-downloaded-files
-  "Check that the files exist to avoid the supervisor crashing.
-   Also makes sure there is no need for locking."
-  [conf localizer assigned-storm-ids all-downloaded-storm-ids]
-  (remove nil?
-    (into #{}
-      (for [storm-id all-downloaded-storm-ids
-            :when (contains? assigned-storm-ids storm-id)]
-        (when-not (required-topo-files-exist? conf storm-id)
-          (log-debug "Files not present in topology directory")
-          (rm-topo-files conf storm-id localizer false)
-          storm-id)))))
-
-(defn mk-synchronize-supervisor [supervisor sync-processes event-manager processes-event-manager]
-  (fn this []
-    (let [conf (:conf supervisor)
-          storm-cluster-state (:storm-cluster-state supervisor)
-          ^ISupervisor isupervisor (:isupervisor supervisor)
-          ^LocalState local-state (:local-state supervisor)
-          sync-callback (fn [& ignored] (.add event-manager this))
-          assignment-versions @(:assignment-versions supervisor)
-          {assignments-snapshot :assignments
-           storm-id->profiler-actions :profiler-actions
-           versions :versions}
-          (assignments-snapshot storm-cluster-state sync-callback assignment-versions)
-          storm-code-map (read-storm-code-locations assignments-snapshot)
-          all-downloaded-storm-ids (set (read-downloaded-storm-ids conf))
-          existing-assignment (ls-local-assignments local-state)
-          all-assignment (read-assignments assignments-snapshot
-                                           (:assignment-id supervisor)
-                                           existing-assignment
-                                           (:sync-retry supervisor))
-          new-assignment (->> all-assignment
-                              (filter-key #(.confirmAssigned isupervisor %)))
-          assigned-storm-ids (assigned-storm-ids-from-port-assignments new-assignment)
-          localizer (:localizer supervisor)
-          checked-downloaded-storm-ids (set (verify-downloaded-files conf localizer assigned-storm-ids all-downloaded-storm-ids))
-          downloaded-storm-ids (set/difference all-downloaded-storm-ids checked-downloaded-storm-ids)]
-
-      (log-debug "Synchronizing supervisor")
-      (log-debug "Storm code map: " storm-code-map)
-      (log-debug "All assignment: " all-assignment)
-      (log-debug "New assignment: " new-assignment)
-      (log-debug "Assigned Storm Ids " assigned-storm-ids)
-      (log-debug "All Downloaded Ids " all-downloaded-storm-ids)
-      (log-debug "Checked Downloaded Ids " checked-downloaded-storm-ids)
-      (log-debug "Downloaded Ids " downloaded-storm-ids)
-      (log-debug "Storm Ids Profiler Actions " storm-id->profiler-actions)
-      ;; download code first
-      ;; This might take a while
-      ;;   - should this be done separately from usual monitoring?
-      ;; should we only download when topology is assigned to this supervisor?
-      (doseq [[storm-id master-code-dir] storm-code-map]
-        (when (and (not (downloaded-storm-ids storm-id))
-                   (assigned-storm-ids storm-id))
-          (log-message "Downloading code for storm id " storm-id)
-          (try-cause
-            (download-storm-code conf storm-id master-code-dir localizer)
-
-            (catch NimbusLeaderNotFoundException e
-              (log-warn-error e "Nimbus leader was not available."))
-            (catch TTransportException e
-              (log-warn-error e "There was a connection problem with nimbus.")))
-          (log-message "Finished downloading code for storm id " storm-id)))
-
-      (log-debug "Writing new assignment "
-                 (pr-str new-assignment))
-      (doseq [p (set/difference (set (keys existing-assignment))
-                                (set (keys new-assignment)))]
-        (.killedWorker isupervisor (int p)))
-      (.assigned isupervisor (keys new-assignment))
-      (ls-local-assignments! local-state
-            new-assignment)
-      (reset! (:assignment-versions supervisor) versions)
-      (reset! (:stormid->profiler-actions supervisor) storm-id->profiler-actions)
-
-      (reset! (:curr-assignment supervisor) new-assignment)
-      ;; remove any downloaded code that's no longer assigned or active
-      ;; important that this happens after setting the local assignment so that
-      ;; synchronize-supervisor doesn't try to launch workers for which the
-      ;; resources don't exist
-      (if on-windows? (shutdown-disallowed-workers supervisor))
-      (doseq [storm-id all-downloaded-storm-ids]
-        (when-not (storm-code-map storm-id)
-          (log-message "Removing code for storm id "
-                       storm-id)
-          (rm-topo-files conf storm-id localizer true)))
-      (.add processes-event-manager sync-processes))))
-
-(defn mk-supervisor-capacities
-  [conf]
-  {Config/SUPERVISOR_MEMORY_CAPACITY_MB (double (conf SUPERVISOR-MEMORY-CAPACITY-MB))
-   Config/SUPERVISOR_CPU_CAPACITY (double (conf SUPERVISOR-CPU-CAPACITY))})
-
-(defn update-blobs-for-topology!
-  "Update each blob listed in the topology configuration if the latest version of the blob
-   has not been downloaded."
-  [conf storm-id localizer]
-  (let [storm-conf (read-supervisor-storm-conf conf storm-id)
-        blobstore-map (storm-conf TOPOLOGY-BLOBSTORE-MAP)
-        user (storm-conf TOPOLOGY-SUBMITTER-USER)
-        localresources (blobstore-map-to-localresources blobstore-map)]
-    (try
-      (.updateBlobs localizer localresources user)
-      (catch AuthorizationException authExp
-        (log-error authExp))
-      (catch KeyNotFoundException knf
-        (log-error knf)))))
-
-(defn update-blobs-for-all-topologies-fn
-  "Returns a function that downloads all blobs listed in the topology configuration for all topologies assigned
-  to this supervisor, and creates version files with a suffix. The returned function is intended to be run periodically
-  by a timer, created elsewhere."
-  [supervisor]
-  (fn []
-    (try-cause
-      (let [conf (:conf supervisor)
-            downloaded-storm-ids (set (read-downloaded-storm-ids conf))
-            new-assignment @(:curr-assignment supervisor)
-            assigned-storm-ids (assigned-storm-ids-from-port-assignments new-assignment)]
-        (doseq [topology-id downloaded-storm-ids]
-          (let [storm-root (supervisor-stormdist-root conf topology-id)]
-            (when (assigned-storm-ids topology-id)
-              (log-debug "Checking Blob updates for storm topology id " topology-id " With target_dir: " storm-root)
-              (update-blobs-for-topology! conf topology-id (:localizer supervisor))))))
-      (catch TTransportException e
-        (log-error
-          e
-          "Network error while updating blobs, will retry again later"))
-      (catch NimbusLeaderNotFoundException e
-        (log-error
-          e
-          "Nimbus unavailable to update blobs, will retry again later")))))
-
-(defn jvm-cmd [cmd]
-  (let [java-home (.get (System/getenv) "JAVA_HOME")]
-    (if (nil? java-home)
-      cmd
-      (str java-home file-path-separator "bin" file-path-separator cmd))))
-
-(defn java-cmd []
-  (jvm-cmd "java"))
-
-(defn jmap-dump-cmd [profile-cmd pid target-dir]
-  [profile-cmd pid "jmap" target-dir])
-
-(defn jstack-dump-cmd [profile-cmd pid target-dir]
-  [profile-cmd pid "jstack" target-dir])
-
-(defn jprofile-start [profile-cmd pid]
-  [profile-cmd pid "start"])
-
-(defn jprofile-stop [profile-cmd pid target-dir]
-  [profile-cmd pid "stop" target-dir])
-
-(defn jprofile-dump [profile-cmd pid workers-artifacts-directory]
-  [profile-cmd pid "dump" workers-artifacts-directory])
-
-(defn jprofile-jvm-restart [profile-cmd pid]
-  [profile-cmd pid "kill"])
-
-(defn- delete-topology-profiler-action [storm-cluster-state storm-id profile-action]
-  (log-message "Deleting profiler action.." profile-action)
-  (.delete-topology-profile-requests storm-cluster-state storm-id profile-action))
-
-(defnk launch-profiler-action-for-worker
-  "Launch profiler action for a worker"
-  [conf user target-dir command :environment {} :exit-code-on-profile-action nil :log-prefix nil]
-  (if-let [run-worker-as-user (conf SUPERVISOR-RUN-WORKER-AS-USER)]
-    (let [container-file (container-file-path target-dir)
-          script-file (script-file-path target-dir)]
-      (log-message "Running as user:" user " command:" (shell-cmd command))
-      (if (exists-file? container-file) (rmr-as-user conf container-file container-file))
-      (if (exists-file? script-file) (rmr-as-user conf script-file script-file))
-      (worker-launcher
-        conf
-        user
-        ["profiler" target-dir (write-script target-dir command :environment environment)]
-        :log-prefix log-prefix
-        :exit-code-callback exit-code-on-profile-action
-        :directory (File. target-dir)))
-    (launch-process
-      command
-      :environment environment
-      :log-prefix log-prefix
-      :exit-code-callback exit-code-on-profile-action
-      :directory (File. target-dir))))
-
-(defn mk-run-profiler-actions-for-all-topologies
-  "Returns a function that retrieves the profiler actions listed for all topologies assigned
-  to this supervisor, executes those actions as the submitting user, and deletes them from ZooKeeper."
-  [supervisor]
-  (fn []
-    (try
-      (let [conf (:conf supervisor)
-            stormid->profiler-actions @(:stormid->profiler-actions supervisor)
-            storm-cluster-state (:storm-cluster-state supervisor)
-            hostname (:my-hostname supervisor)
-            profile-cmd (conf WORKER-PROFILER-COMMAND)
-            new-assignment @(:curr-assignment supervisor)
-            assigned-storm-ids (assigned-storm-ids-from-port-assignments new-assignment)]
-        (doseq [[storm-id profiler-actions] stormid->profiler-actions]
-          (when (not (empty? profiler-actions))
-            (doseq [pro-action profiler-actions]
-              (if (= hostname (:host pro-action))
-                (let [port (:port pro-action)
-                      action ^ProfileAction (:action pro-action)
-                      stop? (> (System/currentTimeMillis) (:timestamp pro-action))
-                      target-dir (worker-artifacts-root conf storm-id port)
-                      storm-conf (read-supervisor-storm-conf conf storm-id)
-                      user (storm-conf TOPOLOGY-SUBMITTER-USER)
-                      environment (if-let [env (storm-conf TOPOLOGY-ENVIRONMENT)] env {})
-                      worker-pid (slurp (worker-artifacts-pid-path conf storm-id port))
-                      log-prefix (str "ProfilerAction process " storm-id ":" port " PROFILER_ACTION: " action " ")
-                      ;; Until the PROFILER_STOP action becomes due, keep re-launching profiler start in case the worker restarted.
-                      ;; The profiler plugin script checks whether the JVM is already recording before starting another recording.
-                      command (cond
-                                (= action ProfileAction/JMAP_DUMP) (jmap-dump-cmd profile-cmd worker-pid target-dir)
-                                (= action ProfileAction/JSTACK_DUMP) (jstack-dump-cmd profile-cmd worker-pid target-dir)
-                                (= action ProfileAction/JPROFILE_DUMP) (jprofile-dump profile-cmd worker-pid target-dir)
-                                (= action ProfileAction/JVM_RESTART) (jprofile-jvm-restart profile-cmd worker-pid)
-                                (and (not stop?)
-                                     (= action ProfileAction/JPROFILE_STOP))
-                                  (jprofile-start profile-cmd worker-pid) ;; Ensure the profiler is still running
-                                (and stop? (= action ProfileAction/JPROFILE_STOP)) (jprofile-stop profile-cmd worker-pid target-dir))
-                      action-on-exit (fn [exit-code]
-                                       (log-message log-prefix " profile-action exited with code: " exit-code)
-                                       (if (and (= exit-code 0) stop?)
-                                         (delete-topology-profiler-action storm-cluster-state storm-id pro-action)))
-                      command (->> command (map str) (filter (complement empty?)))]
-
-                  (try
-                    (launch-profiler-action-for-worker conf
-                      user
-                      target-dir
-                      command
-                      :environment environment
-                      :exit-code-on-profile-action action-on-exit
-                      :log-prefix log-prefix)
-                    (catch IOException ioe
-                      (log-error ioe
-                        (str "Error in processing ProfilerAction '" action "' for " storm-id ":" port ", will retry later.")))
-                    (catch RuntimeException rte
-                      (log-error rte
-                        (str "Error in processing ProfilerAction '" action "' for " storm-id ":" port ", will retry later."))))))))))
-      (catch Exception e
-        (log-error e "Error running profiler actions, will retry again later")))))
-
-;; in local state, supervisor stores who its current assignments are
-;; another thread launches events to restart any dead processes if necessary
-(defserverfn mk-supervisor [conf shared-context ^ISupervisor isupervisor]
-  (log-message "Starting Supervisor with conf " conf)
-  (.prepare isupervisor conf (supervisor-isupervisor-dir conf))
-  (FileUtils/cleanDirectory (File. (supervisor-tmp-dir conf)))
-  (let [supervisor (supervisor-data conf shared-context isupervisor)
-        [event-manager processes-event-manager :as managers] [(event/event-manager false) (event/event-manager false)]
-        sync-processes (partial sync-processes supervisor)
-        synchronize-supervisor (mk-synchronize-supervisor supervisor sync-processes event-manager processes-event-manager)
-        synchronize-blobs-fn (update-blobs-for-all-topologies-fn supervisor)
-        downloaded-storm-ids (set (read-downloaded-storm-ids conf))
-        run-profiler-actions-fn (mk-run-profiler-actions-for-all-topologies supervisor)
-        heartbeat-fn (fn [] (.supervisor-heartbeat!
-                               (:storm-cluster-state supervisor)
-                               (:supervisor-id supervisor)
-                               (->SupervisorInfo (current-time-secs)
-                                                 (:my-hostname supervisor)
-                                                 (:assignment-id supervisor)
-                                                 (keys @(:curr-assignment supervisor))
-                                                  ;; used ports
-                                                 (.getMetadata isupervisor)
-                                                 (conf SUPERVISOR-SCHEDULER-META)
-                                                 ((:uptime supervisor))
-                                                 (:version supervisor)
-                                                 (mk-supervisor-capacities conf))))]
-    (heartbeat-fn)
-
-    ;; should synchronize supervisor so it doesn't launch anything after being down (optimization)
-    (schedule-recurring (:heartbeat-timer supervisor)
-                        0
-                        (conf SUPERVISOR-HEARTBEAT-FREQUENCY-SECS)
-                        heartbeat-fn)
-    (doseq [storm-id downloaded-storm-ids]
-      (add-blob-references (:localizer supervisor) storm-id
-        conf))
-    ;; do this after adding the references so we don't try to clean things being used
-    (.startCleaner (:localizer supervisor))
-
-    (when (conf SUPERVISOR-ENABLE)
-      ;; This isn't strictly necessary, but it doesn't hurt and ensures that the machine stays up
-      ;; to date even if callbacks don't all work exactly right
-      (schedule-recurring (:event-timer supervisor) 0 10 (fn [] (.add event-manager synchronize-supervisor)))
-      (schedule-recurring (:event-timer supervisor)
-                          0
-                          (conf SUPERVISOR-MONITOR-FREQUENCY-SECS)
-                          (fn [] (.add processes-event-manager sync-processes)))
-
-      ;; Blob update thread. Starts with a 30 second delay and runs every 30 seconds
-      (schedule-recurring (:blob-update-timer supervisor)
-                          30
-                          30
-                          (fn [] (.add event-manager synchronize-blobs-fn)))
-
-      (schedule-recurring (:event-timer supervisor)
-                          (* 60 5)
-                          (* 60 5)
-                          (fn [] (let [health-code (healthcheck/health-check conf)
-                                       ids (my-worker-ids conf)]
-                                   (if (not (= health-code 0))
-                                     (do
-                                       (doseq [id ids]
-                                         (shutdown-worker supervisor id))
-                                       (throw (RuntimeException. "Supervisor failed health check. Exiting.")))))))
-
-      ;; Launch a thread that runs profiler commands. Starts with a 30 second delay and runs every 30 seconds
-      (schedule-recurring (:event-timer supervisor)
-                          30
-                          30
-                          (fn [] (.add event-manager run-profiler-actions-fn))))
-    (log-message "Starting supervisor with id " (:supervisor-id supervisor) " at host " (:my-hostname supervisor))
-    (reify
-     Shutdownable
-     (shutdown [this]
-               (log-message "Shutting down supervisor " (:supervisor-id supervisor))
-               (reset! (:active supervisor) false)
-               (cancel-timer (:heartbeat-timer supervisor))
-               (cancel-timer (:event-timer supervisor))
-               (cancel-timer (:blob-update-timer supervisor))
-               (.shutdown event-manager)
-               (.shutdown processes-event-manager)
-               (.shutdown (:localizer supervisor))
-               (.disconnect (:storm-cluster-state supervisor)))
-     SupervisorDaemon
-     (get-conf [this]
-       conf)
-     (get-id [this]
-       (:supervisor-id supervisor))
-     (shutdown-all-workers [this]
-       (let [ids (my-worker-ids conf)]
-         (doseq [id ids]
-           (shutdown-worker supervisor id)
-           )))
-     DaemonCommon
-     (waiting? [this]
-       (or (not @(:active supervisor))
-           (and
-            (timer-waiting? (:heartbeat-timer supervisor))
-            (timer-waiting? (:event-timer supervisor))
-            (every? (memfn waiting?) managers)))
-           ))))
-
-(defn kill-supervisor [supervisor]
-  (.shutdown supervisor)
-  )
-
-(defn setup-storm-code-dir
-  [conf storm-conf dir]
- (if (conf SUPERVISOR-RUN-WORKER-AS-USER)
-  (worker-launcher-and-wait conf (storm-conf TOPOLOGY-SUBMITTER-USER) ["code-dir" dir] :log-prefix (str "setup conf for " dir))))
-
-(defn setup-blob-permission
-  [conf storm-conf path]
-  (if (conf SUPERVISOR-RUN-WORKER-AS-USER)
-    (worker-launcher-and-wait conf (storm-conf TOPOLOGY-SUBMITTER-USER) ["blob" path] :log-prefix (str "setup blob permissions for " path))))
-
-(defn download-blobs-for-topology!
-  "Download all blobs listed in the topology configuration for a given topology."
-  [conf stormconf-path localizer tmproot]
-  (let [storm-conf (read-supervisor-storm-conf-given-path conf stormconf-path)
-        blobstore-map (storm-conf TOPOLOGY-BLOBSTORE-MAP)
-        user (storm-conf TOPOLOGY-SUBMITTER-USER)
-        topo-name (storm-conf TOPOLOGY-NAME)
-        user-dir (.getLocalUserFileCacheDir localizer user)
-        localresources (blobstore-map-to-localresources blobstore-map)]
-    (when localresources
-      (when-not (.exists user-dir)
-        (FileUtils/forceMkdir user-dir))
-      (try
-        (let [localized-resources (.getBlobs localizer localresources user topo-name user-dir)]
-          (setup-blob-permission conf storm-conf (.toString user-dir))
-          (doseq [local-rsrc localized-resources]
-            (let [rsrc-file-path (File. (.getFilePath local-rsrc))
-                  key-name (.getName rsrc-file-path)
-                  blob-symlink-target-name (.getName (File. (.getCurrentSymlinkPath local-rsrc)))
-                  symlink-name (get-blob-localname (get blobstore-map key-name) key-name)]
-              (create-symlink! tmproot (.getParent rsrc-file-path) symlink-name
-                blob-symlink-target-name))))
-        (catch AuthorizationException authExp
-          (log-error authExp))
-        (catch KeyNotFoundException knf
-          (log-error knf))))))
-
-(defn get-blob-file-names
-  [blobstore-map]
-  (if blobstore-map
-    (for [[k, data] blobstore-map]
-      (get-blob-localname data k))))
-
-(defn download-blobs-for-topology-succeed?
-  "Check whether all blobs have been downloaded for the given topology"
-  [stormconf-path target-dir]
-  (let [storm-conf (clojurify-structure (Utils/fromCompressedJsonConf (FileUtils/readFileToByteArray (File. stormconf-path))))
-        blobstore-map (storm-conf TOPOLOGY-BLOBSTORE-MAP)
-        file-names (get-blob-file-names blobstore-map)]
-    (if-not (empty? file-names)
-      (every? #(Utils/checkFileExists target-dir %) file-names)
-      true)))
-
-;; distributed implementation
-(defmethod download-storm-code
-  :distributed [conf storm-id master-code-dir localizer]
-  ;; Downloading to permanent location is atomic
-  (let [tmproot (str (supervisor-tmp-dir conf) file-path-separator (uuid))
-        stormroot (supervisor-stormdist-root conf storm-id)
-        blobstore (Utils/getClientBlobStoreForSupervisor conf)]
-    (FileUtils/forceMkdir (File. tmproot))
-    (if-not on-windows?
-      (Utils/restrictPermissions tmproot)
-      (if (conf SUPERVISOR-RUN-WORKER-AS-USER)
-        (throw-runtime (str "ERROR: Windows doesn't implement setting the correct permissions"))))
-    (Utils/downloadResourcesAsSupervisor (master-stormjar-key storm-id)
-      (supervisor-stormjar-path tmproot) blobstore)
-    (Utils/downloadResourcesAsSupervisor (master-stormcode-key storm-id)
-      (supervisor-stormcode-path tmproot) blobstore)
-    (Utils/downloadResourcesAsSupervisor (master-stormconf-key storm-id)
-      (supervisor-stormconf-path tmproot) blobstore)
-    (.shutdown blobstore)
-    (extract-dir-from-jar (supervisor-stormjar-path tmproot) RESOURCES-SUBDIR tmproot)
-    (download-blobs-for-topology! conf (supervisor-stormconf-path tmproot) localizer
-      tmproot)
-    (if (download-blobs-for-topology-succeed? (supervisor-stormconf-path tmproot) tmproot)
-      (do
-        (log-message "Successfully downloaded blob resources for storm-id " storm-id)
-        (FileUtils/forceMkdir (File. stormroot))
-        (Files/move (.toPath (File. tmproot)) (.toPath (File. stormroot))
-          (doto (make-array StandardCopyOption 1) (aset 0 StandardCopyOption/ATOMIC_MOVE)))
-        (setup-storm-code-dir conf (read-supervisor-storm-conf conf storm-id) stormroot))
-      (do
-        (log-message "Failed to download blob resources for storm-id " storm-id)
-        (rmr tmproot)))))
-
-(defn write-log-metadata-to-yaml-file! [storm-id port data conf]
-  (let [file (get-log-metadata-file conf storm-id port)]
-    ;; run-worker-as-user needs the directory to have special permissions,
-    ;; or it is insecure
-    (when (not (.exists (.getParentFile file)))
-      (if (conf SUPERVISOR-RUN-WORKER-AS-USER)
-        (do (FileUtils/forceMkdir (.getParentFile file))
-            (setup-storm-code-dir conf (read-supervisor-storm-conf conf storm-id) (.getCanonicalPath (.getParentFile file))))
-        (.mkdirs (.getParentFile file))))
-    (let [writer (java.io.FileWriter. file)
-          yaml (Yaml.)]
-      (try
-        (.dump yaml data writer)
-        (finally
-          (.close writer))))))
-
-(defn write-log-metadata! [storm-conf user worker-id storm-id port conf]
-  (let [data {TOPOLOGY-SUBMITTER-USER user
-              "worker-id" worker-id
-              LOGS-GROUPS (sort (distinct (remove nil?
-                                           (concat
-                                             (storm-conf LOGS-GROUPS)
-                                             (storm-conf TOPOLOGY-GROUPS)))))
-              LOGS-USERS (sort (distinct (remove nil?
-                                           (concat
-                                             (storm-conf LOGS-USERS)
-                                             (storm-conf TOPOLOGY-USERS)))))}]
-    (write-log-metadata-to-yaml-file! storm-id port data conf)))
-
-(defn jlp [stormroot conf]
-  (let [resource-root (str stormroot File/separator RESOURCES-SUBDIR)
-        os (clojure.string/replace (System/getProperty "os.name") #"\s+" "_")
-        arch (System/getProperty "os.arch")
-        arch-resource-root (str resource-root File/separator os "-" arch)]
-    (str arch-resource-root File/pathSeparator resource-root File/pathSeparator (conf JAVA-LIBRARY-PATH))))
-
-(defn substitute-childopts
-  "Generates runtime childopts by replacing keys with topology-id, worker-id, port, mem-onheap"
-  [value worker-id topology-id port mem-onheap]
-  (let [replacement-map {"%ID%"          (str port)
-                         "%WORKER-ID%"   (str worker-id)
-                         "%TOPOLOGY-ID%"    (str topology-id)
-                         "%WORKER-PORT%" (str port)
-                         "%HEAP-MEM%" (str mem-onheap)}
-        sub-fn #(reduce (fn [string entry]
-                          (apply clojure.string/replace string entry))
-                        %
-                        replacement-map)]
-    (cond
-      (nil? value) nil
-      (sequential? value) (vec (map sub-fn value))
-      :else (-> value sub-fn (clojure.string/split #"\s+")))))
-
-
-(defn create-blobstore-links
-  "Create symlinks in worker launch directory for all blobs"
-  [conf storm-id worker-id]
-  (let [stormroot (supervisor-stormdist-root conf storm-id)
-        storm-conf (read-supervisor-storm-conf conf storm-id)
-        workerroot (worker-root conf worker-id)
-        blobstore-map (storm-conf TOPOLOGY-BLOBSTORE-MAP)
-        blob-file-names (get-blob-file-names blobstore-map)
-        resource-file-names (cons RESOURCES-SUBDIR blob-file-names)]
-    (log-message "Creating symlinks for worker-id: " worker-id " storm-id: "
-      storm-id " for files(" (count resource-file-names) "): " (pr-str resource-file-names))
-    (create-symlink! workerroot stormroot RESOURCES-SUBDIR)
-    (doseq [file-name blob-file-names]
-      (create-symlink! workerroot stormroot file-name file-name))))
-
-(defn create-artifacts-link
-  "Create a symlink from the worker directory to its port artifacts directory"
-  [conf storm-id port worker-id]
-  (let [worker-dir (worker-root conf worker-id)
-        topo-dir (worker-artifacts-root conf storm-id)]
-    (log-message "Creating symlinks for worker-id: " worker-id " storm-id: "
-                 storm-id " to its port artifacts directory")
-    (if (.exists (File. worker-dir))
-      (create-symlink! worker-dir topo-dir "artifacts" port))))
-
-(defmethod launch-worker
-    :distributed [supervisor storm-id port worker-id mem-onheap]
-    (let [conf (:conf supervisor)
-          run-worker-as-user (conf SUPERVISOR-RUN-WORKER-AS-USER)
-          storm-home (System/getProperty "storm.home")
-          storm-options (System/getProperty "storm.options")
-          storm-conf-file (System/getProperty "storm.conf.file")
-          storm-log-dir LOG-DIR
-          storm-log-conf-dir (conf STORM-LOG4J2-CONF-DIR)
-          storm-log4j2-conf-dir (if storm-log-conf-dir
-                                  (if (is-absolute-path? storm-log-conf-dir)
-                                    storm-log-conf-dir
-                                    (str storm-home file-path-separator storm-log-conf-dir))
-                                  (str storm-home file-path-separator "log4j2"))
-          stormroot (supervisor-stormdist-root conf storm-id)
-          jlp (jlp stormroot conf)
-          stormjar (supervisor-stormjar-path stormroot)
-          storm-conf (read-supervisor-storm-conf conf storm-id)
-          topo-classpath (if-let [cp (storm-conf TOPOLOGY-CLASSPATH)]
-                           [cp]
-                           [])
-          classpath (-> (worker-classpath)
-                        (add-to-classpath [stormjar])
-                        (add-to-classpath topo-classpath))
-          top-gc-opts (storm-conf TOPOLOGY-WORKER-GC-CHILDOPTS)
-          mem-onheap (if (and mem-onheap (> mem-onheap 0)) ;; not nil and not zero
-                       (int (Math/ceil mem-onheap)) ;; round up
-                       (storm-conf WORKER-HEAP-MEMORY-MB)) ;; otherwise use default value
-          gc-opts (substitute-childopts (if top-gc-opts top-gc-opts (conf WORKER-GC-CHILDOPTS)) worker-id storm-id port mem-onheap)
-          topo-worker-logwriter-childopts (storm-conf TOPOLOGY-WORKER-LOGWRITER-CHILDOPTS)
-          user (storm-conf TOPOLOGY-SUBMITTER-USER)
-          logfilename "worker.log"
-          workers-artifacts (worker-artifacts-root conf)
-          logging-sensitivity (storm-conf TOPOLOGY-LOGGING-SENSITIVITY "S3")
-          worker-childopts (when-let [s (conf WORKER-CHILDOPTS)]
-                             (substitute-childopts s worker-id storm-id port mem-onheap))
-          topo-worker-childopts (when-let [s (storm-conf TOPOLOGY-WORKER-CHILDOPTS)]
-                                  (substitute-childopts s worker-id storm-id port mem-onheap))
-          worker--profiler-childopts (if (conf WORKER-PROFILER-ENABLED)
-                                       (substitute-childopts (conf WORKER-PROFILER-CHILDOPTS) worker-id storm-id port mem-onheap)
-                                       "")
-          topology-worker-environment (if-let [env (storm-conf TOPOLOGY-ENVIRONMENT)]
-                                        (merge env {"LD_LIBRARY_PATH" jlp})
-                                        {"LD_LIBRARY_PATH" jlp})
-          command (concat
-                    [(java-cmd) "-cp" classpath 
-                     topo-worker-logwriter-childopts
-                     (str "-Dlogfile.name=" logfilename)
-                     (str "-Dstorm.home=" storm-home)
-                     (str "-Dworkers.artifacts=" workers-artifacts)
-                     (str "-Dstorm.id=" storm-id)
-                     (str "-Dworker.id=" worker-id)
-                     (str "-Dworker.port=" port)
-                     (str "-Dstorm.log.dir=" storm-log-dir)
-                     (str "-Dlog4j.configurationFile=" storm-log4j2-conf-dir file-path-separator "worker.xml")
-                     (str "-DLog4jContextSelector=org.apache.logging.log4j.core.selector.BasicContextSelector")
-                     "backtype.storm.LogWriter"]
-                    [(java-cmd) "-server"]
-                    worker-childopts
-                    topo-worker-childopts
-                    gc-opts
-                    worker--profiler-childopts
-                    [(str "-Djava.library.path=" jlp)
-                     (str "-Dlogfile.name=" logfilename)
-                     (str "-Dstorm.home=" storm-home)
-                     (str "-Dworkers.artifacts=" workers-artifacts)
-                     (str "-Dstorm.conf.file=" storm-conf-file)
-                     (str "-Dstorm.options=" storm-options)
-                     (str "-Dstorm.log.dir=" storm-log-dir)
-                     (str "-Dlogging.sensitivity=" logging-sensitivity)
-                     (str "-Dlog4j.configurationFile=" storm-log4j2-conf-dir file-path-separator "worker.xml")
-                     (str "-DLog4jContextSelector=org.apache.logging.log4j.core.selector.BasicContextSelector")
-                     (str "-Dstorm.id=" storm-id)
-                     (str "-Dworker.id=" worker-id)
-                     (str "-Dworker.port=" port)
-                     "-cp" classpath
-                     "backtype.storm.daemon.worker"
-                     storm-id
-                     (:assignment-id supervisor)
-                     port
-                     worker-id])
-          command (->> command (map str) (filter (complement empty?)))]
-      (log-message "Launching worker with command: " (shell-cmd command))
-      (write-log-metadata! storm-conf user worker-id storm-id port conf)
-      (set-worker-user! conf worker-id user)
-      (create-artifacts-link conf storm-id port worker-id)
-      (let [log-prefix (str "Worker Process " worker-id)
-            callback (fn [exit-code]
-                       (log-message log-prefix " exited with code: " exit-code)
-                       (add-dead-worker worker-id))
-            worker-dir (worker-root conf worker-id)]
-        (remove-dead-worker worker-id)
-        (create-blobstore-links conf storm-id worker-id)
-        (if run-worker-as-user
-          (worker-launcher conf user ["worker" worker-dir (write-script worker-dir command :environment topology-worker-environment)] :log-prefix log-prefix :exit-code-callback callback :directory (File. worker-dir))
-          (launch-process command :environment topology-worker-environment :log-prefix log-prefix :exit-code-callback callback :directory (File. worker-dir)))
-        )))
-
-;; local implementation
-
-(defn resources-jar []
-  (->> (.split (current-classpath) File/pathSeparator)
-       (filter #(.endsWith  % ".jar"))
-       (filter #(zip-contains-dir? % RESOURCES-SUBDIR))
-       first ))
-
-(defmethod download-storm-code
-  :local [conf storm-id master-code-dir localizer]
-  (let [tmproot (str (supervisor-tmp-dir conf) file-path-separator (uuid))
-        stormroot (supervisor-stormdist-root conf storm-id)
-        blob-store (Utils/getNimbusBlobStore conf master-code-dir nil)]
-    (try
-      (FileUtils/forceMkdir (File. tmproot))
-      (.readBlobTo blob-store (master-stormcode-key storm-id) (FileOutputStream. (supervisor-stormcode-path tmproot)) nil)
-      (.readBlobTo blob-store (master-stormconf-key storm-id) (FileOutputStream. (supervisor-stormconf-path tmproot)) nil)
-      (finally
-        (.shutdown blob-store)))
-    (FileUtils/moveDirectory (File. tmproot) (File. stormroot))
-    (setup-storm-code-dir conf (read-supervisor-storm-conf conf storm-id) stormroot)
-    (let [classloader (.getContextClassLoader (Thread/currentThread))
-          resources-jar (resources-jar)
-          url (.getResource classloader RESOURCES-SUBDIR)
-          target-dir (str stormroot file-path-separator RESOURCES-SUBDIR)]
-      (cond
-        resources-jar
-        (do
-          (log-message "Extracting resources from jar at " resources-jar " to " target-dir)
-          (extract-dir-from-jar resources-jar RESOURCES-SUBDIR stormroot))
-        url
-        (do
-          (log-message "Copying resources at " (str url) " to " target-dir)
-          (FileUtils/copyDirectory (File. (.getFile url)) (File. target-dir)))))))
-
-(defmethod launch-worker
-    :local [supervisor storm-id port worker-id mem-onheap]
-    (let [conf (:conf supervisor)
-          pid (uuid)
-          worker (worker/mk-worker conf
-                                   (:shared-context supervisor)
-                                   storm-id
-                                   (:assignment-id supervisor)
-                                   port
-                                   worker-id)]
-      (set-worker-user! conf worker-id "")
-      (psim/register-process pid worker)
-      (swap! (:worker-thread-pids-atom supervisor) assoc worker-id pid)
-      ))
-
-(defn -launch
-  [supervisor]
-  (log-message "Starting supervisor for storm version '" STORM-VERSION "'")
-  (let [conf (read-storm-config)]
-    (validate-distributed-mode! conf)
-    (let [supervisor (mk-supervisor conf nil supervisor)]
-      (add-shutdown-hook-with-force-kill-in-1-sec #(.shutdown supervisor)))
-    (defgauge supervisor:num-slots-used-gauge #(count (my-worker-ids conf)))
-    (start-metrics-reporters)))
-
-(defn standalone-supervisor []
-  (let [conf-atom (atom nil)
-        id-atom (atom nil)]
-    (reify ISupervisor
-      (prepare [this conf local-dir]
-        (reset! conf-atom conf)
-        (let [state (LocalState. local-dir)
-              curr-id (if-let [id (ls-supervisor-id state)]
-                        id
-                        (generate-supervisor-id))]
-          (ls-supervisor-id! state curr-id)
-          (reset! id-atom curr-id))
-        )
-      (confirmAssigned [this port]
-        true)
-      (getMetadata [this]
-        (doall (map int (get @conf-atom SUPERVISOR-SLOTS-PORTS))))
-      (getSupervisorId [this]
-        @id-atom)
-      (getAssignmentId [this]
-        @id-atom)
-      (killedWorker [this port]
-        )
-      (assigned [this ports]
-        ))))
-
-(defn -main []
-  (setup-default-uncaught-exception-handler)
-  (-launch (standalone-supervisor)))
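
As a hedged illustration of the %-placeholder expansion performed by the substitute-childopts helper defined above (the call and its arguments below are hypothetical and not part of this commit), a string value is expanded against the replacement map and then split on whitespace into a vector of JVM options:

    ;; illustrative only: exercising substitute-childopts from the deleted file above
    (substitute-childopts "-Xmx%HEAP-MEM%m -XX:ErrorFile=%TOPOLOGY-ID%/%WORKER-PORT%/hs_err.log"
                          "worker-uuid" "topo-1" 6700 768)
    ;; => ["-Xmx768m" "-XX:ErrorFile=topo-1/6700/hs_err.log"]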

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/daemon/task.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/daemon/task.clj b/storm-core/src/clj/backtype/storm/daemon/task.clj
deleted file mode 100644
index 7133fdf..0000000
--- a/storm-core/src/clj/backtype/storm/daemon/task.clj
+++ /dev/null
@@ -1,189 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.daemon.task
-  (:use [backtype.storm.daemon common])
-  (:use [backtype.storm config util log])
-  (:import [backtype.storm.hooks ITaskHook])
-  (:import [backtype.storm.tuple Tuple TupleImpl])
-  (:import [backtype.storm.grouping LoadMapping])
-  (:import [backtype.storm.generated SpoutSpec Bolt StateSpoutSpec StormTopology])
-  (:import [backtype.storm.hooks.info SpoutAckInfo SpoutFailInfo
-            EmitInfo BoltFailInfo BoltAckInfo])
-  (:import [backtype.storm.task TopologyContext ShellBolt WorkerTopologyContext])
-  (:import [backtype.storm.utils Utils])
-  (:import [backtype.storm.generated ShellComponent JavaObject])
-  (:import [backtype.storm.spout ShellSpout])
-  (:import [java.util Collection List ArrayList])
-  (:require [backtype.storm
-             [thrift :as thrift]
-             [stats :as stats]])
-  (:require [backtype.storm.daemon.builtin-metrics :as builtin-metrics]))
-
-(defn mk-topology-context-builder [worker executor-data topology]
-  (let [conf (:conf worker)]
-    #(TopologyContext.
-      topology
-      (:storm-conf worker)
-      (:task->component worker)
-      (:component->sorted-tasks worker)
-      (:component->stream->fields worker)
-      (:storm-id worker)
-      (supervisor-storm-resources-path
-        (supervisor-stormdist-root conf (:storm-id worker)))
-      (worker-pids-root conf (:worker-id worker))
-      (int %)
-      (:port worker)
-      (:task-ids worker)
-      (:default-shared-resources worker)
-      (:user-shared-resources worker)
-      (:shared-executor-data executor-data)
-      (:interval->task->metric-registry executor-data)
-      (:open-or-prepare-was-called? executor-data))))
-
-(defn system-topology-context [worker executor-data tid]
-  ((mk-topology-context-builder
-    worker
-    executor-data
-    (:system-topology worker))
-   tid))
-
-(defn user-topology-context [worker executor-data tid]
-  ((mk-topology-context-builder
-    worker
-    executor-data
-    (:topology worker))
-   tid))
-
-(defn- get-task-object [^StormTopology topology component-id]
-  (let [spouts (.get_spouts topology)
-        bolts (.get_bolts topology)
-        state-spouts (.get_state_spouts topology)
-        obj (Utils/getSetComponentObject
-             (cond
-              (contains? spouts component-id) (.get_spout_object ^SpoutSpec (get spouts component-id))
-              (contains? bolts component-id) (.get_bolt_object ^Bolt (get bolts component-id))
-              (contains? state-spouts component-id) (.get_state_spout_object ^StateSpoutSpec (get state-spouts component-id))
-              true (throw-runtime "Could not find " component-id " in " topology)))
-        obj (if (instance? ShellComponent obj)
-              (if (contains? spouts component-id)
-                (ShellSpout. obj)
-                (ShellBolt. obj))
-              obj )
-        obj (if (instance? JavaObject obj)
-              (thrift/instantiate-java-object obj)
-              obj )]
-    obj
-    ))
-
-(defn get-context-hooks [^TopologyContext context]
-  (.getHooks context))
-
-(defn hooks-empty? [^Collection hooks]
-  (.isEmpty hooks))
-
-(defmacro apply-hooks [topology-context method-sym info-form]
-  (let [hook-sym (with-meta (gensym "hook") {:tag 'backtype.storm.hooks.ITaskHook})]
-    `(let [hooks# (get-context-hooks ~topology-context)]
-       (when-not (hooks-empty? hooks#)
-         (let [info# ~info-form]
-           (fast-list-iter [~hook-sym hooks#]
-             (~method-sym ~hook-sym info#)
-             ))))))
-
-
-;; TODO: this is all expensive... should be precomputed
-(defn send-unanchored
-  [task-data stream values]
-    (let [^TopologyContext topology-context (:system-context task-data)
-          tasks-fn (:tasks-fn task-data)
-          transfer-fn (-> task-data :executor-data :transfer-fn)
-          out-tuple (TupleImpl. topology-context
-                                 values
-                                 (.getThisTaskId topology-context)
-                                 stream)]
-      (fast-list-iter [t (tasks-fn stream values)]
-        (transfer-fn t out-tuple))))
-
-(defn mk-tasks-fn [task-data]
-  (let [task-id (:task-id task-data)
-        executor-data (:executor-data task-data)
-        ^LoadMapping load-mapping (:load-mapping (:worker executor-data))
-        component-id (:component-id executor-data)
-        ^WorkerTopologyContext worker-context (:worker-context executor-data)
-        storm-conf (:storm-conf executor-data)
-        emit-sampler (mk-stats-sampler storm-conf)
-        stream->component->grouper (:stream->component->grouper executor-data)
-        user-context (:user-context task-data)
-        executor-stats (:stats executor-data)
-        debug? (= true (storm-conf TOPOLOGY-DEBUG))]
-        
-    (fn ([^Integer out-task-id ^String stream ^List values]
-          (when debug?
-            (log-message "Emitting direct: " out-task-id "; " component-id " " stream " " values))
-          (let [target-component (.getComponentId worker-context out-task-id)
-                component->grouping (get stream->component->grouper stream)
-                grouping (get component->grouping target-component)
-                out-task-id (if grouping out-task-id)]
-            (when (and (not-nil? grouping) (not= :direct grouping))
-              (throw (IllegalArgumentException. "Cannot emitDirect to a task expecting a regular grouping")))                          
-            (apply-hooks user-context .emit (EmitInfo. values stream task-id [out-task-id]))
-            (when (emit-sampler)
-              (stats/emitted-tuple! executor-stats stream)
-              (if out-task-id
-                (stats/transferred-tuples! executor-stats stream 1)))
-            (if out-task-id [out-task-id])
-            ))
-        ([^String stream ^List values]
-           (when debug?
-             (log-message "Emitting: " component-id " " stream " " values))
-           (let [out-tasks (ArrayList.)]
-             (fast-map-iter [[out-component grouper] (get stream->component->grouper stream)]
-               (when (= :direct grouper)
-                  ;;  TODO: this is wrong, need to check how the stream was declared
-                  (throw (IllegalArgumentException. "Cannot do regular emit to direct stream")))
-               (let [comp-tasks (grouper task-id values load-mapping)]
-                 (if (or (sequential? comp-tasks) (instance? Collection comp-tasks))
-                   (.addAll out-tasks comp-tasks)
-                   (.add out-tasks comp-tasks)
-                   )))
-             (apply-hooks user-context .emit (EmitInfo. values stream task-id out-tasks))
-             (when (emit-sampler)
-               (stats/emitted-tuple! executor-stats stream)
-               (stats/transferred-tuples! executor-stats stream (count out-tasks)))
-             out-tasks)))
-    ))
-
-(defn mk-task-data [executor-data task-id]
-  (recursive-map
-    :executor-data executor-data
-    :task-id task-id
-    :system-context (system-topology-context (:worker executor-data) executor-data task-id)
-    :user-context (user-topology-context (:worker executor-data) executor-data task-id)
-    :builtin-metrics (builtin-metrics/make-data (:type executor-data) (:stats executor-data))
-    :tasks-fn (mk-tasks-fn <>)
-    :object (get-task-object (.getRawTopology ^TopologyContext (:system-context <>)) (:component-id executor-data))))
-
-
-(defn mk-task [executor-data task-id]
-  (let [task-data (mk-task-data executor-data task-id)
-        storm-conf (:storm-conf executor-data)]
-    (doseq [klass (storm-conf TOPOLOGY-AUTO-TASK-HOOKS)]
-      (.addTaskHook ^TopologyContext (:user-context task-data) (-> klass Class/forName .newInstance)))
-    ;; when this is called, the threads for the executor haven't been started yet,
-    ;; so we won't be risking trampling on the single-threaded claim strategy disruptor queue
-    (send-unanchored task-data SYSTEM-STREAM-ID ["startup"])
-    task-data
-    ))


[50/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/DEVELOPER.md
----------------------------------------------------------------------
diff --git a/DEVELOPER.md b/DEVELOPER.md
index 7a98ead..5fc1c36 100644
--- a/DEVELOPER.md
+++ b/DEVELOPER.md
@@ -133,8 +133,8 @@ To mark a Java test as a Java integration test, add the annotation `@Category(In
  
 To mark a Clojure test as a Clojure integration test, the test source must be located in a package whose name is prefixed by `integration.`
 
-For example, the test `test/clj/backtype.storm.drpc_test.clj` is considered a clojure unit test, whereas
- `test/clj/integration.backtype.storm.drpc_test.clj` is considered a clojure integration test.
+For example, the test `test/clj/org.apache.storm.drpc_test.clj` is considered a clojure unit test, whereas
+ `test/clj/integration.org.apache.storm.drpc_test.clj` is considered a clojure integration test.
 
 Please refer to the section <a href="#building">Build the code and run the tests</a> for how to run integration tests, and for details on which build phase each test runs in.
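
As a hedged illustration of the Java-side convention referenced in the hunk header above, the sketch below marks a JUnit test with a category annotation. The category marker name is a local stand-in, since the real class name is truncated in the hunk header; it is not taken from the Storm sources.

    import org.junit.Test;
    import org.junit.experimental.categories.Category;

    public class ExampleIntegrationTest {
        // Local stand-in marker; the actual category class used by Storm is
        // truncated in the hunk header above, so it is not reproduced here.
        public interface IntegrationTest {}

        @Category(IntegrationTest.class)
        @Test
        public void exercisesAFullTopologyPath() {
            // integration-level assertions would go here
        }
    }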
 
@@ -301,8 +301,8 @@ To run all unit tests and all integration tests execute one of the commands
  
  
 You can also run tests selectively via the Clojure REPL.  The following example runs the tests in
-[auth_test.clj](storm-core/test/clj/backtype/storm/security/auth/auth_test.clj), which has the namespace
-`backtype.storm.security.auth.auth-test`.
+[auth_test.clj](storm-core/test/clj/org/apache/storm/security/auth/auth_test.clj), which has the namespace
+`org.apache.storm.security.auth.auth-test`.
 
 You can also run tests selectively with `-Dtest=<test_name>`.  This works for both clojure and junit tests.
 
@@ -360,8 +360,8 @@ Tests should never rely on timing in order to pass.  Storm can properly test fun
 simulating time, which means we do not have to worry about, e.g., random delays failing our tests nondeterministically.
 
 If you are testing topologies that do not do full tuple acking, then you should be testing using the "tracked
-topologies" utilities in `backtype.storm.testing.clj`.  For example,
-[test-acking](storm-core/test/clj/backtype/storm/integration_test.clj) (around line 213) tests the acking system in
+topologies" utilities in `org.apache.storm.testing.clj`.  For example,
+[test-acking](storm-core/test/clj/org/apache/storm/integration_test.clj) (around line 213) tests the acking system in
 Storm using tracked topologies.  Here, the key is the `tracked-wait` function: it will only return when both that many
 tuples have been emitted by the spouts _and_ the topology is idle (i.e. no tuples have been emitted nor will be emitted
 without further input).  Note that you should not use tracked topologies for topologies that have tick tuples.

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/SECURITY.md
----------------------------------------------------------------------
diff --git a/SECURITY.md b/SECURITY.md
index 6d6c825..e9966b6 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -172,7 +172,7 @@ Each jaas file may have multiple sections for different interfaces being used.
 
 To enable Kerberos authentication in storm you need to set the following `storm.yaml` configs
 ```yaml
-storm.thrift.transport: "backtype.storm.security.auth.kerberos.KerberosSaslTransportPlugin"
+storm.thrift.transport: "org.apache.storm.security.auth.kerberos.KerberosSaslTransportPlugin"
 java.security.auth.login.config: "/path/to/jaas.conf"
 ```
 
@@ -275,7 +275,7 @@ Server {
 Nimbus will also translate the principal into a local user name, so that other services can use this name.  To configure this for Kerberos authentication, set
 
 ```
-storm.principal.tolocal: "backtype.storm.security.auth.KerberosPrincipalToLocal"
+storm.principal.tolocal: "org.apache.storm.security.auth.KerberosPrincipalToLocal"
 ```
 
 This only needs to be done on nimbus, but it will not hurt on any node.
@@ -324,7 +324,7 @@ The end user can override this if they have a headless user that has a keytab.
 The preferred authorization plug-in for nimbus is the *SimpleACLAuthorizer*.  To use the *SimpleACLAuthorizer*, set the following:
 
 ```yaml
-nimbus.authorizer: "backtype.storm.security.auth.authorizer.SimpleACLAuthorizer"
+nimbus.authorizer: "org.apache.storm.security.auth.authorizer.SimpleACLAuthorizer"
 ```
 
 DRPC has its own, separate authorizer configuration.  Do not use SimpleACLAuthorizer for DRPC.
@@ -349,7 +349,7 @@ To ensure isolation of users in multi-tenancy, the supervisors must run under a
 
 To support multi-tenancy better we have written a new scheduler.  To enable this scheduler set:
 ```yaml
-storm.scheduler: "backtype.storm.scheduler.multitenant.MultitenantScheduler"
+storm.scheduler: "org.apache.storm.scheduler.multitenant.MultitenantScheduler"
 ```
 Be aware that many of the features of this scheduler rely on storm authentication.  Without storm authentication, the scheduler will not know who the user is, and thus will not isolate topologies properly.
 
@@ -392,11 +392,11 @@ A storm client may submit requests on behalf of another user. For example, if a
 it can do so by leveraging the impersonation feature. In order to submit a topology as some other user, you can use the `StormSubmitter.submitTopologyAs` API. Alternatively you can use `NimbusClient.getConfiguredClientAs`
 to get a nimbus client as some other user and perform any nimbus action (i.e., kill/rebalance/activate/deactivate) using this client.
 
-To ensure only authorized users can perform impersonation, you should start nimbus with `nimbus.impersonation.authorizer` set to `backtype.storm.security.auth.authorizer.ImpersonationAuthorizer`.
+To ensure only authorized users can perform impersonation, you should start nimbus with `nimbus.impersonation.authorizer` set to `org.apache.storm.security.auth.authorizer.ImpersonationAuthorizer`.
 The `ImpersonationAuthorizer` uses `nimbus.impersonation.acl` as the acl to authorize users. Following is a sample nimbus config for supporting impersonation:
 
 ```yaml
-nimbus.impersonation.authorizer: backtype.storm.security.auth.authorizer.ImpersonationAuthorizer
+nimbus.impersonation.authorizer: org.apache.storm.security.auth.authorizer.ImpersonationAuthorizer
 nimbus.impersonation.acl:
     impersonating_user1:
         hosts:
@@ -425,7 +425,7 @@ Individual topologies have the ability to push credentials (tickets and tokens)
 To hide this from them, in the common case plugins can be used to populate the credentials, unpack them on the other side into a java Subject, and also allow Nimbus to renew the credentials if needed.
 These are controlled by the following configs:
 
-* `topology.auto-credentials`: a list of java plugins, all of which must implement IAutoCredentials interface, that populate the credentials on gateway and unpack them on the worker side. On a kerberos secure cluster they should be set by default to point to `backtype.storm.security.auth.kerberos.AutoTGT`.  `nimbus.credential.renewers.classes` should also be set to this value so that nimbus can periodically renew the TGT on behalf of the user.
+* `topology.auto-credentials`: a list of java plugins, all of which must implement IAutoCredentials interface, that populate the credentials on gateway and unpack them on the worker side. On a kerberos secure cluster they should be set by default to point to `org.apache.storm.security.auth.kerberos.AutoTGT`.  `nimbus.credential.renewers.classes` should also be set to this value so that nimbus can periodically renew the TGT on behalf of the user.
 * `nimbus.credential.renewers.freq.secs`: controls how often the renewer will poll to see if anything needs to be renewed, but the default should be fine.
 
 In addition, Nimbus itself can be used to get credentials on behalf of the user submitting topologies. This can be configured using:
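
(The specific nimbus-side configs that sentence introduces sit outside this hunk and are not reproduced here.) As a rough, illustrative sketch of the impersonation client API described earlier in this section, the snippet below obtains a nimbus client acting as another user and kills a topology through it. The exact `getConfiguredClientAs` signature is assumed from the prose above, so treat this as a sketch rather than a verbatim excerpt from the Storm sources.

    import java.util.Map;

    import org.apache.storm.utils.NimbusClient;
    import org.apache.storm.utils.Utils;

    public class ImpersonatedKill {
        public static void main(String[] args) throws Exception {
            Map conf = Utils.readStormConfig();
            // Assumed two-argument form: cluster config plus the user to act as.
            NimbusClient nimbus = NimbusClient.getConfiguredClientAs(conf, "someuser");
            // Any nimbus action (kill/rebalance/activate/deactivate) can go through this client.
            nimbus.getClient().killTopology(args[0]);
        }
    }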

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/bin/storm-config.cmd
----------------------------------------------------------------------
diff --git a/bin/storm-config.cmd b/bin/storm-config.cmd
index d259e30..cb1e203 100644
--- a/bin/storm-config.cmd
+++ b/bin/storm-config.cmd
@@ -86,7 +86,7 @@ if not defined STORM_LOG_DIR (
 @rem retrieve storm.log4j2.conf.dir from conf file
 @rem
 
-"%JAVA%" -client -Dstorm.options= -Dstorm.conf.file= -cp "%CLASSPATH%" backtype.storm.command.config_value storm.log4j2.conf.dir > %CMD_TEMP_FILE%
+"%JAVA%" -client -Dstorm.options= -Dstorm.conf.file= -cp "%CLASSPATH%" org.apache.storm.command.config_value storm.log4j2.conf.dir > %CMD_TEMP_FILE%
 
 FOR /F "delims=" %%i in (%CMD_TEMP_FILE%) do (
 	FOR /F "tokens=1,* delims= " %%a in ("%%i") do (
@@ -113,7 +113,7 @@ if not defined STORM_LOG4J2_CONFIGURATION_FILE (
   set STORM_LOG4J2_CONFIGURATION_FILE="file://%STORM_HOME%\log4j2\cluster.xml"
 )
 
-"%JAVA%" -client -Dstorm.options= -Dstorm.conf.file= -cp "%CLASSPATH%" backtype.storm.command.config_value java.library.path > %CMD_TEMP_FILE%
+"%JAVA%" -client -Dstorm.options= -Dstorm.conf.file= -cp "%CLASSPATH%" org.apache.storm.command.config_value java.library.path > %CMD_TEMP_FILE%
 
 FOR /F "delims=" %%i in (%CMD_TEMP_FILE%) do (
     FOR /F "tokens=1,* delims= " %%a in ("%%i") do (

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/bin/storm.cmd
----------------------------------------------------------------------
diff --git a/bin/storm.cmd b/bin/storm.cmd
index ad1a81f..ee125e5 100644
--- a/bin/storm.cmd
+++ b/bin/storm.cmd
@@ -94,7 +94,7 @@
 
 
 :activate
-  set CLASS=backtype.storm.command.activate
+  set CLASS=org.apache.storm.command.activate
   set STORM_OPTS=%STORM_CLIENT_OPTS% %STORM_OPTS%
   goto :eof
 
@@ -103,18 +103,18 @@
   goto :eof
 
 :deactivate
-  set CLASS=backtype.storm.command.deactivate
+  set CLASS=org.apache.storm.command.deactivate
   set STORM_OPTS=%STORM_CLIENT_OPTS% %STORM_OPTS%
   goto :eof
 
 :dev-zookeeper
-  set CLASS=backtype.storm.command.dev_zookeeper
+  set CLASS=org.apache.storm.command.dev_zookeeper
   set STORM_OPTS=%STORM_SERVER_OPTS% %STORM_OPTS%
   goto :eof
 
 :drpc
-  set CLASS=backtype.storm.daemon.drpc
-  "%JAVA%" -client -Dstorm.options= -Dstorm.conf.file= -cp "%CLASSPATH%" backtype.storm.command.config_value drpc.childopts > %CMD_TEMP_FILE%
+  set CLASS=org.apache.storm.daemon.drpc
+  "%JAVA%" -client -Dstorm.options= -Dstorm.conf.file= -cp "%CLASSPATH%" org.apache.storm.command.config_value drpc.childopts > %CMD_TEMP_FILE%
   FOR /F "delims=" %%i in (%CMD_TEMP_FILE%) do (
      FOR /F "tokens=1,* delims= " %%a in ("%%i") do (
 	  if %%a == VALUE: (
@@ -129,18 +129,18 @@
   goto :eof
 
 :kill
-  set CLASS=backtype.storm.command.kill_topology
+  set CLASS=org.apache.storm.command.kill_topology
   set STORM_OPTS=%STORM_CLIENT_OPTS% %STORM_OPTS%
   goto :eof
 
 :list
-  set CLASS=backtype.storm.command.list
+  set CLASS=org.apache.storm.command.list
   set STORM_OPTS=%STORM_CLIENT_OPTS% %STORM_OPTS%
   goto :eof
 
 :logviewer
-  set CLASS=backtype.storm.daemon.logviewer
-   "%JAVA%" -client -Dstorm.options= -Dstorm.conf.file= -cp "%CLASSPATH%" backtype.storm.command.config_value logviewer.childopts > %CMD_TEMP_FILE%
+  set CLASS=org.apache.storm.daemon.logviewer
+   "%JAVA%" -client -Dstorm.options= -Dstorm.conf.file= -cp "%CLASSPATH%" org.apache.storm.command.config_value logviewer.childopts > %CMD_TEMP_FILE%
   FOR /F "delims=" %%i in (%CMD_TEMP_FILE%) do (
      FOR /F "tokens=1,* delims= " %%a in ("%%i") do (
 	  if %%a == VALUE: (
@@ -151,8 +151,8 @@
   goto :eof
 
 :nimbus
-  set CLASS=backtype.storm.daemon.nimbus
-  "%JAVA%" -client -Dstorm.options= -Dstorm.conf.file= -cp "%CLASSPATH%" backtype.storm.command.config_value nimbus.childopts > %CMD_TEMP_FILE%
+  set CLASS=org.apache.storm.daemon.nimbus
+  "%JAVA%" -client -Dstorm.options= -Dstorm.conf.file= -cp "%CLASSPATH%" org.apache.storm.command.config_value nimbus.childopts > %CMD_TEMP_FILE%
   FOR /F "delims=" %%i in (%CMD_TEMP_FILE%) do (
      FOR /F "tokens=1,* delims= " %%a in ("%%i") do (
 	  if %%a == VALUE: (
@@ -163,12 +163,12 @@
   goto :eof
 
 :rebalance
-  set CLASS=backtype.storm.command.rebalance
+  set CLASS=org.apache.storm.command.rebalance
   set STORM_OPTS=%STORM_CLIENT_OPTS% %STORM_OPTS%
   goto :eof
 
 :remoteconfvalue
-  set CLASS=backtype.storm.command.config_value
+  set CLASS=org.apache.storm.command.config_value
   set STORM_OPTS=%STORM_CLIENT_OPTS% %STORM_OPTS%
   goto :eof
 
@@ -178,13 +178,13 @@
   goto :eof
 
 :shell
-  set CLASS=backtype.storm.command.shell_submission
+  set CLASS=org.apache.storm.command.shell_submission
   set STORM_OPTS=%STORM_CLIENT_OPTS% %STORM_OPTS% 
   goto :eof
   
 :supervisor
-  set CLASS=backtype.storm.daemon.supervisor
-  "%JAVA%" -client -Dstorm.options= -Dstorm.conf.file= -cp "%CLASSPATH%" backtype.storm.command.config_value supervisor.childopts > %CMD_TEMP_FILE%
+  set CLASS=org.apache.storm.daemon.supervisor
+  "%JAVA%" -client -Dstorm.options= -Dstorm.conf.file= -cp "%CLASSPATH%" org.apache.storm.command.config_value supervisor.childopts > %CMD_TEMP_FILE%
   FOR /F "delims=" %%i in (%CMD_TEMP_FILE%) do (
      FOR /F "tokens=1,* delims= " %%a in ("%%i") do (
 	  if %%a == VALUE: (
@@ -195,9 +195,9 @@
   goto :eof
 
 :ui
-  set CLASS=backtype.storm.ui.core
+  set CLASS=org.apache.storm.ui.core
   set CLASSPATH=%CLASSPATH%;%STORM_HOME%
-  "%JAVA%" -client -Dstorm.options= -Dstorm.conf.file= -cp "%CLASSPATH%" backtype.storm.command.config_value ui.childopts > %CMD_TEMP_FILE%
+  "%JAVA%" -client -Dstorm.options= -Dstorm.conf.file= -cp "%CLASSPATH%" org.apache.storm.command.config_value ui.childopts > %CMD_TEMP_FILE%
   FOR /F "delims=" %%i in (%CMD_TEMP_FILE%) do (
      FOR /F "tokens=1,* delims= " %%a in ("%%i") do (
 	  if %%a == VALUE: (
@@ -208,7 +208,7 @@
   goto :eof
 
 :version
-  set CLASS=backtype.storm.utils.VersionInfo
+  set CLASS=org.apache.storm.utils.VersionInfo
   set STORM_OPTS=%STORM_CLIENT_OPTS% %STORM_OPTS%
   goto :eof
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/bin/storm.py
----------------------------------------------------------------------
diff --git a/bin/storm.py b/bin/storm.py
index e59b55a..80fd958 100755
--- a/bin/storm.py
+++ b/bin/storm.py
@@ -137,7 +137,7 @@ def confvalue(name, extrapaths, daemon=True):
     global CONFFILE
     command = [
         JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE,
-        "-cp", get_classpath(extrapaths, daemon), "backtype.storm.command.config_value", name
+        "-cp", get_classpath(extrapaths, daemon), "org.apache.storm.command.config_value", name
     ]
     p = sub.Popen(command, stdout=sub.PIPE)
     output, errors = p.communicate()
@@ -225,13 +225,13 @@ def jar(jarfile, klass, *args):
     Runs the main method of class with the specified arguments.
     The storm jars and configs in ~/.storm are put on the classpath.
     The process is configured so that StormSubmitter
-    (http://storm.apache.org/apidocs/backtype/storm/StormSubmitter.html)
+    (http://storm.apache.org/apidocs/org/apache/storm/StormSubmitter.html)
     will upload the jar at topology-jar-path when the topology is submitted.
     """
     transform_class = confvalue("client.jartransformer.class", [CLUSTER_CONF_DIR])
     if (transform_class != None and transform_class != "nil"):
         tmpjar = os.path.join(tempfile.gettempdir(), uuid.uuid1().hex+".jar")
-        exec_storm_class("backtype.storm.daemon.ClientJarTransformerRunner", args=[transform_class, jarfile, tmpjar], fork=True, daemon=False)
+        exec_storm_class("org.apache.storm.daemon.ClientJarTransformerRunner", args=[transform_class, jarfile, tmpjar], fork=True, daemon=False)
         exec_storm_class(
             klass,
             jvmtype="-client",
@@ -276,7 +276,7 @@ def kill(*args):
         print_usage(command="kill")
         sys.exit(2)
     exec_storm_class(
-        "backtype.storm.command.kill_topology",
+        "org.apache.storm.command.kill_topology",
         args=args,
         jvmtype="-client",
         extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
@@ -291,7 +291,7 @@ def upload_credentials(*args):
         print_usage(command="upload_credentials")
         sys.exit(2)
     exec_storm_class(
-        "backtype.storm.command.upload_credentials",
+        "org.apache.storm.command.upload_credentials",
         args=args,
         jvmtype="-client",
         extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
@@ -317,7 +317,7 @@ def blobstore(*args):
     storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
     """
     exec_storm_class(
-        "backtype.storm.command.blobstore",
+        "org.apache.storm.command.blobstore",
         args=args,
         jvmtype="-client",
         extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
@@ -329,7 +329,7 @@ def heartbeats(*args):
     get  PATH - Get the heartbeat data at PATH
     """
     exec_storm_class(
-        "backtype.storm.command.heartbeats",
+        "org.apache.storm.command.heartbeats",
         args=args,
         jvmtype="-client",
         extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
@@ -343,7 +343,7 @@ def activate(*args):
         print_usage(command="activate")
         sys.exit(2)
     exec_storm_class(
-        "backtype.storm.command.activate",
+        "org.apache.storm.command.activate",
         args=args,
         jvmtype="-client",
         extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
@@ -376,7 +376,7 @@ def set_log_level(*args):
         Clears settings, resetting back to the original level
     """
     exec_storm_class(
-        "backtype.storm.command.set_log_level",
+        "org.apache.storm.command.set_log_level",
         args=args,
         jvmtype="-client",
         extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
@@ -387,7 +387,7 @@ def listtopos(*args):
     List the running topologies and their statuses.
     """
     exec_storm_class(
-        "backtype.storm.command.list",
+        "org.apache.storm.command.list",
         args=args,
         jvmtype="-client",
         extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
@@ -401,7 +401,7 @@ def deactivate(*args):
         print_usage(command="deactivate")
         sys.exit(2)
     exec_storm_class(
-        "backtype.storm.command.deactivate",
+        "org.apache.storm.command.deactivate",
         args=args,
         jvmtype="-client",
         extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
@@ -431,7 +431,7 @@ def rebalance(*args):
         print_usage(command="rebalance")
         sys.exit(2)
     exec_storm_class(
-        "backtype.storm.command.rebalance",
+        "org.apache.storm.command.rebalance",
         args=args,
         jvmtype="-client",
         extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
@@ -447,7 +447,7 @@ def get_errors(*args):
         print_usage(command="get_errors")
         sys.exit(2)
     exec_storm_class(
-        "backtype.storm.command.get_errors",
+        "org.apache.storm.command.get_errors",
         args=args,
         jvmtype="-client",
         extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
@@ -458,7 +458,7 @@ def healthcheck(*args):
     Run health checks on the local supervisor.
     """
     exec_storm_class(
-        "backtype.storm.command.healthcheck",
+        "org.apache.storm.command.healthcheck",
         args=args,
         jvmtype="-client",
         extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
@@ -471,7 +471,7 @@ def kill_workers(*args):
     to have admin rights on the node to be able to successfully kill all workers.
     """
     exec_storm_class(
-        "backtype.storm.command.kill_workers",
+        "org.apache.storm.command.kill_workers",
         args=args,
         jvmtype="-client",
         extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
@@ -482,7 +482,7 @@ def shell(resourcesdir, command, *args):
     runnerargs = [tmpjarpath, command]
     runnerargs.extend(args)
     exec_storm_class(
-        "backtype.storm.command.shell_submission",
+        "org.apache.storm.command.shell_submission",
         args=runnerargs,
         jvmtype="-client",
         extrajars=[USER_CONF_DIR],
@@ -507,7 +507,7 @@ def get_log4j2_conf_dir():
         storm_log4j2_conf_dir = os.path.join(STORM_DIR, storm_log4j2_conf_dir)
     return storm_log4j2_conf_dir
 
-def nimbus(klass="backtype.storm.daemon.nimbus"):
+def nimbus(klass="org.apache.storm.daemon.nimbus"):
     """Syntax: [storm nimbus]
 
     Launches the nimbus daemon. This command should be run under
@@ -550,7 +550,7 @@ def pacemaker(klass="org.apache.storm.pacemaker.pacemaker"):
         extrajars=cppaths,
         jvmopts=jvmopts)
 
-def supervisor(klass="backtype.storm.daemon.supervisor"):
+def supervisor(klass="org.apache.storm.daemon.supervisor"):
     """Syntax: [storm supervisor]
 
     Launches the supervisor daemon. This command should be run
@@ -589,7 +589,7 @@ def ui():
         "-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
     ]
     exec_storm_class(
-        "backtype.storm.ui.core",
+        "org.apache.storm.ui.core",
         jvmtype="-server",
         daemonName="ui",
         jvmopts=jvmopts,
@@ -612,7 +612,7 @@ def logviewer():
         "-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
     ]
     exec_storm_class(
-        "backtype.storm.daemon.logviewer",
+        "org.apache.storm.daemon.logviewer",
         jvmtype="-server",
         daemonName="logviewer",
         jvmopts=jvmopts,
@@ -634,7 +634,7 @@ def drpc():
         "-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
     ]
     exec_storm_class(
-        "backtype.storm.daemon.drpc",
+        "org.apache.storm.daemon.drpc",
         jvmtype="-server",
         daemonName="drpc",
         jvmopts=jvmopts,
@@ -649,7 +649,7 @@ def dev_zookeeper():
     """
     cppaths = [CLUSTER_CONF_DIR]
     exec_storm_class(
-        "backtype.storm.command.dev_zookeeper",
+        "org.apache.storm.command.dev_zookeeper",
         jvmtype="-server",
         extrajars=[CLUSTER_CONF_DIR])
 
@@ -660,7 +660,7 @@ def version():
   """
   cppaths = [CLUSTER_CONF_DIR]
   exec_storm_class(
-       "backtype.storm.utils.VersionInfo",
+       "org.apache.storm.utils.VersionInfo",
        jvmtype="-client",
        extrajars=[CLUSTER_CONF_DIR])
 
@@ -683,7 +683,7 @@ def monitor(*args):
         watch-item is 'emitted';
     """
     exec_storm_class(
-        "backtype.storm.command.monitor",
+        "org.apache.storm.command.monitor",
         args=args,
         jvmtype="-client",
         extrajars=[USER_CONF_DIR, STORM_BIN_DIR])

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/conf/defaults.yaml
----------------------------------------------------------------------
diff --git a/conf/defaults.yaml b/conf/defaults.yaml
index b5c8b47..735a83e 100644
--- a/conf/defaults.yaml
+++ b/conf/defaults.yaml
@@ -39,11 +39,11 @@ storm.exhibitor.port: 8080
 storm.exhibitor.poll.uripath: "/exhibitor/v1/cluster/list"
 storm.cluster.mode: "distributed" # can be distributed or local
 storm.local.mode.zmq: false
-storm.thrift.transport: "backtype.storm.security.auth.SimpleTransportPlugin"
-storm.principal.tolocal: "backtype.storm.security.auth.DefaultPrincipalToLocal"
-storm.group.mapping.service: "backtype.storm.security.auth.ShellBasedGroupsMapping"
+storm.thrift.transport: "org.apache.storm.security.auth.SimpleTransportPlugin"
+storm.principal.tolocal: "org.apache.storm.security.auth.DefaultPrincipalToLocal"
+storm.group.mapping.service: "org.apache.storm.security.auth.ShellBasedGroupsMapping"
 storm.group.mapping.service.params: null
-storm.messaging.transport: "backtype.storm.messaging.netty.Context"
+storm.messaging.transport: "org.apache.storm.messaging.netty.Context"
 storm.nimbus.retry.times: 5
 storm.nimbus.retry.interval.millis: 2000
 storm.nimbus.retry.intervalceiling.millis: 60000
@@ -51,9 +51,9 @@ storm.auth.simple-white-list.users: []
 storm.auth.simple-acl.users: []
 storm.auth.simple-acl.users.commands: []
 storm.auth.simple-acl.admins: []
-storm.cluster.state.store: "backtype.storm.cluster_state.zookeeper_state_factory"
-storm.meta.serialization.delegate: "backtype.storm.serialization.GzipThriftSerializationDelegate"
-storm.codedistributor.class: "backtype.storm.codedistributor.LocalFileSystemCodeDistributor"
+storm.cluster.state.store: "org.apache.storm.cluster_state.zookeeper_state_factory"
+storm.meta.serialization.delegate: "org.apache.storm.serialization.GzipThriftSerializationDelegate"
+storm.codedistributor.class: "org.apache.storm.codedistributor.LocalFileSystemCodeDistributor"
 storm.workers.artifacts.dir: "workers-artifacts"
 storm.health.check.dir: "healthchecks"
 storm.health.check.timeout.ms: 5000
@@ -72,11 +72,11 @@ nimbus.inbox.jar.expiration.secs: 3600
 nimbus.code.sync.freq.secs: 120
 nimbus.task.launch.secs: 120
 nimbus.file.copy.expiration.secs: 600
-nimbus.topology.validator: "backtype.storm.nimbus.DefaultTopologyValidator"
+nimbus.topology.validator: "org.apache.storm.nimbus.DefaultTopologyValidator"
 topology.min.replication.count: 1
 topology.max.replication.wait.time.sec: 60
 nimbus.credential.renewers.freq.secs: 600
-nimbus.impersonation.authorizer: "backtype.storm.security.auth.authorizer.ImpersonationAuthorizer"
+nimbus.impersonation.authorizer: "org.apache.storm.security.auth.authorizer.ImpersonationAuthorizer"
 
 scheduler.display.resource: false
 
@@ -89,7 +89,7 @@ ui.filter: null
 ui.filter.params: null
 ui.users: null
 ui.header.buffer.bytes: 4096
-ui.http.creds.plugin: backtype.storm.security.auth.DefaultHttpCredentialsPlugin
+ui.http.creds.plugin: org.apache.storm.security.auth.DefaultHttpCredentialsPlugin
 
 logviewer.port: 8000
 logviewer.childopts: "-Xmx128m"
@@ -112,7 +112,7 @@ drpc.http.port: 3774
 drpc.https.port: -1
 drpc.https.keystore.password: ""
 drpc.https.keystore.type: "JKS"
-drpc.http.creds.plugin: backtype.storm.security.auth.DefaultHttpCredentialsPlugin
+drpc.http.creds.plugin: org.apache.storm.security.auth.DefaultHttpCredentialsPlugin
 drpc.authorizer.acl.filename: "drpc-auth-acl.yaml"
 drpc.authorizer.acl.strict: false
 
@@ -121,17 +121,17 @@ transactional.zookeeper.servers: null
 transactional.zookeeper.port: null
 
 ## blobstore configs
-supervisor.blobstore.class: "backtype.storm.blobstore.NimbusBlobStore"
+supervisor.blobstore.class: "org.apache.storm.blobstore.NimbusBlobStore"
 supervisor.blobstore.download.thread.count: 5
 supervisor.blobstore.download.max_retries: 3
 supervisor.localizer.cache.target.size.mb: 10240
 supervisor.localizer.cleanup.interval.ms: 600000
 
-nimbus.blobstore.class: "backtype.storm.blobstore.LocalFsBlobStore"
+nimbus.blobstore.class: "org.apache.storm.blobstore.LocalFsBlobStore"
 nimbus.blobstore.expiration.secs: 600
 
 storm.blobstore.inputstream.buffer.size.bytes: 65536
-client.blobstore.class: "backtype.storm.blobstore.NimbusBlobStore"
+client.blobstore.class: "org.apache.storm.blobstore.NimbusBlobStore"
 storm.blobstore.replication.factor: 3
 
 ### supervisor.* configs are for node supervisors
@@ -208,7 +208,7 @@ storm.messaging.netty.socket.backlog: 500
 storm.messaging.netty.authentication: false
 
 # Default plugin to use for automatic network topology discovery
-storm.network.topography.plugin: backtype.storm.networktopography.DefaultRackDNSToSwitchMapping
+storm.network.topography.plugin: org.apache.storm.networktopography.DefaultRackDNSToSwitchMapping
 
 # default number of seconds group mapping service will cache user group
 storm.group.mapping.service.cache.duration.secs: 120
@@ -222,7 +222,7 @@ topology.eventlogger.executors: null
 topology.tasks: null
 # maximum amount of time a message has to complete before it's considered failed
 topology.message.timeout.secs: 30
-topology.multilang.serializer: "backtype.storm.multilang.JsonSerializer"
+topology.multilang.serializer: "org.apache.storm.multilang.JsonSerializer"
 topology.shellbolt.max.pending: 100
 topology.skip.missing.kryo.registrations: false
 topology.max.task.parallelism: null
@@ -238,12 +238,12 @@ topology.executor.send.buffer.size: 1024 #individual messages
 topology.transfer.buffer.size: 1024 # batched
 topology.tick.tuple.freq.secs: null
 topology.worker.shared.thread.pool.size: 4
-topology.spout.wait.strategy: "backtype.storm.spout.SleepSpoutWaitStrategy"
+topology.spout.wait.strategy: "org.apache.storm.spout.SleepSpoutWaitStrategy"
 topology.sleep.spout.wait.strategy.time.ms: 1
 topology.error.throttle.interval.secs: 10
 topology.max.error.report.per.interval: 5
-topology.kryo.factory: "backtype.storm.serialization.DefaultKryoFactory"
-topology.tuple.serializer: "backtype.storm.serialization.types.ListDelegateSerializer"
+topology.kryo.factory: "org.apache.storm.serialization.DefaultKryoFactory"
+topology.tuple.serializer: "org.apache.storm.serialization.types.ListDelegateSerializer"
 topology.trident.batch.emit.interval.millis: 500
 topology.testing.always.try.serialize: false
 topology.classpath: null
@@ -262,9 +262,9 @@ topology.component.resources.onheap.memory.mb: 128.0
 topology.component.resources.offheap.memory.mb: 0.0
 topology.component.cpu.pcore.percent: 10.0
 topology.worker.max.heap.size.mb: 768.0
-topology.scheduler.strategy: "backtype.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy"
-resource.aware.scheduler.eviction.strategy: "backtype.storm.scheduler.resource.strategies.eviction.DefaultEvictionStrategy"
-resource.aware.scheduler.priority.strategy: "backtype.storm.scheduler.resource.strategies.priority.DefaultSchedulingPriorityStrategy"
+topology.scheduler.strategy: "org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy"
+resource.aware.scheduler.eviction.strategy: "org.apache.storm.scheduler.resource.strategies.eviction.DefaultEvictionStrategy"
+resource.aware.scheduler.priority.strategy: "org.apache.storm.scheduler.resource.strategies.priority.DefaultSchedulingPriorityStrategy"
 
 dev.zookeeper.path: "/tmp/dev-storm-zookeeper"
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/conf/storm.yaml.example
----------------------------------------------------------------------
diff --git a/conf/storm.yaml.example b/conf/storm.yaml.example
index 13c2f8e..7df3e9d 100644
--- a/conf/storm.yaml.example
+++ b/conf/storm.yaml.example
@@ -40,7 +40,7 @@
 
 ## Metrics Consumers
 # topology.metrics.consumer.register:
-#   - class: "backtype.storm.metric.LoggingMetricsConsumer"
+#   - class: "org.apache.storm.metric.LoggingMetricsConsumer"
 #     parallelism.hint: 1
 #   - class: "org.mycompany.MyMetricsConsumer"
 #     parallelism.hint: 1
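
As a hedged sketch of what a custom consumer such as the hypothetical `org.mycompany.MyMetricsConsumer` above could look like, the class below implements the metrics consumer interface and simply prints the data points it receives. The import paths and method signatures are assumed from the pre-existing `IMetricsConsumer` interface (relocated to the `org.apache.storm` package by this change), so verify them against the actual sources.

    import java.util.Collection;
    import java.util.Map;

    import org.apache.storm.metric.api.IMetricsConsumer;
    import org.apache.storm.task.IErrorReporter;
    import org.apache.storm.task.TopologyContext;

    public class MyMetricsConsumer implements IMetricsConsumer {
        @Override
        public void prepare(Map stormConf, Object registrationArgument,
                            TopologyContext context, IErrorReporter errorReporter) {
            // open connections to an external metrics store here
        }

        @Override
        public void handleDataPoints(TaskInfo taskInfo, Collection<DataPoint> dataPoints) {
            for (DataPoint dp : dataPoints) {
                // print "component:task metric = value" for each reported data point
                System.out.println(taskInfo.srcComponentId + ":" + taskInfo.srcTaskId
                        + " " + dp.name + " = " + dp.value);
            }
        }

        @Override
        public void cleanup() {
            // release anything acquired in prepare
        }
    }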

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/README.markdown
----------------------------------------------------------------------
diff --git a/examples/storm-starter/README.markdown b/examples/storm-starter/README.markdown
index 01ba09f..48d9d27 100644
--- a/examples/storm-starter/README.markdown
+++ b/examples/storm-starter/README.markdown
@@ -88,11 +88,11 @@ Example filename of the uberjar:
 You can submit (run) a topology contained in this uberjar to Storm via the `storm` CLI tool:
 
     # Example 1: Run the ExclamationTopology in local mode (LocalCluster)
-    $ storm jar target/storm-starter-*.jar storm.starter.ExclamationTopology
+    $ storm jar target/storm-starter-*.jar org.apache.storm.starter.ExclamationTopology
 
     # Example 2: Run the RollingTopWords in remote/cluster mode,
     #            under the name "production-topology"
-    $ storm jar storm-starter-*.jar storm.starter.RollingTopWords production-topology remote
+    $ storm jar storm-starter-*.jar org.apache.storm.starter.RollingTopWords production-topology remote
 
 Once submitted this way, you can also run topologies that use multilang, for example `WordCountTopology`.
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/multilang/resources/randomsentence.js
----------------------------------------------------------------------
diff --git a/examples/storm-starter/multilang/resources/randomsentence.js b/examples/storm-starter/multilang/resources/randomsentence.js
index 36fc5f5..b121915 100644
--- a/examples/storm-starter/multilang/resources/randomsentence.js
+++ b/examples/storm-starter/multilang/resources/randomsentence.js
@@ -18,7 +18,7 @@
 
 /**
  * Example for storm spout. Emits random sentences.
- * The original class in java - storm.starter.spout.RandomSentenceSpout.
+ * The original class in java - org.apache.storm.starter.spout.RandomSentenceSpout.
  *
  */
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/clj/org/apache/storm/starter/clj/word_count.clj
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/clj/org/apache/storm/starter/clj/word_count.clj b/examples/storm-starter/src/clj/org/apache/storm/starter/clj/word_count.clj
new file mode 100644
index 0000000..fb3a695
--- /dev/null
+++ b/examples/storm-starter/src/clj/org/apache/storm/starter/clj/word_count.clj
@@ -0,0 +1,95 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.starter.clj.word-count
+  (:import [org.apache.storm StormSubmitter LocalCluster])
+  (:use [org.apache.storm clojure config])
+  (:gen-class))
+
+(defspout sentence-spout ["sentence"]
+  [conf context collector]
+  (let [sentences ["a little brown dog"
+                   "the man petted the dog"
+                   "four score and seven years ago"
+                   "an apple a day keeps the doctor away"]]
+    (spout
+     (nextTuple []
+       (Thread/sleep 100)
+       (emit-spout! collector [(rand-nth sentences)])         
+       )
+     (ack [id]
+        ;; You only need to define this method for reliable spouts
+        ;; (such as one that reads off of a queue like Kestrel)
+        ;; This is an unreliable spout, so it does nothing here
+        ))))
+
+(defspout sentence-spout-parameterized ["word"] {:params [sentences] :prepare false}
+  [collector]
+  (Thread/sleep 500)
+  (emit-spout! collector [(rand-nth sentences)]))
+
+(defbolt split-sentence ["word"] [tuple collector]
+  (let [words (.split (.getString tuple 0) " ")]
+    (doseq [w words]
+      (emit-bolt! collector [w] :anchor tuple))
+    (ack! collector tuple)
+    ))
+
+(defbolt word-count ["word" "count"] {:prepare true}
+  [conf context collector]
+  (let [counts (atom {})]
+    (bolt
+     (execute [tuple]
+       (let [word (.getString tuple 0)]
+         (swap! counts (partial merge-with +) {word 1})
+         (emit-bolt! collector [word (@counts word)] :anchor tuple)
+         (ack! collector tuple)
+         )))))
+
+(defn mk-topology []
+
+  (topology
+   {"1" (spout-spec sentence-spout)
+    "2" (spout-spec (sentence-spout-parameterized
+                     ["the cat jumped over the door"
+                      "greetings from a faraway land"])
+                     :p 2)}
+   {"3" (bolt-spec {"1" :shuffle "2" :shuffle}
+                   split-sentence
+                   :p 5)
+    "4" (bolt-spec {"3" ["word"]}
+                   word-count
+                   :p 6)}))
+
+(defn run-local! []
+  (let [cluster (LocalCluster.)]
+    (.submitTopology cluster "word-count" {TOPOLOGY-DEBUG true} (mk-topology))
+    (Thread/sleep 10000)
+    (.shutdown cluster)
+    ))
+
+(defn submit-topology! [name]
+  (StormSubmitter/submitTopology
+   name
+   {TOPOLOGY-DEBUG true
+    TOPOLOGY-WORKERS 3}
+   (mk-topology)))
+
+(defn -main
+  ([]
+   (run-local!))
+  ([name]
+   (submit-topology! name)))
+

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/clj/storm/starter/clj/word_count.clj
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/clj/storm/starter/clj/word_count.clj b/examples/storm-starter/src/clj/storm/starter/clj/word_count.clj
deleted file mode 100644
index 3b54ac8..0000000
--- a/examples/storm-starter/src/clj/storm/starter/clj/word_count.clj
+++ /dev/null
@@ -1,95 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns storm.starter.clj.word-count
-  (:import [backtype.storm StormSubmitter LocalCluster])
-  (:use [backtype.storm clojure config])
-  (:gen-class))
-
-(defspout sentence-spout ["sentence"]
-  [conf context collector]
-  (let [sentences ["a little brown dog"
-                   "the man petted the dog"
-                   "four score and seven years ago"
-                   "an apple a day keeps the doctor away"]]
-    (spout
-     (nextTuple []
-       (Thread/sleep 100)
-       (emit-spout! collector [(rand-nth sentences)])         
-       )
-     (ack [id]
-        ;; You only need to define this method for reliable spouts
-        ;; (such as one that reads off of a queue like Kestrel)
-        ;; This is an unreliable spout, so it does nothing here
-        ))))
-
-(defspout sentence-spout-parameterized ["word"] {:params [sentences] :prepare false}
-  [collector]
-  (Thread/sleep 500)
-  (emit-spout! collector [(rand-nth sentences)]))
-
-(defbolt split-sentence ["word"] [tuple collector]
-  (let [words (.split (.getString tuple 0) " ")]
-    (doseq [w words]
-      (emit-bolt! collector [w] :anchor tuple))
-    (ack! collector tuple)
-    ))
-
-(defbolt word-count ["word" "count"] {:prepare true}
-  [conf context collector]
-  (let [counts (atom {})]
-    (bolt
-     (execute [tuple]
-       (let [word (.getString tuple 0)]
-         (swap! counts (partial merge-with +) {word 1})
-         (emit-bolt! collector [word (@counts word)] :anchor tuple)
-         (ack! collector tuple)
-         )))))
-
-(defn mk-topology []
-
-  (topology
-   {"1" (spout-spec sentence-spout)
-    "2" (spout-spec (sentence-spout-parameterized
-                     ["the cat jumped over the door"
-                      "greetings from a faraway land"])
-                     :p 2)}
-   {"3" (bolt-spec {"1" :shuffle "2" :shuffle}
-                   split-sentence
-                   :p 5)
-    "4" (bolt-spec {"3" ["word"]}
-                   word-count
-                   :p 6)}))
-
-(defn run-local! []
-  (let [cluster (LocalCluster.)]
-    (.submitTopology cluster "word-count" {TOPOLOGY-DEBUG true} (mk-topology))
-    (Thread/sleep 10000)
-    (.shutdown cluster)
-    ))
-
-(defn submit-topology! [name]
-  (StormSubmitter/submitTopology
-   name
-   {TOPOLOGY-DEBUG true
-    TOPOLOGY-WORKERS 3}
-   (mk-topology)))
-
-(defn -main
-  ([]
-   (run-local!))
-  ([name]
-   (submit-topology! name)))
-

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/BasicDRPCTopology.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/BasicDRPCTopology.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/BasicDRPCTopology.java
new file mode 100644
index 0000000..2187da4
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/BasicDRPCTopology.java
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter;
+
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.LocalDRPC;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.drpc.LinearDRPCTopologyBuilder;
+import org.apache.storm.topology.BasicOutputCollector;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseBasicBolt;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+
+/**
+ * This topology is a basic example of doing distributed RPC on top of Storm. It implements a function that appends a
+ * "!" to any string you send the DRPC function.
+ *
+ * @see <a href="http://storm.apache.org/documentation/Distributed-RPC.html">Distributed RPC</a>
+ */
+public class BasicDRPCTopology {
+  public static class ExclaimBolt extends BaseBasicBolt {
+    @Override
+    public void execute(Tuple tuple, BasicOutputCollector collector) {
+      String input = tuple.getString(1);
+      collector.emit(new Values(tuple.getValue(0), input + "!"));
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+      declarer.declare(new Fields("id", "result"));
+    }
+
+  }
+
+  public static void main(String[] args) throws Exception {
+    LinearDRPCTopologyBuilder builder = new LinearDRPCTopologyBuilder("exclamation");
+    builder.addBolt(new ExclaimBolt(), 3);
+
+    Config conf = new Config();
+
+    if (args == null || args.length == 0) {
+      LocalDRPC drpc = new LocalDRPC();
+      LocalCluster cluster = new LocalCluster();
+
+      cluster.submitTopology("drpc-demo", conf, builder.createLocalTopology(drpc));
+
+      for (String word : new String[]{ "hello", "goodbye" }) {
+        System.out.println("Result for \"" + word + "\": " + drpc.execute("exclamation", word));
+      }
+
+      Thread.sleep(10000);
+      drpc.shutdown();
+      cluster.shutdown();
+    }
+    else {
+      conf.setNumWorkers(3);
+      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createRemoteTopology());
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/BlobStoreAPIWordCountTopology.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/BlobStoreAPIWordCountTopology.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/BlobStoreAPIWordCountTopology.java
new file mode 100644
index 0000000..13ccb1d
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/BlobStoreAPIWordCountTopology.java
@@ -0,0 +1,304 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter;
+
+import org.apache.storm.Config;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.blobstore.AtomicOutputStream;
+import org.apache.storm.blobstore.ClientBlobStore;
+import org.apache.storm.blobstore.InputStreamWithMeta;
+import org.apache.storm.blobstore.NimbusBlobStore;
+
+import org.apache.storm.generated.AccessControl;
+import org.apache.storm.generated.AccessControlType;
+import org.apache.storm.generated.AlreadyAliveException;
+import org.apache.storm.generated.AuthorizationException;
+import org.apache.storm.generated.InvalidTopologyException;
+import org.apache.storm.generated.KeyAlreadyExistsException;
+import org.apache.storm.generated.KeyNotFoundException;
+import org.apache.storm.generated.SettableBlobMeta;
+import org.apache.storm.spout.SpoutOutputCollector;
+import org.apache.storm.task.ShellBolt;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.BasicOutputCollector;
+import org.apache.storm.topology.IRichBolt;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.topology.base.BaseBasicBolt;
+import org.apache.storm.topology.base.BaseRichSpout;
+import org.apache.storm.blobstore.BlobStoreAclHandler;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.utils.Utils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.StringTokenizer;
+
+public class BlobStoreAPIWordCountTopology {
+    private static ClientBlobStore store; // Client API to invoke blob store API functionality
+    private static String key = "key";
+    private static String fileName = "blacklist.txt";
+    private static final Logger LOG = LoggerFactory.getLogger(BlobStoreAPIWordCountTopology.class);
+
+    public static void prepare() {
+        Config conf = new Config();
+        conf.putAll(Utils.readStormConfig());
+        store = Utils.getClientBlobStore(conf);
+    }
+
+    // Spout implementation
+    public static class RandomSentenceSpout extends BaseRichSpout {
+        SpoutOutputCollector _collector;
+
+        @Override
+        public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
+            _collector = collector;
+        }
+
+        @Override
+        public void nextTuple() {
+            Utils.sleep(100);
+            _collector.emit(new Values(getRandomSentence()));
+        }
+
+        @Override
+        public void ack(Object id) {
+        }
+
+        @Override
+        public void fail(Object id) {
+        }
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+            declarer.declare(new Fields("sentence"));
+        }
+
+    }
+
+    // Bolt implementation
+    public static class SplitSentence extends ShellBolt implements IRichBolt {
+
+        public SplitSentence() {
+            super("python", "splitsentence.py");
+        }
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+            declarer.declare(new Fields("word"));
+        }
+
+        @Override
+        public Map<String, Object> getComponentConfiguration() {
+            return null;
+        }
+    }
+
+    public static class FilterWords extends BaseBasicBolt {
+        boolean poll = false;
+        long pollTime;
+        Set<String> wordSet;
+        @Override
+        public void execute(Tuple tuple, BasicOutputCollector collector) {
+            String word = tuple.getString(0);
+            // Poll every 5 seconds to refresh the wordSet that this
+            // FilterWords bolt uses to filter words
+            try {
+                if (!poll) {
+                    wordSet = parseFile(fileName);
+                    pollTime = System.currentTimeMillis();
+                    poll = true;
+                } else {
+                    if ((System.currentTimeMillis() - pollTime) > 5000) {
+                        wordSet = parseFile(fileName);
+                        pollTime = System.currentTimeMillis();
+                    }
+                }
+            } catch (IOException exp) {
+                throw new RuntimeException(exp);
+            }
+            if (wordSet != null && !wordSet.contains(word)) {
+                collector.emit(new Values(word));
+            }
+        }
+
+        @Override
+        public void declareOutputFields(OutputFieldsDeclarer declarer) {
+            declarer.declare(new Fields("word"));
+        }
+    }
+
+    public void buildAndLaunchWordCountTopology(String[] args) {
+        TopologyBuilder builder = new TopologyBuilder();
+        builder.setSpout("spout", new RandomSentenceSpout(), 5);
+        builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
+        builder.setBolt("filter", new FilterWords(), 6).shuffleGrouping("split");
+
+        Config conf = new Config();
+        conf.setDebug(true);
+        try {
+            conf.setNumWorkers(3);
+            StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
+        } catch (InvalidTopologyException | AuthorizationException | AlreadyAliveException exp) {
+            throw new RuntimeException(exp);
+        }
+    }
+
+    // Equivalent create command on command line
+    // storm blobstore create --file blacklist.txt --acl o::rwa key
+    private static void createBlobWithContent(String blobKey, ClientBlobStore clientBlobStore, File file)
+            throws AuthorizationException, KeyAlreadyExistsException, IOException,KeyNotFoundException {
+        String stringBlobACL = "o::rwa";
+        AccessControl blobACL = BlobStoreAclHandler.parseAccessControl(stringBlobACL);
+        List<AccessControl> acls = new LinkedList<AccessControl>();
+        acls.add(blobACL); // more ACLs can be added here
+        SettableBlobMeta settableBlobMeta = new SettableBlobMeta(acls);
+        AtomicOutputStream blobStream = clientBlobStore.createBlob(blobKey,settableBlobMeta);
+        blobStream.write(readFile(file).toString().getBytes());
+        blobStream.close();
+    }
+
+    // Equivalent update command on command line
+    // storm blobstore update --file blacklist.txt key
+    private static void updateBlobWithContent(String blobKey, ClientBlobStore clientBlobStore, File file)
+            throws KeyNotFoundException, AuthorizationException, IOException {
+        AtomicOutputStream blobOutputStream = clientBlobStore.updateBlob(blobKey);
+        blobOutputStream.write(readFile(file).toString().getBytes());
+        blobOutputStream.close();
+    }
+
+    private static String getRandomSentence() {
+        String[] sentences = new String[]{ "the cow jumped over the moon", "an apple a day keeps the doctor away",
+                "four score and seven years ago", "snow white and the seven dwarfs", "i am at two with nature" };
+        String sentence = sentences[new Random().nextInt(sentences.length)];
+        return sentence;
+    }
+
+    private static Set<String> getRandomWordSet() {
+        Set<String> randomWordSet = new HashSet<>();
+        Random random = new Random();
+        String[] words = new String[]{ "cow", "jumped", "over", "the", "moon", "apple", "day", "doctor", "away",
+                "four", "seven", "ago", "snow", "white", "seven", "dwarfs", "nature", "two" };
+        // Choosing at most 5 words to update the blacklist file for filtering
+        for (int i=0; i<5; i++) {
+            randomWordSet.add(words[random.nextInt(words.length)]);
+        }
+        return randomWordSet;
+    }
+
+    private static Set<String> parseFile(String fileName) throws IOException {
+        File file = new File(fileName);
+        Set<String> wordSet = new HashSet<>();
+        if (!file.exists()) {
+            return wordSet;
+        }
+        StringTokenizer tokens = new StringTokenizer(readFile(file).toString(), "\r\n");
+        while (tokens.hasMoreElements()) {
+            wordSet.add(tokens.nextToken());
+        }
+        LOG.debug("parseFile {}", wordSet);
+        return wordSet;
+    }
+
+    private static StringBuilder readFile(File file) throws IOException {
+        String line;
+        StringBuilder fileContent = new StringBuilder();
+        // Do not use canonical file name here as we are using
+        // symbolic links to read file data and performing atomic move
+        // while updating files
+        BufferedReader br = new BufferedReader(new FileReader(file));
+        while ((line = br.readLine()) != null) {
+            fileContent.append(line);
+            fileContent.append(System.lineSeparator());
+        }
+        return fileContent;
+    }
+
+    // Creating a blacklist file to read from the disk
+    public static File createFile(String fileName) throws IOException {
+        File file = null;
+        file = new File(fileName);
+        if (!file.exists()) {
+            file.createNewFile();
+        }
+        writeToFile(file, getRandomWordSet());
+        return file;
+    }
+
+    // Updating a blacklist file periodically with random words
+    public static File updateFile(File file) throws IOException {
+        writeToFile(file, getRandomWordSet());
+        return file;
+    }
+
+    // Writing random words to be blacklisted
+    public static void writeToFile(File file, Set<String> content) throws IOException{
+        FileWriter fw = new FileWriter(file, false);
+        BufferedWriter bw = new BufferedWriter(fw);
+        Iterator<String> iter = content.iterator();
+        while(iter.hasNext()) {
+            bw.write(iter.next());
+            bw.write(System.lineSeparator());
+        }
+        bw.close();
+    }
+
+    public static void main(String[] args) {
+        prepare();
+        BlobStoreAPIWordCountTopology wc = new BlobStoreAPIWordCountTopology();
+        try {
+            File file = createFile(fileName);
+            // Creating blob again before launching topology
+            createBlobWithContent(key, store, file);
+
+            // Blobstore launch command with topology blobstore map
+            // Here we are giving it a local name so that we can read from the file
+            // bin/storm jar examples/storm-starter/storm-starter-topologies-0.11.0-SNAPSHOT.jar
+            // org.apache.storm.starter.BlobStoreAPIWordCountTopology bl -c
+            // topology.blobstore.map='{"key":{"localname":"blacklist.txt", "uncompress":"false"}}'
+            wc.buildAndLaunchWordCountTopology(args);
+
+            // Updating file few times every 5 seconds
+            for(int i=0; i<10; i++) {
+                updateBlobWithContent(key, store, updateFile(file));
+                Utils.sleep(5000);
+            }
+        } catch (KeyAlreadyExistsException kae) {
+            LOG.info("Key already exists {}", kae);
+        } catch (AuthorizationException | KeyNotFoundException | IOException exp) {
+            throw new RuntimeException(exp);
+        }
+    }
+}
+
+

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/ExclamationTopology.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/ExclamationTopology.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/ExclamationTopology.java
new file mode 100644
index 0000000..26e0430
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/ExclamationTopology.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter;
+
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.testing.TestWordSpout;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.topology.base.BaseRichBolt;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.utils.Utils;
+
+import java.util.Map;
+
+/**
+ * This is a basic example of a Storm topology.
+ */
+public class ExclamationTopology {
+
+  public static class ExclamationBolt extends BaseRichBolt {
+    OutputCollector _collector;
+
+    @Override
+    public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
+      _collector = collector;
+    }
+
+    @Override
+    public void execute(Tuple tuple) {
+      _collector.emit(tuple, new Values(tuple.getString(0) + "!!!"));
+      _collector.ack(tuple);
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+      declarer.declare(new Fields("word"));
+    }
+
+
+  }
+
+  public static void main(String[] args) throws Exception {
+    TopologyBuilder builder = new TopologyBuilder();
+
+    builder.setSpout("word", new TestWordSpout(), 10);
+    builder.setBolt("exclaim1", new ExclamationBolt(), 3).shuffleGrouping("word");
+    builder.setBolt("exclaim2", new ExclamationBolt(), 2).shuffleGrouping("exclaim1");
+
+    Config conf = new Config();
+    conf.setDebug(true);
+
+    if (args != null && args.length > 0) {
+      conf.setNumWorkers(3);
+
+      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
+    }
+    else {
+
+      LocalCluster cluster = new LocalCluster();
+      cluster.submitTopology("test", conf, builder.createTopology());
+      Utils.sleep(10000);
+      cluster.killTopology("test");
+      cluster.shutdown();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/FastWordCountTopology.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/FastWordCountTopology.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/FastWordCountTopology.java
new file mode 100644
index 0000000..51f6b11
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/FastWordCountTopology.java
@@ -0,0 +1,198 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter;
+
+import org.apache.storm.Config;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.generated.*;
+import org.apache.storm.spout.SpoutOutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.BasicOutputCollector;
+import org.apache.storm.topology.IRichBolt;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.topology.base.BaseBasicBolt;
+import org.apache.storm.topology.base.BaseRichSpout;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.utils.NimbusClient;
+import org.apache.storm.utils.Utils;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
+
+/**
+ * WordCount, but the spout does not stop and the bolts are implemented in
+ * Java. This shows how fast the word count can run.
+ */
+public class FastWordCountTopology {
+  public static class FastRandomSentenceSpout extends BaseRichSpout {
+    SpoutOutputCollector _collector;
+    Random _rand;
+    private static final String[] CHOICES = {
+        "marry had a little lamb whos fleese was white as snow",
+        "and every where that marry went the lamb was sure to go",
+        "one two three four five six seven eight nine ten",
+        "this is a test of the emergency broadcast system this is only a test",
+        "peter piper picked a peck of pickeled peppers"
+    };
+
+    @Override
+    public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
+      _collector = collector;
+      _rand = ThreadLocalRandom.current();
+    }
+
+    @Override
+    public void nextTuple() {
+      String sentence = CHOICES[_rand.nextInt(CHOICES.length)];
+      _collector.emit(new Values(sentence), sentence);
+    }
+
+    @Override
+    public void ack(Object id) {
+        //Ignored
+    }
+
+    @Override
+    public void fail(Object id) {
+      _collector.emit(new Values(id), id);
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+      declarer.declare(new Fields("sentence"));
+    }
+  }
+
+  public static class SplitSentence extends BaseBasicBolt {
+    @Override
+    public void execute(Tuple tuple, BasicOutputCollector collector) {
+      String sentence = tuple.getString(0);
+      for (String word: sentence.split("\\s+")) {
+          collector.emit(new Values(word, 1));
+      }
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+      declarer.declare(new Fields("word", "count"));
+    }
+  }
+
+  public static class WordCount extends BaseBasicBolt {
+    Map<String, Integer> counts = new HashMap<String, Integer>();
+
+    @Override
+    public void execute(Tuple tuple, BasicOutputCollector collector) {
+      String word = tuple.getString(0);
+      Integer count = counts.get(word);
+      if (count == null)
+        count = 0;
+      count++;
+      counts.put(word, count);
+      collector.emit(new Values(word, count));
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+      declarer.declare(new Fields("word", "count"));
+    }
+  }
+
+  public static void printMetrics(Nimbus.Client client, String name) throws Exception {
+    ClusterSummary summary = client.getClusterInfo();
+    String id = null;
+    for (TopologySummary ts: summary.get_topologies()) {
+      if (name.equals(ts.get_name())) {
+        id = ts.get_id();
+      }
+    }
+    if (id == null) {
+      throw new Exception("Could not find a topology named "+name);
+    }
+    TopologyInfo info = client.getTopologyInfo(id);
+    int uptime = info.get_uptime_secs();
+    long acked = 0;
+    long failed = 0;
+    double weightedAvgTotal = 0.0;
+    for (ExecutorSummary exec: info.get_executors()) {
+      if ("spout".equals(exec.get_component_id())) {
+        SpoutStats stats = exec.get_stats().get_specific().get_spout();
+        Map<String, Long> failedMap = stats.get_failed().get(":all-time");
+        Map<String, Long> ackedMap = stats.get_acked().get(":all-time");
+        Map<String, Double> avgLatMap = stats.get_complete_ms_avg().get(":all-time");
+        for (String key: ackedMap.keySet()) {
+          if (failedMap != null) {
+              Long tmp = failedMap.get(key);
+              if (tmp != null) {
+                  failed += tmp;
+              }
+          }
+          long ackVal = ackedMap.get(key);
+          double latVal = avgLatMap.get(key) * ackVal;
+          acked += ackVal;
+          weightedAvgTotal += latVal;
+        }
+      }
+    }
+    double avgLatency = weightedAvgTotal/acked;
+    System.out.println("uptime: "+uptime+" acked: "+acked+" avgLatency: "+avgLatency+" acked/sec: "+(((double)acked)/uptime+" failed: "+failed));
+  } 
+
+  public static void kill(Nimbus.Client client, String name) throws Exception {
+    KillOptions opts = new KillOptions();
+    opts.set_wait_secs(0);
+    client.killTopologyWithOpts(name, opts);
+  } 
+
+  public static void main(String[] args) throws Exception {
+
+    TopologyBuilder builder = new TopologyBuilder();
+
+    builder.setSpout("spout", new FastRandomSentenceSpout(), 4);
+
+    builder.setBolt("split", new SplitSentence(), 4).shuffleGrouping("spout");
+    builder.setBolt("count", new WordCount(), 4).fieldsGrouping("split", new Fields("word"));
+
+    Config conf = new Config();
+    conf.registerMetricsConsumer(org.apache.storm.metric.LoggingMetricsConsumer.class);
+
+    String name = "wc-test";
+    if (args != null && args.length > 0) {
+        name = args[0];
+    }
+
+    conf.setNumWorkers(1);
+    StormSubmitter.submitTopologyWithProgressBar(name, conf, builder.createTopology());
+
+    Map clusterConf = Utils.readStormConfig();
+    clusterConf.putAll(Utils.readCommandLineOpts());
+    Nimbus.Client client = NimbusClient.getConfiguredClient(clusterConf).getClient();
+
+    //Sleep for 5 mins
+    for (int i = 0; i < 10; i++) {
+        Thread.sleep(30 * 1000);
+        printMetrics(client, name);
+    }
+    kill(client, name);
+  }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/InOrderDeliveryTest.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/InOrderDeliveryTest.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/InOrderDeliveryTest.java
new file mode 100644
index 0000000..1684ce5
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/InOrderDeliveryTest.java
@@ -0,0 +1,175 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter;
+
+import org.apache.storm.Config;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.generated.*;
+import org.apache.storm.spout.SpoutOutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.BasicOutputCollector;
+import org.apache.storm.topology.FailedException;
+import org.apache.storm.topology.IRichBolt;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.topology.base.BaseBasicBolt;
+import org.apache.storm.topology.base.BaseRichSpout;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.utils.NimbusClient;
+import org.apache.storm.utils.Utils;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+
+public class InOrderDeliveryTest {
+  public static class InOrderSpout extends BaseRichSpout {
+    SpoutOutputCollector _collector;
+    int _base = 0;
+    int _i = 0;
+
+    @Override
+    public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
+      _collector = collector;
+      _base = context.getThisTaskIndex();
+    }
+
+    @Override
+    public void nextTuple() {
+      Values v = new Values(_base, _i);
+      _collector.emit(v, "ACK");
+      _i++;
+    }
+
+    @Override
+    public void ack(Object id) {
+      //Ignored
+    }
+
+    @Override
+    public void fail(Object id) {
+      //Ignored
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+      declarer.declare(new Fields("c1", "c2"));
+    }
+  }
+
+  public static class Check extends BaseBasicBolt {
+    Map<Integer, Integer> expected = new HashMap<Integer, Integer>();
+
+    @Override
+    public void execute(Tuple tuple, BasicOutputCollector collector) {
+      Integer c1 = tuple.getInteger(0);
+      Integer c2 = tuple.getInteger(1);
+      Integer exp = expected.get(c1);
+      if (exp == null) exp = 0;
+      if (c2.intValue() != exp.intValue()) {
+          System.out.println(c1+" "+c2+" != "+exp);
+          throw new FailedException(c1+" "+c2+" != "+exp);
+      }
+      exp = c2 + 1;
+      expected.put(c1, exp);
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+      //Empty
+    }
+  }
+
+  public static void printMetrics(Nimbus.Client client, String name) throws Exception {
+    ClusterSummary summary = client.getClusterInfo();
+    String id = null;
+    for (TopologySummary ts: summary.get_topologies()) {
+      if (name.equals(ts.get_name())) {
+        id = ts.get_id();
+      }
+    }
+    if (id == null) {
+      throw new Exception("Could not find a topology named "+name);
+    }
+    TopologyInfo info = client.getTopologyInfo(id);
+    int uptime = info.get_uptime_secs();
+    long acked = 0;
+    long failed = 0;
+    double weightedAvgTotal = 0.0;
+    for (ExecutorSummary exec: info.get_executors()) {
+      if ("spout".equals(exec.get_component_id())) {
+        SpoutStats stats = exec.get_stats().get_specific().get_spout();
+        Map<String, Long> failedMap = stats.get_failed().get(":all-time");
+        Map<String, Long> ackedMap = stats.get_acked().get(":all-time");
+        Map<String, Double> avgLatMap = stats.get_complete_ms_avg().get(":all-time");
+        for (String key: ackedMap.keySet()) {
+          if (failedMap != null) {
+              Long tmp = failedMap.get(key);
+              if (tmp != null) {
+                  failed += tmp;
+              }
+          }
+          long ackVal = ackedMap.get(key);
+          double latVal = avgLatMap.get(key) * ackVal;
+          acked += ackVal;
+          weightedAvgTotal += latVal;
+        }
+      }
+    }
+    double avgLatency = weightedAvgTotal/acked;
+    System.out.println("uptime: "+uptime+" acked: "+acked+" avgLatency: "+avgLatency+" acked/sec: "+(((double)acked)/uptime+" failed: "+failed));
+  } 
+
+  public static void kill(Nimbus.Client client, String name) throws Exception {
+    KillOptions opts = new KillOptions();
+    opts.set_wait_secs(0);
+    client.killTopologyWithOpts(name, opts);
+  } 
+
+  public static void main(String[] args) throws Exception {
+
+    TopologyBuilder builder = new TopologyBuilder();
+
+    builder.setSpout("spout", new InOrderSpout(), 8);
+    builder.setBolt("count", new Check(), 8).fieldsGrouping("spout", new Fields("c1"));
+
+    Config conf = new Config();
+    conf.registerMetricsConsumer(org.apache.storm.metric.LoggingMetricsConsumer.class);
+
+    String name = "in-order-test";
+    if (args != null && args.length > 0) {
+        name = args[0];
+    }
+
+    conf.setNumWorkers(1);
+    StormSubmitter.submitTopologyWithProgressBar(name, conf, builder.createTopology());
+
+    Map clusterConf = Utils.readStormConfig();
+    clusterConf.putAll(Utils.readCommandLineOpts());
+    Nimbus.Client client = NimbusClient.getConfiguredClient(clusterConf).getClient();
+
+    //Sleep for 25 mins
+    for (int i = 0; i < 50; i++) {
+        Thread.sleep(30 * 1000);
+        printMetrics(client, name);
+    }
+    kill(client, name);
+  }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/ManualDRPC.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/ManualDRPC.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/ManualDRPC.java
new file mode 100644
index 0000000..4c9daec
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/ManualDRPC.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter;
+
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.LocalDRPC;
+import org.apache.storm.drpc.DRPCSpout;
+import org.apache.storm.drpc.ReturnResults;
+import org.apache.storm.topology.BasicOutputCollector;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.topology.base.BaseBasicBolt;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+
+
+public class ManualDRPC {
+  public static class ExclamationBolt extends BaseBasicBolt {
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+      declarer.declare(new Fields("result", "return-info"));
+    }
+
+    @Override
+    public void execute(Tuple tuple, BasicOutputCollector collector) {
+      String arg = tuple.getString(0);
+      Object retInfo = tuple.getValue(1);
+      collector.emit(new Values(arg + "!!!", retInfo));
+    }
+
+  }
+
+  public static void main(String[] args) {
+    TopologyBuilder builder = new TopologyBuilder();
+    LocalDRPC drpc = new LocalDRPC();
+
+    DRPCSpout spout = new DRPCSpout("exclamation", drpc);
+    builder.setSpout("drpc", spout);
+    builder.setBolt("exclaim", new ExclamationBolt(), 3).shuffleGrouping("drpc");
+    builder.setBolt("return", new ReturnResults(), 3).shuffleGrouping("exclaim");
+
+    LocalCluster cluster = new LocalCluster();
+    Config conf = new Config();
+    cluster.submitTopology("exclaim", conf, builder.createTopology());
+
+    System.out.println(drpc.execute("exclamation", "aaa"));
+    System.out.println(drpc.execute("exclamation", "bbb"));
+
+  }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/MultipleLoggerTopology.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/MultipleLoggerTopology.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/MultipleLoggerTopology.java
new file mode 100644
index 0000000..99c3da1
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/MultipleLoggerTopology.java
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter;
+
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.testing.TestWordSpout;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.topology.base.BaseRichBolt;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.utils.Utils;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * This is a basic example of a Storm topology.
+ */
+public class MultipleLoggerTopology {
+  public static class ExclamationLoggingBolt extends BaseRichBolt {
+    OutputCollector _collector;
+    Logger _rootLogger = LoggerFactory.getLogger (Logger.ROOT_LOGGER_NAME);
+    // ensure the loggers are configured in the worker.xml before
+    // trying to use them here
+    Logger _logger = LoggerFactory.getLogger ("com.myapp");
+    Logger _subLogger = LoggerFactory.getLogger ("com.myapp.sub");
+
+    @Override
+    public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
+      _collector = collector;
+    }
+
+    @Override
+    public void execute(Tuple tuple) {
+      _rootLogger.debug ("root: This is a DEBUG message");
+      _rootLogger.info ("root: This is an INFO message");
+      _rootLogger.warn ("root: This is a WARN message");
+      _rootLogger.error ("root: This is an ERROR message");
+
+      _logger.debug ("myapp: This is a DEBUG message");
+      _logger.info ("myapp: This is an INFO message");
+      _logger.warn ("myapp: This is a WARN message");
+      _logger.error ("myapp: This is an ERROR message");
+
+      _subLogger.debug ("myapp.sub: This is a DEBUG message");
+      _subLogger.info ("myapp.sub: This is an INFO message");
+      _subLogger.warn ("myapp.sub: This is a WARN message");
+      _subLogger.error ("myapp.sub: This is an ERROR message");
+
+      _collector.emit(tuple, new Values(tuple.getString(0) + "!!!"));
+      _collector.ack(tuple);
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+      declarer.declare(new Fields("word"));
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    TopologyBuilder builder = new TopologyBuilder();
+
+    builder.setSpout("word", new TestWordSpout(), 10);
+    builder.setBolt("exclaim1", new ExclamationLoggingBolt(), 3).shuffleGrouping("word");
+    builder.setBolt("exclaim2", new ExclamationLoggingBolt(), 2).shuffleGrouping("exclaim1");
+
+    Config conf = new Config();
+    conf.setDebug(true);
+
+    if (args != null && args.length > 0) {
+      conf.setNumWorkers(2);
+      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
+    } else {
+      LocalCluster cluster = new LocalCluster();
+      cluster.submitTopology("test", conf, builder.createTopology());
+      Utils.sleep(10000);
+      cluster.killTopology("test");
+      cluster.shutdown();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/PrintSampleStream.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/PrintSampleStream.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/PrintSampleStream.java
new file mode 100644
index 0000000..466fca0
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/PrintSampleStream.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.starter;
+
+import java.util.Arrays;
+
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.utils.Utils;
+
+import org.apache.storm.starter.bolt.PrinterBolt;
+import org.apache.storm.starter.spout.TwitterSampleSpout;
+
+public class PrintSampleStream {        
+    public static void main(String[] args) {
+        String consumerKey = args[0]; 
+        String consumerSecret = args[1]; 
+        String accessToken = args[2]; 
+        String accessTokenSecret = args[3];
+        String[] arguments = args.clone();
+        String[] keyWords = Arrays.copyOfRange(arguments, 4, arguments.length);
+        
+        TopologyBuilder builder = new TopologyBuilder();
+        
+        builder.setSpout("twitter", new TwitterSampleSpout(consumerKey, consumerSecret,
+                                accessToken, accessTokenSecret, keyWords));
+        builder.setBolt("print", new PrinterBolt())
+                .shuffleGrouping("twitter");
+                
+                
+        Config conf = new Config();
+        
+        
+        LocalCluster cluster = new LocalCluster();
+        
+        cluster.submitTopology("test", conf, builder.createTopology());
+        
+        Utils.sleep(10000);
+        cluster.shutdown();
+    }
+}


[12/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/util.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/util.clj b/storm-core/src/clj/org/apache/storm/util.clj
new file mode 100644
index 0000000..23d39f6
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/util.clj
@@ -0,0 +1,1118 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+
+(ns org.apache.storm.util
+  (:import [java.net InetAddress])
+  (:import [java.util Map Map$Entry List ArrayList Collection Iterator HashMap])
+  (:import [java.io FileReader FileNotFoundException])
+  (:import [java.nio.file Paths])
+  (:import [org.apache.storm Config])
+  (:import [org.apache.storm.utils Time Container ClojureTimerTask Utils
+            MutableObject MutableInt])
+  (:import [org.apache.storm.security.auth NimbusPrincipal])
+  (:import [javax.security.auth Subject])
+  (:import [java.util UUID Random ArrayList List Collections])
+  (:import [java.util.zip ZipFile])
+  (:import [java.util.concurrent.locks ReentrantReadWriteLock])
+  (:import [java.util.concurrent Semaphore])
+  (:import [java.nio.file Files Paths])
+  (:import [java.nio.file.attribute FileAttribute])
+  (:import [java.io File FileOutputStream RandomAccessFile StringWriter
+            PrintWriter BufferedReader InputStreamReader IOException])
+  (:import [java.lang.management ManagementFactory])
+  (:import [org.apache.commons.exec DefaultExecutor CommandLine])
+  (:import [org.apache.commons.io FileUtils])
+  (:import [org.apache.storm.logging ThriftAccessLogger])
+  (:import [org.apache.commons.exec ExecuteException])
+  (:import [org.json.simple JSONValue])
+  (:import [org.yaml.snakeyaml Yaml]
+           [org.yaml.snakeyaml.constructor SafeConstructor])
+  (:require [clojure [string :as str]])
+  (:import [clojure.lang RT])
+  (:require [clojure [set :as set]])
+  (:require [clojure.java.io :as io])
+  (:use [clojure walk])
+  (:require [ring.util.codec :as codec])
+  (:use [org.apache.storm log]))
+
+(defn wrap-in-runtime
+  "Wraps an exception in a RuntimeException if needed"
+  [^Exception e]
+  (if (instance? RuntimeException e)
+    e
+    (RuntimeException. e)))
+
+(def on-windows?
+  (= "Windows_NT" (System/getenv "OS")))
+
+(def file-path-separator
+  (System/getProperty "file.separator"))
+
+(def class-path-separator
+  (System/getProperty "path.separator"))
+
+(defn is-absolute-path? [path]
+  (.isAbsolute (Paths/get path (into-array String []))))
+
+(defmacro defalias
+  "Defines an alias for a var: a new var with the same root binding (if
+  any) and similar metadata. The metadata of the alias is its initial
+  metadata (as provided by def) merged into the metadata of the original."
+  ([name orig]
+   `(do
+      (alter-meta!
+        (if (.hasRoot (var ~orig))
+          (def ~name (.getRawRoot (var ~orig)))
+          (def ~name))
+        ;; When copying metadata, disregard {:macro false}.
+        ;; Workaround for http://www.assembla.com/spaces/clojure/tickets/273
+        #(conj (dissoc % :macro)
+               (apply dissoc (meta (var ~orig)) (remove #{:macro} (keys %)))))
+      (var ~name)))
+  ([name orig doc]
+   (list `defalias (with-meta name (assoc (meta name) :doc doc)) orig)))
+
+;; name-with-attributes by Konrad Hinsen:
+(defn name-with-attributes
+  "To be used in macro definitions.
+  Handles optional docstrings and attribute maps for a name to be defined
+  in a list of macro arguments. If the first macro argument is a string,
+  it is added as a docstring to name and removed from the macro argument
+  list. If afterwards the first macro argument is a map, its entries are
+  added to the name's metadata map and the map is removed from the
+  macro argument list. The return value is a vector containing the name
+  with its extended metadata map and the list of unprocessed macro
+  arguments."
+  [name macro-args]
+  (let [[docstring macro-args] (if (string? (first macro-args))
+                                 [(first macro-args) (next macro-args)]
+                                 [nil macro-args])
+        [attr macro-args] (if (map? (first macro-args))
+                            [(first macro-args) (next macro-args)]
+                            [{} macro-args])
+        attr (if docstring
+               (assoc attr :doc docstring)
+               attr)
+        attr (if (meta name)
+               (conj (meta name) attr)
+               attr)]
+    [(with-meta name attr) macro-args]))
+
+(defmacro defnk
+  "Define a function accepting keyword arguments. Symbols up to the first
+  keyword in the parameter list are taken as positional arguments.  Then
+  an alternating sequence of keywords and defaults values is expected. The
+  values of the keyword arguments are available in the function body by
+  virtue of the symbol corresponding to the keyword (cf. :keys destructuring).
+  defnk accepts an optional docstring as well as an optional metadata map."
+  [fn-name & fn-tail]
+  (let [[fn-name [args & body]] (name-with-attributes fn-name fn-tail)
+        [pos kw-vals] (split-with symbol? args)
+        syms (map #(-> % name symbol) (take-nth 2 kw-vals))
+        values (take-nth 2 (rest kw-vals))
+        sym-vals (apply hash-map (interleave syms values))
+        de-map {:keys (vec syms) :or sym-vals}]
+    `(defn ~fn-name
+       [~@pos & options#]
+       (let [~de-map (apply hash-map options#)]
+         ~@body))))
+
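+;; A minimal usage sketch of defnk (hypothetical function and values, for illustration only):
+;;
+;;   (defnk connect [host :port 6627 :timeout-secs 30]
+;;     {:host host :port port :timeout-secs timeout-secs})
+;;
+;;   (connect "nimbus1")            ;=> {:host "nimbus1" :port 6627 :timeout-secs 30}
+;;   (connect "nimbus1" :port 6700) ;=> {:host "nimbus1" :port 6700 :timeout-secs 30}
+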
+(defn find-first
+  "Returns the first item of coll for which (pred item) returns logical true.
+  Consumes sequences up to the first match, will consume the entire sequence
+  and return nil if no match is found."
+  [pred coll]
+  (first (filter pred coll)))
+
+(defn dissoc-in
+  "Dissociates an entry from a nested associative structure returning a new
+  nested structure. keys is a sequence of keys. Any empty maps that result
+  will not be present in the new structure."
+  [m [k & ks :as keys]]
+  (if ks
+    (if-let [nextmap (get m k)]
+      (let [newmap (dissoc-in nextmap ks)]
+        (if (seq newmap)
+          (assoc m k newmap)
+          (dissoc m k)))
+      m)
+    (dissoc m k)))
+
+(defn indexed
+  "Returns a lazy sequence of [index, item] pairs, where items come
+  from 's' and indexes count up from zero.
+
+  (indexed '(a b c d))  =>  ([0 a] [1 b] [2 c] [3 d])"
+  [s]
+  (map vector (iterate inc 0) s))
+
+(defn positions
+  "Returns a lazy sequence containing the positions at which pred
+  is true for items in coll."
+  [pred coll]
+  (for [[idx elt] (indexed coll) :when (pred elt)] idx))
+
+(defn exception-cause?
+  [klass ^Throwable t]
+  (->> (iterate #(.getCause ^Throwable %) t)
+       (take-while identity)
+       (some (partial instance? klass))
+       boolean))
+
+(defmacro thrown-cause?
+  [klass & body]
+  `(try
+     ~@body
+     false
+     (catch Throwable t#
+       (exception-cause? ~klass t#))))
+
+(defmacro thrown-cause-with-msg?
+  [klass re & body]
+  `(try
+     ~@body
+     false
+     (catch Throwable t#
+       (and (re-matches ~re (.getMessage t#))
+            (exception-cause? ~klass t#)))))
+
+(defmacro forcat
+  [[args aseq] & body]
+  `(mapcat (fn [~args]
+             ~@body)
+           ~aseq))
+
+(defmacro try-cause
+  [& body]
+  (let [checker (fn [form]
+                  (or (not (sequential? form))
+                      (not= 'catch (first form))))
+        [code guards] (split-with checker body)
+        error-local (gensym "t")
+        guards (forcat [[_ klass local & guard-body] guards]
+                       `((exception-cause? ~klass ~error-local)
+                         (let [~local ~error-local]
+                           ~@guard-body
+                           )))]
+    `(try ~@code
+       (catch Throwable ~error-local
+         (cond ~@guards
+               true (throw ~error-local)
+               )))))
+
+(defn local-hostname
+  []
+  (.getCanonicalHostName (InetAddress/getLocalHost)))
+
+(def memoized-local-hostname (memoize local-hostname))
+
+;; checks conf for STORM_LOCAL_HOSTNAME.
+;; when unconfigured, falls back to (memoized) guess by `local-hostname`.
+(defn hostname
+  [conf]
+  (conf Config/STORM_LOCAL_HOSTNAME (memoized-local-hostname)))
+
+(letfn [(try-port [port]
+                  (with-open [socket (java.net.ServerSocket. port)]
+                    (.getLocalPort socket)))]
+  (defn available-port
+    ([] (try-port 0))
+    ([preferred]
+     (try
+       (try-port preferred)
+       (catch java.io.IOException e
+         (available-port))))))
+
+(defn uuid []
+  (str (UUID/randomUUID)))
+
+(defn current-time-secs
+  []
+  (Time/currentTimeSecs))
+
+(defn current-time-millis
+  []
+  (Time/currentTimeMillis))
+
+(defn secs-to-millis-long
+  [secs]
+  (long (* (long 1000) secs)))
+
+(defn clojurify-structure
+  [s]
+  (prewalk (fn [x]
+             (cond (instance? Map x) (into {} x)
+                   (instance? List x) (vec x)
+                   ;; (Boolean. false) does not evaluate to false in an if.
+                   ;; This fixes that.
+                   (instance? Boolean x) (boolean x)
+                   true x))
+           s))
+
+(defmacro with-file-lock
+  [path & body]
+  `(let [f# (File. ~path)
+         _# (.createNewFile f#)
+         rf# (RandomAccessFile. f# "rw")
+         lock# (.. rf# (getChannel) (lock))]
+     (try
+       ~@body
+       (finally
+         (.release lock#)
+         (.close rf#)))))
+
+(defn tokenize-path
+  [^String path]
+  (let [toks (.split path "/")]
+    (vec (filter (complement empty?) toks))))
+
+(defn assoc-conj
+  [m k v]
+  (merge-with concat m {k [v]}))
+
+;; returns [ones in first set not in second, ones in second set not in first]
+(defn set-delta
+  [old curr]
+  (let [s1 (set old)
+        s2 (set curr)]
+    [(set/difference s1 s2) (set/difference s2 s1)]))
+
+(defn parent-path
+  [path]
+  (let [toks (tokenize-path path)]
+    (str "/" (str/join "/" (butlast toks)))))
+
+(defn toks->path
+  [toks]
+  (str "/" (str/join "/" toks)))
+
+(defn normalize-path
+  [^String path]
+  (toks->path (tokenize-path path)))
+
+(defn map-val
+  [afn amap]
+  (into {}
+        (for [[k v] amap]
+          [k (afn v)])))
+
+(defn filter-val
+  [afn amap]
+  (into {} (filter (fn [[k v]] (afn v)) amap)))
+
+(defn filter-key
+  [afn amap]
+  (into {} (filter (fn [[k v]] (afn k)) amap)))
+
+(defn map-key
+  [afn amap]
+  (into {} (for [[k v] amap] [(afn k) v])))
+
+(defn separate
+  [pred aseq]
+  [(filter pred aseq) (filter (complement pred) aseq)])
+
+(defn full-path
+  [parent name]
+  (let [toks (tokenize-path parent)]
+    (toks->path (conj toks name))))
+
+(def not-nil? (complement nil?))
+
+(defn barr
+  [& vals]
+  (byte-array (map byte vals)))
+
+(defn exit-process!
+  [val & msg]
+  (log-error (RuntimeException. (str msg)) "Halting process: " msg)
+  (.exit (Runtime/getRuntime) val))
+
+(defn sum
+  [vals]
+  (reduce + vals))
+
+(defn repeat-seq
+  ([aseq]
+   (apply concat (repeat aseq)))
+  ([amt aseq]
+   (apply concat (repeat amt aseq))))
+
+(defn div
+  "Perform floating point division on the arguments."
+  [f & rest]
+  (apply / (double f) rest))
+
+(defn defaulted
+  [val default]
+  (if val val default))
+
+(defn mk-counter
+  ([] (mk-counter 1))
+  ([start-val]
+   (let [val (atom (dec start-val))]
+     (fn [] (swap! val inc)))))
+
+(defmacro for-times [times & body]
+  `(for [i# (range ~times)]
+     ~@body))
+
+(defmacro dofor [& body]
+  `(doall (for ~@body)))
+
+(defn reverse-map
+  "{:a 1 :b 1 :c 2} -> {1 [:a :b] 2 :c}"
+  [amap]
+  (reduce (fn [m [k v]]
+            (let [existing (get m v [])]
+              (assoc m v (conj existing k))))
+          {} amap))
+
+(defmacro print-vars [& vars]
+  (let [prints (for [v vars] `(println ~(str v) ~v))]
+    `(do ~@prints)))
+
+(defn process-pid
+  "Gets the pid of this JVM. Hacky because Java doesn't provide a real way to do this."
+  []
+  (let [name (.getName (ManagementFactory/getRuntimeMXBean))
+        split (.split name "@")]
+    (when-not (= 2 (count split))
+      (throw (RuntimeException. (str "Got unexpected process name: " name))))
+    (first split)))
+
+(defn exec-command! [command]
+  (let [[comm-str & args] (seq (.split command " "))
+        command (CommandLine. comm-str)]
+    (doseq [a args]
+      (.addArgument command a))
+    (.execute (DefaultExecutor.) command)))
+
+(defn extract-dir-from-jar [jarpath dir destdir]
+  (try-cause
+    (with-open [jarpath (ZipFile. jarpath)]
+      (let [entries (enumeration-seq (.entries jarpath))]
+        (doseq [file (filter (fn [entry](and (not (.isDirectory entry)) (.startsWith (.getName entry) dir))) entries)]
+          (.mkdirs (.getParentFile (File. destdir (.getName file))))
+          (with-open [out (FileOutputStream. (File. destdir (.getName file)))]
+            (io/copy (.getInputStream jarpath file) out)))))
+    (catch IOException e
+      (log-message "Could not extract " dir " from " jarpath))))
+
+(defn sleep-secs [secs]
+  (when (pos? secs)
+    (Time/sleep (* (long secs) 1000))))
+
+(defn sleep-until-secs [target-secs]
+  (Time/sleepUntil (* (long target-secs) 1000)))
+
+(def ^:const sig-kill 9)
+
+(def ^:const sig-term 15)
+
+(defn send-signal-to-process
+  [pid signum]
+  (try-cause
+    (exec-command! (str (if on-windows?
+                          (if (== signum sig-kill) "taskkill /f /pid " "taskkill /pid ")
+                          (str "kill -" signum " "))
+                     pid))
+    (catch ExecuteException e
+      (log-message "Error when trying to kill " pid ". Process is probably already dead."))))
+
+(defn read-and-log-stream
+  [prefix stream]
+  (try
+    (let [reader (BufferedReader. (InputStreamReader. stream))]
+      (loop []
+        (if-let [line (.readLine reader)]
+                (do
+                  (log-warn (str prefix ":" line))
+                  (recur)))))
+    (catch IOException e
+      (log-warn "Error while trying to log stream" e))))
+
+(defn force-kill-process
+  [pid]
+  (send-signal-to-process pid sig-kill))
+
+(defn kill-process-with-sig-term
+  [pid]
+  (send-signal-to-process pid sig-term))
+
+(defn add-shutdown-hook-with-force-kill-in-1-sec
+  "adds the user supplied function as a shutdown hook for cleanup.
+   Also adds a function that sleeps for a second and then sends kill -9 to process to avoid any zombie process in case
+   cleanup function hangs."
+  [func]
+  (.addShutdownHook (Runtime/getRuntime) (Thread. #(func)))
+  (.addShutdownHook (Runtime/getRuntime) (Thread. #(do (sleep-secs 1)
+                                                        (.halt (Runtime/getRuntime) 20)))))
+
+(defprotocol SmartThread
+  (start [this])
+  (join [this])
+  (interrupt [this])
+  (sleeping? [this]))
+
+;; afn returns the number of seconds to sleep between iterations, or nil to stop the loop
+(defnk async-loop [afn
+                   :daemon false
+                   :kill-fn (fn [error] (exit-process! 1 "Async loop died!"))
+                   :priority Thread/NORM_PRIORITY
+                   :factory? false
+                   :start true
+                   :thread-name nil]
+  (let [thread (Thread.
+                 (fn []
+                   (try-cause
+                     (let [afn (if factory? (afn) afn)]
+                       (loop []
+                         (let [sleep-time (afn)]
+                           (when-not (nil? sleep-time)
+                             (sleep-secs sleep-time)
+                             (recur))
+                           )))
+                     (catch InterruptedException e
+                       (log-message "Async loop interrupted!")
+                       )
+                     (catch Throwable t
+                       (log-error t "Async loop died!")
+                       (kill-fn t)))))]
+    (.setDaemon thread daemon)
+    (.setPriority thread priority)
+    (when thread-name
+      (.setName thread (str (.getName thread) "-" thread-name)))
+    (when start
+      (.start thread))
+    ;; returns an object that supports start, interrupt, join, and sleeping? (the SmartThread protocol)
+    (reify SmartThread
+      (start
+        [this]
+        (.start thread))
+      (join
+        [this]
+        (.join thread))
+      (interrupt
+        [this]
+        (.interrupt thread))
+      (sleeping?
+        [this]
+        (Time/isThreadWaiting thread)))))
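+
+;; A minimal usage sketch of async-loop (hypothetical thread name and interval, for illustration only):
+;;
+;;   (def heartbeat (async-loop (fn [] (log-message "heartbeat") 10)
+;;                              :thread-name "heartbeat"))
+;;   ;; afn returned 10, so the loop logs roughly every 10 seconds until interrupted:
+;;   (interrupt heartbeat)
+;;   (join heartbeat)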
+
+(defn shell-cmd
+  [command]
+  (->> command
+    (map #(str \' (clojure.string/escape % {\' "'\"'\"'"}) \'))
+      (clojure.string/join " ")))
+
+(defn script-file-path [dir]
+  (str dir file-path-separator "storm-worker-script.sh"))
+
+(defn container-file-path [dir]
+  (str dir file-path-separator "launch_container.sh"))
+
+(defnk write-script
+  [dir command :environment {}]
+  (let [script-src (str "#!/bin/bash\n" (clojure.string/join "" (map (fn [[k v]] (str (shell-cmd ["export" (str k "=" v)]) ";\n")) environment)) "\nexec " (shell-cmd command) ";")
+        script-path (script-file-path dir)
+        _ (spit script-path script-src)]
+    script-path
+  ))
+
+(defnk launch-process
+  [command :environment {} :log-prefix nil :exit-code-callback nil :directory nil]
+  (let [builder (ProcessBuilder. command)
+        process-env (.environment builder)]
+    (when directory (.directory builder directory))
+    (.redirectErrorStream builder true)
+    (doseq [[k v] environment]
+      (.put process-env k v))
+    (let [process (.start builder)]
+      (if (or log-prefix exit-code-callback)
+        (async-loop
+         (fn []
+           (if log-prefix
+             (read-and-log-stream log-prefix (.getInputStream process)))
+           (when exit-code-callback
+             (try
+               (.waitFor process)
+               (catch InterruptedException e
+                 (log-message log-prefix " interrupted.")))
+             (exit-code-callback (.exitValue process)))
+           nil)))                    
+      process)))
+   
+(defn exists-file?
+  [path]
+  (.exists (File. path)))
+
+(defn rmr
+  [path]
+  (log-debug "Rmr path " path)
+  (when (exists-file? path)
+    (try
+      (FileUtils/forceDelete (File. path))
+      (catch FileNotFoundException e))))
+
+(defn rmpath
+  "Removes file or directory at the path. Not recursive. Throws exception on failure"
+  [path]
+  (log-debug "Removing path " path)
+  (when (exists-file? path)
+    (let [deleted? (.delete (File. path))]
+      (when-not deleted?
+        (throw (RuntimeException. (str "Failed to delete " path)))))))
+
+(defn local-mkdirs
+  [path]
+  (log-debug "Making dirs at " path)
+  (FileUtils/forceMkdir (File. path)))
+
+(defn touch
+  [path]
+  (log-debug "Touching file at " path)
+  (let [success? (do (if on-windows? (.mkdirs (.getParentFile (File. path))))
+                   (.createNewFile (File. path)))]
+    (when-not success?
+      (throw (RuntimeException. (str "Failed to touch " path))))))
+
+(defn create-symlink!
+  "Create symlink is to the target"
+  ([path-dir target-dir file-name]
+    (create-symlink! path-dir target-dir file-name file-name))
+  ([path-dir target-dir from-file-name to-file-name]
+    (let [path (str path-dir file-path-separator from-file-name)
+          target (str target-dir file-path-separator to-file-name)
+          empty-array (make-array String 0)
+          attrs (make-array FileAttribute 0)
+          abs-path (.toAbsolutePath (Paths/get path empty-array))
+          abs-target (.toAbsolutePath (Paths/get target empty-array))]
+      (log-debug "Creating symlink [" abs-path "] to [" abs-target "]")
+      (if (not (.exists (.toFile abs-path)))
+        (Files/createSymbolicLink abs-path abs-target attrs)))))
+
+(defn read-dir-contents
+  [dir]
+  (if (exists-file? dir)
+    (let [content-files (.listFiles (File. dir))]
+      (map #(.getName ^File %) content-files))
+    []))
+
+(defn compact
+  [aseq]
+  (filter (complement nil?) aseq))
+
+(defn current-classpath
+  []
+  (System/getProperty "java.class.path"))
+
+(defn get-full-jars
+  [dir]
+  (map #(str dir file-path-separator %) (filter #(.endsWith % ".jar") (read-dir-contents dir))))
+
+(defn worker-classpath
+  []
+  (let [storm-dir (System/getProperty "storm.home")
+        storm-lib-dir (str storm-dir file-path-separator "lib")
+        storm-conf-dir (if-let [confdir (System/getenv "STORM_CONF_DIR")]
+                         confdir 
+                         (str storm-dir file-path-separator "conf"))
+        storm-extlib-dir (str storm-dir file-path-separator "extlib")
+        extcp (System/getenv "STORM_EXT_CLASSPATH")]
+    (if (nil? storm-dir) 
+      (current-classpath)
+      (str/join class-path-separator
+                (remove nil? (concat (get-full-jars storm-lib-dir) (get-full-jars storm-extlib-dir) [extcp] [storm-conf-dir]))))))
+
+(defn add-to-classpath
+  [classpath paths]
+  (if (empty? paths)
+    classpath
+    (str/join class-path-separator (cons classpath paths))))
+
+(defn ^ReentrantReadWriteLock mk-rw-lock
+  []
+  (ReentrantReadWriteLock.))
+
+(defmacro read-locked
+  [rw-lock & body]
+  (let [lock (with-meta rw-lock {:tag `ReentrantReadWriteLock})]
+    `(let [rlock# (.readLock ~lock)]
+       (try (.lock rlock#)
+         ~@body
+         (finally (.unlock rlock#))))))
+
+(defmacro write-locked
+  [rw-lock & body]
+  (let [lock (with-meta rw-lock {:tag `ReentrantReadWriteLock})]
+    `(let [wlock# (.writeLock ~lock)]
+       (try (.lock wlock#)
+         ~@body
+         (finally (.unlock wlock#))))))
+
+(defn time-delta
+  [time-secs]
+  (- (current-time-secs) time-secs))
+
+(defn time-delta-ms
+  [time-ms]
+  (- (System/currentTimeMillis) (long time-ms)))
+
+(defn parse-int
+  [str]
+  (Integer/valueOf str))
+
+(defn integer-divided
+  [sum num-pieces]
+  (clojurify-structure (Utils/integerDivided sum num-pieces)))
+
+(defn collectify
+  [obj]
+  (if (or (sequential? obj) (instance? Collection obj))
+    obj
+    [obj]))
+
+(defn to-json
+  [obj]
+  (JSONValue/toJSONString obj))
+
+(defn from-json
+  [^String str]
+  (if str
+    (clojurify-structure
+      (JSONValue/parse str))
+    nil))
+
+(defmacro letlocals
+  [& body]
+  (let [[tobind lexpr] (split-at (dec (count body)) body)
+        binded (vec (mapcat (fn [e]
+                              (if (and (list? e) (= 'bind (first e)))
+                                [(second e) (last e)]
+                                ['_ e]
+                                ))
+                            tobind))]
+    `(let ~binded
+       ~(first lexpr))))
+
+(defn remove-first
+  [pred aseq]
+  (let [[b e] (split-with (complement pred) aseq)]
+    (when (empty? e)
+      (throw (IllegalArgumentException. "Nothing to remove")))
+    (concat b (rest e))))
+
+(defn assoc-non-nil
+  [m k v]
+  (if v (assoc m k v) m))
+
+(defn multi-set
+  "Returns a map of elem to count"
+  [aseq]
+  (apply merge-with +
+         (map #(hash-map % 1) aseq)))
+
+(defn set-var-root*
+  [avar val]
+  (alter-var-root avar (fn [avar] val)))
+
+(defmacro set-var-root
+  [var-sym val]
+  `(set-var-root* (var ~var-sym) ~val))
+
+(defmacro with-var-roots
+  [bindings & body]
+  (let [settings (partition 2 bindings)
+        tmpvars (repeatedly (count settings) (partial gensym "old"))
+        vars (map first settings)
+        savevals (vec (mapcat (fn [t v] [t v]) tmpvars vars))
+        setters (for [[v s] settings] `(set-var-root ~v ~s))
+        restorers (map (fn [v s] `(set-var-root ~v ~s)) vars tmpvars)]
+    `(let ~savevals
+       ~@setters
+       (try
+         ~@body
+         (finally
+           ~@restorers)))))
+
+(defn map-diff
+  "Returns mappings in m2 that aren't in m1"
+  [m1 m2]
+  (into {} (filter (fn [[k v]] (not= v (m1 k))) m2)))
+
+(defn select-keys-pred
+  [pred amap]
+  (into {} (filter (fn [[k v]] (pred k)) amap)))
+
+(defn rotating-random-range
+  [choices]
+  (let [rand (Random.)
+        choices (ArrayList. choices)]
+    (Collections/shuffle choices rand)
+    [(MutableInt. -1) choices rand]))
+
+(defn acquire-random-range-id
+  [[^MutableInt curr ^List state ^Random rand]]
+  (when (>= (.increment curr) (.size state))
+    (.set curr 0)
+    (Collections/shuffle state rand))
+  (.get state (.get curr)))
+
+; this can be rewritten to be tail recursive
+(defn interleave-all
+  [& colls]
+  (if (empty? colls)
+    []
+    (let [colls (filter (complement empty?) colls)
+          my-elems (map first colls)
+          rest-elems (apply interleave-all (map rest colls))]
+      (concat my-elems rest-elems))))
+
+(defn any-intersection
+  [& sets]
+  (let [elem->count (multi-set (apply concat sets))]
+    (-> (filter-val #(> % 1) elem->count)
+        keys)))
+
+(defn between?
+  "val >= lower and val <= upper"
+  [val lower upper]
+  (and (>= val lower)
+       (<= val upper)))
+
+(defmacro benchmark
+  [& body]
+  `(let [l# (doall (range 1000000))]
+     (time
+       (doseq [i# l#]
+         ~@body))))
+
+(defn rand-sampler
+  [freq]
+  (let [r (java.util.Random.)]
+    (fn [] (= 0 (.nextInt r freq)))))
+
+(defn even-sampler
+  [freq]
+  (let [freq (int freq)
+        start (int 0)
+        r (java.util.Random.)
+        curr (MutableInt. -1)
+        target (MutableInt. (.nextInt r freq))]
+    (with-meta
+      (fn []
+        (let [i (.increment curr)]
+          (when (>= i freq)
+            (.set curr start)
+            (.set target (.nextInt r freq))))
+        (= (.get curr) (.get target)))
+      {:rate freq})))
+
+(defn sampler-rate
+  [sampler]
+  (:rate (meta sampler)))
+
+(defn class-selector
+  [obj & args]
+  (class obj))
+
+(defn uptime-computer []
+  (let [start-time (current-time-secs)]
+    (fn [] (time-delta start-time))))
+
+(defn stringify-error [error]
+  (let [result (StringWriter.)
+        printer (PrintWriter. result)]
+    (.printStackTrace error printer)
+    (.toString result)))
+
+(defn nil-to-zero
+  [v]
+  (or v 0))
+
+(defn bit-xor-vals
+  [vals]
+  (reduce bit-xor 0 vals))
+
+(defmacro with-error-reaction
+  [afn & body]
+  `(try ~@body
+     (catch Throwable t# (~afn t#))))
+
+(defn container
+  []
+  (Container.))
+
+(defn container-set! [^Container container obj]
+  (set! (. container object) obj)
+  container)
+
+(defn container-get [^Container container]
+  (. container object))
+
+(defn to-millis [secs]
+  (* 1000 (long secs)))
+
+(defn throw-runtime [& strs]
+  (throw (RuntimeException. (apply str strs))))
+
+(defn redirect-stdio-to-slf4j!
+  []
+  ;; set-var-root doesn't work with *out* and *err*, so digging much deeper here
+  ;; Unfortunately, this code seems to work at the REPL but not when spawned as worker processes
+  ;; it might have something to do with being a child process
+  ;; (set! (. (.getThreadBinding RT/OUT) val)
+  ;;       (java.io.OutputStreamWriter.
+  ;;         (log-stream :info "STDIO")))
+  ;; (set! (. (.getThreadBinding RT/ERR) val)
+  ;;       (PrintWriter.
+  ;;         (java.io.OutputStreamWriter.
+  ;;           (log-stream :error "STDIO"))
+  ;;         true))
+  (log-capture! "STDIO"))
+
+(defn spy
+  [prefix val]
+  (log-message prefix ": " val)
+  val)
+
+(defn zip-contains-dir?
+  [zipfile target]
+  (let [entries (->> zipfile (ZipFile.) .entries enumeration-seq (map (memfn getName)))]
+    (boolean (some #(.startsWith % (str target "/")) entries))))
+
+(defn url-encode
+  [s]
+  (codec/url-encode s))
+
+(defn url-decode
+  [s]
+  (codec/url-decode s))
+
+(defn join-maps
+  [& maps]
+  (let [all-keys (apply set/union (for [m maps] (-> m keys set)))]
+    (into {} (for [k all-keys]
+               [k (for [m maps] (m k))]))))
+
+(defn partition-fixed
+  [max-num-chunks aseq]
+  (if (zero? max-num-chunks)
+    []
+    (let [chunks (->> (integer-divided (count aseq) max-num-chunks)
+                      (#(dissoc % 0))
+                      (sort-by (comp - first))
+                      (mapcat (fn [[size amt]] (repeat amt size)))
+                      )]
+      (loop [result []
+             [chunk & rest-chunks] chunks
+             data aseq]
+        (if (nil? chunk)
+          result
+          (let [[c rest-data] (split-at chunk data)]
+            (recur (conj result c)
+                   rest-chunks
+                   rest-data)))))))
+
+
+(defn assoc-apply-self
+  [curr key afn]
+  (assoc curr key (afn curr)))
+
+(defmacro recursive-map
+  [& forms]
+  (->> (partition 2 forms)
+       (map (fn [[key form]] `(assoc-apply-self ~key (fn [~'<>] ~form))))
+       (concat `(-> {}))))
+
+(defn current-stack-trace
+  []
+  (->> (Thread/currentThread)
+       .getStackTrace
+       (map str)
+       (str/join "\n")))
+
+(defn get-iterator
+  [^Iterable alist]
+  (if alist (.iterator alist)))
+
+(defn iter-has-next?
+  [^Iterator iter]
+  (if iter (.hasNext iter) false))
+
+(defn iter-next
+  [^Iterator iter]
+  (.next iter))
+
+(defmacro fast-list-iter
+  [pairs & body]
+  (let [pairs (partition 2 pairs)
+        lists (map second pairs)
+        elems (map first pairs)
+        iters (map (fn [_] (gensym)) lists)
+        bindings (->> (map (fn [i l] [i `(get-iterator ~l)]) iters lists)
+                      (apply concat))
+        tests (map (fn [i] `(iter-has-next? ~i)) iters)
+        assignments (->> (map (fn [e i] [e `(iter-next ~i)]) elems iters)
+                         (apply concat))]
+    `(let [~@bindings]
+       (while (and ~@tests)
+         (let [~@assignments]
+           ~@body)))))
+
+(defn fast-list-map
+  [afn alist]
+  (let [ret (ArrayList.)]
+    (fast-list-iter [e alist]
+                    (.add ret (afn e)))
+    ret))
+
+(defmacro fast-list-for
+  [[e alist] & body]
+  `(fast-list-map (fn [~e] ~@body) ~alist))
+
+(defn map-iter
+  [^Map amap]
+  (if amap (-> amap .entrySet .iterator)))
+
+(defn convert-entry
+  [^Map$Entry entry]
+  [(.getKey entry) (.getValue entry)])
+
+(defmacro fast-map-iter
+  [[bind amap] & body]
+  `(let [iter# (map-iter ~amap)]
+     (while (iter-has-next? iter#)
+       (let [entry# (iter-next iter#)
+             ~bind (convert-entry entry#)]
+         ~@body))))
+
+(defn fast-first
+  [^List alist]
+  (.get alist 0))
+
+(defmacro get-with-default
+  [amap key default-val]
+  `(let [curr# (.get ~amap ~key)]
+     (if curr#
+       curr#
+       (do
+         (let [new# ~default-val]
+           (.put ~amap ~key new#)
+           new#)))))
+
+(defn fast-group-by
+  [afn alist]
+  (let [ret (HashMap.)]
+    (fast-list-iter
+      [e alist]
+      (let [key (afn e)
+            ^List curr (get-with-default ret key (ArrayList.))]
+        (.add curr e)))
+    ret))
+
+(defn new-instance
+  [klass]
+  (let [klass (if (string? klass) (Class/forName klass) klass)]
+    (.newInstance klass)))
+
+(defn get-configured-class
+  [conf config-key]
+  (if (.get conf config-key) (new-instance (.get conf config-key)) nil))
+
+(defmacro -<>
+  ([x] x)
+  ([x form] (if (seq? form)
+              (with-meta
+                (let [[begin [_ & end]] (split-with #(not= % '<>) form)]
+                  (concat begin [x] end))
+                (meta form))
+              (list form x)))
+  ([x form & more] `(-<> (-<> ~x ~form) ~@more)))
+
+(defn logs-filename
+  [storm-id port]
+  (str storm-id file-path-separator port file-path-separator "worker.log"))
+
+(def worker-log-filename-pattern #"^worker.log(.*)")
+
+(defn event-logs-filename
+  [storm-id port]
+  (str storm-id file-path-separator port file-path-separator "events.log"))
+
+(defn clojure-from-yaml-file [yamlFile]
+  (try
+    (with-open [reader (java.io.FileReader. yamlFile)]
+      (clojurify-structure (.load (Yaml. (SafeConstructor.)) reader)))
+    (catch Exception ex
+      (log-error ex))))
+
+(defn hashmap-to-persistent [^HashMap m]
+  (zipmap (.keySet m) (.values m)))
+
+(defn retry-on-exception
+  "Retries specific function on exception based on retries count"
+  [retries task-description f & args]
+  (let [res (try {:value (apply f args)}
+              (catch Exception e
+                (if (<= retries 0) ;; no attempts left: rethrow
+                  (throw e)
+                  {:exception e})))]
+    (if (:exception res)
+      (do 
+        (log-error (:exception res) (str "Failed to " task-description ". Will make [" retries "] more attempts."))
+        (recur (dec retries) task-description f args))
+      (do 
+        (log-debug (str "Successful " task-description "."))
+        (:value res)))))
+
+(defn setup-default-uncaught-exception-handler
+  "Set a default uncaught exception handler to handle exceptions not caught in other threads."
+  []
+  (Thread/setDefaultUncaughtExceptionHandler
+    (proxy [Thread$UncaughtExceptionHandler] []
+      (uncaughtException [thread thrown]
+        (try
+          (Utils/handleUncaughtException thrown)
+          (catch Error err
+            (do
+              (log-error err "Received error in main thread; terminating server...")
+              (.exit (Runtime/getRuntime) -2))))))))
+
+(defn redact-value
+  "Hides value for k in coll for printing coll safely"
+  [coll k]
+  (if (contains? coll k)
+    (assoc coll k (apply str (repeat (count (coll k)) "#")))
+    coll))
+
+(defn log-thrift-access
+  [request-id remoteAddress principal operation]
+  (doto
+    (ThriftAccessLogger.)
+    (.log (str "Request ID: " request-id " access from: " remoteAddress " principal: " principal " operation: " operation))))
+
+(def DISALLOWED-KEY-NAME-STRS #{"/" "." ":" "\\"})
+
+(defn validate-key-name!
+  [name]
+  (if (some #(.contains name %) DISALLOWED-KEY-NAME-STRS)
+    (throw (RuntimeException.
+             (str "Key name cannot contain any of the following: " (pr-str DISALLOWED-KEY-NAME-STRS))))
+    (if (clojure.string/blank? name)
+      (throw (RuntimeException.
+               "Key name cannot be blank")))))

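A quick REPL-style sketch of how a few of the helpers added above behave (illustrative only; it assumes the org.apache.storm.util namespace from this diff is loaded, and the printed results are indicative):

    (use 'org.apache.storm.util)

    (interleave-all [1 2 3] [:a :b] ["x"])  ;=> (1 :a "x" 2 :b 3)
    (partition-fixed 2 (range 4))           ;=> [(0 1) (2 3)]
    (between? 5 1 10)                       ;=> true
    (recursive-map :a 1 :b (inc (:a <>)))   ;=> {:a 1, :b 2}

    ;; retry-on-exception wraps any flaky side-effecting call, e.g.
    ;; (retry-on-exception 3 "read cluster state" some-flaky-fn)
    ;; where some-flaky-fn is a stand-in for the real operation.
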
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/zookeeper.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/zookeeper.clj b/storm-core/src/clj/org/apache/storm/zookeeper.clj
new file mode 100644
index 0000000..8a223cd
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/zookeeper.clj
@@ -0,0 +1,308 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+
+(ns org.apache.storm.zookeeper
+  (:import [org.apache.curator.retry RetryNTimes]
+           [org.apache.storm Config])
+  (:import [org.apache.curator.framework.api CuratorEvent CuratorEventType CuratorListener UnhandledErrorListener])
+  (:import [org.apache.curator.framework.state ConnectionStateListener])
+  (:import [org.apache.curator.framework CuratorFramework CuratorFrameworkFactory])
+  (:import [org.apache.curator.framework.recipes.leader LeaderLatch LeaderLatch$State Participant LeaderLatchListener])
+  (:import [org.apache.zookeeper ZooKeeper Watcher KeeperException$NoNodeException
+            ZooDefs ZooDefs$Ids CreateMode WatchedEvent Watcher$Event Watcher$Event$KeeperState
+            Watcher$Event$EventType KeeperException$NodeExistsException])
+  (:import [org.apache.zookeeper.data Stat])
+  (:import [org.apache.zookeeper.server ZooKeeperServer NIOServerCnxnFactory])
+  (:import [java.net InetSocketAddress BindException InetAddress])
+  (:import [org.apache.storm.nimbus ILeaderElector NimbusInfo])
+  (:import [java.io File])
+  (:import [java.util List Map])
+  (:import [org.apache.storm.utils Utils ZookeeperAuthInfo])
+  (:use [org.apache.storm util log config]))
+
+(def zk-keeper-states
+  {Watcher$Event$KeeperState/Disconnected :disconnected
+   Watcher$Event$KeeperState/SyncConnected :connected
+   Watcher$Event$KeeperState/AuthFailed :auth-failed
+   Watcher$Event$KeeperState/Expired :expired})
+
+(def zk-event-types
+  {Watcher$Event$EventType/None :none
+   Watcher$Event$EventType/NodeCreated :node-created
+   Watcher$Event$EventType/NodeDeleted :node-deleted
+   Watcher$Event$EventType/NodeDataChanged :node-data-changed
+   Watcher$Event$EventType/NodeChildrenChanged :node-children-changed})
+
+(defn- default-watcher
+  [state type path]
+  (log-message "Zookeeper state update: " state type path))
+
+(defnk mk-client
+  [conf servers port
+   :root ""
+   :watcher default-watcher
+   :auth-conf nil]
+  (let [fk (Utils/newCurator conf servers port root (when auth-conf (ZookeeperAuthInfo. auth-conf)))]
+    (.. fk
+        (getCuratorListenable)
+        (addListener
+          (reify CuratorListener
+            (^void eventReceived [this ^CuratorFramework _fk ^CuratorEvent e]
+                   (when (= (.getType e) CuratorEventType/WATCHED)
+                     (let [^WatchedEvent event (.getWatchedEvent e)]
+                       (watcher (zk-keeper-states (.getState event))
+                                (zk-event-types (.getType event))
+                                (.getPath event))))))))
+    ;;    (.. fk
+    ;;        (getUnhandledErrorListenable)
+    ;;        (addListener
+    ;;         (reify UnhandledErrorListener
+    ;;           (unhandledError [this msg error]
+    ;;             (if (or (exception-cause? InterruptedException error)
+    ;;                     (exception-cause? java.nio.channels.ClosedByInterruptException error))
+    ;;               (do (log-warn-error error "Zookeeper exception " msg)
+    ;;                   (let [to-throw (InterruptedException.)]
+    ;;                     (.initCause to-throw error)
+    ;;                     (throw to-throw)
+    ;;                     ))
+    ;;               (do (log-error error "Unrecoverable Zookeeper error " msg)
+    ;;                   (halt-process! 1 "Unrecoverable Zookeeper error")))
+    ;;             ))))
+    (.start fk)
+    fk))
+
+(def zk-create-modes
+  {:ephemeral CreateMode/EPHEMERAL
+   :persistent CreateMode/PERSISTENT
+   :sequential CreateMode/PERSISTENT_SEQUENTIAL})
+
+(defn create-node
+  ([^CuratorFramework zk ^String path ^bytes data mode acls]
+    (let [mode  (zk-create-modes mode)]
+      (try
+        (.. zk (create) (creatingParentsIfNeeded) (withMode mode) (withACL acls) (forPath (normalize-path path) data))
+        (catch Exception e (throw (wrap-in-runtime e))))))
+  ([^CuratorFramework zk ^String path ^bytes data acls]
+    (create-node zk path data :persistent acls)))
+
+(defn exists-node?
+  [^CuratorFramework zk ^String path watch?]
+  ((complement nil?)
+   (try
+     (if watch?
+       (.. zk (checkExists) (watched) (forPath (normalize-path path)))
+       (.. zk (checkExists) (forPath (normalize-path path))))
+     (catch Exception e (throw (wrap-in-runtime e))))))
+
+(defnk delete-node
+  [^CuratorFramework zk ^String path]
+  (let [path (normalize-path path)]
+    (when (exists-node? zk path false)
+      (try-cause  (.. zk (delete) (deletingChildrenIfNeeded) (forPath (normalize-path path)))
+                  (catch KeeperException$NoNodeException e
+                    ;; do nothing
+                    (log-message "exception" e)
+                  )
+                  (catch Exception e (throw (wrap-in-runtime e)))))))
+
+(defn mkdirs
+  [^CuratorFramework zk ^String path acls]
+  (let [path (normalize-path path)]
+    (when-not (or (= path "/") (exists-node? zk path false))
+      (mkdirs zk (parent-path path) acls)
+      (try-cause
+        (create-node zk path (barr 7) :persistent acls)
+        (catch KeeperException$NodeExistsException e
+          ;; this can happen when multiple clients are doing mkdirs at the same time
+          ))
+      )))
+
+(defn sync-path
+  [^CuratorFramework zk ^String path]
+  (try
+    (.. zk (sync) (forPath (normalize-path path)))
+    (catch Exception e (throw (wrap-in-runtime e)))))
+
+
+(defn add-listener [^CuratorFramework zk ^ConnectionStateListener listener]
+  (.. zk (getConnectionStateListenable) (addListener listener)))
+
+(defn get-data
+  [^CuratorFramework zk ^String path watch?]
+  (let [path (normalize-path path)]
+    (try-cause
+      (if (exists-node? zk path watch?)
+        (if watch?
+          (.. zk (getData) (watched) (forPath path))
+          (.. zk (getData) (forPath path))))
+      (catch KeeperException$NoNodeException e
+        ;; this is fine b/c we still have a watch from the successful exists call
+        nil )
+      (catch Exception e (throw (wrap-in-runtime e))))))
+
+(defn get-data-with-version 
+  [^CuratorFramework zk ^String path watch?]
+  (let [stats (org.apache.zookeeper.data.Stat. )
+        path (normalize-path path)]
+    (try-cause
+     (if-let [data
+              (if (exists-node? zk path watch?)
+                (if watch?
+                  (.. zk (getData) (watched) (storingStatIn stats) (forPath path))
+                  (.. zk (getData) (storingStatIn stats) (forPath path))))]
+       {:data data
+        :version (.getVersion stats)})
+     (catch KeeperException$NoNodeException e
+       ;; this is fine b/c we still have a watch from the successful exists call
+       nil ))))
+
+(defn get-version
+  [^CuratorFramework zk ^String path watch?]
+  (if-let [stats
+           (if watch?
+             (.. zk (checkExists) (watched) (forPath (normalize-path path)))
+             (.. zk (checkExists) (forPath (normalize-path path))))]
+    (.getVersion stats)
+    nil))
+
+(defn get-children
+  [^CuratorFramework zk ^String path watch?]
+  (try
+    (if watch?
+      (.. zk (getChildren) (watched) (forPath (normalize-path path)))
+      (.. zk (getChildren) (forPath (normalize-path path))))
+    (catch Exception e (throw (wrap-in-runtime e)))))
+
+(defn delete-node-blobstore
+  "Deletes the state inside the zookeeper for a key, for which the
+   contents of the key starts with nimbus host port information"
+  [^CuratorFramework zk ^String parent-path ^String host-port-info]
+  (let [parent-path (normalize-path parent-path)
+        child-path-list (if (exists-node? zk parent-path false)
+                          (into [] (get-children zk parent-path false))
+                          [])]
+    (doseq [child child-path-list]
+      (when (.startsWith child host-port-info)
+        (log-debug "delete-node child: " child)
+        (delete-node zk (str parent-path "/" child))))))
+
+(defn set-data
+  [^CuratorFramework zk ^String path ^bytes data]
+  (try
+    (.. zk (setData) (forPath (normalize-path path) data))
+    (catch Exception e (throw (wrap-in-runtime e)))))
+
+(defn exists
+  [^CuratorFramework zk ^String path watch?]
+  (exists-node? zk path watch?))
+
+(defnk mk-inprocess-zookeeper
+  [localdir :port nil]
+  (let [localfile (File. localdir)
+        zk (ZooKeeperServer. localfile localfile 2000)
+        [retport factory]
+        (loop [retport (if port port 2000)]
+          (if-let [factory-tmp
+                   (try-cause
+                     (doto (NIOServerCnxnFactory.)
+                       (.configure (InetSocketAddress. retport) 0))
+                     (catch BindException e
+                       (when (> (inc retport) (if port port 65535))
+                         (throw (RuntimeException.
+                                  "No port is available to launch an inprocess zookeeper.")))))]
+            [retport factory-tmp]
+            (recur (inc retport))))]
+    (log-message "Starting inprocess zookeeper at port " retport " and dir " localdir)
+    (.startup factory zk)
+    [retport factory]))
+
+(defn shutdown-inprocess-zookeeper
+  [handle]
+  (.shutdown handle))
+
+(defn- to-NimbusInfo [^Participant participant]
+  (let
+    [id (if (clojure.string/blank? (.getId participant))
+          (throw (RuntimeException. "No nimbus leader participant host found; have you started your nimbus hosts?"))
+          (.getId participant))
+     nimbus-info (NimbusInfo/parse id)]
+    (.setLeader nimbus-info (.isLeader participant))
+    nimbus-info))
+
+(defn leader-latch-listener-impl
+  "Leader latch listener that will be invoked when we either gain or lose leadership"
+  [conf zk leader-latch]
+  (let [hostname (.getCanonicalHostName (InetAddress/getLocalHost))]
+    (reify LeaderLatchListener
+      (^void isLeader[this]
+        (log-message (str hostname " gained leadership")))
+      (^void notLeader[this]
+        (log-message (str hostname " lost leadership."))))))
+
+(defn zk-leader-elector
+  "Zookeeper Implementation of ILeaderElector."
+  [conf]
+  (let [servers (conf STORM-ZOOKEEPER-SERVERS)
+        zk (mk-client conf (conf STORM-ZOOKEEPER-SERVERS) (conf STORM-ZOOKEEPER-PORT) :auth-conf conf)
+        leader-lock-path (str (conf STORM-ZOOKEEPER-ROOT) "/leader-lock")
+        id (.toHostPortString (NimbusInfo/fromConf conf))
+        leader-latch (atom (LeaderLatch. zk leader-lock-path id))
+        leader-latch-listener (atom (leader-latch-listener-impl conf zk @leader-latch))
+        ]
+    (reify ILeaderElector
+      (prepare [this conf]
+        (log-message "no-op for zookeeper implementation"))
+
+      (^void addToLeaderLockQueue [this]
+        ;if this latch is already closed, we need to create a new instance.
+        (if (.equals LeaderLatch$State/CLOSED (.getState @leader-latch))
+          (do
+            (reset! leader-latch (LeaderLatch. zk leader-lock-path id))
+            (reset! leader-latch-listener (leader-latch-listener-impl conf zk @leader-latch))
+            (log-message "LeaderLatch was in closed state. Reset the leaderLatch and listeners.")
+            ))
+
+        ;Only invoke start if the latch is not already started.
+        (if (.equals LeaderLatch$State/LATENT (.getState @leader-latch))
+          (do
+            (.addListener @leader-latch @leader-latch-listener)
+            (.start @leader-latch)
+            (log-message "Queued up for leader lock."))
+          (log-message "Node already in queue for leader lock.")))
+
+      (^void removeFromLeaderLockQueue [this]
+        ;Only started latches can be closed.
+        (if (.equals LeaderLatch$State/STARTED (.getState @leader-latch))
+          (do
+            (.close @leader-latch)
+            (log-message "Removed from leader lock queue."))
+          (log-message "leader latch is not started so no removeFromLeaderLockQueue needed.")))
+
+      (^boolean isLeader [this]
+        (.hasLeadership @leader-latch))
+
+      (^NimbusInfo getLeader [this]
+        (to-NimbusInfo (.getLeader @leader-latch)))
+
+      (^List getAllNimbuses [this]
+        (let [participants (.getParticipants @leader-latch)]
+          (map (fn [^Participant participant]
+                 (to-NimbusInfo participant))
+            participants)))
+
+      (^void close[this]
+        (log-message "closing zookeeper connection of leader elector.")
+        (.close zk)))))

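For orientation, a minimal sketch of how the ZooKeeper helpers above fit together, for example against the in-process server (assumptions: conf is a full Storm configuration map, e.g. the one returned by read-storm-config, and "/tmp/zk-test" is only a placeholder scratch directory):

    (let [[zk-port factory] (mk-inprocess-zookeeper "/tmp/zk-test")
          conf (read-storm-config)
          zk (mk-client conf ["localhost"] zk-port)]
      (mkdirs zk "/storm/test" ZooDefs$Ids/OPEN_ACL_UNSAFE)
      (set-data zk "/storm/test" (.getBytes "hello"))
      (println (String. (get-data zk "/storm/test" false))) ;; prints hello
      (.close zk)
      (shutdown-inprocess-zookeeper factory))
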
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/storm/trident/testing.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/storm/trident/testing.clj b/storm-core/src/clj/storm/trident/testing.clj
deleted file mode 100644
index ac5fcab..0000000
--- a/storm-core/src/clj/storm/trident/testing.clj
+++ /dev/null
@@ -1,79 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns storm.trident.testing
-  (:require [backtype.storm.LocalDRPC :as LocalDRPC])
-  (:import [storm.trident.testing FeederBatchSpout FeederCommitterBatchSpout MemoryMapState MemoryMapState$Factory TuplifyArgs])
-  (:require [backtype.storm [LocalDRPC]])
-  (:import [backtype.storm LocalDRPC])
-  (:import [backtype.storm.tuple Fields])
-  (:import [backtype.storm.generated KillOptions])
-  (:require [backtype.storm [testing :as t]])
-  (:use [backtype.storm util])
-  )
-
-(defn local-drpc []
-  (LocalDRPC.))
-
-(defn exec-drpc [^LocalDRPC drpc function-name args]
-  (let [res (.execute drpc function-name args)]
-    (from-json res)))
-
-(defn exec-drpc-tuples [^LocalDRPC drpc function-name tuples]
-  (exec-drpc drpc function-name (to-json tuples)))
-
-(defn feeder-spout [fields]
-  (FeederBatchSpout. fields))
-
-(defn feeder-committer-spout [fields]
-  (FeederCommitterBatchSpout. fields))
-
-(defn feed [feeder tuples]
-  (.feed feeder tuples))
-
-(defn fields [& fields]
-  (Fields. fields))
-
-(defn memory-map-state []
-  (MemoryMapState$Factory.))
-
-(defmacro with-drpc [[drpc] & body]
-  `(let [~drpc (backtype.storm.LocalDRPC.)]
-     ~@body
-     (.shutdown ~drpc)
-     ))
-
-(defn with-topology* [cluster topo body-fn]
-  (t/submit-local-topology (:nimbus cluster) "tester" {} (.build topo))
-  (body-fn)
-  (.killTopologyWithOpts (:nimbus cluster) "tester" (doto (KillOptions.) (.set_wait_secs 0)))
-  )
-
-(defmacro with-topology [[cluster topo] & body]
-  `(with-topology* ~cluster ~topo (fn [] ~@body)))
-
-(defn bootstrap-imports []
-  (import 'backtype.storm.LocalDRPC)
-  (import 'storm.trident.TridentTopology)
-  (import '[storm.trident.operation.builtin Count Sum Equals MapGet Debug FilterNull FirstN TupleCollectionGet])
-  )
-
-(defn drpc-tuples-input [topology function-name drpc outfields]
-  (-> topology
-      (.newDRPCStream function-name drpc)
-      (.each (fields "args") (TuplifyArgs.) outfields)
-      ))
-
-

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/genthrift.sh
----------------------------------------------------------------------
diff --git a/storm-core/src/genthrift.sh b/storm-core/src/genthrift.sh
index 54cd10a..eeec78a 100644
--- a/storm-core/src/genthrift.sh
+++ b/storm-core/src/genthrift.sh
@@ -16,9 +16,9 @@
 # limitations under the License.
 
 rm -rf gen-javabean gen-py py
-rm -rf jvm/backtype/storm/generated
+rm -rf jvm/org/apache/storm/generated
 thrift --gen java:beans,hashcode,nocamel,generated_annotations=undated --gen py:utf8strings storm.thrift
-for file in gen-javabean/backtype/storm/generated/* ; do
+for file in gen-javabean/org/apache/storm/generated/* ; do
   cat java_license_header.txt ${file} > ${file}.tmp
   mv -f ${file}.tmp ${file}
 done
@@ -28,6 +28,6 @@ for file in gen-py/storm/* ; do
   cat py_license_header.txt ${file} > ${file}.tmp
   mv -f ${file}.tmp ${file}
 done
-mv gen-javabean/backtype/storm/generated jvm/backtype/storm/generated
+mv gen-javabean/org/apache/storm/generated jvm/org/apache/storm/generated
 mv gen-py py
 rm -rf gen-javabean

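Once the beans are regenerated under the new path, callers import them from org.apache.storm.generated rather than backtype.storm.generated. A hypothetical namespace (the ns name is illustrative only) that needs KillOptions would now read:

    (ns example.kill-topology
      (:import [org.apache.storm.generated KillOptions]))

    (doto (KillOptions.) (.set_wait_secs 0))
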

[44/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/SlidingWindowCounterTest.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/SlidingWindowCounterTest.java b/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/SlidingWindowCounterTest.java
new file mode 100644
index 0000000..e7b71cf
--- /dev/null
+++ b/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/SlidingWindowCounterTest.java
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter.tools;
+
+import org.testng.annotations.DataProvider;
+import org.testng.annotations.Test;
+
+import java.util.Map;
+
+import static org.fest.assertions.api.Assertions.assertThat;
+
+public class SlidingWindowCounterTest {
+
+  private static final int ANY_WINDOW_LENGTH_IN_SLOTS = 2;
+  private static final Object ANY_OBJECT = "ANY_OBJECT";
+
+  @DataProvider
+  public Object[][] illegalWindowLengths() {
+    return new Object[][]{ { -10 }, { -3 }, { -2 }, { -1 }, { 0 }, { 1 } };
+  }
+
+  @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalWindowLengths")
+  public void lessThanTwoSlotsShouldThrowIAE(int windowLengthInSlots) {
+    new SlidingWindowCounter<Object>(windowLengthInSlots);
+  }
+
+  @DataProvider
+  public Object[][] legalWindowLengths() {
+    return new Object[][]{ { 2 }, { 3 }, { 20 } };
+  }
+
+  @Test(dataProvider = "legalWindowLengths")
+  public void twoOrMoreSlotsShouldBeValid(int windowLengthInSlots) {
+    new SlidingWindowCounter<Object>(windowLengthInSlots);
+  }
+
+  @Test
+  public void newInstanceShouldHaveEmptyCounts() {
+    // given
+    SlidingWindowCounter<Object> counter = new SlidingWindowCounter<Object>(ANY_WINDOW_LENGTH_IN_SLOTS);
+
+    // when
+    Map<Object, Long> counts = counter.getCountsThenAdvanceWindow();
+
+    // then
+    assertThat(counts).isEmpty();
+  }
+
+  @DataProvider
+  public Object[][] simulatedCounterIterations() {
+    return new Object[][]{ { 2, new int[]{ 3, 2, 0, 0, 1, 0, 0, 0 }, new long[]{ 3, 5, 2, 0, 1, 1, 0, 0 } },
+        { 3, new int[]{ 3, 2, 0, 0, 1, 0, 0, 0 }, new long[]{ 3, 5, 5, 2, 1, 1, 1, 0 } },
+        { 4, new int[]{ 3, 2, 0, 0, 1, 0, 0, 0 }, new long[]{ 3, 5, 5, 5, 3, 1, 1, 1 } },
+        { 5, new int[]{ 3, 2, 0, 0, 1, 0, 0, 0 }, new long[]{ 3, 5, 5, 5, 6, 3, 1, 1 } },
+        { 5, new int[]{ 3, 11, 5, 13, 7, 17, 0, 3, 50, 600, 7000 },
+            new long[]{ 3, 14, 19, 32, 39, 53, 42, 40, 77, 670, 7653 } }, };
+  }
+
+  @Test(dataProvider = "simulatedCounterIterations")
+  public void testCounterWithSimulatedRuns(int windowLengthInSlots, int[] incrementsPerIteration,
+      long[] expCountsPerIteration) {
+    // given
+    SlidingWindowCounter<Object> counter = new SlidingWindowCounter<Object>(windowLengthInSlots);
+    int numIterations = incrementsPerIteration.length;
+
+    for (int i = 0; i < numIterations; i++) {
+      int numIncrements = incrementsPerIteration[i];
+      long expCounts = expCountsPerIteration[i];
+      // Objects are absent if they were zero both this iteration
+      // and the last -- if only this one, we need to report zero.
+      boolean expAbsent = ((expCounts == 0) && ((i == 0) || (expCountsPerIteration[i - 1] == 0)));
+
+      // given (for this iteration)
+      for (int j = 0; j < numIncrements; j++) {
+        counter.incrementCount(ANY_OBJECT);
+      }
+
+      // when (for this iteration)
+      Map<Object, Long> counts = counter.getCountsThenAdvanceWindow();
+
+      // then (for this iteration)
+      if (expAbsent) {
+        assertThat(counts).doesNotContainKey(ANY_OBJECT);
+      }
+      else {
+        assertThat(counts.get(ANY_OBJECT)).isEqualTo(expCounts);
+      }
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/SlotBasedCounterTest.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/SlotBasedCounterTest.java b/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/SlotBasedCounterTest.java
new file mode 100644
index 0000000..e4f7dcc
--- /dev/null
+++ b/examples/storm-starter/test/jvm/org/apache/storm/starter/tools/SlotBasedCounterTest.java
@@ -0,0 +1,181 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter.tools;
+
+import org.testng.annotations.DataProvider;
+import org.testng.annotations.Test;
+
+import java.util.Map;
+
+import static org.fest.assertions.api.Assertions.assertThat;
+
+public class SlotBasedCounterTest {
+
+  private static final int ANY_NUM_SLOTS = 1;
+  private static final int ANY_SLOT = 0;
+  private static final Object ANY_OBJECT = "ANY_OBJECT";
+
+  @DataProvider
+  public Object[][] illegalNumSlotsData() {
+    return new Object[][]{ { -10 }, { -3 }, { -2 }, { -1 }, { 0 } };
+  }
+
+  @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalNumSlotsData")
+  public void negativeOrZeroNumSlotsShouldThrowIAE(int numSlots) {
+    new SlotBasedCounter<Object>(numSlots);
+  }
+
+  @DataProvider
+  public Object[][] legalNumSlotsData() {
+    return new Object[][]{ { 1 }, { 2 }, { 3 }, { 20 } };
+  }
+
+  @Test(dataProvider = "legalNumSlotsData")
+  public void positiveNumSlotsShouldBeOk(int numSlots) {
+    new SlotBasedCounter<Object>(numSlots);
+  }
+
+  @Test
+  public void newInstanceShouldHaveEmptyCounts() {
+    // given
+    SlotBasedCounter<Object> counter = new SlotBasedCounter<Object>(ANY_NUM_SLOTS);
+
+    // when
+    Map<Object, Long> counts = counter.getCounts();
+
+    // then
+    assertThat(counts).isEmpty();
+  }
+
+  @Test
+  public void shouldReturnNonEmptyCountsWhenAtLeastOneObjectWasCounted() {
+    // given
+    SlotBasedCounter<Object> counter = new SlotBasedCounter<Object>(ANY_NUM_SLOTS);
+    counter.incrementCount(ANY_OBJECT, ANY_SLOT);
+
+    // when
+    Map<Object, Long> counts = counter.getCounts();
+
+    // then
+    assertThat(counts).isNotEmpty();
+
+    // additional tests that go beyond what this test is primarily about
+    assertThat(counts.size()).isEqualTo(1);
+    assertThat(counts.get(ANY_OBJECT)).isEqualTo(1);
+  }
+
+  @DataProvider
+  public Object[][] incrementCountData() {
+    return new Object[][]{ { new String[]{ "foo", "bar" }, new int[]{ 3, 2 } } };
+  }
+
+  @Test(dataProvider = "incrementCountData")
+  public void shouldIncrementCount(Object[] objects, int[] expCounts) {
+    // given
+    SlotBasedCounter<Object> counter = new SlotBasedCounter<Object>(ANY_NUM_SLOTS);
+
+    // when
+    for (int i = 0; i < objects.length; i++) {
+      Object obj = objects[i];
+      int numIncrements = expCounts[i];
+      for (int j = 0; j < numIncrements; j++) {
+        counter.incrementCount(obj, ANY_SLOT);
+      }
+    }
+
+    // then
+    for (int i = 0; i < objects.length; i++) {
+      assertThat(counter.getCount(objects[i], ANY_SLOT)).isEqualTo(expCounts[i]);
+    }
+    assertThat(counter.getCount("nonexistentObject", ANY_SLOT)).isEqualTo(0);
+  }
+
+  @Test
+  public void shouldReturnZeroForNonexistentObject() {
+    // given
+    SlotBasedCounter<Object> counter = new SlotBasedCounter<Object>(ANY_NUM_SLOTS);
+
+    // when
+    counter.incrementCount("somethingElse", ANY_SLOT);
+
+    // then
+    assertThat(counter.getCount("nonexistentObject", ANY_SLOT)).isEqualTo(0);
+  }
+
+  @Test
+  public void shouldIncrementCountOnlyOneSlotAtATime() {
+    // given
+    int numSlots = 3;
+    Object obj = Long.valueOf(10);
+    SlotBasedCounter<Object> counter = new SlotBasedCounter<Object>(numSlots);
+
+    // when (empty)
+    // then
+    assertThat(counter.getCount(obj, 0)).isEqualTo(0);
+    assertThat(counter.getCount(obj, 1)).isEqualTo(0);
+    assertThat(counter.getCount(obj, 2)).isEqualTo(0);
+
+    // when
+    counter.incrementCount(obj, 1);
+
+    // then
+    assertThat(counter.getCount(obj, 0)).isEqualTo(0);
+    assertThat(counter.getCount(obj, 1)).isEqualTo(1);
+    assertThat(counter.getCount(obj, 2)).isEqualTo(0);
+  }
+
+  @Test
+  public void wipeSlotShouldSetAllCountsInSlotToZero() {
+    // given
+    SlotBasedCounter<Object> counter = new SlotBasedCounter<Object>(ANY_NUM_SLOTS);
+    Object countWasOne = "countWasOne";
+    Object countWasThree = "countWasThree";
+    counter.incrementCount(countWasOne, ANY_SLOT);
+    counter.incrementCount(countWasThree, ANY_SLOT);
+    counter.incrementCount(countWasThree, ANY_SLOT);
+    counter.incrementCount(countWasThree, ANY_SLOT);
+
+    // when
+    counter.wipeSlot(ANY_SLOT);
+
+    // then
+    assertThat(counter.getCount(countWasOne, ANY_SLOT)).isEqualTo(0);
+    assertThat(counter.getCount(countWasThree, ANY_SLOT)).isEqualTo(0);
+  }
+
+  @Test
+  public void wipeZerosShouldRemoveAnyObjectsWithZeroTotalCount() {
+    // given
+    SlotBasedCounter<Object> counter = new SlotBasedCounter<Object>(2);
+    int wipeSlot = 0;
+    int otherSlot = 1;
+    Object willBeRemoved = "willBeRemoved";
+    Object willContinueToBeTracked = "willContinueToBeTracked";
+    counter.incrementCount(willBeRemoved, wipeSlot);
+    counter.incrementCount(willContinueToBeTracked, wipeSlot);
+    counter.incrementCount(willContinueToBeTracked, otherSlot);
+
+    // when
+    counter.wipeSlot(wipeSlot);
+    counter.wipeZeros();
+
+    // then
+    assertThat(counter.getCounts()).doesNotContainKey(willBeRemoved);
+    assertThat(counter.getCounts()).containsKey(willContinueToBeTracked);
+  }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/test/jvm/storm/starter/bolt/IntermediateRankingsBoltTest.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/test/jvm/storm/starter/bolt/IntermediateRankingsBoltTest.java b/examples/storm-starter/test/jvm/storm/starter/bolt/IntermediateRankingsBoltTest.java
deleted file mode 100644
index 278a513..0000000
--- a/examples/storm-starter/test/jvm/storm/starter/bolt/IntermediateRankingsBoltTest.java
+++ /dev/null
@@ -1,146 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter.bolt;
-
-import backtype.storm.Config;
-import backtype.storm.topology.BasicOutputCollector;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
-import backtype.storm.utils.MockTupleHelpers;
-import com.google.common.collect.Lists;
-import org.testng.annotations.DataProvider;
-import org.testng.annotations.Test;
-
-import java.util.Map;
-
-import static org.fest.assertions.api.Assertions.assertThat;
-import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.*;
-
-public class IntermediateRankingsBoltTest {
-
-  private static final String ANY_NON_SYSTEM_COMPONENT_ID = "irrelevant_component_id";
-  private static final String ANY_NON_SYSTEM_STREAM_ID = "irrelevant_stream_id";
-  private static final Object ANY_OBJECT = new Object();
-  private static final int ANY_TOPN = 10;
-  private static final long ANY_COUNT = 42;
-
-  private Tuple mockRankableTuple(Object obj, long count) {
-    Tuple tuple = MockTupleHelpers.mockTuple(ANY_NON_SYSTEM_COMPONENT_ID, ANY_NON_SYSTEM_STREAM_ID);
-    when(tuple.getValues()).thenReturn(Lists.newArrayList(ANY_OBJECT, ANY_COUNT));
-    return tuple;
-  }
-
-  @DataProvider
-  public Object[][] illegalTopN() {
-    return new Object[][]{ { -10 }, { -3 }, { -2 }, { -1 }, { 0 } };
-  }
-
-  @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalTopN")
-  public void negativeOrZeroTopNShouldThrowIAE(int topN) {
-    new IntermediateRankingsBolt(topN);
-  }
-
-  @DataProvider
-  public Object[][] illegalEmitFrequency() {
-    return new Object[][]{ { -10 }, { -3 }, { -2 }, { -1 }, { 0 } };
-  }
-
-  @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalEmitFrequency")
-  public void negativeOrZeroEmitFrequencyShouldThrowIAE(int emitFrequencyInSeconds) {
-    new IntermediateRankingsBolt(ANY_TOPN, emitFrequencyInSeconds);
-  }
-
-  @DataProvider
-  public Object[][] legalTopN() {
-    return new Object[][]{ { 1 }, { 2 }, { 3 }, { 20 } };
-  }
-
-  @Test(dataProvider = "legalTopN")
-  public void positiveTopNShouldBeOk(int topN) {
-    new IntermediateRankingsBolt(topN);
-  }
-
-  @DataProvider
-  public Object[][] legalEmitFrequency() {
-    return new Object[][]{ { 1 }, { 2 }, { 3 }, { 20 } };
-  }
-
-  @Test(dataProvider = "legalEmitFrequency")
-  public void positiveEmitFrequencyShouldBeOk(int emitFrequencyInSeconds) {
-    new IntermediateRankingsBolt(ANY_TOPN, emitFrequencyInSeconds);
-  }
-
-  @Test
-  public void shouldEmitSomethingIfTickTupleIsReceived() {
-    // given
-    Tuple tickTuple = MockTupleHelpers.mockTickTuple();
-    BasicOutputCollector collector = mock(BasicOutputCollector.class);
-    IntermediateRankingsBolt bolt = new IntermediateRankingsBolt();
-
-    // when
-    bolt.execute(tickTuple, collector);
-
-    // then
-    // verifyZeroInteractions(collector);
-    verify(collector).emit(any(Values.class));
-  }
-
-  @Test
-  public void shouldEmitNothingIfNormalTupleIsReceived() {
-    // given
-    Tuple normalTuple = mockRankableTuple(ANY_OBJECT, ANY_COUNT);
-    BasicOutputCollector collector = mock(BasicOutputCollector.class);
-    IntermediateRankingsBolt bolt = new IntermediateRankingsBolt();
-
-    // when
-    bolt.execute(normalTuple, collector);
-
-    // then
-    verifyZeroInteractions(collector);
-  }
-
-  @Test
-  public void shouldDeclareOutputFields() {
-    // given
-    OutputFieldsDeclarer declarer = mock(OutputFieldsDeclarer.class);
-    IntermediateRankingsBolt bolt = new IntermediateRankingsBolt();
-
-    // when
-    bolt.declareOutputFields(declarer);
-
-    // then
-    verify(declarer, times(1)).declare(any(Fields.class));
-  }
-
-  @Test
-  public void shouldSetTickTupleFrequencyInComponentConfigurationToNonZeroValue() {
-    // given
-    IntermediateRankingsBolt bolt = new IntermediateRankingsBolt();
-
-    // when
-    Map<String, Object> componentConfig = bolt.getComponentConfiguration();
-
-    // then
-    assertThat(componentConfig).containsKey(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS);
-    Integer emitFrequencyInSeconds = (Integer) componentConfig.get(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS);
-    assertThat(emitFrequencyInSeconds).isGreaterThan(0);
-  }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/test/jvm/storm/starter/bolt/RollingCountBoltTest.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/test/jvm/storm/starter/bolt/RollingCountBoltTest.java b/examples/storm-starter/test/jvm/storm/starter/bolt/RollingCountBoltTest.java
deleted file mode 100644
index ecb1216..0000000
--- a/examples/storm-starter/test/jvm/storm/starter/bolt/RollingCountBoltTest.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter.bolt;
-
-import backtype.storm.Config;
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
-import backtype.storm.utils.MockTupleHelpers;
-import org.testng.annotations.Test;
-
-import java.util.Map;
-
-import static org.fest.assertions.api.Assertions.assertThat;
-import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.*;
-
-public class RollingCountBoltTest {
-
-  private static final String ANY_NON_SYSTEM_COMPONENT_ID = "irrelevant_component_id";
-  private static final String ANY_NON_SYSTEM_STREAM_ID = "irrelevant_stream_id";
-
-  private Tuple mockNormalTuple(Object obj) {
-    Tuple tuple = MockTupleHelpers.mockTuple(ANY_NON_SYSTEM_COMPONENT_ID, ANY_NON_SYSTEM_STREAM_ID);
-    when(tuple.getValue(0)).thenReturn(obj);
-    return tuple;
-  }
-
-  @SuppressWarnings("rawtypes")
-  @Test
-  public void shouldEmitNothingIfNoObjectHasBeenCountedYetAndTickTupleIsReceived() {
-    // given
-    Tuple tickTuple = MockTupleHelpers.mockTickTuple();
-    RollingCountBolt bolt = new RollingCountBolt();
-    Map conf = mock(Map.class);
-    TopologyContext context = mock(TopologyContext.class);
-    OutputCollector collector = mock(OutputCollector.class);
-    bolt.prepare(conf, context, collector);
-
-    // when
-    bolt.execute(tickTuple);
-
-    // then
-    verifyZeroInteractions(collector);
-  }
-
-  @SuppressWarnings("rawtypes")
-  @Test
-  public void shouldEmitSomethingIfAtLeastOneObjectWasCountedAndTickTupleIsReceived() {
-    // given
-    Tuple normalTuple = mockNormalTuple(new Object());
-    Tuple tickTuple = MockTupleHelpers.mockTickTuple();
-
-    RollingCountBolt bolt = new RollingCountBolt();
-    Map conf = mock(Map.class);
-    TopologyContext context = mock(TopologyContext.class);
-    OutputCollector collector = mock(OutputCollector.class);
-    bolt.prepare(conf, context, collector);
-
-    // when
-    bolt.execute(normalTuple);
-    bolt.execute(tickTuple);
-
-    // then
-    verify(collector).emit(any(Values.class));
-  }
-
-  @Test
-  public void shouldDeclareOutputFields() {
-    // given
-    OutputFieldsDeclarer declarer = mock(OutputFieldsDeclarer.class);
-    RollingCountBolt bolt = new RollingCountBolt();
-
-    // when
-    bolt.declareOutputFields(declarer);
-
-    // then
-    verify(declarer, times(1)).declare(any(Fields.class));
-
-  }
-
-  @Test
-  public void shouldSetTickTupleFrequencyInComponentConfigurationToNonZeroValue() {
-    // given
-    RollingCountBolt bolt = new RollingCountBolt();
-
-    // when
-    Map<String, Object> componentConfig = bolt.getComponentConfiguration();
-
-    // then
-    assertThat(componentConfig).containsKey(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS);
-    Integer emitFrequencyInSeconds = (Integer) componentConfig.get(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS);
-    assertThat(emitFrequencyInSeconds).isGreaterThan(0);
-  }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/test/jvm/storm/starter/bolt/TotalRankingsBoltTest.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/test/jvm/storm/starter/bolt/TotalRankingsBoltTest.java b/examples/storm-starter/test/jvm/storm/starter/bolt/TotalRankingsBoltTest.java
deleted file mode 100644
index a6af931..0000000
--- a/examples/storm-starter/test/jvm/storm/starter/bolt/TotalRankingsBoltTest.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter.bolt;
-
-import backtype.storm.Config;
-import backtype.storm.topology.BasicOutputCollector;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
-import backtype.storm.utils.MockTupleHelpers;
-import org.testng.annotations.DataProvider;
-import org.testng.annotations.Test;
-import storm.starter.tools.Rankings;
-
-import java.util.Map;
-
-import static org.fest.assertions.api.Assertions.assertThat;
-import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.*;
-
-public class TotalRankingsBoltTest {
-
-  private static final String ANY_NON_SYSTEM_COMPONENT_ID = "irrelevant_component_id";
-  private static final String ANY_NON_SYSTEM_STREAM_ID = "irrelevant_stream_id";
-  private static final Object ANY_OBJECT = new Object();
-  private static final int ANY_TOPN = 10;
-  private static final long ANY_COUNT = 42;
-
-  private Tuple mockRankingsTuple(Object obj, long count) {
-    Tuple tuple = MockTupleHelpers.mockTuple(ANY_NON_SYSTEM_COMPONENT_ID, ANY_NON_SYSTEM_STREAM_ID);
-    Rankings rankings = mock(Rankings.class);
-    when(tuple.getValue(0)).thenReturn(rankings);
-    return tuple;
-  }
-
-  @DataProvider
-  public Object[][] illegalTopN() {
-    return new Object[][]{ { -10 }, { -3 }, { -2 }, { -1 }, { 0 } };
-  }
-
-  @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalTopN")
-  public void negativeOrZeroTopNShouldThrowIAE(int topN) {
-    new TotalRankingsBolt(topN);
-  }
-
-  @DataProvider
-  public Object[][] illegalEmitFrequency() {
-    return new Object[][]{ { -10 }, { -3 }, { -2 }, { -1 }, { 0 } };
-  }
-
-  @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalEmitFrequency")
-  public void negativeOrZeroEmitFrequencyShouldThrowIAE(int emitFrequencyInSeconds) {
-    new TotalRankingsBolt(ANY_TOPN, emitFrequencyInSeconds);
-  }
-
-  @DataProvider
-  public Object[][] legalTopN() {
-    return new Object[][]{ { 1 }, { 2 }, { 3 }, { 20 } };
-  }
-
-  @Test(dataProvider = "legalTopN")
-  public void positiveTopNShouldBeOk(int topN) {
-    new TotalRankingsBolt(topN);
-  }
-
-  @DataProvider
-  public Object[][] legalEmitFrequency() {
-    return new Object[][]{ { 1 }, { 2 }, { 3 }, { 20 } };
-  }
-
-  @Test(dataProvider = "legalEmitFrequency")
-  public void positiveEmitFrequencyShouldBeOk(int emitFrequencyInSeconds) {
-    new TotalRankingsBolt(ANY_TOPN, emitFrequencyInSeconds);
-  }
-
-  @Test
-  public void shouldEmitSomethingIfTickTupleIsReceived() {
-    // given
-    Tuple tickTuple = MockTupleHelpers.mockTickTuple();
-    BasicOutputCollector collector = mock(BasicOutputCollector.class);
-    TotalRankingsBolt bolt = new TotalRankingsBolt();
-
-    // when
-    bolt.execute(tickTuple, collector);
-
-    // then
-    // verifyZeroInteractions(collector);
-    verify(collector).emit(any(Values.class));
-  }
-
-  @Test
-  public void shouldEmitNothingIfNormalTupleIsReceived() {
-    // given
-    Tuple normalTuple = mockRankingsTuple(ANY_OBJECT, ANY_COUNT);
-    BasicOutputCollector collector = mock(BasicOutputCollector.class);
-    TotalRankingsBolt bolt = new TotalRankingsBolt();
-
-    // when
-    bolt.execute(normalTuple, collector);
-
-    // then
-    verifyZeroInteractions(collector);
-  }
-
-  @Test
-  public void shouldDeclareOutputFields() {
-    // given
-    OutputFieldsDeclarer declarer = mock(OutputFieldsDeclarer.class);
-    TotalRankingsBolt bolt = new TotalRankingsBolt();
-
-    // when
-    bolt.declareOutputFields(declarer);
-
-    // then
-    verify(declarer, times(1)).declare(any(Fields.class));
-  }
-
-  @Test
-  public void shouldSetTickTupleFrequencyInComponentConfigurationToNonZeroValue() {
-    // given
-    TotalRankingsBolt bolt = new TotalRankingsBolt();
-
-    // when
-    Map<String, Object> componentConfig = bolt.getComponentConfiguration();
-
-    // then
-    assertThat(componentConfig).containsKey(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS);
-    Integer emitFrequencyInSeconds = (Integer) componentConfig.get(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS);
-    assertThat(emitFrequencyInSeconds).isGreaterThan(0);
-  }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/test/jvm/storm/starter/tools/NthLastModifiedTimeTrackerTest.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/test/jvm/storm/starter/tools/NthLastModifiedTimeTrackerTest.java b/examples/storm-starter/test/jvm/storm/starter/tools/NthLastModifiedTimeTrackerTest.java
deleted file mode 100644
index fe4d987..0000000
--- a/examples/storm-starter/test/jvm/storm/starter/tools/NthLastModifiedTimeTrackerTest.java
+++ /dev/null
@@ -1,125 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter.tools;
-
-import backtype.storm.utils.Time;
-import org.testng.annotations.DataProvider;
-import org.testng.annotations.Test;
-
-import static org.fest.assertions.api.Assertions.assertThat;
-
-public class NthLastModifiedTimeTrackerTest {
-
-  private static final int ANY_NUM_TIMES_TO_TRACK = 3;
-  private static final int MILLIS_IN_SEC = 1000;
-
-  @DataProvider
-  public Object[][] illegalNumTimesData() {
-    return new Object[][]{ { -10 }, { -3 }, { -2 }, { -1 }, { 0 } };
-  }
-
-  @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalNumTimesData")
-  public void negativeOrZeroNumTimesToTrackShouldThrowIAE(int numTimesToTrack) {
-    new NthLastModifiedTimeTracker(numTimesToTrack);
-  }
-
-  @DataProvider
-  public Object[][] legalNumTimesData() {
-    return new Object[][]{ { 1 }, { 2 }, { 3 }, { 20 } };
-  }
-
-  @Test(dataProvider = "legalNumTimesData")
-  public void positiveNumTimesToTrackShouldBeOk(int numTimesToTrack) {
-    new NthLastModifiedTimeTracker(numTimesToTrack);
-  }
-
-  @DataProvider
-  public Object[][] whenNotYetMarkedAsModifiedData() {
-    return new Object[][]{ { 0 }, { 1 }, { 2 }, { 3 }, { 4 }, { 5 }, { 8 }, { 10 } };
-  }
-
-  @Test(dataProvider = "whenNotYetMarkedAsModifiedData")
-  public void shouldReturnCorrectModifiedTimeEvenWhenNotYetMarkedAsModified(int secondsToAdvance) {
-    // given
-    Time.startSimulating();
-    NthLastModifiedTimeTracker tracker = new NthLastModifiedTimeTracker(ANY_NUM_TIMES_TO_TRACK);
-
-    // when
-    advanceSimulatedTimeBy(secondsToAdvance);
-    int seconds = tracker.secondsSinceOldestModification();
-
-    // then
-    assertThat(seconds).isEqualTo(secondsToAdvance);
-
-    // cleanup
-    Time.stopSimulating();
-  }
-
-  @DataProvider
-  public Object[][] simulatedTrackerIterations() {
-    return new Object[][]{ { 1, new int[]{ 0, 1 }, new int[]{ 0, 0 } }, { 1, new int[]{ 0, 2 }, new int[]{ 0, 0 } },
-        { 2, new int[]{ 2, 2 }, new int[]{ 2, 2 } }, { 2, new int[]{ 0, 4 }, new int[]{ 0, 4 } },
-        { 1, new int[]{ 1, 1, 1, 1, 1, 1, 1 }, new int[]{ 0, 0, 0, 0, 0, 0, 0 } },
-        { 1, new int[]{ 1, 2, 3, 4, 5, 6, 7 }, new int[]{ 0, 0, 0, 0, 0, 0, 0 } },
-        { 2, new int[]{ 1, 1, 1, 1, 1, 1, 1 }, new int[]{ 1, 1, 1, 1, 1, 1, 1 } },
-        { 2, new int[]{ 2, 2, 2, 2, 2, 2, 2 }, new int[]{ 2, 2, 2, 2, 2, 2, 2 } },
-        { 2, new int[]{ 1, 2, 3, 4, 5, 6, 7 }, new int[]{ 1, 2, 3, 4, 5, 6, 7 } },
-        { 3, new int[]{ 1, 1, 1, 1, 1, 1, 1 }, new int[]{ 1, 2, 2, 2, 2, 2, 2 } },
-        { 3, new int[]{ 1, 2, 3, 4, 5, 6, 7 }, new int[]{ 1, 3, 5, 7, 9, 11, 13 } },
-        { 3, new int[]{ 2, 2, 2, 2, 2, 2, 2 }, new int[]{ 2, 4, 4, 4, 4, 4, 4 } },
-        { 4, new int[]{ 1, 1, 1, 1, 1, 1, 1 }, new int[]{ 1, 2, 3, 3, 3, 3, 3 } },
-        { 4, new int[]{ 1, 2, 3, 4, 5, 6, 7 }, new int[]{ 1, 3, 6, 9, 12, 15, 18 } },
-        { 4, new int[]{ 2, 2, 2, 2, 2, 2, 2 }, new int[]{ 2, 4, 6, 6, 6, 6, 6 } },
-        { 5, new int[]{ 1, 1, 1, 1, 1, 1, 1 }, new int[]{ 1, 2, 3, 4, 4, 4, 4 } },
-        { 5, new int[]{ 1, 2, 3, 4, 5, 6, 7 }, new int[]{ 1, 3, 6, 10, 14, 18, 22 } },
-        { 5, new int[]{ 2, 2, 2, 2, 2, 2, 2 }, new int[]{ 2, 4, 6, 8, 8, 8, 8 } },
-        { 6, new int[]{ 1, 1, 1, 1, 1, 1, 1 }, new int[]{ 1, 2, 3, 4, 5, 5, 5 } },
-        { 6, new int[]{ 1, 2, 3, 4, 5, 6, 7 }, new int[]{ 1, 3, 6, 10, 15, 20, 25 } },
-        { 6, new int[]{ 2, 2, 2, 2, 2, 2, 2 }, new int[]{ 2, 4, 6, 8, 10, 10, 10 } },
-        { 3, new int[]{ 1, 2, 3 }, new int[]{ 1, 3, 5 } } };
-  }
-
-  @Test(dataProvider = "simulatedTrackerIterations")
-  public void shouldReturnCorrectModifiedTimeWhenMarkedAsModified(int numTimesToTrack,
-      int[] secondsToAdvancePerIteration, int[] expLastModifiedTimes) {
-    // given
-    Time.startSimulating();
-    NthLastModifiedTimeTracker tracker = new NthLastModifiedTimeTracker(numTimesToTrack);
-
-    int[] modifiedTimes = new int[expLastModifiedTimes.length];
-
-    // when
-    int i = 0;
-    for (int secondsToAdvance : secondsToAdvancePerIteration) {
-      advanceSimulatedTimeBy(secondsToAdvance);
-      tracker.markAsModified();
-      modifiedTimes[i] = tracker.secondsSinceOldestModification();
-      i++;
-    }
-
-    // then
-    assertThat(modifiedTimes).isEqualTo(expLastModifiedTimes);
-
-    // cleanup
-    Time.stopSimulating();
-  }
-
-  private void advanceSimulatedTimeBy(int seconds) {
-    Time.advanceTime(seconds * MILLIS_IN_SEC);
-  }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/test/jvm/storm/starter/tools/RankableObjectWithFieldsTest.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/test/jvm/storm/starter/tools/RankableObjectWithFieldsTest.java b/examples/storm-starter/test/jvm/storm/starter/tools/RankableObjectWithFieldsTest.java
deleted file mode 100644
index e83f922..0000000
--- a/examples/storm-starter/test/jvm/storm/starter/tools/RankableObjectWithFieldsTest.java
+++ /dev/null
@@ -1,252 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter.tools;
-
-import backtype.storm.tuple.Tuple;
-import com.google.common.collect.Lists;
-import org.testng.annotations.DataProvider;
-import org.testng.annotations.Test;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.fest.assertions.api.Assertions.assertThat;
-import static org.mockito.Mockito.*;
-import static org.testng.Assert.assertFalse;
-import static org.testng.Assert.assertTrue;
-
-public class RankableObjectWithFieldsTest {
-
-  private static final Object ANY_OBJECT = new Object();
-  private static final long ANY_COUNT = 271;
-  private static final String ANY_FIELD = "someAdditionalField";
-  private static final int GREATER_THAN = 1;
-  private static final int EQUAL_TO = 0;
-  private static final int SMALLER_THAN = -1;
-
-  @Test(expectedExceptions = IllegalArgumentException.class)
-  public void constructorWithNullObjectAndNoFieldsShouldThrowIAE() {
-    new RankableObjectWithFields(null, ANY_COUNT);
-  }
-
-  @Test(expectedExceptions = IllegalArgumentException.class)
-  public void constructorWithNullObjectAndFieldsShouldThrowIAE() {
-    Object someAdditionalField = new Object();
-    new RankableObjectWithFields(null, ANY_COUNT, someAdditionalField);
-  }
-
-  @Test(expectedExceptions = IllegalArgumentException.class)
-  public void constructorWithNegativeCountAndNoFieldsShouldThrowIAE() {
-    new RankableObjectWithFields(ANY_OBJECT, -1);
-  }
-
-  @Test(expectedExceptions = IllegalArgumentException.class)
-  public void constructorWithNegativeCountAndFieldsShouldThrowIAE() {
-    Object someAdditionalField = new Object();
-    new RankableObjectWithFields(ANY_OBJECT, -1, someAdditionalField);
-  }
-
-  @Test
-  public void shouldBeEqualToItself() {
-    RankableObjectWithFields r = new RankableObjectWithFields(ANY_OBJECT, ANY_COUNT);
-    assertThat(r).isEqualTo(r);
-  }
-
-  @DataProvider
-  public Object[][] otherClassesData() {
-    return new Object[][]{ { new String("foo") }, { new Object() }, { Integer.valueOf(4) }, { Lists.newArrayList(7, 8,
-        9) } };
-  }
-
-  @Test(dataProvider = "otherClassesData")
-  public void shouldNotBeEqualToInstancesOfOtherClasses(Object notARankable) {
-    RankableObjectWithFields r = new RankableObjectWithFields(ANY_OBJECT, ANY_COUNT);
-    assertFalse(r.equals(notARankable), r + " is equal to " + notARankable + " but it should not be");
-  }
-
-  @DataProvider
-  public Object[][] falseDuplicatesData() {
-    return new Object[][]{ { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("foo", 1) },
-        { new RankableObjectWithFields("foo", 1), new RankableObjectWithFields("Foo", 1) },
-        { new RankableObjectWithFields("foo", 1), new RankableObjectWithFields("FOO", 1) },
-        { new RankableObjectWithFields("foo", 1), new RankableObjectWithFields("bar", 1) },
-        { new RankableObjectWithFields("", 0), new RankableObjectWithFields("", 1) }, { new RankableObjectWithFields("",
-        1), new RankableObjectWithFields("bar", 1) } };
-  }
-
-  @Test(dataProvider = "falseDuplicatesData")
-  public void shouldNotBeEqualToFalseDuplicates(RankableObjectWithFields r, RankableObjectWithFields falseDuplicate) {
-    assertFalse(r.equals(falseDuplicate), r + " is equal to " + falseDuplicate + " but it should not be");
-  }
-
-  @Test(dataProvider = "falseDuplicatesData")
-  public void shouldHaveDifferentHashCodeThanFalseDuplicates(RankableObjectWithFields r,
-      RankableObjectWithFields falseDuplicate) {
-    assertThat(r.hashCode()).isNotEqualTo(falseDuplicate.hashCode());
-  }
-
-  @DataProvider
-  public Object[][] trueDuplicatesData() {
-    return new Object[][]{ { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("foo", 0) },
-        { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("foo", 0, "someOtherField") },
-        { new RankableObjectWithFields("foo", 0, "someField"), new RankableObjectWithFields("foo", 0,
-            "someOtherField") } };
-  }
-
-  @Test(dataProvider = "trueDuplicatesData")
-  public void shouldBeEqualToTrueDuplicates(RankableObjectWithFields r, RankableObjectWithFields trueDuplicate) {
-    assertTrue(r.equals(trueDuplicate), r + " is not equal to " + trueDuplicate + " but it should be");
-  }
-
-  @Test(dataProvider = "trueDuplicatesData")
-  public void shouldHaveSameHashCodeAsTrueDuplicates(RankableObjectWithFields r,
-      RankableObjectWithFields trueDuplicate) {
-    assertThat(r.hashCode()).isEqualTo(trueDuplicate.hashCode());
-  }
-
-  @DataProvider
-  public Object[][] compareToData() {
-    return new Object[][]{ { new RankableObjectWithFields("foo", 1000), new RankableObjectWithFields("foo", 0),
-        GREATER_THAN }, { new RankableObjectWithFields("foo", 1), new RankableObjectWithFields("foo", 0),
-        GREATER_THAN }, { new RankableObjectWithFields("foo", 1000), new RankableObjectWithFields("bar", 0),
-        GREATER_THAN }, { new RankableObjectWithFields("foo", 1), new RankableObjectWithFields("bar", 0),
-        GREATER_THAN }, { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("foo", 0), EQUAL_TO },
-        { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("bar", 0), EQUAL_TO },
-        { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("foo", 1000), SMALLER_THAN },
-        { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("foo", 1), SMALLER_THAN },
-        { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("bar", 1), SMALLER_THAN },
-        { new RankableObjectWithFields("foo", 0), new RankableObjectWithFields("bar", 1000), SMALLER_THAN }, };
-  }
-
-  @Test(dataProvider = "compareToData")
-  public void verifyCompareTo(RankableObjectWithFields first, RankableObjectWithFields second, int expCompareToValue) {
-    assertThat(first.compareTo(second)).isEqualTo(expCompareToValue);
-  }
-
-  @DataProvider
-  public Object[][] toStringData() {
-    return new Object[][]{ { new String("foo"), 0L }, { new String("BAR"), 8L } };
-  }
-
-  @Test(dataProvider = "toStringData")
-  public void toStringShouldContainStringRepresentationsOfObjectAndCount(Object obj, long count) {
-    // given
-    RankableObjectWithFields r = new RankableObjectWithFields(obj, count);
-
-    // when
-    String strRepresentation = r.toString();
-
-    // then
-    assertThat(strRepresentation).contains(obj.toString()).contains("" + count);
-  }
-
-  @Test
-  public void shouldReturnTheObject() {
-    // given
-    RankableObjectWithFields r = new RankableObjectWithFields(ANY_OBJECT, ANY_COUNT, ANY_FIELD);
-
-    // when
-    Object obj = r.getObject();
-
-    // then
-    assertThat(obj).isEqualTo(ANY_OBJECT);
-  }
-
-  @Test
-  public void shouldReturnTheCount() {
-    // given
-    RankableObjectWithFields r = new RankableObjectWithFields(ANY_OBJECT, ANY_COUNT, ANY_FIELD);
-
-    // when
-    long count = r.getCount();
-
-    // then
-    assertThat(count).isEqualTo(ANY_COUNT);
-  }
-
-  @DataProvider
-  public Object[][] fieldsData() {
-    return new Object[][]{ { ANY_OBJECT, ANY_COUNT, new Object[]{ ANY_FIELD } },
-        { "quux", 42L, new Object[]{ "one", "two", "three" } } };
-  }
-
-  @Test(dataProvider = "fieldsData")
-  public void shouldReturnTheFields(Object obj, long count, Object[] fields) {
-    // given
-    RankableObjectWithFields r = new RankableObjectWithFields(obj, count, fields);
-
-    // when
-    List<Object> actualFields = r.getFields();
-
-    // then
-    assertThat(actualFields).isEqualTo(Lists.newArrayList(fields));
-  }
-
-  @Test(expectedExceptions = UnsupportedOperationException.class)
-  public void fieldsShouldBeImmutable() {
-    // given
-    RankableObjectWithFields r = new RankableObjectWithFields(ANY_OBJECT, ANY_COUNT, ANY_FIELD);
-
-    // when
-    List<Object> fields = r.getFields();
-    // try to modify the list, which should fail
-    fields.remove(0);
-
-    // then (exception)
-  }
-
-  @Test
-  public void shouldCreateRankableObjectFromTuple() {
-    // given
-    Tuple tuple = mock(Tuple.class);
-    List<Object> tupleValues = Lists.newArrayList(ANY_OBJECT, ANY_COUNT, ANY_FIELD);
-    when(tuple.getValues()).thenReturn(tupleValues);
-
-    // when
-    RankableObjectWithFields r = RankableObjectWithFields.from(tuple);
-
-    // then
-    assertThat(r.getObject()).isEqualTo(ANY_OBJECT);
-    assertThat(r.getCount()).isEqualTo(ANY_COUNT);
-    List<Object> fields = new ArrayList<Object>();
-    fields.add(ANY_FIELD);
-    assertThat(r.getFields()).isEqualTo(fields);
-
-  }
-
-  @DataProvider
-  public Object[][] copyData() {
-    return new Object[][]{ { new RankableObjectWithFields("foo", 0) }, { new RankableObjectWithFields("foo", 3,
-        "someOtherField") }, { new RankableObjectWithFields("foo", 0, "someField") } };
-  }
-
-  // TODO: What would be a good test to ensure that RankableObjectWithFields is at least somewhat defensively copied?
-  //       The contract of Rankable#copy() returns a Rankable value, not a RankableObjectWithFields.
-  @Test(dataProvider = "copyData")
-  public void copyShouldReturnCopy(RankableObjectWithFields original) {
-    // given
-
-    // when
-    Rankable copy = original.copy();
-
-    // then
-    assertThat(copy.getObject()).isEqualTo(original.getObject());
-    assertThat(copy.getCount()).isEqualTo(original.getCount());
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/test/jvm/storm/starter/tools/RankingsTest.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/test/jvm/storm/starter/tools/RankingsTest.java b/examples/storm-starter/test/jvm/storm/starter/tools/RankingsTest.java
deleted file mode 100644
index cab02cb..0000000
--- a/examples/storm-starter/test/jvm/storm/starter/tools/RankingsTest.java
+++ /dev/null
@@ -1,368 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter.tools;
-
-import com.google.common.base.Throwables;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-import org.jmock.lib.concurrent.Blitzer;
-import org.testng.annotations.DataProvider;
-import org.testng.annotations.Test;
-
-import java.util.List;
-
-import static org.fest.assertions.api.Assertions.assertThat;
-
-public class RankingsTest {
-
-  private static final int ANY_TOPN = 42;
-  private static final Rankable ANY_RANKABLE = new RankableObjectWithFields("someObject", ANY_TOPN);
-  private static final Rankable ZERO = new RankableObjectWithFields("ZERO_COUNT", 0);
-  private static final Rankable A = new RankableObjectWithFields("A", 1);
-  private static final Rankable B = new RankableObjectWithFields("B", 2);
-  private static final Rankable C = new RankableObjectWithFields("C", 3);
-  private static final Rankable D = new RankableObjectWithFields("D", 4);
-  private static final Rankable E = new RankableObjectWithFields("E", 5);
-  private static final Rankable F = new RankableObjectWithFields("F", 6);
-  private static final Rankable G = new RankableObjectWithFields("G", 7);
-  private static final Rankable H = new RankableObjectWithFields("H", 8);
-
-  @DataProvider
-  public Object[][] illegalTopNData() {
-    return new Object[][]{ { 0 }, { -1 }, { -2 }, { -10 } };
-  }
-
-  @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalTopNData")
-  public void constructorWithNegativeOrZeroTopNShouldThrowIAE(int topN) {
-    new Rankings(topN);
-  }
-
-  @DataProvider
-  public Object[][] copyRankingsData() {
-    return new Object[][]{ { 5, Lists.newArrayList(A, B, C) }, { 2, Lists.newArrayList(A, B, C, D) },
-        { 1, Lists.newArrayList() }, { 1, Lists.newArrayList(A) }, { 1, Lists.newArrayList(A, B) } };
-  }
-
-  @Test(dataProvider = "copyRankingsData")
-  public void copyConstructorShouldReturnCopy(int topN, List<Rankable> rankables) {
-    // given
-    Rankings rankings = new Rankings(topN);
-    for (Rankable r : rankables) {
-      rankings.updateWith(r);
-    }
-
-    // when
-    Rankings copy = new Rankings(rankings);
-
-    // then
-    assertThat(copy.maxSize()).isEqualTo(rankings.maxSize());
-    assertThat(copy.getRankings()).isEqualTo(rankings.getRankings());
-  }
-
-  @DataProvider
-  public Object[][] defensiveCopyRankingsData() {
-    return new Object[][]{ { 5, Lists.newArrayList(A, B, C), Lists.newArrayList(D) }, { 2, Lists.newArrayList(A, B, C,
-        D), Lists.newArrayList(E, F) }, { 1, Lists.newArrayList(), Lists.newArrayList(A) }, { 1, Lists.newArrayList(A),
-        Lists.newArrayList(B) }, { 1, Lists.newArrayList(ZERO), Lists.newArrayList(B) }, { 1, Lists.newArrayList(ZERO),
-        Lists.newArrayList() } };
-  }
-
-  @Test(dataProvider = "defensiveCopyRankingsData")
-  public void copyConstructorShouldReturnDefensiveCopy(int topN, List<Rankable> rankables, List<Rankable> changes) {
-    // given
-    Rankings original = new Rankings(topN);
-    for (Rankable r : rankables) {
-      original.updateWith(r);
-    }
-    int expSize = original.size();
-    List<Rankable> expRankings = original.getRankings();
-
-    // when
-    Rankings copy = new Rankings(original);
-    for (Rankable r : changes) {
-      copy.updateWith(r);
-    }
-
-    // then
-    assertThat(original.size()).isEqualTo(expSize);
-    assertThat(original.getRankings()).isEqualTo(expRankings);
-  }
-
-  @DataProvider
-  public Object[][] legalTopNData() {
-    return new Object[][]{ { 1 }, { 2 }, { 1000 }, { 1000000 } };
-  }
-
-  @Test(dataProvider = "legalTopNData")
-  public void constructorWithPositiveTopNShouldBeOk(int topN) {
-    // given/when
-    Rankings rankings = new Rankings(topN);
-
-    // then
-    assertThat(rankings.maxSize()).isEqualTo(topN);
-  }
-
-  @Test
-  public void shouldHaveDefaultConstructor() {
-    new Rankings();
-  }
-
-  @Test
-  public void defaultConstructorShouldSetPositiveTopN() {
-    // given/when
-    Rankings rankings = new Rankings();
-
-    // then
-    assertThat(rankings.maxSize()).isGreaterThan(0);
-  }
-
-  @DataProvider
-  public Object[][] rankingsGrowData() {
-    return new Object[][]{ { 2, Lists.newArrayList(new RankableObjectWithFields("A", 1), new RankableObjectWithFields(
-        "B", 2), new RankableObjectWithFields("C", 3)) }, { 2, Lists.newArrayList(new RankableObjectWithFields("A", 1),
-        new RankableObjectWithFields("B", 2), new RankableObjectWithFields("C", 3), new RankableObjectWithFields("D",
-        4)) } };
-  }
-
-  @Test(dataProvider = "rankingsGrowData")
-  public void sizeOfRankingsShouldNotGrowBeyondTopN(int topN, List<Rankable> rankables) {
-    // sanity check of the provided test data
-    assertThat(rankables.size()).overridingErrorMessage(
-        "The supplied test data is not correct: the number of rankables <%d> should be greater than <%d>",
-        rankables.size(), topN).isGreaterThan(topN);
-
-    // given
-    Rankings rankings = new Rankings(topN);
-
-    // when
-    for (Rankable r : rankables) {
-      rankings.updateWith(r);
-    }
-
-    // then
-    assertThat(rankings.size()).isLessThanOrEqualTo(rankings.maxSize());
-  }
-
-  @DataProvider
-  public Object[][] simulatedRankingsData() {
-    return new Object[][]{ { Lists.newArrayList(A), Lists.newArrayList(A) }, { Lists.newArrayList(B, D, A, C),
-        Lists.newArrayList(D, C, B, A) }, { Lists.newArrayList(B, F, A, C, D, E), Lists.newArrayList(F, E, D, C, B,
-        A) }, { Lists.newArrayList(G, B, F, A, C, D, E, H), Lists.newArrayList(H, G, F, E, D, C, B, A) } };
-  }
-
-  @Test(dataProvider = "simulatedRankingsData")
-  public void shouldCorrectlyRankWhenUpdatedWithRankables(List<Rankable> unsorted, List<Rankable> expSorted) {
-    // given
-    Rankings rankings = new Rankings(unsorted.size());
-
-    // when
-    for (Rankable r : unsorted) {
-      rankings.updateWith(r);
-    }
-
-    // then
-    assertThat(rankings.getRankings()).isEqualTo(expSorted);
-  }
-
-  @Test(dataProvider = "simulatedRankingsData")
-  public void shouldCorrectlyRankWhenEmptyAndUpdatedWithOtherRankings(List<Rankable> unsorted,
-      List<Rankable> expSorted) {
-    // given
-    Rankings rankings = new Rankings(unsorted.size());
-    Rankings otherRankings = new Rankings(rankings.maxSize());
-    for (Rankable r : unsorted) {
-      otherRankings.updateWith(r);
-    }
-
-    // when
-    rankings.updateWith(otherRankings);
-
-    // then
-    assertThat(rankings.getRankings()).isEqualTo(expSorted);
-  }
-
-  @Test(dataProvider = "simulatedRankingsData")
-  public void shouldCorrectlyRankWhenUpdatedWithEmptyOtherRankings(List<Rankable> unsorted, List<Rankable> expSorted) {
-    // given
-    Rankings rankings = new Rankings(unsorted.size());
-    for (Rankable r : unsorted) {
-      rankings.updateWith(r);
-    }
-    Rankings emptyRankings = new Rankings(ANY_TOPN);
-
-    // when
-    rankings.updateWith(emptyRankings);
-
-    // then
-    assertThat(rankings.getRankings()).isEqualTo(expSorted);
-  }
-
-  @DataProvider
-  public Object[][] simulatedRankingsAndOtherRankingsData() {
-    return new Object[][]{ { Lists.newArrayList(A), Lists.newArrayList(A), Lists.newArrayList(A) },
-        { Lists.newArrayList(A, C), Lists.newArrayList(B, D), Lists.newArrayList(D, C, B, A) }, { Lists.newArrayList(B,
-        F, A), Lists.newArrayList(C, D, E), Lists.newArrayList(F, E, D, C, B, A) }, { Lists.newArrayList(G, B, F, A, C),
-        Lists.newArrayList(D, E, H), Lists.newArrayList(H, G, F, E, D, C, B, A) } };
-  }
-
-  @Test(dataProvider = "simulatedRankingsAndOtherRankingsData")
-  public void shouldCorrectlyRankWhenNotEmptyAndUpdatedWithOtherRankings(List<Rankable> unsorted,
-      List<Rankable> unsortedForOtherRankings, List<Rankable> expSorted) {
-    // given
-    Rankings rankings = new Rankings(expSorted.size());
-    for (Rankable r : unsorted) {
-      rankings.updateWith(r);
-    }
-    Rankings otherRankings = new Rankings(unsortedForOtherRankings.size());
-    for (Rankable r : unsortedForOtherRankings) {
-      otherRankings.updateWith(r);
-    }
-
-    // when
-    rankings.updateWith(otherRankings);
-
-    // then
-    assertThat(rankings.getRankings()).isEqualTo(expSorted);
-  }
-
-  @DataProvider
-  public Object[][] duplicatesData() {
-    Rankable A1 = new RankableObjectWithFields("A", 1);
-    Rankable A2 = new RankableObjectWithFields("A", 2);
-    Rankable A3 = new RankableObjectWithFields("A", 3);
-    return new Object[][]{ { Lists.newArrayList(ANY_RANKABLE, ANY_RANKABLE, ANY_RANKABLE) }, { Lists.newArrayList(A1,
-        A2, A3) }, };
-  }
-
-  @Test(dataProvider = "duplicatesData")
-  public void shouldNotRankDuplicateObjectsMoreThanOnce(List<Rankable> duplicates) {
-    // given
-    Rankings rankings = new Rankings(duplicates.size());
-
-    // when
-    for (Rankable r : duplicates) {
-      rankings.updateWith(r);
-    }
-
-    // then
-    assertThat(rankings.size()).isEqualTo(1);
-  }
-
-  @DataProvider
-  public Object[][] removeZeroRankingsData() {
-    return new Object[][]{ { Lists.newArrayList(A, ZERO), Lists.newArrayList(A) }, { Lists.newArrayList(A),
-        Lists.newArrayList(A) }, { Lists.newArrayList(ZERO, A), Lists.newArrayList(A) }, { Lists.newArrayList(ZERO),
-        Lists.newArrayList() }, { Lists.newArrayList(ZERO, new RankableObjectWithFields("ZERO2", 0)),
-        Lists.newArrayList() }, { Lists.newArrayList(B, ZERO, new RankableObjectWithFields("ZERO2", 0), D,
-        new RankableObjectWithFields("ZERO3", 0), new RankableObjectWithFields("ZERO4", 0), C), Lists.newArrayList(D, C,
-        B) }, { Lists.newArrayList(A, ZERO, B), Lists.newArrayList(B, A) } };
-  }
-
-  @Test(dataProvider = "removeZeroRankingsData")
-  public void shouldRemoveZeroCounts(List<Rankable> unsorted, List<Rankable> expSorted) {
-    // given
-    Rankings rankings = new Rankings(unsorted.size());
-    for (Rankable r : unsorted) {
-      rankings.updateWith(r);
-    }
-
-    // when
-    rankings.pruneZeroCounts();
-
-    // then
-    assertThat(rankings.getRankings()).isEqualTo(expSorted);
-  }
-
-  @Test
-  public void updatingWithNewRankablesShouldBeThreadSafe() throws InterruptedException {
-    // given
-    final List<Rankable> entries = ImmutableList.of(A, B, C, D);
-    final Rankings rankings = new Rankings(entries.size());
-
-    // We are capturing exceptions thrown in Blitzer's child threads into this data structure so that we can properly
-    // pass/fail this test.  The reason is that Blitzer doesn't report exceptions, which is a known bug in Blitzer
-    // (JMOCK-263).  See https://github.com/jmock-developers/jmock-library/issues/22 for more information.
-    final List<Exception> exceptions = Lists.newArrayList();
-    Blitzer blitzer = new Blitzer(1000);
-
-    // when
-    blitzer.blitz(new Runnable() {
-      public void run() {
-        for (Rankable r : entries) {
-          try {
-            rankings.updateWith(r);
-          }
-          catch (RuntimeException e) {
-            synchronized(exceptions) {
-              exceptions.add(e);
-            }
-          }
-        }
-      }
-    });
-    blitzer.shutdown();
-
-    // then
-    //
-    if (!exceptions.isEmpty()) {
-      for (Exception e : exceptions) {
-        System.err.println(Throwables.getStackTraceAsString(e));
-      }
-    }
-    assertThat(exceptions).isEmpty();
-  }
-
-  @Test(dataProvider = "copyRankingsData")
-  public void copyShouldReturnCopy(int topN, List<Rankable> rankables) {
-    // given
-    Rankings rankings = new Rankings(topN);
-    for (Rankable r : rankables) {
-      rankings.updateWith(r);
-    }
-
-    // when
-    Rankings copy = rankings.copy();
-
-    // then
-    assertThat(copy.maxSize()).isEqualTo(rankings.maxSize());
-    assertThat(copy.getRankings()).isEqualTo(rankings.getRankings());
-  }
-
-  @Test(dataProvider = "defensiveCopyRankingsData")
-  public void copyShouldReturnDefensiveCopy(int topN, List<Rankable> rankables, List<Rankable> changes) {
-    // given
-    Rankings original = new Rankings(topN);
-    for (Rankable r : rankables) {
-      original.updateWith(r);
-    }
-    int expSize = original.size();
-    List<Rankable> expRankings = original.getRankings();
-
-    // when
-    Rankings copy = original.copy();
-    for (Rankable r : changes) {
-      copy.updateWith(r);
-    }
-    copy.pruneZeroCounts();
-
-    // then
-    assertThat(original.size()).isEqualTo(expSize);
-    assertThat(original.getRankings()).isEqualTo(expRankings);
-  }
-
-}
\ No newline at end of file

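For context on what the deleted RankingsTest above exercised, here is a minimal usage sketch of the Rankings / RankableObjectWithFields pair from storm-starter. It is not part of this commit; it only restates the behaviour the removed tests assert, and it assumes the classes keep their current public API (the storm.starter.tools package may itself be renamed as part of this migration).

import java.util.List;

// Assumption: current storm-starter package; may move under org.apache.storm.starter.
import storm.starter.tools.Rankable;
import storm.starter.tools.RankableObjectWithFields;
import storm.starter.tools.Rankings;

public class RankingsSketch {
  public static void main(String[] args) {
    // Keep only the top 2 entries, ranked by count.
    Rankings rankings = new Rankings(2);
    rankings.updateWith(new RankableObjectWithFields("A", 1));
    rankings.updateWith(new RankableObjectWithFields("B", 3));
    rankings.updateWith(new RankableObjectWithFields("C", 2));

    // Highest count first, truncated to maxSize(): B(3), C(2)
    List<Rankable> top = rankings.getRankings();
    System.out.println(top);
  }
}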
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/test/jvm/storm/starter/tools/SlidingWindowCounterTest.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/test/jvm/storm/starter/tools/SlidingWindowCounterTest.java b/examples/storm-starter/test/jvm/storm/starter/tools/SlidingWindowCounterTest.java
deleted file mode 100644
index 920bf01..0000000
--- a/examples/storm-starter/test/jvm/storm/starter/tools/SlidingWindowCounterTest.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter.tools;
-
-import org.testng.annotations.DataProvider;
-import org.testng.annotations.Test;
-
-import java.util.Map;
-
-import static org.fest.assertions.api.Assertions.assertThat;
-
-public class SlidingWindowCounterTest {
-
-  private static final int ANY_WINDOW_LENGTH_IN_SLOTS = 2;
-  private static final Object ANY_OBJECT = "ANY_OBJECT";
-
-  @DataProvider
-  public Object[][] illegalWindowLengths() {
-    return new Object[][]{ { -10 }, { -3 }, { -2 }, { -1 }, { 0 }, { 1 } };
-  }
-
-  @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalWindowLengths")
-  public void lessThanTwoSlotsShouldThrowIAE(int windowLengthInSlots) {
-    new SlidingWindowCounter<Object>(windowLengthInSlots);
-  }
-
-  @DataProvider
-  public Object[][] legalWindowLengths() {
-    return new Object[][]{ { 2 }, { 3 }, { 20 } };
-  }
-
-  @Test(dataProvider = "legalWindowLengths")
-  public void twoOrMoreSlotsShouldBeValid(int windowLengthInSlots) {
-    new SlidingWindowCounter<Object>(windowLengthInSlots);
-  }
-
-  @Test
-  public void newInstanceShouldHaveEmptyCounts() {
-    // given
-    SlidingWindowCounter<Object> counter = new SlidingWindowCounter<Object>(ANY_WINDOW_LENGTH_IN_SLOTS);
-
-    // when
-    Map<Object, Long> counts = counter.getCountsThenAdvanceWindow();
-
-    // then
-    assertThat(counts).isEmpty();
-  }
-
-  @DataProvider
-  public Object[][] simulatedCounterIterations() {
-    return new Object[][]{ { 2, new int[]{ 3, 2, 0, 0, 1, 0, 0, 0 }, new long[]{ 3, 5, 2, 0, 1, 1, 0, 0 } },
-        { 3, new int[]{ 3, 2, 0, 0, 1, 0, 0, 0 }, new long[]{ 3, 5, 5, 2, 1, 1, 1, 0 } },
-        { 4, new int[]{ 3, 2, 0, 0, 1, 0, 0, 0 }, new long[]{ 3, 5, 5, 5, 3, 1, 1, 1 } },
-        { 5, new int[]{ 3, 2, 0, 0, 1, 0, 0, 0 }, new long[]{ 3, 5, 5, 5, 6, 3, 1, 1 } },
-        { 5, new int[]{ 3, 11, 5, 13, 7, 17, 0, 3, 50, 600, 7000 },
-            new long[]{ 3, 14, 19, 32, 39, 53, 42, 40, 77, 670, 7653 } }, };
-  }
-
-  @Test(dataProvider = "simulatedCounterIterations")
-  public void testCounterWithSimulatedRuns(int windowLengthInSlots, int[] incrementsPerIteration,
-      long[] expCountsPerIteration) {
-    // given
-    SlidingWindowCounter<Object> counter = new SlidingWindowCounter<Object>(windowLengthInSlots);
-    int numIterations = incrementsPerIteration.length;
-
-    for (int i = 0; i < numIterations; i++) {
-      int numIncrements = incrementsPerIteration[i];
-      long expCounts = expCountsPerIteration[i];
-      // Objects are absent if they were zero both this iteration
-      // and the last -- if only this one, we need to report zero.
-      boolean expAbsent = ((expCounts == 0) && ((i == 0) || (expCountsPerIteration[i - 1] == 0)));
-
-      // given (for this iteration)
-      for (int j = 0; j < numIncrements; j++) {
-        counter.incrementCount(ANY_OBJECT);
-      }
-
-      // when (for this iteration)
-      Map<Object, Long> counts = counter.getCountsThenAdvanceWindow();
-
-      // then (for this iteration)
-      if (expAbsent) {
-        assertThat(counts).doesNotContainKey(ANY_OBJECT);
-      }
-      else {
-        assertThat(counts.get(ANY_OBJECT)).isEqualTo(expCounts);
-      }
-    }
-  }
-
-}

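Similarly, the SlidingWindowCounterTest removed above boils down to the usage pattern sketched below. This is illustrative only and assumes the class keeps its current constructor and getCountsThenAdvanceWindow() contract.

import java.util.Map;

// Assumption: current storm-starter package; may move as part of this migration.
import storm.starter.tools.SlidingWindowCounter;

public class SlidingWindowCounterSketch {
  public static void main(String[] args) {
    // Window of 3 slots; each call to getCountsThenAdvanceWindow() closes one slot.
    SlidingWindowCounter<String> counter = new SlidingWindowCounter<String>(3);
    counter.incrementCount("apache");
    counter.incrementCount("apache");
    counter.incrementCount("storm");

    // Totals over the last 3 slots, after which the oldest slot is wiped.
    Map<String, Long> counts = counter.getCountsThenAdvanceWindow();
    System.out.println(counts); // {apache=2, storm=1}
  }
}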
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/test/jvm/storm/starter/tools/SlotBasedCounterTest.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/test/jvm/storm/starter/tools/SlotBasedCounterTest.java b/examples/storm-starter/test/jvm/storm/starter/tools/SlotBasedCounterTest.java
deleted file mode 100644
index 3ad042b..0000000
--- a/examples/storm-starter/test/jvm/storm/starter/tools/SlotBasedCounterTest.java
+++ /dev/null
@@ -1,181 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter.tools;
-
-import org.testng.annotations.DataProvider;
-import org.testng.annotations.Test;
-
-import java.util.Map;
-
-import static org.fest.assertions.api.Assertions.assertThat;
-
-public class SlotBasedCounterTest {
-
-  private static final int ANY_NUM_SLOTS = 1;
-  private static final int ANY_SLOT = 0;
-  private static final Object ANY_OBJECT = "ANY_OBJECT";
-
-  @DataProvider
-  public Object[][] illegalNumSlotsData() {
-    return new Object[][]{ { -10 }, { -3 }, { -2 }, { -1 }, { 0 } };
-  }
-
-  @Test(expectedExceptions = IllegalArgumentException.class, dataProvider = "illegalNumSlotsData")
-  public void negativeOrZeroNumSlotsShouldThrowIAE(int numSlots) {
-    new SlotBasedCounter<Object>(numSlots);
-  }
-
-  @DataProvider
-  public Object[][] legalNumSlotsData() {
-    return new Object[][]{ { 1 }, { 2 }, { 3 }, { 20 } };
-  }
-
-  @Test(dataProvider = "legalNumSlotsData")
-  public void positiveNumSlotsShouldBeOk(int numSlots) {
-    new SlotBasedCounter<Object>(numSlots);
-  }
-
-  @Test
-  public void newInstanceShouldHaveEmptyCounts() {
-    // given
-    SlotBasedCounter<Object> counter = new SlotBasedCounter<Object>(ANY_NUM_SLOTS);
-
-    // when
-    Map<Object, Long> counts = counter.getCounts();
-
-    // then
-    assertThat(counts).isEmpty();
-  }
-
-  @Test
-  public void shouldReturnNonEmptyCountsWhenAtLeastOneObjectWasCounted() {
-    // given
-    SlotBasedCounter<Object> counter = new SlotBasedCounter<Object>(ANY_NUM_SLOTS);
-    counter.incrementCount(ANY_OBJECT, ANY_SLOT);
-
-    // when
-    Map<Object, Long> counts = counter.getCounts();
-
-    // then
-    assertThat(counts).isNotEmpty();
-
-    // additional tests that go beyond what this test is primarily about
-    assertThat(counts.size()).isEqualTo(1);
-    assertThat(counts.get(ANY_OBJECT)).isEqualTo(1);
-  }
-
-  @DataProvider
-  public Object[][] incrementCountData() {
-    return new Object[][]{ { new String[]{ "foo", "bar" }, new int[]{ 3, 2 } } };
-  }
-
-  @Test(dataProvider = "incrementCountData")
-  public void shouldIncrementCount(Object[] objects, int[] expCounts) {
-    // given
-    SlotBasedCounter<Object> counter = new SlotBasedCounter<Object>(ANY_NUM_SLOTS);
-
-    // when
-    for (int i = 0; i < objects.length; i++) {
-      Object obj = objects[i];
-      int numIncrements = expCounts[i];
-      for (int j = 0; j < numIncrements; j++) {
-        counter.incrementCount(obj, ANY_SLOT);
-      }
-    }
-
-    // then
-    for (int i = 0; i < objects.length; i++) {
-      assertThat(counter.getCount(objects[i], ANY_SLOT)).isEqualTo(expCounts[i]);
-    }
-    assertThat(counter.getCount("nonexistentObject", ANY_SLOT)).isEqualTo(0);
-  }
-
-  @Test
-  public void shouldReturnZeroForNonexistentObject() {
-    // given
-    SlotBasedCounter<Object> counter = new SlotBasedCounter<Object>(ANY_NUM_SLOTS);
-
-    // when
-    counter.incrementCount("somethingElse", ANY_SLOT);
-
-    // then
-    assertThat(counter.getCount("nonexistentObject", ANY_SLOT)).isEqualTo(0);
-  }
-
-  @Test
-  public void shouldIncrementCountOnlyOneSlotAtATime() {
-    // given
-    int numSlots = 3;
-    Object obj = Long.valueOf(10);
-    SlotBasedCounter<Object> counter = new SlotBasedCounter<Object>(numSlots);
-
-    // when (empty)
-    // then
-    assertThat(counter.getCount(obj, 0)).isEqualTo(0);
-    assertThat(counter.getCount(obj, 1)).isEqualTo(0);
-    assertThat(counter.getCount(obj, 2)).isEqualTo(0);
-
-    // when
-    counter.incrementCount(obj, 1);
-
-    // then
-    assertThat(counter.getCount(obj, 0)).isEqualTo(0);
-    assertThat(counter.getCount(obj, 1)).isEqualTo(1);
-    assertThat(counter.getCount(obj, 2)).isEqualTo(0);
-  }
-
-  @Test
-  public void wipeSlotShouldSetAllCountsInSlotToZero() {
-    // given
-    SlotBasedCounter<Object> counter = new SlotBasedCounter<Object>(ANY_NUM_SLOTS);
-    Object countWasOne = "countWasOne";
-    Object countWasThree = "countWasThree";
-    counter.incrementCount(countWasOne, ANY_SLOT);
-    counter.incrementCount(countWasThree, ANY_SLOT);
-    counter.incrementCount(countWasThree, ANY_SLOT);
-    counter.incrementCount(countWasThree, ANY_SLOT);
-
-    // when
-    counter.wipeSlot(ANY_SLOT);
-
-    // then
-    assertThat(counter.getCount(countWasOne, ANY_SLOT)).isEqualTo(0);
-    assertThat(counter.getCount(countWasThree, ANY_SLOT)).isEqualTo(0);
-  }
-
-  @Test
-  public void wipeZerosShouldRemoveAnyObjectsWithZeroTotalCount() {
-    // given
-    SlotBasedCounter<Object> counter = new SlotBasedCounter<Object>(2);
-    int wipeSlot = 0;
-    int otherSlot = 1;
-    Object willBeRemoved = "willBeRemoved";
-    Object willContinueToBeTracked = "willContinueToBeTracked";
-    counter.incrementCount(willBeRemoved, wipeSlot);
-    counter.incrementCount(willContinueToBeTracked, wipeSlot);
-    counter.incrementCount(willContinueToBeTracked, otherSlot);
-
-    // when
-    counter.wipeSlot(wipeSlot);
-    counter.wipeZeros();
-
-    // then
-    assertThat(counter.getCounts()).doesNotContainKey(willBeRemoved);
-    assertThat(counter.getCounts()).containsKey(willContinueToBeTracked);
-  }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/README.md
----------------------------------------------------------------------
diff --git a/external/flux/README.md b/external/flux/README.md
index c4ef145..7043689 100644
--- a/external/flux/README.md
+++ b/external/flux/README.md
@@ -236,7 +236,7 @@ sentence-spout[1](org.apache.storm.flux.spouts.GenericShellSpout)
 ---------------- BOLTS ---------------
 splitsentence[1](org.apache.storm.flux.bolts.GenericShellBolt)
 log[1](org.apache.storm.flux.wrappers.bolts.LogInfoBolt)
-count[1](backtype.storm.testing.TestWordCounter)
+count[1](org.apache.storm.testing.TestWordCounter)
 --------------- STREAMS ---------------
 sentence-spout --SHUFFLE--> splitsentence
 splitsentence --FIELDS--> count
@@ -255,7 +255,7 @@ definition consists of the following:
       * A list of spouts, each identified by a unique ID
       * A list of bolts, each identified by a unique ID
       * A list of "stream" objects representing a flow of tuples between spouts and bolts
-  4. **OR** (A JVM class that can produce a `backtype.storm.generated.StormTopology` instance:
+  4. **OR** (A JVM class that can produce a `org.apache.storm.generated.StormTopology` instance:
       * A `topologySource` definition.
 
 
@@ -270,13 +270,13 @@ config:
 # spout definitions
 spouts:
   - id: "spout-1"
-    className: "backtype.storm.testing.TestWordSpout"
+    className: "org.apache.storm.testing.TestWordSpout"
     parallelism: 1
 
 # bolt definitions
 bolts:
   - id: "bolt-1"
-    className: "backtype.storm.testing.TestWordCounter"
+    className: "org.apache.storm.testing.TestWordCounter"
     parallelism: 1
   - id: "bolt-2"
     className: "org.apache.storm.flux.wrappers.bolts.LogInfoBolt"
@@ -324,7 +324,7 @@ You would then be able to reference those properties by key in your `.yaml` file
 
 ```yaml
   - id: "zkHosts"
-    className: "storm.kafka.ZkHosts"
+    className: "org.apache.storm.kafka.ZkHosts"
     constructorArgs:
       - "${kafka.zookeeper.hosts}"
 ```
@@ -344,13 +344,13 @@ Components are essentially named object instances that are made available as con
 bolts. If you are familiar with the Spring framework, components are roughly analogous to Spring beans.
 
 Every component is identified, at a minimum, by a unique identifier (String) and a class name (String). For example,
-the following will make an instance of the `storm.kafka.StringScheme` class available as a reference under the key
-`"stringScheme"` . This assumes the `storm.kafka.StringScheme` has a default constructor.
+the following will make an instance of the `org.apache.storm.kafka.StringScheme` class available as a reference under the key
+`"stringScheme"` . This assumes the `org.apache.storm.kafka.StringScheme` has a default constructor.
 
 ```yaml
 components:
   - id: "stringScheme"
-    className: "storm.kafka.StringScheme"
+    className: "org.apache.storm.kafka.StringScheme"
 ```
 
 ### Constructor Arguments, References, Properties and Configuration Methods
@@ -362,7 +362,7 @@ object by calling the constructor that takes a single string as an argument:
 
 ```yaml
   - id: "zkHosts"
-    className: "storm.kafka.ZkHosts"
+    className: "org.apache.storm.kafka.ZkHosts"
     constructorArgs:
       - "localhost:2181"
       - true
@@ -378,10 +378,10 @@ to another component's constructor:
 ```yaml
 components:
   - id: "stringScheme"
-    className: "storm.kafka.StringScheme"
+    className: "org.apache.storm.kafka.StringScheme"
 
   - id: "stringMultiScheme"
-    className: "backtype.storm.spout.SchemeAsMultiScheme"
+    className: "org.apache.storm.spout.SchemeAsMultiScheme"
     constructorArgs:
       - ref: "stringScheme" # component with id "stringScheme" must be declared above.
 ```
@@ -393,7 +393,7 @@ JavaBean-like setter methods and fields declared as `public`:
 
 ```yaml
   - id: "spoutConfig"
-    className: "storm.kafka.SpoutConfig"
+    className: "org.apache.storm.kafka.SpoutConfig"
     constructorArgs:
       # brokerHosts
       - ref: "zkHosts"
@@ -492,7 +492,7 @@ FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, Units.MB);
 
 ## Topology Config
 The `config` section is simply a map of Storm topology configuration parameters that will be passed to the
-`backtype.storm.StormSubmitter` as an instance of the `backtype.storm.Config` class:
+`org.apache.storm.StormSubmitter` as an instance of the `org.apache.storm.Config` class:
 
 ```yaml
 config:
@@ -537,7 +537,7 @@ topologySource:
 ```
 
 __N.B.:__ The specified method must accept a single argument of type `java.util.Map<String, Object>` or
-`backtype.storm.Config`, and return a `backtype.storm.generated.StormTopology` object.
+`org.apache.storm.Config`, and return a `org.apache.storm.generated.StormTopology` object.
 
 # YAML DSL
 ## Spouts and Bolts
@@ -568,21 +568,21 @@ Kafka spout example:
 ```yaml
 components:
   - id: "stringScheme"
-    className: "storm.kafka.StringScheme"
+    className: "org.apache.storm.kafka.StringScheme"
 
   - id: "stringMultiScheme"
-    className: "backtype.storm.spout.SchemeAsMultiScheme"
+    className: "org.apache.storm.spout.SchemeAsMultiScheme"
     constructorArgs:
       - ref: "stringScheme"
 
   - id: "zkHosts"
-    className: "storm.kafka.ZkHosts"
+    className: "org.apache.storm.kafka.ZkHosts"
     constructorArgs:
       - "localhost:2181"
 
 # Alternative kafka config
 #  - id: "kafkaConfig"
-#    className: "storm.kafka.KafkaConfig"
+#    className: "org.apache.storm.kafka.KafkaConfig"
 #    constructorArgs:
 #      # brokerHosts
 #      - ref: "zkHosts"
@@ -592,7 +592,7 @@ components:
 #      - "myKafkaClientId"
 
   - id: "spoutConfig"
-    className: "storm.kafka.SpoutConfig"
+    className: "org.apache.storm.kafka.SpoutConfig"
     constructorArgs:
       # brokerHosts
       - ref: "zkHosts"
@@ -614,7 +614,7 @@ config:
 # spout definitions
 spouts:
   - id: "kafka-spout"
-    className: "storm.kafka.KafkaSpout"
+    className: "org.apache.storm.kafka.KafkaSpout"
     constructorArgs:
       - ref: "spoutConfig"
 
@@ -641,7 +641,7 @@ bolts:
     # ...
 
   - id: "count"
-    className: "backtype.storm.testing.TestWordCounter"
+    className: "org.apache.storm.testing.TestWordCounter"
     parallelism: 1
     # ...
 ```
@@ -708,7 +708,7 @@ Custom stream groupings are defined by setting the grouping type to `CUSTOM` and
 that tells Flux how to instantiate the custom class. The `customClass` definition extends `component`, so it supports
 constructor arguments, references, and properties as well.
 
-The example below creates a Stream with an instance of the `backtype.storm.testing.NGrouping` custom stream grouping
+The example below creates a Stream with an instance of the `org.apache.storm.testing.NGrouping` custom stream grouping
 class.
 
 ```yaml
@@ -718,7 +718,7 @@ class.
     grouping:
       type: CUSTOM
       customClass:
-        className: "backtype.storm.testing.NGrouping"
+        className: "org.apache.storm.testing.NGrouping"
         constructorArgs:
           - 1
 ```
@@ -786,7 +786,7 @@ bolts:
     parallelism: 1
 
   - id: "count"
-    className: "backtype.storm.testing.TestWordCounter"
+    className: "org.apache.storm.testing.TestWordCounter"
     parallelism: 1
 
 #stream definitions
@@ -835,4 +835,4 @@ topologySource:
 
 ## Committer Sponsors
 
- * P. Taylor Goetz ([ptgoetz@apache.org](mailto:ptgoetz@apache.org))
\ No newline at end of file
+ * P. Taylor Goetz ([ptgoetz@apache.org](mailto:ptgoetz@apache.org))

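The README changes above also describe the `topologySource` contract: a method that accepts a `java.util.Map<String, Object>` (or `org.apache.storm.Config`) and returns an `org.apache.storm.generated.StormTopology`. A minimal sketch of such a class under the renamed packages follows; the class name and component ids are illustrative and not part of the commit.

import java.util.Map;

import org.apache.storm.generated.StormTopology;
import org.apache.storm.testing.TestWordCounter;
import org.apache.storm.testing.TestWordSpout;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.tuple.Fields;

// Hypothetical example class; Flux would be pointed at it via a `topologySource` definition.
public class WordCountTopologySource {

  // Receives the topology config and returns the assembled topology.
  public StormTopology getTopology(Map<String, Object> config) {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("sentence-spout", new TestWordSpout(), 1);
    builder.setBolt("count", new TestWordCounter(), 1)
        .fieldsGrouping("sentence-spout", new Fields("word"));
    return builder.createTopology();
  }
}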
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-core/src/main/java/org/apache/storm/flux/Flux.java
----------------------------------------------------------------------
diff --git a/external/flux/flux-core/src/main/java/org/apache/storm/flux/Flux.java b/external/flux/flux-core/src/main/java/org/apache/storm/flux/Flux.java
index 71c20a7..cdebd01 100644
--- a/external/flux/flux-core/src/main/java/org/apache/storm/flux/Flux.java
+++ b/external/flux/flux-core/src/main/java/org/apache/storm/flux/Flux.java
@@ -17,13 +17,13 @@
  */
 package org.apache.storm.flux;
 
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.StormSubmitter;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.generated.SubmitOptions;
-import backtype.storm.generated.TopologyInitialStatus;
-import backtype.storm.utils.Utils;
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.generated.SubmitOptions;
+import org.apache.storm.generated.TopologyInitialStatus;
+import org.apache.storm.utils.Utils;
 import org.apache.commons.cli.*;
 import org.apache.storm.flux.model.*;
 import org.apache.storm.flux.parser.FluxParser;

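The Flux.java hunk above only swaps import statements, but it may help to see how those classes are typically used after the rename. The following is a generic sketch of local versus remote submission, not Flux's actual implementation; the topology name and sleep interval are placeholders.

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.generated.StormTopology;
import org.apache.storm.utils.Utils;

// Sketch only: the usual submission pattern for the classes re-imported above.
public class SubmitSketch {
  public static void run(StormTopology topology, Config conf, boolean localMode) throws Exception {
    if (localMode) {
      // Run in-process, let the topology work for a while, then tear it down.
      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("example-topology", conf, topology);
      Utils.sleep(30000);
      cluster.killTopology("example-topology");
      cluster.shutdown();
    } else {
      // Submit to a real cluster via Nimbus.
      StormSubmitter.submitTopology("example-topology", conf, topology);
    }
  }
}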
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-core/src/main/java/org/apache/storm/flux/FluxBuilder.java
----------------------------------------------------------------------
diff --git a/external/flux/flux-core/src/main/java/org/apache/storm/flux/FluxBuilder.java b/external/flux/flux-core/src/main/java/org/apache/storm/flux/FluxBuilder.java
index 014116d..c16aa05 100644
--- a/external/flux/flux-core/src/main/java/org/apache/storm/flux/FluxBuilder.java
+++ b/external/flux/flux-core/src/main/java/org/apache/storm/flux/FluxBuilder.java
@@ -17,12 +17,12 @@
  */
 package org.apache.storm.flux;
 
-import backtype.storm.Config;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.grouping.CustomStreamGrouping;
-import backtype.storm.topology.*;
-import backtype.storm.tuple.Fields;
-import backtype.storm.utils.Utils;
+import org.apache.storm.Config;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.grouping.CustomStreamGrouping;
+import org.apache.storm.topology.*;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.utils.Utils;
 import org.apache.storm.flux.api.TopologySource;
 import org.apache.storm.flux.model.*;
 import org.slf4j.Logger;
@@ -35,7 +35,7 @@ public class FluxBuilder {
     private static Logger LOG = LoggerFactory.getLogger(FluxBuilder.class);
 
     /**
-     * Given a topology definition, return a populated `backtype.storm.Config` instance.
+     * Given a topology definition, return a populated `org.apache.storm.Config` instance.
      *
      * @param topologyDef
      * @return
@@ -103,7 +103,7 @@ public class FluxBuilder {
 
     /**
      * Given a `java.lang.Object` instance and a method name, attempt to find a method that matches the input
-     * parameter: `java.util.Map` or `backtype.storm.Config`.
+     * parameter: `java.util.Map` or `org.apache.storm.Config`.
      *
      * @param topologySource object to inspect for the specified method
      * @param methodName name of the method to look for

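The updated FluxBuilder javadoc above says the builder looks for a method whose single parameter is either `java.util.Map` or `org.apache.storm.Config`. Below is a minimal reflection sketch of that kind of lookup, for illustration only; it is not FluxBuilder's actual code, and the class and method names are made up.

import java.lang.reflect.Method;
import java.util.Map;

import org.apache.storm.Config;

public final class TopologyMethodFinder {
  // Return the first public method with the given name that takes exactly one
  // argument of type java.util.Map or org.apache.storm.Config, or null if none.
  public static Method find(Object topologySource, String methodName) {
    for (Method m : topologySource.getClass().getMethods()) {
      Class<?>[] params = m.getParameterTypes();
      if (m.getName().equals(methodName)
          && params.length == 1
          && (params[0].equals(Map.class) || params[0].equals(Config.class))) {
        return m;
      }
    }
    return null;
  }
}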
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-core/src/main/java/org/apache/storm/flux/api/TopologySource.java
----------------------------------------------------------------------
diff --git a/external/flux/flux-core/src/main/java/org/apache/storm/flux/api/TopologySource.java b/external/flux/flux-core/src/main/java/org/apache/storm/flux/api/TopologySource.java
index fbccfb7..2777854 100644
--- a/external/flux/flux-core/src/main/java/org/apache/storm/flux/api/TopologySource.java
+++ b/external/flux/flux-core/src/main/java/org/apache/storm/flux/api/TopologySource.java
@@ -18,7 +18,7 @@
 package org.apache.storm.flux.api;
 
 
-import backtype.storm.generated.StormTopology;
+import org.apache.storm.generated.StormTopology;
 
 import java.util.Map;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-core/src/main/java/org/apache/storm/flux/model/ExecutionContext.java
----------------------------------------------------------------------
diff --git a/external/flux/flux-core/src/main/java/org/apache/storm/flux/model/ExecutionContext.java b/external/flux/flux-core/src/main/java/org/apache/storm/flux/model/ExecutionContext.java
index e94b887..1520006 100644
--- a/external/flux/flux-core/src/main/java/org/apache/storm/flux/model/ExecutionContext.java
+++ b/external/flux/flux-core/src/main/java/org/apache/storm/flux/model/ExecutionContext.java
@@ -17,9 +17,9 @@
  */
 package org.apache.storm.flux.model;
 
-import backtype.storm.Config;
-import backtype.storm.task.IBolt;
-import backtype.storm.topology.IRichSpout;
+import org.apache.storm.Config;
+import org.apache.storm.task.IBolt;
+import org.apache.storm.topology.IRichSpout;
 
 import java.util.HashMap;
 import java.util.List;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-core/src/main/java/org/apache/storm/flux/model/ObjectDef.java
----------------------------------------------------------------------
diff --git a/external/flux/flux-core/src/main/java/org/apache/storm/flux/model/ObjectDef.java b/external/flux/flux-core/src/main/java/org/apache/storm/flux/model/ObjectDef.java
index dc9e6cb..bfac7dc 100644
--- a/external/flux/flux-core/src/main/java/org/apache/storm/flux/model/ObjectDef.java
+++ b/external/flux/flux-core/src/main/java/org/apache/storm/flux/model/ObjectDef.java
@@ -17,7 +17,7 @@
  */
 package org.apache.storm.flux.model;
 
-import backtype.storm.Config;
+import org.apache.storm.Config;
 
 import java.util.ArrayList;
 import java.util.LinkedHashMap;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-core/src/main/java/org/apache/storm/flux/model/TopologyDef.java
----------------------------------------------------------------------
diff --git a/external/flux/flux-core/src/main/java/org/apache/storm/flux/model/TopologyDef.java b/external/flux/flux-core/src/main/java/org/apache/storm/flux/model/TopologyDef.java
index a6ae450..86614f1 100644
--- a/external/flux/flux-core/src/main/java/org/apache/storm/flux/model/TopologyDef.java
+++ b/external/flux/flux-core/src/main/java/org/apache/storm/flux/model/TopologyDef.java
@@ -27,7 +27,7 @@ import java.util.*;
  *
  * It consists of the following:
  *   1. The topology name
- *   2. A `java.util.Map` representing the `backtype.storm.config` for the topology
+ *   2. A `java.util.Map` representing the `org.apache.storm.config` for the topology
  *   3. A list of spout definitions
  *   4. A list of bolt definitions
  *   5. A list of stream definitions that define the flow between spouts and bolts.

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-core/src/test/java/org/apache/storm/flux/TCKTest.java
----------------------------------------------------------------------
diff --git a/external/flux/flux-core/src/test/java/org/apache/storm/flux/TCKTest.java b/external/flux/flux-core/src/test/java/org/apache/storm/flux/TCKTest.java
index 91a81f1..7a5ed7a 100644
--- a/external/flux/flux-core/src/test/java/org/apache/storm/flux/TCKTest.java
+++ b/external/flux/flux-core/src/test/java/org/apache/storm/flux/TCKTest.java
@@ -17,8 +17,8 @@
  */
 package org.apache.storm.flux;
 
-import backtype.storm.Config;
-import backtype.storm.generated.StormTopology;
+import org.apache.storm.Config;
+import org.apache.storm.generated.StormTopology;
 import org.apache.storm.flux.model.ExecutionContext;
 import org.apache.storm.flux.model.TopologyDef;
 import org.apache.storm.flux.parser.FluxParser;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-core/src/test/java/org/apache/storm/flux/test/SimpleTopology.java
----------------------------------------------------------------------
diff --git a/external/flux/flux-core/src/test/java/org/apache/storm/flux/test/SimpleTopology.java b/external/flux/flux-core/src/test/java/org/apache/storm/flux/test/SimpleTopology.java
index 981d6b0..8e3cda2 100644
--- a/external/flux/flux-core/src/test/java/org/apache/storm/flux/test/SimpleTopology.java
+++ b/external/flux/flux-core/src/test/java/org/apache/storm/flux/test/SimpleTopology.java
@@ -17,8 +17,8 @@
  */
 package org.apache.storm.flux.test;
 
-import backtype.storm.generated.StormTopology;
-import backtype.storm.topology.TopologyBuilder;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.topology.TopologyBuilder;
 import org.apache.storm.flux.api.TopologySource;
 import org.apache.storm.flux.wrappers.bolts.LogInfoBolt;
 import org.apache.storm.flux.wrappers.spouts.FluxShellSpout;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-core/src/test/java/org/apache/storm/flux/test/SimpleTopologySource.java
----------------------------------------------------------------------
diff --git a/external/flux/flux-core/src/test/java/org/apache/storm/flux/test/SimpleTopologySource.java b/external/flux/flux-core/src/test/java/org/apache/storm/flux/test/SimpleTopologySource.java
index 61eb113..2fadacf 100644
--- a/external/flux/flux-core/src/test/java/org/apache/storm/flux/test/SimpleTopologySource.java
+++ b/external/flux/flux-core/src/test/java/org/apache/storm/flux/test/SimpleTopologySource.java
@@ -17,8 +17,8 @@
  */
 package org.apache.storm.flux.test;
 
-import backtype.storm.generated.StormTopology;
-import backtype.storm.topology.TopologyBuilder;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.topology.TopologyBuilder;
 import org.apache.storm.flux.api.TopologySource;
 import org.apache.storm.flux.wrappers.bolts.LogInfoBolt;
 import org.apache.storm.flux.wrappers.spouts.FluxShellSpout;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-core/src/test/java/org/apache/storm/flux/test/SimpleTopologyWithConfigParam.java
----------------------------------------------------------------------
diff --git a/external/flux/flux-core/src/test/java/org/apache/storm/flux/test/SimpleTopologyWithConfigParam.java b/external/flux/flux-core/src/test/java/org/apache/storm/flux/test/SimpleTopologyWithConfigParam.java
index 39e2e3d..8b0aa05 100644
--- a/external/flux/flux-core/src/test/java/org/apache/storm/flux/test/SimpleTopologyWithConfigParam.java
+++ b/external/flux/flux-core/src/test/java/org/apache/storm/flux/test/SimpleTopologyWithConfigParam.java
@@ -17,9 +17,9 @@
  */
 package org.apache.storm.flux.test;
 
-import backtype.storm.Config;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.topology.TopologyBuilder;
+import org.apache.storm.Config;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.topology.TopologyBuilder;
 import org.apache.storm.flux.wrappers.bolts.LogInfoBolt;
 import org.apache.storm.flux.wrappers.spouts.FluxShellSpout;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/flux/flux-core/src/test/java/org/apache/storm/flux/test/TestBolt.java
----------------------------------------------------------------------
diff --git a/external/flux/flux-core/src/test/java/org/apache/storm/flux/test/TestBolt.java b/external/flux/flux-core/src/test/java/org/apache/storm/flux/test/TestBolt.java
index f9f28c5..c8a7b85 100644
--- a/external/flux/flux-core/src/test/java/org/apache/storm/flux/test/TestBolt.java
+++ b/external/flux/flux-core/src/test/java/org/apache/storm/flux/test/TestBolt.java
@@ -17,10 +17,10 @@
  */
 package org.apache.storm.flux.test;
 
-import backtype.storm.topology.BasicOutputCollector;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseBasicBolt;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.topology.BasicOutputCollector;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseBasicBolt;
+import org.apache.storm.tuple.Tuple;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 


[18/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/daemon/nimbus.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/daemon/nimbus.clj b/storm-core/src/clj/org/apache/storm/daemon/nimbus.clj
new file mode 100644
index 0000000..3f1d4e5
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/daemon/nimbus.clj
@@ -0,0 +1,2259 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.daemon.nimbus
+  (:import [org.apache.thrift.server THsHaServer THsHaServer$Args])
+  (:import [org.apache.storm.generated KeyNotFoundException])
+  (:import [org.apache.storm.blobstore LocalFsBlobStore])
+  (:import [org.apache.thrift.protocol TBinaryProtocol TBinaryProtocol$Factory])
+  (:import [org.apache.thrift.exception])
+  (:import [org.apache.thrift.transport TNonblockingServerTransport TNonblockingServerSocket])
+  (:import [org.apache.commons.io FileUtils])
+  (:import [javax.security.auth Subject])
+  (:import [org.apache.storm.security.auth NimbusPrincipal])
+  (:import [java.nio ByteBuffer]
+           [java.util Collections List HashMap ArrayList Iterator]
+           [org.apache.storm.generated NimbusSummary])
+  (:import [org.apache.storm.blobstore AtomicOutputStream BlobStoreAclHandler
+            InputStreamWithMeta KeyFilter KeySequenceNumber BlobSynchronizer])
+  (:import [java.io File FileOutputStream FileInputStream])
+  (:import [java.net InetAddress ServerSocket BindException])
+  (:import [java.nio.channels Channels WritableByteChannel])
+  (:import [org.apache.storm.security.auth ThriftServer ThriftConnectionType ReqContext AuthUtils])
+  (:use [org.apache.storm.scheduler.DefaultScheduler])
+  (:import [org.apache.storm.scheduler INimbus SupervisorDetails WorkerSlot TopologyDetails
+            Cluster Topologies SchedulerAssignment SchedulerAssignmentImpl DefaultScheduler ExecutorDetails])
+  (:import [org.apache.storm.nimbus NimbusInfo])
+  (:import [org.apache.storm.utils TimeCacheMap TimeCacheMap$ExpiredCallback Utils TupleUtils ThriftTopologyUtils
+            BufferFileInputStream BufferInputStream])
+  (:import [org.apache.storm.generated NotAliveException AlreadyAliveException StormTopology ErrorInfo
+            ExecutorInfo InvalidTopologyException Nimbus$Iface Nimbus$Processor SubmitOptions TopologyInitialStatus
+            KillOptions RebalanceOptions ClusterSummary SupervisorSummary TopologySummary TopologyInfo TopologyHistoryInfo
+            ExecutorSummary AuthorizationException GetInfoOptions NumErrorsChoice SettableBlobMeta ReadableBlobMeta
+            BeginDownloadResult ListBlobsResult ComponentPageInfo TopologyPageInfo LogConfig LogLevel LogLevelAction
+            ProfileRequest ProfileAction NodeInfo])
+  (:import [org.apache.storm.daemon Shutdownable])
+  (:import [org.apache.storm.cluster ClusterStateContext DaemonType])
+  (:use [org.apache.storm util config log timer zookeeper local-state])
+  (:require [org.apache.storm [cluster :as cluster]
+                            [converter :as converter]
+                            [stats :as stats]])
+  (:require [clojure.set :as set])
+  (:import [org.apache.storm.daemon.common StormBase Assignment])
+  (:use [org.apache.storm.daemon common])
+  (:use [org.apache.storm config])
+  (:import [org.apache.zookeeper data.ACL ZooDefs$Ids ZooDefs$Perms])
+  (:import [org.apache.storm.utils VersionInfo])
+  (:require [clj-time.core :as time])
+  (:require [clj-time.coerce :as coerce])
+  (:require [metrics.meters :refer [defmeter mark!]])
+  (:require [metrics.gauges :refer [defgauge]])
+  (:gen-class
+    :methods [^{:static true} [launch [org.apache.storm.scheduler.INimbus] void]]))
+
+(defmeter nimbus:num-submitTopologyWithOpts-calls)
+(defmeter nimbus:num-submitTopology-calls)
+(defmeter nimbus:num-killTopologyWithOpts-calls)
+(defmeter nimbus:num-killTopology-calls)
+(defmeter nimbus:num-rebalance-calls)
+(defmeter nimbus:num-activate-calls)
+(defmeter nimbus:num-deactivate-calls)
+(defmeter nimbus:num-debug-calls)
+(defmeter nimbus:num-setWorkerProfiler-calls)
+(defmeter nimbus:num-getComponentPendingProfileActions-calls)
+(defmeter nimbus:num-setLogConfig-calls)
+(defmeter nimbus:num-uploadNewCredentials-calls)
+(defmeter nimbus:num-beginFileUpload-calls)
+(defmeter nimbus:num-uploadChunk-calls)
+(defmeter nimbus:num-finishFileUpload-calls)
+(defmeter nimbus:num-beginFileDownload-calls)
+(defmeter nimbus:num-downloadChunk-calls)
+(defmeter nimbus:num-getNimbusConf-calls)
+(defmeter nimbus:num-getLogConfig-calls)
+(defmeter nimbus:num-getTopologyConf-calls)
+(defmeter nimbus:num-getTopology-calls)
+(defmeter nimbus:num-getUserTopology-calls)
+(defmeter nimbus:num-getClusterInfo-calls)
+(defmeter nimbus:num-getTopologyInfoWithOpts-calls)
+(defmeter nimbus:num-getTopologyInfo-calls)
+(defmeter nimbus:num-getTopologyPageInfo-calls)
+(defmeter nimbus:num-getComponentPageInfo-calls)
+(defmeter nimbus:num-shutdown-calls)
+
+(def STORM-VERSION (VersionInfo/getVersion))
+
+(defn file-cache-map [conf]
+  (TimeCacheMap.
+   (int (conf NIMBUS-FILE-COPY-EXPIRATION-SECS))
+   (reify TimeCacheMap$ExpiredCallback
+          (expire [this id stream]
+                  (.close stream)
+                  ))
+   ))
+
+(defn mk-scheduler [conf inimbus]
+  (let [forced-scheduler (.getForcedScheduler inimbus)
+        scheduler (cond
+                    forced-scheduler
+                    (do (log-message "Using forced scheduler from INimbus " (class forced-scheduler))
+                        forced-scheduler)
+
+                    (conf STORM-SCHEDULER)
+                    (do (log-message "Using custom scheduler: " (conf STORM-SCHEDULER))
+                        (-> (conf STORM-SCHEDULER) new-instance))
+
+                    :else
+                    (do (log-message "Using default scheduler")
+                        (DefaultScheduler.)))]
+    (.prepare scheduler conf)
+    scheduler
+    ))
+
+(defmulti blob-sync cluster-mode)
+
+(defnk is-leader [nimbus :throw-exception true]
+  (let [leader-elector (:leader-elector nimbus)]
+    (if (.isLeader leader-elector) true
+      (if throw-exception
+        (let [leader-address (.getLeader leader-elector)]
+          (throw (RuntimeException. (str "not a leader, current leader is " leader-address))))))))
+
+(def NIMBUS-ZK-ACLS
+  [(first ZooDefs$Ids/CREATOR_ALL_ACL)
+   (ACL. (bit-or ZooDefs$Perms/READ ZooDefs$Perms/CREATE) ZooDefs$Ids/ANYONE_ID_UNSAFE)])
+
+(defn mk-blob-cache-map
+  "Constructs a TimeCacheMap instance with a blob store timeout whose
+  expiration callback invokes cancel on the value held by an expired entry when
+  that value is an AtomicOutputStream and calls close otherwise."
+  [conf]
+  (TimeCacheMap.
+    (int (conf NIMBUS-BLOBSTORE-EXPIRATION-SECS))
+    (reify TimeCacheMap$ExpiredCallback
+      (expire [this id stream]
+        (if (instance? AtomicOutputStream stream)
+          (.cancel stream)
+          (.close stream))))))
+
+(defn mk-bloblist-cache-map
+  "Constructs a TimeCacheMap instance with a blobstore timeout and no callback
+  function."
+  [conf]
+  (TimeCacheMap. (int (conf NIMBUS-BLOBSTORE-EXPIRATION-SECS))))
+
+(defn create-tology-action-notifier [conf]
+  (when-not (clojure.string/blank? (conf NIMBUS-TOPOLOGY-ACTION-NOTIFIER-PLUGIN))
+    (let [instance (new-instance (conf NIMBUS-TOPOLOGY-ACTION-NOTIFIER-PLUGIN))]
+      (try
+        (.prepare instance conf)
+        instance
+        (catch Exception e
+          (log-warn-error e "Ignoring exception, could not initialize " (conf NIMBUS-TOPOLOGY-ACTION-NOTIFIER-PLUGIN)))))))
+
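+;; Builds the mutable nimbus "state" map that is threaded through the rest of this
+;; namespace: config, cluster-state and blob-store handles, authorization handlers,
+;; the scheduler, the leader elector, upload/download and heartbeat caches, timers
+;; and locks.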
+(defn nimbus-data [conf inimbus]
+  (let [forced-scheduler (.getForcedScheduler inimbus)]
+    {:conf conf
+     :nimbus-host-port-info (NimbusInfo/fromConf conf)
+     :inimbus inimbus
+     :authorization-handler (mk-authorization-handler (conf NIMBUS-AUTHORIZER) conf)
+     :impersonation-authorization-handler (mk-authorization-handler (conf NIMBUS-IMPERSONATION-AUTHORIZER) conf)
+     :submitted-count (atom 0)
+     :storm-cluster-state (cluster/mk-storm-cluster-state conf :acls (when
+                                                                       (Utils/isZkAuthenticationConfiguredStormServer
+                                                                         conf)
+                                                                       NIMBUS-ZK-ACLS)
+                                                          :context (ClusterStateContext. DaemonType/NIMBUS))
+     :submit-lock (Object.)
+     :cred-update-lock (Object.)
+     :log-update-lock (Object.)
+     :heartbeats-cache (atom {})
+     :downloaders (file-cache-map conf)
+     :uploaders (file-cache-map conf)
+     :blob-store (Utils/getNimbusBlobStore conf (NimbusInfo/fromConf conf))
+     :blob-downloaders (mk-blob-cache-map conf)
+     :blob-uploaders (mk-blob-cache-map conf)
+     :blob-listers (mk-bloblist-cache-map conf)
+     :uptime (uptime-computer)
+     :validator (new-instance (conf NIMBUS-TOPOLOGY-VALIDATOR))
+     :timer (mk-timer :kill-fn (fn [t]
+                                 (log-error t "Error when processing event")
+                                 (exit-process! 20 "Error when processing an event")
+                                 ))
+     :scheduler (mk-scheduler conf inimbus)
+     :leader-elector (zk-leader-elector conf)
+     :id->sched-status (atom {})
+     :node-id->resources (atom {}) ;;resources of supervisors
+     :id->resources (atom {}) ;;resources of topologies
+     :cred-renewers (AuthUtils/GetCredentialRenewers conf)
+     :topology-history-lock (Object.)
+     :topo-history-state (nimbus-topo-history-state conf)
+     :nimbus-autocred-plugins (AuthUtils/getNimbusAutoCredPlugins conf)
+     :nimbus-topology-action-notifier (create-tology-action-notifier conf)
+     }))
+
+(defn inbox [nimbus]
+  (master-inbox (:conf nimbus)))
+
+(defn- get-subject
+  []
+  (let [req (ReqContext/context)]
+    (.subject req)))
+
+(defn- read-storm-conf [conf storm-id blob-store]
+  (clojurify-structure
+    (Utils/fromCompressedJsonConf
+      (.readBlob blob-store (master-stormconf-key storm-id) (get-subject)))))
+
+(declare delay-event)
+(declare mk-assignments)
+
+(defn get-nimbus-subject
+  []
+  (let [subject (Subject.)
+        principal (NimbusPrincipal.)
+        principals (.getPrincipals subject)]
+    (.add principals principal)
+    subject))
+
+(def nimbus-subject
+  (get-nimbus-subject))
+
+(defn- get-key-list-from-id
+  [conf id]
+  (log-debug "set keys id = " id " set = " #{(master-stormcode-key id) (master-stormjar-key id) (master-stormconf-key id)})
+  (if (local-mode? conf)
+    [(master-stormcode-key id) (master-stormconf-key id)]
+    [(master-stormcode-key id) (master-stormjar-key id) (master-stormconf-key id)]))
+
+(defn kill-transition [nimbus storm-id]
+  (fn [kill-time]
+    (let [delay (if kill-time
+                  kill-time
+                  (get (read-storm-conf (:conf nimbus) storm-id (:blob-store nimbus))
+                       TOPOLOGY-MESSAGE-TIMEOUT-SECS))]
+      (delay-event nimbus
+                   storm-id
+                   delay
+                   :remove)
+      {
+        :status {:type :killed}
+        :topology-action-options {:delay-secs delay :action :kill}})
+    ))
+
+(defn rebalance-transition [nimbus storm-id status]
+  (fn [time num-workers executor-overrides]
+    (let [delay (if time
+                  time
+                  (get (read-storm-conf (:conf nimbus) storm-id (:blob-store nimbus))
+                       TOPOLOGY-MESSAGE-TIMEOUT-SECS))]
+      (delay-event nimbus
+                   storm-id
+                   delay
+                   :do-rebalance)
+      {:status {:type :rebalancing}
+       :prev-status status
+       :topology-action-options (-> {:delay-secs delay :action :rebalance}
+                                  (assoc-non-nil :num-workers num-workers)
+                                  (assoc-non-nil :component->executors executor-overrides))
+       })))
+
+(defn do-rebalance [nimbus storm-id status storm-base]
+  (let [rebalance-options (:topology-action-options storm-base)]
+    (.update-storm! (:storm-cluster-state nimbus)
+      storm-id
+        (-> {:topology-action-options nil}
+          (assoc-non-nil :component->executors (:component->executors rebalance-options))
+          (assoc-non-nil :num-workers (:num-workers rebalance-options)))))
+  (mk-assignments nimbus :scratch-topology-id storm-id))
+
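+;; State machine for topology status: for each current status (:active, :inactive,
+;; :killed, :rebalancing) this maps an incoming event to either a new status keyword,
+;; nil (no-op), or a function that performs the transition and returns the storm-base
+;; updates to write back.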
+(defn state-transitions [nimbus storm-id status storm-base]
+  {:active {:inactivate :inactive
+            :activate nil
+            :rebalance (rebalance-transition nimbus storm-id status)
+            :kill (kill-transition nimbus storm-id)
+            }
+   :inactive {:activate :active
+              :inactivate nil
+              :rebalance (rebalance-transition nimbus storm-id status)
+              :kill (kill-transition nimbus storm-id)
+              }
+   :killed {:startup (fn [] (delay-event nimbus
+                                         storm-id
+                                         (-> storm-base
+                                             :topology-action-options
+                                             :delay-secs)
+                                         :remove)
+                             nil)
+            :kill (kill-transition nimbus storm-id)
+            :remove (fn []
+                      (log-message "Killing topology: " storm-id)
+                      (.remove-storm! (:storm-cluster-state nimbus)
+                                      storm-id)
+                      (when (instance? LocalFsBlobStore (:blob-store nimbus))
+                        (doseq [blob-key (get-key-list-from-id (:conf nimbus) storm-id)]
+                          (.remove-blobstore-key! (:storm-cluster-state nimbus) blob-key)
+                          (.remove-key-version! (:storm-cluster-state nimbus) blob-key)))
+                      nil)
+            }
+   :rebalancing {:startup (fn [] (delay-event nimbus
+                                              storm-id
+                                              (-> storm-base
+                                                  :topology-action-options
+                                                  :delay-secs)
+                                              :do-rebalance)
+                                 nil)
+                 :kill (kill-transition nimbus storm-id)
+                 :do-rebalance (fn []
+                                 (do-rebalance nimbus storm-id status storm-base)
+                                 (:type (:prev-status storm-base)))
+                 }})
+
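+;; Applies a state-machine event (e.g. :kill, :rebalance, :startup) to a topology.
+;; Only the leader nimbus may do this; under the submit lock it reads the topology's
+;; current status, looks up the matching transition in state-transitions, runs it,
+;; and writes any resulting storm-base updates back to cluster state.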
+(defn transition!
+  ([nimbus storm-id event]
+     (transition! nimbus storm-id event false))
+  ([nimbus storm-id event error-on-no-transition?]
+    (is-leader nimbus)
+    (locking (:submit-lock nimbus)
+       (let [system-events #{:startup}
+             [event & event-args] (if (keyword? event) [event] event)
+             storm-base (-> nimbus :storm-cluster-state  (.storm-base storm-id nil))
+             status (:status storm-base)]
+         ;; handles the case where event was scheduled but topology has been removed
+         (if-not status
+           (log-message "Cannot apply event " event " to " storm-id " because topology no longer exists")
+           (let [get-event (fn [m e]
+                             (if (contains? m e)
+                               (m e)
+                               (let [msg (str "No transition for event: " event
+                                              ", status: " status,
+                                              " storm-id: " storm-id)]
+                                 (if error-on-no-transition?
+                                   (throw-runtime msg)
+                                   (do (when-not (contains? system-events event)
+                                         (log-message msg))
+                                       nil))
+                                 )))
+                 transition (-> (state-transitions nimbus storm-id status storm-base)
+                                (get (:type status))
+                                (get-event event))
+                 transition (if (or (nil? transition)
+                                    (keyword? transition))
+                              (fn [] transition)
+                              transition)
+                 storm-base-updates (apply transition event-args)
+                 storm-base-updates (if (keyword? storm-base-updates) ;if it's just a keyword, it just indicates the new status.
+                                      {:status {:type storm-base-updates}}
+                                      storm-base-updates)]
+
+             (when storm-base-updates
+               (.update-storm! (:storm-cluster-state nimbus) storm-id storm-base-updates)))))
+       )))
+
+(defn transition-name! [nimbus storm-name event & args]
+  (let [storm-id (get-storm-id (:storm-cluster-state nimbus) storm-name)]
+    (when-not storm-id
+      (throw (NotAliveException. storm-name)))
+    (apply transition! nimbus storm-id event args)))
+
+(defn delay-event [nimbus storm-id delay-secs event]
+  (log-message "Delaying event " event " for " delay-secs " secs for " storm-id)
+  (schedule (:timer nimbus)
+            delay-secs
+            #(transition! nimbus storm-id event false)
+            ))
+
+;; active -> reassign in X secs
+
+;; killed -> wait kill time then shutdown
+;; active -> reassign in X secs
+;; inactive -> nothing
+;; rebalance -> wait X seconds then rebalance
+;; swap... (need to handle kill during swap, etc.)
+;; event transitions are delayed by timer... anything else that comes through (e.g. a kill) override the transition? or just disable other transitions during the transition?
+
+
+(defmulti setup-jar cluster-mode)
+(defmulti clean-inbox cluster-mode)
+
+;; swapping design
+;; -- need 2 ports per worker (swap port and regular port)
+;; -- topology that swaps in can use all the existing topologies swap ports, + unused worker slots
+;; -- how to define worker resources? port range + number of workers?
+
+
+;; Monitoring (or by checking when nodes go down or heartbeats aren't received):
+;; 1. read assignment
+;; 2. see which executors/nodes are up
+;; 3. make new assignment to fix any problems
+;; 4. if a storm exists but is not taken down fully, ensure that storm takedown is launched (step by step remove executors and finally remove assignments)
+
+(defn- assigned-slots
+  "Returns a map from node-id to a set of ports"
+  [storm-cluster-state]
+
+  (let [assignments (.assignments storm-cluster-state nil)]
+    (defaulted
+      (apply merge-with set/union
+             (for [a assignments
+                   [_ [node port]] (-> (.assignment-info storm-cluster-state a nil) :executor->node+port)]
+               {node #{port}}
+               ))
+      {})
+    ))
+
+(defn- all-supervisor-info
+  ([storm-cluster-state] (all-supervisor-info storm-cluster-state nil))
+  ([storm-cluster-state callback]
+     (let [supervisor-ids (.supervisors storm-cluster-state callback)]
+       (into {}
+             (mapcat
+              (fn [id]
+                (if-let [info (.supervisor-info storm-cluster-state id)]
+                  [[id info]]
+                  ))
+              supervisor-ids))
+       )))
+
+(defn- all-scheduling-slots
+  [nimbus topologies missing-assignment-topologies]
+  (let [storm-cluster-state (:storm-cluster-state nimbus)
+        ^INimbus inimbus (:inimbus nimbus)
+
+        supervisor-infos (all-supervisor-info storm-cluster-state nil)
+
+        supervisor-details (dofor [[id info] supervisor-infos]
+                             (SupervisorDetails. id (:meta info) (:resources-map info)))
+
+        ret (.allSlotsAvailableForScheduling inimbus
+                     supervisor-details
+                     topologies
+                     (set missing-assignment-topologies)
+                     )
+        ]
+    (dofor [^WorkerSlot slot ret]
+      [(.getNodeId slot) (.getPort slot)]
+      )))
+
+(defn- get-version-for-key [key nimbus-host-port-info conf]
+  (let [version (KeySequenceNumber. key nimbus-host-port-info)]
+    (.getKeySequenceNumber version conf)))
+
+(defn get-key-seq-from-blob-store [blob-store]
+  (let [key-iter (.listKeys blob-store)]
+    (iterator-seq key-iter)))
+
+(defn- setup-storm-code [nimbus conf storm-id tmp-jar-location storm-conf topology]
+  (let [subject (get-subject)
+        storm-cluster-state (:storm-cluster-state nimbus)
+        blob-store (:blob-store nimbus)
+        jar-key (master-stormjar-key storm-id)
+        code-key (master-stormcode-key storm-id)
+        conf-key (master-stormconf-key storm-id)
+        nimbus-host-port-info (:nimbus-host-port-info nimbus)]
+    (when tmp-jar-location ;;in local mode there is no jar
+      (.createBlob blob-store jar-key (FileInputStream. tmp-jar-location) (SettableBlobMeta. BlobStoreAclHandler/DEFAULT) subject)
+      (if (instance? LocalFsBlobStore blob-store)
+        (.setup-blobstore! storm-cluster-state jar-key nimbus-host-port-info (get-version-for-key jar-key nimbus-host-port-info conf))))
+    (.createBlob blob-store conf-key (Utils/toCompressedJsonConf storm-conf) (SettableBlobMeta. BlobStoreAclHandler/DEFAULT) subject)
+    (if (instance? LocalFsBlobStore blob-store)
+      (.setup-blobstore! storm-cluster-state conf-key nimbus-host-port-info (get-version-for-key conf-key nimbus-host-port-info conf)))
+    (.createBlob blob-store code-key (Utils/serialize topology) (SettableBlobMeta. BlobStoreAclHandler/DEFAULT) subject)
+    (if (instance? LocalFsBlobStore blob-store)
+      (.setup-blobstore! storm-cluster-state code-key nimbus-host-port-info (get-version-for-key code-key nimbus-host-port-info conf)))))
+
+(defn- read-storm-topology [storm-id blob-store]
+  (Utils/deserialize
+    (.readBlob blob-store (master-stormcode-key storm-id) (get-subject)) StormTopology))
+
+(defn get-blob-replication-count
+  [blob-key nimbus]
+  (if (:blob-store nimbus)
+        (-> (:blob-store nimbus)
+          (.getBlobReplication  blob-key nimbus-subject))))
+
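+;; Blocks (polling once per second) until the jar, code and conf blobs for the topology
+;; reach TOPOLOGY-MIN-REPLICATION-COUNT replicas in the blob store, or until
+;; TOPOLOGY-MAX-REPLICATION-WAIT-TIME-SEC elapses (a negative wait time means wait
+;; indefinitely), then logs whether the desired replication was reached.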
+(defn- wait-for-desired-code-replication [nimbus conf storm-id]
+  (let [min-replication-count (conf TOPOLOGY-MIN-REPLICATION-COUNT)
+        max-replication-wait-time (conf TOPOLOGY-MAX-REPLICATION-WAIT-TIME-SEC)
+        current-replication-count-jar (if (not (local-mode? conf))
+                                        (atom (get-blob-replication-count (master-stormjar-key storm-id) nimbus))
+                                        (atom min-replication-count))
+        current-replication-count-code (atom (get-blob-replication-count (master-stormcode-key storm-id) nimbus))
+        current-replication-count-conf (atom (get-blob-replication-count (master-stormconf-key storm-id) nimbus))
+        total-wait-time (atom 0)]
+    (if (:blob-store nimbus)
+      (while (and
+               (or (> min-replication-count @current-replication-count-jar)
+                   (> min-replication-count @current-replication-count-code)
+                   (> min-replication-count @current-replication-count-conf))
+               (or (neg? max-replication-wait-time)
+                   (< @total-wait-time max-replication-wait-time)))
+        (sleep-secs 1)
+        (log-debug "waiting for desired replication to be achieved.
+          min-replication-count = " min-replication-count " max-replication-wait-time = " max-replication-wait-time
+          (if (not (local-mode? conf))
+            (str " current-replication-count for jar key = " @current-replication-count-jar))
+          " current-replication-count for code key = " @current-replication-count-code
+          " current-replication-count for conf key = " @current-replication-count-conf
+          " total-wait-time " @total-wait-time)
+        (swap! total-wait-time inc)
+        (if (not (local-mode? conf))
+          (reset! current-replication-count-conf  (get-blob-replication-count (master-stormconf-key storm-id) nimbus)))
+        (reset! current-replication-count-code  (get-blob-replication-count (master-stormcode-key storm-id) nimbus))
+        (reset! current-replication-count-jar  (get-blob-replication-count (master-stormjar-key storm-id) nimbus))))
+    (if (and (<= min-replication-count @current-replication-count-conf)
+             (<= min-replication-count @current-replication-count-code)
+             (<= min-replication-count @current-replication-count-jar))
+      (log-message "desired replication count "  min-replication-count " achieved, "
+        "current-replication-count for conf key = " @current-replication-count-conf ", "
+        "current-replication-count for code key = " @current-replication-count-code ", "
+        "current-replication-count for jar key = " @current-replication-count-jar)
+      (log-message "desired replication count of "  min-replication-count " not achieved but we have hit the max wait time "
+        max-replication-wait-time " so moving on with replication count for conf key = " @current-replication-count-conf
+        " for code key = " @current-replication-count-code " for jar key = " @current-replication-count-jar))))
+
+(defn- read-storm-topology-as-nimbus [storm-id blob-store]
+  (Utils/deserialize
+    (.readBlob blob-store (master-stormcode-key storm-id) nimbus-subject) StormTopology))
+
+(declare compute-executor->component)
+
+(defn read-storm-conf-as-nimbus [storm-id blob-store]
+  (clojurify-structure
+    (Utils/fromCompressedJsonConf
+      (.readBlob blob-store (master-stormconf-key storm-id) nimbus-subject))))
+
+(defn read-topology-details [nimbus storm-id]
+  (let [blob-store (:blob-store nimbus)
+        storm-base (or
+                     (.storm-base (:storm-cluster-state nimbus) storm-id nil)
+                     (throw (NotAliveException. storm-id)))
+        topology-conf (read-storm-conf-as-nimbus storm-id blob-store)
+        topology (read-storm-topology-as-nimbus storm-id blob-store)
+        executor->component (->> (compute-executor->component nimbus storm-id)
+                                 (map-key (fn [[start-task end-task]]
+                                            (ExecutorDetails. (int start-task) (int end-task)))))]
+    (TopologyDetails. storm-id
+                      topology-conf
+                      topology
+                      (:num-workers storm-base)
+                      executor->component
+                      (:launch-time-secs storm-base))))
+
+;; Does not assume that clocks are synchronized. Executor heartbeat is only used so that
+;; nimbus knows when it's received a new heartbeat. All timing is done by nimbus and
+;; tracked through heartbeat-cache
+(defn- update-executor-cache [curr hb timeout]
+  (let [reported-time (:time-secs hb)
+        {last-nimbus-time :nimbus-time
+         last-reported-time :executor-reported-time} curr
+        reported-time (cond reported-time reported-time
+                            last-reported-time last-reported-time
+                            :else 0)
+        nimbus-time (if (or (not last-nimbus-time)
+                        (not= last-reported-time reported-time))
+                      (current-time-secs)
+                      last-nimbus-time
+                      )]
+      {:is-timed-out (and
+                       nimbus-time
+                       (>= (time-delta nimbus-time) timeout))
+       :nimbus-time nimbus-time
+       :executor-reported-time reported-time
+       :heartbeat hb}))
+
+(defn update-heartbeat-cache [cache executor-beats all-executors timeout]
+  (let [cache (select-keys cache all-executors)]
+    (into {}
+      (for [executor all-executors :let [curr (cache executor)]]
+        [executor
+         (update-executor-cache curr (get executor-beats executor) timeout)]
+         ))))
+
+(defn update-heartbeats! [nimbus storm-id all-executors existing-assignment]
+  (log-debug "Updating heartbeats for " storm-id " " (pr-str all-executors))
+  (let [storm-cluster-state (:storm-cluster-state nimbus)
+        executor-beats (.executor-beats storm-cluster-state storm-id (:executor->node+port existing-assignment))
+        cache (update-heartbeat-cache (@(:heartbeats-cache nimbus) storm-id)
+                                      executor-beats
+                                      all-executors
+                                      ((:conf nimbus) NIMBUS-TASK-TIMEOUT-SECS))]
+      (swap! (:heartbeats-cache nimbus) assoc storm-id cache)))
+
+(defn- update-all-heartbeats!
+  "update all the heartbeats for all the topologies' executors"
+  [nimbus existing-assignments topology->executors]
+  (doseq [[tid assignment] existing-assignments
+          :let [all-executors (topology->executors tid)]]
+    (update-heartbeats! nimbus tid all-executors assignment)))
+
+(defn- alive-executors
+  [nimbus ^TopologyDetails topology-details all-executors existing-assignment]
+  (log-debug "Computing alive executors for " (.getId topology-details) "\n"
+             "Executors: " (pr-str all-executors) "\n"
+             "Assignment: " (pr-str existing-assignment) "\n"
+             "Heartbeat cache: " (pr-str (@(:heartbeats-cache nimbus) (.getId topology-details)))
+             )
+  ;; TODO: need to consider all executors associated with a dead executor (in same slot) dead as well,
+  ;; don't just rely on heartbeat being the same
+  (let [conf (:conf nimbus)
+        storm-id (.getId topology-details)
+        executor-start-times (:executor->start-time-secs existing-assignment)
+        heartbeats-cache (@(:heartbeats-cache nimbus) storm-id)]
+    (->> all-executors
+        (filter (fn [executor]
+          (let [start-time (get executor-start-times executor)
+                is-timed-out (-> heartbeats-cache (get executor) :is-timed-out)]
+            (if (and start-time
+                   (or
+                    (< (time-delta start-time)
+                       (conf NIMBUS-TASK-LAUNCH-SECS))
+                    (not is-timed-out)
+                    ))
+              true
+              (do
+                (log-message "Executor " storm-id ":" executor " not alive")
+                false))
+            )))
+        doall)))
+
+
+(defn- to-executor-id [task-ids]
+  [(first task-ids) (last task-ids)])
+
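+;; Derives the executor list for a topology: maps tasks to components, groups each
+;; component's sorted task ids into the number of executors requested for that
+;; component (component->executors from the storm base), and represents each executor
+;; as a [first-task last-task] id pair.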
+(defn- compute-executors [nimbus storm-id]
+  (let [conf (:conf nimbus)
+        blob-store (:blob-store nimbus)
+        storm-base (.storm-base (:storm-cluster-state nimbus) storm-id nil)
+        component->executors (:component->executors storm-base)
+        storm-conf (read-storm-conf-as-nimbus storm-id blob-store)
+        topology (read-storm-topology-as-nimbus storm-id blob-store)
+        task->component (storm-task-info topology storm-conf)]
+    (->> (storm-task-info topology storm-conf)
+         reverse-map
+         (map-val sort)
+         (join-maps component->executors)
+         (map-val (partial apply partition-fixed))
+         (mapcat second)
+         (map to-executor-id)
+         )))
+
+(defn- compute-executor->component [nimbus storm-id]
+  (let [conf (:conf nimbus)
+        blob-store (:blob-store nimbus)
+        executors (compute-executors nimbus storm-id)
+        topology (read-storm-topology-as-nimbus storm-id blob-store)
+        storm-conf (read-storm-conf-as-nimbus storm-id blob-store)
+        task->component (storm-task-info topology storm-conf)
+        executor->component (into {} (for [executor executors
+                                           :let [start-task (first executor)
+                                                 component (task->component start-task)]]
+                                       {executor component}))]
+        executor->component))
+
+(defn- compute-topology->executors
+  "compute a topology-id -> executors map"
+  [nimbus storm-ids]
+  (into {} (for [tid storm-ids]
+             {tid (set (compute-executors nimbus tid))})))
+
+(defn- compute-topology->alive-executors
+  "compute a topology-id -> alive executors map"
+  [nimbus existing-assignments topologies topology->executors scratch-topology-id]
+  (into {} (for [[tid assignment] existing-assignments
+                 :let [topology-details (.getById topologies tid)
+                       all-executors (topology->executors tid)
+                       alive-executors (if (and scratch-topology-id (= scratch-topology-id tid))
+                                         all-executors
+                                         (set (alive-executors nimbus topology-details all-executors assignment)))]]
+             {tid alive-executors})))
+
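+;; Maps supervisor id -> set of ports whose workers are running executors that are no
+;; longer alive; those slots can be handed back to the scheduler on the next pass.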
+(defn- compute-supervisor->dead-ports [nimbus existing-assignments topology->executors topology->alive-executors]
+  (let [dead-slots (into [] (for [[tid assignment] existing-assignments
+                                  :let [all-executors (topology->executors tid)
+                                        alive-executors (topology->alive-executors tid)
+                                        dead-executors (set/difference all-executors alive-executors)
+                                        dead-slots (->> (:executor->node+port assignment)
+                                                        (filter #(contains? dead-executors (first %)))
+                                                        vals)]]
+                              dead-slots))
+        supervisor->dead-ports (->> dead-slots
+                                    (apply concat)
+                                    (map (fn [[sid port]] {sid #{port}}))
+                                    (apply (partial merge-with set/union)))]
+    (or supervisor->dead-ports {})))
+
+(defn- compute-topology->scheduler-assignment
+  "convert assignment information in zk to SchedulerAssignment, so it can be used by scheduler api."
+  [nimbus existing-assignments topology->alive-executors]
+  (into {} (for [[tid assignment] existing-assignments
+                 :let [alive-executors (topology->alive-executors tid)
+                       executor->node+port (:executor->node+port assignment)
+                       worker->resources (:worker->resources assignment)
+                       ;; making a map from node+port to WorkerSlot with allocated resources
+                       node+port->slot (into {} (for [[[node port] [mem-on-heap mem-off-heap cpu]] worker->resources]
+                                                  {[node port]
+                                                   (doto (WorkerSlot. node port)
+                                                     (.allocateResource
+                                                       mem-on-heap
+                                                       mem-off-heap
+                                                       cpu))}))
+                       executor->slot (into {} (for [[executor [node port]] executor->node+port]
+                                                 ;; filter out the dead executors
+                                                 (if (contains? alive-executors executor)
+                                                   {(ExecutorDetails. (first executor)
+                                                                      (second executor))
+                                                    (get node+port->slot [node port])}
+                                                   {})))]]
+             {tid (SchedulerAssignmentImpl. tid executor->slot)})))
+
+(defn- read-all-supervisor-details
+  "return a map: {supervisor-id SupervisorDetails}"
+  [nimbus all-scheduling-slots supervisor->dead-ports]
+  (let [storm-cluster-state (:storm-cluster-state nimbus)
+        supervisor-infos (all-supervisor-info storm-cluster-state)
+        nonexistent-supervisor-slots (apply dissoc all-scheduling-slots (keys supervisor-infos))
+        all-supervisor-details (into {} (for [[sid supervisor-info] supervisor-infos
+                                              :let [hostname (:hostname supervisor-info)
+                                                    scheduler-meta (:scheduler-meta supervisor-info)
+                                                    dead-ports (supervisor->dead-ports sid)
+                                                    ;; hide the dead-ports from the all-ports
+                                                    ;; these dead-ports can be reused in next round of assignments
+                                                    all-ports (-> (get all-scheduling-slots sid)
+                                                                  (set/difference dead-ports)
+                                                                  ((fn [ports] (map int ports))))
+                                                    supervisor-details (SupervisorDetails. sid hostname scheduler-meta all-ports (:resources-map supervisor-info))
+                                                    ]]
+                                          {sid supervisor-details}))]
+    (merge all-supervisor-details
+           (into {}
+              (for [[sid ports] nonexistent-supervisor-slots]
+                [sid (SupervisorDetails. sid nil ports)]))
+           )))
+
+(defn- compute-topology->executor->node+port
+  "convert {topology-id -> SchedulerAssignment} to
+           {topology-id -> {executor [node port]}}"
+  [scheduler-assignments]
+  (map-val (fn [^SchedulerAssignment assignment]
+             (->> assignment
+                  .getExecutorToSlot
+                  (#(into {} (for [[^ExecutorDetails executor ^WorkerSlot slot] %]
+                              {[(.getStartTask executor) (.getEndTask executor)]
+                               [(.getNodeId slot) (.getPort slot)]})))))
+           scheduler-assignments))
+
+;; NEW NOTES
+;; only assign to supervisors who are there and haven't timed out
+;; need to reassign workers with executors that have timed out (will this make it brittle?)
+;; need to read in the topology and storm-conf from disk
+;; if no slots available and no slots used by this storm, just skip and do nothing
+;; otherwise, package rest of executors into available slots (up to how much it needs)
+
+;; in the future could allocate executors intelligently (so that "close" tasks reside on same machine)
+
+;; TODO: slots that have dead executor should be reused as long as supervisor is active
+
+
+;; (defn- assigned-slots-from-scheduler-assignments [topology->assignment]
+;;   (->> topology->assignment
+;;        vals
+;;        (map (fn [^SchedulerAssignment a] (.getExecutorToSlot a)))
+;;        (mapcat vals)
+;;        (map (fn [^WorkerSlot s] {(.getNodeId s) #{(.getPort s)}}))
+;;        (apply merge-with set/union)
+;;        ))
+
+(defn num-used-workers [^SchedulerAssignment scheduler-assignment]
+  (if scheduler-assignment
+    (count (.getSlots scheduler-assignment))
+    0 ))
+
+(defn convert-assignments-to-worker->resources
+  "convert {topology-id -> SchedulerAssignment} to
+           {topology-id -> {[node port] [mem-on-heap mem-off-heap cpu]}}.
+   Make sure this can deal with other non-RAS schedulers;
+   later we may further support map-for-any-resources."
+  [new-scheduler-assignments]
+  (map-val (fn [^SchedulerAssignment assignment]
+             (->> assignment
+                  .getExecutorToSlot
+                  .values
+                  (#(into {} (for [^WorkerSlot slot %]
+                              {[(.getNodeId slot) (.getPort slot)]
+                               [(.getAllocatedMemOnHeap slot) (.getAllocatedMemOffHeap slot) (.getAllocatedCpu slot)]
+                               })))))
+           new-scheduler-assignments))
+
+(defn compute-new-topology->executor->node+port [new-scheduler-assignments existing-assignments]
+  (let [new-topology->executor->node+port (compute-topology->executor->node+port new-scheduler-assignments)]
+    ;; print some useful information.
+    (doseq [[topology-id executor->node+port] new-topology->executor->node+port
+            :let [old-executor->node+port (-> topology-id
+                                              existing-assignments
+                                              :executor->node+port)
+                  reassignment (filter (fn [[executor node+port]]
+                                         (and (contains? old-executor->node+port executor)
+                                              (not (= node+port (old-executor->node+port executor)))))
+                                       executor->node+port)]]
+      (when-not (empty? reassignment)
+        (let [new-slots-cnt (count (set (vals executor->node+port)))
+              reassign-executors (keys reassignment)]
+          (log-message "Reassigning " topology-id " to " new-slots-cnt " slots")
+          (log-message "Reassign executors: " (vec reassign-executors)))))
+
+    new-topology->executor->node+port))
+
+;; public so it can be mocked out
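+;; Runs one scheduling pass: refreshes executor heartbeats, works out which executors
+;; are still alive and which supervisor ports are dead, converts the existing ZK
+;; assignments into SchedulerAssignments, builds the Cluster view, and asks the
+;; configured scheduler to produce new assignments for all topologies.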
+(defn compute-new-scheduler-assignments [nimbus existing-assignments topologies scratch-topology-id]
+  (let [conf (:conf nimbus)
+        storm-cluster-state (:storm-cluster-state nimbus)
+        topology->executors (compute-topology->executors nimbus (keys existing-assignments))
+        ;; update the executors heartbeats first.
+        _ (update-all-heartbeats! nimbus existing-assignments topology->executors)
+        topology->alive-executors (compute-topology->alive-executors nimbus
+                                                                     existing-assignments
+                                                                     topologies
+                                                                     topology->executors
+                                                                     scratch-topology-id)
+        supervisor->dead-ports (compute-supervisor->dead-ports nimbus
+                                                               existing-assignments
+                                                               topology->executors
+                                                               topology->alive-executors)
+        topology->scheduler-assignment (compute-topology->scheduler-assignment nimbus
+                                                                               existing-assignments
+                                                                               topology->alive-executors)
+
+        missing-assignment-topologies (->> topologies
+                                           .getTopologies
+                                           (map (memfn getId))
+                                           (filter (fn [t]
+                                                     (let [alle (get topology->executors t)
+                                                           alivee (get topology->alive-executors t)]
+                                                       (or (empty? alle)
+                                                           (not= alle alivee)
+                                                           (< (-> topology->scheduler-assignment
+                                                                  (get t)
+                                                                  num-used-workers )
+                                                              (-> topologies (.getById t) .getNumWorkers)))))))
+        all-scheduling-slots (->> (all-scheduling-slots nimbus topologies missing-assignment-topologies)
+                                  (map (fn [[node-id port]] {node-id #{port}}))
+                                  (apply merge-with set/union))
+
+        supervisors (read-all-supervisor-details nimbus all-scheduling-slots supervisor->dead-ports)
+        cluster (Cluster. (:inimbus nimbus) supervisors topology->scheduler-assignment conf)
+        _ (.setStatusMap cluster (deref (:id->sched-status nimbus)))
+        ;; call scheduler.schedule to schedule all the topologies
+        ;; the new assignments for all the topologies are in the cluster object.
+        _ (.schedule (:scheduler nimbus) topologies cluster)
+        _ (.setResourcesMap cluster @(:id->resources nimbus))
+        _ (if-not (conf SCHEDULER-DISPLAY-RESOURCE) (.updateAssignedMemoryForTopologyAndSupervisor cluster topologies))
+        ;;merge with existing statuses
+        _ (reset! (:id->sched-status nimbus) (merge (deref (:id->sched-status nimbus)) (.getStatusMap cluster)))
+        _ (reset! (:node-id->resources nimbus) (.getSupervisorsResourcesMap cluster))
+        _ (reset! (:id->resources nimbus) (.getResourcesMap cluster))]
+    (.getAssignments cluster)))
+
+(defn changed-executors [executor->node+port new-executor->node+port]
+  (let [executor->node+port (if executor->node+port (sort executor->node+port) nil)
+        new-executor->node+port (if new-executor->node+port (sort new-executor->node+port) nil)
+        slot-assigned (reverse-map executor->node+port)
+        new-slot-assigned (reverse-map new-executor->node+port)
+        brand-new-slots (map-diff slot-assigned new-slot-assigned)]
+    (apply concat (vals brand-new-slots))
+    ))
+
+(defn newly-added-slots [existing-assignment new-assignment]
+  (let [old-slots (-> (:executor->node+port existing-assignment)
+                      vals
+                      set)
+        new-slots (-> (:executor->node+port new-assignment)
+                      vals
+                      set)]
+    (set/difference new-slots old-slots)))
+
+
+(defn basic-supervisor-details-map [storm-cluster-state]
+  (let [infos (all-supervisor-info storm-cluster-state)]
+    (->> infos
+         (map (fn [[id info]]
+                 [id (SupervisorDetails. id (:hostname info) (:scheduler-meta info) nil (:resources-map info))]))
+         (into {}))))
+
+(defn- to-worker-slot [[node port]]
+  (WorkerSlot. node port))
+
+;; get existing assignment (just the executor->node+port map) -> default to {}
+;; filter out ones which have a executor timeout
+;; figure out available slots on cluster. add to that the used valid slots to get total slots. figure out how many executors should be in each slot (e.g., 4, 4, 4, 5)
+;; only keep existing slots that satisfy one of those slots. for rest, reassign them across remaining slots
+;; edge case for slots with no executor timeout but with supervisor timeout... just treat these as valid slots that can be reassigned to. worst comes to worst, the executor will time out and won't be assigned here next time around
+(defnk mk-assignments [nimbus :scratch-topology-id nil]
+  (if (is-leader nimbus :throw-exception false)
+    (let [conf (:conf nimbus)
+        storm-cluster-state (:storm-cluster-state nimbus)
+        ^INimbus inimbus (:inimbus nimbus)
+        ;; read all the topologies
+        topology-ids (.active-storms storm-cluster-state)
+        topologies (into {} (for [tid topology-ids]
+                              {tid (read-topology-details nimbus tid)}))
+        topologies (Topologies. topologies)
+        ;; read all the assignments
+        assigned-topology-ids (.assignments storm-cluster-state nil)
+        existing-assignments (into {} (for [tid assigned-topology-ids]
+                                        ;; for the topology being rebalanced (specified by the scratch-topology-id)
+                                        ;; we exclude its assignment, meaning that all the slots occupied by its assignment
+                                        ;; will be treated as free slots in the scheduler code.
+                                        (when (or (nil? scratch-topology-id) (not= tid scratch-topology-id))
+                                          {tid (.assignment-info storm-cluster-state tid nil)})))
+        ;; make the new assignments for topologies
+        new-scheduler-assignments (compute-new-scheduler-assignments
+                                       nimbus
+                                       existing-assignments
+                                       topologies
+                                       scratch-topology-id)
+
+        topology->executor->node+port (compute-new-topology->executor->node+port new-scheduler-assignments existing-assignments)
+
+        topology->executor->node+port (merge (into {} (for [id assigned-topology-ids] {id nil})) topology->executor->node+port)
+        new-assigned-worker->resources (convert-assignments-to-worker->resources new-scheduler-assignments)
+        now-secs (current-time-secs)
+
+        basic-supervisor-details-map (basic-supervisor-details-map storm-cluster-state)
+
+        ;; construct the final Assignments by adding start-times etc into it
+        new-assignments (into {} (for [[topology-id executor->node+port] topology->executor->node+port
+                                        :let [existing-assignment (get existing-assignments topology-id)
+                                              all-nodes (->> executor->node+port vals (map first) set)
+                                              node->host (->> all-nodes
+                                                              (mapcat (fn [node]
+                                                                        (if-let [host (.getHostName inimbus basic-supervisor-details-map node)]
+                                                                          [[node host]]
+                                                                          )))
+                                                              (into {}))
+                                              all-node->host (merge (:node->host existing-assignment) node->host)
+                                              reassign-executors (changed-executors (:executor->node+port existing-assignment) executor->node+port)
+                                              start-times (merge (:executor->start-time-secs existing-assignment)
+                                                                (into {}
+                                                                      (for [id reassign-executors]
+                                                                        [id now-secs]
+                                                                        )))
+                                              worker->resources (get new-assigned-worker->resources topology-id)]]
+                                   {topology-id (Assignment.
+                                                 (conf STORM-LOCAL-DIR)
+                                                 (select-keys all-node->host all-nodes)
+                                                 executor->node+port
+                                                 start-times
+                                                 worker->resources)}))]
+
+    ;; tasks figure out what tasks to talk to by looking at topology at runtime
+    ;; only log/set when there's been a change to the assignment
+    (doseq [[topology-id assignment] new-assignments
+            :let [existing-assignment (get existing-assignments topology-id)
+                  topology-details (.getById topologies topology-id)]]
+      (if (= existing-assignment assignment)
+        (log-debug "Assignment for " topology-id " hasn't changed")
+        (do
+          (log-message "Setting new assignment for topology id " topology-id ": " (pr-str assignment))
+          (.set-assignment! storm-cluster-state topology-id assignment)
+          )))
+    (->> new-assignments
+          (map (fn [[topology-id assignment]]
+            (let [existing-assignment (get existing-assignments topology-id)]
+              [topology-id (map to-worker-slot (newly-added-slots existing-assignment assignment))]
+              )))
+          (into {})
+          (.assignSlots inimbus topologies)))
+    (log-message "not a leader, skipping assignments")))
+
+(defn notify-topology-action-listener [nimbus storm-id action]
+  (let [topology-action-notifier (:nimbus-topology-action-notifier nimbus)]
+    (when (not-nil? topology-action-notifier)
+      (try (.notify topology-action-notifier storm-id action)
+        (catch Exception e
+        (log-warn-error e "Ignoring exception from Topology action notifier for storm-Id " storm-id))))))
+
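+;; Records a newly submitted topology as active (or inactive) in cluster state by
+;; writing its StormBase: launch time, initial status, worker count, per-component
+;; executor counts and the submitter user; then notifies any topology action listener.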
+(defn- start-storm [nimbus storm-name storm-id topology-initial-status]
+  {:pre [(#{:active :inactive} topology-initial-status)]}
+  (let [storm-cluster-state (:storm-cluster-state nimbus)
+        conf (:conf nimbus)
+        blob-store (:blob-store nimbus)
+        storm-conf (read-storm-conf conf storm-id blob-store)
+        topology (system-topology! storm-conf (read-storm-topology storm-id blob-store))
+        num-executors (->> (all-components topology) (map-val num-start-executors))]
+    (log-message "Activating " storm-name ": " storm-id)
+    (.activate-storm! storm-cluster-state
+                      storm-id
+                      (StormBase. storm-name
+                                  (current-time-secs)
+                                  {:type topology-initial-status}
+                                  (storm-conf TOPOLOGY-WORKERS)
+                                  num-executors
+                                  (storm-conf TOPOLOGY-SUBMITTER-USER)
+                                  nil
+                                  nil
+                                  {}))
+    (notify-topology-action-listener nimbus storm-name "activate")))
+
+;; Master:
+;; job submit:
+;; 1. read which nodes are available
+;; 2. set assignments
+;; 3. start storm - necessary in case master goes down, when goes back up can remember to take down the storm (2 states: on or off)
+
+(defn storm-active? [storm-cluster-state storm-name]
+  (not-nil? (get-storm-id storm-cluster-state storm-name)))
+
+(defn check-storm-active! [nimbus storm-name active?]
+  (if (= (not active?)
+         (storm-active? (:storm-cluster-state nimbus)
+                        storm-name))
+    (if active?
+      (throw (NotAliveException. (str storm-name " is not alive")))
+      (throw (AlreadyAliveException. (str storm-name " is already active"))))
+    ))
+
+(defn check-authorization!
+  ([nimbus storm-name storm-conf operation context]
+     (let [aclHandler (:authorization-handler nimbus)
+           impersonation-authorizer (:impersonation-authorization-handler nimbus)
+           ctx (or context (ReqContext/context))
+           check-conf (if storm-conf storm-conf (if storm-name {TOPOLOGY-NAME storm-name}))]
+       (log-thrift-access (.requestID ctx) (.remoteAddress ctx) (.principal ctx) operation)
+       (if (.isImpersonating ctx)
+         (do
+          (log-warn "principal: " (.realPrincipal ctx) " is trying to impersonate principal: " (.principal ctx))
+          (if impersonation-authorizer
+           (if-not (.permit impersonation-authorizer ctx operation check-conf)
+             (throw (AuthorizationException. (str "principal " (.realPrincipal ctx) " is not authorized to impersonate
+                        principal " (.principal ctx) " from host " (.remoteAddress ctx) " Please see SECURITY.MD to learn
+                        how to configure impersonation acls."))))
+           (log-warn "impersonation attempt but " NIMBUS-IMPERSONATION-AUTHORIZER " has no authorizer configured. potential
+                      security risk, please see SECURITY.MD to learn how to configure impersonation authorizer."))))
+
+       (if aclHandler
+         (if-not (.permit aclHandler ctx operation check-conf)
+           (throw (AuthorizationException. (str operation (if storm-name (str " on topology " storm-name)) " is not authorized")))
+           ))))
+  ([nimbus storm-name storm-conf operation]
+     (check-authorization! nimbus storm-name storm-conf operation (ReqContext/context))))
+
+(defn code-ids [blob-store]
+  (let [to-id (reify KeyFilter
+                (filter [this key] (get-id-from-blob-key key)))]
+    (set (.filterAndListKeys blob-store to-id))))
+
+(defn cleanup-storm-ids [conf storm-cluster-state blob-store]
+  (let [heartbeat-ids (set (.heartbeat-storms storm-cluster-state))
+        error-ids (set (.error-topologies storm-cluster-state))
+        code-ids (code-ids blob-store)
+        assigned-ids (set (.active-storms storm-cluster-state))]
+    (set/difference (set/union heartbeat-ids error-ids code-ids) assigned-ids)
+    ))
+
+(defn extract-status-str [base]
+  (let [t (-> base :status :type)]
+    (.toUpperCase (name t))
+    ))
+
+(defn mapify-serializations [sers]
+  (->> sers
+       (map (fn [e] (if (map? e) e {e nil})))
+       (apply merge)
+       ))
+
+(defn- component-parallelism [storm-conf component]
+  (let [storm-conf (merge storm-conf (component-conf component))
+        num-tasks (or (storm-conf TOPOLOGY-TASKS) (num-start-executors component))
+        max-parallelism (storm-conf TOPOLOGY-MAX-TASK-PARALLELISM)
+        ]
+    (if max-parallelism
+      (min max-parallelism num-tasks)
+      num-tasks)))
+
+(defn normalize-topology [storm-conf ^StormTopology topology]
+  (let [ret (.deepCopy topology)]
+    (doseq [[_ component] (all-components ret)]
+      (.set_json_conf
+        (.get_common component)
+        (->> {TOPOLOGY-TASKS (component-parallelism storm-conf component)}
+             (merge (component-conf component))
+             to-json )))
+    ret ))
+
+(defn normalize-conf [conf storm-conf ^StormTopology topology]
+  ;; ensure that serializations are same for all tasks no matter what's on
+  ;; the supervisors. this also allows you to declare the serializations as a sequence
+  (let [component-confs (map
+                         #(-> (ThriftTopologyUtils/getComponentCommon topology %)
+                              .get_json_conf
+                              from-json)
+                         (ThriftTopologyUtils/getComponentIds topology))
+        total-conf (merge conf storm-conf)
+
+        get-merged-conf-val (fn [k merge-fn]
+                              (merge-fn
+                               (concat
+                                (mapcat #(get % k) component-confs)
+                                (or (get storm-conf k)
+                                    (get conf k)))))]
+    ;; topology level serialization registrations take priority
+    ;; that way, if there's a conflict, a user can force which serialization to use
+    ;; append component conf to storm-conf
+    (merge storm-conf
+           {TOPOLOGY-KRYO-DECORATORS (get-merged-conf-val TOPOLOGY-KRYO-DECORATORS distinct)
+            TOPOLOGY-KRYO-REGISTER (get-merged-conf-val TOPOLOGY-KRYO-REGISTER mapify-serializations)
+            TOPOLOGY-ACKER-EXECUTORS (total-conf TOPOLOGY-ACKER-EXECUTORS)
+            TOPOLOGY-EVENTLOGGER-EXECUTORS (total-conf TOPOLOGY-EVENTLOGGER-EXECUTORS)
+            TOPOLOGY-MAX-TASK-PARALLELISM (total-conf TOPOLOGY-MAX-TASK-PARALLELISM)})))
+
+(defn blob-rm-key [blob-store key storm-cluster-state]
+  (try
+    (.deleteBlob blob-store key nimbus-subject)
+    (if (instance? LocalFsBlobStore blob-store)
+      (.remove-blobstore-key! storm-cluster-state key))
+    (catch Exception e
+      (log-message "Exception while deleting blob key " key " " e))))
+
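+;; Removes the stormjar, stormconf, and stormcode blobs for the given topology id.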
+(defn blob-rm-topology-keys [id blob-store storm-cluster-state]
+  (blob-rm-key blob-store (master-stormjar-key id) storm-cluster-state)
+  (blob-rm-key blob-store (master-stormconf-key id) storm-cluster-state)
+  (blob-rm-key blob-store (master-stormcode-key id) storm-cluster-state))
+
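+;; On the leader nimbus, removes heartbeat, error, stormdist, blob, and cached-heartbeat
+;; state for topologies that are no longer active.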
+(defn do-cleanup [nimbus]
+  (if (is-leader nimbus :throw-exception false)
+    (let [storm-cluster-state (:storm-cluster-state nimbus)
+          conf (:conf nimbus)
+          submit-lock (:submit-lock nimbus)
+          blob-store (:blob-store nimbus)]
+      (let [to-cleanup-ids (locking submit-lock
+                             (cleanup-storm-ids conf storm-cluster-state blob-store))]
+        (when-not (empty? to-cleanup-ids)
+          (doseq [id to-cleanup-ids]
+            (log-message "Cleaning up " id)
+            (.teardown-heartbeats! storm-cluster-state id)
+            (.teardown-topology-errors! storm-cluster-state id)
+            (rmr (master-stormdist-root conf id))
+            (blob-rm-topology-keys id blob-store storm-cluster-state)
+            (swap! (:heartbeats-cache nimbus) dissoc id)))))
+    (log-message "not a leader, skipping cleanup")))
+
+(defn- file-older-than? [now seconds file]
+  (<= (+ (.lastModified file) (to-millis seconds)) (to-millis now)))
+
+(defn clean-inbox
+  "Deletes jar files in dir-location that are older than the given number of seconds."
+  [dir-location seconds]
+  (let [now (current-time-secs)
+        pred #(and (.isFile %) (file-older-than? now seconds %))
+        files (filter pred (file-seq (File. dir-location)))]
+    (doseq [f files]
+      (if (.delete f)
+        (log-message "Cleaning inbox ... deleted: " (.getName f))
+        ;; This should never happen
+        (log-error "Cleaning inbox ... error deleting: " (.getName f))))))
+
+(defn clean-topology-history
+  "Deletes topology history entries older than mins minutes."
+  [mins nimbus]
+  (locking (:topology-history-lock nimbus)
+    (let [cutoff-age (- (current-time-secs) (* mins 60))
+          topo-history-state (:topo-history-state nimbus)
+          curr-history (vec (ls-topo-hist topo-history-state))
+          new-history (vec (filter (fn [line]
+                                     (> (line :timestamp) cutoff-age)) curr-history))]
+      (ls-topo-hist! topo-history-state new-history))))
+
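+;; Removes cluster state (and, for a local blob store, the blobstore keys) for active
+;; topologies whose code blobs are missing.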
+(defn cleanup-corrupt-topologies! [nimbus]
+  (let [storm-cluster-state (:storm-cluster-state nimbus)
+        blob-store (:blob-store nimbus)
+        code-ids (set (code-ids blob-store))
+        active-topologies (set (.active-storms storm-cluster-state))
+        corrupt-topologies (set/difference active-topologies code-ids)]
+    (doseq [corrupt corrupt-topologies]
+      (log-message "Corrupt topology " corrupt " has state on zookeeper but doesn't have a local dir on Nimbus. Cleaning up...")
+      (.remove-storm! storm-cluster-state corrupt)
+      (if (instance? LocalFsBlobStore blob-store)
+        (doseq [blob-key (get-key-list-from-id (:conf nimbus) corrupt)]
+          (.remove-blobstore-key! storm-cluster-state blob-key))))))
+
+(defn setup-blobstore
+  "Sets up blobstore state for all current keys."
+  [nimbus]
+  (let [storm-cluster-state (:storm-cluster-state nimbus)
+        blob-store (:blob-store nimbus)
+        local-set-of-keys (set (get-key-seq-from-blob-store blob-store))
+        all-keys (set (.active-keys storm-cluster-state))
+        locally-available-active-keys (set/intersection local-set-of-keys all-keys)
+        keys-to-delete (set/difference local-set-of-keys all-keys)
+        conf (:conf nimbus)
+        nimbus-host-port-info (:nimbus-host-port-info nimbus)]
+    (log-debug "Deleting keys not on zookeeper: " keys-to-delete)
+    (doseq [key keys-to-delete]
+      (.deleteBlob blob-store key nimbus-subject))
+    (log-debug "Creating list of key entries for blobstore inside zookeeper " all-keys " local " locally-available-active-keys)
+    (doseq [key locally-available-active-keys]
+      (.setup-blobstore! storm-cluster-state key (:nimbus-host-port-info nimbus) (get-version-for-key key nimbus-host-port-info conf)))))
+
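+;; Converts the stored errors for a component into Thrift ErrorInfo objects with host and port populated.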
+(defn- get-errors [storm-cluster-state storm-id component-id]
+  (->> (.errors storm-cluster-state storm-id component-id)
+       (map #(doto (ErrorInfo. (:error %) (:time-secs %))
+                   (.set_host (:host %))
+                   (.set_port (:port %))))))
+
+(defn- thriftify-executor-id [[first-task-id last-task-id]]
+  (ExecutorInfo. (int first-task-id) (int last-task-id)))
+
+(def DISALLOWED-TOPOLOGY-NAME-STRS #{"/" "." ":" "\\"})
+
+(defn validate-topology-name! [name]
+  (if (some #(.contains name %) DISALLOWED-TOPOLOGY-NAME-STRS)
+    (throw (InvalidTopologyException.
+            (str "Topology name cannot contain any of the following: " (pr-str DISALLOWED-TOPOLOGY-NAME-STRS))))
+  (if (clojure.string/blank? name)
+    (throw (InvalidTopologyException.
+            "Topology name cannot be blank")))))
+
+;; Only files under <Storm dist root>/<Topology ID>/<File>
+;; may be accessed via Thrift
+;; ex., storm-local/nimbus/stormdist/aa-1-1377104853/stormjar.jar
+(defn check-file-access [conf file-path]
+  (log-debug "check file access:" file-path)
+  (try
+    (if (not= (.getCanonicalFile (File. (master-stormdist-root conf)))
+          (-> (File. file-path) .getCanonicalFile .getParentFile .getParentFile))
+      (throw (AuthorizationException. (str "Invalid file path: " file-path))))
+    (catch Exception e
+      (throw (AuthorizationException. (str "Invalid file path: " file-path))))))
+
+(defn try-read-storm-conf
+  [conf storm-id blob-store]
+  (try-cause
+    (read-storm-conf-as-nimbus storm-id blob-store)
+    (catch KeyNotFoundException e
+      (throw (NotAliveException. (str storm-id))))))
+
+(defn try-read-storm-conf-from-name
+  [conf storm-name nimbus]
+  (let [storm-cluster-state (:storm-cluster-state nimbus)
+        blob-store (:blob-store nimbus)
+        id (get-storm-id storm-cluster-state storm-name)]
+    (try-read-storm-conf conf id blob-store)))
+
+(defn try-read-storm-topology
+  [storm-id blob-store]
+  (try-cause
+    (read-storm-topology-as-nimbus storm-id blob-store)
+    (catch KeyNotFoundException e
+      (throw (NotAliveException. (str storm-id))))))
+
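+;; Appends an entry (topology id, timestamp, and the users/groups allowed to view its logs)
+;; to the persisted topology history.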
+(defn add-topology-to-history-log
+  [storm-id nimbus topology-conf]
+  (log-message "Adding topo to history log: " storm-id)
+  (locking (:topology-history-lock nimbus)
+    (let [topo-history-state (:topo-history-state nimbus)
+          users (get-topo-logs-users topology-conf)
+          groups (get-topo-logs-groups topology-conf)
+          curr-history (vec (ls-topo-hist topo-history-state))
+          new-history (conj curr-history {:topoid storm-id :timestamp (current-time-secs)
+                                          :users users :groups groups})]
+      (ls-topo-hist! topo-history-state new-history))))
+
+(defn igroup-mapper
+  [storm-conf]
+  (AuthUtils/GetGroupMappingServiceProviderPlugin storm-conf))
+
+(defn user-groups
+  [user storm-conf]
+  (if (clojure.string/blank? user) [] (.getGroups (igroup-mapper storm-conf) user)))
+
+(defn does-users-group-intersect?
+  "Checks whether any of the user's groups intersect with the list of groups passed in."
+  [user groups-to-check storm-conf]
+  (let [groups (user-groups user storm-conf)]
+    (> (.size (set/intersection (set groups) (set groups-to-check))) 0)))
+
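+;; Returns the topology ids from the history log that the given user may see
+;; (all of them when user is nil; otherwise admins, listed users, or matching groups).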
+(defn read-topology-history
+  [nimbus user admin-users]
+  (let [topo-history-state (:topo-history-state nimbus)
+        curr-history (vec (ls-topo-hist topo-history-state))
+        topo-user-can-access (fn [line user storm-conf]
+                               (if (nil? user)
+                                 (line :topoid)
+                                 (if (or (some #(= % user) admin-users)
+                                       (does-users-group-intersect? user (line :groups) storm-conf)
+                                       (some #(= % user) (line :users)))
+                                   (line :topoid)
+                                   nil)))]
+    (remove nil? (map #(topo-user-can-access % user (:conf nimbus)) curr-history))))
+
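+;; On the leader nimbus, runs the configured credential renewers over every active topology
+;; and stores any updated credentials back in the cluster state.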
+(defn renew-credentials [nimbus]
+  (if (is-leader nimbus :throw-exception false)
+    (let [storm-cluster-state (:storm-cluster-state nimbus)
+          blob-store (:blob-store nimbus)
+          renewers (:cred-renewers nimbus)
+          update-lock (:cred-update-lock nimbus)
+          assigned-ids (set (.active-storms storm-cluster-state))]
+      (when-not (empty? assigned-ids)
+        (doseq [id assigned-ids]
+          (locking update-lock
+            (let [orig-creds (.credentials storm-cluster-state id nil)
+                  topology-conf (try-read-storm-conf (:conf nimbus) id blob-store)]
+              (if orig-creds
+                (let [new-creds (HashMap. orig-creds)]
+                  (doseq [renewer renewers]
+                    (log-message "Renewing Creds For " id " with " renewer)
+                    (.renew renewer new-creds (Collections/unmodifiableMap topology-conf)))
+                  (when-not (= orig-creds new-creds)
+                    (.set-credentials! storm-cluster-state id new-creds topology-conf)
+                    ))))))))
+    (log-message "not a leader, skipping credential renewal.")))
+
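+;; Rejects topologies that request more workers or executors than the nimbus per-topology limits allow.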
+(defn validate-topology-size [topo-conf nimbus-conf topology]
+  (let [workers-count (get topo-conf TOPOLOGY-WORKERS)
+        workers-allowed (get nimbus-conf NIMBUS-SLOTS-PER-TOPOLOGY)
+        num-executors (->> (all-components topology) (map-val num-start-executors))
+        executors-count (reduce + (vals num-executors))
+        executors-allowed (get nimbus-conf NIMBUS-EXECUTORS-PER-TOPOLOGY)]
+    (when (and
+           (not (nil? executors-allowed))
+           (> executors-count executors-allowed))
+      (throw
+       (InvalidTopologyException.
+        (str "Failed to submit topology. Topology requests more than " executors-allowed " executors."))))
+    (when (and
+           (not (nil? workers-allowed))
+           (> workers-count workers-allowed))
+      (throw
+       (InvalidTopologyException.
+        (str "Failed to submit topology. Topology requests more than " workers-allowed " workers."))))))
+
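+;; Translates the requested reset-log-level timeout (in seconds) into an absolute epoch
+;; on the LogConfig, or clears it otherwise.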
+(defn- set-logger-timeouts [log-config]
+  (let [timeout-secs (.get_reset_log_level_timeout_secs log-config)
+       timeout (time/plus (time/now) (time/secs timeout-secs))]
+   (if (time/after? timeout (time/now))
+     (.set_reset_log_level_timeout_epoch log-config (coerce/to-long timeout))
+     (.unset_reset_log_level_timeout_epoch log-config))))
+
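+;; On non-leader nimbuses, reconciles the local blob store against the key set in zookeeper
+;; by running a BlobSynchronizer.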
+(defmethod blob-sync :distributed [conf nimbus]
+  (if (not (is-leader nimbus :throw-exception false))
+    (let [storm-cluster-state (:storm-cluster-state nimbus)
+          nimbus-host-port-info (:nimbus-host-port-info nimbus)
+          blob-store-key-set (set (get-key-seq-from-blob-store (:blob-store nimbus)))
+          zk-key-set (set (.blobstore storm-cluster-state (fn [] (blob-sync conf nimbus))))]
+      (log-debug "blob-sync " "blob-store-keys " blob-store-key-set "zookeeper-keys " zk-key-set)
+      (let [sync-blobs (doto
+                          (BlobSynchronizer. (:blob-store nimbus) conf)
+                          (.setNimbusInfo nimbus-host-port-info)
+                          (.setBlobStoreKeySet blob-store-key-set)
+                          (.setZookeeperKeySet zk-key-set))]
+        (.syncBlobs sync-blobs)))))
+
+(defmethod blob-sync :local [conf nimbus]
+  nil)
+
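+;; Main entry point for the Nimbus daemon: registers this host with the cluster state,
+;; joins leader election, schedules the cleanup and credential-renewal timers, and
+;; implements the Nimbus$Iface Thrift service.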
+(defserverfn service-handler [conf inimbus]
+  (.prepare inimbus conf (master-inimbus-dir conf))
+  (log-message "Starting Nimbus with conf " conf)
+  (let [nimbus (nimbus-data conf inimbus)
+        blob-store (:blob-store nimbus)
+        principal-to-local (AuthUtils/GetPrincipalToLocalPlugin conf)
+        admin-users (or (.get conf NIMBUS-ADMINS) [])
+        get-common-topo-info
+          (fn [^String storm-id operation]
+            (let [storm-cluster-state (:storm-cluster-state nimbus)
+                  topology-conf (try-read-storm-conf conf storm-id blob-store)
+                  storm-name (topology-conf TOPOLOGY-NAME)
+                  _ (check-authorization! nimbus
+                                          storm-name
+                                          topology-conf
+                                          operation)
+                  topology (try-read-storm-topology storm-id blob-store)
+                  task->component (storm-task-info topology topology-conf)
+                  base (.storm-base storm-cluster-state storm-id nil)
+                  launch-time-secs (if base (:launch-time-secs base)
+                                     (throw
+                                       (NotAliveException. (str storm-id))))
+                  assignment (.assignment-info storm-cluster-state storm-id nil)
+                  beats (map-val :heartbeat (get @(:heartbeats-cache nimbus)
+                                                 storm-id))
+                  all-components (set (vals task->component))]
+              {:storm-name storm-name
+               :storm-cluster-state storm-cluster-state
+               :all-components all-components
+               :launch-time-secs launch-time-secs
+               :assignment assignment
+               :beats beats
+               :topology topology
+               :task->component task->component
+               :base base}))
+        get-last-error (fn [storm-cluster-state storm-id component-id]
+                         (if-let [e (.last-error storm-cluster-state
+                                                 storm-id
+                                                 component-id)]
+                           (doto (ErrorInfo. (:error e) (:time-secs e))
+                             (.set_host (:host e))
+                             (.set_port (:port e)))))]
+    (.prepare ^org.apache.storm.nimbus.ITopologyValidator (:validator nimbus) conf)
+
+    ;add to nimbuses
+    (.add-nimbus-host! (:storm-cluster-state nimbus) (.toHostPortString (:nimbus-host-port-info nimbus))
+      (NimbusSummary.
+        (.getHost (:nimbus-host-port-info nimbus))
+        (.getPort (:nimbus-host-port-info nimbus))
+        (current-time-secs)
+        false ;is-leader
+        STORM-VERSION))
+
+    (.addToLeaderLockQueue (:leader-elector nimbus))
+    (cleanup-corrupt-topologies! nimbus)
+    (when (instance? LocalFsBlobStore blob-store)
+      ;register call back for blob-store
+      (.blobstore (:storm-cluster-state nimbus) (fn [] (blob-sync conf nimbus)))
+      (setup-blobstore nimbus))
+
+    (when (is-leader nimbus :throw-exception false)
+      (doseq [storm-id (.active-storms (:storm-cluster-state nimbus))]
+        (transition! nimbus storm-id :startup)))
+    (schedule-recurring (:timer nimbus)
+                        0
+                        (conf NIMBUS-MONITOR-FREQ-SECS)
+                        (fn []
+                          (when-not (conf NIMBUS-DO-NOT-REASSIGN)
+                            (locking (:submit-lock nimbus)
+                              (mk-assignments nimbus)))
+                          (do-cleanup nimbus)))
+    ;; Schedule Nimbus inbox cleaner
+    (schedule-recurring (:timer nimbus)
+                        0
+                        (conf NIMBUS-CLEANUP-INBOX-FREQ-SECS)
+                        (fn []
+                          (clean-inbox (inbox nimbus) (conf NIMBUS-INBOX-JAR-EXPIRATION-SECS))))
+    ;; Schedule nimbus code sync thread to sync code from other nimbuses.
+    (if (instance? LocalFsBlobStore blob-store)
+      (schedule-recurring (:timer nimbus)
+                          0
+                          (conf NIMBUS-CODE-SYNC-FREQ-SECS)
+                          (fn []
+                            (blob-sync conf nimbus))))
+    ;; Schedule topology history cleaner
+    (when-let [interval (conf LOGVIEWER-CLEANUP-INTERVAL-SECS)]
+      (schedule-recurring (:timer nimbus)
+        0
+        (conf LOGVIEWER-CLEANUP-INTERVAL-SECS)
+        (fn []
+          (clean-topology-history (conf LOGVIEWER-CLEANUP-AGE-MINS) nimbus))))
+    (schedule-recurring (:timer nimbus)
+                        0
+                        (conf NIMBUS-CREDENTIAL-RENEW-FREQ-SECS)
+                        (fn []
+                          (renew-credentials nimbus)))
+
+    (defgauge nimbus:num-supervisors
+      (fn [] (.size (.supervisors (:storm-cluster-state nimbus) nil))))
+
+    (start-metrics-reporters)
+
+    (reify Nimbus$Iface
+      (^void submitTopologyWithOpts
+        [this ^String storm-name ^String uploadedJarLocation ^String serializedConf ^StormTopology topology
+         ^SubmitOptions submitOptions]
+        (try
+          (mark! nimbus:num-submitTopologyWithOpts-calls)
+          (is-leader nimbus)
+          (assert (not-nil? submitOptions))
+          (validate-topology-name! storm-name)
+          (check-authorization! nimbus storm-name nil "submitTopology")
+          (check-storm-active! nimbus storm-name false)
+          (let [topo-conf (from-json serializedConf)]
+            (try
+              (validate-configs-with-schemas topo-conf)
+              (catch IllegalArgumentException ex
+                (throw (InvalidTopologyException. (.getMessage ex)))))
+            (.validate ^org.apache.storm.nimbus.ITopologyValidator (:validator nimbus)
+                       storm-name
+                       topo-conf
+                       topology))
+          (swap! (:submitted-count nimbus) inc)
+          (let [storm-id (str storm-name "-" @(:submitted-count nimbus) "-" (current-time-secs))
+                credentials (.get_creds submitOptions)
+                credentials (when credentials (.get_creds credentials))
+                topo-conf (from-json serializedConf)
+                storm-conf-submitted (normalize-conf
+                            conf
+                            (-> topo-conf
+                              (assoc STORM-ID storm-id)
+                              (assoc TOPOLOGY-NAME storm-name))
+                            topology)
+                req (ReqContext/context)
+                principal (.principal req)
+                submitter-principal (if principal (.toString principal))
+                submitter-user (.toLocal principal-to-local principal)
+                system-user (System/getProperty "user.name")
+                topo-acl (distinct (remove nil? (conj (.get storm-conf-submitted TOPOLOGY-USERS) submitter-principal, submitter-user)))
+                storm-conf (-> storm-conf-submitted
+                               (assoc TOPOLOGY-SUBMITTER-PRINCIPAL (if submitter-principal submitter-principal ""))
+                               (assoc TOPOLOGY-SUBMITTER-USER (if submitter-user submitter-user system-user)) ;Don't let the user set who we launch as
+                               (assoc TOPOLOGY-USERS topo-acl)
+                               (assoc STORM-ZOOKEEPER-SUPERACL (.get conf STORM-ZOOKEEPER-SUPERACL)))
+                storm-conf (if (Utils/isZkAuthenticationConfiguredStormServer conf)
+                                storm-conf
+                                (dissoc storm-conf STORM-ZOOKEEPER-TOPOLOGY-AUTH-SCHEME STORM-ZOOKEEPER-TOPOLOGY-AUTH-PAYLOAD))
+                total-storm-conf (merge conf storm-conf)
+                topology (normalize-topology total-storm-conf topology)
+                storm-cluster-state (:storm-cluster-state nimbus)]
+            (when credentials (doseq [nimbus-autocred-plugin (:nimbus-autocred-plugins nimbus)]
+              (.populateCredentials nimbus-autocred-plugin credentials (Collections/unmodifiableMap storm-conf))))
+            (if (and (conf SUPERVISOR-RUN-WORKER-AS-USER) (or (nil? submitter-user) (.isEmpty (.trim submitter-user))))
+              (throw (AuthorizationException. "Could not determine the user to run this topology as.")))
+            (system-topology! total-storm-conf topology) ;; this validates the structure of the topology
+            (validate-topology-size topo-conf conf topology)
+            (when (and (Utils/isZkAuthenticationConfiguredStormServer conf)
+                       (not (Utils/isZkAuthenticationConfiguredTopology storm-conf)))
+                (throw (IllegalArgumentException. "The cluster is configured for zookeeper authentication, but no payload was provided.")))
+            (log-message "Received topology submission for "
+                         storm-name
+                         " with conf "
+                         (redact-value storm-conf STORM-ZOOKEEPER-TOPOLOGY-AUTH-PAYLOAD))
+            ;; lock protects against multiple topologies being submitted at once and
+            ;; cleanup thread killing topology in b/w assignment and starting the topology
+            (locking (:submit-lock nimbus)
+              (check-storm-active! nimbus storm-name false)
+              ;;cred-update-lock is not needed here because creds are being added for the first time.
+              (.set-credentials! storm-cluster-state storm-id credentials storm-conf)
+              (log-message "uploadedJar " uploadedJarLocation)
+              (setup-storm-code nimbus conf storm-id uploadedJarLocation total-storm-conf topology)
+              (wait-for-desired-code-replication nimbus total-storm-conf storm-id)
+              (.setup-heartbeats! storm-cluster-state storm-id)
+              (.setup-backpressure! storm-cluster-state storm-id)
+              (notify-topology-action-listener nimbus storm-name "submitTopology")
+              (let [thrift-status->kw-status {TopologyInitialStatus/INACTIVE :inactive
+                                              TopologyInitialStatus/ACTIVE :active}]
+                (start-storm nimbus storm-name storm-id (thrift-status->kw-status (.get_initial_status submitOptions))))))
+          (catch Throwable e
+            (log-warn-error e "Topology submission exception. (topology name='" storm-name "')")
+            (throw e))))
+
+      (^void submitTopology
+        [this ^String storm-name ^String uploadedJarLocation ^String serializedConf ^StormTopology topology]
+        (mark! nimbus:num-submitTopology-calls)
+        (.submitTopologyWithOpts this storm-name uploadedJarLocation serializedConf topology
+                                 (SubmitOptions. TopologyInitialStatus/ACTIVE)))
+
+      (^void killTopology [this ^String name]
+        (mark! nimbus:num-killTopology-calls)
+        (.killTopologyWithOpts this name (KillOptions.)))
+
+      (^void killTopologyWithOpts [this ^String storm-name ^KillOptions options]
+        (mark! nimbus:num-killTopologyWithOpts-calls)
+        (check-storm-active! nimbus storm-name true)
+        (let [topology-conf (try-read-storm-conf-from-name conf storm-name nimbus)
+              operation "killTopology"]
+          (check-authorization! nimbus storm-name topology-conf operation)
+          (let [wait-amt (if (.is_set_wait_secs options)
+                           (.get_wait_secs options))]
+            (transition-name! nimbus storm-name [:kill wait-amt] true)
+            (notify-topology-action-listener nimbus storm-name operation))
+          (add-topology-to-history-log (get-storm-id (:storm-cluster-state nimbus) storm-name)
+            nimbus topology-conf)))
+
+      (^void rebalance [this ^String storm-name ^RebalanceOptions options]
+        (mark! nimbus:num-rebalance-calls)
+        (check-storm-active! nimbus storm-name true)
+        (let [topology-conf (try-read-storm-conf-from-name conf storm-name nimbus)
+              operation "rebalance"]
+          (check-authorization! nimbus storm-name topology-conf operation)
+          (let [wait-amt (if (.is_set_wait_secs options)
+                           (.get_wait_secs options))
+                num-workers (if (.is_set_num_workers options)
+                              (.get_num_workers options))
+                executor-overrides (if (.is_set_num_executors options)
+                                     (.get_num_executors options)
+                                     {})]
+            (doseq [[c num-executors] executor-overrides]
+              (when (<= num-executors 0)
+                (throw (InvalidTopologyException. "Number of executors must be greater than 0"))
+                ))
+            (transition-name! nimbus storm-name [:rebalance wait-amt num-workers executor-overrides] true)
+
+            (notify-topology-action-listener nimbus storm-name operation))))
+
+      (activate [this storm-name]
+        (mark! nimbus:num-activate-calls)
+        (let [topology-conf (try-read-storm-conf-from-name conf storm-name nimbus)
+              operation "activate"]
+          (check-authorization! nimbus storm-name topology-conf operation)
+          (transition-name! nimbus storm-name :activate true)
+          (notify-topology-action-listener nimbus storm-name operation)))
+
+      (deactivate [this storm-name]
+        (mark! nimbus:num-deactivate-calls)
+        (let [topology-conf (try-read-storm-conf-from-name conf storm-name nimbus)
+              operation "deactivate"]
+          (check-authorization! nimbus storm-name topology-conf operation)
+          (transition-name! nimbus storm-name :inactivate true)
+          (notify-topology-action-listener nimbus storm-name operation)))
+
+      (debug [this storm-name component-id enable? samplingPct]
+        (mark! nimbus:num-debug-calls)
+        (let [storm-cluster-state (:storm-cluster-state nimbus)
+              storm-id (get-storm-id storm-cluster-state storm-name)
+              topology-conf (try-read-storm-conf conf storm-id blob-store)
+              ;; make sure samplingPct is within bounds.
+              spct (Math/max (Math/min samplingPct 100.0) 0.0)
+              ;; while disabling we retain the sampling pct.
+              debug-options (if enable? {:enable enable? :samplingpct spct} {:enable enable?})
+              storm-base-updates (assoc {} :component->debug (if (empty? component-id)
+                                                               {storm-id debug-options}
+                                                               {component-id debug-options}))]
+          (check-authorization! nimbus storm-name topology-conf "debug")
+          (when-not storm-id
+            (throw (NotAliveException. storm-name)))
+          (log-message "Nimbus setting debug to " enable? " for storm-name '" storm-name "' storm-id '" storm-id "' sampling pct '" spct "'"
+            (if (not (clojure.string/blank? component-id)) (str " component-id '" component-id "'")))
+          (locking (:submit-lock nimbus)
+            (.update-storm! storm-cluster-state storm-id storm-base-updates))))
+
+      (^void setWorkerProfiler
+        [this ^String id ^ProfileRequest profileRequest]
+        (mark! nimbus:num-setWorkerProfiler-calls)
+        (let [topology-conf (try-read-storm-conf conf id (:blob-store nimbus))
+              storm-name (topology-conf TOPOLOGY-NAME)
+              _ (check-authorization! nimbus storm-name topology-conf "setWorkerProfiler")
+              storm-cluster-state (:storm-cluster-state nimbus)]
+          (.set-worker-profile-request storm-cluster-state id profileRequest)))
+
+      (^List getComponentPendingProfileActions
+        [this ^String id ^String component_id ^ProfileAction action]
+        (mark! nimbus:num-getComponentPendingProfileActions-calls)
+        (let [info (get-common-topo-info id "getComponentPendingProfileActions")
+              storm-cluster-state (:storm-cluster-state info)
+              task->component (:task->component info)
+              {:keys [executor->node+port node->host]} (:assignment info)
+              executor->host+port (map-val (fn [[node port]]
+                                             [(node->host node) port])
+                                    executor->node+port)
+              nodeinfos (stats/extract-nodeinfos-from-hb-for-comp executor->host+port task->component false component_id)
+              all-pending-actions-for-topology (.get-topology-profile-requests storm-cluster-state id true)
+              latest-profile-actions (remove nil? (map (fn [nodeInfo]
+                                                         (->> all-pending-actions-for-topology
+                                                              (filter #(and (= (:host nodeInfo) (.get_node (.get_nodeInfo %)))
+                                                                         (= (:port nodeInfo) (first (.get_port (.get_nodeInfo  %))))))
+                                                              (filter #(= action (.get_action %)))
+                                                              (sort-by #(.get_time_stamp %) >)
+                                                              first))
+                                                    nodeinfos))]
+          (log-message "Latest profile actions for topology " id " component " component_id " " (pr-str latest-profile-actions))
+          latest-profile-actions))
+
+      (^void setLogConfig [this ^String id ^LogConfig log-config-msg]
+        (mark! nimbus:num-setLogConfig-calls)
+        (let [topology-conf (try-read-storm-conf conf id (:blob-store nimbus))
+              storm-name (topology-conf TOPOLOGY-NAME)
+              _ (check-authorization! nimbus storm-name topology-conf "setLogConfig")
+              storm-cluster-state (:storm-

<TRUNCATED>

[33/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-redis/src/test/java/org/apache/storm/redis/trident/WordCountTridentRedisMap.java
----------------------------------------------------------------------
diff --git a/external/storm-redis/src/test/java/org/apache/storm/redis/trident/WordCountTridentRedisMap.java b/external/storm-redis/src/test/java/org/apache/storm/redis/trident/WordCountTridentRedisMap.java
index cfda54e..384f97c 100644
--- a/external/storm-redis/src/test/java/org/apache/storm/redis/trident/WordCountTridentRedisMap.java
+++ b/external/storm-redis/src/test/java/org/apache/storm/redis/trident/WordCountTridentRedisMap.java
@@ -17,22 +17,22 @@
  */
 package org.apache.storm.redis.trident;
 
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.StormSubmitter;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
 import org.apache.storm.redis.common.mapper.RedisDataTypeDescription;
 import org.apache.storm.redis.trident.state.RedisMapState;
 import org.apache.storm.redis.common.config.JedisPoolConfig;
-import storm.trident.Stream;
-import storm.trident.TridentState;
-import storm.trident.TridentTopology;
-import storm.trident.operation.builtin.MapGet;
-import storm.trident.operation.builtin.Sum;
-import storm.trident.state.StateFactory;
-import storm.trident.testing.FixedBatchSpout;
+import org.apache.storm.trident.Stream;
+import org.apache.storm.trident.TridentState;
+import org.apache.storm.trident.TridentTopology;
+import org.apache.storm.trident.operation.builtin.MapGet;
+import org.apache.storm.trident.operation.builtin.Sum;
+import org.apache.storm.trident.state.StateFactory;
+import org.apache.storm.trident.testing.FixedBatchSpout;
 
 public class WordCountTridentRedisMap {
     public static StormTopology buildTopology(String redisHost, Integer redisPort){

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-solr/src/main/java/org/apache/storm/solr/bolt/SolrUpdateBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-solr/src/main/java/org/apache/storm/solr/bolt/SolrUpdateBolt.java b/external/storm-solr/src/main/java/org/apache/storm/solr/bolt/SolrUpdateBolt.java
index 4a9599e..2c56c39 100644
--- a/external/storm-solr/src/main/java/org/apache/storm/solr/bolt/SolrUpdateBolt.java
+++ b/external/storm-solr/src/main/java/org/apache/storm/solr/bolt/SolrUpdateBolt.java
@@ -18,11 +18,11 @@
 
 package org.apache.storm.solr.bolt;
 
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseRichBolt;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseRichBolt;
+import org.apache.storm.tuple.Tuple;
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrServerException;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-solr/src/main/java/org/apache/storm/solr/mapper/SolrFieldsMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-solr/src/main/java/org/apache/storm/solr/mapper/SolrFieldsMapper.java b/external/storm-solr/src/main/java/org/apache/storm/solr/mapper/SolrFieldsMapper.java
index 9fa38cd..d078959 100644
--- a/external/storm-solr/src/main/java/org/apache/storm/solr/mapper/SolrFieldsMapper.java
+++ b/external/storm-solr/src/main/java/org/apache/storm/solr/mapper/SolrFieldsMapper.java
@@ -20,7 +20,7 @@ package org.apache.storm.solr.mapper;
 
 import static org.apache.storm.solr.schema.SolrFieldTypeFinder.FieldTypeWrapper;
 
-import backtype.storm.tuple.ITuple;
+import org.apache.storm.tuple.ITuple;
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-solr/src/main/java/org/apache/storm/solr/mapper/SolrJsonMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-solr/src/main/java/org/apache/storm/solr/mapper/SolrJsonMapper.java b/external/storm-solr/src/main/java/org/apache/storm/solr/mapper/SolrJsonMapper.java
index f9d6e9b..704ec2d 100644
--- a/external/storm-solr/src/main/java/org/apache/storm/solr/mapper/SolrJsonMapper.java
+++ b/external/storm-solr/src/main/java/org/apache/storm/solr/mapper/SolrJsonMapper.java
@@ -18,7 +18,7 @@
 
 package org.apache.storm.solr.mapper;
 
-import backtype.storm.tuple.ITuple;
+import org.apache.storm.tuple.ITuple;
 import com.google.gson.Gson;
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrRequest;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-solr/src/main/java/org/apache/storm/solr/mapper/SolrMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-solr/src/main/java/org/apache/storm/solr/mapper/SolrMapper.java b/external/storm-solr/src/main/java/org/apache/storm/solr/mapper/SolrMapper.java
index 96242d1..5c0223b 100644
--- a/external/storm-solr/src/main/java/org/apache/storm/solr/mapper/SolrMapper.java
+++ b/external/storm-solr/src/main/java/org/apache/storm/solr/mapper/SolrMapper.java
@@ -18,8 +18,8 @@
 
 package org.apache.storm.solr.mapper;
 
-import backtype.storm.tuple.ITuple;
-import backtype.storm.tuple.Tuple;
+import org.apache.storm.tuple.ITuple;
+import org.apache.storm.tuple.Tuple;
 import org.apache.solr.client.solrj.SolrRequest;
 
 import java.io.Serializable;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-solr/src/main/java/org/apache/storm/solr/trident/SolrState.java
----------------------------------------------------------------------
diff --git a/external/storm-solr/src/main/java/org/apache/storm/solr/trident/SolrState.java b/external/storm-solr/src/main/java/org/apache/storm/solr/trident/SolrState.java
index 8187d11..d84d140 100644
--- a/external/storm-solr/src/main/java/org/apache/storm/solr/trident/SolrState.java
+++ b/external/storm-solr/src/main/java/org/apache/storm/solr/trident/SolrState.java
@@ -18,7 +18,7 @@
 
 package org.apache.storm.solr.trident;
 
-import backtype.storm.topology.FailedException;
+import org.apache.storm.topology.FailedException;
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
@@ -26,8 +26,8 @@ import org.apache.storm.solr.config.SolrConfig;
 import org.apache.storm.solr.mapper.SolrMapper;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import storm.trident.state.State;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.state.State;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-solr/src/main/java/org/apache/storm/solr/trident/SolrStateFactory.java
----------------------------------------------------------------------
diff --git a/external/storm-solr/src/main/java/org/apache/storm/solr/trident/SolrStateFactory.java b/external/storm-solr/src/main/java/org/apache/storm/solr/trident/SolrStateFactory.java
index a1f815d..7b092ba 100644
--- a/external/storm-solr/src/main/java/org/apache/storm/solr/trident/SolrStateFactory.java
+++ b/external/storm-solr/src/main/java/org/apache/storm/solr/trident/SolrStateFactory.java
@@ -18,11 +18,11 @@
 
 package org.apache.storm.solr.trident;
 
-import backtype.storm.task.IMetricsContext;
+import org.apache.storm.task.IMetricsContext;
 import org.apache.storm.solr.config.SolrConfig;
 import org.apache.storm.solr.mapper.SolrMapper;
-import storm.trident.state.State;
-import storm.trident.state.StateFactory;
+import org.apache.storm.trident.state.State;
+import org.apache.storm.trident.state.StateFactory;
 
 import java.util.Map;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-solr/src/main/java/org/apache/storm/solr/trident/SolrUpdater.java
----------------------------------------------------------------------
diff --git a/external/storm-solr/src/main/java/org/apache/storm/solr/trident/SolrUpdater.java b/external/storm-solr/src/main/java/org/apache/storm/solr/trident/SolrUpdater.java
index db7b995..53698fa 100644
--- a/external/storm-solr/src/main/java/org/apache/storm/solr/trident/SolrUpdater.java
+++ b/external/storm-solr/src/main/java/org/apache/storm/solr/trident/SolrUpdater.java
@@ -18,9 +18,9 @@
 
 package org.apache.storm.solr.trident;
 
-import storm.trident.operation.TridentCollector;
-import storm.trident.state.BaseStateUpdater;
-import storm.trident.tuple.TridentTuple;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.state.BaseStateUpdater;
+import org.apache.storm.trident.tuple.TridentTuple;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-solr/src/test/java/org/apache/storm/solr/spout/SolrFieldsSpout.java
----------------------------------------------------------------------
diff --git a/external/storm-solr/src/test/java/org/apache/storm/solr/spout/SolrFieldsSpout.java b/external/storm-solr/src/test/java/org/apache/storm/solr/spout/SolrFieldsSpout.java
index 7d2357c..8e3390d 100644
--- a/external/storm-solr/src/test/java/org/apache/storm/solr/spout/SolrFieldsSpout.java
+++ b/external/storm-solr/src/test/java/org/apache/storm/solr/spout/SolrFieldsSpout.java
@@ -18,12 +18,12 @@
 
 package org.apache.storm.solr.spout;
 
-import backtype.storm.spout.SpoutOutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseRichSpout;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
+import org.apache.storm.spout.SpoutOutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseRichSpout;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
 import com.google.common.collect.Lists;
 import org.apache.storm.solr.util.TestUtil;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-solr/src/test/java/org/apache/storm/solr/spout/SolrJsonSpout.java
----------------------------------------------------------------------
diff --git a/external/storm-solr/src/test/java/org/apache/storm/solr/spout/SolrJsonSpout.java b/external/storm-solr/src/test/java/org/apache/storm/solr/spout/SolrJsonSpout.java
index 6afed2c..bb0c83c 100644
--- a/external/storm-solr/src/test/java/org/apache/storm/solr/spout/SolrJsonSpout.java
+++ b/external/storm-solr/src/test/java/org/apache/storm/solr/spout/SolrJsonSpout.java
@@ -18,12 +18,12 @@
 
 package org.apache.storm.solr.spout;
 
-import backtype.storm.spout.SpoutOutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseRichSpout;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
+import org.apache.storm.spout.SpoutOutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseRichSpout;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
 import com.google.common.collect.Lists;
 import com.google.gson.Gson;
 import org.apache.storm.solr.util.TestUtil;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-solr/src/test/java/org/apache/storm/solr/topology/SolrFieldsTopology.java
----------------------------------------------------------------------
diff --git a/external/storm-solr/src/test/java/org/apache/storm/solr/topology/SolrFieldsTopology.java b/external/storm-solr/src/test/java/org/apache/storm/solr/topology/SolrFieldsTopology.java
index 809f434..5c9f16d 100644
--- a/external/storm-solr/src/test/java/org/apache/storm/solr/topology/SolrFieldsTopology.java
+++ b/external/storm-solr/src/test/java/org/apache/storm/solr/topology/SolrFieldsTopology.java
@@ -18,8 +18,8 @@
 
 package org.apache.storm.solr.topology;
 
-import backtype.storm.generated.StormTopology;
-import backtype.storm.topology.TopologyBuilder;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.topology.TopologyBuilder;
 import org.apache.storm.solr.bolt.SolrUpdateBolt;
 import org.apache.storm.solr.config.CountBasedCommit;
 import org.apache.storm.solr.config.SolrCommitStrategy;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-solr/src/test/java/org/apache/storm/solr/topology/SolrJsonTopology.java
----------------------------------------------------------------------
diff --git a/external/storm-solr/src/test/java/org/apache/storm/solr/topology/SolrJsonTopology.java b/external/storm-solr/src/test/java/org/apache/storm/solr/topology/SolrJsonTopology.java
index 02e6d6f..24e6b5e 100644
--- a/external/storm-solr/src/test/java/org/apache/storm/solr/topology/SolrJsonTopology.java
+++ b/external/storm-solr/src/test/java/org/apache/storm/solr/topology/SolrJsonTopology.java
@@ -18,8 +18,8 @@
 
 package org.apache.storm.solr.topology;
 
-import backtype.storm.generated.StormTopology;
-import backtype.storm.topology.TopologyBuilder;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.topology.TopologyBuilder;
 import org.apache.storm.solr.bolt.SolrUpdateBolt;
 import org.apache.storm.solr.mapper.SolrJsonMapper;
 import org.apache.storm.solr.mapper.SolrMapper;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-solr/src/test/java/org/apache/storm/solr/topology/SolrTopology.java
----------------------------------------------------------------------
diff --git a/external/storm-solr/src/test/java/org/apache/storm/solr/topology/SolrTopology.java b/external/storm-solr/src/test/java/org/apache/storm/solr/topology/SolrTopology.java
index 607cf98..e0f4dc6 100644
--- a/external/storm-solr/src/test/java/org/apache/storm/solr/topology/SolrTopology.java
+++ b/external/storm-solr/src/test/java/org/apache/storm/solr/topology/SolrTopology.java
@@ -18,10 +18,10 @@
 
 package org.apache.storm.solr.topology;
 
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.StormSubmitter;
-import backtype.storm.generated.StormTopology;
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.generated.StormTopology;
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.storm.solr.config.SolrCommitStrategy;

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-solr/src/test/java/org/apache/storm/solr/trident/SolrFieldsTridentTopology.java
----------------------------------------------------------------------
diff --git a/external/storm-solr/src/test/java/org/apache/storm/solr/trident/SolrFieldsTridentTopology.java b/external/storm-solr/src/test/java/org/apache/storm/solr/trident/SolrFieldsTridentTopology.java
index 4884c82..d022c8a 100644
--- a/external/storm-solr/src/test/java/org/apache/storm/solr/trident/SolrFieldsTridentTopology.java
+++ b/external/storm-solr/src/test/java/org/apache/storm/solr/trident/SolrFieldsTridentTopology.java
@@ -18,13 +18,13 @@
 
 package org.apache.storm.solr.trident;
 
-import backtype.storm.generated.StormTopology;
-import backtype.storm.tuple.Fields;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.tuple.Fields;
 import org.apache.storm.solr.spout.SolrFieldsSpout;
 import org.apache.storm.solr.topology.SolrFieldsTopology;
-import storm.trident.Stream;
-import storm.trident.TridentTopology;
-import storm.trident.state.StateFactory;
+import org.apache.storm.trident.Stream;
+import org.apache.storm.trident.TridentTopology;
+import org.apache.storm.trident.state.StateFactory;
 
 import java.io.IOException;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-solr/src/test/java/org/apache/storm/solr/trident/SolrJsonTridentTopology.java
----------------------------------------------------------------------
diff --git a/external/storm-solr/src/test/java/org/apache/storm/solr/trident/SolrJsonTridentTopology.java b/external/storm-solr/src/test/java/org/apache/storm/solr/trident/SolrJsonTridentTopology.java
index d03b1dd..75131b8 100644
--- a/external/storm-solr/src/test/java/org/apache/storm/solr/trident/SolrJsonTridentTopology.java
+++ b/external/storm-solr/src/test/java/org/apache/storm/solr/trident/SolrJsonTridentTopology.java
@@ -18,13 +18,13 @@
 
 package org.apache.storm.solr.trident;
 
-import backtype.storm.generated.StormTopology;
-import backtype.storm.tuple.Fields;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.tuple.Fields;
 import org.apache.storm.solr.spout.SolrJsonSpout;
 import org.apache.storm.solr.topology.SolrJsonTopology;
-import storm.trident.Stream;
-import storm.trident.TridentTopology;
-import storm.trident.state.StateFactory;
+import org.apache.storm.trident.Stream;
+import org.apache.storm.trident.TridentTopology;
+import org.apache.storm.trident.state.StateFactory;
 
 import java.io.IOException;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/log4j2/cluster.xml
----------------------------------------------------------------------
diff --git a/log4j2/cluster.xml b/log4j2/cluster.xml
index f349d8c..ca333b2 100644
--- a/log4j2/cluster.xml
+++ b/log4j2/cluster.xml
@@ -73,15 +73,15 @@
 </appenders>
 <loggers>
 
-    <Logger name="backtype.storm.logging.filters.AccessLoggingFilter" level="info" additivity="false">
+    <Logger name="org.apache.storm.logging.filters.AccessLoggingFilter" level="info" additivity="false">
         <AppenderRef ref="WEB-ACCESS"/>
         <AppenderRef ref="syslog"/>
     </Logger>
-    <Logger name="backtype.storm.logging.ThriftAccessLogger" level="info" additivity="false">
+    <Logger name="org.apache.storm.logging.ThriftAccessLogger" level="info" additivity="false">
         <AppenderRef ref="THRIFT-ACCESS"/>
         <AppenderRef ref="syslog"/>
     </Logger>
-    <Logger name="backtype.storm.metric.LoggingMetricsConsumer" level="info">
+    <Logger name="org.apache.storm.metric.LoggingMetricsConsumer" level="info">
         <AppenderRef ref="METRICS"/>
     </Logger>
     <root level="info"> <!-- We log everything -->

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 181c2a9..bced62b 100644
--- a/pom.xml
+++ b/pom.xml
@@ -241,7 +241,7 @@
         <maven-surefire.version>2.18.1</maven-surefire.version>
 
         <!-- Java and clojure build lifecycle test properties are defined here to avoid having to create a default profile -->
-        <java.unit.test.exclude>backtype.storm.testing.IntegrationTest</java.unit.test.exclude>
+        <java.unit.test.exclude>org.apache.storm.testing.IntegrationTest</java.unit.test.exclude>
         <java.unit.test.include>**/Test*.java, **/*Test.java, **/*TestCase.java</java.unit.test.include>    <!--maven surefire plugin default test list-->
         <!-- by default the clojure test set are all clojure tests that are not integration tests. This property is overridden in the profiles -->
         <clojure.test.set>!integration.*</clojure.test.set>
@@ -364,7 +364,7 @@
             <id>all-tests</id>
             <properties>
                 <java.integration.test.include>**/*.java</java.integration.test.include>
-                <java.integration.test.group>backtype.storm.testing.IntegrationTest</java.integration.test.group>
+                <java.integration.test.group>org.apache.storm.testing.IntegrationTest</java.integration.test.group>
                 <clojure.test.set>*.*</clojure.test.set>
             </properties>
         </profile>
@@ -374,7 +374,7 @@
                 <!--Java-->
                 <java.unit.test.include>no.unit.tests</java.unit.test.include>
                 <java.integration.test.include>**/*.java</java.integration.test.include>
-                <java.integration.test.group>backtype.storm.testing.IntegrationTest</java.integration.test.group>
+                <java.integration.test.group>org.apache.storm.testing.IntegrationTest</java.integration.test.group>
                 <!--Clojure-->
                 <clojure.test.set>integration.*</clojure.test.set>
                 <clojure.test.declared.namespace.only>true</clojure.test.declared.namespace.only>

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/pom.xml
----------------------------------------------------------------------
diff --git a/storm-core/pom.xml b/storm-core/pom.xml
index 765e798..08fffa4 100644
--- a/storm-core/pom.xml
+++ b/storm-core/pom.xml
@@ -1018,9 +1018,9 @@
                                 <configuration>
                                     <append>true</append>
                                     <excludes>
-                                        <exclude>backtype/storm/metric/api/IMetricsConsumer$DataPointFieldAccess</exclude>
-                                        <exclude>backtype/storm/metric/api/IMetricsConsumer$TaskInfoFieldAccess</exclude>
-                                        <exclude>backtype/storm/testing/TestSerObjectFieldAccess</exclude>
+                                        <exclude>org/apache/storm/metric/api/IMetricsConsumer$DataPointFieldAccess</exclude>
+                                        <exclude>org/apache/storm/metric/api/IMetricsConsumer$TaskInfoFieldAccess</exclude>
+                                        <exclude>org/apache/storm/testing/TestSerObjectFieldAccess</exclude>
                                     </excludes>
                                 </configuration>
                             </execution>
@@ -1032,7 +1032,7 @@
                                 </goals>
                                 <configuration>
                                     <excludes>
-                                        <exclude>backtype/storm/generated/*</exclude> <!--Thrift generated code-->
+                                        <exclude>org/apache/storm/generated/*</exclude> <!--Thrift generated code-->
                                     </excludes>
                                     <includes>
                                         <include>backtype/*/*/*/*</include>

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/LocalCluster.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/LocalCluster.clj b/storm-core/src/clj/backtype/storm/LocalCluster.clj
deleted file mode 100644
index aa37c89..0000000
--- a/storm-core/src/clj/backtype/storm/LocalCluster.clj
+++ /dev/null
@@ -1,106 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-
-(ns backtype.storm.LocalCluster
-  (:use [backtype.storm testing config util])
-  (:import [backtype.storm.utils Utils])
-  (:import [java.util Map])
-  (:gen-class
-    :init init
-    :implements [backtype.storm.ILocalCluster]
-    :constructors {[] []
-                   [java.util.Map] []
-                   [String Long] []}
-    :state state))
-
-(defn -init
-  ([]
-   (let [ret (mk-local-storm-cluster
-               :daemon-conf
-               {TOPOLOGY-ENABLE-MESSAGE-TIMEOUTS true})]
-     [[] ret]))
-  ([^String zk-host ^Long zk-port]
-   (let [ret (mk-local-storm-cluster :daemon-conf {TOPOLOGY-ENABLE-MESSAGE-TIMEOUTS true
-                                                     STORM-ZOOKEEPER-SERVERS (list zk-host)
-                                                     STORM-ZOOKEEPER-PORT zk-port})]
-     [[] ret]))
-  ([^Map stateMap]
-     [[] stateMap]))
-
-(defn submit-hook [hook name conf topology]
-  (let [topologyInfo (Utils/getTopologyInfo name nil conf)]
-    (.notify hook topologyInfo conf topology)))
-
-(defn -submitTopology
-  [this name conf topology]
-  (submit-local-topology
-    (:nimbus (. this state)) name conf topology)
-  (let [hook (get-configured-class conf STORM-TOPOLOGY-SUBMISSION-NOTIFIER-PLUGIN)]
-    (when hook (submit-hook hook name conf topology))))
-
-
-(defn -submitTopologyWithOpts
-  [this name conf topology submit-opts]
-  (submit-local-topology-with-opts
-    (:nimbus (. this state)) name conf topology submit-opts))
-
-(defn -uploadNewCredentials
-  [this name creds]
-  (.uploadNewCredentials (:nimbus (. this state)) name creds))
-
-(defn -shutdown
-  [this]
-  (kill-local-storm-cluster (. this state)))
-
-(defn -killTopology
-  [this name]
-  (.killTopology (:nimbus (. this state)) name))
-
-(defn -getTopologyConf
-  [this id]
-  (.getTopologyConf (:nimbus (. this state)) id))
-
-(defn -getTopology
-  [this id]
-  (.getTopology (:nimbus (. this state)) id))
-
-(defn -getClusterInfo
-  [this]
-  (.getClusterInfo (:nimbus (. this state))))
-
-(defn -getTopologyInfo
-  [this id]
-  (.getTopologyInfo (:nimbus (. this state)) id))
-
-(defn -killTopologyWithOpts
-  [this name opts]
-  (.killTopologyWithOpts (:nimbus (. this state)) name opts))
-
-(defn -activate
-  [this name]
-  (.activate (:nimbus (. this state)) name))
-
-(defn -deactivate
-  [this name]
-  (.deactivate (:nimbus (. this state)) name))
-
-(defn -rebalance
-  [this name opts]
-  (.rebalance (:nimbus (. this state)) name opts))
-
-(defn -getState
-  [this]
-  (.state this))
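
A minimal usage sketch of the LocalCluster gen-class defined in the file removed above; the conf map, topology name, and my-topology var are illustrative placeholders, not part of this change:

    (import 'backtype.storm.LocalCluster)

    ;; my-topology is assumed to be a StormTopology built elsewhere
    (let [cluster (LocalCluster.)
          conf    {"topology.debug" true}]
      (.submitTopology cluster "test-topology" conf my-topology)
      (Thread/sleep 10000)
      (.killTopology cluster "test-topology")
      (.shutdown cluster))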

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/LocalDRPC.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/LocalDRPC.clj b/storm-core/src/clj/backtype/storm/LocalDRPC.clj
deleted file mode 100644
index 9773821..0000000
--- a/storm-core/src/clj/backtype/storm/LocalDRPC.clj
+++ /dev/null
@@ -1,56 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-
-(ns backtype.storm.LocalDRPC
-  (:require [backtype.storm.daemon [drpc :as drpc]])
-  (:use [backtype.storm config util])
-  (:import [backtype.storm.utils InprocMessaging ServiceRegistry])
-  (:gen-class
-   :init init
-   :implements [backtype.storm.ILocalDRPC]
-   :constructors {[] []}
-   :state state ))
-
-(defn -init []
-  (let [handler (drpc/service-handler (read-storm-config))
-        id (ServiceRegistry/registerService handler)
-        ]
-    [[] {:service-id id :handler handler}]
-    ))
-
-(defn -execute [this func funcArgs]
-  (.execute (:handler (. this state)) func funcArgs)
-  )
-
-(defn -result [this id result]
-  (.result (:handler (. this state)) id result)
-  )
-
-(defn -fetchRequest [this func]
-  (.fetchRequest (:handler (. this state)) func)
-  )
-
-(defn -failRequest [this id]
-  (.failRequest (:handler (. this state)) id)
-  )
-
-(defn -getServiceId [this]
-  (:service-id (. this state)))
-
-(defn -shutdown [this]
-  (ServiceRegistry/unregisterService (:service-id (. this state)))
-  (.shutdown (:handler (. this state)))
-  )
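
A small sketch of driving the in-process DRPC server defined above from a REPL or test; the function name is a placeholder, and the execute call is shown commented out because it blocks until a DRPC topology returns a result:

    (import 'backtype.storm.LocalDRPC)

    (let [drpc (LocalDRPC.)]
      ;; a DRPC spout locates this in-process server through its service id
      (println "service id:" (.getServiceId drpc))
      ;; (.execute drpc "my-function" "some args")  ; blocks until a topology answers
      (.shutdown drpc))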

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/MockAutoCred.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/MockAutoCred.clj b/storm-core/src/clj/backtype/storm/MockAutoCred.clj
deleted file mode 100644
index 5e37528..0000000
--- a/storm-core/src/clj/backtype/storm/MockAutoCred.clj
+++ /dev/null
@@ -1,58 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-
-;; mock implementation of INimbusCredentialPlugin, IAutoCredentials and ICredentialsRenewer for testing only.
-(ns backtype.storm.MockAutoCred
-  (:use [backtype.storm testing config])
-  (:import [backtype.storm.security.INimbusCredentialPlugin]
-           [backtype.storm.security.auth   ICredentialsRenewer])
-  (:gen-class
-    :implements [backtype.storm.security.INimbusCredentialPlugin
-                 backtype.storm.security.auth.IAutoCredentials
-                 backtype.storm.security.auth.ICredentialsRenewer]))
-
-(def nimbus-cred-key "nimbusCredTestKey")
-(def nimbus-cred-val "nimbusTestCred")
-(def nimbus-cred-renew-val "renewedNimbusTestCred")
-(def gateway-cred-key "gatewayCredTestKey")
-(def gateway-cred-val "gatewayTestCred")
-(def gateway-cred-renew-val "renewedGatewayTestCred")
-
-(defn -populateCredentials
-  ([this creds conf]
-  (.put creds nimbus-cred-key nimbus-cred-val))
-  ([this creds]
-  (.put creds gateway-cred-key gateway-cred-val)))
-
-(defn -prepare
-  [this conf])
-
-(defn -renew
-  [this cred conf]
-  (.put cred nimbus-cred-key nimbus-cred-renew-val)
-  (.put cred gateway-cred-key gateway-cred-renew-val))
-
-(defn -populateSubject
-  [subject credentials]
-  (.add (.getPublicCredentials subject) (.get credentials nimbus-cred-key))
-  (.add (.getPublicCredentials subject) (.get credentials gateway-cred-key)))
-
-(defn -updateSubject
-  [subject credentials]
-  (-populateSubject subject credentials))
-
-
-

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/blobstore.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/blobstore.clj b/storm-core/src/clj/backtype/storm/blobstore.clj
deleted file mode 100644
index 936f4b5..0000000
--- a/storm-core/src/clj/backtype/storm/blobstore.clj
+++ /dev/null
@@ -1,28 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-
-(ns backtype.storm.blobstore
-  (:import [backtype.storm.utils Utils])
-  (:import [backtype.storm.blobstore ClientBlobStore])
-  (:use [backtype.storm config]))
-
-(defmacro with-configured-blob-client
-  [client-sym & body]
-  `(let [conf# (read-storm-config)
-         ^ClientBlobStore ~client-sym (Utils/getClientBlobStore conf#)]
-     (try
-       ~@body
-       (finally (.shutdown ~client-sym)))))
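
A usage sketch for the with-configured-blob-client macro above, assuming a blob with key "my-key" already exists in the configured blob store:

    (require '[clojure.java.io :as io])

    (with-configured-blob-client client
      ;; .getBlob returns an InputStream for the stored blob
      (with-open [in (.getBlob client "my-key")]
        (io/copy in System/out)))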

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/clojure.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/clojure.clj b/storm-core/src/clj/backtype/storm/clojure.clj
deleted file mode 100644
index a73166a..0000000
--- a/storm-core/src/clj/backtype/storm/clojure.clj
+++ /dev/null
@@ -1,201 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-
-(ns backtype.storm.clojure
-  (:use [backtype.storm util])
-  (:import [backtype.storm StormSubmitter])
-  (:import [backtype.storm.generated StreamInfo])
-  (:import [backtype.storm.tuple Tuple])
-  (:import [backtype.storm.task OutputCollector IBolt TopologyContext])
-  (:import [backtype.storm.spout SpoutOutputCollector ISpout])
-  (:import [backtype.storm.utils Utils])
-  (:import [backtype.storm.clojure ClojureBolt ClojureSpout])
-  (:import [java.util List])
-  (:require [backtype.storm [thrift :as thrift]]))
-
-(defn direct-stream [fields]
-  (StreamInfo. fields true))
-
-(defn to-spec [avar]
-  (let [m (meta avar)]
-    [(str (:ns m)) (str (:name m))]))
-
-(defn clojure-bolt* [output-spec fn-var conf-fn-var args]
-  (ClojureBolt. (to-spec fn-var) (to-spec conf-fn-var) args (thrift/mk-output-spec output-spec)))
-
-(defmacro clojure-bolt [output-spec fn-sym conf-fn-sym args]
-  `(clojure-bolt* ~output-spec (var ~fn-sym) (var ~conf-fn-sym) ~args))
-
-(defn clojure-spout* [output-spec fn-var conf-var args]
-  (let [m (meta fn-var)]
-    (ClojureSpout. (to-spec fn-var) (to-spec conf-var) args (thrift/mk-output-spec output-spec))
-    ))
-
-(defmacro clojure-spout [output-spec fn-sym conf-sym args]
-  `(clojure-spout* ~output-spec (var ~fn-sym) (var ~conf-sym) ~args))
-
-(defn normalize-fns [body]
-  (for [[name args & impl] body
-        :let [args (-> "this"
-                       gensym
-                       (cons args)
-                       vec)]]
-    (concat [name args] impl)
-    ))
-
-(defmacro bolt [& body]
-  (let [[bolt-fns other-fns] (split-with #(not (symbol? %)) body)
-        fns (normalize-fns bolt-fns)]
-    `(reify IBolt
-       ~@fns
-       ~@other-fns)))
-
-(defmacro bolt-execute [& body]
-  `(bolt
-     (~'execute ~@body)))
-
-(defmacro spout [& body]
-  (let [[spout-fns other-fns] (split-with #(not (symbol? %)) body)
-        fns (normalize-fns spout-fns)]
-    `(reify ISpout
-       ~@fns
-       ~@other-fns)))
-
-(defmacro defbolt [name output-spec & [opts & impl :as all]]
-  (if-not (map? opts)
-    `(defbolt ~name ~output-spec {} ~@all)
-    (let [worker-name (symbol (str name "__"))
-          conf-fn-name (symbol (str name "__conf__"))
-          params (:params opts)
-          conf-code (:conf opts)
-          fn-body (if (:prepare opts)
-                    (cons 'fn impl)
-                    (let [[args & impl-body] impl
-                          coll-sym (nth args 1)
-                          args (vec (take 1 args))
-                          prepargs [(gensym "conf") (gensym "context") coll-sym]]
-                      `(fn ~prepargs (bolt (~'execute ~args ~@impl-body)))))
-          definer (if params
-                    `(defn ~name [& args#]
-                       (clojure-bolt ~output-spec ~worker-name ~conf-fn-name args#))
-                    `(def ~name
-                       (clojure-bolt ~output-spec ~worker-name ~conf-fn-name []))
-                    )
-          ]
-      `(do
-         (defn ~conf-fn-name ~(if params params [])
-           ~conf-code
-           )
-         (defn ~worker-name ~(if params params [])
-           ~fn-body
-           )
-         ~definer
-         ))))
-
-(defmacro defspout [name output-spec & [opts & impl :as all]]
-  (if-not (map? opts)
-    `(defspout ~name ~output-spec {} ~@all)
-    (let [worker-name (symbol (str name "__"))
-          conf-fn-name (symbol (str name "__conf__"))
-          params (:params opts)
-          conf-code (:conf opts)
-          prepare? (:prepare opts)
-          prepare? (if (nil? prepare?) true prepare?)
-          fn-body (if prepare?
-                    (cons 'fn impl)
-                    (let [[args & impl-body] impl
-                          coll-sym (first args)
-                          prepargs [(gensym "conf") (gensym "context") coll-sym]]
-                      `(fn ~prepargs (spout (~'nextTuple [] ~@impl-body)))))
-          definer (if params
-                    `(defn ~name [& args#]
-                       (clojure-spout ~output-spec ~worker-name ~conf-fn-name args#))
-                    `(def ~name
-                       (clojure-spout ~output-spec ~worker-name ~conf-fn-name []))
-                    )
-          ]
-      `(do
-         (defn ~conf-fn-name ~(if params params [])
-           ~conf-code
-           )
-         (defn ~worker-name ~(if params params [])
-           ~fn-body
-           )
-         ~definer
-         ))))
-
-(defprotocol TupleValues
-  (tuple-values [values collector stream]))
-
-(extend-protocol TupleValues
-  java.util.Map
-  (tuple-values [this collector ^String stream]
-    (let [^TopologyContext context (:context collector)
-          fields (..  context (getThisOutputFields stream) toList) ]
-      (vec (map (into
-                  (empty this) (for [[k v] this]
-                                   [(if (keyword? k) (name k) k) v]))
-                fields))))
-  java.util.List
-  (tuple-values [this collector stream]
-    this))
-
-(defnk emit-bolt! [collector values
-                   :stream Utils/DEFAULT_STREAM_ID :anchor []]
-  (let [^List anchor (collectify anchor)
-        values (tuple-values values collector stream) ]
-    (.emit ^OutputCollector (:output-collector collector) stream anchor values)
-    ))
-
-(defnk emit-direct-bolt! [collector task values
-                          :stream Utils/DEFAULT_STREAM_ID :anchor []]
-  (let [^List anchor (collectify anchor)
-        values (tuple-values values collector stream) ]
-    (.emitDirect ^OutputCollector (:output-collector collector) task stream anchor values)
-    ))
-
-(defn ack! [collector ^Tuple tuple]
-  (.ack ^OutputCollector (:output-collector collector) tuple))
-
-(defn fail! [collector ^Tuple tuple]
-  (.fail ^OutputCollector (:output-collector collector) tuple))
-
-(defn report-error! [collector ^Tuple tuple]
-  (.reportError ^OutputCollector (:output-collector collector) tuple))
-
-(defnk emit-spout! [collector values
-                    :stream Utils/DEFAULT_STREAM_ID :id nil]
-  (let [values (tuple-values values collector stream)]
-    (.emit ^SpoutOutputCollector (:output-collector collector) stream values id)))
-
-(defnk emit-direct-spout! [collector task values
-                           :stream Utils/DEFAULT_STREAM_ID :id nil]
-  (let [values (tuple-values values collector stream)]
-    (.emitDirect ^SpoutOutputCollector (:output-collector collector) task stream values id)))
-
-(defalias topology thrift/mk-topology)
-(defalias bolt-spec thrift/mk-bolt-spec)
-(defalias spout-spec thrift/mk-spout-spec)
-(defalias shell-bolt-spec thrift/mk-shell-bolt-spec)
-(defalias shell-spout-spec thrift/mk-shell-spout-spec)
-
-(defn submit-remote-topology [name conf topology]
-  (StormSubmitter/submitTopology name conf topology))
-
-(defn local-cluster []
-  ;; do this to avoid a cyclic dependency of
-  ;; LocalCluster -> testing -> nimbus -> bootstrap -> clojure -> LocalCluster
-  (eval '(new backtype.storm.LocalCluster)))
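
A short sketch of the bolt DSL defined above, using the non-prepared defbolt form together with emit-bolt! and ack!; the bolt, stream, and component names are illustrative, and sentence-spout is assumed to be defined elsewhere with defspout:

    (defbolt exclaim-bolt ["word"] [tuple collector]
      (emit-bolt! collector [(str (.getString tuple 0) "!")] :anchor tuple)
      (ack! collector tuple))

    ;; wiring the bolt into a topology with the aliases defined above
    (topology
      {"sentences" (spout-spec sentence-spout)}
      {"exclaim"   (bolt-spec {"sentences" :shuffle} exclaim-bolt)})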

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/cluster.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/cluster.clj b/storm-core/src/clj/backtype/storm/cluster.clj
deleted file mode 100644
index ebe4955..0000000
--- a/storm-core/src/clj/backtype/storm/cluster.clj
+++ /dev/null
@@ -1,691 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-
-(ns backtype.storm.cluster
-  (:import [org.apache.zookeeper.data Stat ACL Id]
-           [backtype.storm.generated SupervisorInfo Assignment StormBase ClusterWorkerHeartbeat ErrorInfo Credentials NimbusSummary
-            LogConfig ProfileAction ProfileRequest NodeInfo]
-           [java.io Serializable])
-  (:import [org.apache.zookeeper KeeperException KeeperException$NoNodeException ZooDefs ZooDefs$Ids ZooDefs$Perms])
-  (:import [org.apache.curator.framework CuratorFramework])
-  (:import [backtype.storm.utils Utils])
-  (:import [backtype.storm.cluster ClusterState ClusterStateContext ClusterStateListener ConnectionState])
-  (:import [java.security MessageDigest])
-  (:import [org.apache.zookeeper.server.auth DigestAuthenticationProvider])
-  (:import [backtype.storm.nimbus NimbusInfo])
-  (:use [backtype.storm util log config converter])
-  (:require [backtype.storm [zookeeper :as zk]])
-  (:require [backtype.storm.daemon [common :as common]]))
-
-(defn mk-topo-only-acls
-  [topo-conf]
-  (let [payload (.get topo-conf STORM-ZOOKEEPER-TOPOLOGY-AUTH-PAYLOAD)]
-    (when (Utils/isZkAuthenticationConfiguredTopology topo-conf)
-      [(first ZooDefs$Ids/CREATOR_ALL_ACL)
-       (ACL. ZooDefs$Perms/READ (Id. "digest" (DigestAuthenticationProvider/generateDigest payload)))])))
- 
-(defnk mk-distributed-cluster-state
-  [conf :auth-conf nil :acls nil :context (ClusterStateContext.)]
-  (let [clazz (Class/forName (or (conf STORM-CLUSTER-STATE-STORE)
-                                 "backtype.storm.cluster_state.zookeeper_state_factory"))
-        state-instance (.newInstance clazz)]
-    (log-debug "Creating cluster state: " (.toString clazz))
-    (or (.mkState state-instance conf auth-conf acls context)
-        nil)))
-
-(defprotocol StormClusterState
-  (assignments [this callback])
-  (assignment-info [this storm-id callback])
-  (assignment-info-with-version [this storm-id callback])
-  (assignment-version [this storm-id callback])
-  ;returns key information under /storm/blobstore/key
-  (blobstore-info [this blob-key])
-  ;returns list of nimbus summaries stored under /stormroot/nimbuses/<nimbus-ids> -> <data>
-  (nimbuses [this])
-  ;adds the NimbusSummary to /stormroot/nimbuses/nimbus-id
-  (add-nimbus-host! [this nimbus-id nimbus-summary])
-
-  (active-storms [this])
-  (storm-base [this storm-id callback])
-  (get-worker-heartbeat [this storm-id node port])
-  (get-worker-profile-requests [this storm-id nodeinfo thrift?])
-  (get-topology-profile-requests [this storm-id thrift?])
-  (set-worker-profile-request [this storm-id profile-request])
-  (delete-topology-profile-requests [this storm-id profile-request])
-  (executor-beats [this storm-id executor->node+port])
-  (supervisors [this callback])
-  (supervisor-info [this supervisor-id]) ;; returns nil if doesn't exist
-  (setup-heartbeats! [this storm-id])
-  (teardown-heartbeats! [this storm-id])
-  (teardown-topology-errors! [this storm-id])
-  (heartbeat-storms [this])
-  (error-topologies [this])
-  (set-topology-log-config! [this storm-id log-config])
-  (topology-log-config [this storm-id cb])
-  (worker-heartbeat! [this storm-id node port info])
-  (remove-worker-heartbeat! [this storm-id node port])
-  (supervisor-heartbeat! [this supervisor-id info])
-  (worker-backpressure! [this storm-id node port info])
-  (topology-backpressure [this storm-id callback])
-  (setup-backpressure! [this storm-id])
-  (remove-worker-backpressure! [this storm-id node port])
-  (activate-storm! [this storm-id storm-base])
-  (update-storm! [this storm-id new-elems])
-  (remove-storm-base! [this storm-id])
-  (set-assignment! [this storm-id info])
-  ;; sets up information related to key consisting of nimbus
-  ;; host:port and version info of the blob
-  (setup-blobstore! [this key nimbusInfo versionInfo])
-  (active-keys [this])
-  (blobstore [this callback])
-  (remove-storm! [this storm-id])
-  (remove-blobstore-key! [this blob-key])
-  (remove-key-version! [this blob-key])
-  (report-error [this storm-id component-id node port error])
-  (errors [this storm-id component-id])
-  (last-error [this storm-id component-id])
-  (set-credentials! [this storm-id creds topo-conf])
-  (credentials [this storm-id callback])
-  (disconnect [this]))
-
-(def ASSIGNMENTS-ROOT "assignments")
-(def CODE-ROOT "code")
-(def STORMS-ROOT "storms")
-(def SUPERVISORS-ROOT "supervisors")
-(def WORKERBEATS-ROOT "workerbeats")
-(def BACKPRESSURE-ROOT "backpressure")
-(def ERRORS-ROOT "errors")
-(def BLOBSTORE-ROOT "blobstore")
-; Stores the latest update sequence for a blob
-(def BLOBSTORE-MAX-KEY-SEQUENCE-NUMBER-ROOT "blobstoremaxkeysequencenumber")
-(def NIMBUSES-ROOT "nimbuses")
-(def CREDENTIALS-ROOT "credentials")
-(def LOGCONFIG-ROOT "logconfigs")
-(def PROFILERCONFIG-ROOT "profilerconfigs")
-
-(def ASSIGNMENTS-SUBTREE (str "/" ASSIGNMENTS-ROOT))
-(def STORMS-SUBTREE (str "/" STORMS-ROOT))
-(def SUPERVISORS-SUBTREE (str "/" SUPERVISORS-ROOT))
-(def WORKERBEATS-SUBTREE (str "/" WORKERBEATS-ROOT))
-(def BACKPRESSURE-SUBTREE (str "/" BACKPRESSURE-ROOT))
-(def ERRORS-SUBTREE (str "/" ERRORS-ROOT))
-;; Blobstore subtree /storm/blobstore
-(def BLOBSTORE-SUBTREE (str "/" BLOBSTORE-ROOT))
-(def BLOBSTORE-MAX-KEY-SEQUENCE-NUMBER-SUBTREE (str "/" BLOBSTORE-MAX-KEY-SEQUENCE-NUMBER-ROOT))
-(def NIMBUSES-SUBTREE (str "/" NIMBUSES-ROOT))
-(def CREDENTIALS-SUBTREE (str "/" CREDENTIALS-ROOT))
-(def LOGCONFIG-SUBTREE (str "/" LOGCONFIG-ROOT))
-(def PROFILERCONFIG-SUBTREE (str "/" PROFILERCONFIG-ROOT))
-
-(defn supervisor-path
-  [id]
-  (str SUPERVISORS-SUBTREE "/" id))
-
-(defn assignment-path
-  [id]
-  (str ASSIGNMENTS-SUBTREE "/" id))
-
-(defn blobstore-path
-  [key]
-  (str BLOBSTORE-SUBTREE "/" key))
-
-(defn blobstore-max-key-sequence-number-path
-  [key]
-  (str BLOBSTORE-MAX-KEY-SEQUENCE-NUMBER-SUBTREE "/" key))
-
-(defn nimbus-path
-  [id]
-  (str NIMBUSES-SUBTREE "/" id))
-
-(defn storm-path
-  [id]
-  (str STORMS-SUBTREE "/" id))
-
-(defn workerbeat-storm-root
-  [storm-id]
-  (str WORKERBEATS-SUBTREE "/" storm-id))
-
-(defn workerbeat-path
-  [storm-id node port]
-  (str (workerbeat-storm-root storm-id) "/" node "-" port))
-
-(defn backpressure-storm-root
-  [storm-id]
-  (str BACKPRESSURE-SUBTREE "/" storm-id))
-
-(defn backpressure-path
-  [storm-id node port]
-  (str (backpressure-storm-root storm-id) "/" node "-" port))
-
-(defn error-storm-root
-  [storm-id]
-  (str ERRORS-SUBTREE "/" storm-id))
-
-(defn error-path
-  [storm-id component-id]
-  (str (error-storm-root storm-id) "/" (url-encode component-id)))
-
-(def last-error-path-seg "last-error")
-
-(defn last-error-path
-  [storm-id component-id]
-  (str (error-storm-root storm-id)
-       "/"
-       (url-encode component-id)
-       "-"
-       last-error-path-seg))
-
-(defn credentials-path
-  [storm-id]
-  (str CREDENTIALS-SUBTREE "/" storm-id))
-
-(defn log-config-path
-  [storm-id]
-  (str LOGCONFIG-SUBTREE "/" storm-id))
-
-(defn profiler-config-path
-  ([storm-id]
-   (str PROFILERCONFIG-SUBTREE "/" storm-id))
-  ([storm-id host port request-type]
-   (str (profiler-config-path storm-id) "/" host "_" port "_" request-type)))
-
-(defn- issue-callback!
-  [cb-atom]
-  (let [cb @cb-atom]
-    (reset! cb-atom nil)
-    (when cb
-      (cb))))
-
-(defn- issue-map-callback!
-  [cb-atom id]
-  (let [cb (@cb-atom id)]
-    (swap! cb-atom dissoc id)
-    (when cb
-      (cb id))))
-
-(defn- maybe-deserialize
-  [ser clazz]
-  (when ser
-    (Utils/deserialize ser clazz)))
-
-(defrecord TaskError [error time-secs host port])
-
-(defn- parse-error-path
-  [^String p]
-  (Long/parseLong (.substring p 1)))
-
-(defn convert-executor-beats
-  "Ensures that we only return heartbeats for executors assigned to
-  this worker."
-  [executors worker-hb]
-  (let [executor-stats (:executor-stats worker-hb)]
-    (->> executors
-         (map (fn [t]
-                (if (contains? executor-stats t)
-                  {t {:time-secs (:time-secs worker-hb)
-                      :uptime (:uptime worker-hb)
-                      :stats (get executor-stats t)}})))
-         (into {}))))
-
-;; Watches should be used for optimization. When ZK is reconnecting, they're not guaranteed to be called.
-(defnk mk-storm-cluster-state
-  [cluster-state-spec :acls nil :context (ClusterStateContext.)]
-  (let [[solo? cluster-state] (if (instance? ClusterState cluster-state-spec)
-                                [false cluster-state-spec]
-                                [true (mk-distributed-cluster-state cluster-state-spec :auth-conf cluster-state-spec :acls acls :context context)])
-        assignment-info-callback (atom {})
-        assignment-info-with-version-callback (atom {})
-        assignment-version-callback (atom {})
-        supervisors-callback (atom nil)
-        backpressure-callback (atom {})   ;; we want to register a topo directory getChildren callback for all workers of this dir
-        assignments-callback (atom nil)
-        storm-base-callback (atom {})
-        blobstore-callback (atom nil)
-        credentials-callback (atom {})
-        log-config-callback (atom {})
-        state-id (.register
-                  cluster-state
-                  (fn [type path]
-                    (let [[subtree & args] (tokenize-path path)]
-                      (condp = subtree
-                         ASSIGNMENTS-ROOT (if (empty? args)
-                                             (issue-callback! assignments-callback)
-                                             (do
-                                               (issue-map-callback! assignment-info-callback (first args))
-                                               (issue-map-callback! assignment-version-callback (first args))
-                                               (issue-map-callback! assignment-info-with-version-callback (first args))))
-                         SUPERVISORS-ROOT (issue-callback! supervisors-callback)
-                         BLOBSTORE-ROOT (issue-callback! blobstore-callback) ;; callback register for blobstore
-                         STORMS-ROOT (issue-map-callback! storm-base-callback (first args))
-                         CREDENTIALS-ROOT (issue-map-callback! credentials-callback (first args))
-                         LOGCONFIG-ROOT (issue-map-callback! log-config-callback (first args))
-                         BACKPRESSURE-ROOT (issue-map-callback! backpressure-callback (first args))
-                         ;; this should never happen
-                         (exit-process! 30 "Unknown callback for subtree " subtree args)))))]
-    (doseq [p [ASSIGNMENTS-SUBTREE STORMS-SUBTREE SUPERVISORS-SUBTREE WORKERBEATS-SUBTREE ERRORS-SUBTREE BLOBSTORE-SUBTREE NIMBUSES-SUBTREE
-               LOGCONFIG-SUBTREE]]
-      (.mkdirs cluster-state p acls))
-    (reify
-      StormClusterState
-
-      (assignments
-        [this callback]
-        (when callback
-          (reset! assignments-callback callback))
-        (.get_children cluster-state ASSIGNMENTS-SUBTREE (not-nil? callback)))
-
-      (assignment-info
-        [this storm-id callback]
-        (when callback
-          (swap! assignment-info-callback assoc storm-id callback))
-        (clojurify-assignment (maybe-deserialize (.get_data cluster-state (assignment-path storm-id) (not-nil? callback)) Assignment)))
-
-      (assignment-info-with-version 
-        [this storm-id callback]
-        (when callback
-          (swap! assignment-info-with-version-callback assoc storm-id callback))
-        (let [{data :data version :version} 
-              (.get_data_with_version cluster-state (assignment-path storm-id) (not-nil? callback))]
-        {:data (clojurify-assignment (maybe-deserialize data Assignment))
-         :version version}))
-
-      (assignment-version 
-        [this storm-id callback]
-        (when callback
-          (swap! assignment-version-callback assoc storm-id callback))
-        (.get_version cluster-state (assignment-path storm-id) (not-nil? callback)))
-
-      ;; blobstore state
-      (blobstore
-        [this callback]
-        (when callback
-          (reset! blobstore-callback callback))
-        (.sync_path cluster-state BLOBSTORE-SUBTREE)
-        (.get_children cluster-state BLOBSTORE-SUBTREE (not-nil? callback)))
-
-      (nimbuses
-        [this]
-        (map #(maybe-deserialize (.get_data cluster-state (nimbus-path %1) false) NimbusSummary)
-          (.get_children cluster-state NIMBUSES-SUBTREE false)))
-
-      (add-nimbus-host!
-        [this nimbus-id nimbus-summary]
-        ;explicit delete for ephemeral node to ensure this session creates the entry.
-        (.delete_node cluster-state (nimbus-path nimbus-id))
-
-        (.add_listener cluster-state (reify ClusterStateListener
-                        (^void stateChanged[this ^ConnectionState newState]
-                          (log-message "Connection state listener invoked, zookeeper connection state has changed to " newState)
-                          (if (.equals newState ConnectionState/RECONNECTED)
-                            (do
-                              (log-message "Connection state has changed to reconnected so setting nimbuses entry one more time")
-                              (.set_ephemeral_node cluster-state (nimbus-path nimbus-id) (Utils/serialize nimbus-summary) acls))))))
-        
-        (.set_ephemeral_node cluster-state (nimbus-path nimbus-id) (Utils/serialize nimbus-summary) acls))
-
-      (setup-blobstore!
-        [this key nimbusInfo versionInfo]
-        (let [path (str (blobstore-path key) "/" (.toHostPortString nimbusInfo) "-" versionInfo)]
-          (log-message "setup-path" path)
-          (.mkdirs cluster-state (blobstore-path key) acls)
-          ;we delete the node first to ensure the node gets created as part of this session only.
-          (.delete_node_blobstore cluster-state (str (blobstore-path key)) (.toHostPortString nimbusInfo))
-          (.set_ephemeral_node cluster-state path nil acls)))
-
-      (blobstore-info
-        [this blob-key]
-        (let [path (blobstore-path blob-key)]
-          (.sync_path cluster-state path)
-          (.get_children cluster-state path false)))
-
-      (active-storms
-        [this]
-        (.get_children cluster-state STORMS-SUBTREE false))
-
-      (active-keys
-        [this]
-        (.get_children cluster-state BLOBSTORE-SUBTREE false))
-
-      (heartbeat-storms
-        [this]
-        (.get_worker_hb_children cluster-state WORKERBEATS-SUBTREE false))
-
-      (error-topologies
-        [this]
-        (.get_children cluster-state ERRORS-SUBTREE false))
-
-      (get-worker-heartbeat
-        [this storm-id node port]
-        (let [worker-hb (.get_worker_hb cluster-state (workerbeat-path storm-id node port) false)]
-          (if worker-hb
-            (-> worker-hb
-              (maybe-deserialize ClusterWorkerHeartbeat)
-              clojurify-zk-worker-hb))))
-
-      (executor-beats
-        [this storm-id executor->node+port]
-        ;; need to take executor->node+port in explicitly so that we don't run into a situation where a
-        ;; long dead worker with a skewed clock overrides all the timestamps. By only checking heartbeats
-        ;; with an assigned node+port, and only reading executors from that heartbeat that are actually assigned,
-        ;; we avoid situations like that
-        (let [node+port->executors (reverse-map executor->node+port)
-              all-heartbeats (for [[[node port] executors] node+port->executors]
-                               (->> (get-worker-heartbeat this storm-id node port)
-                                    (convert-executor-beats executors)
-                                    ))]
-          (apply merge all-heartbeats)))
-
-      (supervisors
-        [this callback]
-        (when callback
-          (reset! supervisors-callback callback))
-        (.get_children cluster-state SUPERVISORS-SUBTREE (not-nil? callback)))
-
-      (supervisor-info
-        [this supervisor-id]
-        (clojurify-supervisor-info (maybe-deserialize (.get_data cluster-state (supervisor-path supervisor-id) false) SupervisorInfo)))
-
-      (topology-log-config
-        [this storm-id cb]
-        (when cb
-          (swap! log-config-callback assoc storm-id cb))
-        (maybe-deserialize (.get_data cluster-state (log-config-path storm-id) (not-nil? cb)) LogConfig))
-
-      (set-topology-log-config!
-        [this storm-id log-config]
-        (.set_data cluster-state (log-config-path storm-id) (Utils/serialize log-config) acls))
-
-      (set-worker-profile-request
-        [this storm-id profile-request]
-        (let [request-type (.get_action profile-request)
-              host (.get_node (.get_nodeInfo profile-request))
-              port (first (.get_port (.get_nodeInfo profile-request)))]
-          (.set_data cluster-state
-                     (profiler-config-path storm-id host port request-type)
-                     (Utils/serialize profile-request)
-                     acls)))
-
-      (get-topology-profile-requests
-        [this storm-id thrift?]
-        (let [path (profiler-config-path storm-id)
-              requests (if (.node_exists cluster-state path false)
-                         (dofor [c (.get_children cluster-state path false)]
-                                (let [raw (.get_data cluster-state (str path "/" c) false)
-                                      request (maybe-deserialize raw ProfileRequest)]
-                                      (if thrift?
-                                        request
-                                        (clojurify-profile-request request)))))]
-          requests))
-
-      (delete-topology-profile-requests
-        [this storm-id profile-request]
-        (let [profile-request-inst (thriftify-profile-request profile-request)
-              action (:action profile-request)
-              host (:host profile-request)
-              port (:port profile-request)]
-          (.delete_node cluster-state
-           (profiler-config-path storm-id host port action))))
-          
-      (get-worker-profile-requests
-        [this storm-id node-info thrift?]
-        (let [host (:host node-info)
-              port (:port node-info)
-              profile-requests (get-topology-profile-requests this storm-id thrift?)]
-          (if thrift?
-            (filter #(and (= host (.get_node (.get_nodeInfo %))) (= port (first (.get_port (.get_nodeInfo  %)))))
-                    profile-requests)
-            (filter #(and (= host (:host %)) (= port (:port %)))
-                    profile-requests))))
-      
-      (worker-heartbeat!
-        [this storm-id node port info]
-        (let [thrift-worker-hb (thriftify-zk-worker-hb info)]
-          (if thrift-worker-hb
-            (.set_worker_hb cluster-state (workerbeat-path storm-id node port) (Utils/serialize thrift-worker-hb) acls))))
-
-      (remove-worker-heartbeat!
-        [this storm-id node port]
-        (.delete_worker_hb cluster-state (workerbeat-path storm-id node port)))
-
-      (setup-heartbeats!
-        [this storm-id]
-        (.mkdirs cluster-state (workerbeat-storm-root storm-id) acls))
-
-      (teardown-heartbeats!
-        [this storm-id]
-        (try-cause
-          (.delete_worker_hb cluster-state (workerbeat-storm-root storm-id))
-          (catch KeeperException e
-            (log-warn-error e "Could not teardown heartbeats for " storm-id))))
-
-      (worker-backpressure!
-        [this storm-id node port on?]
-        "if znode exists and to be not on?, delete; if exists and on?, do nothing;
-        if not exists and to be on?, create; if not exists and not on?, do nothing"
-        (let [path (backpressure-path storm-id node port)
-              existed (.node_exists cluster-state path false)]
-          (if existed
-            (if (not on?)
-              (.delete_node cluster-state path))   ;; delete the znode since the worker is not congested
-            (if on?
-              (.set_ephemeral_node cluster-state path nil acls))))) ;; create the znode since worker is congested
-    
-      (topology-backpressure
-        [this storm-id callback]
-        "if the backpresure/storm-id dir is empty, this topology has throttle-on, otherwise not."
-        (when callback
-          (swap! backpressure-callback assoc storm-id callback))
-        (let [path (backpressure-storm-root storm-id)
-              children (.get_children cluster-state path (not-nil? callback))]
-              (> (count children) 0)))
-      
-      (setup-backpressure!
-        [this storm-id]
-        (.mkdirs cluster-state (backpressure-storm-root storm-id) acls))
-
-      (remove-worker-backpressure!
-        [this storm-id node port]
-        (.delete_node cluster-state (backpressure-path storm-id node port)))
-
-      (teardown-topology-errors!
-        [this storm-id]
-        (try-cause
-          (.delete_node cluster-state (error-storm-root storm-id))
-          (catch KeeperException e
-            (log-warn-error e "Could not teardown errors for " storm-id))))
-
-      (supervisor-heartbeat!
-        [this supervisor-id info]
-        (let [thrift-supervisor-info (thriftify-supervisor-info info)]
-          (.set_ephemeral_node cluster-state (supervisor-path supervisor-id) (Utils/serialize thrift-supervisor-info) acls)))
-
-      (activate-storm!
-        [this storm-id storm-base]
-        (let [thrift-storm-base (thriftify-storm-base storm-base)]
-          (.set_data cluster-state (storm-path storm-id) (Utils/serialize thrift-storm-base) acls)))
-
-      (update-storm!
-        [this storm-id new-elems]
-        (let [base (storm-base this storm-id nil)
-              executors (:component->executors base)
-              component->debug (:component->debug base)
-              new-elems (update new-elems :component->executors (partial merge executors))
-              new-elems (update new-elems :component->debug (partial merge-with merge component->debug))]
-          (.set_data cluster-state (storm-path storm-id)
-                    (-> base
-                        (merge new-elems)
-                        thriftify-storm-base
-                        Utils/serialize)
-                    acls)))
-
-      (storm-base
-        [this storm-id callback]
-        (when callback
-          (swap! storm-base-callback assoc storm-id callback))
-        (clojurify-storm-base (maybe-deserialize (.get_data cluster-state (storm-path storm-id) (not-nil? callback)) StormBase)))
-
-      (remove-storm-base!
-        [this storm-id]
-        (.delete_node cluster-state (storm-path storm-id)))
-
-      (set-assignment!
-        [this storm-id info]
-        (let [thrift-assignment (thriftify-assignment info)]
-          (.set_data cluster-state (assignment-path storm-id) (Utils/serialize thrift-assignment) acls)))
-
-      (remove-blobstore-key!
-        [this blob-key]
-        (log-debug "removing key" blob-key)
-        (.delete_node cluster-state (blobstore-path blob-key)))
-
-      (remove-key-version!
-        [this blob-key]
-        (.delete_node cluster-state (blobstore-max-key-sequence-number-path blob-key)))
-
-      (remove-storm!
-        [this storm-id]
-        (.delete_node cluster-state (assignment-path storm-id))
-        (.delete_node cluster-state (credentials-path storm-id))
-        (.delete_node cluster-state (log-config-path storm-id))
-        (.delete_node cluster-state (profiler-config-path storm-id))
-        (remove-storm-base! this storm-id))
-
-      (set-credentials!
-         [this storm-id creds topo-conf]
-         (let [topo-acls (mk-topo-only-acls topo-conf)
-               path (credentials-path storm-id)
-               thriftified-creds (thriftify-credentials creds)]
-           (.set_data cluster-state path (Utils/serialize thriftified-creds) topo-acls)))
-
-      (credentials
-        [this storm-id callback]
-        (when callback
-          (swap! credentials-callback assoc storm-id callback))
-        (clojurify-crdentials (maybe-deserialize (.get_data cluster-state (credentials-path storm-id) (not-nil? callback)) Credentials)))
-
-      (report-error
-         [this storm-id component-id node port error]
-         (let [path (error-path storm-id component-id)
-               last-error-path (last-error-path storm-id component-id)
-               data (thriftify-error {:time-secs (current-time-secs) :error (stringify-error error) :host node :port port})
-               _ (.mkdirs cluster-state path acls)
-               ser-data (Utils/serialize data)
-               _ (.mkdirs cluster-state path acls)
-               _ (.create_sequential cluster-state (str path "/e") ser-data acls)
-               _ (.set_data cluster-state last-error-path ser-data acls)
-               to-kill (->> (.get_children cluster-state path false)
-                            (sort-by parse-error-path)
-                            reverse
-                            (drop 10))]
-           (doseq [k to-kill]
-             (.delete_node cluster-state (str path "/" k)))))
-
-      (errors
-         [this storm-id component-id]
-         (let [path (error-path storm-id component-id)
-               errors (if (.node_exists cluster-state path false)
-                        (dofor [c (.get_children cluster-state path false)]
-                          (if-let [data (-> (.get_data cluster-state
-                                                      (str path "/" c)
-                                                      false)
-                                          (maybe-deserialize ErrorInfo)
-                                          clojurify-error)]
-                            (map->TaskError data)))
-                        ())]
-           (->> (filter not-nil? errors)
-                (sort-by (comp - :time-secs)))))
-
-      (last-error
-        [this storm-id component-id]
-        (let [path (last-error-path storm-id component-id)]
-          (if (.node_exists cluster-state path false)
-            (if-let [data (-> (.get_data cluster-state path false)
-                              (maybe-deserialize ErrorInfo)
-                              clojurify-error)]
-              (map->TaskError data)))))
-      
-      (disconnect
-         [this]
-        (.unregister cluster-state state-id)
-        (when solo?
-          (.close cluster-state))))))
-
-;; daemons have a single thread that will respond to events
-;; start with initialize event
-;; callbacks add events to the thread's queue
-
-;; keeps an in-memory cache of the state, only for what the client subscribes to. Any subscription is automatically kept in sync, and when there are changes, the client is notified.
-;; master gives orders through state, and client records status in state (ephemerally)
-
-;; master tells nodes what workers to launch
-
-;; master writes this. supervisors and workers subscribe to this to understand the complete topology. each storm is a map from nodes to workers to tasks to ports; whenever the topology changes everyone will be notified
-;; master includes timestamp of each assignment so that appropriate time can be given to each worker to start up
-;; /assignments/{storm id}
-
-;; which tasks they talk to, etc. (immutable until shutdown)
-;; everyone reads this in full to understand structure
-;; /tasks/{storm id}/{task id} ; just contains bolt id
-
-;; supervisors send heartbeats here, master doesn't subscribe but checks asynchronously
-;; /supervisors/status/{ephemeral node ids}  ;; node metadata such as port ranges are kept here
-
-;; tasks send heartbeats here, master doesn't subscribe, just checks asynchronously
-;; /taskbeats/{storm id}/{ephemeral task id}
-
-;; contains data about whether it's started or not; tasks and workers subscribe to a specific storm here to know when to shut down
-;; master manipulates
-;; /storms/{storm id}
-
-;; Zookeeper flows:
-
-;; Master:
-;; job submit:
-;; 1. read which nodes are available
-;; 2. set up the worker/{storm}/{task} stuff (static)
-;; 3. set assignments
-;; 4. start storm - necessary in case master goes down, when goes back up can remember to take down the storm (2 states: on or off)
-
-;; Monitoring (or by checking when nodes go down or heartbeats aren't received):
-;; 1. read assignment
-;; 2. see which tasks/nodes are up
-;; 3. make new assignment to fix any problems
-;; 4. if a storm exists but is not taken down fully, ensure that storm takedown is launched (step by step remove tasks and finally remove assignments)
-
-;; master's only possible watches are on ephemeral nodes and tasks, and maybe not even those
-
-;; Supervisor:
-;; 1. monitor /storms/* and assignments
-;; 2. local state about which workers are local
-;; 3. when storm is on, check that workers are running locally & start/kill if different than assignments
-;; 4. when storm is off, monitor tasks for workers - when they all die or don't heartbeat, kill the process and clean up
-
-;; Worker:
-;; 1. On startup, start the tasks if the storm is on
-
-;; Task:
-;; 1. monitor assignments, reroute when assignments change
-;; 2. monitor storm (when storm turns off, error if assignments change) - take down tasks as master turns them off
-
-;; locally on supervisor: workers write pids locally on startup, supervisor deletes them on shutdown (associates pid with worker name)
-;; supervisor periodically checks to make sure processes are alive
-;; {rootdir}/workers/{storm id}/{worker id}   ;; contains pid inside
-
-;; all tasks in a worker share the same cluster state
-;; workers, supervisors, and tasks subscribes to storm to know when it's started or stopped
-;; on stopped, master removes records in order (tasks need to subscribe to themselves to see if they disappear)
-;; when a master removes a worker, the supervisor should kill it (and escalate to kill -9)
-;; on shutdown, tasks subscribe to tasks that send data to them to wait for them to die. when node disappears, they can die
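
A minimal sketch of obtaining and querying the StormClusterState protocol defined above; it assumes the backtype.storm.cluster and config namespaces are referred and that the local storm config points at a reachable ZooKeeper:

    (let [conf  (read-storm-config)
          state (mk-storm-cluster-state conf)]
      (try
        (println "active storms:" (active-storms state))
        (println "supervisors:"   (supervisors state nil))
        (finally
          (disconnect state))))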

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/cluster_state/zookeeper_state_factory.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/cluster_state/zookeeper_state_factory.clj b/storm-core/src/clj/backtype/storm/cluster_state/zookeeper_state_factory.clj
deleted file mode 100644
index fa36240..0000000
--- a/storm-core/src/clj/backtype/storm/cluster_state/zookeeper_state_factory.clj
+++ /dev/null
@@ -1,161 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-
-(ns backtype.storm.cluster-state.zookeeper-state-factory
-  (:import [org.apache.curator.framework.state ConnectionStateListener])
-  (:import [org.apache.zookeeper KeeperException$NoNodeException]
-           [backtype.storm.cluster ClusterState DaemonType])
-  (:use [backtype.storm cluster config log util])
-  (:require [backtype.storm [zookeeper :as zk]])
-  (:gen-class
-   :implements [backtype.storm.cluster.ClusterStateFactory]))
-
-(defn -mkState [this conf auth-conf acls context]
-  (let [zk (zk/mk-client conf (conf STORM-ZOOKEEPER-SERVERS) (conf STORM-ZOOKEEPER-PORT) :auth-conf auth-conf)]
-    (zk/mkdirs zk (conf STORM-ZOOKEEPER-ROOT) acls)
-    (.close zk))
-  (let [callbacks (atom {})
-        active (atom true)
-        zk-writer (zk/mk-client conf
-                         (conf STORM-ZOOKEEPER-SERVERS)
-                         (conf STORM-ZOOKEEPER-PORT)
-                         :auth-conf auth-conf
-                         :root (conf STORM-ZOOKEEPER-ROOT)
-                         :watcher (fn [state type path]
-                                    (when @active
-                                      (when-not (= :connected state)
-                                        (log-warn "Received event " state ":" type ":" path " with disconnected Writer Zookeeper."))
-                                      (when-not (= :none type)
-                                        (doseq [callback (vals @callbacks)]
-                                          (callback type path))))))
-        is-nimbus? (= (.getDaemonType context) DaemonType/NIMBUS)
-        zk-reader (if is-nimbus?
-                    (zk/mk-client conf
-                         (conf STORM-ZOOKEEPER-SERVERS)
-                         (conf STORM-ZOOKEEPER-PORT)
-                         :auth-conf auth-conf
-                         :root (conf STORM-ZOOKEEPER-ROOT)
-                         :watcher (fn [state type path]
-                                    (when @active
-                                      (when-not (= :connected state)
-                                        (log-warn "Received event " state ":" type ":" path " with disconnected Reader Zookeeper."))
-                                      (when-not (= :none type)
-                                        (doseq [callback (vals @callbacks)]
-                                          (callback type path))))))
-                    zk-writer)]
-    (reify
-     ClusterState
-
-     (register
-       [this callback]
-       (let [id (uuid)]
-         (swap! callbacks assoc id callback)
-         id))
-
-     (unregister
-       [this id]
-       (swap! callbacks dissoc id))
-
-     (set-ephemeral-node
-       [this path data acls]
-       (zk/mkdirs zk-writer (parent-path path) acls)
-       (if (zk/exists zk-writer path false)
-         (try-cause
-           (zk/set-data zk-writer path data) ; should verify that it's ephemeral
-           (catch KeeperException$NoNodeException e
-             (log-warn-error e "Ephemeral node disappeared between checking for existing and setting data")
-             (zk/create-node zk-writer path data :ephemeral acls)))
-         (zk/create-node zk-writer path data :ephemeral acls)))
-
-     (create-sequential
-       [this path data acls]
-       (zk/create-node zk-writer path data :sequential acls))
-
-     (set-data
-       [this path data acls]
-       ;; note: this does not turn off any existing watches
-       (if (zk/exists zk-writer path false)
-         (zk/set-data zk-writer path data)
-         (do
-           (zk/mkdirs zk-writer (parent-path path) acls)
-           (zk/create-node zk-writer path data :persistent acls))))
-
-     (set-worker-hb
-       [this path data acls]
-       (.set_data this path data acls))
-
-     (delete-node
-       [this path]
-       (zk/delete-node zk-writer path))
-
-     (delete-worker-hb
-       [this path]
-       (.delete_node this path))
-
-     (get-data
-       [this path watch?]
-       (zk/get-data zk-reader path watch?))
-
-     (get-data-with-version
-       [this path watch?]
-       (zk/get-data-with-version zk-reader path watch?))
-
-     (get-version
-       [this path watch?]
-       (zk/get-version zk-reader path watch?))
-
-     (get-worker-hb
-       [this path watch?]
-       (.get_data this path watch?))
-
-     (get-children
-       [this path watch?]
-       (zk/get-children zk-reader path watch?))
-
-     (get-worker-hb-children
-       [this path watch?]
-       (.get_children this path watch?))
-
-     (mkdirs
-       [this path acls]
-       (zk/mkdirs zk-writer path acls))
-
-     (node-exists
-       [this path watch?]
-       (zk/exists-node? zk-reader path watch?))
-
-     (add-listener
-       [this listener]
-       (let [curator-listener (reify ConnectionStateListener
-                                (stateChanged
-                                  [this client newState]
-                                  (.stateChanged listener client newState)))]
-         (zk/add-listener zk-reader curator-listener)))
-
-     (sync-path
-       [this path]
-       (zk/sync-path zk-writer path))
-
-      (delete-node-blobstore
-        [this path nimbus-host-port-info]
-        (zk/delete-node-blobstore zk-writer path nimbus-host-port-info))
-
-     (close
-       [this]
-       (reset! active false)
-       (.close zk-writer)
-       (if is-nimbus?
-         (.close zk-reader))))))
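
For context, a sketch of how a ClusterStateFactory such as the one above is looked up and instantiated, mirroring mk-distributed-cluster-state in cluster.clj; the nil acls and default context are assumptions for illustration:

    (let [conf    (read-storm-config)
          clazz   (Class/forName (or (conf STORM-CLUSTER-STATE-STORE)
                                     "backtype.storm.cluster_state.zookeeper_state_factory"))
          factory (.newInstance clazz)
          state   (.mkState factory conf conf nil
                            (backtype.storm.cluster.ClusterStateContext.))]
      ;; state now implements backtype.storm.cluster.ClusterState
      (.close state))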

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/command/activate.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/command/activate.clj b/storm-core/src/clj/backtype/storm/command/activate.clj
deleted file mode 100644
index 500e981..0000000
--- a/storm-core/src/clj/backtype/storm/command/activate.clj
+++ /dev/null
@@ -1,24 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.command.activate
-  (:use [backtype.storm thrift log])
-  (:gen-class))
-
-(defn -main [name] 
-  (with-configured-nimbus-connection nimbus
-    (.activate nimbus name)
-    (log-message "Activated topology: " name)
-    ))
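
The command above is normally reached through the storm command line client; an equivalent REPL invocation, with a placeholder topology name, would look roughly like:

    (require 'backtype.storm.command.activate)
    (backtype.storm.command.activate/-main "my-topology")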

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/command/blobstore.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/command/blobstore.clj b/storm-core/src/clj/backtype/storm/command/blobstore.clj
deleted file mode 100644
index ae7f919..0000000
--- a/storm-core/src/clj/backtype/storm/command/blobstore.clj
+++ /dev/null
@@ -1,162 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.command.blobstore
-  (:import [java.io InputStream OutputStream]
-           [backtype.storm.generated SettableBlobMeta AccessControl AuthorizationException
-            KeyNotFoundException]
-           [backtype.storm.blobstore BlobStoreAclHandler])
-  (:use [backtype.storm config]
-        [clojure.string :only [split]]
-        [clojure.tools.cli :only [cli]]
-        [clojure.java.io :only [copy input-stream output-stream]]
-        [backtype.storm blobstore log util])
-  (:gen-class))
-
-(defn update-blob-from-stream
-  "Update a blob in the blob store from an InputStream"
-  [key ^InputStream in]
-  (with-configured-blob-client blobstore
-    (let [out (.updateBlob blobstore key)]
-      (try 
-        (copy in out)
-        (.close out)
-        (catch Exception e
-          (log-message e)
-          (.cancel out)
-          (throw e))))))
-
-(defn create-blob-from-stream
-  "Create a blob in the blob store from an InputStream"
-  [key ^InputStream in ^SettableBlobMeta meta]
-  (with-configured-blob-client blobstore
-    (let [out (.createBlob blobstore key meta)]
-      (try 
-        (copy in out)
-        (.close out)
-        (catch Exception e
-          (.cancel out)
-          (throw e))))))
-
-(defn read-blob
-  "Read a blob in the blob store and write to an OutputStream"
-  [key ^OutputStream out]
-  (with-configured-blob-client blobstore
-    (with-open [in (.getBlob blobstore key)]
-      (copy in out))))
-
-(defn as-access-control
-  "Convert a parameter to an AccessControl object"
-  [param]
-  (BlobStoreAclHandler/parseAccessControl (str param)))
-
-(defn as-acl
-  [param]
-  (map as-access-control (split param #",")))
-
-(defn access-control-str
-  [^AccessControl acl]
-  (BlobStoreAclHandler/accessControlToString acl))
-
-(defn read-cli [args]
-  (let [[{file :file} [key] _] (cli args ["-f" "--file" :default nil])]
-    (if file
-      (with-open [f (output-stream file)]
-        (read-blob key f))
-      (read-blob key System/out))))
-
-(defn update-cli [args]
-  (let [[{file :file} [key] _] (cli args ["-f" "--file" :default nil])]
-    (if file
-      (with-open [f (input-stream file)]
-        (update-blob-from-stream key f))
-      (update-blob-from-stream key System/in))
-    (log-message "Successfully updated " key)))
-
-(defn create-cli [args]
-  (let [[{file :file acl :acl replication-factor :replication-factor} [key] _] (cli args ["-f" "--file" :default nil]
-                                                  ["-a" "--acl" :default [] :parse-fn as-acl]
-                                                  ["-r" "--replication-factor" :default -1 :parse-fn parse-int])
-        meta (doto (SettableBlobMeta. acl)
-                   (.set_replication_factor replication-factor))]
-    (validate-key-name! key)
-    (log-message "Creating " key " with ACL " (pr-str (map access-control-str acl)))
-    (if file
-      (with-open [f (input-stream file)]
-        (create-blob-from-stream key f meta))
-      (create-blob-from-stream key System/in meta))
-    (log-message "Successfully created " key)))
-
-(defn delete-cli [args]
-  (with-configured-blob-client blobstore
-    (doseq [key args]
-      (.deleteBlob blobstore key)
-      (log-message "deleted " key))))
-
-(defn list-cli [args]
-  (with-configured-blob-client blobstore
-    (let [keys (if (empty? args) (iterator-seq (.listKeys blobstore)) args)]
-      (doseq [key keys]
-        (try
-          (let [meta (.getBlobMeta blobstore key)
-                version (.get_version meta)
-                acl (.get_acl (.get_settable meta))]
-            (log-message key " " version " " (pr-str (map access-control-str acl))))
-          (catch AuthorizationException ae
-            (if-not (empty? args) (log-error "ACCESS DENIED to key: " key)))
-          (catch KeyNotFoundException knf
-            (if-not (empty? args) (log-error key " NOT FOUND"))))))))
-
-(defn set-acl-cli [args]
-  (let [[{set-acl :set} [key] _]
-           (cli args ["-s" "--set" :default [] :parse-fn as-acl])]
-    (with-configured-blob-client blobstore
-      (let [meta (.getBlobMeta blobstore key)
-            acl (.get_acl (.get_settable meta))
-            new-acl (if set-acl set-acl acl)
-            new-meta (SettableBlobMeta. new-acl)]
-        (log-message "Setting ACL for " key " to " (pr-str (map access-control-str new-acl)))
-        (.setBlobMeta blobstore key new-meta)))))
-
-(defn rep-cli [args]
-  (let [sub-command (first args)
-        new-args (rest args)]
-    (with-configured-blob-client blobstore
-      (condp = sub-command
-      "--read" (let [key (first new-args)
-                     blob-replication (.getBlobReplication blobstore key)]
-                 (log-message "Current replication factor " blob-replication)
-                 blob-replication)
-      "--update" (let [[{replication-factor :replication-factor} [key] _]
-                        (cli new-args ["-r" "--replication-factor" :parse-fn parse-int])]
-                   (if (nil? replication-factor)
-                     (throw (RuntimeException. (str "Please set the replication factor")))
-                     (let [blob-replication (.updateBlobReplication blobstore key replication-factor)]
-                       (log-message "Replication factor is set to " blob-replication)
-                       blob-replication)))
-      :else (throw (RuntimeException. (str sub-command " is not a supported blobstore command")))))))
-
-(defn -main [& args]
-  (let [command (first args)
-        new-args (rest args)]
-    (condp = command
-      "cat" (read-cli new-args)
-      "create" (create-cli new-args)
-      "update" (update-cli new-args)
-      "delete" (delete-cli new-args)
-      "list" (list-cli new-args)
-      "set-acl" (set-acl-cli new-args)
-      "replication" (rep-cli new-args)
-      :else (throw (RuntimeException. (str command " is not a supported blobstore command"))))))


http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/RollingCountBolt.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/RollingCountBolt.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/RollingCountBolt.java
new file mode 100644
index 0000000..730f156
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/RollingCountBolt.java
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter.bolt;
+
+import org.apache.storm.Config;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseRichBolt;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.utils.TupleUtils;
+import org.apache.log4j.Logger;
+import org.apache.storm.starter.tools.NthLastModifiedTimeTracker;
+import org.apache.storm.starter.tools.SlidingWindowCounter;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+
+/**
+ * This bolt performs rolling counts of incoming objects, i.e. sliding window based counting.
+ * <p/>
+ * The bolt is configured by two parameters, the length of the sliding window in seconds (which influences the output
+ * data of the bolt, i.e. how it will count objects) and the emit frequency in seconds (which influences how often the
+ * bolt will output the latest window counts). For instance, if the window length is set to an equivalent of five
+ * minutes and the emit frequency to one minute, then the bolt will output the latest five-minute sliding window every
+ * minute.
+ * <p/>
+ * The bolt emits a rolling count tuple per object, consisting of the object itself, its latest rolling count, and the
+ * actual duration of the sliding window. The latter is included in case the expected sliding window length (as
+ * configured by the user) is different from the actual length, e.g. due to high system load. Note that the actual
+ * window length is tracked and calculated for the window, and not individually for each object within a window.
+ * <p/>
+ * Note: During the startup phase you will usually observe that the bolt warns you about the actual sliding window
+ * length being smaller than the expected length. This behavior is expected and is caused by the way the sliding window
+ * counts are initially "loaded up". You can safely ignore this warning during startup (e.g. you will see this warning
+ * during the first ~ five minutes of startup time if the window length is set to five minutes).
+ */
+public class RollingCountBolt extends BaseRichBolt {
+
+  private static final long serialVersionUID = 5537727428628598519L;
+  private static final Logger LOG = Logger.getLogger(RollingCountBolt.class);
+  private static final int NUM_WINDOW_CHUNKS = 5;
+  private static final int DEFAULT_SLIDING_WINDOW_IN_SECONDS = NUM_WINDOW_CHUNKS * 60;
+  private static final int DEFAULT_EMIT_FREQUENCY_IN_SECONDS = DEFAULT_SLIDING_WINDOW_IN_SECONDS / NUM_WINDOW_CHUNKS;
+  private static final String WINDOW_LENGTH_WARNING_TEMPLATE =
+      "Actual window length is %d seconds when it should be %d seconds"
+          + " (you can safely ignore this warning during the startup phase)";
+
+  private final SlidingWindowCounter<Object> counter;
+  private final int windowLengthInSeconds;
+  private final int emitFrequencyInSeconds;
+  private OutputCollector collector;
+  private NthLastModifiedTimeTracker lastModifiedTracker;
+
+  public RollingCountBolt() {
+    this(DEFAULT_SLIDING_WINDOW_IN_SECONDS, DEFAULT_EMIT_FREQUENCY_IN_SECONDS);
+  }
+
+  public RollingCountBolt(int windowLengthInSeconds, int emitFrequencyInSeconds) {
+    this.windowLengthInSeconds = windowLengthInSeconds;
+    this.emitFrequencyInSeconds = emitFrequencyInSeconds;
+    counter = new SlidingWindowCounter<Object>(deriveNumWindowChunksFrom(this.windowLengthInSeconds,
+        this.emitFrequencyInSeconds));
+  }
+
+  private int deriveNumWindowChunksFrom(int windowLengthInSeconds, int windowUpdateFrequencyInSeconds) {
+    return windowLengthInSeconds / windowUpdateFrequencyInSeconds;
+  }
+
+  @SuppressWarnings("rawtypes")
+  @Override
+  public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
+    this.collector = collector;
+    lastModifiedTracker = new NthLastModifiedTimeTracker(deriveNumWindowChunksFrom(this.windowLengthInSeconds,
+        this.emitFrequencyInSeconds));
+  }
+
+  @Override
+  public void execute(Tuple tuple) {
+    if (TupleUtils.isTick(tuple)) {
+      LOG.debug("Received tick tuple, triggering emit of current window counts");
+      emitCurrentWindowCounts();
+    }
+    else {
+      countObjAndAck(tuple);
+    }
+  }
+
+  private void emitCurrentWindowCounts() {
+    Map<Object, Long> counts = counter.getCountsThenAdvanceWindow();
+    int actualWindowLengthInSeconds = lastModifiedTracker.secondsSinceOldestModification();
+    lastModifiedTracker.markAsModified();
+    if (actualWindowLengthInSeconds != windowLengthInSeconds) {
+      LOG.warn(String.format(WINDOW_LENGTH_WARNING_TEMPLATE, actualWindowLengthInSeconds, windowLengthInSeconds));
+    }
+    emit(counts, actualWindowLengthInSeconds);
+  }
+
+  private void emit(Map<Object, Long> counts, int actualWindowLengthInSeconds) {
+    for (Entry<Object, Long> entry : counts.entrySet()) {
+      Object obj = entry.getKey();
+      Long count = entry.getValue();
+      collector.emit(new Values(obj, count, actualWindowLengthInSeconds));
+    }
+  }
+
+  private void countObjAndAck(Tuple tuple) {
+    Object obj = tuple.getValue(0);
+    counter.incrementCount(obj);
+    collector.ack(tuple);
+  }
+
+  @Override
+  public void declareOutputFields(OutputFieldsDeclarer declarer) {
+    declarer.declare(new Fields("obj", "count", "actualWindowLengthInSeconds"));
+  }
+
+  @Override
+  public Map<String, Object> getComponentConfiguration() {
+    Map<String, Object> conf = new HashMap<String, Object>();
+    conf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, emitFrequencyInSeconds);
+    return conf;
+  }
+}
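A minimal usage sketch for the bolt above (not part of this commit): the two constructor arguments are the window length and emit frequency described in the javadoc, and the bolt counts whatever arrives in the first tuple field. The class name RollingCountSketch, the component ids, and the choice of RandomSentenceSpout (whose single "word" field here carries whole sentences) are illustrative assumptions.

    import org.apache.storm.Config;
    import org.apache.storm.LocalCluster;
    import org.apache.storm.topology.TopologyBuilder;
    import org.apache.storm.tuple.Fields;
    import org.apache.storm.starter.bolt.RollingCountBolt;
    import org.apache.storm.starter.spout.RandomSentenceSpout;

    public class RollingCountSketch {
      public static void main(String[] args) throws Exception {
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("wordSpout", new RandomSentenceSpout(), 2);
        // 300-second sliding window; latest counts emitted every 60 seconds
        // as (obj, count, actualWindowLengthInSeconds)
        builder.setBolt("rollingCount", new RollingCountBolt(300, 60), 4)
               .fieldsGrouping("wordSpout", new Fields("word"));
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("rolling-count-sketch", new Config(), builder.createTopology());
        Thread.sleep(2 * 60 * 1000);  // let a couple of emit intervals pass
        cluster.shutdown();
      }
    }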

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/SingleJoinBolt.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/SingleJoinBolt.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/SingleJoinBolt.java
new file mode 100644
index 0000000..163c0f2
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/SingleJoinBolt.java
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter.bolt;
+
+import org.apache.storm.Config;
+import org.apache.storm.generated.GlobalStreamId;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseRichBolt;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.utils.TimeCacheMap;
+
+import java.util.*;
+
+public class SingleJoinBolt extends BaseRichBolt {
+  OutputCollector _collector;
+  Fields _idFields;
+  Fields _outFields;
+  int _numSources;
+  TimeCacheMap<List<Object>, Map<GlobalStreamId, Tuple>> _pending;
+  Map<String, GlobalStreamId> _fieldLocations;
+
+  public SingleJoinBolt(Fields outFields) {
+    _outFields = outFields;
+  }
+
+  @Override
+  public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
+    _fieldLocations = new HashMap<String, GlobalStreamId>();
+    _collector = collector;
+    int timeout = ((Number) conf.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS)).intValue();
+    _pending = new TimeCacheMap<List<Object>, Map<GlobalStreamId, Tuple>>(timeout, new ExpireCallback());
+    _numSources = context.getThisSources().size();
+    Set<String> idFields = null;
+    for (GlobalStreamId source : context.getThisSources().keySet()) {
+      Fields fields = context.getComponentOutputFields(source.get_componentId(), source.get_streamId());
+      Set<String> setFields = new HashSet<String>(fields.toList());
+      if (idFields == null)
+        idFields = setFields;
+      else
+        idFields.retainAll(setFields);
+
+      for (String outfield : _outFields) {
+        for (String sourcefield : fields) {
+          if (outfield.equals(sourcefield)) {
+            _fieldLocations.put(outfield, source);
+          }
+        }
+      }
+    }
+    _idFields = new Fields(new ArrayList<String>(idFields));
+
+    if (_fieldLocations.size() != _outFields.size()) {
+      throw new RuntimeException("Cannot find all outfields among sources");
+    }
+  }
+
+  @Override
+  public void execute(Tuple tuple) {
+    List<Object> id = tuple.select(_idFields);
+    GlobalStreamId streamId = new GlobalStreamId(tuple.getSourceComponent(), tuple.getSourceStreamId());
+    if (!_pending.containsKey(id)) {
+      _pending.put(id, new HashMap<GlobalStreamId, Tuple>());
+    }
+    Map<GlobalStreamId, Tuple> parts = _pending.get(id);
+    if (parts.containsKey(streamId))
+      throw new RuntimeException("Received same side of single join twice");
+    parts.put(streamId, tuple);
+    if (parts.size() == _numSources) {
+      _pending.remove(id);
+      List<Object> joinResult = new ArrayList<Object>();
+      for (String outField : _outFields) {
+        GlobalStreamId loc = _fieldLocations.get(outField);
+        joinResult.add(parts.get(loc).getValueByField(outField));
+      }
+      _collector.emit(new ArrayList<Tuple>(parts.values()), joinResult);
+
+      for (Tuple part : parts.values()) {
+        _collector.ack(part);
+      }
+    }
+  }
+
+  @Override
+  public void declareOutputFields(OutputFieldsDeclarer declarer) {
+    declarer.declare(_outFields);
+  }
+
+  private class ExpireCallback implements TimeCacheMap.ExpiredCallback<List<Object>, Map<GlobalStreamId, Tuple>> {
+    @Override
+    public void expire(List<Object> id, Map<GlobalStreamId, Tuple> tuples) {
+      for (Tuple tuple : tuples.values()) {
+        _collector.fail(tuple);
+      }
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/SlidingWindowSumBolt.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/SlidingWindowSumBolt.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/SlidingWindowSumBolt.java
new file mode 100644
index 0000000..cd58380
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/SlidingWindowSumBolt.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter.bolt;
+
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseWindowedBolt;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.windowing.TupleWindow;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Computes a sliding window sum
+ */
+public class SlidingWindowSumBolt extends BaseWindowedBolt {
+    private static final Logger LOG = LoggerFactory.getLogger(SlidingWindowSumBolt.class);
+
+    private int sum = 0;
+    private OutputCollector collector;
+
+    @Override
+    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
+        this.collector = collector;
+    }
+
+    @Override
+    public void execute(TupleWindow inputWindow) {
+            /*
+             * The inputWindow gives a view of
+             * (a) all the events in the window
+             * (b) events that expired since last activation of the window
+             * (c) events that newly arrived since last activation of the window
+             */
+        List<Tuple> tuplesInWindow = inputWindow.get();
+        List<Tuple> newTuples = inputWindow.getNew();
+        List<Tuple> expiredTuples = inputWindow.getExpired();
+
+        LOG.debug("Events in current window: " + tuplesInWindow.size());
+            /*
+             * Instead of iterating over all the tuples in the window to compute
+             * the sum, the values for the new events are added and old events are
+             * subtracted. Similar optimizations might be possible in other
+             * windowing computations.
+             */
+        for (Tuple tuple : newTuples) {
+            sum += (int) tuple.getValue(0);
+        }
+        for (Tuple tuple : expiredTuples) {
+            sum -= (int) tuple.getValue(0);
+        }
+        collector.emit(new Values(sum));
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("sum"));
+    }
+}
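A fragment sketching how this windowed bolt might be configured, assuming the usual TopologyBuilder setup and the RandomIntegerSpout added elsewhere in this commit; the component ids and the 30/10 tuple-count window are illustrative, not part of this commit.

    import org.apache.storm.topology.TopologyBuilder;
    import org.apache.storm.topology.base.BaseWindowedBolt.Count;
    import org.apache.storm.starter.bolt.SlidingWindowSumBolt;
    import org.apache.storm.starter.spout.RandomIntegerSpout;

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("integers", new RandomIntegerSpout(), 1);
    // sum over the last 30 tuples, re-evaluated every 10 tuples
    builder.setBolt("slidingSum",
        new SlidingWindowSumBolt().withWindow(new Count(30), new Count(10)), 1)
        .shuffleGrouping("integers");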

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/TotalRankingsBolt.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/TotalRankingsBolt.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/TotalRankingsBolt.java
new file mode 100644
index 0000000..bfed34e
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/bolt/TotalRankingsBolt.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter.bolt;
+
+import org.apache.storm.tuple.Tuple;
+import org.apache.log4j.Logger;
+import org.apache.storm.starter.tools.Rankings;
+
+/**
+ * This bolt merges incoming {@link Rankings}.
+ * <p/>
+ * It can be used to merge intermediate rankings generated by {@link IntermediateRankingsBolt} into a final,
+ * consolidated ranking. To do so, configure this bolt with a globalGrouping on {@link IntermediateRankingsBolt}.
+ */
+public final class TotalRankingsBolt extends AbstractRankerBolt {
+
+  private static final long serialVersionUID = -8447525895532302198L;
+  private static final Logger LOG = Logger.getLogger(TotalRankingsBolt.class);
+
+  public TotalRankingsBolt() {
+    super();
+  }
+
+  public TotalRankingsBolt(int topN) {
+    super(topN);
+  }
+
+  public TotalRankingsBolt(int topN, int emitFrequencyInSeconds) {
+    super(topN, emitFrequencyInSeconds);
+  }
+
+  @Override
+  void updateRankingsWithTuple(Tuple tuple) {
+    Rankings rankingsToBeMerged = (Rankings) tuple.getValue(0);
+    super.getRankings().updateWith(rankingsToBeMerged);
+    super.getRankings().pruneZeroCounts();
+  }
+
+  @Override
+  Logger getLogger() {
+    return LOG;
+  }
+
+}
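The wiring described in the javadoc above might look roughly like the following fragment; the component ids are assumptions, and IntermediateRankingsBolt and RollingCountBolt are the companion classes from this same package.

    // Group intermediate rankings by the ranked object, then funnel every
    // intermediate ranking into a single TotalRankingsBolt task.
    builder.setBolt("intermediateRanker", new IntermediateRankingsBolt(10), 4)
           .fieldsGrouping("rollingCount", new Fields("obj"));
    builder.setBolt("totalRanker", new TotalRankingsBolt(10))
           .globalGrouping("intermediateRanker");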

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/spout/RandomIntegerSpout.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/spout/RandomIntegerSpout.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/spout/RandomIntegerSpout.java
new file mode 100644
index 0000000..e81ca40
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/spout/RandomIntegerSpout.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter.spout;
+
+import org.apache.storm.spout.SpoutOutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseRichSpout;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.utils.Utils;
+
+import java.util.Map;
+import java.util.Random;
+
+/**
+ * Emits a random integer and a timestamp value (offset by one day)
+ * every 100 ms. The ts field can be used in tuple-time-based windowing.
+ */
+public class RandomIntegerSpout extends BaseRichSpout {
+    private SpoutOutputCollector collector;
+    private Random rand;
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("value", "ts"));
+    }
+
+    @Override
+    public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
+        this.collector = collector;
+        this.rand = new Random();
+    }
+
+    @Override
+    public void nextTuple() {
+        Utils.sleep(100);
+        collector.emit(new Values(rand.nextInt(1000), System.currentTimeMillis() - (24 * 60 * 60 * 1000)));
+    }
+}
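To use the ts field as described, a downstream windowed bolt can be told to window on that tuple timestamp rather than on arrival time. This fragment assumes the BaseWindowedBolt API from the windowing support of this era (withWindow, withTimestampField) and reuses the SlidingWindowSumBolt and the "integers" spout id from the earlier sketch; treat it as a sketch, not part of this commit.

    import java.util.concurrent.TimeUnit;
    import org.apache.storm.topology.base.BaseWindowedBolt.Duration;

    // Window by the tuple's own "ts" value instead of processing time.
    builder.setBolt("tsSum",
        new SlidingWindowSumBolt()
            .withWindow(new Duration(10, TimeUnit.SECONDS), new Duration(5, TimeUnit.SECONDS))
            .withTimestampField("ts"),
        1).shuffleGrouping("integers");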

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/spout/RandomSentenceSpout.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/spout/RandomSentenceSpout.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/spout/RandomSentenceSpout.java
new file mode 100644
index 0000000..49bec2e
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/spout/RandomSentenceSpout.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter.spout;
+
+import org.apache.storm.spout.SpoutOutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseRichSpout;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.utils.Utils;
+
+import java.util.Map;
+import java.util.Random;
+
+public class RandomSentenceSpout extends BaseRichSpout {
+  SpoutOutputCollector _collector;
+  Random _rand;
+
+
+  @Override
+  public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
+    _collector = collector;
+    _rand = new Random();
+  }
+
+  @Override
+  public void nextTuple() {
+    Utils.sleep(100);
+    String[] sentences = new String[]{ "the cow jumped over the moon", "an apple a day keeps the doctor away",
+        "four score and seven years ago", "snow white and the seven dwarfs", "i am at two with nature" };
+    String sentence = sentences[_rand.nextInt(sentences.length)];
+    _collector.emit(new Values(sentence));
+  }
+
+  @Override
+  public void ack(Object id) {
+  }
+
+  @Override
+  public void fail(Object id) {
+  }
+
+  @Override
+  public void declareOutputFields(OutputFieldsDeclarer declarer) {
+    declarer.declare(new Fields("word"));
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/spout/TwitterSampleSpout.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/spout/TwitterSampleSpout.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/spout/TwitterSampleSpout.java
new file mode 100644
index 0000000..df26d25
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/spout/TwitterSampleSpout.java
@@ -0,0 +1,164 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.starter.spout;
+
+import java.util.Map;
+import java.util.concurrent.LinkedBlockingQueue;
+
+import twitter4j.FilterQuery;
+import twitter4j.StallWarning;
+import twitter4j.Status;
+import twitter4j.StatusDeletionNotice;
+import twitter4j.StatusListener;
+import twitter4j.TwitterStream;
+import twitter4j.TwitterStreamFactory;
+import twitter4j.auth.AccessToken;
+import twitter4j.conf.ConfigurationBuilder;
+
+import org.apache.storm.Config;
+import org.apache.storm.spout.SpoutOutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseRichSpout;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.utils.Utils;
+
+@SuppressWarnings("serial")
+public class TwitterSampleSpout extends BaseRichSpout {
+
+	SpoutOutputCollector _collector;
+	LinkedBlockingQueue<Status> queue = null;
+	TwitterStream _twitterStream;
+	String consumerKey;
+	String consumerSecret;
+	String accessToken;
+	String accessTokenSecret;
+	String[] keyWords;
+
+	public TwitterSampleSpout(String consumerKey, String consumerSecret,
+			String accessToken, String accessTokenSecret, String[] keyWords) {
+		this.consumerKey = consumerKey;
+		this.consumerSecret = consumerSecret;
+		this.accessToken = accessToken;
+		this.accessTokenSecret = accessTokenSecret;
+		this.keyWords = keyWords;
+	}
+
+	public TwitterSampleSpout() {
+		// TODO Auto-generated constructor stub
+	}
+
+	@Override
+	public void open(Map conf, TopologyContext context,
+			SpoutOutputCollector collector) {
+		queue = new LinkedBlockingQueue<Status>(1000);
+		_collector = collector;
+
+		StatusListener listener = new StatusListener() {
+
+			@Override
+			public void onStatus(Status status) {
+			
+				queue.offer(status);
+			}
+
+			@Override
+			public void onDeletionNotice(StatusDeletionNotice sdn) {
+			}
+
+			@Override
+			public void onTrackLimitationNotice(int i) {
+			}
+
+			@Override
+			public void onScrubGeo(long l, long l1) {
+			}
+
+			@Override
+			public void onException(Exception ex) {
+			}
+
+			@Override
+			public void onStallWarning(StallWarning arg0) {
+				// TODO Auto-generated method stub
+
+			}
+
+		};
+
+		// Store the stream in the field so close() can call shutdown() on it later.
+		_twitterStream = new TwitterStreamFactory(
+				new ConfigurationBuilder().setJSONStoreEnabled(true).build())
+				.getInstance();
+
+		_twitterStream.addListener(listener);
+		_twitterStream.setOAuthConsumer(consumerKey, consumerSecret);
+		AccessToken token = new AccessToken(accessToken, accessTokenSecret);
+		_twitterStream.setOAuthAccessToken(token);
+
+		if (keyWords.length == 0) {
+			_twitterStream.sample();
+		} else {
+			FilterQuery query = new FilterQuery().track(keyWords);
+			_twitterStream.filter(query);
+		}
+	}
+
+	@Override
+	public void nextTuple() {
+		Status ret = queue.poll();
+		if (ret == null) {
+			Utils.sleep(50);
+		} else {
+			_collector.emit(new Values(ret));
+
+		}
+	}
+
+	@Override
+	public void close() {
+		_twitterStream.shutdown();
+	}
+
+	@Override
+	public Map<String, Object> getComponentConfiguration() {
+		Config ret = new Config();
+		ret.setMaxTaskParallelism(1);
+		return ret;
+	}
+
+	@Override
+	public void ack(Object id) {
+	}
+
+	@Override
+	public void fail(Object id) {
+	}
+
+	@Override
+	public void declareOutputFields(OutputFieldsDeclarer declarer) {
+		declarer.declare(new Fields("tweet"));
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/NthLastModifiedTimeTracker.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/NthLastModifiedTimeTracker.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/NthLastModifiedTimeTracker.java
new file mode 100644
index 0000000..faa4e32
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/NthLastModifiedTimeTracker.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter.tools;
+
+import org.apache.storm.utils.Time;
+import org.apache.commons.collections.buffer.CircularFifoBuffer;
+
+/**
+ * This class tracks the time-since-last-modify of a "thing" in a rolling fashion.
+ * <p/>
+ * For example, create a 5-slot tracker to track the five most recent time-since-last-modify.
+ * <p/>
+ * You must manually "mark" the "something" that you want to track -- in terms of modification times -- whenever it
+ * has just been modified.
+ */
+public class NthLastModifiedTimeTracker {
+
+  private static final int MILLIS_IN_SEC = 1000;
+
+  private final CircularFifoBuffer lastModifiedTimesMillis;
+
+  public NthLastModifiedTimeTracker(int numTimesToTrack) {
+    if (numTimesToTrack < 1) {
+      throw new IllegalArgumentException(
+          "numTimesToTrack must be greater than zero (you requested " + numTimesToTrack + ")");
+    }
+    lastModifiedTimesMillis = new CircularFifoBuffer(numTimesToTrack);
+    initLastModifiedTimesMillis();
+  }
+
+  private void initLastModifiedTimesMillis() {
+    long nowCached = now();
+    for (int i = 0; i < lastModifiedTimesMillis.maxSize(); i++) {
+      lastModifiedTimesMillis.add(Long.valueOf(nowCached));
+    }
+  }
+
+  private long now() {
+    return Time.currentTimeMillis();
+  }
+
+  public int secondsSinceOldestModification() {
+    long modifiedTimeMillis = ((Long) lastModifiedTimesMillis.get()).longValue();
+    return (int) ((now() - modifiedTimeMillis) / MILLIS_IN_SEC);
+  }
+
+  public void markAsModified() {
+    updateLastModifiedTime();
+  }
+
+  private void updateLastModifiedTime() {
+    lastModifiedTimesMillis.add(now());
+  }
+
+}
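A small, self-contained sketch (illustrative class name, not part of this commit) of how the tracker behaves: with 3 slots, secondsSinceOldestModification() reports the age of the third most recent mark.

    import org.apache.storm.starter.tools.NthLastModifiedTimeTracker;

    public class TrackerSketch {
      public static void main(String[] args) throws InterruptedException {
        NthLastModifiedTimeTracker tracker = new NthLastModifiedTimeTracker(3);
        for (int i = 0; i < 5; i++) {
          Thread.sleep(1000);
          // age in seconds of the third most recent mark (slots are pre-filled with "now")
          System.out.println(tracker.secondsSinceOldestModification() + "s since oldest of the last 3 marks");
          tracker.markAsModified();
        }
      }
    }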

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/Rankable.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/Rankable.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/Rankable.java
new file mode 100644
index 0000000..85f2b62
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/Rankable.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter.tools;
+
+public interface Rankable extends Comparable<Rankable> {
+
+  Object getObject();
+
+  long getCount();
+
+  /**
+   * Note: We do not defensively copy the object wrapped by the Rankable; it is passed along as-is.
+   *
+   * @return a copy of this Rankable (the wrapped object itself is not copied)
+   */
+  Rankable copy();
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/RankableObjectWithFields.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/RankableObjectWithFields.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/RankableObjectWithFields.java
new file mode 100644
index 0000000..b1a9dca
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/RankableObjectWithFields.java
@@ -0,0 +1,148 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter.tools;
+
+import org.apache.storm.tuple.Tuple;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+
+import java.io.Serializable;
+import java.util.List;
+
+/**
+ * This class wraps an object and its associated count, including any additional data fields.
+ * <p/>
+ * This class can be used, for instance, to track the number of occurrences of an object in a Storm topology.
+ */
+public class RankableObjectWithFields implements Rankable, Serializable {
+
+  private static final long serialVersionUID = -9102878650001058090L;
+  private static final String toStringSeparator = "|";
+
+  private final Object obj;
+  private final long count;
+  private final ImmutableList<Object> fields;
+
+  public RankableObjectWithFields(Object obj, long count, Object... otherFields) {
+    if (obj == null) {
+      throw new IllegalArgumentException("The object must not be null");
+    }
+    if (count < 0) {
+      throw new IllegalArgumentException("The count must be >= 0");
+    }
+    this.obj = obj;
+    this.count = count;
+    fields = ImmutableList.copyOf(otherFields);
+
+  }
+
+  /**
+   * Construct a new instance based on the provided {@link Tuple}.
+   * <p/>
+   * This method expects the object to be ranked in the first field (index 0) of the provided tuple, and the number of
+   * occurrences of the object (its count) in the second field (index 1). Any further fields in the tuple will be
+   * extracted and tracked, too. These fields can be accessed via {@link RankableObjectWithFields#getFields()}.
+   *
+   * @param tuple
+   *
+   * @return new instance based on the provided tuple
+   */
+  public static RankableObjectWithFields from(Tuple tuple) {
+    List<Object> otherFields = Lists.newArrayList(tuple.getValues());
+    Object obj = otherFields.remove(0);
+    Long count = (Long) otherFields.remove(0);
+    return new RankableObjectWithFields(obj, count, otherFields.toArray());
+  }
+
+  public Object getObject() {
+    return obj;
+  }
+
+  public long getCount() {
+    return count;
+  }
+
+  /**
+   * @return an immutable list of any additional data fields of the object (may be empty but will never be null)
+   */
+  public List<Object> getFields() {
+    return fields;
+  }
+
+  @Override
+  public int compareTo(Rankable other) {
+    long delta = this.getCount() - other.getCount();
+    if (delta > 0) {
+      return 1;
+    }
+    else if (delta < 0) {
+      return -1;
+    }
+    else {
+      return 0;
+    }
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (!(o instanceof RankableObjectWithFields)) {
+      return false;
+    }
+    RankableObjectWithFields other = (RankableObjectWithFields) o;
+    return obj.equals(other.obj) && count == other.count;
+  }
+
+  @Override
+  public int hashCode() {
+    int result = 17;
+    int countHash = (int) (count ^ (count >>> 32));
+    result = 31 * result + countHash;
+    result = 31 * result + obj.hashCode();
+    return result;
+  }
+
+  public String toString() {
+    StringBuffer buf = new StringBuffer();
+    buf.append("[");
+    buf.append(obj);
+    buf.append(toStringSeparator);
+    buf.append(count);
+    for (Object field : fields) {
+      buf.append(toStringSeparator);
+      buf.append(field);
+    }
+    buf.append("]");
+    return buf.toString();
+  }
+
+  /**
+   * Note: We do not defensively copy the wrapped object or any accompanying fields.  We do guarantee, however,
+   * to return a defensive (shallow) copy of the List object that wraps any accompanying fields.
+   *
+   * @return a shallow copy of this Rankable
+   */
+  @Override
+  public Rankable copy() {
+    List<Object> shallowCopyOfFields = ImmutableList.copyOf(getFields());
+    return new RankableObjectWithFields(getObject(), getCount(), shallowCopyOfFields);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/Rankings.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/Rankings.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/Rankings.java
new file mode 100644
index 0000000..17174b3
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/Rankings.java
@@ -0,0 +1,156 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter.tools;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+
+import java.io.Serializable;
+import java.util.Collections;
+import java.util.List;
+
+public class Rankings implements Serializable {
+
+  private static final long serialVersionUID = -1549827195410578903L;
+  private static final int DEFAULT_COUNT = 10;
+
+  private final int maxSize;
+  private final List<Rankable> rankedItems = Lists.newArrayList();
+
+  public Rankings() {
+    this(DEFAULT_COUNT);
+  }
+
+  public Rankings(int topN) {
+    if (topN < 1) {
+      throw new IllegalArgumentException("topN must be >= 1");
+    }
+    maxSize = topN;
+  }
+
+  /**
+   * Copy constructor.
+   * @param other
+   */
+  public Rankings(Rankings other) {
+    this(other.maxSize());
+    updateWith(other);
+  }
+
+  /**
+   * @return the maximum possible number (size) of ranked objects this instance can hold
+   */
+  public int maxSize() {
+    return maxSize;
+  }
+
+  /**
+   * @return the number (size) of ranked objects this instance is currently holding
+   */
+  public int size() {
+    return rankedItems.size();
+  }
+
+  /**
+   * The returned defensive copy is only "somewhat" defensive.  We do, for instance, return a defensive copy of the
+   * enclosing List instance, and we do try to defensively copy any contained Rankable objects, too.  However, the
+   * contract of {@link org.apache.storm.starter.tools.Rankable#copy()} does not guarantee that any Objects embedded
+   * within a Rankable will be defensively copied, too.
+   *
+   * @return a somewhat defensive copy of ranked items
+   */
+  public List<Rankable> getRankings() {
+    List<Rankable> copy = Lists.newLinkedList();
+    for (Rankable r: rankedItems) {
+      copy.add(r.copy());
+    }
+    return ImmutableList.copyOf(copy);
+  }
+
+  public void updateWith(Rankings other) {
+    for (Rankable r : other.getRankings()) {
+      updateWith(r);
+    }
+  }
+
+  public void updateWith(Rankable r) {
+    synchronized(rankedItems) {
+      addOrReplace(r);
+      rerank();
+      shrinkRankingsIfNeeded();
+    }
+  }
+
+  private void addOrReplace(Rankable r) {
+    Integer rank = findRankOf(r);
+    if (rank != null) {
+      rankedItems.set(rank, r);
+    }
+    else {
+      rankedItems.add(r);
+    }
+  }
+
+  private Integer findRankOf(Rankable r) {
+    Object tag = r.getObject();
+    for (int rank = 0; rank < rankedItems.size(); rank++) {
+      Object cur = rankedItems.get(rank).getObject();
+      if (cur.equals(tag)) {
+        return rank;
+      }
+    }
+    return null;
+  }
+
+  private void rerank() {
+    Collections.sort(rankedItems);
+    Collections.reverse(rankedItems);
+  }
+
+  private void shrinkRankingsIfNeeded() {
+    if (rankedItems.size() > maxSize) {
+      rankedItems.remove(maxSize);
+    }
+  }
+
+  /**
+   * Removes ranking entries that have a count of zero.
+   */
+  public void pruneZeroCounts() {
+    int i = 0;
+    while (i < rankedItems.size()) {
+      if (rankedItems.get(i).getCount() == 0) {
+        rankedItems.remove(i);
+      }
+      else {
+        i++;
+      }
+    }
+  }
+
+  public String toString() {
+    return rankedItems.toString();
+  }
+
+  /**
+   * Creates a (defensive) copy of itself.
+   */
+  public Rankings copy() {
+    return new Rankings(this);
+  }
+}
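A small, self-contained sketch (illustrative class name and data, not part of this commit) of the top-N behaviour: entries beyond maxSize are evicted after re-ranking, and zero-count entries can be pruned.

    import org.apache.storm.starter.tools.RankableObjectWithFields;
    import org.apache.storm.starter.tools.Rankings;

    public class RankingsSketch {
      public static void main(String[] args) {
        Rankings top3 = new Rankings(3);                            // keep at most 3 entries
        top3.updateWith(new RankableObjectWithFields("apple", 5));
        top3.updateWith(new RankableObjectWithFields("pear", 2));
        top3.updateWith(new RankableObjectWithFields("plum", 9));
        top3.updateWith(new RankableObjectWithFields("kiwi", 7));   // "pear" falls out of the top 3
        top3.updateWith(new RankableObjectWithFields("apple", 0));  // count drops to zero...
        top3.pruneZeroCounts();                                     // ...and is pruned here
        System.out.println(top3);                                   // [[plum|9], [kiwi|7]]
      }
    }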

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/SlidingWindowCounter.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/SlidingWindowCounter.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/SlidingWindowCounter.java
new file mode 100644
index 0000000..b95a6a9
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/SlidingWindowCounter.java
@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter.tools;
+
+import java.io.Serializable;
+import java.util.Map;
+
+/**
+ * This class counts objects in a sliding window fashion.
+ * <p/>
+ * It is designed 1) to give multiple "producer" threads write access to the counter, i.e. being able to increment
+ * counts of objects, and 2) to give a single "consumer" thread (e.g. {@link PeriodicSlidingWindowCounter}) read access
+ * to the counter. Whenever the consumer thread performs a read operation, this class will advance the head slot of the
+ * sliding window counter. This means that the consumer thread indirectly controls where writes of the producer threads
+ * will go to. Also, by itself this class will not advance the head slot.
+ * <p/>
+ * A note for analyzing data based on a sliding window count: During the initial <code>windowLengthInSlots</code>
+ * iterations, this sliding window counter will always return object counts that are equal or greater than in the
+ * previous iteration. This is the effect of the counter "loading up" at the very start of its existence. Conceptually,
+ * this is the desired behavior.
+ * <p/>
+ * To give an example, using a counter with 5 slots which for the sake of this example represent 1 minute of time each:
+ * <p/>
+ * <pre>
+ * {@code
+ * Sliding window counts of an object X over time
+ *
+ * Minute (timeline):
+ * 1    2   3   4   5   6   7   8
+ *
+ * Observed counts per minute:
+ * 1    1   1   1   0   0   0   0
+ *
+ * Counts returned by counter:
+ * 1    2   3   4   4   3   2   1
+ * }
+ * </pre>
+ * <p/>
+ * As you can see in this example, for the first <code>windowLengthInSlots</code> (here: the first five minutes) the
+ * counter will always return counts equal or greater than in the previous iteration (1, 2, 3, 4, 4). This initial load
+ * effect needs to be accounted for whenever you want to perform analyses such as trending topics; otherwise your
+ * analysis algorithm might falsely identify the object to be trending as the counter seems to observe continuously
+ * increasing counts. Also, note that during the initial load phase <em>every object</em> will exhibit increasing
+ * counts.
+ * <p/>
+ * On a high-level, the counter exhibits the following behavior: If you asked the example counter after two minutes,
+ * "how often did you count the object during the past five minutes?", then it should reply "I have counted it 2 times
+ * in the past five minutes", implying that it can only account for the last two of those five minutes because the
+ * counter was not running before that time.
+ *
+ * @param <T> The type of those objects we want to count.
+ */
+public final class SlidingWindowCounter<T> implements Serializable {
+
+  private static final long serialVersionUID = -2645063988768785810L;
+
+  private SlotBasedCounter<T> objCounter;
+  private int headSlot;
+  private int tailSlot;
+  private int windowLengthInSlots;
+
+  public SlidingWindowCounter(int windowLengthInSlots) {
+    if (windowLengthInSlots < 2) {
+      throw new IllegalArgumentException(
+          "Window length in slots must be at least two (you requested " + windowLengthInSlots + ")");
+    }
+    this.windowLengthInSlots = windowLengthInSlots;
+    this.objCounter = new SlotBasedCounter<T>(this.windowLengthInSlots);
+
+    this.headSlot = 0;
+    this.tailSlot = slotAfter(headSlot);
+  }
+
+  public void incrementCount(T obj) {
+    objCounter.incrementCount(obj, headSlot);
+  }
+
+  /**
+   * Return the current (total) counts of all tracked objects, then advance the window.
+   * <p/>
+   * Whenever this method is called, we consider the counts of the current sliding window to be available to and
+   * successfully processed "upstream" (i.e. by the caller). Knowing this we will start counting any subsequent
+   * objects within the next "chunk" of the sliding window.
+   *
+   * @return The current (total) counts of all tracked objects.
+   */
+  public Map<T, Long> getCountsThenAdvanceWindow() {
+    Map<T, Long> counts = objCounter.getCounts();
+    objCounter.wipeZeros();
+    objCounter.wipeSlot(tailSlot);
+    advanceHead();
+    return counts;
+  }
+
+  private void advanceHead() {
+    headSlot = tailSlot;
+    tailSlot = slotAfter(tailSlot);
+  }
+
+  private int slotAfter(int slot) {
+    return (slot + 1) % windowLengthInSlots;
+  }
+
+}
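A self-contained sketch (illustrative class name, not part of this commit) that reproduces the 5-slot example from the javadoc above, with one read per "minute" of the loop:

    import java.util.Map;
    import org.apache.storm.starter.tools.SlidingWindowCounter;

    public class SlidingWindowCounterSketch {
      public static void main(String[] args) {
        SlidingWindowCounter<String> counter = new SlidingWindowCounter<String>(5);
        for (int minute = 1; minute <= 8; minute++) {
          if (minute <= 4) {
            counter.incrementCount("X");  // X observed once per "minute" for the first 4 minutes
          }
          Map<String, Long> counts = counter.getCountsThenAdvanceWindow();
          Long count = counts.get("X");
          // prints 1, 2, 3, 4, 4, 3, 2, 1 -- the load-up and drain-down described in the javadoc
          System.out.println("minute " + minute + ": " + (count == null ? 0 : count));
        }
      }
    }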

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/SlotBasedCounter.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/SlotBasedCounter.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/SlotBasedCounter.java
new file mode 100644
index 0000000..b8ca15b
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/tools/SlotBasedCounter.java
@@ -0,0 +1,118 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter.tools;
+
+import java.io.Serializable;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * This class provides per-slot counts of the occurrences of objects.
+ * <p/>
+ * It can be used, for instance, as a building block for implementing sliding window counting of objects.
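+ * <p/>
+ * A minimal usage sketch (the three-slot size and the slot indices are arbitrary; callers such as
+ * SlidingWindowCounter decide which slot is "current"):
+ * <pre>
+ * SlotBasedCounter&lt;String&gt; counter = new SlotBasedCounter&lt;String&gt;(3);
+ * counter.incrementCount("apple", 0);   // one occurrence recorded in slot 0
+ * counter.incrementCount("apple", 2);   // and one in slot 2
+ * long total = counter.getCounts().get("apple");   // 2, summed across all slots
+ * </pre>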
+ *
+ * @param <T> The type of the objects being counted.
+ */
+public final class SlotBasedCounter<T> implements Serializable {
+
+  private static final long serialVersionUID = 4858185737378394432L;
+
+  private final Map<T, long[]> objToCounts = new HashMap<T, long[]>();
+  private final int numSlots;
+
+  public SlotBasedCounter(int numSlots) {
+    if (numSlots <= 0) {
+      throw new IllegalArgumentException("Number of slots must be greater than zero (you requested " + numSlots + ")");
+    }
+    this.numSlots = numSlots;
+  }
+
+  public void incrementCount(T obj, int slot) {
+    long[] counts = objToCounts.get(obj);
+    if (counts == null) {
+      counts = new long[this.numSlots];
+      objToCounts.put(obj, counts);
+    }
+    counts[slot]++;
+  }
+
+  public long getCount(T obj, int slot) {
+    long[] counts = objToCounts.get(obj);
+    if (counts == null) {
+      return 0;
+    }
+    else {
+      return counts[slot];
+    }
+  }
+
+  public Map<T, Long> getCounts() {
+    Map<T, Long> result = new HashMap<T, Long>();
+    for (T obj : objToCounts.keySet()) {
+      result.put(obj, computeTotalCount(obj));
+    }
+    return result;
+  }
+
+  private long computeTotalCount(T obj) {
+    long[] curr = objToCounts.get(obj);
+    long total = 0;
+    for (long l : curr) {
+      total += l;
+    }
+    return total;
+  }
+
+  /**
+   * Reset the count of every tracked object to zero for the given slot.
+   *
+   * @param slot the index of the slot to wipe
+   */
+  public void wipeSlot(int slot) {
+    for (T obj : objToCounts.keySet()) {
+      resetSlotCountToZero(obj, slot);
+    }
+  }
+
+  private void resetSlotCountToZero(T obj, int slot) {
+    long[] counts = objToCounts.get(obj);
+    counts[slot] = 0;
+  }
+
+  private boolean shouldBeRemovedFromCounter(T obj) {
+    return computeTotalCount(obj) == 0;
+  }
+
+  /**
+   * Remove any object from the counter whose total count is zero (to free up memory).
+   */
+  public void wipeZeros() {
+    Set<T> objToBeRemoved = new HashSet<T>();
+    for (T obj : objToCounts.keySet()) {
+      if (shouldBeRemovedFromCounter(obj)) {
+        objToBeRemoved.add(obj);
+      }
+    }
+    for (T obj : objToBeRemoved) {
+      objToCounts.remove(obj);
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentKafkaWordCount.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentKafkaWordCount.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentKafkaWordCount.java
new file mode 100644
index 0000000..dc4cb4b
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentKafkaWordCount.java
@@ -0,0 +1,229 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.storm.starter.trident;
+
+
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.LocalDRPC;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.spout.SchemeAsMultiScheme;
+import org.apache.storm.topology.TopologyBuilder;
+import org.apache.storm.tuple.Fields;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.storm.kafka.StringScheme;
+import org.apache.storm.kafka.ZkHosts;
+import org.apache.storm.kafka.bolt.KafkaBolt;
+import org.apache.storm.kafka.bolt.mapper.FieldNameBasedTupleToKafkaMapper;
+import org.apache.storm.kafka.bolt.selector.DefaultTopicSelector;
+import org.apache.storm.kafka.trident.TransactionalTridentKafkaSpout;
+import org.apache.storm.kafka.trident.TridentKafkaConfig;
+import org.apache.storm.starter.spout.RandomSentenceSpout;
+import org.apache.storm.trident.Stream;
+import org.apache.storm.trident.TridentState;
+import org.apache.storm.trident.TridentTopology;
+import org.apache.storm.trident.operation.builtin.Count;
+import org.apache.storm.trident.operation.builtin.FilterNull;
+import org.apache.storm.trident.operation.builtin.MapGet;
+import org.apache.storm.trident.testing.MemoryMapState;
+import org.apache.storm.trident.testing.Split;
+
+import java.util.Properties;
+
+/**
+ * A sample word-count trident topology using a transactional kafka spout, consisting of the following components:
+ * <ol>
+ * <li> {@link KafkaBolt}
+ * that receives random sentences from {@link RandomSentenceSpout} and
+ * publishes the sentences to a kafka "test" topic.
+ * </li>
+ * <li> {@link TransactionalTridentKafkaSpout}
+ * that consumes sentences from the "test" topic, splits them into words, aggregates
+ * and stores the word count in a {@link MemoryMapState}.
+ * </li>
+ * <li> DRPC query
+ * that returns the word counts by querying the trident state (MemoryMapState).
+ * </li>
+ * </ol>
+ * <p>
+ *     For more background read the <a href="https://storm.apache.org/documentation/Trident-tutorial.html">trident tutorial</a>,
+ *     <a href="https://storm.apache.org/documentation/Trident-state">trident state</a> and
+ *     <a href="https://github.com/apache/storm/tree/master/external/storm-kafka"> Storm Kafka </a>.
+ * </p>
+ */
+public class TridentKafkaWordCount {
+
+    private String zkUrl;
+    private String brokerUrl;
+
+    TridentKafkaWordCount(String zkUrl, String brokerUrl) {
+        this.zkUrl = zkUrl;
+        this.brokerUrl = brokerUrl;
+    }
+
+    /**
+     * Creates a transactional kafka spout that consumes any new data published to the "test" topic.
+     * <p/>
+     * For more information on transactional spouts, see the "Transactional spouts" section of the
+     * <a href="https://storm.apache.org/documentation/Trident-state"> Trident state</a> documentation.
+     *
+     * @return a transactional trident kafka spout.
+     */
+    private TransactionalTridentKafkaSpout createKafkaSpout() {
+        ZkHosts hosts = new ZkHosts(zkUrl);
+        TridentKafkaConfig config = new TridentKafkaConfig(hosts, "test");
+        config.scheme = new SchemeAsMultiScheme(new StringScheme());
+
+        // Consume new data from the topic
+        config.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
+        return new TransactionalTridentKafkaSpout(config);
+    }
+
+
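+    /**
+     * Adds a DRPC stream that splits the incoming request arguments into words and looks up the
+     * current count of each word in the given trident state.
+     */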
+    private Stream addDRPCStream(TridentTopology tridentTopology, TridentState state, LocalDRPC drpc) {
+        return tridentTopology.newDRPCStream("words", drpc)
+                .each(new Fields("args"), new Split(), new Fields("word"))
+                .groupBy(new Fields("word"))
+                .stateQuery(state, new Fields("word"), new MapGet(), new Fields("count"))
+                .each(new Fields("count"), new FilterNull())
+                .project(new Fields("word", "count"));
+    }
+
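+    /**
+     * Builds the word-count state: consumes sentences from the transactional kafka spout, splits them
+     * into words and maintains a persistent per-word count in a {@link MemoryMapState}.
+     */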
+    private TridentState addTridentState(TridentTopology tridentTopology) {
+        return tridentTopology.newStream("spout1", createKafkaSpout()).parallelismHint(1)
+                .each(new Fields("str"), new Split(), new Fields("word"))
+                .groupBy(new Fields("word"))
+                .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"))
+                .parallelismHint(1);
+    }
+
+    /**
+     * Creates a trident topology that consumes sentences from the kafka "test" topic using a
+     * {@link TransactionalTridentKafkaSpout}, computes the word count and stores it in a {@link MemoryMapState}.
+     * A DRPC stream is then created to query the word counts.
+     *
+     * @param drpc the local DRPC instance used to query the word counts
+     * @return the consumer topology
+     */
+    public StormTopology buildConsumerTopology(LocalDRPC drpc) {
+        TridentTopology tridentTopology = new TridentTopology();
+        addDRPCStream(tridentTopology, addTridentState(tridentTopology), drpc);
+        return tridentTopology.build();
+    }
+
+    /**
+     * Return the consumer topology config.
+     *
+     * @return the topology config
+     */
+    public Config getConsumerConfig() {
+        Config conf = new Config();
+        conf.setMaxSpoutPending(20);
+        //  conf.setDebug(true);
+        return conf;
+    }
+
+    /**
+     * Builds a topology that produces random sentences using {@link RandomSentenceSpout} and
+     * publishes the sentences to the kafka "test" topic using a {@link KafkaBolt}.
+     *
+     * @param prop the kafka producer properties for the {@link KafkaBolt}
+     * @return the storm topology
+     */
+    public StormTopology buildProducerTopology(Properties prop) {
+        TopologyBuilder builder = new TopologyBuilder();
+        builder.setSpout("spout", new RandomSentenceSpout(), 2);
+        /*
+         * The output field of the RandomSentenceSpout ("word") is provided as the boltMessageField
+         * so that this gets written out as the message in the kafka topic.
+         */
+        KafkaBolt bolt = new KafkaBolt().withProducerProperties(prop)
+                .withTopicSelector(new DefaultTopicSelector("test"))
+                .withTupleToKafkaMapper(new FieldNameBasedTupleToKafkaMapper("key", "word"));
+        builder.setBolt("forwardToKafka", bolt, 1).shuffleGrouping("spout");
+        return builder.createTopology();
+    }
+
+    /**
+     * Returns the kafka producer properties used by the {@link KafkaBolt} that publishes sentences
+     * to the kafka "test" topic.
+     *
+     * @return the producer properties
+     */
+    public Properties getProducerConfig() {
+        Properties props = new Properties();
+        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerUrl);
+        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
+        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
+        props.put(ProducerConfig.CLIENT_ID_CONFIG, "storm-kafka-producer");
+        return props;
+    }
+
+    /**
+     * <p>
+     * To run this topology ensure you have a kafka broker running.
+     * </p>
+     * Create the "test" topic from the command line, for example:
+     * kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test
+     */
+    public static void main(String[] args) throws Exception {
+
+        String zkUrl = "localhost:2181";        // the defaults.
+        String brokerUrl = "localhost:9092";
+
+        if (args.length > 2 || (args.length == 1 && args[0].matches("^-h|--help$"))) {
+            System.out.println("Usage: TridentKafkaWordCount [kafka zookeeper url] [kafka broker url]");
+            System.out.println("   E.g. TridentKafkaWordCount [" + zkUrl + "]" + " [" + brokerUrl + "]");
+            System.exit(1);
+        } else if (args.length == 1) {
+            zkUrl = args[0];
+        } else if (args.length == 2) {
+            zkUrl = args[0];
+            brokerUrl = args[1];
+        }
+
+        System.out.println("Using Kafka zookeeper url: " + zkUrl + " broker url: " + brokerUrl);
+
+        TridentKafkaWordCount wordCount = new TridentKafkaWordCount(zkUrl, brokerUrl);
+
+        LocalDRPC drpc = new LocalDRPC();
+        LocalCluster cluster = new LocalCluster();
+
+        // submit the consumer topology.
+        cluster.submitTopology("wordCounter", wordCount.getConsumerConfig(), wordCount.buildConsumerTopology(drpc));
+
+        Config conf = new Config();
+        conf.setMaxSpoutPending(20);
+        // submit the producer topology.
+        cluster.submitTopology("kafkaBolt", conf, wordCount.buildProducerTopology(wordCount.getProducerConfig()));
+
+        // keep querying the word counts for a minute.
+        for (int i = 0; i < 60; i++) {
+            System.out.println("DRPC RESULT: " + drpc.execute("words", "the and apple snow jumped"));
+            Thread.sleep(1000);
+        }
+
+        cluster.killTopology("kafkaBolt");
+        cluster.killTopology("wordCounter");
+        cluster.shutdown();
+    }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentReach.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentReach.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentReach.java
new file mode 100644
index 0000000..056b2b6
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentReach.java
@@ -0,0 +1,156 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter.trident;
+
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.LocalDRPC;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.task.IMetricsContext;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.trident.TridentState;
+import org.apache.storm.trident.TridentTopology;
+import org.apache.storm.trident.operation.BaseFunction;
+import org.apache.storm.trident.operation.CombinerAggregator;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.operation.builtin.MapGet;
+import org.apache.storm.trident.operation.builtin.Sum;
+import org.apache.storm.trident.state.ReadOnlyState;
+import org.apache.storm.trident.state.State;
+import org.apache.storm.trident.state.StateFactory;
+import org.apache.storm.trident.state.map.ReadOnlyMapState;
+import org.apache.storm.trident.tuple.TridentTuple;
+
+import java.util.*;
+
+public class TridentReach {
+  public static Map<String, List<String>> TWEETERS_DB = new HashMap<String, List<String>>() {{
+    put("foo.com/blog/1", Arrays.asList("sally", "bob", "tim", "george", "nathan"));
+    put("engineering.twitter.com/blog/5", Arrays.asList("adam", "david", "sally", "nathan"));
+    put("tech.backtype.com/blog/123", Arrays.asList("tim", "mike", "john"));
+  }};
+
+  public static Map<String, List<String>> FOLLOWERS_DB = new HashMap<String, List<String>>() {{
+    put("sally", Arrays.asList("bob", "tim", "alice", "adam", "jim", "chris", "jai"));
+    put("bob", Arrays.asList("sally", "nathan", "jim", "mary", "david", "vivian"));
+    put("tim", Arrays.asList("alex"));
+    put("nathan", Arrays.asList("sally", "bob", "adam", "harry", "chris", "vivian", "emily", "jordan"));
+    put("adam", Arrays.asList("david", "carissa"));
+    put("mike", Arrays.asList("john", "bob"));
+    put("john", Arrays.asList("alice", "nathan", "jim", "mike", "bob"));
+  }};
+
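+  /**
+   * A read-only trident state backed by an in-memory map; each query key is a single-element
+   * key tuple whose first value is looked up directly in the map.
+   */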
+  public static class StaticSingleKeyMapState extends ReadOnlyState implements ReadOnlyMapState<Object> {
+    public static class Factory implements StateFactory {
+      Map _map;
+
+      public Factory(Map map) {
+        _map = map;
+      }
+
+      @Override
+      public State makeState(Map conf, IMetricsContext metrics, int partitionIndex, int numPartitions) {
+        return new StaticSingleKeyMapState(_map);
+      }
+
+    }
+
+    Map _map;
+
+    public StaticSingleKeyMapState(Map map) {
+      _map = map;
+    }
+
+
+    @Override
+    public List<Object> multiGet(List<List<Object>> keys) {
+      List<Object> ret = new ArrayList();
+      for (List<Object> key : keys) {
+        Object singleKey = key.get(0);
+        ret.add(_map.get(singleKey));
+      }
+      return ret;
+    }
+
+  }
+
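+  /**
+   * Combiner aggregator that always yields 1, so that after grouping by follower each distinct
+   * follower contributes exactly once to the reach count.
+   */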
+  public static class One implements CombinerAggregator<Integer> {
+    @Override
+    public Integer init(TridentTuple tuple) {
+      return 1;
+    }
+
+    @Override
+    public Integer combine(Integer val1, Integer val2) {
+      return 1;
+    }
+
+    @Override
+    public Integer zero() {
+      return 1;
+    }
+  }
+
+  public static class ExpandList extends BaseFunction {
+
+    @Override
+    public void execute(TridentTuple tuple, TridentCollector collector) {
+      List l = (List) tuple.getValue(0);
+      if (l != null) {
+        for (Object o : l) {
+          collector.emit(new Values(o));
+        }
+      }
+    }
+
+  }
+
+  public static StormTopology buildTopology(LocalDRPC drpc) {
+    TridentTopology topology = new TridentTopology();
+    TridentState urlToTweeters = topology.newStaticState(new StaticSingleKeyMapState.Factory(TWEETERS_DB));
+    TridentState tweetersToFollowers = topology.newStaticState(new StaticSingleKeyMapState.Factory(FOLLOWERS_DB));
+
+
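+    // For each DRPC request (a URL): look up who tweeted it, expand to individual tweeters, look up
+    // and expand each tweeter's followers, then group by follower and count every distinct follower
+    // once before summing the ones into the total reach.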
+    topology.newDRPCStream("reach", drpc).stateQuery(urlToTweeters, new Fields("args"), new MapGet(), new Fields(
+        "tweeters")).each(new Fields("tweeters"), new ExpandList(), new Fields("tweeter")).shuffle().stateQuery(
+        tweetersToFollowers, new Fields("tweeter"), new MapGet(), new Fields("followers")).each(new Fields("followers"),
+        new ExpandList(), new Fields("follower")).groupBy(new Fields("follower")).aggregate(new One(), new Fields(
+        "one")).aggregate(new Fields("one"), new Sum(), new Fields("reach"));
+    return topology.build();
+  }
+
+  public static void main(String[] args) throws Exception {
+    LocalDRPC drpc = new LocalDRPC();
+
+    Config conf = new Config();
+    LocalCluster cluster = new LocalCluster();
+
+    cluster.submitTopology("reach", conf, buildTopology(drpc));
+
+    Thread.sleep(2000);
+
+    System.out.println("REACH: " + drpc.execute("reach", "aaa"));
+    System.out.println("REACH: " + drpc.execute("reach", "foo.com/blog/1"));
+    System.out.println("REACH: " + drpc.execute("reach", "engineering.twitter.com/blog/5"));
+
+
+    cluster.shutdown();
+    drpc.shutdown();
+  }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentWordCount.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentWordCount.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentWordCount.java
new file mode 100644
index 0000000..93ccf18
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentWordCount.java
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter.trident;
+
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.LocalDRPC;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.generated.StormTopology;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.trident.TridentState;
+import org.apache.storm.trident.TridentTopology;
+import org.apache.storm.trident.operation.BaseFunction;
+import org.apache.storm.trident.operation.TridentCollector;
+import org.apache.storm.trident.operation.builtin.Count;
+import org.apache.storm.trident.operation.builtin.FilterNull;
+import org.apache.storm.trident.operation.builtin.MapGet;
+import org.apache.storm.trident.operation.builtin.Sum;
+import org.apache.storm.trident.testing.FixedBatchSpout;
+import org.apache.storm.trident.testing.MemoryMapState;
+import org.apache.storm.trident.tuple.TridentTuple;
+
+
+public class TridentWordCount {
+  public static class Split extends BaseFunction {
+    @Override
+    public void execute(TridentTuple tuple, TridentCollector collector) {
+      String sentence = tuple.getString(0);
+      for (String word : sentence.split(" ")) {
+        collector.emit(new Values(word));
+      }
+    }
+  }
+
+  public static StormTopology buildTopology(LocalDRPC drpc) {
+    FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence"), 3, new Values("the cow jumped over the moon"),
+        new Values("the man went to the store and bought some candy"), new Values("four score and seven years ago"),
+        new Values("how many apples can you eat"), new Values("to be or not to be the person"));
+    spout.setCycle(true);
+
+    TridentTopology topology = new TridentTopology();
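+    // Word-count state: split each sentence into words, group by word and keep a persistent
+    // per-word count in an in-memory MemoryMapState.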
+    TridentState wordCounts = topology.newStream("spout1", spout).parallelismHint(16).each(new Fields("sentence"),
+        new Split(), new Fields("word")).groupBy(new Fields("word")).persistentAggregate(new MemoryMapState.Factory(),
+        new Count(), new Fields("count")).parallelismHint(16);
+
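+    // DRPC query: split the request arguments into words, look up each word's count in the state
+    // above, drop words with no count and sum the counts into a single "sum" field.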
+    topology.newDRPCStream("words", drpc).each(new Fields("args"), new Split(), new Fields("word")).groupBy(new Fields(
+        "word")).stateQuery(wordCounts, new Fields("word"), new MapGet(), new Fields("count")).each(new Fields("count"),
+        new FilterNull()).aggregate(new Fields("count"), new Sum(), new Fields("sum"));
+    return topology.build();
+  }
+
+  public static void main(String[] args) throws Exception {
+    Config conf = new Config();
+    conf.setMaxSpoutPending(20);
+    if (args.length == 0) {
+      LocalDRPC drpc = new LocalDRPC();
+      LocalCluster cluster = new LocalCluster();
+      cluster.submitTopology("wordCounter", conf, buildTopology(drpc));
+      for (int i = 0; i < 100; i++) {
+        System.out.println("DRPC RESULT: " + drpc.execute("words", "cat the dog jumped"));
+        Thread.sleep(1000);
+      }
+    }
+    else {
+      conf.setNumWorkers(3);
+      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, buildTopology(null));
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/org/apache/storm/starter/util/StormRunner.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/util/StormRunner.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/util/StormRunner.java
new file mode 100644
index 0000000..d7f2bf4
--- /dev/null
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/util/StormRunner.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.starter.util;
+
+import org.apache.storm.Config;
+import org.apache.storm.LocalCluster;
+import org.apache.storm.StormSubmitter;
+import org.apache.storm.generated.AlreadyAliveException;
+import org.apache.storm.generated.AuthorizationException;
+import org.apache.storm.generated.InvalidTopologyException;
+import org.apache.storm.generated.StormTopology;
+
+public final class StormRunner {
+
+  private static final int MILLIS_IN_SEC = 1000;
+
+  private StormRunner() {
+  }
+
+  public static void runTopologyLocally(StormTopology topology, String topologyName, Config conf, int runtimeInSeconds)
+      throws InterruptedException {
+    LocalCluster cluster = new LocalCluster();
+    cluster.submitTopology(topologyName, conf, topology);
+    Thread.sleep((long) runtimeInSeconds * MILLIS_IN_SEC);
+    cluster.killTopology(topologyName);
+    cluster.shutdown();
+  }
+
+  public static void runTopologyRemotely(StormTopology topology, String topologyName, Config conf)
+      throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
+    StormSubmitter.submitTopology(topologyName, conf, topology);
+  }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/BasicDRPCTopology.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/BasicDRPCTopology.java b/examples/storm-starter/src/jvm/storm/starter/BasicDRPCTopology.java
deleted file mode 100644
index 3ea83a1..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/BasicDRPCTopology.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter;
-
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.LocalDRPC;
-import backtype.storm.StormSubmitter;
-import backtype.storm.drpc.LinearDRPCTopologyBuilder;
-import backtype.storm.topology.BasicOutputCollector;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseBasicBolt;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
-
-/**
- * This topology is a basic example of doing distributed RPC on top of Storm. It implements a function that appends a
- * "!" to any string you send the DRPC function.
- *
- * @see <a href="http://storm.apache.org/documentation/Distributed-RPC.html">Distributed RPC</a>
- */
-public class BasicDRPCTopology {
-  public static class ExclaimBolt extends BaseBasicBolt {
-    @Override
-    public void execute(Tuple tuple, BasicOutputCollector collector) {
-      String input = tuple.getString(1);
-      collector.emit(new Values(tuple.getValue(0), input + "!"));
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-      declarer.declare(new Fields("id", "result"));
-    }
-
-  }
-
-  public static void main(String[] args) throws Exception {
-    LinearDRPCTopologyBuilder builder = new LinearDRPCTopologyBuilder("exclamation");
-    builder.addBolt(new ExclaimBolt(), 3);
-
-    Config conf = new Config();
-
-    if (args == null || args.length == 0) {
-      LocalDRPC drpc = new LocalDRPC();
-      LocalCluster cluster = new LocalCluster();
-
-      cluster.submitTopology("drpc-demo", conf, builder.createLocalTopology(drpc));
-
-      for (String word : new String[]{ "hello", "goodbye" }) {
-        System.out.println("Result for \"" + word + "\": " + drpc.execute("exclamation", word));
-      }
-
-      Thread.sleep(10000);
-      drpc.shutdown();
-      cluster.shutdown();
-    }
-    else {
-      conf.setNumWorkers(3);
-      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createRemoteTopology());
-    }
-  }
-}


[46/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/ThroughputVsLatency.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/ThroughputVsLatency.java b/examples/storm-starter/src/jvm/storm/starter/ThroughputVsLatency.java
deleted file mode 100644
index 4c6680e..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/ThroughputVsLatency.java
+++ /dev/null
@@ -1,432 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter;
-
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.StormSubmitter;
-import backtype.storm.metric.HttpForwardingMetricsServer;
-import backtype.storm.metric.HttpForwardingMetricsConsumer;
-import backtype.storm.metric.api.IMetric;
-import backtype.storm.metric.api.IMetricsConsumer.TaskInfo;
-import backtype.storm.metric.api.IMetricsConsumer.DataPoint;
-import backtype.storm.generated.*;
-import backtype.storm.spout.SpoutOutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.BasicOutputCollector;
-import backtype.storm.topology.IRichBolt;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.topology.base.BaseBasicBolt;
-import backtype.storm.topology.base.BaseRichSpout;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
-import backtype.storm.utils.NimbusClient;
-import backtype.storm.utils.Utils;
-import backtype.storm.StormSubmitter;
-
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Random;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ThreadLocalRandom;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.storm.metrics.hdrhistogram.HistogramMetric;
-import org.HdrHistogram.Histogram;
-
-/**
- * WordCount but the spout goes at a predefined rate and we collect
- * proper latency statistics.
- */
-public class ThroughputVsLatency {
-  private static class SentWithTime {
-    public final String sentence;
-    public final long time;
-
-    SentWithTime(String sentence, long time) {
-        this.sentence = sentence;
-        this.time = time;
-    }
-  }
-
-  public static class C {
-    LocalCluster _local = null;
-    Nimbus.Client _client = null;
-
-    public C(Map conf) {
-      Map clusterConf = Utils.readStormConfig();
-      if (conf != null) {
-        clusterConf.putAll(conf);
-      }
-      Boolean isLocal = (Boolean)clusterConf.get("run.local");
-      if (isLocal != null && isLocal) {
-        _local = new LocalCluster();
-      } else {
-        _client = NimbusClient.getConfiguredClient(clusterConf).getClient();
-      }
-    }
-
-    public ClusterSummary getClusterInfo() throws Exception {
-      if (_local != null) {
-        return _local.getClusterInfo();
-      } else {
-        return _client.getClusterInfo();
-      }
-    }
-
-    public TopologyInfo getTopologyInfo(String id) throws Exception {
-      if (_local != null) {
-        return _local.getTopologyInfo(id);
-      } else {
-        return _client.getTopologyInfo(id);
-      }
-    }
-
-    public void killTopologyWithOpts(String name, KillOptions opts) throws Exception {
-      if (_local != null) {
-        _local.killTopologyWithOpts(name, opts);
-      } else {
-        _client.killTopologyWithOpts(name, opts);
-      }
-    }
-
-    public void submitTopology(String name, Map stormConf, StormTopology topology) throws Exception {
-      if (_local != null) {
-        _local.submitTopology(name, stormConf, topology);
-      } else {
-        StormSubmitter.submitTopology(name, stormConf, topology);
-      }
-    }
-
-    public boolean isLocal() {
-      return _local != null;
-    }
-  }
-
-  public static class FastRandomSentenceSpout extends BaseRichSpout {
-    static final String[] SENTENCES = new String[]{ "the cow jumped over the moon", "an apple a day keeps the doctor away",
-          "four score and seven years ago", "snow white and the seven dwarfs", "i am at two with nature" };
-
-    SpoutOutputCollector _collector;
-    long _periodNano;
-    long _emitAmount;
-    Random _rand;
-    long _nextEmitTime;
-    long _emitsLeft;
-    HistogramMetric _histo;
-
-    public FastRandomSentenceSpout(long ratePerSecond) {
-        if (ratePerSecond > 0) {
-            _periodNano = Math.max(1, 1000000000/ratePerSecond);
-            _emitAmount = Math.max(1, (long)((ratePerSecond / 1000000000.0) * _periodNano));
-        } else {
-            _periodNano = Long.MAX_VALUE - 1;
-            _emitAmount = 1;
-        }
-    }
-
-    @Override
-    public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
-      _collector = collector;
-      _rand = ThreadLocalRandom.current();
-      _nextEmitTime = System.nanoTime();
-      _emitsLeft = _emitAmount;
-      _histo = new HistogramMetric(3600000000000L, 3);
-      context.registerMetric("comp-lat-histo", _histo, 10); //Update every 10 seconds, so we are not too far behind
-    }
-
-    @Override
-    public void nextTuple() {
-      if (_emitsLeft <= 0 && _nextEmitTime <= System.nanoTime()) {
-          _emitsLeft = _emitAmount;
-          _nextEmitTime = _nextEmitTime + _periodNano;
-      }
-
-      if (_emitsLeft > 0) {
-          String sentence = SENTENCES[_rand.nextInt(SENTENCES.length)];
-          _collector.emit(new Values(sentence), new SentWithTime(sentence, _nextEmitTime - _periodNano));
-          _emitsLeft--;
-      }
-    }
-
-    @Override
-    public void ack(Object id) {
-      long end = System.nanoTime();
-      SentWithTime st = (SentWithTime)id;
-      _histo.recordValue(end-st.time);
-    }
-
-    @Override
-    public void fail(Object id) {
-      SentWithTime st = (SentWithTime)id;
-      _collector.emit(new Values(st.sentence), id);
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-      declarer.declare(new Fields("sentence"));
-    }
-  }
-
-  public static class SplitSentence extends BaseBasicBolt {
-    @Override
-    public void execute(Tuple tuple, BasicOutputCollector collector) {
-      String sentence = tuple.getString(0);
-      for (String word: sentence.split("\\s+")) {
-          collector.emit(new Values(word, 1));
-      }
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-      declarer.declare(new Fields("word", "count"));
-    }
-  }
-
-  public static class WordCount extends BaseBasicBolt {
-    Map<String, Integer> counts = new HashMap<String, Integer>();
-
-    @Override
-    public void execute(Tuple tuple, BasicOutputCollector collector) {
-      String word = tuple.getString(0);
-      Integer count = counts.get(word);
-      if (count == null)
-        count = 0;
-      count++;
-      counts.put(word, count);
-      collector.emit(new Values(word, count));
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-      declarer.declare(new Fields("word", "count"));
-    }
-  }
-
-  private static class MemMeasure {
-    private long _mem = 0;
-    private long _time = 0;
-
-    public synchronized void update(long mem) {
-        _mem = mem;
-        _time = System.currentTimeMillis();
-    }
-
-    public synchronized long get() {
-        return isExpired() ? 0l : _mem;
-    }
-
-    public synchronized boolean isExpired() {
-        return (System.currentTimeMillis() - _time) >= 20000;
-    }
-  }
-
-  private static final Histogram _histo = new Histogram(3600000000000L, 3);
-  private static final AtomicLong _systemCPU = new AtomicLong(0);
-  private static final AtomicLong _userCPU = new AtomicLong(0);
-  private static final AtomicLong _gcCount = new AtomicLong(0);
-  private static final AtomicLong _gcMs = new AtomicLong(0);
-  private static final ConcurrentHashMap<String, MemMeasure> _memoryBytes = new ConcurrentHashMap<String, MemMeasure>();
-
-  private static long readMemory() {
-    long total = 0;
-    for (MemMeasure mem: _memoryBytes.values()) {
-      total += mem.get();
-    }
-    return total;
-  }
-
-  private static long _prev_acked = 0;
-  private static long _prev_uptime = 0;
-
-  public static void printMetrics(C client, String name) throws Exception {
-    ClusterSummary summary = client.getClusterInfo();
-    String id = null;
-    for (TopologySummary ts: summary.get_topologies()) {
-      if (name.equals(ts.get_name())) {
-        id = ts.get_id();
-      }
-    }
-    if (id == null) {
-      throw new Exception("Could not find a topology named "+name);
-    }
-    TopologyInfo info = client.getTopologyInfo(id);
-    int uptime = info.get_uptime_secs();
-    long acked = 0;
-    long failed = 0;
-    for (ExecutorSummary exec: info.get_executors()) {
-      if ("spout".equals(exec.get_component_id())) {
-        SpoutStats stats = exec.get_stats().get_specific().get_spout();
-        Map<String, Long> failedMap = stats.get_failed().get(":all-time");
-        Map<String, Long> ackedMap = stats.get_acked().get(":all-time");
-        if (ackedMap != null) {
-          for (String key: ackedMap.keySet()) {
-            if (failedMap != null) {
-              Long tmp = failedMap.get(key);
-              if (tmp != null) {
-                  failed += tmp;
-              }
-            }
-            long ackVal = ackedMap.get(key);
-            acked += ackVal;
-          }
-        }
-      }
-    }
-    long ackedThisTime = acked - _prev_acked;
-    long thisTime = uptime - _prev_uptime;
-    long nnpct, nnnpct, min, max;
-    double mean, stddev;
-    synchronized(_histo) {
-      nnpct = _histo.getValueAtPercentile(99.0);
-      nnnpct = _histo.getValueAtPercentile(99.9);
-      min = _histo.getMinValue();
-      max = _histo.getMaxValue();
-      mean = _histo.getMean();
-      stddev = _histo.getStdDeviation();
-      _histo.reset();
-    }
-    long user = _userCPU.getAndSet(0);
-    long sys = _systemCPU.getAndSet(0);
-    long gc = _gcMs.getAndSet(0);
-    double memMB = readMemory() / (1024.0 * 1024.0);
-    System.out.printf("uptime: %,4d acked: %,9d acked/sec: %,10.2f failed: %,8d " +
-                      "99%%: %,15d 99.9%%: %,15d min: %,15d max: %,15d mean: %,15.2f " +
-                      "stddev: %,15.2f user: %,10d sys: %,10d gc: %,10d mem: %,10.2f\n",
-                       uptime, ackedThisTime, (((double)ackedThisTime)/thisTime), failed, nnpct, nnnpct,
-                       min, max, mean, stddev, user, sys, gc, memMB);
-    _prev_uptime = uptime;
-    _prev_acked = acked;
-  }
-
-  public static void kill(C client, String name) throws Exception {
-    KillOptions opts = new KillOptions();
-    opts.set_wait_secs(0);
-    client.killTopologyWithOpts(name, opts);
-  }
-
-  public static void main(String[] args) throws Exception {
-    long ratePerSecond = 500;
-    if (args != null && args.length > 0) {
-        ratePerSecond = Long.valueOf(args[0]);
-    }
-
-    int parallelism = 4;
-    if (args != null && args.length > 1) {
-        parallelism = Integer.valueOf(args[1]);
-    }
-
-    int numMins = 5;
-    if (args != null && args.length > 2) {
-        numMins = Integer.valueOf(args[2]);
-    }
-
-    String name = "wc-test";
-    if (args != null && args.length > 3) {
-        name = args[3];
-    }
-
-    Config conf = new Config();
-    HttpForwardingMetricsServer metricServer = new HttpForwardingMetricsServer(conf) {
-        @Override
-        public void handle(TaskInfo taskInfo, Collection<DataPoint> dataPoints) {
-            String worker = taskInfo.srcWorkerHost + ":" + taskInfo.srcWorkerPort;
-            for (DataPoint dp: dataPoints) {
-                if ("comp-lat-histo".equals(dp.name) && dp.value instanceof Histogram) {
-                    synchronized(_histo) {
-                        _histo.add((Histogram)dp.value);
-                    }
-                } else if ("CPU".equals(dp.name) && dp.value instanceof Map) {
-                   Map<Object, Object> m = (Map<Object, Object>)dp.value;
-                   Object sys = m.get("sys-ms");
-                   if (sys instanceof Number) {
-                       _systemCPU.getAndAdd(((Number)sys).longValue());
-                   }
-                   Object user = m.get("user-ms");
-                   if (user instanceof Number) {
-                       _userCPU.getAndAdd(((Number)user).longValue());
-                   }
-                } else if (dp.name.startsWith("GC/") && dp.value instanceof Map) {
-                   Map<Object, Object> m = (Map<Object, Object>)dp.value;
-                   Object count = m.get("count");
-                   if (count instanceof Number) {
-                       _gcCount.getAndAdd(((Number)count).longValue());
-                   }
-                   Object time = m.get("timeMs");
-                   if (time instanceof Number) {
-                       _gcMs.getAndAdd(((Number)time).longValue());
-                   }
-                } else if (dp.name.startsWith("memory/") && dp.value instanceof Map) {
-                   Map<Object, Object> m = (Map<Object, Object>)dp.value;
-                   Object val = m.get("usedBytes");
-                   if (val instanceof Number) {
-                       MemMeasure mm = _memoryBytes.get(worker);
-                       if (mm == null) {
-                         mm = new MemMeasure();
-                         MemMeasure tmp = _memoryBytes.putIfAbsent(worker, mm);
-                         mm = tmp == null ? mm : tmp; 
-                       }
-                       mm.update(((Number)val).longValue());
-                   }
-                }
-            }
-        }
-    };
-
-    metricServer.serve();
-    String url = metricServer.getUrl();
-
-    C cluster = new C(conf);
-    conf.setNumWorkers(parallelism);
-    conf.registerMetricsConsumer(backtype.storm.metric.LoggingMetricsConsumer.class);
-    conf.registerMetricsConsumer(backtype.storm.metric.HttpForwardingMetricsConsumer.class, url, 1);
-    Map<String, String> workerMetrics = new HashMap<String, String>();
-    if (!cluster.isLocal()) {
-      //sigar uses JNI and does not work in local mode
-      workerMetrics.put("CPU", "org.apache.storm.metrics.sigar.CPUMetric");
-    }
-    conf.put(Config.TOPOLOGY_WORKER_METRICS, workerMetrics);
-    conf.put(Config.TOPOLOGY_BUILTIN_METRICS_BUCKET_SIZE_SECS, 10);
-    conf.put(Config.TOPOLOGY_WORKER_GC_CHILDOPTS,
-      "-XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:NewSize=128m -XX:CMSInitiatingOccupancyFraction=70 -XX:-CMSConcurrentMTEnabled");
-    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-Xmx2g");
-
-    TopologyBuilder builder = new TopologyBuilder();
-
-    int numEach = 4 * parallelism;
-    builder.setSpout("spout", new FastRandomSentenceSpout(ratePerSecond/numEach), numEach);
-
-    builder.setBolt("split", new SplitSentence(), numEach).shuffleGrouping("spout");
-    builder.setBolt("count", new WordCount(), numEach).fieldsGrouping("split", new Fields("word"));
-
-    try {
-        cluster.submitTopology(name, conf, builder.createTopology());
-
-        for (int i = 0; i < numMins * 2; i++) {
-            Thread.sleep(30 * 1000);
-            printMetrics(cluster, name);
-        }
-    } finally {
-        kill(cluster, name);
-    }
-    System.exit(0);
-  }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/TransactionalGlobalCount.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/TransactionalGlobalCount.java b/examples/storm-starter/src/jvm/storm/starter/TransactionalGlobalCount.java
deleted file mode 100644
index 706afd1..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/TransactionalGlobalCount.java
+++ /dev/null
@@ -1,174 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter;
-
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.coordination.BatchOutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.testing.MemoryTransactionalSpout;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseBatchBolt;
-import backtype.storm.topology.base.BaseTransactionalBolt;
-import backtype.storm.transactional.ICommitter;
-import backtype.storm.transactional.TransactionAttempt;
-import backtype.storm.transactional.TransactionalTopologyBuilder;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
-
-import java.math.BigInteger;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * This is a basic example of a transactional topology. It keeps a count of the number of tuples seen so far in a
- * database. The source of data and the databases are mocked out as in memory maps for demonstration purposes.
- *
- * @see <a href="http://storm.apache.org/documentation/Transactional-topologies.html">Transactional topologies</a>
- */
-public class TransactionalGlobalCount {
-  public static final int PARTITION_TAKE_PER_BATCH = 3;
-  public static final Map<Integer, List<List<Object>>> DATA = new HashMap<Integer, List<List<Object>>>() {{
-    put(0, new ArrayList<List<Object>>() {{
-      add(new Values("cat"));
-      add(new Values("dog"));
-      add(new Values("chicken"));
-      add(new Values("cat"));
-      add(new Values("dog"));
-      add(new Values("apple"));
-    }});
-    put(1, new ArrayList<List<Object>>() {{
-      add(new Values("cat"));
-      add(new Values("dog"));
-      add(new Values("apple"));
-      add(new Values("banana"));
-    }});
-    put(2, new ArrayList<List<Object>>() {{
-      add(new Values("cat"));
-      add(new Values("cat"));
-      add(new Values("cat"));
-      add(new Values("cat"));
-      add(new Values("cat"));
-      add(new Values("dog"));
-      add(new Values("dog"));
-      add(new Values("dog"));
-      add(new Values("dog"));
-    }});
-  }};
-
-  public static class Value {
-    int count = 0;
-    BigInteger txid;
-  }
-
-  public static Map<String, Value> DATABASE = new HashMap<String, Value>();
-  public static final String GLOBAL_COUNT_KEY = "GLOBAL-COUNT";
-
-  public static class BatchCount extends BaseBatchBolt {
-    Object _id;
-    BatchOutputCollector _collector;
-
-    int _count = 0;
-
-    @Override
-    public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, Object id) {
-      _collector = collector;
-      _id = id;
-    }
-
-    @Override
-    public void execute(Tuple tuple) {
-      _count++;
-    }
-
-    @Override
-    public void finishBatch() {
-      _collector.emit(new Values(_id, _count));
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-      declarer.declare(new Fields("id", "count"));
-    }
-  }
-
-  public static class UpdateGlobalCount extends BaseTransactionalBolt implements ICommitter {
-    TransactionAttempt _attempt;
-    BatchOutputCollector _collector;
-
-    int _sum = 0;
-
-    @Override
-    public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, TransactionAttempt attempt) {
-      _collector = collector;
-      _attempt = attempt;
-    }
-
-    @Override
-    public void execute(Tuple tuple) {
-      _sum += tuple.getInteger(1);
-    }
-
-    @Override
-    public void finishBatch() {
-      Value val = DATABASE.get(GLOBAL_COUNT_KEY);
-      Value newval;
-      if (val == null || !val.txid.equals(_attempt.getTransactionId())) {
-        newval = new Value();
-        newval.txid = _attempt.getTransactionId();
-        if (val == null) {
-          newval.count = _sum;
-        }
-        else {
-          newval.count = _sum + val.count;
-        }
-        DATABASE.put(GLOBAL_COUNT_KEY, newval);
-      }
-      else {
-        newval = val;
-      }
-      _collector.emit(new Values(_attempt, newval.count));
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-      declarer.declare(new Fields("id", "sum"));
-    }
-  }
-
-  public static void main(String[] args) throws Exception {
-    MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
-    TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("global-count", "spout", spout, 3);
-    builder.setBolt("partial-count", new BatchCount(), 5).noneGrouping("spout");
-    builder.setBolt("sum", new UpdateGlobalCount()).globalGrouping("partial-count");
-
-    LocalCluster cluster = new LocalCluster();
-
-    Config config = new Config();
-    config.setDebug(true);
-    config.setMaxSpoutPending(3);
-
-    cluster.submitTopology("global-count-topology", config, builder.buildTopology());
-
-    Thread.sleep(3000);
-    cluster.shutdown();
-  }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/TransactionalWords.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/TransactionalWords.java b/examples/storm-starter/src/jvm/storm/starter/TransactionalWords.java
deleted file mode 100644
index 4d5ba1b..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/TransactionalWords.java
+++ /dev/null
@@ -1,246 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter;
-
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.coordination.BatchOutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.testing.MemoryTransactionalSpout;
-import backtype.storm.topology.BasicOutputCollector;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseBasicBolt;
-import backtype.storm.topology.base.BaseTransactionalBolt;
-import backtype.storm.transactional.ICommitter;
-import backtype.storm.transactional.TransactionAttempt;
-import backtype.storm.transactional.TransactionalTopologyBuilder;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
-
-import java.math.BigInteger;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * This class defines a more involved transactional topology than TransactionalGlobalCount. This topology processes a
- * stream of words and produces two outputs:
- * <p/>
- * 1. A count for each word (stored in a database) 2. The number of words for every bucket of 10 counts. So it stores in
- * the database how many words have appeared 0-9 times, how many have appeared 10-19 times, and so on.
- * <p/>
- * A batch of words can cause the bucket counts to decrement for some buckets and increment for others as words move
- * between buckets as their counts accumulate.
- */
-public class TransactionalWords {
-  public static class CountValue {
-    Integer prev_count = null;
-    int count = 0;
-    BigInteger txid = null;
-  }
-
-  public static class BucketValue {
-    int count = 0;
-    BigInteger txid;
-  }
-
-  public static final int BUCKET_SIZE = 10;
-
-  public static Map<String, CountValue> COUNT_DATABASE = new HashMap<String, CountValue>();
-  public static Map<Integer, BucketValue> BUCKET_DATABASE = new HashMap<Integer, BucketValue>();
-
-
-  public static final int PARTITION_TAKE_PER_BATCH = 3;
-
-  public static final Map<Integer, List<List<Object>>> DATA = new HashMap<Integer, List<List<Object>>>() {{
-    put(0, new ArrayList<List<Object>>() {{
-      add(new Values("cat"));
-      add(new Values("dog"));
-      add(new Values("chicken"));
-      add(new Values("cat"));
-      add(new Values("dog"));
-      add(new Values("apple"));
-    }});
-    put(1, new ArrayList<List<Object>>() {{
-      add(new Values("cat"));
-      add(new Values("dog"));
-      add(new Values("apple"));
-      add(new Values("banana"));
-    }});
-    put(2, new ArrayList<List<Object>>() {{
-      add(new Values("cat"));
-      add(new Values("cat"));
-      add(new Values("cat"));
-      add(new Values("cat"));
-      add(new Values("cat"));
-      add(new Values("dog"));
-      add(new Values("dog"));
-      add(new Values("dog"));
-      add(new Values("dog"));
-    }});
-  }};
-
-  public static class KeyedCountUpdater extends BaseTransactionalBolt implements ICommitter {
-    Map<String, Integer> _counts = new HashMap<String, Integer>();
-    BatchOutputCollector _collector;
-    TransactionAttempt _id;
-
-    int _count = 0;
-
-    @Override
-    public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, TransactionAttempt id) {
-      _collector = collector;
-      _id = id;
-    }
-
-    @Override
-    public void execute(Tuple tuple) {
-      String key = tuple.getString(1);
-      Integer curr = _counts.get(key);
-      if (curr == null)
-        curr = 0;
-      _counts.put(key, curr + 1);
-    }
-
-    @Override
-    public void finishBatch() {
-      for (String key : _counts.keySet()) {
-        CountValue val = COUNT_DATABASE.get(key);
-        CountValue newVal;
-        if (val == null || !val.txid.equals(_id.getTransactionId())) {
-          newVal = new CountValue();
-          newVal.txid = _id.getTransactionId();
-          if (val != null) {
-            newVal.prev_count = val.count;
-            newVal.count = val.count;
-          }
-          newVal.count = newVal.count + _counts.get(key);
-          COUNT_DATABASE.put(key, newVal);
-        }
-        else {
-          newVal = val;
-        }
-        _collector.emit(new Values(_id, key, newVal.count, newVal.prev_count));
-      }
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-      declarer.declare(new Fields("id", "key", "count", "prev-count"));
-    }
-  }
-
-  public static class Bucketize extends BaseBasicBolt {
-    @Override
-    public void execute(Tuple tuple, BasicOutputCollector collector) {
-      TransactionAttempt attempt = (TransactionAttempt) tuple.getValue(0);
-      int curr = tuple.getInteger(2);
-      Integer prev = tuple.getInteger(3);
-
-      int currBucket = curr / BUCKET_SIZE;
-      Integer prevBucket = null;
-      if (prev != null) {
-        prevBucket = prev / BUCKET_SIZE;
-      }
-
-      if (prevBucket == null) {
-        collector.emit(new Values(attempt, currBucket, 1));
-      }
-      else if (currBucket != prevBucket) {
-        collector.emit(new Values(attempt, currBucket, 1));
-        collector.emit(new Values(attempt, prevBucket, -1));
-      }
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-      declarer.declare(new Fields("attempt", "bucket", "delta"));
-    }
-  }
-
-  public static class BucketCountUpdater extends BaseTransactionalBolt {
-    Map<Integer, Integer> _accum = new HashMap<Integer, Integer>();
-    BatchOutputCollector _collector;
-    TransactionAttempt _attempt;
-
-    int _count = 0;
-
-    @Override
-    public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, TransactionAttempt attempt) {
-      _collector = collector;
-      _attempt = attempt;
-    }
-
-    @Override
-    public void execute(Tuple tuple) {
-      Integer bucket = tuple.getInteger(1);
-      Integer delta = tuple.getInteger(2);
-      Integer curr = _accum.get(bucket);
-      if (curr == null)
-        curr = 0;
-      _accum.put(bucket, curr + delta);
-    }
-
-    @Override
-    public void finishBatch() {
-      for (Integer bucket : _accum.keySet()) {
-        BucketValue currVal = BUCKET_DATABASE.get(bucket);
-        BucketValue newVal;
-        if (currVal == null || !currVal.txid.equals(_attempt.getTransactionId())) {
-          newVal = new BucketValue();
-          newVal.txid = _attempt.getTransactionId();
-          newVal.count = _accum.get(bucket);
-          if (currVal != null)
-            newVal.count += currVal.count;
-          BUCKET_DATABASE.put(bucket, newVal);
-        }
-        else {
-          newVal = currVal;
-        }
-        _collector.emit(new Values(_attempt, bucket, newVal.count));
-      }
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-      declarer.declare(new Fields("id", "bucket", "count"));
-    }
-  }
-
-  public static void main(String[] args) throws Exception {
-    MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
-    TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("top-n-words", "spout", spout, 2);
-    builder.setBolt("count", new KeyedCountUpdater(), 5).fieldsGrouping("spout", new Fields("word"));
-    builder.setBolt("bucketize", new Bucketize()).noneGrouping("count");
-    builder.setBolt("buckets", new BucketCountUpdater(), 5).fieldsGrouping("bucketize", new Fields("bucket"));
-
-
-    LocalCluster cluster = new LocalCluster();
-
-    Config config = new Config();
-    config.setDebug(true);
-    config.setMaxSpoutPending(3);
-
-    cluster.submitTopology("top-n-topology", config, builder.buildTopology());
-
-    Thread.sleep(3000);
-    cluster.shutdown();
-  }
-}

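For context on what STORM-1202 means for a file like this: the deleted example is expected to reappear under the org.apache.storm packages elsewhere in this patch series. Below is a hedged sketch, not part of this commit, of the same main() wiring against the relocated classes. It assumes the storm-starter sources move to an org.apache.storm.starter package; class and method names are unchanged, only the package prefixes differ.

    package org.apache.storm.starter;

    import org.apache.storm.Config;
    import org.apache.storm.LocalCluster;
    import org.apache.storm.testing.MemoryTransactionalSpout;
    import org.apache.storm.transactional.TransactionalTopologyBuilder;
    import org.apache.storm.tuple.Fields;
    import org.apache.storm.tuple.Values;

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class TransactionalWordsMigrationSketch {
      public static void main(String[] args) throws Exception {
        // One small test partition, standing in for the DATA map of the class above.
        Map<Integer, List<List<Object>>> data = new HashMap<Integer, List<List<Object>>>();
        List<List<Object>> partition0 = new ArrayList<List<Object>>();
        partition0.add(new Values("cat"));
        partition0.add(new Values("dog"));
        partition0.add(new Values("apple"));
        data.put(0, partition0);

        MemoryTransactionalSpout spout = new MemoryTransactionalSpout(data, new Fields("word"), 3);
        TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("words", "spout", spout, 2);
        // The KeyedCountUpdater / Bucketize / BucketCountUpdater bolts from the class
        // above are registered here exactly as before; their code does not change.

        Config config = new Config();
        config.setMaxSpoutPending(3);

        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("transactional-words-migrated", config, builder.buildTopology());
        Thread.sleep(3000);
        cluster.shutdown();
      }
    }
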
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/WordCountTopology.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/WordCountTopology.java b/examples/storm-starter/src/jvm/storm/starter/WordCountTopology.java
deleted file mode 100644
index 7260beb..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/WordCountTopology.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter;
-
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.StormSubmitter;
-import backtype.storm.task.ShellBolt;
-import backtype.storm.topology.BasicOutputCollector;
-import backtype.storm.topology.IRichBolt;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.TopologyBuilder;
-import backtype.storm.topology.base.BaseBasicBolt;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
-import storm.starter.spout.RandomSentenceSpout;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * This topology demonstrates Storm's stream groupings and multilang capabilities.
- */
-public class WordCountTopology {
-  public static class SplitSentence extends ShellBolt implements IRichBolt {
-
-    public SplitSentence() {
-      super("python", "splitsentence.py");
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-      declarer.declare(new Fields("word"));
-    }
-
-    @Override
-    public Map<String, Object> getComponentConfiguration() {
-      return null;
-    }
-  }
-
-  public static class WordCount extends BaseBasicBolt {
-    Map<String, Integer> counts = new HashMap<String, Integer>();
-
-    @Override
-    public void execute(Tuple tuple, BasicOutputCollector collector) {
-      String word = tuple.getString(0);
-      Integer count = counts.get(word);
-      if (count == null)
-        count = 0;
-      count++;
-      counts.put(word, count);
-      collector.emit(new Values(word, count));
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-      declarer.declare(new Fields("word", "count"));
-    }
-  }
-
-  public static void main(String[] args) throws Exception {
-
-    TopologyBuilder builder = new TopologyBuilder();
-
-    builder.setSpout("spout", new RandomSentenceSpout(), 5);
-
-    builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
-    builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));
-
-    Config conf = new Config();
-    conf.setDebug(true);
-
-    if (args != null && args.length > 0) {
-      conf.setNumWorkers(3);
-
-      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
-    }
-    else {
-      conf.setMaxTaskParallelism(3);
-
-      LocalCluster cluster = new LocalCluster();
-      cluster.submitTopology("word-count", conf, builder.createTopology());
-
-      Thread.sleep(10000);
-
-      cluster.shutdown();
-    }
-  }
-}

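The SplitSentence component above runs splitsentence.py through ShellBolt. As a rough, hypothetical in-JVM equivalent of what that multilang bolt does (not part of the repository), one could substitute a plain BaseBasicBolt; the class name below is illustrative.

    import backtype.storm.topology.BasicOutputCollector;
    import backtype.storm.topology.OutputFieldsDeclarer;
    import backtype.storm.topology.base.BaseBasicBolt;
    import backtype.storm.tuple.Fields;
    import backtype.storm.tuple.Tuple;
    import backtype.storm.tuple.Values;

    // Hypothetical in-JVM stand-in for the splitsentence.py ShellBolt used above.
    public class SplitSentenceJava extends BaseBasicBolt {
      @Override
      public void execute(Tuple tuple, BasicOutputCollector collector) {
        // The spout emits one sentence per tuple; emit one tuple per word.
        for (String word : tuple.getString(0).split("\\s+")) {
          collector.emit(new Values(word));
        }
      }

      @Override
      public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("word"));
      }
    }

Swapping it in only requires changing the setBolt("split", ...) line; the rest of the topology is unchanged.
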
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/WordCountTopologyNode.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/WordCountTopologyNode.java b/examples/storm-starter/src/jvm/storm/starter/WordCountTopologyNode.java
deleted file mode 100644
index 3fe982f..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/WordCountTopologyNode.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter;
-
-import backtype.storm.Config;
-import backtype.storm.LocalCluster;
-import backtype.storm.StormSubmitter;
-import backtype.storm.spout.ShellSpout;
-import backtype.storm.task.ShellBolt;
-import backtype.storm.topology.*;
-import backtype.storm.topology.base.BaseBasicBolt;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * This topology demonstrates Storm's stream groupings and multilang capabilities.
- */
-public class WordCountTopologyNode {
-  public static class SplitSentence extends ShellBolt implements IRichBolt {
-
-    public SplitSentence() {
-      super("node", "splitsentence.js");
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-      declarer.declare(new Fields("word"));
-    }
-
-    @Override
-    public Map<String, Object> getComponentConfiguration() {
-      return null;
-    }
-  }
-
-    public static class RandomSentence extends ShellSpout implements IRichSpout {
-
-        public RandomSentence() {
-            super("node", "randomsentence.js");
-        }
-
-        @Override
-        public void declareOutputFields(OutputFieldsDeclarer declarer) {
-            declarer.declare(new Fields("word"));
-        }
-
-        @Override
-        public Map<String, Object> getComponentConfiguration() {
-            return null;
-        }
-    }
-
-  public static class WordCount extends BaseBasicBolt {
-    Map<String, Integer> counts = new HashMap<String, Integer>();
-
-    @Override
-    public void execute(Tuple tuple, BasicOutputCollector collector) {
-      String word = tuple.getString(0);
-      Integer count = counts.get(word);
-      if (count == null)
-        count = 0;
-      count++;
-      counts.put(word, count);
-      collector.emit(new Values(word, count));
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-      declarer.declare(new Fields("word", "count"));
-    }
-  }
-
-  public static void main(String[] args) throws Exception {
-
-    TopologyBuilder builder = new TopologyBuilder();
-
-    builder.setSpout("spout", new RandomSentence(), 5);
-
-    builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
-    builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));
-
-    Config conf = new Config();
-    conf.setDebug(true);
-
-
-    if (args != null && args.length > 0) {
-      conf.setNumWorkers(3);
-
-      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
-    }
-    else {
-      conf.setMaxTaskParallelism(3);
-
-      LocalCluster cluster = new LocalCluster();
-      cluster.submitTopology("word-count", conf, builder.createTopology());
-
-      Thread.sleep(10000);
-
-      cluster.shutdown();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/bolt/AbstractRankerBolt.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/bolt/AbstractRankerBolt.java b/examples/storm-starter/src/jvm/storm/starter/bolt/AbstractRankerBolt.java
deleted file mode 100644
index 64ceb29..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/bolt/AbstractRankerBolt.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter.bolt;
-
-import backtype.storm.Config;
-import backtype.storm.topology.BasicOutputCollector;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseBasicBolt;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
-import backtype.storm.utils.TupleUtils;
-import org.apache.log4j.Logger;
-import storm.starter.tools.Rankings;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * This abstract bolt provides the basic behavior of bolts that rank objects according to their count.
- * <p/>
- * It uses a template method design pattern for {@link AbstractRankerBolt#execute(Tuple, BasicOutputCollector)} to allow
- * actual bolt implementations to specify how incoming tuples are processed, i.e. how the objects embedded within those
- * tuples are retrieved and counted.
- */
-public abstract class AbstractRankerBolt extends BaseBasicBolt {
-
-  private static final long serialVersionUID = 4931640198501530202L;
-  private static final int DEFAULT_EMIT_FREQUENCY_IN_SECONDS = 2;
-  private static final int DEFAULT_COUNT = 10;
-
-  private final int emitFrequencyInSeconds;
-  private final int count;
-  private final Rankings rankings;
-
-  public AbstractRankerBolt() {
-    this(DEFAULT_COUNT, DEFAULT_EMIT_FREQUENCY_IN_SECONDS);
-  }
-
-  public AbstractRankerBolt(int topN) {
-    this(topN, DEFAULT_EMIT_FREQUENCY_IN_SECONDS);
-  }
-
-  public AbstractRankerBolt(int topN, int emitFrequencyInSeconds) {
-    if (topN < 1) {
-      throw new IllegalArgumentException("topN must be >= 1 (you requested " + topN + ")");
-    }
-    if (emitFrequencyInSeconds < 1) {
-      throw new IllegalArgumentException(
-          "The emit frequency must be >= 1 seconds (you requested " + emitFrequencyInSeconds + " seconds)");
-    }
-    count = topN;
-    this.emitFrequencyInSeconds = emitFrequencyInSeconds;
-    rankings = new Rankings(count);
-  }
-
-  protected Rankings getRankings() {
-    return rankings;
-  }
-
-  /**
-   * This method functions as a template method (design pattern).
-   */
-  @Override
-  public final void execute(Tuple tuple, BasicOutputCollector collector) {
-    if (TupleUtils.isTick(tuple)) {
-      getLogger().debug("Received tick tuple, triggering emit of current rankings");
-      emitRankings(collector);
-    }
-    else {
-      updateRankingsWithTuple(tuple);
-    }
-  }
-
-  abstract void updateRankingsWithTuple(Tuple tuple);
-
-  private void emitRankings(BasicOutputCollector collector) {
-    collector.emit(new Values(rankings.copy()));
-    getLogger().debug("Rankings: " + rankings);
-  }
-
-  @Override
-  public void declareOutputFields(OutputFieldsDeclarer declarer) {
-    declarer.declare(new Fields("rankings"));
-  }
-
-  @Override
-  public Map<String, Object> getComponentConfiguration() {
-    Map<String, Object> conf = new HashMap<String, Object>();
-    conf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, emitFrequencyInSeconds);
-    return conf;
-  }
-
-  abstract Logger getLogger();
-}

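The template-method contract described above leaves only two hooks to a subclass. A minimal, hypothetical subclass (the real implementations are IntermediateRankingsBolt and TotalRankingsBolt, shown below) might look like this; it must live in the same package because the hooks are package-private.

    package storm.starter.bolt;

    import backtype.storm.tuple.Tuple;
    import org.apache.log4j.Logger;
    import storm.starter.tools.RankableObjectWithFields;

    // Hypothetical subclass, shown only to make the template-method contract concrete.
    public class ExampleRankerBolt extends AbstractRankerBolt {
      private static final Logger LOG = Logger.getLogger(ExampleRankerBolt.class);

      public ExampleRankerBolt() {
        super(5, 2); // keep a top-5 ranking, emit on the 2-second tick tuple
      }

      @Override
      void updateRankingsWithTuple(Tuple tuple) {
        // Hook called for every non-tick tuple; tick handling and emission are inherited.
        getRankings().updateWith(RankableObjectWithFields.from(tuple));
      }

      @Override
      Logger getLogger() {
        return LOG;
      }
    }
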
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/bolt/IntermediateRankingsBolt.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/bolt/IntermediateRankingsBolt.java b/examples/storm-starter/src/jvm/storm/starter/bolt/IntermediateRankingsBolt.java
deleted file mode 100644
index d1805ff..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/bolt/IntermediateRankingsBolt.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter.bolt;
-
-import backtype.storm.tuple.Tuple;
-import org.apache.log4j.Logger;
-import storm.starter.tools.Rankable;
-import storm.starter.tools.RankableObjectWithFields;
-
-/**
- * This bolt ranks incoming objects by their count.
- * <p/>
- * It assumes that input tuples adhere to the following format: (object, object_count, additionalField1,
- * additionalField2, ..., additionalFieldN).
- */
-public final class IntermediateRankingsBolt extends AbstractRankerBolt {
-
-  private static final long serialVersionUID = -1369800530256637409L;
-  private static final Logger LOG = Logger.getLogger(IntermediateRankingsBolt.class);
-
-  public IntermediateRankingsBolt() {
-    super();
-  }
-
-  public IntermediateRankingsBolt(int topN) {
-    super(topN);
-  }
-
-  public IntermediateRankingsBolt(int topN, int emitFrequencyInSeconds) {
-    super(topN, emitFrequencyInSeconds);
-  }
-
-  @Override
-  void updateRankingsWithTuple(Tuple tuple) {
-    Rankable rankable = RankableObjectWithFields.from(tuple);
-    super.getRankings().updateWith(rankable);
-  }
-
-  @Override
-  Logger getLogger() {
-    return LOG;
-  }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/bolt/PrinterBolt.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/bolt/PrinterBolt.java b/examples/storm-starter/src/jvm/storm/starter/bolt/PrinterBolt.java
deleted file mode 100644
index 58fc8ca..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/bolt/PrinterBolt.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter.bolt;
-
-import backtype.storm.topology.BasicOutputCollector;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseBasicBolt;
-import backtype.storm.tuple.Tuple;
-
-
-public class PrinterBolt extends BaseBasicBolt {
-
-  @Override
-  public void execute(Tuple tuple, BasicOutputCollector collector) {
-    System.out.println(tuple);
-  }
-
-  @Override
-  public void declareOutputFields(OutputFieldsDeclarer ofd) {
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/bolt/RollingCountAggBolt.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/bolt/RollingCountAggBolt.java b/examples/storm-starter/src/jvm/storm/starter/bolt/RollingCountAggBolt.java
deleted file mode 100644
index e222a97..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/bolt/RollingCountAggBolt.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter.bolt;
-
-import backtype.storm.Config;
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseRichBolt;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
-import org.apache.log4j.Logger;
-import storm.starter.tools.NthLastModifiedTimeTracker;
-import storm.starter.tools.SlidingWindowCounter;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-
-/**
- * This bolt aggregates counts from multiple upstream bolts.
- */
-public class RollingCountAggBolt extends BaseRichBolt {
-  private static final long serialVersionUID = 5537727428628598519L;
-  private static final Logger LOG = Logger.getLogger(RollingCountAggBolt.class);
-  //Mapping of key->upstreamBolt->count
-  private Map<Object, Map<Integer, Long>> counts = new HashMap<Object, Map<Integer, Long>>();
-  private OutputCollector collector;
-
-
-  @SuppressWarnings("rawtypes")
-  @Override
-  public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
-    this.collector = collector;
-  }
-
-  @Override
-  public void execute(Tuple tuple) {
-    Object obj = tuple.getValue(0);
-    long count = tuple.getLong(1);
-    int source = tuple.getSourceTask();
-    Map<Integer, Long> subCounts = counts.get(obj);
-    if (subCounts == null) {
-      subCounts = new HashMap<Integer, Long>();
-      counts.put(obj, subCounts);
-    }
-    //Update the current count for this object
-    subCounts.put(source, count);
-    //Output the sum of all the known counts so far for this key
-    long sum = 0;
-    for (Long val: subCounts.values()) {
-      sum += val;
-    }
-    collector.emit(new Values(obj, sum));
-  }
-
-  @Override
-  public void declareOutputFields(OutputFieldsDeclarer declarer) {
-    declarer.declare(new Fields("obj", "count"));
-  }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/bolt/RollingCountBolt.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/bolt/RollingCountBolt.java b/examples/storm-starter/src/jvm/storm/starter/bolt/RollingCountBolt.java
deleted file mode 100644
index 31f7ee2..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/bolt/RollingCountBolt.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter.bolt;
-
-import backtype.storm.Config;
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseRichBolt;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
-import backtype.storm.utils.TupleUtils;
-import org.apache.log4j.Logger;
-import storm.starter.tools.NthLastModifiedTimeTracker;
-import storm.starter.tools.SlidingWindowCounter;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-
-/**
- * This bolt performs rolling counts of incoming objects, i.e. sliding window based counting.
- * <p/>
- * The bolt is configured by two parameters, the length of the sliding window in seconds (which influences the output
- * data of the bolt, i.e. how it will count objects) and the emit frequency in seconds (which influences how often the
- * bolt will output the latest window counts). For instance, if the window length is set to an equivalent of five
- * minutes and the emit frequency to one minute, then the bolt will output the latest five-minute sliding window every
- * minute.
- * <p/>
- * The bolt emits a rolling count tuple per object, consisting of the object itself, its latest rolling count, and the
- * actual duration of the sliding window. The latter is included in case the expected sliding window length (as
- * configured by the user) is different from the actual length, e.g. due to high system load. Note that the actual
- * window length is tracked and calculated for the window, and not individually for each object within a window.
- * <p/>
- * Note: During the startup phase you will usually observe that the bolt warns you about the actual sliding window
- * length being smaller than the expected length. This behavior is expected and is caused by the way the sliding window
- * counts are initially "loaded up". You can safely ignore this warning during startup (e.g. you will see this warning
- * during the first ~ five minutes of startup time if the window length is set to five minutes).
- */
-public class RollingCountBolt extends BaseRichBolt {
-
-  private static final long serialVersionUID = 5537727428628598519L;
-  private static final Logger LOG = Logger.getLogger(RollingCountBolt.class);
-  private static final int NUM_WINDOW_CHUNKS = 5;
-  private static final int DEFAULT_SLIDING_WINDOW_IN_SECONDS = NUM_WINDOW_CHUNKS * 60;
-  private static final int DEFAULT_EMIT_FREQUENCY_IN_SECONDS = DEFAULT_SLIDING_WINDOW_IN_SECONDS / NUM_WINDOW_CHUNKS;
-  private static final String WINDOW_LENGTH_WARNING_TEMPLATE =
-      "Actual window length is %d seconds when it should be %d seconds"
-          + " (you can safely ignore this warning during the startup phase)";
-
-  private final SlidingWindowCounter<Object> counter;
-  private final int windowLengthInSeconds;
-  private final int emitFrequencyInSeconds;
-  private OutputCollector collector;
-  private NthLastModifiedTimeTracker lastModifiedTracker;
-
-  public RollingCountBolt() {
-    this(DEFAULT_SLIDING_WINDOW_IN_SECONDS, DEFAULT_EMIT_FREQUENCY_IN_SECONDS);
-  }
-
-  public RollingCountBolt(int windowLengthInSeconds, int emitFrequencyInSeconds) {
-    this.windowLengthInSeconds = windowLengthInSeconds;
-    this.emitFrequencyInSeconds = emitFrequencyInSeconds;
-    counter = new SlidingWindowCounter<Object>(deriveNumWindowChunksFrom(this.windowLengthInSeconds,
-        this.emitFrequencyInSeconds));
-  }
-
-  private int deriveNumWindowChunksFrom(int windowLengthInSeconds, int windowUpdateFrequencyInSeconds) {
-    return windowLengthInSeconds / windowUpdateFrequencyInSeconds;
-  }
-
-  @SuppressWarnings("rawtypes")
-  @Override
-  public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
-    this.collector = collector;
-    lastModifiedTracker = new NthLastModifiedTimeTracker(deriveNumWindowChunksFrom(this.windowLengthInSeconds,
-        this.emitFrequencyInSeconds));
-  }
-
-  @Override
-  public void execute(Tuple tuple) {
-    if (TupleUtils.isTick(tuple)) {
-      LOG.debug("Received tick tuple, triggering emit of current window counts");
-      emitCurrentWindowCounts();
-    }
-    else {
-      countObjAndAck(tuple);
-    }
-  }
-
-  private void emitCurrentWindowCounts() {
-    Map<Object, Long> counts = counter.getCountsThenAdvanceWindow();
-    int actualWindowLengthInSeconds = lastModifiedTracker.secondsSinceOldestModification();
-    lastModifiedTracker.markAsModified();
-    if (actualWindowLengthInSeconds != windowLengthInSeconds) {
-      LOG.warn(String.format(WINDOW_LENGTH_WARNING_TEMPLATE, actualWindowLengthInSeconds, windowLengthInSeconds));
-    }
-    emit(counts, actualWindowLengthInSeconds);
-  }
-
-  private void emit(Map<Object, Long> counts, int actualWindowLengthInSeconds) {
-    for (Entry<Object, Long> entry : counts.entrySet()) {
-      Object obj = entry.getKey();
-      Long count = entry.getValue();
-      collector.emit(new Values(obj, count, actualWindowLengthInSeconds));
-    }
-  }
-
-  private void countObjAndAck(Tuple tuple) {
-    Object obj = tuple.getValue(0);
-    counter.incrementCount(obj);
-    collector.ack(tuple);
-  }
-
-  @Override
-  public void declareOutputFields(OutputFieldsDeclarer declarer) {
-    declarer.declare(new Fields("obj", "count", "actualWindowLengthInSeconds"));
-  }
-
-  @Override
-  public Map<String, Object> getComponentConfiguration() {
-    Map<String, Object> conf = new HashMap<String, Object>();
-    conf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, emitFrequencyInSeconds);
-    return conf;
-  }
-}

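Putting the two rolling-count bolts above together, here is a hedged wiring sketch (class name, component ids, and parallelism are illustrative; TestWordSpout is the stock test spout from backtype.storm.testing). It uses the five-minute window emitted every minute from the javadoc above.

    import backtype.storm.Config;
    import backtype.storm.LocalCluster;
    import backtype.storm.testing.TestWordSpout;
    import backtype.storm.topology.TopologyBuilder;
    import backtype.storm.tuple.Fields;
    import storm.starter.bolt.RollingCountAggBolt;
    import storm.starter.bolt.RollingCountBolt;

    public class RollingCountSketch {
      public static void main(String[] args) throws Exception {
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("words", new TestWordSpout(), 2);
        // 300 s sliding window whose latest counts are emitted every 60 s (via tick tuples).
        builder.setBolt("rolling-count", new RollingCountBolt(300, 60), 4)
               .shuffleGrouping("words");
        // With a shuffle grouping several counter tasks may see the same word, so the
        // aggregator recombines their partial counts, keyed by the emitted "obj" field.
        builder.setBolt("rolling-count-agg", new RollingCountAggBolt(), 4)
               .fieldsGrouping("rolling-count", new Fields("obj"));

        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("rolling-count-demo", new Config(), builder.createTopology());
        Thread.sleep(120 * 1000);
        cluster.shutdown();
      }
    }
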
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/bolt/SingleJoinBolt.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/bolt/SingleJoinBolt.java b/examples/storm-starter/src/jvm/storm/starter/bolt/SingleJoinBolt.java
deleted file mode 100644
index 85a7a26..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/bolt/SingleJoinBolt.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter.bolt;
-
-import backtype.storm.Config;
-import backtype.storm.generated.GlobalStreamId;
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseRichBolt;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.utils.TimeCacheMap;
-
-import java.util.*;
-
-public class SingleJoinBolt extends BaseRichBolt {
-  OutputCollector _collector;
-  Fields _idFields;
-  Fields _outFields;
-  int _numSources;
-  TimeCacheMap<List<Object>, Map<GlobalStreamId, Tuple>> _pending;
-  Map<String, GlobalStreamId> _fieldLocations;
-
-  public SingleJoinBolt(Fields outFields) {
-    _outFields = outFields;
-  }
-
-  @Override
-  public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
-    _fieldLocations = new HashMap<String, GlobalStreamId>();
-    _collector = collector;
-    int timeout = ((Number) conf.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS)).intValue();
-    _pending = new TimeCacheMap<List<Object>, Map<GlobalStreamId, Tuple>>(timeout, new ExpireCallback());
-    _numSources = context.getThisSources().size();
-    Set<String> idFields = null;
-    for (GlobalStreamId source : context.getThisSources().keySet()) {
-      Fields fields = context.getComponentOutputFields(source.get_componentId(), source.get_streamId());
-      Set<String> setFields = new HashSet<String>(fields.toList());
-      if (idFields == null)
-        idFields = setFields;
-      else
-        idFields.retainAll(setFields);
-
-      for (String outfield : _outFields) {
-        for (String sourcefield : fields) {
-          if (outfield.equals(sourcefield)) {
-            _fieldLocations.put(outfield, source);
-          }
-        }
-      }
-    }
-    _idFields = new Fields(new ArrayList<String>(idFields));
-
-    if (_fieldLocations.size() != _outFields.size()) {
-      throw new RuntimeException("Cannot find all outfields among sources");
-    }
-  }
-
-  @Override
-  public void execute(Tuple tuple) {
-    List<Object> id = tuple.select(_idFields);
-    GlobalStreamId streamId = new GlobalStreamId(tuple.getSourceComponent(), tuple.getSourceStreamId());
-    if (!_pending.containsKey(id)) {
-      _pending.put(id, new HashMap<GlobalStreamId, Tuple>());
-    }
-    Map<GlobalStreamId, Tuple> parts = _pending.get(id);
-    if (parts.containsKey(streamId))
-      throw new RuntimeException("Received same side of single join twice");
-    parts.put(streamId, tuple);
-    if (parts.size() == _numSources) {
-      _pending.remove(id);
-      List<Object> joinResult = new ArrayList<Object>();
-      for (String outField : _outFields) {
-        GlobalStreamId loc = _fieldLocations.get(outField);
-        joinResult.add(parts.get(loc).getValueByField(outField));
-      }
-      _collector.emit(new ArrayList<Tuple>(parts.values()), joinResult);
-
-      for (Tuple part : parts.values()) {
-        _collector.ack(part);
-      }
-    }
-  }
-
-  @Override
-  public void declareOutputFields(OutputFieldsDeclarer declarer) {
-    declarer.declare(_outFields);
-  }
-
-  private class ExpireCallback implements TimeCacheMap.ExpiredCallback<List<Object>, Map<GlobalStreamId, Tuple>> {
-    @Override
-    public void expire(List<Object> id, Map<GlobalStreamId, Tuple> tuples) {
-      for (Tuple tuple : tuples.values()) {
-        _collector.fail(tuple);
-      }
-    }
-  }
-}

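SingleJoinBolt has no class-level javadoc, so here is a hedged usage sketch (class name, component ids, and sample data are illustrative): two feeder spouts share an "id" field, and the join bolt subscribes to both with a fields grouping on that common field so matching tuples land on the same task. The expiry of half-complete joins is driven by TOPOLOGY_MESSAGE_TIMEOUT_SECS via the TimeCacheMap above.

    import backtype.storm.Config;
    import backtype.storm.LocalCluster;
    import backtype.storm.testing.FeederSpout;
    import backtype.storm.topology.TopologyBuilder;
    import backtype.storm.tuple.Fields;
    import backtype.storm.tuple.Values;
    import storm.starter.bolt.SingleJoinBolt;

    public class SingleJoinSketch {
      public static void main(String[] args) throws Exception {
        FeederSpout genderSpout = new FeederSpout(new Fields("id", "gender"));
        FeederSpout ageSpout = new FeederSpout(new Fields("id", "age"));

        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("gender", genderSpout);
        builder.setSpout("age", ageSpout);
        // Join on the shared "id" field; emit ("gender", "age") once both sides arrive.
        builder.setBolt("join", new SingleJoinBolt(new Fields("gender", "age")))
               .fieldsGrouping("gender", new Fields("id"))
               .fieldsGrouping("age", new Fields("id"));

        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("single-join-demo", new Config(), builder.createTopology());

        genderSpout.feed(new Values(1, "male"));
        ageSpout.feed(new Values(1, 30)); // completes the join for id=1

        Thread.sleep(2000);
        cluster.shutdown();
      }
    }
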
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/bolt/SlidingWindowSumBolt.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/bolt/SlidingWindowSumBolt.java b/examples/storm-starter/src/jvm/storm/starter/bolt/SlidingWindowSumBolt.java
deleted file mode 100644
index ef3a0b8..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/bolt/SlidingWindowSumBolt.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter.bolt;
-
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseWindowedBolt;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.Values;
-import backtype.storm.windowing.TupleWindow;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-import java.util.Map;
-
-/**
- * Computes sliding window sum
- */
-public class SlidingWindowSumBolt extends BaseWindowedBolt {
-    private static final Logger LOG = LoggerFactory.getLogger(SlidingWindowSumBolt.class);
-
-    private int sum = 0;
-    private OutputCollector collector;
-
-    @Override
-    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
-        this.collector = collector;
-    }
-
-    @Override
-    public void execute(TupleWindow inputWindow) {
-            /*
-             * The inputWindow gives a view of
-             * (a) all the events in the window
-             * (b) events that expired since last activation of the window
-             * (c) events that newly arrived since last activation of the window
-             */
-        List<Tuple> tuplesInWindow = inputWindow.get();
-        List<Tuple> newTuples = inputWindow.getNew();
-        List<Tuple> expiredTuples = inputWindow.getExpired();
-
-        LOG.debug("Events in current window: " + tuplesInWindow.size());
-            /*
-             * Instead of iterating over all the tuples in the window to compute
-             * the sum, the values for the new events are added and old events are
-             * subtracted. Similar optimizations might be possible in other
-             * windowing computations.
-             */
-        for (Tuple tuple : newTuples) {
-            sum += (int) tuple.getValue(0);
-        }
-        for (Tuple tuple : expiredTuples) {
-            sum -= (int) tuple.getValue(0);
-        }
-        collector.emit(new Values(sum));
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-        declarer.declare(new Fields("sum"));
-    }
-}

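A hedged usage sketch for the windowed bolt above, assuming the Count-based withWindow builder on BaseWindowedBolt at this code line: sum over the last 30 tuples, sliding every 10. Class name and component ids are illustrative.

    import backtype.storm.Config;
    import backtype.storm.LocalCluster;
    import backtype.storm.topology.TopologyBuilder;
    import backtype.storm.topology.base.BaseWindowedBolt.Count;
    import storm.starter.bolt.PrinterBolt;
    import storm.starter.bolt.SlidingWindowSumBolt;
    import storm.starter.spout.RandomIntegerSpout;

    public class SlidingWindowSumSketch {
      public static void main(String[] args) throws Exception {
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("integers", new RandomIntegerSpout(), 1);
        // Count-based sliding window: length 30 tuples, slide every 10 tuples.
        builder.setBolt("sliding-sum",
                        new SlidingWindowSumBolt().withWindow(new Count(30), new Count(10)), 1)
               .shuffleGrouping("integers");
        builder.setBolt("printer", new PrinterBolt(), 1).shuffleGrouping("sliding-sum");

        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("sliding-window-sum", new Config(), builder.createTopology());
        Thread.sleep(20000);
        cluster.shutdown();
      }
    }
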
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/bolt/TotalRankingsBolt.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/bolt/TotalRankingsBolt.java b/examples/storm-starter/src/jvm/storm/starter/bolt/TotalRankingsBolt.java
deleted file mode 100644
index 0e1bb05..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/bolt/TotalRankingsBolt.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter.bolt;
-
-import backtype.storm.tuple.Tuple;
-import org.apache.log4j.Logger;
-import storm.starter.tools.Rankings;
-
-/**
- * This bolt merges incoming {@link Rankings}.
- * <p/>
- * It can be used to merge intermediate rankings generated by {@link IntermediateRankingsBolt} into a final,
- * consolidated ranking. To do so, configure this bolt with a globalGrouping on {@link IntermediateRankingsBolt}.
- */
-public final class TotalRankingsBolt extends AbstractRankerBolt {
-
-  private static final long serialVersionUID = -8447525895532302198L;
-  private static final Logger LOG = Logger.getLogger(TotalRankingsBolt.class);
-
-  public TotalRankingsBolt() {
-    super();
-  }
-
-  public TotalRankingsBolt(int topN) {
-    super(topN);
-  }
-
-  public TotalRankingsBolt(int topN, int emitFrequencyInSeconds) {
-    super(topN, emitFrequencyInSeconds);
-  }
-
-  @Override
-  void updateRankingsWithTuple(Tuple tuple) {
-    Rankings rankingsToBeMerged = (Rankings) tuple.getValue(0);
-    super.getRankings().updateWith(rankingsToBeMerged);
-    super.getRankings().pruneZeroCounts();
-  }
-
-  @Override
-  Logger getLogger() {
-    return LOG;
-  }
-
-}

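As the javadoc above says, the consolidated ranking comes from feeding IntermediateRankingsBolt into TotalRankingsBolt via a global grouping. A hedged sketch of that chain (class name, component ids, parallelism, and TOP_N are illustrative), using the rolling-count bolt shown earlier as the count source:

    import backtype.storm.Config;
    import backtype.storm.LocalCluster;
    import backtype.storm.testing.TestWordSpout;
    import backtype.storm.topology.TopologyBuilder;
    import backtype.storm.tuple.Fields;
    import storm.starter.bolt.IntermediateRankingsBolt;
    import storm.starter.bolt.RollingCountBolt;
    import storm.starter.bolt.TotalRankingsBolt;

    public class RollingTopWordsSketch {
      public static void main(String[] args) throws Exception {
        final int TOP_N = 5;
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("word", new TestWordSpout(), 5);
        builder.setBolt("count", new RollingCountBolt(9, 3), 4)
               .fieldsGrouping("word", new Fields("word"));
        // Partial rankings per task, partitioned by the counted object ...
        builder.setBolt("intermediate-ranker", new IntermediateRankingsBolt(TOP_N), 4)
               .fieldsGrouping("count", new Fields("obj"));
        // ... merged into one consolidated ranking by a single task.
        builder.setBolt("total-ranker", new TotalRankingsBolt(TOP_N))
               .globalGrouping("intermediate-ranker");

        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("rolling-top-words", new Config(), builder.createTopology());
        Thread.sleep(30000);
        cluster.shutdown();
      }
    }
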
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/spout/RandomIntegerSpout.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/spout/RandomIntegerSpout.java b/examples/storm-starter/src/jvm/storm/starter/spout/RandomIntegerSpout.java
deleted file mode 100644
index 5778c8e..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/spout/RandomIntegerSpout.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter.spout;
-
-import backtype.storm.spout.SpoutOutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseRichSpout;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
-import backtype.storm.utils.Utils;
-
-import java.util.Map;
-import java.util.Random;
-
-/**
- * Emits a random integer and a timestamp value (offset by one day),
- * every 100 ms. The ts field can be used in tuple time based windowing.
- */
-public class RandomIntegerSpout extends BaseRichSpout {
-    private SpoutOutputCollector collector;
-    private Random rand;
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-        declarer.declare(new Fields("value", "ts"));
-    }
-
-    @Override
-    public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
-        this.collector = collector;
-        this.rand = new Random();
-    }
-
-    @Override
-    public void nextTuple() {
-        Utils.sleep(100);
-        collector.emit(new Values(rand.nextInt(1000), System.currentTimeMillis() - (24 * 60 * 60 * 1000)));
-    }
-}

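The "ts" field mentioned in the javadoc above is what enables tuple-time (event-time) windows. A hedged sketch, assuming the withTimestampField and withLag builders on BaseWindowedBolt at this code line; class name, component ids, and durations are illustrative.

    import java.util.concurrent.TimeUnit;

    import backtype.storm.Config;
    import backtype.storm.LocalCluster;
    import backtype.storm.topology.TopologyBuilder;
    import backtype.storm.topology.base.BaseWindowedBolt.Duration;
    import storm.starter.bolt.SlidingWindowSumBolt;
    import storm.starter.spout.RandomIntegerSpout;

    public class TupleTimeWindowSketch {
      public static void main(String[] args) throws Exception {
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("integers", new RandomIntegerSpout(), 1);
        builder.setBolt("sum",
                        new SlidingWindowSumBolt()
                            .withWindow(new Duration(20, TimeUnit.SECONDS), new Duration(10, TimeUnit.SECONDS))
                            .withTimestampField("ts")                    // use the spout's "ts" field as event time
                            .withLag(new Duration(5, TimeUnit.SECONDS)), // tolerate out-of-order tuples up to 5 s
                        1)
               .shuffleGrouping("integers");

        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("tuple-time-window", new Config(), builder.createTopology());
        Thread.sleep(40000);
        cluster.shutdown();
      }
    }
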
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/spout/RandomSentenceSpout.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/spout/RandomSentenceSpout.java b/examples/storm-starter/src/jvm/storm/starter/spout/RandomSentenceSpout.java
deleted file mode 100644
index 813b10c..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/spout/RandomSentenceSpout.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter.spout;
-
-import backtype.storm.spout.SpoutOutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseRichSpout;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
-import backtype.storm.utils.Utils;
-
-import java.util.Map;
-import java.util.Random;
-
-public class RandomSentenceSpout extends BaseRichSpout {
-  SpoutOutputCollector _collector;
-  Random _rand;
-
-
-  @Override
-  public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
-    _collector = collector;
-    _rand = new Random();
-  }
-
-  @Override
-  public void nextTuple() {
-    Utils.sleep(100);
-    String[] sentences = new String[]{ "the cow jumped over the moon", "an apple a day keeps the doctor away",
-        "four score and seven years ago", "snow white and the seven dwarfs", "i am at two with nature" };
-    String sentence = sentences[_rand.nextInt(sentences.length)];
-    _collector.emit(new Values(sentence));
-  }
-
-  @Override
-  public void ack(Object id) {
-  }
-
-  @Override
-  public void fail(Object id) {
-  }
-
-  @Override
-  public void declareOutputFields(OutputFieldsDeclarer declarer) {
-    declarer.declare(new Fields("word"));
-  }
-
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/spout/TwitterSampleSpout.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/spout/TwitterSampleSpout.java b/examples/storm-starter/src/jvm/storm/starter/spout/TwitterSampleSpout.java
deleted file mode 100644
index 40f8d72..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/spout/TwitterSampleSpout.java
+++ /dev/null
@@ -1,164 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package storm.starter.spout;
-
-import java.util.Map;
-import java.util.concurrent.LinkedBlockingQueue;
-
-import twitter4j.FilterQuery;
-import twitter4j.StallWarning;
-import twitter4j.Status;
-import twitter4j.StatusDeletionNotice;
-import twitter4j.StatusListener;
-import twitter4j.TwitterStream;
-import twitter4j.TwitterStreamFactory;
-import twitter4j.auth.AccessToken;
-import twitter4j.conf.ConfigurationBuilder;
-
-import backtype.storm.Config;
-import backtype.storm.spout.SpoutOutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.topology.base.BaseRichSpout;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Values;
-import backtype.storm.utils.Utils;
-
-@SuppressWarnings("serial")
-public class TwitterSampleSpout extends BaseRichSpout {
-
-	SpoutOutputCollector _collector;
-	LinkedBlockingQueue<Status> queue = null;
-	TwitterStream _twitterStream;
-	String consumerKey;
-	String consumerSecret;
-	String accessToken;
-	String accessTokenSecret;
-	String[] keyWords;
-
-	public TwitterSampleSpout(String consumerKey, String consumerSecret,
-			String accessToken, String accessTokenSecret, String[] keyWords) {
-		this.consumerKey = consumerKey;
-		this.consumerSecret = consumerSecret;
-		this.accessToken = accessToken;
-		this.accessTokenSecret = accessTokenSecret;
-		this.keyWords = keyWords;
-	}
-
-	public TwitterSampleSpout() {
-		// TODO Auto-generated constructor stub
-	}
-
-	@Override
-	public void open(Map conf, TopologyContext context,
-			SpoutOutputCollector collector) {
-		queue = new LinkedBlockingQueue<Status>(1000);
-		_collector = collector;
-
-		StatusListener listener = new StatusListener() {
-
-			@Override
-			public void onStatus(Status status) {
-			
-				queue.offer(status);
-			}
-
-			@Override
-			public void onDeletionNotice(StatusDeletionNotice sdn) {
-			}
-
-			@Override
-			public void onTrackLimitationNotice(int i) {
-			}
-
-			@Override
-			public void onScrubGeo(long l, long l1) {
-			}
-
-			@Override
-			public void onException(Exception ex) {
-			}
-
-			@Override
-			public void onStallWarning(StallWarning arg0) {
-				// TODO Auto-generated method stub
-
-			}
-
-		};
-
-		// Keep the stream in the member field so close() can shut it down.
-		_twitterStream = new TwitterStreamFactory(
-				new ConfigurationBuilder().setJSONStoreEnabled(true).build())
-				.getInstance();
-
-		_twitterStream.addListener(listener);
-		_twitterStream.setOAuthConsumer(consumerKey, consumerSecret);
-		AccessToken token = new AccessToken(accessToken, accessTokenSecret);
-		_twitterStream.setOAuthAccessToken(token);
-
-		if (keyWords.length == 0) {
-			_twitterStream.sample();
-		}
-		else {
-			FilterQuery query = new FilterQuery().track(keyWords);
-			_twitterStream.filter(query);
-		}
-
-	}
-
-	@Override
-	public void nextTuple() {
-		Status ret = queue.poll();
-		if (ret == null) {
-			Utils.sleep(50);
-		} else {
-			_collector.emit(new Values(ret));
-
-		}
-	}
-
-	@Override
-	public void close() {
-		_twitterStream.shutdown();
-	}
-
-	@Override
-	public Map<String, Object> getComponentConfiguration() {
-		Config ret = new Config();
-		ret.setMaxTaskParallelism(1);
-		return ret;
-	}
-
-	@Override
-	public void ack(Object id) {
-	}
-
-	@Override
-	public void fail(Object id) {
-	}
-
-	@Override
-	public void declareOutputFields(OutputFieldsDeclarer declarer) {
-		declarer.declare(new Fields("tweet"));
-	}
-
-}

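A hedged usage sketch for the spout above (class name and component ids are illustrative; the OAuth credentials are placeholders supplied on the command line, and an empty keyword list falls back to the unfiltered sample stream):

    import java.util.Arrays;

    import backtype.storm.Config;
    import backtype.storm.LocalCluster;
    import backtype.storm.topology.TopologyBuilder;
    import backtype.storm.utils.Utils;
    import storm.starter.bolt.PrinterBolt;
    import storm.starter.spout.TwitterSampleSpout;

    public class PrintTweetsSketch {
      public static void main(String[] args) {
        // Placeholder credentials -- supply real OAuth values on the command line.
        String consumerKey = args[0];
        String consumerSecret = args[1];
        String accessToken = args[2];
        String accessTokenSecret = args[3];
        String[] keyWords = Arrays.copyOfRange(args, 4, args.length);

        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("twitter", new TwitterSampleSpout(consumerKey, consumerSecret,
            accessToken, accessTokenSecret, keyWords), 1);
        builder.setBolt("print", new PrinterBolt()).shuffleGrouping("twitter");

        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("print-tweets", new Config(), builder.createTopology());

        Utils.sleep(10000);
        cluster.killTopology("print-tweets");
        cluster.shutdown();
      }
    }
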
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/tools/NthLastModifiedTimeTracker.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/tools/NthLastModifiedTimeTracker.java b/examples/storm-starter/src/jvm/storm/starter/tools/NthLastModifiedTimeTracker.java
deleted file mode 100644
index 08df8cf..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/tools/NthLastModifiedTimeTracker.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter.tools;
-
-import backtype.storm.utils.Time;
-import org.apache.commons.collections.buffer.CircularFifoBuffer;
-
-/**
- * This class tracks the time-since-last-modify of a "thing" in a rolling fashion.
- * <p/>
- * For example, create a 5-slot tracker to track the five most recent time-since-last-modify.
- * <p/>
- * You must manually "mark" that the thing you want to track -- in terms of modification times -- has just
- * been modified.
- */
-public class NthLastModifiedTimeTracker {
-
-  private static final int MILLIS_IN_SEC = 1000;
-
-  private final CircularFifoBuffer lastModifiedTimesMillis;
-
-  public NthLastModifiedTimeTracker(int numTimesToTrack) {
-    if (numTimesToTrack < 1) {
-      throw new IllegalArgumentException(
-          "numTimesToTrack must be greater than zero (you requested " + numTimesToTrack + ")");
-    }
-    lastModifiedTimesMillis = new CircularFifoBuffer(numTimesToTrack);
-    initLastModifiedTimesMillis();
-  }
-
-  private void initLastModifiedTimesMillis() {
-    long nowCached = now();
-    for (int i = 0; i < lastModifiedTimesMillis.maxSize(); i++) {
-      lastModifiedTimesMillis.add(Long.valueOf(nowCached));
-    }
-  }
-
-  private long now() {
-    return Time.currentTimeMillis();
-  }
-
-  public int secondsSinceOldestModification() {
-    long modifiedTimeMillis = ((Long) lastModifiedTimesMillis.get()).longValue();
-    return (int) ((now() - modifiedTimeMillis) / MILLIS_IN_SEC);
-  }
-
-  public void markAsModified() {
-    updateLastModifiedTime();
-  }
-
-  private void updateLastModifiedTime() {
-    lastModifiedTimesMillis.add(now());
-  }
-
-}

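The intended calling pattern, mirrored from how RollingCountBolt above uses the tracker: read the elapsed time since the oldest of the last N marks, then record a new mark. A hedged, standalone sketch (class name illustrative):

    import storm.starter.tools.NthLastModifiedTimeTracker;

    public class TrackerSketch {
      public static void main(String[] args) throws Exception {
        // Track the five most recent "modification" marks.
        NthLastModifiedTimeTracker tracker = new NthLastModifiedTimeTracker(5);

        for (int i = 0; i < 10; i++) {
          Thread.sleep(1000); // pretend work happens here

          // Elapsed seconds since the oldest of the last 5 marks -- the "actual window length".
          int actualWindowLengthInSeconds = tracker.secondsSinceOldestModification();
          System.out.println("actual window length: " + actualWindowLengthInSeconds + "s");

          tracker.markAsModified(); // record this iteration as the newest mark
        }
      }
    }
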
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/examples/storm-starter/src/jvm/storm/starter/tools/Rankable.java
----------------------------------------------------------------------
diff --git a/examples/storm-starter/src/jvm/storm/starter/tools/Rankable.java b/examples/storm-starter/src/jvm/storm/starter/tools/Rankable.java
deleted file mode 100644
index 85e3d1d..0000000
--- a/examples/storm-starter/src/jvm/storm/starter/tools/Rankable.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.starter.tools;
-
-public interface Rankable extends Comparable<Rankable> {
-
-  Object getObject();
-
-  long getCount();
-
-  /**
-   * Note: We do not defensively copy the object wrapped by the Rankable.  It is passed as is.
-   *
-   * @return a defensive copy
-   */
-  Rankable copy();
-}
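Likewise, a minimal, hypothetical implementation illustrating the Rankable contract from the file removed above; storm-starter's real implementations (e.g. RankableObjectWithFields) are more elaborate.

    // Hypothetical implementation of the Rankable interface shown in the removed file.
    import storm.starter.tools.Rankable;

    public class WordCountRankable implements Rankable {
      private final String word;
      private final long count;

      public WordCountRankable(String word, long count) {
        this.word = word;
        this.count = count;
      }

      @Override
      public Object getObject() {
        return word;                 // the wrapped object being ranked
      }

      @Override
      public long getCount() {
        return count;                // the value the ranking is based on
      }

      @Override
      public Rankable copy() {
        return new WordCountRankable(word, count);
      }

      @Override
      public int compareTo(Rankable other) {
        return Long.compare(count, other.getCount());  // rank by count
      }
    }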


[53/53] [abbrv] storm git commit: Added STORM-1202 to Changelog and moved version to 1.0.0-SNAPSHOT

Posted by bo...@apache.org.
Added STORM-1202 to Changelog and moved version to 1.0.0-SNAPSHOT


Project: http://git-wip-us.apache.org/repos/asf/storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/storm/commit/a4f9f8bc
Tree: http://git-wip-us.apache.org/repos/asf/storm/tree/a4f9f8bc
Diff: http://git-wip-us.apache.org/repos/asf/storm/diff/a4f9f8bc

Branch: refs/heads/master
Commit: a4f9f8bc5b4ca85de487a0a868e519ddcb94e852
Parents: b5fd753
Author: Robert (Bobby) Evans <ev...@yahoo-inc.com>
Authored: Mon Jan 11 14:55:39 2016 -0600
Committer: Robert (Bobby) Evans <ev...@yahoo-inc.com>
Committed: Mon Jan 11 14:55:39 2016 -0600

----------------------------------------------------------------------
 CHANGELOG.md                                             | 3 ++-
 examples/storm-starter/pom.xml                           | 2 +-
 external/flux/flux-core/pom.xml                          | 2 +-
 external/flux/flux-examples/pom.xml                      | 2 +-
 external/flux/flux-wrappers/pom.xml                      | 2 +-
 external/flux/pom.xml                                    | 2 +-
 external/sql/pom.xml                                     | 2 +-
 external/sql/storm-sql-core/pom.xml                      | 2 +-
 external/sql/storm-sql-kafka/pom.xml                     | 2 +-
 external/sql/storm-sql-runtime/pom.xml                   | 2 +-
 external/storm-cassandra/pom.xml                         | 2 +-
 external/storm-elasticsearch/pom.xml                     | 2 +-
 external/storm-eventhubs/pom.xml                         | 4 ++--
 external/storm-hbase/pom.xml                             | 2 +-
 external/storm-hdfs/pom.xml                              | 2 +-
 external/storm-hive/pom.xml                              | 2 +-
 external/storm-jdbc/pom.xml                              | 2 +-
 external/storm-kafka/pom.xml                             | 2 +-
 external/storm-metrics/pom.xml                           | 2 +-
 external/storm-redis/pom.xml                             | 2 +-
 external/storm-solr/pom.xml                              | 2 +-
 pom.xml                                                  | 2 +-
 storm-buildtools/maven-shade-clojure-transformer/pom.xml | 2 +-
 storm-buildtools/storm-maven-plugins/pom.xml             | 2 +-
 storm-core/pom.xml                                       | 2 +-
 storm-dist/binary/pom.xml                                | 2 +-
 storm-dist/source/pom.xml                                | 2 +-
 storm-multilang/javascript/pom.xml                       | 2 +-
 storm-multilang/python/pom.xml                           | 2 +-
 storm-multilang/ruby/pom.xml                             | 2 +-
 storm-rename-hack/pom.xml                                | 2 +-
 31 files changed, 33 insertions(+), 32 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/CHANGELOG.md
----------------------------------------------------------------------
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b11a291..d1b930c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,5 @@
-## 0.11.0
+## 1.0.0
+ * STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability
  * STORM-468: java.io.NotSerializableException should be explained
  * STORM-1348: refactor API to remove Insert/Update builder in Cassandra connector
  * STORM-1206: Reduce logviewer memory usage through directory stream

http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/examples/storm-starter/pom.xml
----------------------------------------------------------------------
diff --git a/examples/storm-starter/pom.xml b/examples/storm-starter/pom.xml
index 7bb8f13..d7e47bf 100644
--- a/examples/storm-starter/pom.xml
+++ b/examples/storm-starter/pom.xml
@@ -20,7 +20,7 @@
   <parent>
       <artifactId>storm</artifactId>
       <groupId>org.apache.storm</groupId>
-      <version>0.11.0-SNAPSHOT</version>
+      <version>1.0.0-SNAPSHOT</version>
       <relativePath>../../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/external/flux/flux-core/pom.xml
----------------------------------------------------------------------
diff --git a/external/flux/flux-core/pom.xml b/external/flux/flux-core/pom.xml
index bd5a18f..538a37c 100644
--- a/external/flux/flux-core/pom.xml
+++ b/external/flux/flux-core/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <groupId>org.apache.storm</groupId>
         <artifactId>flux</artifactId>
-        <version>0.11.0-SNAPSHOT</version>
+        <version>1.0.0-SNAPSHOT</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 

http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/external/flux/flux-examples/pom.xml
----------------------------------------------------------------------
diff --git a/external/flux/flux-examples/pom.xml b/external/flux/flux-examples/pom.xml
index 537b4e0..926f8d0 100644
--- a/external/flux/flux-examples/pom.xml
+++ b/external/flux/flux-examples/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <groupId>org.apache.storm</groupId>
         <artifactId>flux</artifactId>
-        <version>0.11.0-SNAPSHOT</version>
+        <version>1.0.0-SNAPSHOT</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 

http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/external/flux/flux-wrappers/pom.xml
----------------------------------------------------------------------
diff --git a/external/flux/flux-wrappers/pom.xml b/external/flux/flux-wrappers/pom.xml
index be042ff..c34a53a 100644
--- a/external/flux/flux-wrappers/pom.xml
+++ b/external/flux/flux-wrappers/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <groupId>org.apache.storm</groupId>
         <artifactId>flux</artifactId>
-        <version>0.11.0-SNAPSHOT</version>
+        <version>1.0.0-SNAPSHOT</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 

http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/external/flux/pom.xml
----------------------------------------------------------------------
diff --git a/external/flux/pom.xml b/external/flux/pom.xml
index bf975cb..7b38161 100644
--- a/external/flux/pom.xml
+++ b/external/flux/pom.xml
@@ -26,7 +26,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>0.11.0-SNAPSHOT</version>
+        <version>1.0.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 

http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/external/sql/pom.xml
----------------------------------------------------------------------
diff --git a/external/sql/pom.xml b/external/sql/pom.xml
index 7884d39..787bc89 100644
--- a/external/sql/pom.xml
+++ b/external/sql/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>0.11.0-SNAPSHOT</version>
+        <version>1.0.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 

http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/external/sql/storm-sql-core/pom.xml
----------------------------------------------------------------------
diff --git a/external/sql/storm-sql-core/pom.xml b/external/sql/storm-sql-core/pom.xml
index 1e0da8c..6b62e88 100644
--- a/external/sql/storm-sql-core/pom.xml
+++ b/external/sql/storm-sql-core/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>0.11.0-SNAPSHOT</version>
+        <version>1.0.0-SNAPSHOT</version>
         <relativePath>../../../pom.xml</relativePath>
     </parent>
 

http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/external/sql/storm-sql-kafka/pom.xml
----------------------------------------------------------------------
diff --git a/external/sql/storm-sql-kafka/pom.xml b/external/sql/storm-sql-kafka/pom.xml
index 0f6bd19..fb754e7 100644
--- a/external/sql/storm-sql-kafka/pom.xml
+++ b/external/sql/storm-sql-kafka/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>0.11.0-SNAPSHOT</version>
+        <version>1.0.0-SNAPSHOT</version>
         <relativePath>../../../pom.xml</relativePath>
     </parent>
 

http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/external/sql/storm-sql-runtime/pom.xml
----------------------------------------------------------------------
diff --git a/external/sql/storm-sql-runtime/pom.xml b/external/sql/storm-sql-runtime/pom.xml
index 7126ece..4645647 100644
--- a/external/sql/storm-sql-runtime/pom.xml
+++ b/external/sql/storm-sql-runtime/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>0.11.0-SNAPSHOT</version>
+        <version>1.0.0-SNAPSHOT</version>
         <relativePath>../../../pom.xml</relativePath>
     </parent>
 

http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/external/storm-cassandra/pom.xml
----------------------------------------------------------------------
diff --git a/external/storm-cassandra/pom.xml b/external/storm-cassandra/pom.xml
index 446b18b..bb8634f 100644
--- a/external/storm-cassandra/pom.xml
+++ b/external/storm-cassandra/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>0.11.0-SNAPSHOT</version>
+        <version>1.0.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 

http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/external/storm-elasticsearch/pom.xml
----------------------------------------------------------------------
diff --git a/external/storm-elasticsearch/pom.xml b/external/storm-elasticsearch/pom.xml
index afe2e69..91583ce 100644
--- a/external/storm-elasticsearch/pom.xml
+++ b/external/storm-elasticsearch/pom.xml
@@ -22,7 +22,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>0.11.0-SNAPSHOT</version>
+        <version>1.0.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 

http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/external/storm-eventhubs/pom.xml
----------------------------------------------------------------------
diff --git a/external/storm-eventhubs/pom.xml b/external/storm-eventhubs/pom.xml
index b227f24..146d4d8 100755
--- a/external/storm-eventhubs/pom.xml
+++ b/external/storm-eventhubs/pom.xml
@@ -21,12 +21,12 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>0.11.0-SNAPSHOT</version>
+        <version>1.0.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
     
     <artifactId>storm-eventhubs</artifactId>
-    <version>0.11.0-SNAPSHOT</version>
+    <version>1.0.0-SNAPSHOT</version>
     <packaging>jar</packaging>
     <name>storm-eventhubs</name>
     <description>EventHubs Storm Spout</description>

http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/external/storm-hbase/pom.xml
----------------------------------------------------------------------
diff --git a/external/storm-hbase/pom.xml b/external/storm-hbase/pom.xml
index 0321b1a..13d160d 100644
--- a/external/storm-hbase/pom.xml
+++ b/external/storm-hbase/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>0.11.0-SNAPSHOT</version>
+        <version>1.0.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 

http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/external/storm-hdfs/pom.xml
----------------------------------------------------------------------
diff --git a/external/storm-hdfs/pom.xml b/external/storm-hdfs/pom.xml
index 8c7fe44..da4148a 100644
--- a/external/storm-hdfs/pom.xml
+++ b/external/storm-hdfs/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>0.11.0-SNAPSHOT</version>
+        <version>1.0.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 

http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/external/storm-hive/pom.xml
----------------------------------------------------------------------
diff --git a/external/storm-hive/pom.xml b/external/storm-hive/pom.xml
index f842c25..547ea28 100644
--- a/external/storm-hive/pom.xml
+++ b/external/storm-hive/pom.xml
@@ -22,7 +22,7 @@
   <parent>
     <artifactId>storm</artifactId>
     <groupId>org.apache.storm</groupId>
-    <version>0.11.0-SNAPSHOT</version>
+    <version>1.0.0-SNAPSHOT</version>
     <relativePath>../../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/external/storm-jdbc/pom.xml
----------------------------------------------------------------------
diff --git a/external/storm-jdbc/pom.xml b/external/storm-jdbc/pom.xml
index 489a8fd..89425ec 100644
--- a/external/storm-jdbc/pom.xml
+++ b/external/storm-jdbc/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>0.11.0-SNAPSHOT</version>
+        <version>1.0.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 

http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/external/storm-kafka/pom.xml
----------------------------------------------------------------------
diff --git a/external/storm-kafka/pom.xml b/external/storm-kafka/pom.xml
index 8f2867b..1afd0fa 100644
--- a/external/storm-kafka/pom.xml
+++ b/external/storm-kafka/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>0.11.0-SNAPSHOT</version>
+        <version>1.0.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 

http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/external/storm-metrics/pom.xml
----------------------------------------------------------------------
diff --git a/external/storm-metrics/pom.xml b/external/storm-metrics/pom.xml
index a2fa3ec..b460563 100644
--- a/external/storm-metrics/pom.xml
+++ b/external/storm-metrics/pom.xml
@@ -20,7 +20,7 @@
   <parent>
       <artifactId>storm</artifactId>
       <groupId>org.apache.storm</groupId>
-      <version>0.11.0-SNAPSHOT</version>
+      <version>1.0.0-SNAPSHOT</version>
       <relativePath>../../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/external/storm-redis/pom.xml
----------------------------------------------------------------------
diff --git a/external/storm-redis/pom.xml b/external/storm-redis/pom.xml
index 9033998..cbd3e79 100644
--- a/external/storm-redis/pom.xml
+++ b/external/storm-redis/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>0.11.0-SNAPSHOT</version>
+        <version>1.0.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 

http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/external/storm-solr/pom.xml
----------------------------------------------------------------------
diff --git a/external/storm-solr/pom.xml b/external/storm-solr/pom.xml
index d5b7622..db56f35 100644
--- a/external/storm-solr/pom.xml
+++ b/external/storm-solr/pom.xml
@@ -22,7 +22,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>0.11.0-SNAPSHOT</version>
+        <version>1.0.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
     <modelVersion>4.0.0</modelVersion>

http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index bced62b..257e473 100644
--- a/pom.xml
+++ b/pom.xml
@@ -27,7 +27,7 @@
 
     <groupId>org.apache.storm</groupId>
     <artifactId>storm</artifactId>
-    <version>0.11.0-SNAPSHOT</version>
+    <version>1.0.0-SNAPSHOT</version>
     <packaging>pom</packaging>
     <name>Storm</name>
     <description>Distributed and fault-tolerant realtime computation</description>

http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/storm-buildtools/maven-shade-clojure-transformer/pom.xml
----------------------------------------------------------------------
diff --git a/storm-buildtools/maven-shade-clojure-transformer/pom.xml b/storm-buildtools/maven-shade-clojure-transformer/pom.xml
index 6489027..7eccb43 100644
--- a/storm-buildtools/maven-shade-clojure-transformer/pom.xml
+++ b/storm-buildtools/maven-shade-clojure-transformer/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>0.11.0-SNAPSHOT</version>
+        <version>1.0.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 

http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/storm-buildtools/storm-maven-plugins/pom.xml
----------------------------------------------------------------------
diff --git a/storm-buildtools/storm-maven-plugins/pom.xml b/storm-buildtools/storm-maven-plugins/pom.xml
index de2155a..c65c4b2 100644
--- a/storm-buildtools/storm-maven-plugins/pom.xml
+++ b/storm-buildtools/storm-maven-plugins/pom.xml
@@ -22,7 +22,7 @@
   <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>0.11.0-SNAPSHOT</version>
+        <version>1.0.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
   </parent>
   <groupId>org.apache.storm</groupId>

http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/storm-core/pom.xml
----------------------------------------------------------------------
diff --git a/storm-core/pom.xml b/storm-core/pom.xml
index 08fffa4..cfc985a 100644
--- a/storm-core/pom.xml
+++ b/storm-core/pom.xml
@@ -20,7 +20,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>0.11.0-SNAPSHOT</version>
+        <version>1.0.0-SNAPSHOT</version>
         <relativePath>..</relativePath>
     </parent>
     <groupId>org.apache.storm</groupId>

http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/storm-dist/binary/pom.xml
----------------------------------------------------------------------
diff --git a/storm-dist/binary/pom.xml b/storm-dist/binary/pom.xml
index b82cf13..ad23eb8 100644
--- a/storm-dist/binary/pom.xml
+++ b/storm-dist/binary/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>0.11.0-SNAPSHOT</version>
+        <version>1.0.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
     <groupId>org.apache.storm</groupId>

http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/storm-dist/source/pom.xml
----------------------------------------------------------------------
diff --git a/storm-dist/source/pom.xml b/storm-dist/source/pom.xml
index 5b83c07..83d7902 100644
--- a/storm-dist/source/pom.xml
+++ b/storm-dist/source/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>0.11.0-SNAPSHOT</version>
+        <version>1.0.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
     <groupId>org.apache.storm</groupId>

http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/storm-multilang/javascript/pom.xml
----------------------------------------------------------------------
diff --git a/storm-multilang/javascript/pom.xml b/storm-multilang/javascript/pom.xml
index 53ffff7..0659cee 100644
--- a/storm-multilang/javascript/pom.xml
+++ b/storm-multilang/javascript/pom.xml
@@ -22,7 +22,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>0.11.0-SNAPSHOT</version>
+        <version>1.0.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
     <groupId>org.apache.storm</groupId>

http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/storm-multilang/python/pom.xml
----------------------------------------------------------------------
diff --git a/storm-multilang/python/pom.xml b/storm-multilang/python/pom.xml
index 61798ce..84a7923 100644
--- a/storm-multilang/python/pom.xml
+++ b/storm-multilang/python/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>0.11.0-SNAPSHOT</version>
+        <version>1.0.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
     <groupId>org.apache.storm</groupId>

http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/storm-multilang/ruby/pom.xml
----------------------------------------------------------------------
diff --git a/storm-multilang/ruby/pom.xml b/storm-multilang/ruby/pom.xml
index 08eb555..2588f09 100644
--- a/storm-multilang/ruby/pom.xml
+++ b/storm-multilang/ruby/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>0.11.0-SNAPSHOT</version>
+        <version>1.0.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
     <groupId>org.apache.storm</groupId>

http://git-wip-us.apache.org/repos/asf/storm/blob/a4f9f8bc/storm-rename-hack/pom.xml
----------------------------------------------------------------------
diff --git a/storm-rename-hack/pom.xml b/storm-rename-hack/pom.xml
index 88e269b..72a2acc 100644
--- a/storm-rename-hack/pom.xml
+++ b/storm-rename-hack/pom.xml
@@ -20,7 +20,7 @@
   <parent>
     <artifactId>storm</artifactId>
     <groupId>org.apache.storm</groupId>
-    <version>0.11.0-SNAPSHOT</version>
+    <version>1.0.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 


[17/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatibility

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/daemon/supervisor.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/daemon/supervisor.clj b/storm-core/src/clj/org/apache/storm/daemon/supervisor.clj
new file mode 100644
index 0000000..e4b44b0
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/daemon/supervisor.clj
@@ -0,0 +1,1219 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.daemon.supervisor
+  (:import [java.io File IOException FileOutputStream])
+  (:import [org.apache.storm.scheduler ISupervisor]
+           [org.apache.storm.utils LocalState Time Utils]
+           [org.apache.storm.daemon Shutdownable]
+           [org.apache.storm Constants]
+           [org.apache.storm.cluster ClusterStateContext DaemonType]
+           [java.net JarURLConnection]
+           [java.net URI]
+           [org.apache.commons.io FileUtils])
+  (:use [org.apache.storm config util log timer local-state])
+  (:import [org.apache.storm.generated AuthorizationException KeyNotFoundException WorkerResources])
+  (:import [org.apache.storm.utils NimbusLeaderNotFoundException VersionInfo])
+  (:import [java.nio.file Files StandardCopyOption])
+  (:import [org.apache.storm Config])
+  (:import [org.apache.storm.generated WorkerResources ProfileAction])
+  (:import [org.apache.storm.localizer LocalResource])
+  (:use [org.apache.storm.daemon common])
+  (:require [org.apache.storm.command [healthcheck :as healthcheck]])
+  (:require [org.apache.storm.daemon [worker :as worker]]
+            [org.apache.storm [process-simulator :as psim] [cluster :as cluster] [event :as event]]
+            [clojure.set :as set])
+  (:import [org.apache.thrift.transport TTransportException])
+  (:import [org.apache.zookeeper data.ACL ZooDefs$Ids ZooDefs$Perms])
+  (:import [org.yaml.snakeyaml Yaml]
+           [org.yaml.snakeyaml.constructor SafeConstructor])
+  (:require [metrics.gauges :refer [defgauge]])
+  (:require [metrics.meters :refer [defmeter mark!]])
+  (:gen-class
+    :methods [^{:static true} [launch [org.apache.storm.scheduler.ISupervisor] void]]))
+
+(defmeter supervisor:num-workers-launched)
+
+(defmulti download-storm-code cluster-mode)
+(defmulti launch-worker (fn [supervisor & _] (cluster-mode (:conf supervisor))))
+
+(def STORM-VERSION (VersionInfo/getVersion))
+
+(defprotocol SupervisorDaemon
+  (get-id [this])
+  (get-conf [this])
+  (shutdown-all-workers [this])
+  )
+
+(defn- assignments-snapshot [storm-cluster-state callback assignment-versions]
+  (let [storm-ids (.assignments storm-cluster-state callback)]
+    (let [new-assignments
+          (->>
+           (dofor [sid storm-ids]
+                  (let [recorded-version (:version (get assignment-versions sid))]
+                    (if-let [assignment-version (.assignment-version storm-cluster-state sid callback)]
+                      (if (= assignment-version recorded-version)
+                        {sid (get assignment-versions sid)}
+                        {sid (.assignment-info-with-version storm-cluster-state sid callback)})
+                      {sid nil})))
+           (apply merge)
+           (filter-val not-nil?))
+          new-profiler-actions
+          (->>
+            (dofor [sid (distinct storm-ids)]
+                   (if-let [topo-profile-actions (.get-topology-profile-requests storm-cluster-state sid false)]
+                      {sid topo-profile-actions}))
+           (apply merge))]
+         
+      {:assignments (into {} (for [[k v] new-assignments] [k (:data v)]))
+       :profiler-actions new-profiler-actions
+       :versions new-assignments})))
+
+(defn- read-my-executors [assignments-snapshot storm-id assignment-id]
+  (let [assignment (get assignments-snapshot storm-id)
+        my-slots-resources (into {}
+                                 (filter (fn [[[node _] _]] (= node assignment-id))
+                                         (:worker->resources assignment)))
+        my-executors (filter (fn [[_ [node _]]] (= node assignment-id))
+                             (:executor->node+port assignment))
+        port-executors (apply merge-with
+                              concat
+                              (for [[executor [_ port]] my-executors]
+                                {port [executor]}
+                                ))]
+    (into {} (for [[port executors] port-executors]
+               ;; need to cast to int b/c it might be a long (due to how yaml parses things)
+               ;; doall is to avoid serialization/deserialization problems with lazy seqs
+               [(Integer. port) (mk-local-assignment storm-id (doall executors) (get my-slots-resources [assignment-id port]))]
+               ))))
+
+(defn- read-assignments
+  "Returns map from port to struct containing :storm-id, :executors and :resources"
+  ([assignments-snapshot assignment-id]
+     (->> (dofor [sid (keys assignments-snapshot)] (read-my-executors assignments-snapshot sid assignment-id))
+          (apply merge-with (fn [& ignored] (throw-runtime "Should not have multiple topologies assigned to one port")))))
+  ([assignments-snapshot assignment-id existing-assignment retries]
+     (try (let [assignments (read-assignments assignments-snapshot assignment-id)]
+            (reset! retries 0)
+            assignments)
+          (catch RuntimeException e
+            (if (> @retries 2) (throw e) (swap! retries inc))
+            (log-warn (.getMessage e) ": retrying " @retries " of 3")
+            existing-assignment))))
+
+(defn- read-storm-code-locations
+  [assignments-snapshot]
+  (map-val :master-code-dir assignments-snapshot))
+
+(defn- read-downloaded-storm-ids [conf]
+  (map #(url-decode %) (read-dir-contents (supervisor-stormdist-root conf)))
+  )
+
+(defn read-worker-heartbeat [conf id]
+  (let [local-state (worker-state conf id)]
+    (try
+      (ls-worker-heartbeat local-state)
+      (catch Exception e
+        (log-warn e "Failed to read local heartbeat for workerId : " id ", ignoring exception.")
+        nil))))
+
+
+(defn my-worker-ids [conf]
+  (read-dir-contents (worker-root conf)))
+
+(defn read-worker-heartbeats
+  "Returns map from worker id to heartbeat"
+  [conf]
+  (let [ids (my-worker-ids conf)]
+    (into {}
+      (dofor [id ids]
+        [id (read-worker-heartbeat conf id)]))
+    ))
+
+
+(defn matches-an-assignment? [worker-heartbeat assigned-executors]
+  (let [local-assignment (assigned-executors (:port worker-heartbeat))]
+    (and local-assignment
+         (= (:storm-id worker-heartbeat) (:storm-id local-assignment))
+         (= (disj (set (:executors worker-heartbeat)) Constants/SYSTEM_EXECUTOR_ID)
+            (set (:executors local-assignment))))))
+
+(let [dead-workers (atom #{})]
+  (defn get-dead-workers []
+    @dead-workers)
+  (defn add-dead-worker [worker]
+    (swap! dead-workers conj worker))
+  (defn remove-dead-worker [worker]
+    (swap! dead-workers disj worker)))
+
+(defn is-worker-hb-timed-out? [now hb conf]
+  (> (- now (:time-secs hb))
+     (conf SUPERVISOR-WORKER-TIMEOUT-SECS)))
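+;; Illustrative example (hypothetical numbers): with SUPERVISOR-WORKER-TIMEOUT-SECS set to 30,
+;; a heartbeat whose :time-secs is 960 is considered timed out once `now` exceeds 990.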
+
+(defn read-allocated-workers
+  "Returns map from worker id to worker heartbeat. if the heartbeat is nil, then the worker is dead (timed out or never wrote heartbeat)"
+  [supervisor assigned-executors now]
+  (let [conf (:conf supervisor)
+        ^LocalState local-state (:local-state supervisor)
+        id->heartbeat (read-worker-heartbeats conf)
+        approved-ids (set (keys (ls-approved-workers local-state)))]
+    (into
+     {}
+     (dofor [[id hb] id->heartbeat]
+            (let [state (cond
+                         (not hb)
+                           :not-started
+                         (or (not (contains? approved-ids id))
+                             (not (matches-an-assignment? hb assigned-executors)))
+                           :disallowed
+                         (or
+                          (when (get (get-dead-workers) id)
+                            (log-message "Worker Process " id " has died!")
+                            true)
+                          (is-worker-hb-timed-out? now hb conf))
+                           :timed-out
+                         true
+                           :valid)]
+              (log-debug "Worker " id " is " state ": " (pr-str hb) " at supervisor time-secs " now)
+              [id [state hb]]
+              ))
+     )))
+
+(defn- wait-for-worker-launch [conf id start-time]
+  (let [state (worker-state conf id)]
+    (loop []
+      (let [hb (ls-worker-heartbeat state)]
+        (when (and
+               (not hb)
+               (<
+                (- (current-time-secs) start-time)
+                (conf SUPERVISOR-WORKER-START-TIMEOUT-SECS)
+                ))
+          (log-message id " still hasn't started")
+          (Time/sleep 500)
+          (recur)
+          )))
+    (when-not (ls-worker-heartbeat state)
+      (log-message "Worker " id " failed to start")
+      )))
+
+(defn- wait-for-workers-launch [conf ids]
+  (let [start-time (current-time-secs)]
+    (doseq [id ids]
+      (wait-for-worker-launch conf id start-time))
+    ))
+
+(defn generate-supervisor-id []
+  (uuid))
+
+(defnk worker-launcher [conf user args :environment {} :log-prefix nil :exit-code-callback nil :directory nil]
+  (let [_ (when (clojure.string/blank? user)
+            (throw (java.lang.IllegalArgumentException.
+                     "User cannot be blank when calling worker-launcher.")))
+        wl-initial (conf SUPERVISOR-WORKER-LAUNCHER)
+        storm-home (System/getProperty "storm.home")
+        wl (if wl-initial wl-initial (str storm-home "/bin/worker-launcher"))
+        command (concat [wl user] args)]
+    (log-message "Running as user:" user " command:" (pr-str command))
+    (launch-process command :environment environment :log-prefix log-prefix :exit-code-callback exit-code-callback :directory directory)
+  ))
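+;; For example (user, paths and args are illustrative): with user "alice" and args ["rmr" "/tmp/workdir"],
+;; the launched command is ["/opt/storm/bin/worker-launcher" "alice" "rmr" "/tmp/workdir"]
+;; when SUPERVISOR-WORKER-LAUNCHER is unset and storm.home is /opt/storm.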
+
+(defnk worker-launcher-and-wait [conf user args :environment {} :log-prefix nil]
+  (let [process (worker-launcher conf user args :environment environment)]
+    (if log-prefix
+      (read-and-log-stream log-prefix (.getInputStream process)))
+      (try
+        (.waitFor process)
+      (catch InterruptedException e
+        (log-message log-prefix " interrupted.")))
+      (.exitValue process)))
+
+(defn- rmr-as-user
+  "Launches a process owned by the given user that deletes the given path
+  recursively.  Throws RuntimeException if the directory is not removed."
+  [conf id path]
+  (let [user (Utils/getFileOwner path)]
+    (worker-launcher-and-wait conf
+      user
+      ["rmr" path]
+      :log-prefix (str "rmr " id))
+    (if (exists-file? path)
+      (throw (RuntimeException. (str path " was not deleted"))))))
+
+(defn try-cleanup-worker [conf id]
+  (try
+    (if (.exists (File. (worker-root conf id)))
+      (do
+        (if (conf SUPERVISOR-RUN-WORKER-AS-USER)
+          (rmr-as-user conf id (worker-root conf id))
+          (do
+            (rmr (worker-heartbeats-root conf id))
+            ;; this avoids a race condition with worker or subprocess writing pid around same time
+            (rmr (worker-pids-root conf id))
+            (rmr (worker-root conf id))))
+        (remove-worker-user! conf id)
+        (remove-dead-worker id)
+      ))
+  (catch IOException e
+    (log-warn-error e "Failed to cleanup worker " id ". Will retry later"))
+  (catch RuntimeException e
+    (log-warn-error e "Failed to cleanup worker " id ". Will retry later")
+    )
+  (catch java.io.FileNotFoundException e (log-message (.getMessage e)))
+    ))
+
+(defn shutdown-worker [supervisor id]
+  (log-message "Shutting down " (:supervisor-id supervisor) ":" id)
+  (let [conf (:conf supervisor)
+        pids (read-dir-contents (worker-pids-root conf id))
+        thread-pid (@(:worker-thread-pids-atom supervisor) id)
+        shutdown-sleep-secs (conf SUPERVISOR-WORKER-SHUTDOWN-SLEEP-SECS)
+        as-user (conf SUPERVISOR-RUN-WORKER-AS-USER)
+        user (get-worker-user conf id)]
+    (when thread-pid
+      (psim/kill-process thread-pid))
+    (doseq [pid pids]
+      (if as-user
+        (worker-launcher-and-wait conf user ["signal" pid "15"] :log-prefix (str "kill -15 " pid))
+        (kill-process-with-sig-term pid)))
+    (when-not (empty? pids)  
+      (log-message "Sleep " shutdown-sleep-secs " seconds for execution of cleanup threads on worker.")
+      (sleep-secs shutdown-sleep-secs))
+    (doseq [pid pids]
+      (if as-user
+        (worker-launcher-and-wait conf user ["signal" pid "9"] :log-prefix (str "kill -9 " pid))
+        (force-kill-process pid))
+      (if as-user
+        (rmr-as-user conf id (worker-pid-path conf id pid))
+        (try
+          (rmpath (worker-pid-path conf id pid))
+          (catch Exception e)))) ;; on Windows, the supervisor may still hold the lock on the worker directory
+    (try-cleanup-worker conf id))
+  (log-message "Shut down " (:supervisor-id supervisor) ":" id))
+
+(def SUPERVISOR-ZK-ACLS
+  [(first ZooDefs$Ids/CREATOR_ALL_ACL)
+   (ACL. (bit-or ZooDefs$Perms/READ ZooDefs$Perms/CREATE) ZooDefs$Ids/ANYONE_ID_UNSAFE)])
+
+(defn supervisor-data [conf shared-context ^ISupervisor isupervisor]
+  {:conf conf
+   :shared-context shared-context
+   :isupervisor isupervisor
+   :active (atom true)
+   :uptime (uptime-computer)
+   :version STORM-VERSION
+   :worker-thread-pids-atom (atom {})
+   :storm-cluster-state (cluster/mk-storm-cluster-state conf :acls (when
+                                                                     (Utils/isZkAuthenticationConfiguredStormServer
+                                                                       conf)
+                                                                     SUPERVISOR-ZK-ACLS)
+                                                        :context (ClusterStateContext. DaemonType/SUPERVISOR))
+   :local-state (supervisor-state conf)
+   :supervisor-id (.getSupervisorId isupervisor)
+   :assignment-id (.getAssignmentId isupervisor)
+   :my-hostname (hostname conf)
+   :curr-assignment (atom nil) ;; used for reporting used ports when heartbeating
+   :heartbeat-timer (mk-timer :kill-fn (fn [t]
+                               (log-error t "Error when processing event")
+                               (exit-process! 20 "Error when processing an event")
+                               ))
+   :event-timer (mk-timer :kill-fn (fn [t]
+                                         (log-error t "Error when processing event")
+                                         (exit-process! 20 "Error when processing an event")
+                                         ))
+   :blob-update-timer (mk-timer :kill-fn (defn blob-update-timer
+                                           [t]
+                                           (log-error t "Error when processing event")
+                                           (exit-process! 20 "Error when processing an event"))
+                                :timer-name "blob-update-timer")
+   :localizer (Utils/createLocalizer conf (supervisor-local-dir conf))
+   :assignment-versions (atom {})
+   :sync-retry (atom 0)
+   :download-lock (Object.)
+   :stormid->profiler-actions (atom {})
+   })
+
+(defn required-topo-files-exist?
+  [conf storm-id]
+  (let [stormroot (supervisor-stormdist-root conf storm-id)
+        stormjarpath (supervisor-stormjar-path stormroot)
+        stormcodepath (supervisor-stormcode-path stormroot)
+        stormconfpath (supervisor-stormconf-path stormroot)]
+    (and (every? exists-file? [stormroot stormconfpath stormcodepath])
+         (or (local-mode? conf)
+             (exists-file? stormjarpath)))))
+
+(defn get-worker-assignment-helper-msg
+  [assignment supervisor port id]
+  (str (pr-str assignment) " for this supervisor " (:supervisor-id supervisor) " on port "
+    port " with id " id))
+
+(defn get-valid-new-worker-ids
+  [conf supervisor reassign-executors new-worker-ids]
+  (into {}
+    (remove nil?
+      (dofor [[port assignment] reassign-executors]
+        (let [id (new-worker-ids port)
+              storm-id (:storm-id assignment)
+              ^WorkerResources resources (:resources assignment)
+              mem-onheap (.get_mem_on_heap resources)]
+          ;; This condition checks for required files exist before launching the worker
+          (if (required-topo-files-exist? conf storm-id)
+            (do
+              (log-message "Launching worker with assignment "
+                (get-worker-assignment-helper-msg assignment supervisor port id))
+              (local-mkdirs (worker-pids-root conf id))
+              (local-mkdirs (worker-heartbeats-root conf id))
+              (launch-worker supervisor
+                (:storm-id assignment)
+                port
+                id
+                mem-onheap)
+              [id port])
+            (do
+              (log-message "Missing topology storm code, so can't launch worker with assignment "
+                (get-worker-assignment-helper-msg assignment supervisor port id))
+              nil)))))))
+
+(defn sync-processes [supervisor]
+  (let [conf (:conf supervisor)
+        ^LocalState local-state (:local-state supervisor)
+        storm-cluster-state (:storm-cluster-state supervisor)
+        assigned-executors (defaulted (ls-local-assignments local-state) {})
+        now (current-time-secs)
+        allocated (read-allocated-workers supervisor assigned-executors now)
+        keepers (filter-val
+                 (fn [[state _]] (= state :valid))
+                 allocated)
+        keep-ports (set (for [[id [_ hb]] keepers] (:port hb)))
+        reassign-executors (select-keys-pred (complement keep-ports) assigned-executors)
+        new-worker-ids (into
+                        {}
+                        (for [port (keys reassign-executors)]
+                          [port (uuid)]))]
+    ;; 1. to kill are those in allocated that are dead or disallowed
+    ;; 2. kill the ones that should be dead
+    ;;     - read pids, kill -9 and individually remove file
+    ;;     - rmr heartbeat dir, rmdir pid dir, rmdir id dir (catch exception and log)
+    ;; 3. of the rest, figure out what assignments aren't yet satisfied
+    ;; 4. generate new worker ids, write new "approved workers" to LS
+    ;; 5. create local dir for worker id
+    ;; 5. launch new workers (give worker-id, port, and supervisor-id)
+    ;; 6. wait for workers launch
+
+    (log-debug "Syncing processes")
+    (log-debug "Assigned executors: " assigned-executors)
+    (log-debug "Allocated: " allocated)
+    (doseq [[id [state heartbeat]] allocated]
+      (when (not= :valid state)
+        (log-message
+         "Shutting down and clearing state for id " id
+         ". Current supervisor time: " now
+         ". State: " state
+         ", Heartbeat: " (pr-str heartbeat))
+        (shutdown-worker supervisor id)))
+    (let [valid-new-worker-ids (get-valid-new-worker-ids conf supervisor reassign-executors new-worker-ids)]
+      (ls-approved-workers! local-state
+                        (merge
+                          (select-keys (ls-approved-workers local-state)
+                            (keys keepers))
+                          valid-new-worker-ids))
+      (wait-for-workers-launch conf (keys valid-new-worker-ids)))))
+
+(defn assigned-storm-ids-from-port-assignments [assignment]
+  (->> assignment
+       vals
+       (map :storm-id)
+       set))
+
+(defn shutdown-disallowed-workers [supervisor]
+  (let [conf (:conf supervisor)
+        ^LocalState local-state (:local-state supervisor)
+        assigned-executors (defaulted (ls-local-assignments local-state) {})
+        now (current-time-secs)
+        allocated (read-allocated-workers supervisor assigned-executors now)
+        disallowed (keys (filter-val
+                                  (fn [[state _]] (= state :disallowed))
+                                  allocated))]
+    (log-debug "Allocated workers " allocated)
+    (log-debug "Disallowed workers " disallowed)
+    (doseq [id disallowed]
+      (shutdown-worker supervisor id))
+    ))
+
+(defn get-blob-localname
+  "Given the blob information either gets the localname field if it exists,
+  else routines the default value passed in."
+  [blob-info defaultValue]
+  (or (get blob-info "localname") defaultValue))
+
+(defn should-uncompress-blob?
+  "Given the blob information returns the value of the uncompress field, handling it either being
+  a string or a boolean value, or if it's not specified then returns false"
+  [blob-info]
+  (Boolean. (get blob-info "uncompress")))
+
+(defn remove-blob-references
+  "Remove a reference to a blob when its no longer needed."
+  [localizer storm-id conf]
+  (let [storm-conf (read-supervisor-storm-conf conf storm-id)
+        blobstore-map (storm-conf TOPOLOGY-BLOBSTORE-MAP)
+        user (storm-conf TOPOLOGY-SUBMITTER-USER)
+        topo-name (storm-conf TOPOLOGY-NAME)]
+    (if blobstore-map
+      (doseq [[k, v] blobstore-map]
+        (.removeBlobReference localizer
+          k
+          user
+          topo-name
+          (should-uncompress-blob? v))))))
+
+(defn blobstore-map-to-localresources
+  "Returns a list of LocalResources based on the blobstore-map passed in."
+  [blobstore-map]
+  (if blobstore-map
+    (for [[k, v] blobstore-map] (LocalResource. k (should-uncompress-blob? v)))
+    ()))
+
+(defn add-blob-references
+  "For each of the downloaded topologies, adds references to the blobs that the topologies are
+  using. This is used to reconstruct the cache on restart."
+  [localizer storm-id conf]
+  (let [storm-conf (read-supervisor-storm-conf conf storm-id)
+        blobstore-map (storm-conf TOPOLOGY-BLOBSTORE-MAP)
+        user (storm-conf TOPOLOGY-SUBMITTER-USER)
+        topo-name (storm-conf TOPOLOGY-NAME)
+        localresources (blobstore-map-to-localresources blobstore-map)]
+    (if blobstore-map
+      (.addReferences localizer localresources user topo-name))))
+
+(defn rm-topo-files
+  [conf storm-id localizer rm-blob-refs?]
+  (let [path (supervisor-stormdist-root conf storm-id)]
+    (try
+      (if rm-blob-refs?
+        (remove-blob-references localizer storm-id conf))
+      (if (conf SUPERVISOR-RUN-WORKER-AS-USER)
+        (rmr-as-user conf storm-id path)
+        (rmr (supervisor-stormdist-root conf storm-id)))
+      (catch Exception e
+        (log-message e (str "Exception removing: " storm-id))))))
+
+(defn verify-downloaded-files
+  "Check for the files exists to avoid supervisor crashing
+   Also makes sure there is no necessity for locking"
+  [conf localizer assigned-storm-ids all-downloaded-storm-ids]
+  (remove nil?
+    (into #{}
+      (for [storm-id all-downloaded-storm-ids
+            :when (contains? assigned-storm-ids storm-id)]
+        (when-not (required-topo-files-exist? conf storm-id)
+          (log-debug "Files not present in topology directory")
+          (rm-topo-files conf storm-id localizer false)
+          storm-id)))))
+
+(defn mk-synchronize-supervisor [supervisor sync-processes event-manager processes-event-manager]
+  (fn this []
+    (let [conf (:conf supervisor)
+          storm-cluster-state (:storm-cluster-state supervisor)
+          ^ISupervisor isupervisor (:isupervisor supervisor)
+          ^LocalState local-state (:local-state supervisor)
+          sync-callback (fn [& ignored] (.add event-manager this))
+          assignment-versions @(:assignment-versions supervisor)
+          {assignments-snapshot :assignments
+           storm-id->profiler-actions :profiler-actions
+           versions :versions}
+          (assignments-snapshot storm-cluster-state sync-callback assignment-versions)
+          storm-code-map (read-storm-code-locations assignments-snapshot)
+          all-downloaded-storm-ids (set (read-downloaded-storm-ids conf))
+          existing-assignment (ls-local-assignments local-state)
+          all-assignment (read-assignments assignments-snapshot
+                                           (:assignment-id supervisor)
+                                           existing-assignment
+                                           (:sync-retry supervisor))
+          new-assignment (->> all-assignment
+                              (filter-key #(.confirmAssigned isupervisor %)))
+          assigned-storm-ids (assigned-storm-ids-from-port-assignments new-assignment)
+          localizer (:localizer supervisor)
+          checked-downloaded-storm-ids (set (verify-downloaded-files conf localizer assigned-storm-ids all-downloaded-storm-ids))
+          downloaded-storm-ids (set/difference all-downloaded-storm-ids checked-downloaded-storm-ids)]
+
+      (log-debug "Synchronizing supervisor")
+      (log-debug "Storm code map: " storm-code-map)
+      (log-debug "All assignment: " all-assignment)
+      (log-debug "New assignment: " new-assignment)
+      (log-debug "Assigned Storm Ids " assigned-storm-ids)
+      (log-debug "All Downloaded Ids " all-downloaded-storm-ids)
+      (log-debug "Checked Downloaded Ids " checked-downloaded-storm-ids)
+      (log-debug "Downloaded Ids " downloaded-storm-ids)
+      (log-debug "Storm Ids Profiler Actions " storm-id->profiler-actions)
+      ;; download code first
+      ;; This might take a while
+      ;;   - should this be done separately from usual monitoring?
+      ;; should we only download when topology is assigned to this supervisor?
+      (doseq [[storm-id master-code-dir] storm-code-map]
+        (when (and (not (downloaded-storm-ids storm-id))
+                   (assigned-storm-ids storm-id))
+          (log-message "Downloading code for storm id " storm-id)
+          (try-cause
+            (download-storm-code conf storm-id master-code-dir localizer)
+
+            (catch NimbusLeaderNotFoundException e
+              (log-warn-error e "Nimbus leader was not available."))
+            (catch TTransportException e
+              (log-warn-error e "There was a connection problem with nimbus.")))
+          (log-message "Finished downloading code for storm id " storm-id)))
+
+      (log-debug "Writing new assignment "
+                 (pr-str new-assignment))
+      (doseq [p (set/difference (set (keys existing-assignment))
+                                (set (keys new-assignment)))]
+        (.killedWorker isupervisor (int p)))
+      (.assigned isupervisor (keys new-assignment))
+      (ls-local-assignments! local-state
+            new-assignment)
+      (reset! (:assignment-versions supervisor) versions)
+      (reset! (:stormid->profiler-actions supervisor) storm-id->profiler-actions)
+
+      (reset! (:curr-assignment supervisor) new-assignment)
+      ;; remove any downloaded code that's no longer assigned or active
+      ;; important that this happens after setting the local assignment so that
+      ;; synchronize-supervisor doesn't try to launch workers for which the
+      ;; resources don't exist
+      (if on-windows? (shutdown-disallowed-workers supervisor))
+      (doseq [storm-id all-downloaded-storm-ids]
+        (when-not (storm-code-map storm-id)
+          (log-message "Removing code for storm id "
+                       storm-id)
+          (rm-topo-files conf storm-id localizer true)))
+      (.add processes-event-manager sync-processes))))
+
+(defn mk-supervisor-capacities
+  [conf]
+  {Config/SUPERVISOR_MEMORY_CAPACITY_MB (double (conf SUPERVISOR-MEMORY-CAPACITY-MB))
+   Config/SUPERVISOR_CPU_CAPACITY (double (conf SUPERVISOR-CPU-CAPACITY))})
+
+(defn update-blobs-for-topology!
+  "Update each blob listed in the topology configuration if the latest version of the blob
+   has not been downloaded."
+  [conf storm-id localizer]
+  (let [storm-conf (read-supervisor-storm-conf conf storm-id)
+        blobstore-map (storm-conf TOPOLOGY-BLOBSTORE-MAP)
+        user (storm-conf TOPOLOGY-SUBMITTER-USER)
+        localresources (blobstore-map-to-localresources blobstore-map)]
+    (try
+      (.updateBlobs localizer localresources user)
+      (catch AuthorizationException authExp
+        (log-error authExp))
+      (catch KeyNotFoundException knf
+        (log-error knf)))))
+
+(defn update-blobs-for-all-topologies-fn
+  "Returns a function that downloads all blobs listed in the topology configuration for all topologies assigned
+  to this supervisor, and creates version files with a suffix. The returned function is intended to be run periodically
+  by a timer, created elsewhere."
+  [supervisor]
+  (fn []
+    (try-cause
+      (let [conf (:conf supervisor)
+            downloaded-storm-ids (set (read-downloaded-storm-ids conf))
+            new-assignment @(:curr-assignment supervisor)
+            assigned-storm-ids (assigned-storm-ids-from-port-assignments new-assignment)]
+        (doseq [topology-id downloaded-storm-ids]
+          (let [storm-root (supervisor-stormdist-root conf topology-id)]
+            (when (assigned-storm-ids topology-id)
+              (log-debug "Checking Blob updates for storm topology id " topology-id " With target_dir: " storm-root)
+              (update-blobs-for-topology! conf topology-id (:localizer supervisor))))))
+      (catch TTransportException e
+        (log-error
+          e
+          "Network error while updating blobs, will retry again later"))
+      (catch NimbusLeaderNotFoundException e
+        (log-error
+          e
+          "Nimbus unavailable to update blobs, will retry again later")))))
+
+(defn jvm-cmd [cmd]
+  (let [java-home (.get (System/getenv) "JAVA_HOME")]
+    (if (nil? java-home)
+      cmd
+      (str java-home file-path-separator "bin" file-path-separator cmd))))
+
+(defn java-cmd []
+  (jvm-cmd "java"))
+
+(defn jmap-dump-cmd [profile-cmd pid target-dir]
+  [profile-cmd pid "jmap" target-dir])
+
+(defn jstack-dump-cmd [profile-cmd pid target-dir]
+  [profile-cmd pid "jstack" target-dir])
+
+(defn jprofile-start [profile-cmd pid]
+  [profile-cmd pid "start"])
+
+(defn jprofile-stop [profile-cmd pid target-dir]
+  [profile-cmd pid "stop" target-dir])
+
+(defn jprofile-dump [profile-cmd pid workers-artifacts-directory]
+  [profile-cmd pid "dump" workers-artifacts-directory])
+
+(defn jprofile-jvm-restart [profile-cmd pid]
+  [profile-cmd pid "kill"])
+
+(defn- delete-topology-profiler-action [storm-cluster-state storm-id profile-action]
+  (log-message "Deleting profiler action.." profile-action)
+  (.delete-topology-profile-requests storm-cluster-state storm-id profile-action))
+
+(defnk launch-profiler-action-for-worker
+  "Launch profiler action for a worker"
+  [conf user target-dir command :environment {} :exit-code-on-profile-action nil :log-prefix nil]
+  (if-let [run-worker-as-user (conf SUPERVISOR-RUN-WORKER-AS-USER)]
+    (let [container-file (container-file-path target-dir)
+          script-file (script-file-path target-dir)]
+      (log-message "Running as user:" user " command:" (shell-cmd command))
+      (if (exists-file? container-file) (rmr-as-user conf container-file container-file))
+      (if (exists-file? script-file) (rmr-as-user conf script-file script-file))
+      (worker-launcher
+        conf
+        user
+        ["profiler" target-dir (write-script target-dir command :environment environment)]
+        :log-prefix log-prefix
+        :exit-code-callback exit-code-on-profile-action
+        :directory (File. target-dir)))
+    (launch-process
+      command
+      :environment environment
+      :log-prefix log-prefix
+      :exit-code-callback exit-code-on-profile-action
+      :directory (File. target-dir))))
+
+(defn mk-run-profiler-actions-for-all-topologies
+  "Returns a function that downloads all profile-actions listed for all topologies assigned
+  to this supervisor, executes those actions as user and deletes them from zookeeper."
+  [supervisor]
+  (fn []
+    (try
+      (let [conf (:conf supervisor)
+            stormid->profiler-actions @(:stormid->profiler-actions supervisor)
+            storm-cluster-state (:storm-cluster-state supervisor)
+            hostname (:my-hostname supervisor)
+            profile-cmd (conf WORKER-PROFILER-COMMAND)
+            new-assignment @(:curr-assignment supervisor)
+            assigned-storm-ids (assigned-storm-ids-from-port-assignments new-assignment)]
+        (doseq [[storm-id profiler-actions] stormid->profiler-actions]
+          (when (not (empty? profiler-actions))
+            (doseq [pro-action profiler-actions]
+              (if (= hostname (:host pro-action))
+                (let [port (:port pro-action)
+                      action ^ProfileAction (:action pro-action)
+                      stop? (> (System/currentTimeMillis) (:timestamp pro-action))
+                      target-dir (worker-artifacts-root conf storm-id port)
+                      storm-conf (read-supervisor-storm-conf conf storm-id)
+                      user (storm-conf TOPOLOGY-SUBMITTER-USER)
+                      environment (if-let [env (storm-conf TOPOLOGY-ENVIRONMENT)] env {})
+                      worker-pid (slurp (worker-artifacts-pid-path conf storm-id port))
+                      log-prefix (str "ProfilerAction process " storm-id ":" port " PROFILER_ACTION: " action " ")
+                      ;; Until the PROFILER_STOP action expires, keep launching profiler start in case the worker restarted
+                      ;; The profiler plugin script checks whether the JVM is already recording before starting another recording.
+                      command (cond
+                                (= action ProfileAction/JMAP_DUMP) (jmap-dump-cmd profile-cmd worker-pid target-dir)
+                                (= action ProfileAction/JSTACK_DUMP) (jstack-dump-cmd profile-cmd worker-pid target-dir)
+                                (= action ProfileAction/JPROFILE_DUMP) (jprofile-dump profile-cmd worker-pid target-dir)
+                                (= action ProfileAction/JVM_RESTART) (jprofile-jvm-restart profile-cmd worker-pid)
+                                (and (not stop?)
+                                     (= action ProfileAction/JPROFILE_STOP))
+                                  (jprofile-start profile-cmd worker-pid) ;; Ensure the profiler is still running
+                                (and stop? (= action ProfileAction/JPROFILE_STOP)) (jprofile-stop profile-cmd worker-pid target-dir))
+                      action-on-exit (fn [exit-code]
+                                       (log-message log-prefix " profile-action exited with code: " exit-code)
+                                       (if (and (= exit-code 0) stop?)
+                                         (delete-topology-profiler-action storm-cluster-state storm-id pro-action)))
+                      command (->> command (map str) (filter (complement empty?)))]
+
+                  (try
+                    (launch-profiler-action-for-worker conf
+                      user
+                      target-dir
+                      command
+                      :environment environment
+                      :exit-code-on-profile-action action-on-exit
+                      :log-prefix log-prefix)
+                    (catch IOException ioe
+                      (log-error ioe
+                        (str "Error in processing ProfilerAction '" action "' for " storm-id ":" port ", will retry later.")))
+                    (catch RuntimeException rte
+                      (log-error rte
+                        (str "Error in processing ProfilerAction '" action "' for " storm-id ":" port ", will retry later."))))))))))
+      (catch Exception e
+        (log-error e "Error running profiler actions, will retry again later")))))
+
+;; in local state, the supervisor stores what its current assignments are
+;; another thread launches events to restart any dead processes if necessary
+(defserverfn mk-supervisor [conf shared-context ^ISupervisor isupervisor]
+  (log-message "Starting Supervisor with conf " conf)
+  (.prepare isupervisor conf (supervisor-isupervisor-dir conf))
+  (FileUtils/cleanDirectory (File. (supervisor-tmp-dir conf)))
+  (let [supervisor (supervisor-data conf shared-context isupervisor)
+        [event-manager processes-event-manager :as managers] [(event/event-manager false) (event/event-manager false)]
+        sync-processes (partial sync-processes supervisor)
+        synchronize-supervisor (mk-synchronize-supervisor supervisor sync-processes event-manager processes-event-manager)
+        synchronize-blobs-fn (update-blobs-for-all-topologies-fn supervisor)
+        downloaded-storm-ids (set (read-downloaded-storm-ids conf))
+        run-profiler-actions-fn (mk-run-profiler-actions-for-all-topologies supervisor)
+        heartbeat-fn (fn [] (.supervisor-heartbeat!
+                               (:storm-cluster-state supervisor)
+                               (:supervisor-id supervisor)
+                               (->SupervisorInfo (current-time-secs)
+                                                 (:my-hostname supervisor)
+                                                 (:assignment-id supervisor)
+                                                 (keys @(:curr-assignment supervisor))
+                                                  ;; used ports
+                                                 (.getMetadata isupervisor)
+                                                 (conf SUPERVISOR-SCHEDULER-META)
+                                                 ((:uptime supervisor))
+                                                 (:version supervisor)
+                                                 (mk-supervisor-capacities conf))))]
+    (heartbeat-fn)
+
+    ;; should synchronize supervisor so it doesn't launch anything after being down (optimization)
+    (schedule-recurring (:heartbeat-timer supervisor)
+                        0
+                        (conf SUPERVISOR-HEARTBEAT-FREQUENCY-SECS)
+                        heartbeat-fn)
+    (doseq [storm-id downloaded-storm-ids]
+      (add-blob-references (:localizer supervisor) storm-id
+        conf))
+    ;; do this after adding the references so we don't try to clean things being used
+    (.startCleaner (:localizer supervisor))
+
+    (when (conf SUPERVISOR-ENABLE)
+      ;; This isn't strictly necessary, but it doesn't hurt and ensures that the machine stays up
+      ;; to date even if callbacks don't all work exactly right
+      (schedule-recurring (:event-timer supervisor) 0 10 (fn [] (.add event-manager synchronize-supervisor)))
+      (schedule-recurring (:event-timer supervisor)
+                          0
+                          (conf SUPERVISOR-MONITOR-FREQUENCY-SECS)
+                          (fn [] (.add processes-event-manager sync-processes)))
+
+      ;; Blob update thread. Starts after a 30 second delay and runs every 30 seconds
+      (schedule-recurring (:blob-update-timer supervisor)
+                          30
+                          30
+                          (fn [] (.add event-manager synchronize-blobs-fn)))
+
+      (schedule-recurring (:event-timer supervisor)
+                          (* 60 5)
+                          (* 60 5)
+                          (fn [] (let [health-code (healthcheck/health-check conf)
+                                       ids (my-worker-ids conf)]
+                                   (if (not (= health-code 0))
+                                     (do
+                                       (doseq [id ids]
+                                         (shutdown-worker supervisor id))
+                                       (throw (RuntimeException. "Supervisor failed health check. Exiting.")))))))
+
+      ;; Launch a thread that runs profiler commands. Starts after a 30 second delay and runs every 30 seconds
+      (schedule-recurring (:event-timer supervisor)
+                          30
+                          30
+                          (fn [] (.add event-manager run-profiler-actions-fn))))
+    (log-message "Starting supervisor with id " (:supervisor-id supervisor) " at host " (:my-hostname supervisor))
+    (reify
+     Shutdownable
+     (shutdown [this]
+               (log-message "Shutting down supervisor " (:supervisor-id supervisor))
+               (reset! (:active supervisor) false)
+               (cancel-timer (:heartbeat-timer supervisor))
+               (cancel-timer (:event-timer supervisor))
+               (cancel-timer (:blob-update-timer supervisor))
+               (.shutdown event-manager)
+               (.shutdown processes-event-manager)
+               (.shutdown (:localizer supervisor))
+               (.disconnect (:storm-cluster-state supervisor)))
+     SupervisorDaemon
+     (get-conf [this]
+       conf)
+     (get-id [this]
+       (:supervisor-id supervisor))
+     (shutdown-all-workers [this]
+       (let [ids (my-worker-ids conf)]
+         (doseq [id ids]
+           (shutdown-worker supervisor id)
+           )))
+     DaemonCommon
+     (waiting? [this]
+       (or (not @(:active supervisor))
+           (and
+            (timer-waiting? (:heartbeat-timer supervisor))
+            (timer-waiting? (:event-timer supervisor))
+            (every? (memfn waiting?) managers)))
+           ))))
+
+(defn kill-supervisor [supervisor]
+  (.shutdown supervisor)
+  )
+
+(defn setup-storm-code-dir
+  [conf storm-conf dir]
+ (if (conf SUPERVISOR-RUN-WORKER-AS-USER)
+  (worker-launcher-and-wait conf (storm-conf TOPOLOGY-SUBMITTER-USER) ["code-dir" dir] :log-prefix (str "setup conf for " dir))))
+
+(defn setup-blob-permission
+  [conf storm-conf path]
+  (if (conf SUPERVISOR-RUN-WORKER-AS-USER)
+    (worker-launcher-and-wait conf (storm-conf TOPOLOGY-SUBMITTER-USER) ["blob" path] :log-prefix (str "setup blob permissions for " path))))
+
+(defn download-blobs-for-topology!
+  "Download all blobs listed in the topology configuration for a given topology."
+  [conf stormconf-path localizer tmproot]
+  (let [storm-conf (read-supervisor-storm-conf-given-path conf stormconf-path)
+        blobstore-map (storm-conf TOPOLOGY-BLOBSTORE-MAP)
+        user (storm-conf TOPOLOGY-SUBMITTER-USER)
+        topo-name (storm-conf TOPOLOGY-NAME)
+        user-dir (.getLocalUserFileCacheDir localizer user)
+        localresources (blobstore-map-to-localresources blobstore-map)]
+    (when localresources
+      (when-not (.exists user-dir)
+        (FileUtils/forceMkdir user-dir))
+      (try
+        (let [localized-resources (.getBlobs localizer localresources user topo-name user-dir)]
+          (setup-blob-permission conf storm-conf (.toString user-dir))
+          (doseq [local-rsrc localized-resources]
+            (let [rsrc-file-path (File. (.getFilePath local-rsrc))
+                  key-name (.getName rsrc-file-path)
+                  blob-symlink-target-name (.getName (File. (.getCurrentSymlinkPath local-rsrc)))
+                  symlink-name (get-blob-localname (get blobstore-map key-name) key-name)]
+              (create-symlink! tmproot (.getParent rsrc-file-path) symlink-name
+                blob-symlink-target-name))))
+        (catch AuthorizationException authExp
+          (log-error authExp))
+        (catch KeyNotFoundException knf
+          (log-error knf))))))
+
+(defn get-blob-file-names
+  [blobstore-map]
+  (if blobstore-map
+    (for [[k, data] blobstore-map]
+      (get-blob-localname data k))))
+
+(defn download-blobs-for-topology-succeed?
+  "Check whether all blobs have been downloaded for the given topology"
+  [stormconf-path target-dir]
+  (let [storm-conf (clojurify-structure (Utils/fromCompressedJsonConf (FileUtils/readFileToByteArray (File. stormconf-path))))
+        blobstore-map (storm-conf TOPOLOGY-BLOBSTORE-MAP)
+        file-names (get-blob-file-names blobstore-map)]
+    (if-not (empty? file-names)
+      (every? #(Utils/checkFileExists target-dir %) file-names)
+      true)))
+
+;; distributed implementation
+(defmethod download-storm-code
+  :distributed [conf storm-id master-code-dir localizer]
+  ;; Downloading to the permanent location is atomic
+  (let [tmproot (str (supervisor-tmp-dir conf) file-path-separator (uuid))
+        stormroot (supervisor-stormdist-root conf storm-id)
+        blobstore (Utils/getClientBlobStoreForSupervisor conf)]
+    (FileUtils/forceMkdir (File. tmproot))
+    (if-not on-windows?
+      (Utils/restrictPermissions tmproot)
+      (if (conf SUPERVISOR-RUN-WORKER-AS-USER)
+        (throw-runtime (str "ERROR: Windows doesn't implement setting the correct permissions"))))
+    (Utils/downloadResourcesAsSupervisor (master-stormjar-key storm-id)
+      (supervisor-stormjar-path tmproot) blobstore)
+    (Utils/downloadResourcesAsSupervisor (master-stormcode-key storm-id)
+      (supervisor-stormcode-path tmproot) blobstore)
+    (Utils/downloadResourcesAsSupervisor (master-stormconf-key storm-id)
+      (supervisor-stormconf-path tmproot) blobstore)
+    (.shutdown blobstore)
+    (extract-dir-from-jar (supervisor-stormjar-path tmproot) RESOURCES-SUBDIR tmproot)
+    (download-blobs-for-topology! conf (supervisor-stormconf-path tmproot) localizer
+      tmproot)
+    (if (download-blobs-for-topology-succeed? (supervisor-stormconf-path tmproot) tmproot)
+      (do
+        (log-message "Successfully downloaded blob resources for storm-id " storm-id)
+        (FileUtils/forceMkdir (File. stormroot))
+        (Files/move (.toPath (File. tmproot)) (.toPath (File. stormroot))
+          (doto (make-array StandardCopyOption 1) (aset 0 StandardCopyOption/ATOMIC_MOVE)))
+        (setup-storm-code-dir conf (read-supervisor-storm-conf conf storm-id) stormroot))
+      (do
+        (log-message "Failed to download blob resources for storm-id " storm-id)
+        (rmr tmproot)))))
+
+(defn write-log-metadata-to-yaml-file! [storm-id port data conf]
+  (let [file (get-log-metadata-file conf storm-id port)]
+    ;; run-worker-as-user needs the directory to have special permissions,
+    ;; or it is insecure
+    (when (not (.exists (.getParentFile file)))
+      (if (conf SUPERVISOR-RUN-WORKER-AS-USER)
+        (do (FileUtils/forceMkdir (.getParentFile file))
+            (setup-storm-code-dir conf (read-supervisor-storm-conf conf storm-id) (.getCanonicalPath (.getParentFile file))))
+        (.mkdirs (.getParentFile file))))
+    (let [writer (java.io.FileWriter. file)
+          yaml (Yaml.)]
+      (try
+        (.dump yaml data writer)
+        (finally
+          (.close writer))))))
+
+(defn write-log-metadata! [storm-conf user worker-id storm-id port conf]
+  (let [data {TOPOLOGY-SUBMITTER-USER user
+              "worker-id" worker-id
+              LOGS-GROUPS (sort (distinct (remove nil?
+                                           (concat
+                                             (storm-conf LOGS-GROUPS)
+                                             (storm-conf TOPOLOGY-GROUPS)))))
+              LOGS-USERS (sort (distinct (remove nil?
+                                           (concat
+                                             (storm-conf LOGS-USERS)
+                                             (storm-conf TOPOLOGY-USERS)))))}]
+    (write-log-metadata-to-yaml-file! storm-id port data conf)))
+
+(defn jlp [stormroot conf]
+  (let [resource-root (str stormroot File/separator RESOURCES-SUBDIR)
+        os (clojure.string/replace (System/getProperty "os.name") #"\s+" "_")
+        arch (System/getProperty "os.arch")
+        arch-resource-root (str resource-root File/separator os "-" arch)]
+    (str arch-resource-root File/pathSeparator resource-root File/pathSeparator (conf JAVA-LIBRARY-PATH))))
+
+(defn substitute-childopts
+  "Generates runtime childopts by replacing the %...% placeholder keys with the worker-id, topology-id, port and mem-onheap values"
+  [value worker-id topology-id port mem-onheap]
+  (let [replacement-map {"%ID%"          (str port)
+                         "%WORKER-ID%"   (str worker-id)
+                         "%TOPOLOGY-ID%"    (str topology-id)
+                         "%WORKER-PORT%" (str port)
+                         "%HEAP-MEM%" (str mem-onheap)}
+        sub-fn #(reduce (fn [string entry]
+                          (apply clojure.string/replace string entry))
+                        %
+                        replacement-map)]
+    (cond
+      (nil? value) nil
+      (sequential? value) (vec (map sub-fn value))
+      :else (-> value sub-fn (clojure.string/split #"\s+")))))
+
+
+(defn create-blobstore-links
+  "Create symlinks in worker launch directory for all blobs"
+  [conf storm-id worker-id]
+  (let [stormroot (supervisor-stormdist-root conf storm-id)
+        storm-conf (read-supervisor-storm-conf conf storm-id)
+        workerroot (worker-root conf worker-id)
+        blobstore-map (storm-conf TOPOLOGY-BLOBSTORE-MAP)
+        blob-file-names (get-blob-file-names blobstore-map)
+        resource-file-names (cons RESOURCES-SUBDIR blob-file-names)]
+    (log-message "Creating symlinks for worker-id: " worker-id " storm-id: "
+      storm-id " for files(" (count resource-file-names) "): " (pr-str resource-file-names))
+    (create-symlink! workerroot stormroot RESOURCES-SUBDIR)
+    (doseq [file-name blob-file-names]
+      (create-symlink! workerroot stormroot file-name file-name))))
+
+(defn create-artifacts-link
+  "Create a symlink from the worker directory to its port artifacts directory"
+  [conf storm-id port worker-id]
+  (let [worker-dir (worker-root conf worker-id)
+        topo-dir (worker-artifacts-root conf storm-id)]
+    (log-message "Creating symlinks for worker-id: " worker-id " storm-id: "
+                 storm-id " to its port artifacts directory")
+    (if (.exists (File. worker-dir))
+      (create-symlink! worker-dir topo-dir "artifacts" port))))
+
+(defmethod launch-worker
+    :distributed [supervisor storm-id port worker-id mem-onheap]
+    (let [conf (:conf supervisor)
+          run-worker-as-user (conf SUPERVISOR-RUN-WORKER-AS-USER)
+          storm-home (System/getProperty "storm.home")
+          storm-options (System/getProperty "storm.options")
+          storm-conf-file (System/getProperty "storm.conf.file")
+          storm-log-dir LOG-DIR
+          storm-log-conf-dir (conf STORM-LOG4J2-CONF-DIR)
+          storm-log4j2-conf-dir (if storm-log-conf-dir
+                                  (if (is-absolute-path? storm-log-conf-dir)
+                                    storm-log-conf-dir
+                                    (str storm-home file-path-separator storm-log-conf-dir))
+                                  (str storm-home file-path-separator "log4j2"))
+          stormroot (supervisor-stormdist-root conf storm-id)
+          jlp (jlp stormroot conf)
+          stormjar (supervisor-stormjar-path stormroot)
+          storm-conf (read-supervisor-storm-conf conf storm-id)
+          topo-classpath (if-let [cp (storm-conf TOPOLOGY-CLASSPATH)]
+                           [cp]
+                           [])
+          classpath (-> (worker-classpath)
+                        (add-to-classpath [stormjar])
+                        (add-to-classpath topo-classpath))
+          top-gc-opts (storm-conf TOPOLOGY-WORKER-GC-CHILDOPTS)
+          mem-onheap (if (and mem-onheap (> mem-onheap 0)) ;; not nil and not zero
+                       (int (Math/ceil mem-onheap)) ;; round up
+                       (storm-conf WORKER-HEAP-MEMORY-MB)) ;; otherwise use default value
+          gc-opts (substitute-childopts (if top-gc-opts top-gc-opts (conf WORKER-GC-CHILDOPTS)) worker-id storm-id port mem-onheap)
+          topo-worker-logwriter-childopts (storm-conf TOPOLOGY-WORKER-LOGWRITER-CHILDOPTS)
+          user (storm-conf TOPOLOGY-SUBMITTER-USER)
+          logfilename "worker.log"
+          workers-artifacts (worker-artifacts-root conf)
+          logging-sensitivity (storm-conf TOPOLOGY-LOGGING-SENSITIVITY "S3")
+          worker-childopts (when-let [s (conf WORKER-CHILDOPTS)]
+                             (substitute-childopts s worker-id storm-id port mem-onheap))
+          topo-worker-childopts (when-let [s (storm-conf TOPOLOGY-WORKER-CHILDOPTS)]
+                                  (substitute-childopts s worker-id storm-id port mem-onheap))
+          worker--profiler-childopts (if (conf WORKER-PROFILER-ENABLED)
+                                       (substitute-childopts (conf WORKER-PROFILER-CHILDOPTS) worker-id storm-id port mem-onheap)
+                                       "")
+          topology-worker-environment (if-let [env (storm-conf TOPOLOGY-ENVIRONMENT)]
+                                        (merge env {"LD_LIBRARY_PATH" jlp})
+                                        {"LD_LIBRARY_PATH" jlp})
+          command (concat
+                    [(java-cmd) "-cp" classpath 
+                     topo-worker-logwriter-childopts
+                     (str "-Dlogfile.name=" logfilename)
+                     (str "-Dstorm.home=" storm-home)
+                     (str "-Dworkers.artifacts=" workers-artifacts)
+                     (str "-Dstorm.id=" storm-id)
+                     (str "-Dworker.id=" worker-id)
+                     (str "-Dworker.port=" port)
+                     (str "-Dstorm.log.dir=" storm-log-dir)
+                     (str "-Dlog4j.configurationFile=" storm-log4j2-conf-dir file-path-separator "worker.xml")
+                     (str "-DLog4jContextSelector=org.apache.logging.log4j.core.selector.BasicContextSelector")
+                     "org.apache.storm.LogWriter"]
+                    [(java-cmd) "-server"]
+                    worker-childopts
+                    topo-worker-childopts
+                    gc-opts
+                    worker--profiler-childopts
+                    [(str "-Djava.library.path=" jlp)
+                     (str "-Dlogfile.name=" logfilename)
+                     (str "-Dstorm.home=" storm-home)
+                     (str "-Dworkers.artifacts=" workers-artifacts)
+                     (str "-Dstorm.conf.file=" storm-conf-file)
+                     (str "-Dstorm.options=" storm-options)
+                     (str "-Dstorm.log.dir=" storm-log-dir)
+                     (str "-Dlogging.sensitivity=" logging-sensitivity)
+                     (str "-Dlog4j.configurationFile=" storm-log4j2-conf-dir file-path-separator "worker.xml")
+                     (str "-DLog4jContextSelector=org.apache.logging.log4j.core.selector.BasicContextSelector")
+                     (str "-Dstorm.id=" storm-id)
+                     (str "-Dworker.id=" worker-id)
+                     (str "-Dworker.port=" port)
+                     "-cp" classpath
+                     "org.apache.storm.daemon.worker"
+                     storm-id
+                     (:assignment-id supervisor)
+                     port
+                     worker-id])
+          command (->> command (map str) (filter (complement empty?)))]
+      (log-message "Launching worker with command: " (shell-cmd command))
+      (write-log-metadata! storm-conf user worker-id storm-id port conf)
+      (set-worker-user! conf worker-id user)
+      (create-artifacts-link conf storm-id port worker-id)
+      (let [log-prefix (str "Worker Process " worker-id)
+            callback (fn [exit-code]
+                       (log-message log-prefix " exited with code: " exit-code)
+                       (add-dead-worker worker-id))
+            worker-dir (worker-root conf worker-id)]
+        (remove-dead-worker worker-id)
+        (create-blobstore-links conf storm-id worker-id)
+        (if run-worker-as-user
+          (worker-launcher conf user ["worker" worker-dir (write-script worker-dir command :environment topology-worker-environment)] :log-prefix log-prefix :exit-code-callback callback :directory (File. worker-dir))
+          (launch-process command :environment topology-worker-environment :log-prefix log-prefix :exit-code-callback callback :directory (File. worker-dir)))
+        )))
+
+;; local implementation
+
+(defn resources-jar []
+  (->> (.split (current-classpath) File/pathSeparator)
+       (filter #(.endsWith  % ".jar"))
+       (filter #(zip-contains-dir? % RESOURCES-SUBDIR))
+       first ))
+
+(defmethod download-storm-code
+  :local [conf storm-id master-code-dir localizer]
+  (let [tmproot (str (supervisor-tmp-dir conf) file-path-separator (uuid))
+        stormroot (supervisor-stormdist-root conf storm-id)
+        blob-store (Utils/getNimbusBlobStore conf master-code-dir nil)]
+    (try
+      (FileUtils/forceMkdir (File. tmproot))
+      (.readBlobTo blob-store (master-stormcode-key storm-id) (FileOutputStream. (supervisor-stormcode-path tmproot)) nil)
+      (.readBlobTo blob-store (master-stormconf-key storm-id) (FileOutputStream. (supervisor-stormconf-path tmproot)) nil)
+      (finally
+        (.shutdown blob-store)))
+    (FileUtils/moveDirectory (File. tmproot) (File. stormroot))
+    (setup-storm-code-dir conf (read-supervisor-storm-conf conf storm-id) stormroot)
+    (let [classloader (.getContextClassLoader (Thread/currentThread))
+          resources-jar (resources-jar)
+          url (.getResource classloader RESOURCES-SUBDIR)
+          target-dir (str stormroot file-path-separator RESOURCES-SUBDIR)]
+      (cond
+        resources-jar
+        (do
+          (log-message "Extracting resources from jar at " resources-jar " to " target-dir)
+          (extract-dir-from-jar resources-jar RESOURCES-SUBDIR stormroot))
+        url
+        (do
+          (log-message "Copying resources at " (str url) " to " target-dir)
+          (FileUtils/copyDirectory (File. (.getFile url)) (File. target-dir)))))))
+
+(defmethod launch-worker
+    :local [supervisor storm-id port worker-id mem-onheap]
+    (let [conf (:conf supervisor)
+          pid (uuid)
+          worker (worker/mk-worker conf
+                                   (:shared-context supervisor)
+                                   storm-id
+                                   (:assignment-id supervisor)
+                                   port
+                                   worker-id)]
+      (set-worker-user! conf worker-id "")
+      (psim/register-process pid worker)
+      (swap! (:worker-thread-pids-atom supervisor) assoc worker-id pid)
+      ))
+
+(defn -launch
+  [supervisor]
+  (log-message "Starting supervisor for storm version '" STORM-VERSION "'")
+  (let [conf (read-storm-config)]
+    (validate-distributed-mode! conf)
+    (let [supervisor (mk-supervisor conf nil supervisor)]
+      (add-shutdown-hook-with-force-kill-in-1-sec #(.shutdown supervisor)))
+    (defgauge supervisor:num-slots-used-gauge #(count (my-worker-ids conf)))
+    (start-metrics-reporters)))
+
+(defn standalone-supervisor []
+  (let [conf-atom (atom nil)
+        id-atom (atom nil)]
+    (reify ISupervisor
+      (prepare [this conf local-dir]
+        (reset! conf-atom conf)
+        (let [state (LocalState. local-dir)
+              curr-id (if-let [id (ls-supervisor-id state)]
+                        id
+                        (generate-supervisor-id))]
+          (ls-supervisor-id! state curr-id)
+          (reset! id-atom curr-id))
+        )
+      (confirmAssigned [this port]
+        true)
+      (getMetadata [this]
+        (doall (map int (get @conf-atom SUPERVISOR-SLOTS-PORTS))))
+      (getSupervisorId [this]
+        @id-atom)
+      (getAssignmentId [this]
+        @id-atom)
+      (killedWorker [this port]
+        )
+      (assigned [this ports]
+        ))))
+
+(defn -main []
+  (setup-default-uncaught-exception-handler)
+  (-launch (standalone-supervisor)))
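
For reference, a minimal sketch (not part of the patch above) of what the
substitute-childopts helper in this file produces; the childopts string,
worker id, topology id, port and heap size are made-up values for
illustration:

  ;; a string value is substituted and then split on whitespace
  (substitute-childopts
    "-Xmx%HEAP-MEM%m -Xloggc:artifacts/gc.%WORKER-ID%-%WORKER-PORT%.log"
    "worker-1234" "wordcount-1-1452550000" 6701 768)
  ;; => ["-Xmx768m" "-Xloggc:artifacts/gc.worker-1234-6701.log"]
  ;; a sequential value is substituted element by element, and nil stays nil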

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/daemon/task.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/daemon/task.clj b/storm-core/src/clj/org/apache/storm/daemon/task.clj
new file mode 100644
index 0000000..1ae9b22
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/daemon/task.clj
@@ -0,0 +1,189 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.daemon.task
+  (:use [org.apache.storm.daemon common])
+  (:use [org.apache.storm config util log])
+  (:import [org.apache.storm.hooks ITaskHook])
+  (:import [org.apache.storm.tuple Tuple TupleImpl])
+  (:import [org.apache.storm.grouping LoadMapping])
+  (:import [org.apache.storm.generated SpoutSpec Bolt StateSpoutSpec StormTopology])
+  (:import [org.apache.storm.hooks.info SpoutAckInfo SpoutFailInfo
+            EmitInfo BoltFailInfo BoltAckInfo])
+  (:import [org.apache.storm.task TopologyContext ShellBolt WorkerTopologyContext])
+  (:import [org.apache.storm.utils Utils])
+  (:import [org.apache.storm.generated ShellComponent JavaObject])
+  (:import [org.apache.storm.spout ShellSpout])
+  (:import [java.util Collection List ArrayList])
+  (:require [org.apache.storm
+             [thrift :as thrift]
+             [stats :as stats]])
+  (:require [org.apache.storm.daemon.builtin-metrics :as builtin-metrics]))
+
+(defn mk-topology-context-builder [worker executor-data topology]
+  (let [conf (:conf worker)]
+    #(TopologyContext.
+      topology
+      (:storm-conf worker)
+      (:task->component worker)
+      (:component->sorted-tasks worker)
+      (:component->stream->fields worker)
+      (:storm-id worker)
+      (supervisor-storm-resources-path
+        (supervisor-stormdist-root conf (:storm-id worker)))
+      (worker-pids-root conf (:worker-id worker))
+      (int %)
+      (:port worker)
+      (:task-ids worker)
+      (:default-shared-resources worker)
+      (:user-shared-resources worker)
+      (:shared-executor-data executor-data)
+      (:interval->task->metric-registry executor-data)
+      (:open-or-prepare-was-called? executor-data))))
+
+(defn system-topology-context [worker executor-data tid]
+  ((mk-topology-context-builder
+    worker
+    executor-data
+    (:system-topology worker))
+   tid))
+
+(defn user-topology-context [worker executor-data tid]
+  ((mk-topology-context-builder
+    worker
+    executor-data
+    (:topology worker))
+   tid))
+
+(defn- get-task-object [^StormTopology topology component-id]
+  (let [spouts (.get_spouts topology)
+        bolts (.get_bolts topology)
+        state-spouts (.get_state_spouts topology)
+        obj (Utils/getSetComponentObject
+             (cond
+              (contains? spouts component-id) (.get_spout_object ^SpoutSpec (get spouts component-id))
+              (contains? bolts component-id) (.get_bolt_object ^Bolt (get bolts component-id))
+              (contains? state-spouts component-id) (.get_state_spout_object ^StateSpoutSpec (get state-spouts component-id))
+              true (throw-runtime "Could not find " component-id " in " topology)))
+        obj (if (instance? ShellComponent obj)
+              (if (contains? spouts component-id)
+                (ShellSpout. obj)
+                (ShellBolt. obj))
+              obj )
+        obj (if (instance? JavaObject obj)
+              (thrift/instantiate-java-object obj)
+              obj )]
+    obj
+    ))
+
+(defn get-context-hooks [^TopologyContext context]
+  (.getHooks context))
+
+(defn hooks-empty? [^Collection hooks]
+  (.isEmpty hooks))
+
+(defmacro apply-hooks [topology-context method-sym info-form]
+  (let [hook-sym (with-meta (gensym "hook") {:tag 'org.apache.storm.hooks.ITaskHook})]
+    `(let [hooks# (get-context-hooks ~topology-context)]
+       (when-not (hooks-empty? hooks#)
+         (let [info# ~info-form]
+           (fast-list-iter [~hook-sym hooks#]
+             (~method-sym ~hook-sym info#)
+             ))))))
+
+
+;; TODO: this is all expensive... should be precomputed
+(defn send-unanchored
+  [task-data stream values]
+    (let [^TopologyContext topology-context (:system-context task-data)
+          tasks-fn (:tasks-fn task-data)
+          transfer-fn (-> task-data :executor-data :transfer-fn)
+          out-tuple (TupleImpl. topology-context
+                                 values
+                                 (.getThisTaskId topology-context)
+                                 stream)]
+      (fast-list-iter [t (tasks-fn stream values)]
+        (transfer-fn t out-tuple))))
+
+(defn mk-tasks-fn [task-data]
+  (let [task-id (:task-id task-data)
+        executor-data (:executor-data task-data)
+        ^LoadMapping load-mapping (:load-mapping (:worker executor-data))
+        component-id (:component-id executor-data)
+        ^WorkerTopologyContext worker-context (:worker-context executor-data)
+        storm-conf (:storm-conf executor-data)
+        emit-sampler (mk-stats-sampler storm-conf)
+        stream->component->grouper (:stream->component->grouper executor-data)
+        user-context (:user-context task-data)
+        executor-stats (:stats executor-data)
+        debug? (= true (storm-conf TOPOLOGY-DEBUG))]
+        
+    (fn ([^Integer out-task-id ^String stream ^List values]
+          (when debug?
+            (log-message "Emitting direct: " out-task-id "; " component-id " " stream " " values))
+          (let [target-component (.getComponentId worker-context out-task-id)
+                component->grouping (get stream->component->grouper stream)
+                grouping (get component->grouping target-component)
+                out-task-id (if grouping out-task-id)]
+            (when (and (not-nil? grouping) (not= :direct grouping))
+              (throw (IllegalArgumentException. "Cannot emitDirect to a task expecting a regular grouping")))                          
+            (apply-hooks user-context .emit (EmitInfo. values stream task-id [out-task-id]))
+            (when (emit-sampler)
+              (stats/emitted-tuple! executor-stats stream)
+              (if out-task-id
+                (stats/transferred-tuples! executor-stats stream 1)))
+            (if out-task-id [out-task-id])
+            ))
+        ([^String stream ^List values]
+           (when debug?
+             (log-message "Emitting: " component-id " " stream " " values))
+           (let [out-tasks (ArrayList.)]
+             (fast-map-iter [[out-component grouper] (get stream->component->grouper stream)]
+               (when (= :direct grouper)
+                  ;;  TODO: this is wrong, need to check how the stream was declared
+                  (throw (IllegalArgumentException. "Cannot do regular emit to direct stream")))
+               (let [comp-tasks (grouper task-id values load-mapping)]
+                 (if (or (sequential? comp-tasks) (instance? Collection comp-tasks))
+                   (.addAll out-tasks comp-tasks)
+                   (.add out-tasks comp-tasks)
+                   )))
+             (apply-hooks user-context .emit (EmitInfo. values stream task-id out-tasks))
+             (when (emit-sampler)
+               (stats/emitted-tuple! executor-stats stream)
+               (stats/transferred-tuples! executor-stats stream (count out-tasks)))
+             out-tasks)))
+    ))
+
+(defn mk-task-data [executor-data task-id]
+  (recursive-map
+    :executor-data executor-data
+    :task-id task-id
+    :system-context (system-topology-context (:worker executor-data) executor-data task-id)
+    :user-context (user-topology-context (:worker executor-data) executor-data task-id)
+    :builtin-metrics (builtin-metrics/make-data (:type executor-data) (:stats executor-data))
+    :tasks-fn (mk-tasks-fn <>)
+    :object (get-task-object (.getRawTopology ^TopologyContext (:system-context <>)) (:component-id executor-data))))
+
+
+(defn mk-task [executor-data task-id]
+  (let [task-data (mk-task-data executor-data task-id)
+        storm-conf (:storm-conf executor-data)]
+    (doseq [klass (storm-conf TOPOLOGY-AUTO-TASK-HOOKS)]
+      (.addTaskHook ^TopologyContext (:user-context task-data) (-> klass Class/forName .newInstance)))
+    ;; when this is called, the threads for the executor haven't been started yet,
+    ;; so we won't be risking trampling on the single-threaded claim strategy disruptor queue
+    (send-unanchored task-data SYSTEM-STREAM-ID ["startup"])
+    task-data
+    ))
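
As a rough sketch of the two arities of the tasks-fn built by mk-tasks-fn
above (task-data here is the map returned by mk-task-data; the stream name,
tuple values and direct task id are hypothetical):

  (let [tasks-fn (:tasks-fn task-data)]
    ;; regular emit: every grouper registered for the stream picks destination
    ;; tasks, and the returned collection is what the executor transfers to
    (tasks-fn "default" ["hello" 1])
    ;; direct emit: returns [7] when task 7's component consumes the stream
    ;; with a :direct grouping, nil when it does not consume the stream, and
    ;; throws when it consumes it with a regular grouping
    (tasks-fn 7 "default" ["hello" 1]))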


[51/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability
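
In practice the migration means topology code imports the org.apache.storm
packages where it previously imported backtype.storm; the old names are what
this commit tries to keep working for backwards compatibility. A minimal
sketch (the namespace name is hypothetical):

  (ns com.example.word-count-topology
    (:import [org.apache.storm Config StormSubmitter]        ;; previously backtype.storm
             [org.apache.storm.topology TopologyBuilder]))   ;; previously backtype.storm.topology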


Project: http://git-wip-us.apache.org/repos/asf/storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/storm/commit/d839d1bf
Tree: http://git-wip-us.apache.org/repos/asf/storm/tree/d839d1bf
Diff: http://git-wip-us.apache.org/repos/asf/storm/diff/d839d1bf

Branch: refs/heads/master
Commit: d839d1bf88b855edda344fc548f0701e2a018655
Parents: a9a7bab
Author: Robert (Bobby) Evans <ev...@yahoo-inc.com>
Authored: Mon Jan 11 14:42:34 2016 -0600
Committer: Robert (Bobby) Evans <ev...@yahoo-inc.com>
Committed: Mon Jan 11 14:42:34 2016 -0600

----------------------------------------------------------------------
 DEVELOPER.md                                    |    12 +-
 SECURITY.md                                     |    14 +-
 bin/storm-config.cmd                            |     4 +-
 bin/storm.cmd                                   |    38 +-
 bin/storm.py                                    |    48 +-
 conf/defaults.yaml                              |    44 +-
 conf/storm.yaml.example                         |     2 +-
 examples/storm-starter/README.markdown          |     4 +-
 .../multilang/resources/randomsentence.js       |     2 +-
 .../org/apache/storm/starter/clj/word_count.clj |    95 +
 .../src/clj/storm/starter/clj/word_count.clj    |    95 -
 .../apache/storm/starter/BasicDRPCTopology.java |    78 +
 .../starter/BlobStoreAPIWordCountTopology.java  |   304 +
 .../storm/starter/ExclamationTopology.java      |    87 +
 .../storm/starter/FastWordCountTopology.java    |   198 +
 .../storm/starter/InOrderDeliveryTest.java      |   175 +
 .../org/apache/storm/starter/ManualDRPC.java    |    68 +
 .../storm/starter/MultipleLoggerTopology.java   |   105 +
 .../apache/storm/starter/PrintSampleStream.java |    58 +
 .../org/apache/storm/starter/ReachTopology.java |   196 +
 .../starter/ResourceAwareExampleTopology.java   |   106 +
 .../apache/storm/starter/RollingTopWords.java   |   130 +
 .../apache/storm/starter/SingleJoinExample.java |    64 +
 .../storm/starter/SkewedRollingTopWords.java    |   132 +
 .../storm/starter/SlidingTupleTsTopology.java   |    62 +
 .../storm/starter/SlidingWindowTopology.java    |   108 +
 .../storm/starter/ThroughputVsLatency.java      |   432 +
 .../storm/starter/TransactionalGlobalCount.java |   174 +
 .../storm/starter/TransactionalWords.java       |   246 +
 .../apache/storm/starter/WordCountTopology.java |   106 +
 .../storm/starter/WordCountTopologyNode.java    |   121 +
 .../storm/starter/bolt/AbstractRankerBolt.java  |   110 +
 .../starter/bolt/IntermediateRankingsBolt.java  |    58 +
 .../apache/storm/starter/bolt/PrinterBolt.java  |    37 +
 .../storm/starter/bolt/RollingCountAggBolt.java |    77 +
 .../storm/starter/bolt/RollingCountBolt.java    |   142 +
 .../storm/starter/bolt/SingleJoinBolt.java      |   114 +
 .../starter/bolt/SlidingWindowSumBolt.java      |    80 +
 .../storm/starter/bolt/TotalRankingsBolt.java   |    59 +
 .../storm/starter/spout/RandomIntegerSpout.java |    55 +
 .../starter/spout/RandomSentenceSpout.java      |    64 +
 .../storm/starter/spout/TwitterSampleSpout.java |   164 +
 .../tools/NthLastModifiedTimeTracker.java       |    70 +
 .../apache/storm/starter/tools/Rankable.java    |    32 +
 .../starter/tools/RankableObjectWithFields.java |   148 +
 .../apache/storm/starter/tools/Rankings.java    |   156 +
 .../starter/tools/SlidingWindowCounter.java     |   119 +
 .../storm/starter/tools/SlotBasedCounter.java   |   118 +
 .../starter/trident/TridentKafkaWordCount.java  |   229 +
 .../storm/starter/trident/TridentReach.java     |   156 +
 .../storm/starter/trident/TridentWordCount.java |    85 +
 .../apache/storm/starter/util/StormRunner.java  |    48 +
 .../jvm/storm/starter/BasicDRPCTopology.java    |    78 -
 .../starter/BlobStoreAPIWordCountTopology.java  |   304 -
 .../jvm/storm/starter/ExclamationTopology.java  |    87 -
 .../storm/starter/FastWordCountTopology.java    |   198 -
 .../jvm/storm/starter/InOrderDeliveryTest.java  |   175 -
 .../src/jvm/storm/starter/ManualDRPC.java       |    68 -
 .../storm/starter/MultipleLoggerTopology.java   |   105 -
 .../jvm/storm/starter/PrintSampleStream.java    |    58 -
 .../src/jvm/storm/starter/ReachTopology.java    |   196 -
 .../starter/ResourceAwareExampleTopology.java   |   106 -
 .../src/jvm/storm/starter/RollingTopWords.java  |   130 -
 .../jvm/storm/starter/SingleJoinExample.java    |    64 -
 .../storm/starter/SkewedRollingTopWords.java    |   132 -
 .../storm/starter/SlidingTupleTsTopology.java   |    62 -
 .../storm/starter/SlidingWindowTopology.java    |   108 -
 .../jvm/storm/starter/ThroughputVsLatency.java  |   432 -
 .../storm/starter/TransactionalGlobalCount.java |   174 -
 .../jvm/storm/starter/TransactionalWords.java   |   246 -
 .../jvm/storm/starter/WordCountTopology.java    |   106 -
 .../storm/starter/WordCountTopologyNode.java    |   121 -
 .../storm/starter/bolt/AbstractRankerBolt.java  |   110 -
 .../starter/bolt/IntermediateRankingsBolt.java  |    58 -
 .../src/jvm/storm/starter/bolt/PrinterBolt.java |    37 -
 .../storm/starter/bolt/RollingCountAggBolt.java |    77 -
 .../storm/starter/bolt/RollingCountBolt.java    |   142 -
 .../jvm/storm/starter/bolt/SingleJoinBolt.java  |   114 -
 .../starter/bolt/SlidingWindowSumBolt.java      |    80 -
 .../storm/starter/bolt/TotalRankingsBolt.java   |    59 -
 .../storm/starter/spout/RandomIntegerSpout.java |    55 -
 .../starter/spout/RandomSentenceSpout.java      |    64 -
 .../storm/starter/spout/TwitterSampleSpout.java |   164 -
 .../tools/NthLastModifiedTimeTracker.java       |    70 -
 .../src/jvm/storm/starter/tools/Rankable.java   |    32 -
 .../starter/tools/RankableObjectWithFields.java |   148 -
 .../src/jvm/storm/starter/tools/Rankings.java   |   156 -
 .../starter/tools/SlidingWindowCounter.java     |   119 -
 .../storm/starter/tools/SlotBasedCounter.java   |   118 -
 .../starter/trident/TridentKafkaWordCount.java  |   229 -
 .../jvm/storm/starter/trident/TridentReach.java |   156 -
 .../storm/starter/trident/TridentWordCount.java |    85 -
 .../src/jvm/storm/starter/util/StormRunner.java |    48 -
 .../bolt/IntermediateRankingsBoltTest.java      |   146 +
 .../starter/bolt/RollingCountBoltTest.java      |   113 +
 .../starter/bolt/TotalRankingsBoltTest.java     |   147 +
 .../tools/NthLastModifiedTimeTrackerTest.java   |   125 +
 .../tools/RankableObjectWithFieldsTest.java     |   252 +
 .../storm/starter/tools/RankingsTest.java       |   368 +
 .../starter/tools/SlidingWindowCounterTest.java |   106 +
 .../starter/tools/SlotBasedCounterTest.java     |   181 +
 .../bolt/IntermediateRankingsBoltTest.java      |   146 -
 .../starter/bolt/RollingCountBoltTest.java      |   113 -
 .../starter/bolt/TotalRankingsBoltTest.java     |   147 -
 .../tools/NthLastModifiedTimeTrackerTest.java   |   125 -
 .../tools/RankableObjectWithFieldsTest.java     |   252 -
 .../jvm/storm/starter/tools/RankingsTest.java   |   368 -
 .../starter/tools/SlidingWindowCounterTest.java |   106 -
 .../starter/tools/SlotBasedCounterTest.java     |   181 -
 external/flux/README.md                         |    50 +-
 .../main/java/org/apache/storm/flux/Flux.java   |    14 +-
 .../java/org/apache/storm/flux/FluxBuilder.java |    16 +-
 .../apache/storm/flux/api/TopologySource.java   |     2 +-
 .../storm/flux/model/ExecutionContext.java      |     6 +-
 .../org/apache/storm/flux/model/ObjectDef.java  |     2 +-
 .../apache/storm/flux/model/TopologyDef.java    |     2 +-
 .../java/org/apache/storm/flux/TCKTest.java     |     4 +-
 .../apache/storm/flux/test/SimpleTopology.java  |     4 +-
 .../storm/flux/test/SimpleTopologySource.java   |     4 +-
 .../test/SimpleTopologyWithConfigParam.java     |     6 +-
 .../org/apache/storm/flux/test/TestBolt.java    |     8 +-
 .../storm/flux/test/TridentTopologySource.java  |    24 +-
 .../src/test/resources/configs/bad_hbase.yaml   |    10 +-
 .../resources/configs/config-methods-test.yaml  |     2 +-
 .../resources/configs/diamond-topology.yaml     |     2 +-
 .../src/test/resources/configs/hdfs_test.yaml   |     2 +-
 .../src/test/resources/configs/kafka_test.yaml  |    14 +-
 .../src/test/resources/configs/shell_test.yaml  |     2 +-
 .../test/resources/configs/simple_hbase.yaml    |    10 +-
 .../resources/configs/substitution-test.yaml    |     2 +-
 .../src/test/resources/configs/tck.yaml         |     6 +-
 .../storm/flux/examples/TestPrintBolt.java      |    10 +-
 .../storm/flux/examples/TestWindowBolt.java     |    14 +-
 .../apache/storm/flux/examples/WordCounter.java |    18 +-
 .../src/main/resources/kafka_spout.yaml         |    14 +-
 .../src/main/resources/multilang.yaml           |     2 +-
 .../src/main/resources/simple_hbase.yaml        |     6 +-
 .../src/main/resources/simple_hdfs.yaml         |     4 +-
 .../src/main/resources/simple_windowing.yaml    |     6 +-
 .../src/main/resources/simple_wordcount.yaml    |     4 +-
 .../flux/wrappers/bolts/FluxShellBolt.java      |     8 +-
 .../storm/flux/wrappers/bolts/LogInfoBolt.java  |     8 +-
 .../flux/wrappers/spouts/FluxShellSpout.java    |     8 +-
 .../main/resources/resources/randomsentence.js  |     2 +-
 external/sql/README.md                          |     6 +-
 .../src/jvm/org/apache/storm/sql/StormSql.java  |     4 +-
 .../jvm/org/apache/storm/sql/StormSqlImpl.java  |     6 +-
 .../org/apache/storm/sql/StormSqlRunner.java    |     6 +-
 .../backends/standalone/PlanCompiler.java       |     2 +-
 .../compiler/backends/trident/PlanCompiler.java |    18 +-
 .../backends/trident/RelNodeCompiler.java       |     2 +-
 .../test/org/apache/storm/sql/TestStormSql.java |    12 +-
 .../storm/sql/compiler/TestExprSemantic.java    |     2 +-
 .../backends/standalone/TestPlanCompiler.java   |     2 +-
 .../backends/trident/TestPlanCompiler.java      |    16 +-
 .../org/apache/storm/sql/kafka/JsonScheme.java  |     8 +-
 .../sql/kafka/KafkaDataSourcesProvider.java     |    26 +-
 .../storm/sql/kafka/TestJsonRepresentation.java |     2 +-
 .../sql/kafka/TestKafkaDataSourcesProvider.java |     4 +-
 .../sql/runtime/AbstractChannelHandler.java     |     2 +-
 .../sql/runtime/AbstractValuesProcessor.java    |     2 +-
 .../storm/sql/runtime/ChannelContext.java       |     2 +-
 .../storm/sql/runtime/ChannelHandler.java       |     2 +-
 .../org/apache/storm/sql/runtime/Channels.java  |     2 +-
 .../sql/runtime/ISqlTridentDataSource.java      |     6 +-
 .../trident/AbstractTridentProcessor.java       |     4 +-
 .../test/org/apache/storm/sql/TestUtils.java    |    20 +-
 .../AbstractExecutionResultHandler.java         |     4 +-
 .../cassandra/BaseExecutionResultHandler.java   |     4 +-
 .../storm/cassandra/ExecutionResultHandler.java |     4 +-
 .../storm/cassandra/Murmur3StreamGrouping.java  |    12 +-
 .../storm/cassandra/bolt/BaseCassandraBolt.java |    20 +-
 .../bolt/BatchCassandraWriterBolt.java          |    10 +-
 .../cassandra/bolt/CassandraWriterBolt.java     |     2 +-
 .../cassandra/bolt/GroupingBatchBuilder.java    |     2 +-
 .../bolt/PairBatchStatementTuples.java          |     2 +-
 .../cassandra/bolt/PairStatementTuple.java      |     2 +-
 .../storm/cassandra/client/CassandraConf.java   |     2 +-
 .../cassandra/executor/AsyncResultHandler.java  |     2 +-
 .../executor/ExecutionResultCollector.java      |     8 +-
 .../executor/impl/BatchAsyncResultHandler.java  |     4 +-
 .../executor/impl/SingleAsyncResultHandler.java |     4 +-
 .../query/BaseCQLStatementTupleMapper.java      |     4 +-
 .../query/CQLResultSetValuesMapper.java         |     4 +-
 .../query/CQLStatementTupleMapper.java          |     4 +-
 .../storm/cassandra/query/ContextQuery.java     |     2 +-
 .../apache/storm/cassandra/query/CqlMapper.java |     2 +-
 .../impl/BatchCQLStatementTupleMapper.java      |     4 +-
 .../impl/BoundCQLStatementTupleMapper.java      |     2 +-
 .../query/impl/RoutingKeyGenerator.java         |     2 +-
 .../query/impl/SimpleCQLStatementMapper.java    |     4 +-
 .../cassandra/query/selector/FieldSelector.java |     2 +-
 .../cassandra/trident/state/CassandraQuery.java |     8 +-
 .../cassandra/trident/state/CassandraState.java |    10 +-
 .../trident/state/CassandraStateFactory.java    |     6 +-
 .../trident/state/CassandraStateUpdater.java    |     6 +-
 .../state/TridentResultSetValuesMapper.java     |     6 +-
 .../cassandra/DynamicStatementBuilderTest.java  |     6 +-
 .../apache/storm/cassandra/WeatherSpout.java    |    14 +-
 .../storm/cassandra/bolt/BaseTopologyTest.java  |    10 +-
 .../bolt/BatchCassandraWriterBoltTest.java      |     2 +-
 .../cassandra/bolt/CassandraWriterBoltTest.java |     2 +-
 .../cassandra/trident/TridentTopologyTest.java  |    18 +-
 .../cassandra/trident/WeatherBatchSpout.java    |     8 +-
 .../elasticsearch/ElasticsearchGetRequest.java  |     2 +-
 .../elasticsearch/EsLookupResultOutput.java     |     4 +-
 .../elasticsearch/bolt/AbstractEsBolt.java      |    10 +-
 .../storm/elasticsearch/bolt/EsIndexBolt.java   |     8 +-
 .../storm/elasticsearch/bolt/EsLookupBolt.java  |     6 +-
 .../elasticsearch/bolt/EsPercolateBolt.java     |    12 +-
 .../common/DefaultEsTupleMapper.java            |     2 +-
 .../elasticsearch/common/EsTupleMapper.java     |     2 +-
 .../storm/elasticsearch/trident/EsState.java    |     6 +-
 .../elasticsearch/trident/EsStateFactory.java   |     6 +-
 .../storm/elasticsearch/trident/EsUpdater.java  |     6 +-
 .../bolt/AbstractEsBoltIntegrationTest.java     |     2 +-
 .../elasticsearch/bolt/AbstractEsBoltTest.java  |     4 +-
 .../elasticsearch/bolt/EsIndexBoltTest.java     |     4 +-
 .../elasticsearch/bolt/EsIndexTopology.java     |    18 +-
 .../bolt/EsLookupBoltIntegrationTest.java       |    10 +-
 .../elasticsearch/bolt/EsLookupBoltTest.java    |     8 +-
 .../elasticsearch/bolt/EsPercolateBoltTest.java |     6 +-
 .../storm/elasticsearch/common/EsTestUtil.java  |    16 +-
 .../trident/TridentEsTopology.java              |    22 +-
 .../eventhubs/bolt/DefaultEventDataFormat.java  |     2 +-
 .../storm/eventhubs/bolt/EventHubBolt.java      |    10 +-
 .../storm/eventhubs/bolt/IEventDataFormat.java  |     2 +-
 .../storm/eventhubs/samples/EventCount.java     |    12 +-
 .../storm/eventhubs/samples/EventHubLoop.java   |     4 +-
 .../samples/OpaqueTridentEventCount.java        |    14 +-
 .../samples/TransactionalTridentEventCount.java |    18 +-
 .../eventhubs/samples/bolt/GlobalCountBolt.java |    14 +-
 .../samples/bolt/PartialCountBolt.java          |    14 +-
 .../storm/eventhubs/spout/EventDataScheme.java  |     2 +-
 .../eventhubs/spout/EventHubReceiverImpl.java   |     6 +-
 .../storm/eventhubs/spout/EventHubSpout.java    |    12 +-
 .../storm/eventhubs/spout/IEventDataScheme.java |     2 +-
 .../storm/eventhubs/trident/Coordinator.java    |     4 +-
 .../trident/OpaqueTridentEventHubEmitter.java   |     6 +-
 .../trident/OpaqueTridentEventHubSpout.java     |     6 +-
 .../storm/eventhubs/trident/Partition.java      |     2 +-
 .../TransactionalTridentEventHubEmitter.java    |     8 +-
 .../TransactionalTridentEventHubSpout.java      |     6 +-
 .../spout/EventHubSpoutCallerMock.java          |     4 +-
 .../spout/SpoutOutputCollectorMock.java         |     2 +-
 .../eventhubs/trident/TridentCollectorMock.java |     2 +-
 .../storm/hbase/bolt/AbstractHBaseBolt.java     |     8 +-
 .../org/apache/storm/hbase/bolt/HBaseBolt.java  |     8 +-
 .../storm/hbase/bolt/HBaseLookupBolt.java       |     8 +-
 .../storm/hbase/bolt/mapper/HBaseMapper.java    |     4 +-
 .../hbase/bolt/mapper/HBaseValueMapper.java     |     6 +-
 .../hbase/bolt/mapper/SimpleHBaseMapper.java    |     4 +-
 .../apache/storm/hbase/security/AutoHBase.java  |     8 +-
 .../storm/hbase/security/HBaseSecurityUtil.java |     2 +-
 .../mapper/SimpleTridentHBaseMapper.java        |     6 +-
 .../trident/mapper/TridentHBaseMapper.java      |     6 +-
 .../hbase/trident/state/HBaseMapState.java      |    10 +-
 .../storm/hbase/trident/state/HBaseQuery.java   |     8 +-
 .../storm/hbase/trident/state/HBaseState.java   |    12 +-
 .../hbase/trident/state/HBaseStateFactory.java  |     6 +-
 .../storm/hbase/trident/state/HBaseUpdater.java |     6 +-
 .../storm/hbase/topology/LookupWordCount.java   |    10 +-
 .../hbase/topology/PersistentWordCount.java     |    10 +-
 .../storm/hbase/topology/TotalWordCounter.java  |    16 +-
 .../hbase/topology/WordCountValueMapper.java    |     8 +-
 .../storm/hbase/topology/WordCounter.java       |    16 +-
 .../apache/storm/hbase/topology/WordSpout.java  |    12 +-
 .../storm/hbase/trident/PrintFunction.java      |     6 +-
 .../storm/hbase/trident/WordCountTrident.java   |    22 +-
 .../storm/hdfs/blobstore/HdfsBlobStore.java     |    32 +-
 .../storm/hdfs/blobstore/HdfsBlobStoreFile.java |     4 +-
 .../storm/hdfs/blobstore/HdfsBlobStoreImpl.java |     6 +-
 .../hdfs/blobstore/HdfsClientBlobStore.java     |    18 +-
 .../storm/hdfs/bolt/AbstractHdfsBolt.java       |    16 +-
 .../storm/hdfs/bolt/AvroGenericRecordBolt.java  |    10 +-
 .../org/apache/storm/hdfs/bolt/HdfsBolt.java    |     6 +-
 .../storm/hdfs/bolt/SequenceFileBolt.java       |     6 +-
 .../hdfs/bolt/format/DefaultFileNameFormat.java |     2 +-
 .../hdfs/bolt/format/DefaultSequenceFormat.java |     2 +-
 .../hdfs/bolt/format/DelimitedRecordFormat.java |     4 +-
 .../storm/hdfs/bolt/format/FileNameFormat.java  |     2 +-
 .../storm/hdfs/bolt/format/RecordFormat.java    |     2 +-
 .../storm/hdfs/bolt/format/SequenceFormat.java  |     2 +-
 .../hdfs/bolt/rotation/FileRotationPolicy.java  |     2 +-
 .../bolt/rotation/FileSizeRotationPolicy.java   |     2 +-
 .../hdfs/bolt/rotation/NoRotationPolicy.java    |     2 +-
 .../hdfs/bolt/rotation/TimedRotationPolicy.java |     2 +-
 .../storm/hdfs/bolt/sync/CountSyncPolicy.java   |     2 +-
 .../apache/storm/hdfs/bolt/sync/SyncPolicy.java |     2 +-
 .../storm/hdfs/common/security/AutoHDFS.java    |     8 +-
 .../hdfs/common/security/HdfsSecurityUtil.java  |     4 +-
 .../apache/storm/hdfs/trident/HdfsState.java    |    12 +-
 .../storm/hdfs/trident/HdfsStateFactory.java    |     6 +-
 .../apache/storm/hdfs/trident/HdfsUpdater.java  |     6 +-
 .../trident/format/DefaultSequenceFormat.java   |     2 +-
 .../trident/format/DelimitedRecordFormat.java   |     4 +-
 .../storm/hdfs/trident/format/RecordFormat.java |     2 +-
 .../hdfs/trident/format/SequenceFormat.java     |     2 +-
 .../trident/rotation/FileRotationPolicy.java    |     2 +-
 .../rotation/FileSizeRotationPolicy.java        |     2 +-
 .../hdfs/trident/rotation/NoRotationPolicy.java |     2 +-
 .../trident/rotation/TimedRotationPolicy.java   |     2 +-
 .../hdfs/trident/sync/CountSyncPolicy.java      |     2 +-
 .../storm/hdfs/trident/sync/SyncPolicy.java     |     2 +-
 .../storm/hdfs/blobstore/BlobStoreTest.java     |    32 +-
 .../hdfs/blobstore/HdfsBlobStoreImplTest.java   |     6 +-
 .../hdfs/bolt/AvroGenericRecordBoltTest.java    |    18 +-
 .../storm/hdfs/bolt/HdfsFileTopology.java       |    26 +-
 .../storm/hdfs/bolt/SequenceFileTopology.java   |    26 +-
 .../apache/storm/hdfs/bolt/TestHdfsBolt.java    |    20 +-
 .../storm/hdfs/bolt/TestSequenceFileBolt.java   |    20 +-
 .../storm/hdfs/trident/FixedBatchSpout.java     |    12 +-
 .../storm/hdfs/trident/HdfsStateTest.java       |     8 +-
 .../storm/hdfs/trident/TridentFileTopology.java |    26 +-
 .../hdfs/trident/TridentSequenceTopology.java   |    26 +-
 .../org/apache/storm/hive/bolt/HiveBolt.java    |    14 +-
 .../bolt/mapper/DelimitedRecordHiveMapper.java  |     6 +-
 .../storm/hive/bolt/mapper/HiveMapper.java      |     6 +-
 .../hive/bolt/mapper/JsonRecordHiveMapper.java  |     6 +-
 .../apache/storm/hive/common/HiveWriter.java    |     2 +-
 .../apache/storm/hive/trident/HiveState.java    |    10 +-
 .../storm/hive/trident/HiveStateFactory.java    |     6 +-
 .../apache/storm/hive/trident/HiveUpdater.java  |     6 +-
 .../apache/storm/hive/bolt/HiveTopology.java    |    20 +-
 .../hive/bolt/HiveTopologyPartitioned.java      |    22 +-
 .../apache/storm/hive/bolt/TestHiveBolt.java    |    18 +-
 .../storm/hive/common/TestHiveWriter.java       |    14 +-
 .../storm/hive/trident/TridentHiveTopology.java |    26 +-
 .../storm/jdbc/bolt/AbstractJdbcBolt.java       |     8 +-
 .../apache/storm/jdbc/bolt/JdbcInsertBolt.java  |     8 +-
 .../apache/storm/jdbc/bolt/JdbcLookupBolt.java  |     6 +-
 .../storm/jdbc/mapper/JdbcLookupMapper.java     |     6 +-
 .../apache/storm/jdbc/mapper/JdbcMapper.java    |     2 +-
 .../jdbc/mapper/SimpleJdbcLookupMapper.java     |     8 +-
 .../storm/jdbc/mapper/SimpleJdbcMapper.java     |     2 +-
 .../storm/jdbc/trident/state/JdbcQuery.java     |     8 +-
 .../storm/jdbc/trident/state/JdbcState.java     |    12 +-
 .../jdbc/trident/state/JdbcStateFactory.java    |     6 +-
 .../storm/jdbc/trident/state/JdbcUpdater.java   |     6 +-
 .../storm/jdbc/bolt/JdbcLookupBoltTest.java     |     2 +-
 .../org/apache/storm/jdbc/spout/UserSpout.java  |    12 +-
 .../jdbc/topology/AbstractUserTopology.java     |    10 +-
 .../jdbc/topology/UserPersistanceTopology.java  |     4 +-
 .../UserPersistanceTridentTopology.java         |    10 +-
 external/storm-kafka/README.md                  |     8 +-
 .../src/jvm/org/apache/storm/kafka/Broker.java  |    86 +
 .../jvm/org/apache/storm/kafka/BrokerHosts.java |    25 +
 .../storm/kafka/ByteBufferSerializer.java       |    41 +
 .../storm/kafka/DynamicBrokersReader.java       |   213 +
 .../kafka/DynamicPartitionConnections.java      |    98 +
 .../ExponentialBackoffMsgRetryManager.java      |   184 +
 .../storm/kafka/FailedFetchException.java       |    29 +
 .../storm/kafka/FailedMsgRetryManager.java      |    29 +
 .../org/apache/storm/kafka/IntSerializer.java   |    42 +
 .../jvm/org/apache/storm/kafka/KafkaConfig.java |    53 +
 .../jvm/org/apache/storm/kafka/KafkaError.java  |    43 +
 .../jvm/org/apache/storm/kafka/KafkaSpout.java  |   198 +
 .../jvm/org/apache/storm/kafka/KafkaUtils.java  |   275 +
 .../org/apache/storm/kafka/KeyValueScheme.java  |    27 +
 .../kafka/KeyValueSchemeAsMultiScheme.java      |    38 +
 .../storm/kafka/MessageMetadataScheme.java      |    27 +
 .../MessageMetadataSchemeAsMultiScheme.java     |    41 +
 .../jvm/org/apache/storm/kafka/Partition.java   |    87 +
 .../storm/kafka/PartitionCoordinator.java       |    28 +
 .../apache/storm/kafka/PartitionManager.java    |   316 +
 .../jvm/org/apache/storm/kafka/SpoutConfig.java |    46 +
 .../apache/storm/kafka/StaticCoordinator.java   |    52 +
 .../jvm/org/apache/storm/kafka/StaticHosts.java |    38 +
 .../storm/kafka/StaticPartitionConnections.java |    52 +
 .../storm/kafka/StringKeyValueScheme.java       |    38 +
 .../kafka/StringMessageAndMetadataScheme.java   |    43 +
 .../storm/kafka/StringMultiSchemeWithTopic.java |    48 +
 .../org/apache/storm/kafka/StringScheme.java    |    50 +
 .../kafka/TopicOffsetOutOfRangeException.java   |    25 +
 .../org/apache/storm/kafka/ZkCoordinator.java   |   113 +
 .../src/jvm/org/apache/storm/kafka/ZkHosts.java |    36 +
 .../src/jvm/org/apache/storm/kafka/ZkState.java |   116 +
 .../org/apache/storm/kafka/bolt/KafkaBolt.java  |   178 +
 .../FieldNameBasedTupleToKafkaMapper.java       |    48 +
 .../kafka/bolt/mapper/TupleToKafkaMapper.java   |    32 +
 .../bolt/selector/DefaultTopicSelector.java     |    34 +
 .../kafka/bolt/selector/KafkaTopicSelector.java |    26 +
 .../apache/storm/kafka/trident/Coordinator.java |    51 +
 .../storm/kafka/trident/DefaultCoordinator.java |    31 +
 .../trident/GlobalPartitionInformation.java     |   112 +
 .../storm/kafka/trident/IBatchCoordinator.java  |    26 +
 .../storm/kafka/trident/IBrokerReader.java      |    30 +
 .../apache/storm/kafka/trident/MaxMetric.java   |    40 +
 .../kafka/trident/OpaqueTridentKafkaSpout.java  |    60 +
 .../storm/kafka/trident/StaticBrokerReader.java |    49 +
 .../trident/TransactionalTridentKafkaSpout.java |    58 +
 .../storm/kafka/trident/TridentKafkaConfig.java |    37 +
 .../kafka/trident/TridentKafkaEmitter.java      |   287 +
 .../storm/kafka/trident/TridentKafkaState.java  |   102 +
 .../kafka/trident/TridentKafkaStateFactory.java |    63 +
 .../kafka/trident/TridentKafkaUpdater.java      |    31 +
 .../storm/kafka/trident/ZkBrokerReader.java     |    84 +
 .../FieldNameBasedTupleToKafkaMapper.java       |    41 +
 .../mapper/TridentTupleToKafkaMapper.java       |    28 +
 .../trident/selector/DefaultTopicSelector.java  |    34 +
 .../trident/selector/KafkaTopicSelector.java    |    26 +
 .../storm-kafka/src/jvm/storm/kafka/Broker.java |    86 -
 .../src/jvm/storm/kafka/BrokerHosts.java        |    25 -
 .../jvm/storm/kafka/ByteBufferSerializer.java   |    41 -
 .../jvm/storm/kafka/DynamicBrokersReader.java   |   213 -
 .../kafka/DynamicPartitionConnections.java      |    98 -
 .../ExponentialBackoffMsgRetryManager.java      |   184 -
 .../jvm/storm/kafka/FailedFetchException.java   |    29 -
 .../jvm/storm/kafka/FailedMsgRetryManager.java  |    29 -
 .../src/jvm/storm/kafka/IntSerializer.java      |    42 -
 .../src/jvm/storm/kafka/KafkaConfig.java        |    53 -
 .../src/jvm/storm/kafka/KafkaError.java         |    43 -
 .../src/jvm/storm/kafka/KafkaSpout.java         |   198 -
 .../src/jvm/storm/kafka/KafkaUtils.java         |   275 -
 .../src/jvm/storm/kafka/KeyValueScheme.java     |    27 -
 .../kafka/KeyValueSchemeAsMultiScheme.java      |    38 -
 .../jvm/storm/kafka/MessageMetadataScheme.java  |    27 -
 .../MessageMetadataSchemeAsMultiScheme.java     |    41 -
 .../src/jvm/storm/kafka/Partition.java          |    87 -
 .../jvm/storm/kafka/PartitionCoordinator.java   |    28 -
 .../src/jvm/storm/kafka/PartitionManager.java   |   316 -
 .../src/jvm/storm/kafka/SpoutConfig.java        |    46 -
 .../src/jvm/storm/kafka/StaticCoordinator.java  |    52 -
 .../src/jvm/storm/kafka/StaticHosts.java        |    38 -
 .../storm/kafka/StaticPartitionConnections.java |    52 -
 .../jvm/storm/kafka/StringKeyValueScheme.java   |    38 -
 .../kafka/StringMessageAndMetadataScheme.java   |    43 -
 .../storm/kafka/StringMultiSchemeWithTopic.java |    48 -
 .../src/jvm/storm/kafka/StringScheme.java       |    50 -
 .../kafka/TopicOffsetOutOfRangeException.java   |    25 -
 .../src/jvm/storm/kafka/ZkCoordinator.java      |   113 -
 .../src/jvm/storm/kafka/ZkHosts.java            |    36 -
 .../src/jvm/storm/kafka/ZkState.java            |   116 -
 .../src/jvm/storm/kafka/bolt/KafkaBolt.java     |   178 -
 .../FieldNameBasedTupleToKafkaMapper.java       |    48 -
 .../kafka/bolt/mapper/TupleToKafkaMapper.java   |    32 -
 .../bolt/selector/DefaultTopicSelector.java     |    34 -
 .../kafka/bolt/selector/KafkaTopicSelector.java |    26 -
 .../jvm/storm/kafka/trident/Coordinator.java    |    51 -
 .../storm/kafka/trident/DefaultCoordinator.java |    31 -
 .../trident/GlobalPartitionInformation.java     |   112 -
 .../storm/kafka/trident/IBatchCoordinator.java  |    26 -
 .../jvm/storm/kafka/trident/IBrokerReader.java  |    30 -
 .../src/jvm/storm/kafka/trident/MaxMetric.java  |    40 -
 .../kafka/trident/OpaqueTridentKafkaSpout.java  |    60 -
 .../storm/kafka/trident/StaticBrokerReader.java |    49 -
 .../trident/TransactionalTridentKafkaSpout.java |    58 -
 .../storm/kafka/trident/TridentKafkaConfig.java |    37 -
 .../kafka/trident/TridentKafkaEmitter.java      |   287 -
 .../storm/kafka/trident/TridentKafkaState.java  |   102 -
 .../kafka/trident/TridentKafkaStateFactory.java |    63 -
 .../kafka/trident/TridentKafkaUpdater.java      |    31 -
 .../jvm/storm/kafka/trident/ZkBrokerReader.java |    84 -
 .../FieldNameBasedTupleToKafkaMapper.java       |    41 -
 .../mapper/TridentTupleToKafkaMapper.java       |    28 -
 .../trident/selector/DefaultTopicSelector.java  |    34 -
 .../trident/selector/KafkaTopicSelector.java    |    26 -
 .../storm/kafka/DynamicBrokersReaderTest.java   |   252 +
 .../ExponentialBackoffMsgRetryManagerTest.java  |   235 +
 .../org/apache/storm/kafka/KafkaErrorTest.java  |    56 +
 .../org/apache/storm/kafka/KafkaTestBroker.java |    92 +
 .../org/apache/storm/kafka/KafkaUtilsTest.java  |   295 +
 .../storm/kafka/StringKeyValueSchemeTest.java   |    62 +
 .../apache/storm/kafka/TestStringScheme.java    |    40 +
 .../test/org/apache/storm/kafka/TestUtils.java  |   101 +
 .../apache/storm/kafka/TridentKafkaTest.java    |    81 +
 .../storm/kafka/TridentKafkaTopology.java       |    91 +
 .../apache/storm/kafka/ZkCoordinatorTest.java   |   148 +
 .../apache/storm/kafka/bolt/KafkaBoltTest.java  |   341 +
 .../storm/kafka/DynamicBrokersReaderTest.java   |   252 -
 .../ExponentialBackoffMsgRetryManagerTest.java  |   235 -
 .../src/test/storm/kafka/KafkaErrorTest.java    |    56 -
 .../src/test/storm/kafka/KafkaTestBroker.java   |    92 -
 .../src/test/storm/kafka/KafkaUtilsTest.java    |   295 -
 .../storm/kafka/StringKeyValueSchemeTest.java   |    62 -
 .../src/test/storm/kafka/TestStringScheme.java  |    40 -
 .../src/test/storm/kafka/TestUtils.java         |   101 -
 .../src/test/storm/kafka/TridentKafkaTest.java  |    81 -
 .../test/storm/kafka/TridentKafkaTopology.java  |    91 -
 .../src/test/storm/kafka/ZkCoordinatorTest.java |   148 -
 .../test/storm/kafka/bolt/KafkaBoltTest.java    |   341 -
 .../metrics/hdrhistogram/HistogramMetric.java   |     2 +-
 .../apache/storm/metrics/sigar/CPUMetric.java   |     2 +-
 .../storm/redis/bolt/AbstractRedisBolt.java     |     6 +-
 .../storm/redis/bolt/RedisLookupBolt.java       |     6 +-
 .../apache/storm/redis/bolt/RedisStoreBolt.java |     4 +-
 .../redis/common/mapper/RedisLookupMapper.java  |     6 +-
 .../storm/redis/common/mapper/TupleMapper.java  |     2 +-
 .../trident/state/AbstractRedisMapState.java    |    12 +-
 .../state/AbstractRedisStateQuerier.java        |    10 +-
 .../state/AbstractRedisStateUpdater.java        |     8 +-
 .../storm/redis/trident/state/Options.java      |     2 +-
 .../trident/state/RedisClusterMapState.java     |    28 +-
 .../redis/trident/state/RedisClusterState.java  |     6 +-
 .../redis/trident/state/RedisMapState.java      |    28 +-
 .../storm/redis/trident/state/RedisState.java   |     6 +-
 .../storm/redis/topology/LookupWordCount.java   |    24 +-
 .../redis/topology/PersistentWordCount.java     |    16 +-
 .../storm/redis/topology/WordCounter.java       |    18 +-
 .../apache/storm/redis/topology/WordSpout.java  |    12 +-
 .../storm/redis/trident/PrintFunction.java      |     6 +-
 .../redis/trident/WordCountLookupMapper.java    |    10 +-
 .../redis/trident/WordCountStoreMapper.java     |     4 +-
 .../redis/trident/WordCountTridentRedis.java    |    20 +-
 .../trident/WordCountTridentRedisCluster.java   |    20 +-
 .../WordCountTridentRedisClusterMap.java        |    26 +-
 .../redis/trident/WordCountTridentRedisMap.java |    26 +-
 .../apache/storm/solr/bolt/SolrUpdateBolt.java  |    10 +-
 .../storm/solr/mapper/SolrFieldsMapper.java     |     2 +-
 .../storm/solr/mapper/SolrJsonMapper.java       |     2 +-
 .../apache/storm/solr/mapper/SolrMapper.java    |     4 +-
 .../apache/storm/solr/trident/SolrState.java    |     6 +-
 .../storm/solr/trident/SolrStateFactory.java    |     6 +-
 .../apache/storm/solr/trident/SolrUpdater.java  |     6 +-
 .../storm/solr/spout/SolrFieldsSpout.java       |    12 +-
 .../apache/storm/solr/spout/SolrJsonSpout.java  |    12 +-
 .../storm/solr/topology/SolrFieldsTopology.java |     4 +-
 .../storm/solr/topology/SolrJsonTopology.java   |     4 +-
 .../storm/solr/topology/SolrTopology.java       |     8 +-
 .../solr/trident/SolrFieldsTridentTopology.java |    10 +-
 .../solr/trident/SolrJsonTridentTopology.java   |    10 +-
 log4j2/cluster.xml                              |     6 +-
 pom.xml                                         |     6 +-
 storm-core/pom.xml                              |     8 +-
 .../src/clj/backtype/storm/LocalCluster.clj     |   106 -
 storm-core/src/clj/backtype/storm/LocalDRPC.clj |    56 -
 .../src/clj/backtype/storm/MockAutoCred.clj     |    58 -
 storm-core/src/clj/backtype/storm/blobstore.clj |    28 -
 storm-core/src/clj/backtype/storm/clojure.clj   |   201 -
 storm-core/src/clj/backtype/storm/cluster.clj   |   691 -
 .../cluster_state/zookeeper_state_factory.clj   |   161 -
 .../src/clj/backtype/storm/command/activate.clj |    24 -
 .../clj/backtype/storm/command/blobstore.clj    |   162 -
 .../clj/backtype/storm/command/config_value.clj |    24 -
 .../clj/backtype/storm/command/deactivate.clj   |    24 -
 .../backtype/storm/command/dev_zookeeper.clj    |    26 -
 .../clj/backtype/storm/command/get_errors.clj   |    52 -
 .../clj/backtype/storm/command/healthcheck.clj  |    88 -
 .../clj/backtype/storm/command/heartbeats.clj   |    52 -
 .../backtype/storm/command/kill_topology.clj    |    29 -
 .../clj/backtype/storm/command/kill_workers.clj |    33 -
 .../src/clj/backtype/storm/command/list.clj     |    38 -
 .../src/clj/backtype/storm/command/monitor.clj  |    37 -
 .../clj/backtype/storm/command/rebalance.clj    |    46 -
 .../backtype/storm/command/set_log_level.clj    |    75 -
 .../backtype/storm/command/shell_submission.clj |    33 -
 .../storm/command/upload_credentials.clj        |    35 -
 storm-core/src/clj/backtype/storm/config.clj    |   331 -
 storm-core/src/clj/backtype/storm/converter.clj |   277 -
 .../src/clj/backtype/storm/daemon/acker.clj     |   107 -
 .../backtype/storm/daemon/builtin_metrics.clj   |    98 -
 .../src/clj/backtype/storm/daemon/common.clj    |   402 -
 .../src/clj/backtype/storm/daemon/drpc.clj      |   274 -
 .../src/clj/backtype/storm/daemon/executor.clj  |   855 -
 .../src/clj/backtype/storm/daemon/logviewer.clj |  1199 -
 .../src/clj/backtype/storm/daemon/nimbus.clj    |  2259 -
 .../clj/backtype/storm/daemon/supervisor.clj    |  1219 -
 .../src/clj/backtype/storm/daemon/task.clj      |   189 -
 .../src/clj/backtype/storm/daemon/worker.clj    |   763 -
 storm-core/src/clj/backtype/storm/disruptor.clj |    89 -
 storm-core/src/clj/backtype/storm/event.clj     |    71 -
 .../src/clj/backtype/storm/local_state.clj      |   131 -
 storm-core/src/clj/backtype/storm/log.clj       |    56 -
 .../src/clj/backtype/storm/messaging/loader.clj |    34 -
 .../src/clj/backtype/storm/messaging/local.clj  |    23 -
 .../src/clj/backtype/storm/metric/testing.clj   |    68 -
 .../clj/backtype/storm/process_simulator.clj    |    51 -
 .../storm/scheduler/DefaultScheduler.clj        |    77 -
 .../backtype/storm/scheduler/EvenScheduler.clj  |    81 -
 .../storm/scheduler/IsolationScheduler.clj      |   219 -
 storm-core/src/clj/backtype/storm/stats.clj     |  1521 -
 storm-core/src/clj/backtype/storm/testing.clj   |   701 -
 storm-core/src/clj/backtype/storm/testing4j.clj |   184 -
 storm-core/src/clj/backtype/storm/thrift.clj    |   284 -
 storm-core/src/clj/backtype/storm/timer.clj     |   128 -
 storm-core/src/clj/backtype/storm/ui/core.clj   |  1273 -
 .../src/clj/backtype/storm/ui/helpers.clj       |   240 -
 storm-core/src/clj/backtype/storm/util.clj      |  1118 -
 storm-core/src/clj/backtype/storm/zookeeper.clj |   308 -
 .../src/clj/org/apache/storm/LocalCluster.clj   |   106 +
 .../src/clj/org/apache/storm/LocalDRPC.clj      |    56 +
 .../src/clj/org/apache/storm/MockAutoCred.clj   |    58 +
 .../src/clj/org/apache/storm/blobstore.clj      |    28 +
 storm-core/src/clj/org/apache/storm/clojure.clj |   201 +
 storm-core/src/clj/org/apache/storm/cluster.clj |   691 +
 .../cluster_state/zookeeper_state_factory.clj   |   161 +
 .../clj/org/apache/storm/command/activate.clj   |    24 +
 .../clj/org/apache/storm/command/blobstore.clj  |   162 +
 .../org/apache/storm/command/config_value.clj   |    24 +
 .../clj/org/apache/storm/command/deactivate.clj |    24 +
 .../org/apache/storm/command/dev_zookeeper.clj  |    26 +
 .../clj/org/apache/storm/command/get_errors.clj |    52 +
 .../org/apache/storm/command/healthcheck.clj    |    88 +
 .../clj/org/apache/storm/command/heartbeats.clj |    52 +
 .../org/apache/storm/command/kill_topology.clj  |    29 +
 .../org/apache/storm/command/kill_workers.clj   |    33 +
 .../src/clj/org/apache/storm/command/list.clj   |    38 +
 .../clj/org/apache/storm/command/monitor.clj    |    37 +
 .../clj/org/apache/storm/command/rebalance.clj  |    46 +
 .../org/apache/storm/command/set_log_level.clj  |    75 +
 .../apache/storm/command/shell_submission.clj   |    33 +
 .../apache/storm/command/upload_credentials.clj |    35 +
 storm-core/src/clj/org/apache/storm/config.clj  |   331 +
 .../src/clj/org/apache/storm/converter.clj      |   277 +
 .../src/clj/org/apache/storm/daemon/acker.clj   |   107 +
 .../org/apache/storm/daemon/builtin_metrics.clj |    98 +
 .../src/clj/org/apache/storm/daemon/common.clj  |   402 +
 .../src/clj/org/apache/storm/daemon/drpc.clj    |   274 +
 .../clj/org/apache/storm/daemon/executor.clj    |   855 +
 .../clj/org/apache/storm/daemon/logviewer.clj   |  1199 +
 .../src/clj/org/apache/storm/daemon/nimbus.clj  |  2259 +
 .../clj/org/apache/storm/daemon/supervisor.clj  |  1219 +
 .../src/clj/org/apache/storm/daemon/task.clj    |   189 +
 .../src/clj/org/apache/storm/daemon/worker.clj  |   763 +
 .../src/clj/org/apache/storm/disruptor.clj      |    89 +
 storm-core/src/clj/org/apache/storm/event.clj   |    71 +
 .../src/clj/org/apache/storm/local_state.clj    |   131 +
 storm-core/src/clj/org/apache/storm/log.clj     |    56 +
 .../clj/org/apache/storm/messaging/loader.clj   |    34 +
 .../clj/org/apache/storm/messaging/local.clj    |    23 +
 .../src/clj/org/apache/storm/metric/testing.clj |    68 +
 .../org/apache/storm/pacemaker/pacemaker.clj    |     6 +-
 .../storm/pacemaker/pacemaker_state_factory.clj |    12 +-
 .../clj/org/apache/storm/process_simulator.clj  |    51 +
 .../apache/storm/scheduler/DefaultScheduler.clj |    77 +
 .../apache/storm/scheduler/EvenScheduler.clj    |    81 +
 .../storm/scheduler/IsolationScheduler.clj      |   219 +
 storm-core/src/clj/org/apache/storm/stats.clj   |  1521 +
 storm-core/src/clj/org/apache/storm/testing.clj |   701 +
 .../src/clj/org/apache/storm/testing4j.clj      |   184 +
 storm-core/src/clj/org/apache/storm/thrift.clj  |   284 +
 storm-core/src/clj/org/apache/storm/timer.clj   |   128 +
 .../clj/org/apache/storm/trident/testing.clj    |    79 +
 storm-core/src/clj/org/apache/storm/ui/core.clj |  1273 +
 .../src/clj/org/apache/storm/ui/helpers.clj     |   240 +
 storm-core/src/clj/org/apache/storm/util.clj    |  1118 +
 .../src/clj/org/apache/storm/zookeeper.clj      |   308 +
 storm-core/src/clj/storm/trident/testing.clj    |    79 -
 storm-core/src/genthrift.sh                     |     6 +-
 storm-core/src/jvm/backtype/storm/Config.java   |  2335 -
 .../src/jvm/backtype/storm/Constants.java       |    36 -
 .../backtype/storm/ICredentialsListener.java    |    32 -
 .../src/jvm/backtype/storm/ILocalCluster.java   |    49 -
 .../src/jvm/backtype/storm/ILocalDRPC.java      |    27 -
 .../src/jvm/backtype/storm/ISubmitterHook.java  |    31 -
 .../src/jvm/backtype/storm/LogWriter.java       |    83 -
 .../src/jvm/backtype/storm/StormSubmitter.java  |   496 -
 .../storm/blobstore/AtomicOutputStream.java     |    32 -
 .../storm/blobstore/BlobKeySequenceInfo.java    |    40 -
 .../jvm/backtype/storm/blobstore/BlobStore.java |   447 -
 .../storm/blobstore/BlobStoreAclHandler.java    |   399 -
 .../backtype/storm/blobstore/BlobStoreFile.java |    50 -
 .../storm/blobstore/BlobStoreUtils.java         |   257 -
 .../storm/blobstore/BlobSynchronizer.java       |   124 -
 .../storm/blobstore/ClientBlobStore.java        |   184 -
 .../storm/blobstore/FileBlobStoreImpl.java      |   248 -
 .../storm/blobstore/InputStreamWithMeta.java    |    26 -
 .../jvm/backtype/storm/blobstore/KeyFilter.java |    22 -
 .../storm/blobstore/KeySequenceNumber.java      |   229 -
 .../storm/blobstore/LocalFsBlobStore.java       |   311 -
 .../storm/blobstore/LocalFsBlobStoreFile.java   |   159 -
 .../storm/blobstore/NimbusBlobStore.java        |   420 -
 .../jvm/backtype/storm/clojure/ClojureBolt.java |   119 -
 .../backtype/storm/clojure/ClojureSpout.java    |   153 -
 .../backtype/storm/clojure/RichShellBolt.java   |    51 -
 .../backtype/storm/clojure/RichShellSpout.java  |    51 -
 .../backtype/storm/cluster/ClusterState.java    |   217 -
 .../storm/cluster/ClusterStateContext.java      |    41 -
 .../storm/cluster/ClusterStateFactory.java      |    28 -
 .../storm/cluster/ClusterStateListener.java     |    22 -
 .../backtype/storm/cluster/ConnectionState.java |    24 -
 .../jvm/backtype/storm/cluster/DaemonType.java  |    27 -
 .../storm/coordination/BatchBoltExecutor.java   |   108 -
 .../coordination/BatchOutputCollector.java      |    46 -
 .../coordination/BatchOutputCollectorImpl.java  |    53 -
 .../coordination/BatchSubtopologyBuilder.java   |   447 -
 .../storm/coordination/CoordinatedBolt.java     |   382 -
 .../backtype/storm/coordination/IBatchBolt.java |    30 -
 .../daemon/ClientJarTransformerRunner.java      |    41 -
 .../backtype/storm/daemon/DirectoryCleaner.java |   177 -
 .../backtype/storm/daemon/JarTransformer.java   |    31 -
 .../jvm/backtype/storm/daemon/Shutdownable.java |    22 -
 .../storm/drpc/DRPCInvocationsClient.java       |   113 -
 .../src/jvm/backtype/storm/drpc/DRPCSpout.java  |   261 -
 .../src/jvm/backtype/storm/drpc/JoinResult.java |    75 -
 .../jvm/backtype/storm/drpc/KeyedFairBolt.java  |    93 -
 .../storm/drpc/LinearDRPCInputDeclarer.java     |    52 -
 .../storm/drpc/LinearDRPCTopologyBuilder.java   |   393 -
 .../jvm/backtype/storm/drpc/PrepareRequest.java |    59 -
 .../jvm/backtype/storm/drpc/ReturnResults.java  |   124 -
 .../backtype/storm/generated/AccessControl.java |   627 -
 .../storm/generated/AccessControlType.java      |    62 -
 .../storm/generated/AlreadyAliveException.java  |   406 -
 .../backtype/storm/generated/Assignment.java    |  1159 -
 .../storm/generated/AuthorizationException.java |   406 -
 .../storm/generated/BeginDownloadResult.java    |   608 -
 .../src/jvm/backtype/storm/generated/Bolt.java  |   514 -
 .../storm/generated/BoltAggregateStats.java     |   704 -
 .../jvm/backtype/storm/generated/BoltStats.java |  1390 -
 .../storm/generated/ClusterSummary.java         |   879 -
 .../storm/generated/ClusterWorkerHeartbeat.java |   768 -
 .../storm/generated/CommonAggregateStats.java   |   902 -
 .../generated/ComponentAggregateStats.java      |   752 -
 .../storm/generated/ComponentCommon.java        |   852 -
 .../storm/generated/ComponentObject.java        |   462 -
 .../storm/generated/ComponentPageInfo.java      |  2194 -
 .../backtype/storm/generated/ComponentType.java |    62 -
 .../backtype/storm/generated/Credentials.java   |   458 -
 .../storm/generated/DRPCExecutionException.java |   406 -
 .../backtype/storm/generated/DRPCRequest.java   |   507 -
 .../backtype/storm/generated/DebugOptions.java  |   506 -
 .../storm/generated/DistributedRPC.java         |  1328 -
 .../generated/DistributedRPCInvocations.java    |  2935 --
 .../jvm/backtype/storm/generated/ErrorInfo.java |   714 -
 .../storm/generated/ExecutorAggregateStats.java |   526 -
 .../backtype/storm/generated/ExecutorInfo.java  |   499 -
 .../storm/generated/ExecutorSpecificStats.java  |   387 -
 .../backtype/storm/generated/ExecutorStats.java |   915 -
 .../storm/generated/ExecutorSummary.java        |   922 -
 .../storm/generated/GetInfoOptions.java         |   422 -
 .../storm/generated/GlobalStreamId.java         |   507 -
 .../jvm/backtype/storm/generated/Grouping.java  |   800 -
 .../generated/HBAuthorizationException.java     |   406 -
 .../storm/generated/HBExecutionException.java   |   406 -
 .../jvm/backtype/storm/generated/HBMessage.java |   636 -
 .../backtype/storm/generated/HBMessageData.java |   640 -
 .../jvm/backtype/storm/generated/HBNodes.java   |   461 -
 .../jvm/backtype/storm/generated/HBPulse.java   |   522 -
 .../jvm/backtype/storm/generated/HBRecords.java |   466 -
 .../storm/generated/HBServerMessageType.java    |   113 -
 .../generated/InvalidTopologyException.java     |   406 -
 .../backtype/storm/generated/JavaObject.java    |   561 -
 .../backtype/storm/generated/JavaObjectArg.java |   631 -
 .../generated/KeyAlreadyExistsException.java    |   406 -
 .../storm/generated/KeyNotFoundException.java   |   406 -
 .../backtype/storm/generated/KillOptions.java   |   407 -
 .../storm/generated/LSApprovedWorkers.java      |   458 -
 .../generated/LSSupervisorAssignments.java      |   471 -
 .../storm/generated/LSSupervisorId.java         |   406 -
 .../backtype/storm/generated/LSTopoHistory.java |   805 -
 .../storm/generated/LSTopoHistoryList.java      |   460 -
 .../storm/generated/LSWorkerHeartbeat.java      |   755 -
 .../storm/generated/ListBlobsResult.java        |   556 -
 .../storm/generated/LocalAssignment.java        |   676 -
 .../storm/generated/LocalStateData.java         |   471 -
 .../jvm/backtype/storm/generated/LogConfig.java |   475 -
 .../jvm/backtype/storm/generated/LogLevel.java  |   836 -
 .../storm/generated/LogLevelAction.java         |    65 -
 .../jvm/backtype/storm/generated/Nimbus.java    | 44114 -----------------
 .../backtype/storm/generated/NimbusSummary.java |   796 -
 .../jvm/backtype/storm/generated/NodeInfo.java  |   556 -
 .../storm/generated/NotAliveException.java      |   406 -
 .../backtype/storm/generated/NullStruct.java    |   300 -
 .../storm/generated/NumErrorsChoice.java        |    65 -
 .../backtype/storm/generated/ProfileAction.java |    74 -
 .../storm/generated/ProfileRequest.java         |   631 -
 .../storm/generated/ReadableBlobMeta.java       |   510 -
 .../storm/generated/RebalanceOptions.java       |   664 -
 .../storm/generated/SettableBlobMeta.java       |   567 -
 .../storm/generated/ShellComponent.java         |   516 -
 .../storm/generated/SpecificAggregateStats.java |   387 -
 .../storm/generated/SpoutAggregateStats.java    |   407 -
 .../jvm/backtype/storm/generated/SpoutSpec.java |   514 -
 .../backtype/storm/generated/SpoutStats.java    |   917 -
 .../storm/generated/StateSpoutSpec.java         |   514 -
 .../jvm/backtype/storm/generated/StormBase.java |  1382 -
 .../backtype/storm/generated/StormTopology.java |   962 -
 .../backtype/storm/generated/StreamInfo.java    |   554 -
 .../backtype/storm/generated/SubmitOptions.java |   533 -
 .../storm/generated/SupervisorInfo.java         |  1446 -
 .../storm/generated/SupervisorSummary.java      |  1265 -
 .../storm/generated/ThriftSerializedObject.java |   516 -
 .../storm/generated/TopologyActionOptions.java  |   387 -
 .../storm/generated/TopologyHistoryInfo.java    |   461 -
 .../backtype/storm/generated/TopologyInfo.java  |  2144 -
 .../storm/generated/TopologyInitialStatus.java  |    62 -
 .../storm/generated/TopologyPageInfo.java       |  2597 -
 .../backtype/storm/generated/TopologyStats.java |  1094 -
 .../storm/generated/TopologyStatus.java         |    68 -
 .../storm/generated/TopologySummary.java        |  1901 -
 .../storm/generated/WorkerResources.java        |   605 -
 .../storm/grouping/CustomStreamGrouping.java    |    43 -
 .../src/jvm/backtype/storm/grouping/Load.java   |    77 -
 .../grouping/LoadAwareCustomStreamGrouping.java |    24 -
 .../grouping/LoadAwareShuffleGrouping.java      |    76 -
 .../backtype/storm/grouping/LoadMapping.java    |    64 -
 .../storm/grouping/PartialKeyGrouping.java      |   106 -
 .../storm/grouping/ShuffleGrouping.java         |    65 -
 .../jvm/backtype/storm/hooks/BaseTaskHook.java  |    61 -
 .../backtype/storm/hooks/BaseWorkerHook.java    |    51 -
 .../src/jvm/backtype/storm/hooks/ITaskHook.java |    38 -
 .../jvm/backtype/storm/hooks/IWorkerHook.java   |    44 -
 .../backtype/storm/hooks/info/BoltAckInfo.java  |    32 -
 .../storm/hooks/info/BoltExecuteInfo.java       |    32 -
 .../backtype/storm/hooks/info/BoltFailInfo.java |    32 -
 .../jvm/backtype/storm/hooks/info/EmitInfo.java |    35 -
 .../backtype/storm/hooks/info/SpoutAckInfo.java |    30 -
 .../storm/hooks/info/SpoutFailInfo.java         |    30 -
 .../backtype/storm/localizer/LocalResource.java |    44 -
 .../storm/localizer/LocalizedResource.java      |   130 -
 .../LocalizedResourceRetentionSet.java          |   140 -
 .../storm/localizer/LocalizedResourceSet.java   |   101 -
 .../jvm/backtype/storm/localizer/Localizer.java |   695 -
 .../storm/logging/ThriftAccessLogger.java       |    27 -
 .../logging/filters/AccessLoggingFilter.java    |    52 -
 .../storm/messaging/ConnectionWithStatus.java   |    49 -
 .../DeserializingConnectionCallback.java        |    60 -
 .../backtype/storm/messaging/IConnection.java   |    63 -
 .../storm/messaging/IConnectionCallback.java    |    31 -
 .../jvm/backtype/storm/messaging/IContext.java  |    59 -
 .../backtype/storm/messaging/TaskMessage.java   |    53 -
 .../storm/messaging/TransportFactory.java       |    57 -
 .../backtype/storm/messaging/local/Context.java |   164 -
 .../backtype/storm/messaging/netty/Client.java  |   578 -
 .../backtype/storm/messaging/netty/Context.java |   113 -
 .../storm/messaging/netty/ControlMessage.java   |    75 -
 .../messaging/netty/INettySerializable.java     |    26 -
 .../storm/messaging/netty/ISaslClient.java      |    28 -
 .../storm/messaging/netty/ISaslServer.java      |    26 -
 .../backtype/storm/messaging/netty/IServer.java |    26 -
 .../netty/KerberosSaslClientHandler.java        |   152 -
 .../netty/KerberosSaslNettyClient.java          |   203 -
 .../netty/KerberosSaslNettyClientState.java     |    31 -
 .../netty/KerberosSaslNettyServer.java          |   210 -
 .../netty/KerberosSaslNettyServerState.java     |    30 -
 .../netty/KerberosSaslServerHandler.java        |   133 -
 .../storm/messaging/netty/MessageBatch.java     |   118 -
 .../storm/messaging/netty/MessageBuffer.java    |    58 -
 .../storm/messaging/netty/MessageDecoder.java   |   144 -
 .../storm/messaging/netty/MessageEncoder.java   |    43 -
 .../netty/NettyRenameThreadFactory.java         |    56 -
 .../netty/NettyUncaughtExceptionHandler.java    |    35 -
 .../storm/messaging/netty/SaslMessageToken.java |   114 -
 .../storm/messaging/netty/SaslNettyClient.java  |   154 -
 .../messaging/netty/SaslNettyClientState.java   |    31 -
 .../storm/messaging/netty/SaslNettyServer.java  |   157 -
 .../messaging/netty/SaslNettyServerState.java   |    30 -
 .../messaging/netty/SaslStormClientHandler.java |   158 -
 .../netty/SaslStormServerAuthorizeHandler.java  |    83 -
 .../messaging/netty/SaslStormServerHandler.java |   153 -
 .../storm/messaging/netty/SaslUtils.java        |    68 -
 .../backtype/storm/messaging/netty/Server.java  |   273 -
 .../messaging/netty/StormClientHandler.java     |    91 -
 .../netty/StormClientPipelineFactory.java       |    56 -
 .../messaging/netty/StormServerHandler.java     |    74 -
 .../netty/StormServerPipelineFactory.java       |    57 -
 .../backtype/storm/metric/EventLoggerBolt.java  |    67 -
 .../storm/metric/FileBasedEventLogger.java      |   132 -
 .../metric/HttpForwardingMetricsConsumer.java   |    85 -
 .../metric/HttpForwardingMetricsServer.java     |   118 -
 .../jvm/backtype/storm/metric/IEventLogger.java |    70 -
 .../storm/metric/LoggingMetricsConsumer.java    |    76 -
 .../storm/metric/MetricsConsumerBolt.java       |    63 -
 .../jvm/backtype/storm/metric/SystemBolt.java   |   165 -
 .../storm/metric/api/AssignableMetric.java      |    34 -
 .../storm/metric/api/CombinedMetric.java        |    38 -
 .../backtype/storm/metric/api/CountMetric.java  |    39 -
 .../backtype/storm/metric/api/ICombiner.java    |    23 -
 .../jvm/backtype/storm/metric/api/IMetric.java  |    22 -
 .../storm/metric/api/IMetricsConsumer.java      |    60 -
 .../jvm/backtype/storm/metric/api/IReducer.java |    24 -
 .../storm/metric/api/IStatefulObject.java       |    22 -
 .../backtype/storm/metric/api/MeanReducer.java  |    53 -
 .../storm/metric/api/MultiCountMetric.java      |    45 -
 .../storm/metric/api/MultiReducedMetric.java    |    50 -
 .../storm/metric/api/ReducedMetric.java         |    38 -
 .../backtype/storm/metric/api/StateMetric.java  |    31 -
 .../metric/api/rpc/AssignableShellMetric.java   |    30 -
 .../metric/api/rpc/CombinedShellMetric.java     |    31 -
 .../storm/metric/api/rpc/CountShellMetric.java  |    37 -
 .../storm/metric/api/rpc/IShellMetric.java      |    31 -
 .../metric/api/rpc/ReducedShellMetric.java      |    32 -
 .../metric/internal/CountStatAndMetric.java     |   211 -
 .../metric/internal/LatencyStatAndMetric.java   |   262 -
 .../storm/metric/internal/MetricStatTimer.java  |    27 -
 .../internal/MultiCountStatAndMetric.java       |   112 -
 .../internal/MultiLatencyStatAndMetric.java     |   109 -
 .../storm/metric/internal/RateTracker.java      |   165 -
 .../jvm/backtype/storm/multilang/BoltMsg.java   |    79 -
 .../backtype/storm/multilang/ISerializer.java   |    82 -
 .../storm/multilang/JsonSerializer.java         |   204 -
 .../storm/multilang/NoOutputException.java      |    40 -
 .../jvm/backtype/storm/multilang/ShellMsg.java  |   184 -
 .../jvm/backtype/storm/multilang/SpoutMsg.java  |    50 -
 .../AbstractDNSToSwitchMapping.java             |    96 -
 .../networktopography/DNSToSwitchMapping.java   |    49 -
 .../DefaultRackDNSToSwitchMapping.java          |    52 -
 .../storm/nimbus/DefaultTopologyValidator.java  |    31 -
 .../backtype/storm/nimbus/ILeaderElector.java   |    73 -
 .../nimbus/ITopologyActionNotifierPlugin.java   |    43 -
 .../storm/nimbus/ITopologyValidator.java        |    28 -
 .../jvm/backtype/storm/nimbus/NimbusInfo.java   |   116 -
 .../backtype/storm/planner/CompoundSpout.java   |    25 -
 .../backtype/storm/planner/CompoundTask.java    |    25 -
 .../jvm/backtype/storm/planner/TaskBundle.java  |    33 -
 .../jvm/backtype/storm/scheduler/Cluster.java   |   684 -
 .../storm/scheduler/ExecutorDetails.java        |    54 -
 .../jvm/backtype/storm/scheduler/INimbus.java   |    49 -
 .../backtype/storm/scheduler/IScheduler.java    |    40 -
 .../backtype/storm/scheduler/ISupervisor.java   |    45 -
 .../storm/scheduler/SchedulerAssignment.java    |    58 -
 .../scheduler/SchedulerAssignmentImpl.java      |   105 -
 .../storm/scheduler/SupervisorDetails.java      |   132 -
 .../backtype/storm/scheduler/Topologies.java    |    84 -
 .../storm/scheduler/TopologyDetails.java        |   515 -
 .../backtype/storm/scheduler/WorkerSlot.java    |    77 -
 .../scheduler/multitenant/DefaultPool.java      |   219 -
 .../storm/scheduler/multitenant/FreePool.java   |   125 -
 .../scheduler/multitenant/IsolatedPool.java     |   363 -
 .../multitenant/MultitenantScheduler.java       |    98 -
 .../storm/scheduler/multitenant/Node.java       |   340 -
 .../storm/scheduler/multitenant/NodePool.java   |   296 -
 .../storm/scheduler/resource/Component.java     |    54 -
 .../storm/scheduler/resource/RAS_Node.java      |   528 -
 .../storm/scheduler/resource/RAS_Nodes.java     |   160 -
 .../resource/ResourceAwareScheduler.java        |   414 -
 .../storm/scheduler/resource/ResourceUtils.java |   184 -
 .../scheduler/resource/SchedulingResult.java    |   116 -
 .../scheduler/resource/SchedulingStatus.java    |    40 -
 .../backtype/storm/scheduler/resource/User.java |   348 -
 .../eviction/DefaultEvictionStrategy.java       |   126 -
 .../strategies/eviction/IEvictionStrategy.java  |    47 -
 .../DefaultSchedulingPriorityStrategy.java      |    81 -
 .../priority/ISchedulingPriorityStrategy.java   |    41 -
 .../DefaultResourceAwareStrategy.java           |   488 -
 .../strategies/scheduling/IStrategy.java        |    50 -
 .../storm/security/INimbusCredentialPlugin.java |    47 -
 .../backtype/storm/security/auth/AuthUtils.java |   348 -
 .../auth/DefaultHttpCredentialsPlugin.java      |    96 -
 .../security/auth/DefaultPrincipalToLocal.java  |    42 -
 .../storm/security/auth/IAuthorizer.java        |    53 -
 .../storm/security/auth/IAutoCredentials.java   |    55 -
 .../security/auth/ICredentialsRenewer.java      |    40 -
 .../auth/IGroupMappingServiceProvider.java      |    42 -
 .../security/auth/IHttpCredentialsPlugin.java   |    48 -
 .../storm/security/auth/IPrincipalToLocal.java  |    41 -
 .../storm/security/auth/ITransportPlugin.java   |    57 -
 .../security/auth/KerberosPrincipalToLocal.java |    45 -
 .../storm/security/auth/NimbusPrincipal.java    |    29 -
 .../storm/security/auth/ReqContext.java         |   154 -
 .../security/auth/SaslTransportPlugin.java      |   174 -
 .../security/auth/ShellBasedGroupsMapping.java  |    94 -
 .../security/auth/SimpleTransportPlugin.java    |   161 -
 .../security/auth/SingleUserPrincipal.java      |    53 -
 .../storm/security/auth/TBackoffConnect.java    |    76 -
 .../storm/security/auth/ThriftClient.java       |   114 -
 .../security/auth/ThriftConnectionType.java     |    77 -
 .../storm/security/auth/ThriftServer.java       |    78 -
 .../auth/authorizer/DRPCAuthorizerBase.java     |    63 -
 .../authorizer/DRPCSimpleACLAuthorizer.java     |   174 -
 .../auth/authorizer/DenyAuthorizer.java         |    47 -
 .../authorizer/ImpersonationAuthorizer.java     |   172 -
 .../auth/authorizer/NoopAuthorizer.java         |    47 -
 .../auth/authorizer/SimpleACLAuthorizer.java    |   167 -
 .../authorizer/SimpleWhitelistAuthorizer.java   |    60 -
 .../auth/digest/ClientCallbackHandler.java      |   108 -
 .../auth/digest/DigestSaslTransportPlugin.java  |    68 -
 .../auth/digest/ServerCallbackHandler.java      |   132 -
 .../storm/security/auth/kerberos/AutoTGT.java   |   277 -
 .../auth/kerberos/AutoTGTKrb5LoginModule.java   |   108 -
 .../kerberos/AutoTGTKrb5LoginModuleTest.java    |    44 -
 .../auth/kerberos/ClientCallbackHandler.java    |   104 -
 .../kerberos/KerberosSaslTransportPlugin.java   |   210 -
 .../security/auth/kerberos/NoOpTTrasport.java   |    54 -
 .../auth/kerberos/ServerCallbackHandler.java    |    96 -
 .../auth/kerberos/jaas_kerberos_cluster.conf    |    49 -
 .../auth/kerberos/jaas_kerberos_launcher.conf   |    31 -
 .../serialization/BlowfishTupleSerializer.java  |    92 -
 .../storm/serialization/DefaultKryoFactory.java |    64 -
 .../DefaultSerializationDelegate.java           |    58 -
 .../GzipBridgeSerializationDelegate.java        |    65 -
 .../GzipBridgeThriftSerializationDelegate.java  |    64 -
 .../GzipSerializationDelegate.java              |    64 -
 .../GzipThriftSerializationDelegate.java        |    56 -
 .../storm/serialization/IKryoDecorator.java     |    23 -
 .../storm/serialization/IKryoFactory.java       |    40 -
 .../storm/serialization/ITupleDeserializer.java |    24 -
 .../storm/serialization/ITupleSerializer.java   |    26 -
 .../serialization/KryoTupleDeserializer.java    |    56 -
 .../serialization/KryoTupleSerializer.java      |    60 -
 .../serialization/KryoValuesDeserializer.java   |    50 -
 .../serialization/KryoValuesSerializer.java     |    58 -
 .../serialization/SerializableSerializer.java   |    61 -
 .../serialization/SerializationDelegate.java    |    35 -
 .../serialization/SerializationFactory.java     |   223 -
 .../ThriftSerializationDelegate.java            |    52 -
 .../types/ArrayListSerializer.java              |    32 -
 .../serialization/types/HashMapSerializer.java  |    32 -
 .../serialization/types/HashSetSerializer.java  |    32 -
 .../types/ListDelegateSerializer.java           |    32 -
 .../storm/spout/IMultiSchemableSpout.java       |    23 -
 .../backtype/storm/spout/ISchemableSpout.java   |    24 -
 .../src/jvm/backtype/storm/spout/ISpout.java    |   105 -
 .../storm/spout/ISpoutOutputCollector.java      |    32 -
 .../storm/spout/ISpoutWaitStrategy.java         |    34 -
 .../jvm/backtype/storm/spout/MultiScheme.java   |    29 -
 .../storm/spout/NothingEmptyEmitStrategy.java   |    31 -
 .../backtype/storm/spout/RawMultiScheme.java    |    40 -
 .../src/jvm/backtype/storm/spout/RawScheme.java |    37 -
 .../src/jvm/backtype/storm/spout/Scheme.java    |    29 -
 .../storm/spout/SchemeAsMultiScheme.java        |    42 -
 .../jvm/backtype/storm/spout/ShellSpout.java    |   280 -
 .../storm/spout/SleepSpoutWaitStrategy.java     |    41 -
 .../storm/spout/SpoutOutputCollector.java       |   139 -
 .../jvm/backtype/storm/state/IStateSpout.java   |    29 -
 .../storm/state/IStateSpoutOutputCollector.java |    22 -
 .../backtype/storm/state/ISubscribedState.java  |    25 -
 .../state/ISynchronizeOutputCollector.java      |    24 -
 .../storm/state/StateSpoutOutputCollector.java  |    28 -
 .../storm/state/SynchronizeOutputCollector.java |    30 -
 .../storm/task/GeneralTopologyContext.java      |   199 -
 .../src/jvm/backtype/storm/task/IBolt.java      |    84 -
 .../jvm/backtype/storm/task/IErrorReporter.java |    22 -
 .../backtype/storm/task/IMetricsContext.java    |    31 -
 .../backtype/storm/task/IOutputCollector.java   |    32 -
 .../backtype/storm/task/OutputCollector.java    |   225 -
 .../src/jvm/backtype/storm/task/ShellBolt.java  |   406 -
 .../backtype/storm/task/TopologyContext.java    |   389 -
 .../storm/task/WorkerTopologyContext.java       |   106 -
 .../backtype/storm/testing/AckFailDelegate.java |    25 -
 .../storm/testing/AckFailMapTracker.java        |    52 -
 .../jvm/backtype/storm/testing/AckTracker.java  |    52 -
 .../AlternateRackDNSToSwitchMapping.java        |    65 -
 .../backtype/storm/testing/BatchNumberList.java |    72 -
 .../storm/testing/BatchProcessWord.java         |    39 -
 .../backtype/storm/testing/BatchRepeatA.java    |    48 -
 .../jvm/backtype/storm/testing/BoltTracker.java |    42 -
 .../storm/testing/CompleteTopologyParam.java    |    87 -
 .../storm/testing/CountingBatchBolt.java        |    55 -
 .../storm/testing/CountingCommitBolt.java       |    57 -
 .../jvm/backtype/storm/testing/FeederSpout.java |   100 -
 .../jvm/backtype/storm/testing/FixedTuple.java  |    42 -
 .../backtype/storm/testing/FixedTupleSpout.java |   179 -
 .../testing/ForwardingMetricsConsumer.java      |   100 -
 .../backtype/storm/testing/IdentityBolt.java    |    42 -
 .../backtype/storm/testing/IntegrationTest.java |    38 -
 .../storm/testing/KeyedCountingBatchBolt.java   |    61 -
 .../testing/KeyedCountingCommitterBolt.java     |    24 -
 .../storm/testing/KeyedSummingBatchBolt.java    |    61 -
 .../storm/testing/MemoryTransactionalSpout.java |   180 -
 .../testing/MemoryTransactionalSpoutMeta.java   |    38 -
 .../backtype/storm/testing/MkClusterParam.java  |    57 -
 .../backtype/storm/testing/MkTupleParam.java    |    51 -
 .../backtype/storm/testing/MockedSources.java   |    60 -
 .../jvm/backtype/storm/testing/NGrouping.java   |    50 -
 .../storm/testing/NonRichBoltTracker.java       |    51 -
 .../testing/OpaqueMemoryTransactionalSpout.java |   186 -
 .../storm/testing/PrepareBatchBolt.java         |    52 -
 .../storm/testing/PythonShellMetricsBolt.java   |    49 -
 .../storm/testing/PythonShellMetricsSpout.java  |    52 -
 .../testing/SingleUserSimpleTransport.java      |    37 -
 .../backtype/storm/testing/SpoutTracker.java    |   111 -
 .../storm/testing/TestAggregatesCounter.java    |    63 -
 .../backtype/storm/testing/TestConfBolt.java    |    62 -
 .../storm/testing/TestEventLogSpout.java        |   139 -
 .../storm/testing/TestEventOrderCheckBolt.java  |    76 -
 .../backtype/storm/testing/TestGlobalCount.java |    60 -
 .../src/jvm/backtype/storm/testing/TestJob.java |    43 -
 .../storm/testing/TestKryoDecorator.java        |    31 -
 .../backtype/storm/testing/TestPlannerBolt.java |    45 -
 .../storm/testing/TestPlannerSpout.java         |    85 -
 .../backtype/storm/testing/TestSerObject.java   |    37 -
 .../storm/testing/TestWordBytesCounter.java     |    27 -
 .../backtype/storm/testing/TestWordCounter.java |    65 -
 .../backtype/storm/testing/TestWordSpout.java   |    86 -
 .../backtype/storm/testing/TrackedTopology.java |    34 -
 .../storm/testing/TupleCaptureBolt.java         |    83 -
 .../topology/BaseConfigurationDeclarer.java     |    83 -
 .../storm/topology/BasicBoltExecutor.java       |    67 -
 .../storm/topology/BasicOutputCollector.java    |    62 -
 .../backtype/storm/topology/BoltDeclarer.java   |    26 -
 .../ComponentConfigurationDeclarer.java         |    32 -
 .../storm/topology/FailedException.java         |    36 -
 .../jvm/backtype/storm/topology/IBasicBolt.java |    33 -
 .../storm/topology/IBasicOutputCollector.java   |    27 -
 .../jvm/backtype/storm/topology/IComponent.java |    44 -
 .../jvm/backtype/storm/topology/IRichBolt.java  |    29 -
 .../jvm/backtype/storm/topology/IRichSpout.java |    29 -
 .../storm/topology/IRichStateSpout.java         |    25 -
 .../backtype/storm/topology/IWindowedBolt.java  |    40 -
 .../backtype/storm/topology/InputDeclarer.java  |   184 -
 .../storm/topology/OutputFieldsDeclarer.java    |    32 -
 .../storm/topology/OutputFieldsGetter.java      |    53 -
 .../storm/topology/ReportedFailedException.java |    36 -
 .../backtype/storm/topology/SpoutDeclarer.java  |    22 -
 .../storm/topology/TopologyBuilder.java         |   433 -
 .../storm/topology/WindowedBoltExecutor.java    |   308 -
 .../storm/topology/base/BaseBasicBolt.java      |    33 -
 .../storm/topology/base/BaseBatchBolt.java      |    24 -
 .../storm/topology/base/BaseComponent.java      |    28 -
 ...BaseOpaquePartitionedTransactionalSpout.java |    25 -
 .../base/BasePartitionedTransactionalSpout.java |    25 -
 .../storm/topology/base/BaseRichBolt.java       |    26 -
 .../storm/topology/base/BaseRichSpout.java      |    46 -
 .../topology/base/BaseTransactionalBolt.java    |    24 -
 .../topology/base/BaseTransactionalSpout.java   |    24 -
 .../storm/topology/base/BaseWindowedBolt.java   |   212 -
 .../storm/transactional/ICommitter.java         |    26 -
 .../ICommitterTransactionalSpout.java           |    31 -
 .../transactional/ITransactionalSpout.java      |    94 -
 .../storm/transactional/TransactionAttempt.java |    61 -
 .../TransactionalSpoutBatchExecutor.java        |    96 -
 .../TransactionalSpoutCoordinator.java          |   217 -
 .../TransactionalTopologyBuilder.java           |   521 -
 .../IOpaquePartitionedTransactionalSpout.java   |    56 -
 .../IPartitionedTransactionalSpout.java         |    69 -
 ...uePartitionedTransactionalSpoutExecutor.java |   160 -
 .../PartitionedTransactionalSpoutExecutor.java  |   142 -
 .../state/RotatingTransactionalState.java       |   149 -
 .../state/TestTransactionalState.java           |    47 -
 .../transactional/state/TransactionalState.java |   173 -
 .../backtype/storm/tuple/AddressedTuple.java    |    48 -
 .../src/jvm/backtype/storm/tuple/Fields.java    |   117 -
 .../src/jvm/backtype/storm/tuple/ITuple.java    |   214 -
 .../src/jvm/backtype/storm/tuple/MessageId.java |    94 -
 .../src/jvm/backtype/storm/tuple/Tuple.java     |    68 -
 .../src/jvm/backtype/storm/tuple/TupleImpl.java |   356 -
 .../src/jvm/backtype/storm/tuple/Values.java    |    37 -
 .../storm/ui/InvalidRequestException.java       |    37 -
 .../storm/utils/BufferFileInputStream.java      |    54 -
 .../backtype/storm/utils/BufferInputStream.java |    53 -
 .../backtype/storm/utils/CRC32OutputStream.java |    44 -
 .../backtype/storm/utils/ClojureTimerTask.java  |    35 -
 .../src/jvm/backtype/storm/utils/Container.java |    24 -
 .../jvm/backtype/storm/utils/DRPCClient.java    |    64 -
 .../utils/DisruptorBackpressureCallback.java    |    27 -
 .../backtype/storm/utils/DisruptorQueue.java    |   544 -
 .../storm/utils/ExtendedThreadPoolExecutor.java |    67 -
 .../storm/utils/IndifferentAccessMap.java       |   177 -
 .../backtype/storm/utils/InprocMessaging.java   |    59 -
 .../storm/utils/KeyedRoundRobinQueue.java       |    68 -
 .../jvm/backtype/storm/utils/ListDelegate.java  |   156 -
 .../jvm/backtype/storm/utils/LocalState.java    |   189 -
 .../src/jvm/backtype/storm/utils/Monitor.java   |   252 -
 .../jvm/backtype/storm/utils/MutableInt.java    |    43 -
 .../jvm/backtype/storm/utils/MutableLong.java   |    43 -
 .../jvm/backtype/storm/utils/MutableObject.java |    38 -
 .../jvm/backtype/storm/utils/NimbusClient.java  |   118 -
 .../utils/NimbusLeaderNotFoundException.java    |    41 -
 .../storm/utils/RegisteredGlobalState.java      |    62 -
 .../jvm/backtype/storm/utils/RotatingMap.java   |   128 -
 .../backtype/storm/utils/ServiceRegistry.java   |    47 -
 .../storm/utils/ShellBoltMessageQueue.java      |   121 -
 .../jvm/backtype/storm/utils/ShellProcess.java  |   210 -
 .../jvm/backtype/storm/utils/ShellUtils.java    |   505 -
 .../StormBoundedExponentialBackoffRetry.java    |    76 -
 .../src/jvm/backtype/storm/utils/TestUtils.java |    34 -
 .../storm/utils/ThriftTopologyUtils.java        |    66 -
 .../src/jvm/backtype/storm/utils/Time.java      |   119 -
 .../jvm/backtype/storm/utils/TimeCacheMap.java  |   125 -
 .../backtype/storm/utils/TransferDrainer.java   |   132 -
 .../jvm/backtype/storm/utils/TupleUtils.java    |    46 -
 .../src/jvm/backtype/storm/utils/Utils.java     |  1373 -
 .../jvm/backtype/storm/utils/VersionInfo.java   |   131 -
 .../backtype/storm/utils/VersionedStore.java    |   187 -
 .../storm/utils/WindowedTimeThrottler.java      |    51 -
 .../storm/utils/WorkerBackpressureCallback.java |    26 -
 .../storm/utils/WorkerBackpressureThread.java   |    59 -
 .../jvm/backtype/storm/utils/WritableUtils.java |   375 -
 .../backtype/storm/utils/ZookeeperAuthInfo.java |    53 -
 .../storm/utils/ZookeeperServerCnxnFactory.java |    84 -
 .../storm/validation/ConfigValidation.java      |   700 -
 .../validation/ConfigValidationAnnotations.java |   218 -
 .../storm/validation/ConfigValidationUtils.java |   175 -
 .../storm/windowing/CountEvictionPolicy.java    |    75 -
 .../storm/windowing/CountTriggerPolicy.java     |    68 -
 .../src/jvm/backtype/storm/windowing/Event.java |    49 -
 .../jvm/backtype/storm/windowing/EventImpl.java |    51 -
 .../storm/windowing/EvictionPolicy.java         |    74 -
 .../storm/windowing/TimeEvictionPolicy.java     |    71 -
 .../storm/windowing/TimeTriggerPolicy.java      |   128 -
 .../storm/windowing/TriggerHandler.java         |    31 -
 .../backtype/storm/windowing/TriggerPolicy.java |    42 -
 .../backtype/storm/windowing/TupleWindow.java   |    26 -
 .../storm/windowing/TupleWindowImpl.java        |    61 -
 .../storm/windowing/WaterMarkEvent.java         |    38 -
 .../windowing/WaterMarkEventGenerator.java      |   116 -
 .../windowing/WatermarkCountEvictionPolicy.java |    65 -
 .../windowing/WatermarkCountTriggerPolicy.java  |    83 -
 .../windowing/WatermarkTimeEvictionPolicy.java  |    77 -
 .../windowing/WatermarkTimeTriggerPolicy.java   |   109 -
 .../jvm/backtype/storm/windowing/Window.java    |    48 -
 .../windowing/WindowLifecycleListener.java      |    42 -
 .../backtype/storm/windowing/WindowManager.java |   289 -
 storm-core/src/jvm/org/apache/storm/Config.java |  2335 +
 .../src/jvm/org/apache/storm/Constants.java     |    36 +
 .../org/apache/storm/ICredentialsListener.java  |    32 +
 .../src/jvm/org/apache/storm/ILocalCluster.java |    49 +
 .../src/jvm/org/apache/storm/ILocalDRPC.java    |    27 +
 .../jvm/org/apache/storm/ISubmitterHook.java    |    31 +
 .../src/jvm/org/apache/storm/LogWriter.java     |    83 +
 .../jvm/org/apache/storm/StormSubmitter.java    |   496 +
 .../storm/blobstore/AtomicOutputStream.java     |    32 +
 .../storm/blobstore/BlobKeySequenceInfo.java    |    40 +
 .../org/apache/storm/blobstore/BlobStore.java   |   447 +
 .../storm/blobstore/BlobStoreAclHandler.java    |   399 +
 .../apache/storm/blobstore/BlobStoreFile.java   |    50 +
 .../apache/storm/blobstore/BlobStoreUtils.java  |   257 +
 .../storm/blobstore/BlobSynchronizer.java       |   124 +
 .../apache/storm/blobstore/ClientBlobStore.java |   184 +
 .../storm/blobstore/FileBlobStoreImpl.java      |   248 +
 .../storm/blobstore/InputStreamWithMeta.java    |    26 +
 .../org/apache/storm/blobstore/KeyFilter.java   |    22 +
 .../storm/blobstore/KeySequenceNumber.java      |   229 +
 .../storm/blobstore/LocalFsBlobStore.java       |   311 +
 .../storm/blobstore/LocalFsBlobStoreFile.java   |   159 +
 .../apache/storm/blobstore/NimbusBlobStore.java |   420 +
 .../org/apache/storm/clojure/ClojureBolt.java   |   119 +
 .../org/apache/storm/clojure/ClojureSpout.java  |   153 +
 .../org/apache/storm/clojure/RichShellBolt.java |    51 +
 .../apache/storm/clojure/RichShellSpout.java    |    51 +
 .../org/apache/storm/cluster/ClusterState.java  |   217 +
 .../storm/cluster/ClusterStateContext.java      |    41 +
 .../storm/cluster/ClusterStateFactory.java      |    28 +
 .../storm/cluster/ClusterStateListener.java     |    22 +
 .../apache/storm/cluster/ConnectionState.java   |    24 +
 .../org/apache/storm/cluster/DaemonType.java    |    27 +
 .../storm/coordination/BatchBoltExecutor.java   |   108 +
 .../coordination/BatchOutputCollector.java      |    46 +
 .../coordination/BatchOutputCollectorImpl.java  |    53 +
 .../coordination/BatchSubtopologyBuilder.java   |   447 +
 .../storm/coordination/CoordinatedBolt.java     |   382 +
 .../apache/storm/coordination/IBatchBolt.java   |    30 +
 .../daemon/ClientJarTransformerRunner.java      |    41 +
 .../apache/storm/daemon/DirectoryCleaner.java   |   177 +
 .../org/apache/storm/daemon/JarTransformer.java |    31 +
 .../org/apache/storm/daemon/Shutdownable.java   |    22 +
 .../storm/drpc/DRPCInvocationsClient.java       |   113 +
 .../jvm/org/apache/storm/drpc/DRPCSpout.java    |   261 +
 .../jvm/org/apache/storm/drpc/JoinResult.java   |    75 +
 .../org/apache/storm/drpc/KeyedFairBolt.java    |    93 +
 .../storm/drpc/LinearDRPCInputDeclarer.java     |    52 +
 .../storm/drpc/LinearDRPCTopologyBuilder.java   |   393 +
 .../org/apache/storm/drpc/PrepareRequest.java   |    59 +
 .../org/apache/storm/drpc/ReturnResults.java    |   124 +
 .../apache/storm/generated/AccessControl.java   |   627 +
 .../storm/generated/AccessControlType.java      |    62 +
 .../storm/generated/AlreadyAliveException.java  |   406 +
 .../org/apache/storm/generated/Assignment.java  |  1159 +
 .../storm/generated/AuthorizationException.java |   406 +
 .../storm/generated/BeginDownloadResult.java    |   608 +
 .../jvm/org/apache/storm/generated/Bolt.java    |   514 +
 .../storm/generated/BoltAggregateStats.java     |   704 +
 .../org/apache/storm/generated/BoltStats.java   |  1390 +
 .../apache/storm/generated/ClusterSummary.java  |   879 +
 .../storm/generated/ClusterWorkerHeartbeat.java |   768 +
 .../storm/generated/CommonAggregateStats.java   |   902 +
 .../generated/ComponentAggregateStats.java      |   752 +
 .../apache/storm/generated/ComponentCommon.java |   852 +
 .../apache/storm/generated/ComponentObject.java |   462 +
 .../storm/generated/ComponentPageInfo.java      |  2194 +
 .../apache/storm/generated/ComponentType.java   |    62 +
 .../org/apache/storm/generated/Credentials.java |   458 +
 .../storm/generated/DRPCExecutionException.java |   406 +
 .../org/apache/storm/generated/DRPCRequest.java |   507 +
 .../apache/storm/generated/DebugOptions.java    |   506 +
 .../apache/storm/generated/DistributedRPC.java  |  1328 +
 .../generated/DistributedRPCInvocations.java    |  2935 ++
 .../org/apache/storm/generated/ErrorInfo.java   |   714 +
 .../storm/generated/ExecutorAggregateStats.java |   526 +
 .../apache/storm/generated/ExecutorInfo.java    |   499 +
 .../storm/generated/ExecutorSpecificStats.java  |   387 +
 .../apache/storm/generated/ExecutorStats.java   |   915 +
 .../apache/storm/generated/ExecutorSummary.java |   922 +
 .../apache/storm/generated/GetInfoOptions.java  |   422 +
 .../apache/storm/generated/GlobalStreamId.java  |   507 +
 .../org/apache/storm/generated/Grouping.java    |   800 +
 .../generated/HBAuthorizationException.java     |   406 +
 .../storm/generated/HBExecutionException.java   |   406 +
 .../org/apache/storm/generated/HBMessage.java   |   636 +
 .../apache/storm/generated/HBMessageData.java   |   640 +
 .../jvm/org/apache/storm/generated/HBNodes.java |   461 +
 .../jvm/org/apache/storm/generated/HBPulse.java |   522 +
 .../org/apache/storm/generated/HBRecords.java   |   466 +
 .../storm/generated/HBServerMessageType.java    |   113 +
 .../generated/InvalidTopologyException.java     |   406 +
 .../org/apache/storm/generated/JavaObject.java  |   561 +
 .../apache/storm/generated/JavaObjectArg.java   |   631 +
 .../generated/KeyAlreadyExistsException.java    |   406 +
 .../storm/generated/KeyNotFoundException.java   |   406 +
 .../org/apache/storm/generated/KillOptions.java |   407 +
 .../storm/generated/LSApprovedWorkers.java      |   458 +
 .../generated/LSSupervisorAssignments.java      |   471 +
 .../apache/storm/generated/LSSupervisorId.java  |   406 +
 .../apache/storm/generated/LSTopoHistory.java   |   805 +
 .../storm/generated/LSTopoHistoryList.java      |   460 +
 .../storm/generated/LSWorkerHeartbeat.java      |   755 +
 .../apache/storm/generated/ListBlobsResult.java |   556 +
 .../apache/storm/generated/LocalAssignment.java |   676 +
 .../apache/storm/generated/LocalStateData.java  |   471 +
 .../org/apache/storm/generated/LogConfig.java   |   475 +
 .../org/apache/storm/generated/LogLevel.java    |   836 +
 .../apache/storm/generated/LogLevelAction.java  |    65 +
 .../jvm/org/apache/storm/generated/Nimbus.java  | 44114 +++++++++++++++++
 .../apache/storm/generated/NimbusSummary.java   |   796 +
 .../org/apache/storm/generated/NodeInfo.java    |   556 +
 .../storm/generated/NotAliveException.java      |   406 +
 .../org/apache/storm/generated/NullStruct.java  |   300 +
 .../apache/storm/generated/NumErrorsChoice.java |    65 +
 .../apache/storm/generated/ProfileAction.java   |    74 +
 .../apache/storm/generated/ProfileRequest.java  |   631 +
 .../storm/generated/ReadableBlobMeta.java       |   510 +
 .../storm/generated/RebalanceOptions.java       |   664 +
 .../storm/generated/SettableBlobMeta.java       |   567 +
 .../apache/storm/generated/ShellComponent.java  |   516 +
 .../storm/generated/SpecificAggregateStats.java |   387 +
 .../storm/generated/SpoutAggregateStats.java    |   407 +
 .../org/apache/storm/generated/SpoutSpec.java   |   514 +
 .../org/apache/storm/generated/SpoutStats.java  |   917 +
 .../apache/storm/generated/StateSpoutSpec.java  |   514 +
 .../org/apache/storm/generated/StormBase.java   |  1382 +
 .../apache/storm/generated/StormTopology.java   |   962 +
 .../org/apache/storm/generated/StreamInfo.java  |   554 +
 .../apache/storm/generated/SubmitOptions.java   |   533 +
 .../apache/storm/generated/SupervisorInfo.java  |  1446 +
 .../storm/generated/SupervisorSummary.java      |  1265 +
 .../storm/generated/ThriftSerializedObject.java |   516 +
 .../storm/generated/TopologyActionOptions.java  |   387 +
 .../storm/generated/TopologyHistoryInfo.java    |   461 +
 .../apache/storm/generated/TopologyInfo.java    |  2144 +
 .../storm/generated/TopologyInitialStatus.java  |    62 +
 .../storm/generated/TopologyPageInfo.java       |  2597 +
 .../apache/storm/generated/TopologyStats.java   |  1094 +
 .../apache/storm/generated/TopologyStatus.java  |    68 +
 .../apache/storm/generated/TopologySummary.java |  1901 +
 .../apache/storm/generated/WorkerResources.java |   605 +
 .../storm/grouping/CustomStreamGrouping.java    |    43 +
 .../src/jvm/org/apache/storm/grouping/Load.java |    77 +
 .../grouping/LoadAwareCustomStreamGrouping.java |    24 +
 .../grouping/LoadAwareShuffleGrouping.java      |    76 +
 .../org/apache/storm/grouping/LoadMapping.java  |    64 +
 .../storm/grouping/PartialKeyGrouping.java      |   106 +
 .../apache/storm/grouping/ShuffleGrouping.java  |    65 +
 .../org/apache/storm/hooks/BaseTaskHook.java    |    61 +
 .../org/apache/storm/hooks/BaseWorkerHook.java  |    51 +
 .../jvm/org/apache/storm/hooks/ITaskHook.java   |    38 +
 .../jvm/org/apache/storm/hooks/IWorkerHook.java |    44 +
 .../apache/storm/hooks/info/BoltAckInfo.java    |    32 +
 .../storm/hooks/info/BoltExecuteInfo.java       |    32 +
 .../apache/storm/hooks/info/BoltFailInfo.java   |    32 +
 .../org/apache/storm/hooks/info/EmitInfo.java   |    35 +
 .../apache/storm/hooks/info/SpoutAckInfo.java   |    30 +
 .../apache/storm/hooks/info/SpoutFailInfo.java  |    30 +
 .../apache/storm/localizer/LocalResource.java   |    44 +
 .../storm/localizer/LocalizedResource.java      |   130 +
 .../LocalizedResourceRetentionSet.java          |   140 +
 .../storm/localizer/LocalizedResourceSet.java   |   101 +
 .../org/apache/storm/localizer/Localizer.java   |   695 +
 .../storm/logging/ThriftAccessLogger.java       |    27 +
 .../logging/filters/AccessLoggingFilter.java    |    52 +
 .../storm/messaging/ConnectionWithStatus.java   |    49 +
 .../DeserializingConnectionCallback.java        |    60 +
 .../org/apache/storm/messaging/IConnection.java |    63 +
 .../storm/messaging/IConnectionCallback.java    |    31 +
 .../org/apache/storm/messaging/IContext.java    |    59 +
 .../org/apache/storm/messaging/TaskMessage.java |    53 +
 .../storm/messaging/TransportFactory.java       |    57 +
 .../apache/storm/messaging/local/Context.java   |   164 +
 .../apache/storm/messaging/netty/Client.java    |   578 +
 .../apache/storm/messaging/netty/Context.java   |   113 +
 .../storm/messaging/netty/ControlMessage.java   |    75 +
 .../messaging/netty/INettySerializable.java     |    26 +
 .../storm/messaging/netty/ISaslClient.java      |    28 +
 .../storm/messaging/netty/ISaslServer.java      |    26 +
 .../apache/storm/messaging/netty/IServer.java   |    26 +
 .../netty/KerberosSaslClientHandler.java        |   152 +
 .../netty/KerberosSaslNettyClient.java          |   203 +
 .../netty/KerberosSaslNettyClientState.java     |    31 +
 .../netty/KerberosSaslNettyServer.java          |   210 +
 .../netty/KerberosSaslNettyServerState.java     |    30 +
 .../netty/KerberosSaslServerHandler.java        |   133 +
 .../storm/messaging/netty/MessageBatch.java     |   118 +
 .../storm/messaging/netty/MessageBuffer.java    |    58 +
 .../storm/messaging/netty/MessageDecoder.java   |   144 +
 .../storm/messaging/netty/MessageEncoder.java   |    43 +
 .../netty/NettyRenameThreadFactory.java         |    56 +
 .../netty/NettyUncaughtExceptionHandler.java    |    35 +
 .../storm/messaging/netty/SaslMessageToken.java |   114 +
 .../storm/messaging/netty/SaslNettyClient.java  |   154 +
 .../messaging/netty/SaslNettyClientState.java   |    31 +
 .../storm/messaging/netty/SaslNettyServer.java  |   157 +
 .../messaging/netty/SaslNettyServerState.java   |    30 +
 .../messaging/netty/SaslStormClientHandler.java |   158 +
 .../netty/SaslStormServerAuthorizeHandler.java  |    83 +
 .../messaging/netty/SaslStormServerHandler.java |   153 +
 .../apache/storm/messaging/netty/SaslUtils.java |    68 +
 .../apache/storm/messaging/netty/Server.java    |   273 +
 .../messaging/netty/StormClientHandler.java     |    91 +
 .../netty/StormClientPipelineFactory.java       |    56 +
 .../messaging/netty/StormServerHandler.java     |    74 +
 .../netty/StormServerPipelineFactory.java       |    57 +
 .../apache/storm/metric/EventLoggerBolt.java    |    67 +
 .../storm/metric/FileBasedEventLogger.java      |   132 +
 .../metric/HttpForwardingMetricsConsumer.java   |    85 +
 .../metric/HttpForwardingMetricsServer.java     |   118 +
 .../org/apache/storm/metric/IEventLogger.java   |    70 +
 .../storm/metric/LoggingMetricsConsumer.java    |    76 +
 .../storm/metric/MetricsConsumerBolt.java       |    63 +
 .../jvm/org/apache/storm/metric/SystemBolt.java |   165 +
 .../storm/metric/api/AssignableMetric.java      |    34 +
 .../apache/storm/metric/api/CombinedMetric.java |    38 +
 .../apache/storm/metric/api/CountMetric.java    |    39 +
 .../org/apache/storm/metric/api/ICombiner.java  |    23 +
 .../org/apache/storm/metric/api/IMetric.java    |    22 +
 .../storm/metric/api/IMetricsConsumer.java      |    60 +
 .../org/apache/storm/metric/api/IReducer.java   |    24 +
 .../storm/metric/api/IStatefulObject.java       |    22 +
 .../apache/storm/metric/api/MeanReducer.java    |    53 +
 .../storm/metric/api/MultiCountMetric.java      |    45 +
 .../storm/metric/api/MultiReducedMetric.java    |    50 +
 .../apache/storm/metric/api/ReducedMetric.java  |    38 +
 .../apache/storm/metric/api/StateMetric.java    |    31 +
 .../metric/api/rpc/AssignableShellMetric.java   |    30 +
 .../metric/api/rpc/CombinedShellMetric.java     |    31 +
 .../storm/metric/api/rpc/CountShellMetric.java  |    37 +
 .../storm/metric/api/rpc/IShellMetric.java      |    31 +
 .../metric/api/rpc/ReducedShellMetric.java      |    32 +
 .../metric/internal/CountStatAndMetric.java     |   211 +
 .../metric/internal/LatencyStatAndMetric.java   |   262 +
 .../storm/metric/internal/MetricStatTimer.java  |    27 +
 .../internal/MultiCountStatAndMetric.java       |   112 +
 .../internal/MultiLatencyStatAndMetric.java     |   109 +
 .../storm/metric/internal/RateTracker.java      |   165 +
 .../jvm/org/apache/storm/multilang/BoltMsg.java |    79 +
 .../org/apache/storm/multilang/ISerializer.java |    82 +
 .../apache/storm/multilang/JsonSerializer.java  |   204 +
 .../storm/multilang/NoOutputException.java      |    40 +
 .../org/apache/storm/multilang/ShellMsg.java    |   184 +
 .../org/apache/storm/multilang/SpoutMsg.java    |    50 +
 .../AbstractDNSToSwitchMapping.java             |    96 +
 .../networktopography/DNSToSwitchMapping.java   |    49 +
 .../DefaultRackDNSToSwitchMapping.java          |    52 +
 .../storm/nimbus/DefaultTopologyValidator.java  |    31 +
 .../org/apache/storm/nimbus/ILeaderElector.java |    73 +
 .../nimbus/ITopologyActionNotifierPlugin.java   |    43 +
 .../apache/storm/nimbus/ITopologyValidator.java |    28 +
 .../jvm/org/apache/storm/nimbus/NimbusInfo.java |   116 +
 .../storm/pacemaker/IServerMessageHandler.java  |     2 +-
 .../apache/storm/pacemaker/PacemakerClient.java |    12 +-
 .../storm/pacemaker/PacemakerClientHandler.java |     4 +-
 .../apache/storm/pacemaker/PacemakerServer.java |    10 +-
 .../storm/pacemaker/codec/ThriftDecoder.java    |    10 +-
 .../storm/pacemaker/codec/ThriftEncoder.java    |    14 +-
 .../pacemaker/codec/ThriftNettyClientCodec.java |     6 +-
 .../pacemaker/codec/ThriftNettyServerCodec.java |    14 +-
 .../org/apache/storm/planner/CompoundSpout.java |    25 +
 .../org/apache/storm/planner/CompoundTask.java  |    25 +
 .../org/apache/storm/planner/TaskBundle.java    |    33 +
 .../jvm/org/apache/storm/scheduler/Cluster.java |   684 +
 .../apache/storm/scheduler/ExecutorDetails.java |    54 +
 .../jvm/org/apache/storm/scheduler/INimbus.java |    49 +
 .../org/apache/storm/scheduler/IScheduler.java  |    40 +
 .../org/apache/storm/scheduler/ISupervisor.java |    45 +
 .../storm/scheduler/SchedulerAssignment.java    |    58 +
 .../scheduler/SchedulerAssignmentImpl.java      |   105 +
 .../storm/scheduler/SupervisorDetails.java      |   132 +
 .../org/apache/storm/scheduler/Topologies.java  |    84 +
 .../apache/storm/scheduler/TopologyDetails.java |   515 +
 .../org/apache/storm/scheduler/WorkerSlot.java  |    77 +
 .../scheduler/multitenant/DefaultPool.java      |   219 +
 .../storm/scheduler/multitenant/FreePool.java   |   125 +
 .../scheduler/multitenant/IsolatedPool.java     |   363 +
 .../multitenant/MultitenantScheduler.java       |    98 +
 .../storm/scheduler/multitenant/Node.java       |   340 +
 .../storm/scheduler/multitenant/NodePool.java   |   296 +
 .../storm/scheduler/resource/Component.java     |    54 +
 .../storm/scheduler/resource/RAS_Node.java      |   528 +
 .../storm/scheduler/resource/RAS_Nodes.java     |   160 +
 .../resource/ResourceAwareScheduler.java        |   414 +
 .../storm/scheduler/resource/ResourceUtils.java |   184 +
 .../scheduler/resource/SchedulingResult.java    |   116 +
 .../scheduler/resource/SchedulingStatus.java    |    40 +
 .../apache/storm/scheduler/resource/User.java   |   348 +
 .../eviction/DefaultEvictionStrategy.java       |   126 +
 .../strategies/eviction/IEvictionStrategy.java  |    47 +
 .../DefaultSchedulingPriorityStrategy.java      |    81 +
 .../priority/ISchedulingPriorityStrategy.java   |    41 +
 .../DefaultResourceAwareStrategy.java           |   488 +
 .../strategies/scheduling/IStrategy.java        |    50 +
 .../storm/security/INimbusCredentialPlugin.java |    47 +
 .../apache/storm/security/auth/AuthUtils.java   |   348 +
 .../auth/DefaultHttpCredentialsPlugin.java      |    96 +
 .../security/auth/DefaultPrincipalToLocal.java  |    42 +
 .../apache/storm/security/auth/IAuthorizer.java |    53 +
 .../storm/security/auth/IAutoCredentials.java   |    55 +
 .../security/auth/ICredentialsRenewer.java      |    40 +
 .../auth/IGroupMappingServiceProvider.java      |    42 +
 .../security/auth/IHttpCredentialsPlugin.java   |    48 +
 .../storm/security/auth/IPrincipalToLocal.java  |    41 +
 .../storm/security/auth/ITransportPlugin.java   |    57 +
 .../security/auth/KerberosPrincipalToLocal.java |    45 +
 .../storm/security/auth/NimbusPrincipal.java    |    29 +
 .../apache/storm/security/auth/ReqContext.java  |   154 +
 .../security/auth/SaslTransportPlugin.java      |   174 +
 .../security/auth/ShellBasedGroupsMapping.java  |    94 +
 .../security/auth/SimpleTransportPlugin.java    |   161 +
 .../security/auth/SingleUserPrincipal.java      |    53 +
 .../storm/security/auth/TBackoffConnect.java    |    76 +
 .../storm/security/auth/ThriftClient.java       |   114 +
 .../security/auth/ThriftConnectionType.java     |    77 +
 .../storm/security/auth/ThriftServer.java       |    78 +
 .../auth/authorizer/DRPCAuthorizerBase.java     |    63 +
 .../authorizer/DRPCSimpleACLAuthorizer.java     |   174 +
 .../auth/authorizer/DenyAuthorizer.java         |    47 +
 .../authorizer/ImpersonationAuthorizer.java     |   172 +
 .../auth/authorizer/NoopAuthorizer.java         |    47 +
 .../auth/authorizer/SimpleACLAuthorizer.java    |   167 +
 .../authorizer/SimpleWhitelistAuthorizer.java   |    60 +
 .../auth/digest/ClientCallbackHandler.java      |   108 +
 .../auth/digest/DigestSaslTransportPlugin.java  |    68 +
 .../auth/digest/ServerCallbackHandler.java      |   132 +
 .../storm/security/auth/kerberos/AutoTGT.java   |   277 +
 .../auth/kerberos/AutoTGTKrb5LoginModule.java   |   108 +
 .../kerberos/AutoTGTKrb5LoginModuleTest.java    |    44 +
 .../auth/kerberos/ClientCallbackHandler.java    |   104 +
 .../kerberos/KerberosSaslTransportPlugin.java   |   210 +
 .../security/auth/kerberos/NoOpTTrasport.java   |    54 +
 .../auth/kerberos/ServerCallbackHandler.java    |    96 +
 .../auth/kerberos/jaas_kerberos_cluster.conf    |    49 +
 .../auth/kerberos/jaas_kerberos_launcher.conf   |    31 +
 .../serialization/BlowfishTupleSerializer.java  |    92 +
 .../storm/serialization/DefaultKryoFactory.java |    64 +
 .../DefaultSerializationDelegate.java           |    58 +
 .../GzipBridgeSerializationDelegate.java        |    65 +
 .../GzipBridgeThriftSerializationDelegate.java  |    64 +
 .../GzipSerializationDelegate.java              |    64 +
 .../GzipThriftSerializationDelegate.java        |    56 +
 .../storm/serialization/IKryoDecorator.java     |    23 +
 .../storm/serialization/IKryoFactory.java       |    40 +
 .../storm/serialization/ITupleDeserializer.java |    24 +
 .../storm/serialization/ITupleSerializer.java   |    26 +
 .../serialization/KryoTupleDeserializer.java    |    56 +
 .../serialization/KryoTupleSerializer.java      |    60 +
 .../serialization/KryoValuesDeserializer.java   |    50 +
 .../serialization/KryoValuesSerializer.java     |    58 +
 .../serialization/SerializableSerializer.java   |    61 +
 .../serialization/SerializationDelegate.java    |    35 +
 .../serialization/SerializationFactory.java     |   223 +
 .../ThriftSerializationDelegate.java            |    52 +
 .../types/ArrayListSerializer.java              |    32 +
 .../serialization/types/HashMapSerializer.java  |    32 +
 .../serialization/types/HashSetSerializer.java  |    32 +
 .../types/ListDelegateSerializer.java           |    32 +
 .../storm/spout/IMultiSchemableSpout.java       |    23 +
 .../org/apache/storm/spout/ISchemableSpout.java |    24 +
 .../src/jvm/org/apache/storm/spout/ISpout.java  |   105 +
 .../storm/spout/ISpoutOutputCollector.java      |    32 +
 .../apache/storm/spout/ISpoutWaitStrategy.java  |    34 +
 .../jvm/org/apache/storm/spout/MultiScheme.java |    29 +
 .../storm/spout/NothingEmptyEmitStrategy.java   |    31 +
 .../org/apache/storm/spout/RawMultiScheme.java  |    40 +
 .../jvm/org/apache/storm/spout/RawScheme.java   |    37 +
 .../src/jvm/org/apache/storm/spout/Scheme.java  |    29 +
 .../apache/storm/spout/SchemeAsMultiScheme.java |    42 +
 .../jvm/org/apache/storm/spout/ShellSpout.java  |   280 +
 .../storm/spout/SleepSpoutWaitStrategy.java     |    41 +
 .../storm/spout/SpoutOutputCollector.java       |   139 +
 .../jvm/org/apache/storm/state/IStateSpout.java |    29 +
 .../storm/state/IStateSpoutOutputCollector.java |    22 +
 .../apache/storm/state/ISubscribedState.java    |    25 +
 .../state/ISynchronizeOutputCollector.java      |    24 +
 .../storm/state/StateSpoutOutputCollector.java  |    28 +
 .../storm/state/SynchronizeOutputCollector.java |    30 +
 .../storm/task/GeneralTopologyContext.java      |   199 +
 .../src/jvm/org/apache/storm/task/IBolt.java    |    84 +
 .../org/apache/storm/task/IErrorReporter.java   |    22 +
 .../org/apache/storm/task/IMetricsContext.java  |    31 +
 .../org/apache/storm/task/IOutputCollector.java |    32 +
 .../org/apache/storm/task/OutputCollector.java  |   225 +
 .../jvm/org/apache/storm/task/ShellBolt.java    |   406 +
 .../org/apache/storm/task/TopologyContext.java  |   389 +
 .../storm/task/WorkerTopologyContext.java       |   106 +
 .../apache/storm/testing/AckFailDelegate.java   |    25 +
 .../apache/storm/testing/AckFailMapTracker.java |    52 +
 .../org/apache/storm/testing/AckTracker.java    |    52 +
 .../AlternateRackDNSToSwitchMapping.java        |    65 +
 .../apache/storm/testing/BatchNumberList.java   |    72 +
 .../apache/storm/testing/BatchProcessWord.java  |    39 +
 .../org/apache/storm/testing/BatchRepeatA.java  |    48 +
 .../org/apache/storm/testing/BoltTracker.java   |    42 +
 .../storm/testing/CompleteTopologyParam.java    |    87 +
 .../apache/storm/testing/CountingBatchBolt.java |    55 +
 .../storm/testing/CountingCommitBolt.java       |    57 +
 .../org/apache/storm/testing/FeederSpout.java   |   100 +
 .../org/apache/storm/testing/FixedTuple.java    |    42 +
 .../apache/storm/testing/FixedTupleSpout.java   |   179 +
 .../testing/ForwardingMetricsConsumer.java      |   100 +
 .../org/apache/storm/testing/IdentityBolt.java  |    42 +
 .../apache/storm/testing/IntegrationTest.java   |    38 +
 .../storm/testing/KeyedCountingBatchBolt.java   |    61 +
 .../testing/KeyedCountingCommitterBolt.java     |    24 +
 .../storm/testing/KeyedSummingBatchBolt.java    |    61 +
 .../storm/testing/MemoryTransactionalSpout.java |   180 +
 .../testing/MemoryTransactionalSpoutMeta.java   |    38 +
 .../apache/storm/testing/MkClusterParam.java    |    57 +
 .../org/apache/storm/testing/MkTupleParam.java  |    51 +
 .../org/apache/storm/testing/MockedSources.java |    60 +
 .../jvm/org/apache/storm/testing/NGrouping.java |    50 +
 .../storm/testing/NonRichBoltTracker.java       |    51 +
 .../testing/OpaqueMemoryTransactionalSpout.java |   186 +
 .../apache/storm/testing/PrepareBatchBolt.java  |    52 +
 .../storm/testing/PythonShellMetricsBolt.java   |    49 +
 .../storm/testing/PythonShellMetricsSpout.java  |    52 +
 .../testing/SingleUserSimpleTransport.java      |    37 +
 .../org/apache/storm/testing/SpoutTracker.java  |   111 +
 .../storm/testing/TestAggregatesCounter.java    |    63 +
 .../org/apache/storm/testing/TestConfBolt.java  |    62 +
 .../apache/storm/testing/TestEventLogSpout.java |   139 +
 .../storm/testing/TestEventOrderCheckBolt.java  |    76 +
 .../apache/storm/testing/TestGlobalCount.java   |    60 +
 .../jvm/org/apache/storm/testing/TestJob.java   |    43 +
 .../apache/storm/testing/TestKryoDecorator.java |    31 +
 .../apache/storm/testing/TestPlannerBolt.java   |    45 +
 .../apache/storm/testing/TestPlannerSpout.java  |    85 +
 .../org/apache/storm/testing/TestSerObject.java |    37 +
 .../storm/testing/TestWordBytesCounter.java     |    27 +
 .../apache/storm/testing/TestWordCounter.java   |    65 +
 .../org/apache/storm/testing/TestWordSpout.java |    86 +
 .../apache/storm/testing/TrackedTopology.java   |    34 +
 .../apache/storm/testing/TupleCaptureBolt.java  |    83 +
 .../topology/BaseConfigurationDeclarer.java     |    83 +
 .../storm/topology/BasicBoltExecutor.java       |    67 +
 .../storm/topology/BasicOutputCollector.java    |    62 +
 .../org/apache/storm/topology/BoltDeclarer.java |    26 +
 .../ComponentConfigurationDeclarer.java         |    32 +
 .../apache/storm/topology/FailedException.java  |    36 +
 .../org/apache/storm/topology/IBasicBolt.java   |    33 +
 .../storm/topology/IBasicOutputCollector.java   |    27 +
 .../org/apache/storm/topology/IComponent.java   |    44 +
 .../org/apache/storm/topology/IRichBolt.java    |    29 +
 .../org/apache/storm/topology/IRichSpout.java   |    29 +
 .../apache/storm/topology/IRichStateSpout.java  |    25 +
 .../apache/storm/topology/IWindowedBolt.java    |    40 +
 .../apache/storm/topology/InputDeclarer.java    |   184 +
 .../storm/topology/OutputFieldsDeclarer.java    |    32 +
 .../storm/topology/OutputFieldsGetter.java      |    53 +
 .../storm/topology/ReportedFailedException.java |    36 +
 .../apache/storm/topology/SpoutDeclarer.java    |    22 +
 .../apache/storm/topology/TopologyBuilder.java  |   433 +
 .../storm/topology/WindowedBoltExecutor.java    |   308 +
 .../storm/topology/base/BaseBasicBolt.java      |    33 +
 .../storm/topology/base/BaseBatchBolt.java      |    24 +
 .../storm/topology/base/BaseComponent.java      |    28 +
 ...BaseOpaquePartitionedTransactionalSpout.java |    25 +
 .../base/BasePartitionedTransactionalSpout.java |    25 +
 .../storm/topology/base/BaseRichBolt.java       |    26 +
 .../storm/topology/base/BaseRichSpout.java      |    46 +
 .../topology/base/BaseTransactionalBolt.java    |    24 +
 .../topology/base/BaseTransactionalSpout.java   |    24 +
 .../storm/topology/base/BaseWindowedBolt.java   |   212 +
 .../apache/storm/transactional/ICommitter.java  |    26 +
 .../ICommitterTransactionalSpout.java           |    31 +
 .../transactional/ITransactionalSpout.java      |    94 +
 .../storm/transactional/TransactionAttempt.java |    61 +
 .../TransactionalSpoutBatchExecutor.java        |    96 +
 .../TransactionalSpoutCoordinator.java          |   217 +
 .../TransactionalTopologyBuilder.java           |   521 +
 .../IOpaquePartitionedTransactionalSpout.java   |    56 +
 .../IPartitionedTransactionalSpout.java         |    69 +
 ...uePartitionedTransactionalSpoutExecutor.java |   160 +
 .../PartitionedTransactionalSpoutExecutor.java  |   142 +
 .../state/RotatingTransactionalState.java       |   149 +
 .../state/TestTransactionalState.java           |    47 +
 .../transactional/state/TransactionalState.java |   173 +
 .../jvm/org/apache/storm/trident/JoinType.java  |    30 +
 .../jvm/org/apache/storm/trident/Stream.java    |   377 +
 .../org/apache/storm/trident/TridentState.java  |    40 +
 .../apache/storm/trident/TridentTopology.java   |   827 +
 .../trident/drpc/ReturnResultsReducer.java      |   121 +
 .../fluent/ChainedAggregatorDeclarer.java       |   183 +
 .../fluent/ChainedFullAggregatorDeclarer.java   |    32 +
 .../ChainedPartitionAggregatorDeclarer.java     |    32 +
 .../trident/fluent/GlobalAggregationScheme.java |    26 +
 .../storm/trident/fluent/GroupedStream.java     |   174 +
 .../trident/fluent/IAggregatableStream.java     |    31 +
 .../fluent/IChainedAggregatorDeclarer.java      |    24 +
 .../storm/trident/fluent/UniqueIdGen.java       |    34 +
 .../storm/trident/graph/GraphGrouper.java       |   119 +
 .../org/apache/storm/trident/graph/Group.java   |    84 +
 .../storm/trident/operation/Aggregator.java     |    26 +
 .../storm/trident/operation/Assembly.java       |    25 +
 .../storm/trident/operation/BaseAggregator.java |    23 +
 .../storm/trident/operation/BaseFilter.java     |    23 +
 .../storm/trident/operation/BaseFunction.java   |    23 +
 .../trident/operation/BaseMultiReducer.java     |    33 +
 .../storm/trident/operation/BaseOperation.java  |    32 +
 .../trident/operation/CombinerAggregator.java   |    29 +
 .../storm/trident/operation/EachOperation.java  |    22 +
 .../apache/storm/trident/operation/Filter.java  |    24 +
 .../storm/trident/operation/Function.java       |    24 +
 .../trident/operation/GroupedMultiReducer.java  |    31 +
 .../storm/trident/operation/MultiReducer.java   |    31 +
 .../storm/trident/operation/Operation.java      |    26 +
 .../trident/operation/ReducerAggregator.java    |    26 +
 .../trident/operation/TridentCollector.java     |    26 +
 .../operation/TridentMultiReducerContext.java   |    36 +
 .../operation/TridentOperationContext.java      |    65 +
 .../storm/trident/operation/builtin/Count.java  |    41 +
 .../storm/trident/operation/builtin/Debug.java  |    39 +
 .../storm/trident/operation/builtin/Equals.java |    38 +
 .../trident/operation/builtin/FilterNull.java   |    31 +
 .../storm/trident/operation/builtin/FirstN.java |   125 +
 .../storm/trident/operation/builtin/MapGet.java |    38 +
 .../storm/trident/operation/builtin/Negate.java |    48 +
 .../trident/operation/builtin/SnapshotGet.java  |    44 +
 .../storm/trident/operation/builtin/Sum.java    |    42 +
 .../operation/builtin/TupleCollectionGet.java   |    46 +
 .../operation/impl/CaptureCollector.java        |    42 +
 .../operation/impl/ChainedAggregatorImpl.java   |   113 +
 .../trident/operation/impl/ChainedResult.java   |    53 +
 .../operation/impl/CombinerAggStateUpdater.java |    56 +
 .../impl/CombinerAggregatorCombineImpl.java     |    61 +
 .../impl/CombinerAggregatorInitImpl.java        |    49 +
 .../trident/operation/impl/FilterExecutor.java  |    53 +
 .../operation/impl/GlobalBatchToPartition.java  |    29 +
 .../trident/operation/impl/GroupCollector.java  |    48 +
 .../operation/impl/GroupedAggregator.java       |    96 +
 .../impl/GroupedMultiReducerExecutor.java       |    95 +
 .../operation/impl/IdentityMultiReducer.java    |    51 +
 .../impl/IndexHashBatchToPartition.java         |    29 +
 .../operation/impl/JoinerMultiReducer.java      |   159 +
 .../operation/impl/ReducerAggStateUpdater.java  |    53 +
 .../operation/impl/ReducerAggregatorImpl.java   |    56 +
 .../storm/trident/operation/impl/Result.java    |    27 +
 .../operation/impl/SingleEmitAggregator.java    |    95 +
 .../trident/operation/impl/TrueFilter.java      |    40 +
 .../storm/trident/partition/GlobalGrouping.java |    42 +
 .../trident/partition/IdentityGrouping.java     |    59 +
 .../trident/partition/IndexHashGrouping.java    |    53 +
 .../storm/trident/planner/BridgeReceiver.java   |    38 +
 .../org/apache/storm/trident/planner/Node.java  |    67 +
 .../storm/trident/planner/NodeStateInfo.java    |    31 +
 .../storm/trident/planner/PartitionNode.java    |    50 +
 .../storm/trident/planner/ProcessorContext.java |    29 +
 .../storm/trident/planner/ProcessorNode.java    |    33 +
 .../apache/storm/trident/planner/SpoutNode.java |    39 +
 .../storm/trident/planner/SubtopologyBolt.java  |   217 +
 .../storm/trident/planner/TridentProcessor.java |    40 +
 .../storm/trident/planner/TupleReceiver.java    |    27 +
 .../planner/processor/AggregateProcessor.java   |    84 +
 .../planner/processor/AppendCollector.java      |    62 +
 .../planner/processor/EachProcessor.java        |    80 +
 .../planner/processor/FreshCollector.java       |    59 +
 .../processor/MultiReducerProcessor.java        |    93 +
 .../processor/PartitionPersistProcessor.java    |   107 +
 .../planner/processor/ProjectedProcessor.java   |    73 +
 .../planner/processor/StateQueryProcessor.java  |   107 +
 .../planner/processor/TridentContext.java       |    76 +
 .../storm/trident/spout/BatchSpoutExecutor.java |    92 +
 .../apache/storm/trident/spout/IBatchID.java    |    24 +
 .../apache/storm/trident/spout/IBatchSpout.java |    33 +
 .../trident/spout/ICommitterTridentSpout.java   |    31 +
 .../spout/IOpaquePartitionedTridentSpout.java   |    63 +
 .../trident/spout/IPartitionedTridentSpout.java |    77 +
 .../storm/trident/spout/ISpoutPartition.java    |    25 +
 .../storm/trident/spout/ITridentDataSource.java |    26 +
 .../storm/trident/spout/ITridentSpout.java      |   125 +
 .../OpaquePartitionedTridentSpoutExecutor.java  |   201 +
 .../spout/PartitionedTridentSpoutExecutor.java  |   171 +
 .../trident/spout/RichSpoutBatchExecutor.java   |   204 +
 .../storm/trident/spout/RichSpoutBatchId.java   |    49 +
 .../spout/RichSpoutBatchIdSerializer.java       |    38 +
 .../trident/spout/RichSpoutBatchTriggerer.java  |   182 +
 .../trident/spout/TridentSpoutCoordinator.java  |    94 +
 .../trident/spout/TridentSpoutExecutor.java     |   138 +
 .../storm/trident/state/BaseQueryFunction.java  |    25 +
 .../storm/trident/state/BaseStateUpdater.java   |    25 +
 .../trident/state/CombinerValueUpdater.java     |    36 +
 .../storm/trident/state/ITupleCollection.java   |    26 +
 .../state/JSONNonTransactionalSerializer.java   |    44 +
 .../trident/state/JSONOpaqueSerializer.java     |    52 +
 .../state/JSONTransactionalSerializer.java      |    50 +
 .../apache/storm/trident/state/OpaqueValue.java |    75 +
 .../storm/trident/state/QueryFunction.java      |    28 +
 .../storm/trident/state/ReadOnlyState.java      |    31 +
 .../trident/state/ReducerValueUpdater.java      |    41 +
 .../apache/storm/trident/state/Serializer.java  |    26 +
 .../org/apache/storm/trident/state/State.java   |    39 +
 .../storm/trident/state/StateFactory.java       |    26 +
 .../apache/storm/trident/state/StateSpec.java   |    30 +
 .../apache/storm/trident/state/StateType.java   |    25 +
 .../storm/trident/state/StateUpdater.java       |    33 +
 .../storm/trident/state/TransactionalValue.java |    44 +
 .../storm/trident/state/ValueUpdater.java       |    23 +
 .../trident/state/map/CachedBatchReadsMap.java  |    80 +
 .../storm/trident/state/map/CachedMap.java      |    78 +
 .../storm/trident/state/map/IBackingMap.java    |    26 +
 .../state/map/MapCombinerAggStateUpdater.java   |    84 +
 .../state/map/MapReducerAggStateUpdater.java    |    91 +
 .../storm/trident/state/map/MapState.java       |    26 +
 .../state/map/MicroBatchIBackingMap.java        |    85 +
 .../trident/state/map/NonTransactionalMap.java  |    67 +
 .../storm/trident/state/map/OpaqueMap.java      |   124 +
 .../trident/state/map/ReadOnlyMapState.java     |    26 +
 .../trident/state/map/RemovableMapState.java    |    25 +
 .../trident/state/map/SnapshottableMap.java     |    76 +
 .../trident/state/map/TransactionalMap.java     |   109 +
 .../state/snapshot/ReadOnlySnapshottable.java   |    24 +
 .../trident/state/snapshot/Snapshottable.java   |    27 +
 .../trident/testing/CountAsAggregator.java      |    47 +
 .../storm/trident/testing/FeederBatchSpout.java |   185 +
 .../testing/FeederCommitterBatchSpout.java      |    96 +
 .../storm/trident/testing/FixedBatchSpout.java  |    97 +
 .../apache/storm/trident/testing/IFeeder.java   |    23 +
 .../trident/testing/LRUMemoryMapState.java      |   154 +
 .../storm/trident/testing/MemoryBackingMap.java |    47 +
 .../storm/trident/testing/MemoryMapState.java   |   176 +
 .../org/apache/storm/trident/testing/Split.java |    36 +
 .../storm/trident/testing/StringLength.java     |    32 +
 .../storm/trident/testing/TrueFilter.java       |    30 +
 .../storm/trident/testing/TuplifyArgs.java      |    37 +
 .../storm/trident/topology/BatchInfo.java       |    33 +
 .../trident/topology/ITridentBatchBolt.java     |    32 +
 .../topology/MasterBatchCoordinator.java        |   289 +
 .../trident/topology/TransactionAttempt.java    |    66 +
 .../trident/topology/TridentBoltExecutor.java   |   435 +
 .../topology/TridentTopologyBuilder.java        |   734 +
 .../state/RotatingTransactionalState.java       |   147 +
 .../topology/state/TestTransactionalState.java  |    47 +
 .../topology/state/TransactionalState.java      |   171 +
 .../apache/storm/trident/tuple/ComboList.java   |    92 +
 .../apache/storm/trident/tuple/ConsList.java    |    44 +
 .../storm/trident/tuple/TridentTuple.java       |    34 +
 .../storm/trident/tuple/TridentTupleView.java   |   361 +
 .../storm/trident/tuple/ValuePointer.java       |    60 +
 .../storm/trident/util/ErrorEdgeFactory.java    |    28 +
 .../apache/storm/trident/util/IndexedEdge.java  |    50 +
 .../org/apache/storm/trident/util/LRUMap.java   |    35 +
 .../apache/storm/trident/util/TridentUtils.java |   117 +
 .../org/apache/storm/tuple/AddressedTuple.java  |    48 +
 .../src/jvm/org/apache/storm/tuple/Fields.java  |   117 +
 .../src/jvm/org/apache/storm/tuple/ITuple.java  |   214 +
 .../jvm/org/apache/storm/tuple/MessageId.java   |    94 +
 .../src/jvm/org/apache/storm/tuple/Tuple.java   |    68 +
 .../jvm/org/apache/storm/tuple/TupleImpl.java   |   356 +
 .../src/jvm/org/apache/storm/tuple/Values.java  |    37 +
 .../storm/ui/InvalidRequestException.java       |    37 +
 .../storm/utils/BufferFileInputStream.java      |    54 +
 .../apache/storm/utils/BufferInputStream.java   |    53 +
 .../apache/storm/utils/CRC32OutputStream.java   |    44 +
 .../apache/storm/utils/ClojureTimerTask.java    |    35 +
 .../jvm/org/apache/storm/utils/Container.java   |    24 +
 .../jvm/org/apache/storm/utils/DRPCClient.java  |    64 +
 .../utils/DisruptorBackpressureCallback.java    |    27 +
 .../org/apache/storm/utils/DisruptorQueue.java  |   544 +
 .../storm/utils/ExtendedThreadPoolExecutor.java |    67 +
 .../storm/utils/IndifferentAccessMap.java       |   177 +
 .../org/apache/storm/utils/InprocMessaging.java |    59 +
 .../storm/utils/KeyedRoundRobinQueue.java       |    68 +
 .../org/apache/storm/utils/ListDelegate.java    |   156 +
 .../jvm/org/apache/storm/utils/LocalState.java  |   189 +
 .../src/jvm/org/apache/storm/utils/Monitor.java |   252 +
 .../jvm/org/apache/storm/utils/MutableInt.java  |    43 +
 .../jvm/org/apache/storm/utils/MutableLong.java |    43 +
 .../org/apache/storm/utils/MutableObject.java   |    38 +
 .../org/apache/storm/utils/NimbusClient.java    |   118 +
 .../utils/NimbusLeaderNotFoundException.java    |    41 +
 .../storm/utils/RegisteredGlobalState.java      |    62 +
 .../jvm/org/apache/storm/utils/RotatingMap.java |   128 +
 .../org/apache/storm/utils/ServiceRegistry.java |    47 +
 .../storm/utils/ShellBoltMessageQueue.java      |   121 +
 .../org/apache/storm/utils/ShellProcess.java    |   210 +
 .../jvm/org/apache/storm/utils/ShellUtils.java  |   505 +
 .../StormBoundedExponentialBackoffRetry.java    |    76 +
 .../jvm/org/apache/storm/utils/TestUtils.java   |    34 +
 .../apache/storm/utils/ThriftTopologyUtils.java |    66 +
 .../src/jvm/org/apache/storm/utils/Time.java    |   119 +
 .../org/apache/storm/utils/TimeCacheMap.java    |   125 +
 .../org/apache/storm/utils/TransferDrainer.java |   132 +
 .../jvm/org/apache/storm/utils/TupleUtils.java  |    46 +
 .../src/jvm/org/apache/storm/utils/Utils.java   |  1373 +
 .../jvm/org/apache/storm/utils/VersionInfo.java |   131 +
 .../org/apache/storm/utils/VersionedStore.java  |   187 +
 .../storm/utils/WindowedTimeThrottler.java      |    51 +
 .../storm/utils/WorkerBackpressureCallback.java |    26 +
 .../storm/utils/WorkerBackpressureThread.java   |    59 +
 .../org/apache/storm/utils/WritableUtils.java   |   375 +
 .../apache/storm/utils/ZookeeperAuthInfo.java   |    53 +
 .../storm/utils/ZookeeperServerCnxnFactory.java |    84 +
 .../storm/validation/ConfigValidation.java      |   700 +
 .../validation/ConfigValidationAnnotations.java |   218 +
 .../storm/validation/ConfigValidationUtils.java |   175 +
 .../storm/windowing/CountEvictionPolicy.java    |    75 +
 .../storm/windowing/CountTriggerPolicy.java     |    68 +
 .../jvm/org/apache/storm/windowing/Event.java   |    49 +
 .../org/apache/storm/windowing/EventImpl.java   |    51 +
 .../apache/storm/windowing/EvictionPolicy.java  |    74 +
 .../storm/windowing/TimeEvictionPolicy.java     |    71 +
 .../storm/windowing/TimeTriggerPolicy.java      |   128 +
 .../apache/storm/windowing/TriggerHandler.java  |    31 +
 .../apache/storm/windowing/TriggerPolicy.java   |    42 +
 .../org/apache/storm/windowing/TupleWindow.java |    26 +
 .../apache/storm/windowing/TupleWindowImpl.java |    61 +
 .../apache/storm/windowing/WaterMarkEvent.java  |    38 +
 .../windowing/WaterMarkEventGenerator.java      |   116 +
 .../windowing/WatermarkCountEvictionPolicy.java |    65 +
 .../windowing/WatermarkCountTriggerPolicy.java  |    83 +
 .../windowing/WatermarkTimeEvictionPolicy.java  |    77 +
 .../windowing/WatermarkTimeTriggerPolicy.java   |   109 +
 .../jvm/org/apache/storm/windowing/Window.java  |    48 +
 .../windowing/WindowLifecycleListener.java      |    42 +
 .../apache/storm/windowing/WindowManager.java   |   289 +
 storm-core/src/jvm/storm/trident/JoinType.java  |    30 -
 storm-core/src/jvm/storm/trident/Stream.java    |   377 -
 .../src/jvm/storm/trident/TridentState.java     |    40 -
 .../src/jvm/storm/trident/TridentTopology.java  |   827 -
 .../trident/drpc/ReturnResultsReducer.java      |   121 -
 .../fluent/ChainedAggregatorDeclarer.java       |   183 -
 .../fluent/ChainedFullAggregatorDeclarer.java   |    32 -
 .../ChainedPartitionAggregatorDeclarer.java     |    32 -
 .../trident/fluent/GlobalAggregationScheme.java |    26 -
 .../jvm/storm/trident/fluent/GroupedStream.java |   174 -
 .../trident/fluent/IAggregatableStream.java     |    31 -
 .../fluent/IChainedAggregatorDeclarer.java      |    24 -
 .../jvm/storm/trident/fluent/UniqueIdGen.java   |    34 -
 .../jvm/storm/trident/graph/GraphGrouper.java   |   119 -
 .../src/jvm/storm/trident/graph/Group.java      |    84 -
 .../jvm/storm/trident/operation/Aggregator.java |    26 -
 .../jvm/storm/trident/operation/Assembly.java   |    25 -
 .../storm/trident/operation/BaseAggregator.java |    23 -
 .../jvm/storm/trident/operation/BaseFilter.java |    23 -
 .../storm/trident/operation/BaseFunction.java   |    23 -
 .../trident/operation/BaseMultiReducer.java     |    33 -
 .../storm/trident/operation/BaseOperation.java  |    32 -
 .../trident/operation/CombinerAggregator.java   |    29 -
 .../storm/trident/operation/EachOperation.java  |    22 -
 .../src/jvm/storm/trident/operation/Filter.java |    24 -
 .../jvm/storm/trident/operation/Function.java   |    24 -
 .../trident/operation/GroupedMultiReducer.java  |    31 -
 .../storm/trident/operation/MultiReducer.java   |    31 -
 .../jvm/storm/trident/operation/Operation.java  |    26 -
 .../trident/operation/ReducerAggregator.java    |    26 -
 .../trident/operation/TridentCollector.java     |    26 -
 .../operation/TridentMultiReducerContext.java   |    36 -
 .../operation/TridentOperationContext.java      |    65 -
 .../storm/trident/operation/builtin/Count.java  |    41 -
 .../storm/trident/operation/builtin/Debug.java  |    39 -
 .../storm/trident/operation/builtin/Equals.java |    38 -
 .../trident/operation/builtin/FilterNull.java   |    31 -
 .../storm/trident/operation/builtin/FirstN.java |   125 -
 .../storm/trident/operation/builtin/MapGet.java |    38 -
 .../storm/trident/operation/builtin/Negate.java |    48 -
 .../trident/operation/builtin/SnapshotGet.java  |    44 -
 .../storm/trident/operation/builtin/Sum.java    |    42 -
 .../operation/builtin/TupleCollectionGet.java   |    46 -
 .../operation/impl/CaptureCollector.java        |    42 -
 .../operation/impl/ChainedAggregatorImpl.java   |   113 -
 .../trident/operation/impl/ChainedResult.java   |    53 -
 .../operation/impl/CombinerAggStateUpdater.java |    56 -
 .../impl/CombinerAggregatorCombineImpl.java     |    61 -
 .../impl/CombinerAggregatorInitImpl.java        |    49 -
 .../trident/operation/impl/FilterExecutor.java  |    53 -
 .../operation/impl/GlobalBatchToPartition.java  |    29 -
 .../trident/operation/impl/GroupCollector.java  |    48 -
 .../operation/impl/GroupedAggregator.java       |    96 -
 .../impl/GroupedMultiReducerExecutor.java       |    95 -
 .../operation/impl/IdentityMultiReducer.java    |    51 -
 .../impl/IndexHashBatchToPartition.java         |    29 -
 .../operation/impl/JoinerMultiReducer.java      |   159 -
 .../operation/impl/ReducerAggStateUpdater.java  |    53 -
 .../operation/impl/ReducerAggregatorImpl.java   |    56 -
 .../storm/trident/operation/impl/Result.java    |    27 -
 .../operation/impl/SingleEmitAggregator.java    |    95 -
 .../trident/operation/impl/TrueFilter.java      |    40 -
 .../storm/trident/partition/GlobalGrouping.java |    42 -
 .../trident/partition/IdentityGrouping.java     |    59 -
 .../trident/partition/IndexHashGrouping.java    |    53 -
 .../storm/trident/planner/BridgeReceiver.java   |    38 -
 .../src/jvm/storm/trident/planner/Node.java     |    67 -
 .../storm/trident/planner/NodeStateInfo.java    |    31 -
 .../storm/trident/planner/PartitionNode.java    |    50 -
 .../storm/trident/planner/ProcessorContext.java |    29 -
 .../storm/trident/planner/ProcessorNode.java    |    33 -
 .../jvm/storm/trident/planner/SpoutNode.java    |    39 -
 .../storm/trident/planner/SubtopologyBolt.java  |   217 -
 .../storm/trident/planner/TridentProcessor.java |    40 -
 .../storm/trident/planner/TupleReceiver.java    |    27 -
 .../planner/processor/AggregateProcessor.java   |    84 -
 .../planner/processor/AppendCollector.java      |    62 -
 .../planner/processor/EachProcessor.java        |    80 -
 .../planner/processor/FreshCollector.java       |    59 -
 .../processor/MultiReducerProcessor.java        |    93 -
 .../processor/PartitionPersistProcessor.java    |   107 -
 .../planner/processor/ProjectedProcessor.java   |    73 -
 .../planner/processor/StateQueryProcessor.java  |   107 -
 .../planner/processor/TridentContext.java       |    76 -
 .../storm/trident/spout/BatchSpoutExecutor.java |    92 -
 .../src/jvm/storm/trident/spout/IBatchID.java   |    24 -
 .../jvm/storm/trident/spout/IBatchSpout.java    |    33 -
 .../trident/spout/ICommitterTridentSpout.java   |    31 -
 .../spout/IOpaquePartitionedTridentSpout.java   |    63 -
 .../trident/spout/IPartitionedTridentSpout.java |    77 -
 .../storm/trident/spout/ISpoutPartition.java    |    25 -
 .../storm/trident/spout/ITridentDataSource.java |    26 -
 .../jvm/storm/trident/spout/ITridentSpout.java  |   125 -
 .../OpaquePartitionedTridentSpoutExecutor.java  |   201 -
 .../spout/PartitionedTridentSpoutExecutor.java  |   171 -
 .../trident/spout/RichSpoutBatchExecutor.java   |   204 -
 .../storm/trident/spout/RichSpoutBatchId.java   |    49 -
 .../spout/RichSpoutBatchIdSerializer.java       |    38 -
 .../trident/spout/RichSpoutBatchTriggerer.java  |   182 -
 .../trident/spout/TridentSpoutCoordinator.java  |    94 -
 .../trident/spout/TridentSpoutExecutor.java     |   138 -
 .../storm/trident/state/BaseQueryFunction.java  |    25 -
 .../storm/trident/state/BaseStateUpdater.java   |    25 -
 .../trident/state/CombinerValueUpdater.java     |    36 -
 .../storm/trident/state/ITupleCollection.java   |    26 -
 .../state/JSONNonTransactionalSerializer.java   |    44 -
 .../trident/state/JSONOpaqueSerializer.java     |    52 -
 .../state/JSONTransactionalSerializer.java      |    50 -
 .../jvm/storm/trident/state/OpaqueValue.java    |    75 -
 .../jvm/storm/trident/state/QueryFunction.java  |    28 -
 .../jvm/storm/trident/state/ReadOnlyState.java  |    31 -
 .../trident/state/ReducerValueUpdater.java      |    41 -
 .../src/jvm/storm/trident/state/Serializer.java |    26 -
 .../src/jvm/storm/trident/state/State.java      |    39 -
 .../jvm/storm/trident/state/StateFactory.java   |    26 -
 .../src/jvm/storm/trident/state/StateSpec.java  |    30 -
 .../src/jvm/storm/trident/state/StateType.java  |    25 -
 .../jvm/storm/trident/state/StateUpdater.java   |    33 -
 .../storm/trident/state/TransactionalValue.java |    44 -
 .../jvm/storm/trident/state/ValueUpdater.java   |    23 -
 .../trident/state/map/CachedBatchReadsMap.java  |    80 -
 .../jvm/storm/trident/state/map/CachedMap.java  |    78 -
 .../storm/trident/state/map/IBackingMap.java    |    26 -
 .../state/map/MapCombinerAggStateUpdater.java   |    84 -
 .../state/map/MapReducerAggStateUpdater.java    |    91 -
 .../jvm/storm/trident/state/map/MapState.java   |    26 -
 .../state/map/MicroBatchIBackingMap.java        |    85 -
 .../trident/state/map/NonTransactionalMap.java  |    67 -
 .../jvm/storm/trident/state/map/OpaqueMap.java  |   124 -
 .../trident/state/map/ReadOnlyMapState.java     |    26 -
 .../trident/state/map/RemovableMapState.java    |    25 -
 .../trident/state/map/SnapshottableMap.java     |    76 -
 .../trident/state/map/TransactionalMap.java     |   109 -
 .../state/snapshot/ReadOnlySnapshottable.java   |    24 -
 .../trident/state/snapshot/Snapshottable.java   |    27 -
 .../trident/testing/CountAsAggregator.java      |    47 -
 .../storm/trident/testing/FeederBatchSpout.java |   185 -
 .../testing/FeederCommitterBatchSpout.java      |    96 -
 .../storm/trident/testing/FixedBatchSpout.java  |    97 -
 .../src/jvm/storm/trident/testing/IFeeder.java  |    23 -
 .../trident/testing/LRUMemoryMapState.java      |   154 -
 .../storm/trident/testing/MemoryBackingMap.java |    47 -
 .../storm/trident/testing/MemoryMapState.java   |   176 -
 .../src/jvm/storm/trident/testing/Split.java    |    36 -
 .../jvm/storm/trident/testing/StringLength.java |    32 -
 .../jvm/storm/trident/testing/TrueFilter.java   |    30 -
 .../jvm/storm/trident/testing/TuplifyArgs.java  |    37 -
 .../jvm/storm/trident/topology/BatchInfo.java   |    33 -
 .../trident/topology/ITridentBatchBolt.java     |    32 -
 .../topology/MasterBatchCoordinator.java        |   289 -
 .../trident/topology/TransactionAttempt.java    |    66 -
 .../trident/topology/TridentBoltExecutor.java   |   435 -
 .../topology/TridentTopologyBuilder.java        |   734 -
 .../state/RotatingTransactionalState.java       |   147 -
 .../topology/state/TestTransactionalState.java  |    47 -
 .../topology/state/TransactionalState.java      |   171 -
 .../src/jvm/storm/trident/tuple/ComboList.java  |    92 -
 .../src/jvm/storm/trident/tuple/ConsList.java   |    44 -
 .../jvm/storm/trident/tuple/TridentTuple.java   |    34 -
 .../storm/trident/tuple/TridentTupleView.java   |   361 -
 .../jvm/storm/trident/tuple/ValuePointer.java   |    60 -
 .../storm/trident/util/ErrorEdgeFactory.java    |    28 -
 .../src/jvm/storm/trident/util/IndexedEdge.java |    50 -
 .../src/jvm/storm/trident/util/LRUMap.java      |    35 -
 .../jvm/storm/trident/util/TridentUtils.java    |   117 -
 storm-core/src/storm.thrift                     |     4 +-
 .../test/clj/backtype/storm/clojure_test.clj    |   145 -
 .../test/clj/backtype/storm/cluster_test.clj    |   321 -
 .../test/clj/backtype/storm/drpc_test.clj       |   241 -
 .../test/clj/backtype/storm/fields_test.clj     |    59 -
 .../test/clj/backtype/storm/grouping_test.clj   |   150 -
 .../clj/backtype/storm/local_state_test.clj     |    55 -
 .../test/clj/backtype/storm/logviewer_test.clj  |   765 -
 .../storm/messaging/netty_integration_test.clj  |    57 -
 .../storm/messaging/netty_unit_test.clj         |   327 -
 .../test/clj/backtype/storm/messaging_test.clj  |    63 -
 .../test/clj/backtype/storm/metrics_test.clj    |   359 -
 .../test/clj/backtype/storm/nimbus_test.clj     |  1516 -
 .../scheduler/multitenant_scheduler_test.clj    |   859 -
 .../scheduler/resource_aware_scheduler_test.clj |   734 -
 .../test/clj/backtype/storm/scheduler_test.clj  |   281 -
 .../storm/security/auth/AuthUtils_test.clj      |    75 -
 .../auth/DefaultHttpCredentialsPlugin_test.clj  |    75 -
 .../storm/security/auth/ReqContext_test.clj     |    73 -
 .../security/auth/SaslTransportPlugin_test.clj  |    43 -
 .../storm/security/auth/ThriftClient_test.clj   |    60 -
 .../storm/security/auth/ThriftServer_test.clj   |    31 -
 .../backtype/storm/security/auth/auth_test.clj  |   460 -
 .../authorizer/DRPCSimpleACLAuthorizer_test.clj |   241 -
 .../security/auth/auto_login_module_test.clj    |    91 -
 .../storm/security/auth/drpc-auth-alice.jaas    |    22 -
 .../storm/security/auth/drpc-auth-bob.jaas      |    22 -
 .../storm/security/auth/drpc-auth-charlie.jaas  |    22 -
 .../storm/security/auth/drpc-auth-server.jaas   |    23 -
 .../storm/security/auth/drpc_auth_test.clj      |   312 -
 .../storm/security/auth/jaas_digest.conf        |    30 -
 .../security/auth/jaas_digest_bad_password.conf |    31 -
 .../auth/jaas_digest_missing_client.conf        |    23 -
 .../security/auth/jaas_digest_unknown_user.conf |    31 -
 .../storm/security/auth/nimbus_auth_test.clj    |   179 -
 .../BlowfishTupleSerializer_test.clj            |    78 -
 .../serialization/SerializationFactory_test.clj |    54 -
 .../clj/backtype/storm/serialization_test.clj   |    98 -
 .../test/clj/backtype/storm/submitter_test.clj  |    75 -
 .../clj/backtype/storm/subtopology_test.clj     |    62 -
 .../test/clj/backtype/storm/supervisor_test.clj |   734 -
 .../test/clj/backtype/storm/tick_tuple_test.clj |    49 -
 .../clj/backtype/storm/transactional_test.clj   |   736 -
 .../test/clj/backtype/storm/tuple_test.clj      |    51 -
 .../utils/ZookeeperServerCnxnFactory_test.clj   |    35 -
 .../test/clj/backtype/storm/utils_test.clj      |   110 -
 .../clj/backtype/storm/versioned_store_test.clj |    45 -
 .../test/clj/backtype/storm/worker_test.clj     |   206 -
 .../backtype/storm/integration_test.clj         |   622 -
 .../backtype/storm/testing4j_test.clj           |   212 -
 .../org/apache/storm/integration_test.clj       |   622 +
 .../org/apache/storm/testing4j_test.clj         |   212 +
 .../apache/storm/trident/integration_test.clj   |   292 +
 .../storm/trident/integration_test.clj          |   292 -
 .../test/clj/org/apache/storm/clojure_test.clj  |   145 +
 .../test/clj/org/apache/storm/cluster_test.clj  |   321 +
 .../test/clj/org/apache/storm/drpc_test.clj     |   241 +
 .../test/clj/org/apache/storm/fields_test.clj   |    59 +
 .../test/clj/org/apache/storm/grouping_test.clj |   150 +
 .../clj/org/apache/storm/local_state_test.clj   |    55 +
 .../clj/org/apache/storm/logviewer_test.clj     |   765 +
 .../storm/messaging/netty_integration_test.clj  |    57 +
 .../apache/storm/messaging/netty_unit_test.clj  |   327 +
 .../clj/org/apache/storm/messaging_test.clj     |    63 +
 .../test/clj/org/apache/storm/metrics_test.clj  |   359 +
 .../test/clj/org/apache/storm/nimbus_test.clj   |  1516 +
 .../storm/pacemaker_state_factory_test.clj      |     4 +-
 .../clj/org/apache/storm/pacemaker_test.clj     |     2 +-
 .../scheduler/multitenant_scheduler_test.clj    |   859 +
 .../scheduler/resource_aware_scheduler_test.clj |   734 +
 .../clj/org/apache/storm/scheduler_test.clj     |   281 +
 .../storm/security/auth/AuthUtils_test.clj      |    75 +
 .../auth/DefaultHttpCredentialsPlugin_test.clj  |    75 +
 .../storm/security/auth/ReqContext_test.clj     |    73 +
 .../security/auth/SaslTransportPlugin_test.clj  |    43 +
 .../storm/security/auth/ThriftClient_test.clj   |    60 +
 .../storm/security/auth/ThriftServer_test.clj   |    31 +
 .../apache/storm/security/auth/auth_test.clj    |   460 +
 .../authorizer/DRPCSimpleACLAuthorizer_test.clj |   241 +
 .../security/auth/auto_login_module_test.clj    |    91 +
 .../storm/security/auth/drpc-auth-alice.jaas    |    22 +
 .../storm/security/auth/drpc-auth-bob.jaas      |    22 +
 .../storm/security/auth/drpc-auth-charlie.jaas  |    22 +
 .../storm/security/auth/drpc-auth-server.jaas   |    23 +
 .../storm/security/auth/drpc_auth_test.clj      |   312 +
 .../apache/storm/security/auth/jaas_digest.conf |    30 +
 .../security/auth/jaas_digest_bad_password.conf |    31 +
 .../auth/jaas_digest_missing_client.conf        |    23 +
 .../security/auth/jaas_digest_unknown_user.conf |    31 +
 .../storm/security/auth/nimbus_auth_test.clj    |   179 +
 .../BlowfishTupleSerializer_test.clj            |    78 +
 .../serialization/SerializationFactory_test.clj |    54 +
 .../clj/org/apache/storm/serialization_test.clj |    98 +
 .../clj/org/apache/storm/submitter_test.clj     |    75 +
 .../clj/org/apache/storm/subtopology_test.clj   |    62 +
 .../clj/org/apache/storm/supervisor_test.clj    |   734 +
 .../clj/org/apache/storm/tick_tuple_test.clj    |    49 +
 .../clj/org/apache/storm/transactional_test.clj |   736 +
 .../clj/org/apache/storm/trident/state_test.clj |   150 +
 .../clj/org/apache/storm/trident/tuple_test.clj |   122 +
 .../test/clj/org/apache/storm/tuple_test.clj    |    51 +
 .../utils/ZookeeperServerCnxnFactory_test.clj   |    35 +
 .../test/clj/org/apache/storm/utils_test.clj    |   110 +
 .../org/apache/storm/versioned_store_test.clj   |    45 +
 .../test/clj/org/apache/storm/worker_test.clj   |   206 +
 .../test/clj/storm/trident/state_test.clj       |   150 -
 .../test/clj/storm/trident/tuple_test.clj       |   122 -
 .../jvm/backtype/storm/TestConfigValidate.java  |   744 -
 .../backtype/storm/blobstore/BlobStoreTest.java |   460 -
 .../storm/blobstore/BlobSynchronizerTest.java   |   137 -
 .../storm/blobstore/ClientBlobStoreTest.java    |   179 -
 .../storm/grouping/PartialKeyGroupingTest.java  |    66 -
 .../LocalizedResourceRetentionSetTest.java      |    85 -
 .../localizer/LocalizedResourceSetTest.java     |    74 -
 .../backtype/storm/localizer/LocalizerTest.java |   667 -
 .../jvm/backtype/storm/localizer/localtest.zip  |   Bin 6378 -> 0 bytes
 .../storm/localizer/localtestwithsymlink.jar    |   Bin 6591 -> 0 bytes
 .../storm/localizer/localtestwithsymlink.tar    |   Bin 24576 -> 0 bytes
 .../storm/localizer/localtestwithsymlink.tar.gz |   Bin 6106 -> 0 bytes
 .../storm/localizer/localtestwithsymlink.tgz    |   Bin 6106 -> 0 bytes
 .../metric/internal/CountStatAndMetricTest.java |    86 -
 .../internal/LatencyStatAndMetricTest.java      |    83 -
 .../storm/metric/internal/RateTrackerTest.java  |    94 -
 .../nimbus/InMemoryTopologyActionNotifier.java  |    53 -
 .../resource/TestResourceAwareScheduler.java    |  1227 -
 .../storm/scheduler/resource/TestUser.java      |   111 -
 .../TestUtilsForResourceAwareScheduler.java     |   288 -
 .../GzipBridgeSerializationDelegateTest.java    |    82 -
 ...ipBridgeThriftSerializationDelegateTest.java |    71 -
 .../ThriftBridgeSerializationDelegateTest.java  |    60 -
 .../storm/topology/TopologyBuilderTest.java     |    53 -
 .../topology/WindowedBoltExecutorTest.java      |   142 -
 .../utils/DisruptorQueueBackpressureTest.java   |   110 -
 .../storm/utils/DisruptorQueueTest.java         |   187 -
 .../backtype/storm/utils/MockTupleHelpers.java  |    40 -
 .../storm/utils/ShellBoltMessageQueueTest.java  |    85 -
 ...StormBoundedExponentialBackoffRetryTest.java |   101 -
 .../storm/utils/ThriftTopologyUtilsTest.java    |    94 -
 .../windowing/WaterMarkEventGeneratorTest.java  |   117 -
 .../storm/windowing/WindowManagerTest.java      |   494 -
 .../org/apache/storm/TestConfigValidate.java    |   744 +
 .../apache/storm/blobstore/BlobStoreTest.java   |   460 +
 .../storm/blobstore/BlobSynchronizerTest.java   |   137 +
 .../storm/blobstore/ClientBlobStoreTest.java    |   179 +
 .../storm/grouping/PartialKeyGroupingTest.java  |    66 +
 .../LocalizedResourceRetentionSetTest.java      |    85 +
 .../localizer/LocalizedResourceSetTest.java     |    74 +
 .../apache/storm/localizer/LocalizerTest.java   |   667 +
 .../org/apache/storm/localizer/localtest.zip    |   Bin 0 -> 6378 bytes
 .../storm/localizer/localtestwithsymlink.jar    |   Bin 0 -> 6591 bytes
 .../storm/localizer/localtestwithsymlink.tar    |   Bin 0 -> 24576 bytes
 .../storm/localizer/localtestwithsymlink.tar.gz |   Bin 0 -> 6106 bytes
 .../storm/localizer/localtestwithsymlink.tgz    |   Bin 0 -> 6106 bytes
 .../metric/internal/CountStatAndMetricTest.java |    86 +
 .../internal/LatencyStatAndMetricTest.java      |    83 +
 .../storm/metric/internal/RateTrackerTest.java  |    94 +
 .../nimbus/InMemoryTopologyActionNotifier.java  |    53 +
 .../resource/TestResourceAwareScheduler.java    |  1227 +
 .../storm/scheduler/resource/TestUser.java      |   111 +
 .../TestUtilsForResourceAwareScheduler.java     |   288 +
 .../GzipBridgeSerializationDelegateTest.java    |    82 +
 ...ipBridgeThriftSerializationDelegateTest.java |    71 +
 .../ThriftBridgeSerializationDelegateTest.java  |    60 +
 .../storm/topology/TopologyBuilderTest.java     |    53 +
 .../topology/WindowedBoltExecutorTest.java      |   142 +
 .../utils/DisruptorQueueBackpressureTest.java   |   110 +
 .../apache/storm/utils/DisruptorQueueTest.java  |   187 +
 .../apache/storm/utils/MockTupleHelpers.java    |    40 +
 .../storm/utils/ShellBoltMessageQueueTest.java  |    85 +
 ...StormBoundedExponentialBackoffRetryTest.java |   101 +
 .../storm/utils/ThriftTopologyUtilsTest.java    |    94 +
 .../windowing/WaterMarkEventGeneratorTest.java  |   117 +
 .../storm/windowing/WindowManagerTest.java      |   494 +
 .../jvm/storm/trident/TestTridentTopology.java  |    20 +-
 .../storm/hack/StormShadeTransformer.java       |     2 +-
 2242 files changed, 215608 insertions(+), 215608 deletions(-)
----------------------------------------------------------------------
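
(Editor's illustration, not part of the commit above.) The listing shows the sources relocating from the backtype.storm and storm.trident packages to org.apache.storm and org.apache.storm.trident. As a minimal sketch only, assuming the standard bolt API that appears in the listing (BaseRichBolt, Tuple, OutputCollector, Fields), user code compiled against the migrated packages would import the new names like this; the bolt class itself is hypothetical:

    import org.apache.storm.task.OutputCollector;
    import org.apache.storm.task.TopologyContext;
    import org.apache.storm.topology.OutputFieldsDeclarer;
    import org.apache.storm.topology.base.BaseRichBolt;
    import org.apache.storm.tuple.Fields;
    import org.apache.storm.tuple.Tuple;
    import org.apache.storm.tuple.Values;

    import java.util.Map;

    // Hypothetical example bolt; only the org.apache.storm imports are the point.
    public class UppercaseBolt extends BaseRichBolt {
        private OutputCollector collector;

        @Override
        public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
            this.collector = collector;
        }

        @Override
        public void execute(Tuple tuple) {
            // Emit the first field upper-cased, anchored to the input tuple, then ack it.
            collector.emit(tuple, new Values(tuple.getString(0).toUpperCase()));
            collector.ack(tuple);
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            declarer.declare(new Fields("word"));
        }
    }

Code previously importing backtype.storm.* would keep compiling only through whatever compatibility layer the commit message refers to; the old storm-core/src/jvm/backtype and storm-core/src/jvm/storm/trident trees themselves are deleted in this change, as the minus counts above indicate.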



[05/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/generated/Bolt.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/generated/Bolt.java b/storm-core/src/jvm/backtype/storm/generated/Bolt.java
deleted file mode 100644
index df1a007..0000000
--- a/storm-core/src/jvm/backtype/storm/generated/Bolt.java
+++ /dev/null
@@ -1,514 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Autogenerated by Thrift Compiler (0.9.3)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- *  @generated
- */
-package backtype.storm.generated;
-
-import org.apache.thrift.scheme.IScheme;
-import org.apache.thrift.scheme.SchemeFactory;
-import org.apache.thrift.scheme.StandardScheme;
-
-import org.apache.thrift.scheme.TupleScheme;
-import org.apache.thrift.protocol.TTupleProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.TException;
-import org.apache.thrift.async.AsyncMethodCallback;
-import org.apache.thrift.server.AbstractNonblockingServer.*;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.EnumMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.EnumSet;
-import java.util.Collections;
-import java.util.BitSet;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import javax.annotation.Generated;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
-public class Bolt implements org.apache.thrift.TBase<Bolt, Bolt._Fields>, java.io.Serializable, Cloneable, Comparable<Bolt> {
-  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Bolt");
-
-  private static final org.apache.thrift.protocol.TField BOLT_OBJECT_FIELD_DESC = new org.apache.thrift.protocol.TField("bolt_object", org.apache.thrift.protocol.TType.STRUCT, (short)1);
-  private static final org.apache.thrift.protocol.TField COMMON_FIELD_DESC = new org.apache.thrift.protocol.TField("common", org.apache.thrift.protocol.TType.STRUCT, (short)2);
-
-  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
-  static {
-    schemes.put(StandardScheme.class, new BoltStandardSchemeFactory());
-    schemes.put(TupleScheme.class, new BoltTupleSchemeFactory());
-  }
-
-  private ComponentObject bolt_object; // required
-  private ComponentCommon common; // required
-
-  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    BOLT_OBJECT((short)1, "bolt_object"),
-    COMMON((short)2, "common");
-
-    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
-
-    static {
-      for (_Fields field : EnumSet.allOf(_Fields.class)) {
-        byName.put(field.getFieldName(), field);
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, or null if its not found.
-     */
-    public static _Fields findByThriftId(int fieldId) {
-      switch(fieldId) {
-        case 1: // BOLT_OBJECT
-          return BOLT_OBJECT;
-        case 2: // COMMON
-          return COMMON;
-        default:
-          return null;
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, throwing an exception
-     * if it is not found.
-     */
-    public static _Fields findByThriftIdOrThrow(int fieldId) {
-      _Fields fields = findByThriftId(fieldId);
-      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-      return fields;
-    }
-
-    /**
-     * Find the _Fields constant that matches name, or null if its not found.
-     */
-    public static _Fields findByName(String name) {
-      return byName.get(name);
-    }
-
-    private final short _thriftId;
-    private final String _fieldName;
-
-    _Fields(short thriftId, String fieldName) {
-      _thriftId = thriftId;
-      _fieldName = fieldName;
-    }
-
-    public short getThriftFieldId() {
-      return _thriftId;
-    }
-
-    public String getFieldName() {
-      return _fieldName;
-    }
-  }
-
-  // isset id assignments
-  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
-  static {
-    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.BOLT_OBJECT, new org.apache.thrift.meta_data.FieldMetaData("bolt_object", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ComponentObject.class)));
-    tmpMap.put(_Fields.COMMON, new org.apache.thrift.meta_data.FieldMetaData("common", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ComponentCommon.class)));
-    metaDataMap = Collections.unmodifiableMap(tmpMap);
-    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Bolt.class, metaDataMap);
-  }
-
-  public Bolt() {
-  }
-
-  public Bolt(
-    ComponentObject bolt_object,
-    ComponentCommon common)
-  {
-    this();
-    this.bolt_object = bolt_object;
-    this.common = common;
-  }
-
-  /**
-   * Performs a deep copy on <i>other</i>.
-   */
-  public Bolt(Bolt other) {
-    if (other.is_set_bolt_object()) {
-      this.bolt_object = new ComponentObject(other.bolt_object);
-    }
-    if (other.is_set_common()) {
-      this.common = new ComponentCommon(other.common);
-    }
-  }
-
-  public Bolt deepCopy() {
-    return new Bolt(this);
-  }
-
-  @Override
-  public void clear() {
-    this.bolt_object = null;
-    this.common = null;
-  }
-
-  public ComponentObject get_bolt_object() {
-    return this.bolt_object;
-  }
-
-  public void set_bolt_object(ComponentObject bolt_object) {
-    this.bolt_object = bolt_object;
-  }
-
-  public void unset_bolt_object() {
-    this.bolt_object = null;
-  }
-
-  /** Returns true if field bolt_object is set (has been assigned a value) and false otherwise */
-  public boolean is_set_bolt_object() {
-    return this.bolt_object != null;
-  }
-
-  public void set_bolt_object_isSet(boolean value) {
-    if (!value) {
-      this.bolt_object = null;
-    }
-  }
-
-  public ComponentCommon get_common() {
-    return this.common;
-  }
-
-  public void set_common(ComponentCommon common) {
-    this.common = common;
-  }
-
-  public void unset_common() {
-    this.common = null;
-  }
-
-  /** Returns true if field common is set (has been assigned a value) and false otherwise */
-  public boolean is_set_common() {
-    return this.common != null;
-  }
-
-  public void set_common_isSet(boolean value) {
-    if (!value) {
-      this.common = null;
-    }
-  }
-
-  public void setFieldValue(_Fields field, Object value) {
-    switch (field) {
-    case BOLT_OBJECT:
-      if (value == null) {
-        unset_bolt_object();
-      } else {
-        set_bolt_object((ComponentObject)value);
-      }
-      break;
-
-    case COMMON:
-      if (value == null) {
-        unset_common();
-      } else {
-        set_common((ComponentCommon)value);
-      }
-      break;
-
-    }
-  }
-
-  public Object getFieldValue(_Fields field) {
-    switch (field) {
-    case BOLT_OBJECT:
-      return get_bolt_object();
-
-    case COMMON:
-      return get_common();
-
-    }
-    throw new IllegalStateException();
-  }
-
-  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
-  public boolean isSet(_Fields field) {
-    if (field == null) {
-      throw new IllegalArgumentException();
-    }
-
-    switch (field) {
-    case BOLT_OBJECT:
-      return is_set_bolt_object();
-    case COMMON:
-      return is_set_common();
-    }
-    throw new IllegalStateException();
-  }
-
-  @Override
-  public boolean equals(Object that) {
-    if (that == null)
-      return false;
-    if (that instanceof Bolt)
-      return this.equals((Bolt)that);
-    return false;
-  }
-
-  public boolean equals(Bolt that) {
-    if (that == null)
-      return false;
-
-    boolean this_present_bolt_object = true && this.is_set_bolt_object();
-    boolean that_present_bolt_object = true && that.is_set_bolt_object();
-    if (this_present_bolt_object || that_present_bolt_object) {
-      if (!(this_present_bolt_object && that_present_bolt_object))
-        return false;
-      if (!this.bolt_object.equals(that.bolt_object))
-        return false;
-    }
-
-    boolean this_present_common = true && this.is_set_common();
-    boolean that_present_common = true && that.is_set_common();
-    if (this_present_common || that_present_common) {
-      if (!(this_present_common && that_present_common))
-        return false;
-      if (!this.common.equals(that.common))
-        return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    List<Object> list = new ArrayList<Object>();
-
-    boolean present_bolt_object = true && (is_set_bolt_object());
-    list.add(present_bolt_object);
-    if (present_bolt_object)
-      list.add(bolt_object);
-
-    boolean present_common = true && (is_set_common());
-    list.add(present_common);
-    if (present_common)
-      list.add(common);
-
-    return list.hashCode();
-  }
-
-  @Override
-  public int compareTo(Bolt other) {
-    if (!getClass().equals(other.getClass())) {
-      return getClass().getName().compareTo(other.getClass().getName());
-    }
-
-    int lastComparison = 0;
-
-    lastComparison = Boolean.valueOf(is_set_bolt_object()).compareTo(other.is_set_bolt_object());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_bolt_object()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.bolt_object, other.bolt_object);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_common()).compareTo(other.is_set_common());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_common()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.common, other.common);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    return 0;
-  }
-
-  public _Fields fieldForId(int fieldId) {
-    return _Fields.findByThriftId(fieldId);
-  }
-
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
-  }
-
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("Bolt(");
-    boolean first = true;
-
-    sb.append("bolt_object:");
-    if (this.bolt_object == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.bolt_object);
-    }
-    first = false;
-    if (!first) sb.append(", ");
-    sb.append("common:");
-    if (this.common == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.common);
-    }
-    first = false;
-    sb.append(")");
-    return sb.toString();
-  }
-
-  public void validate() throws org.apache.thrift.TException {
-    // check for required fields
-    if (!is_set_bolt_object()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'bolt_object' is unset! Struct:" + toString());
-    }
-
-    if (!is_set_common()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'common' is unset! Struct:" + toString());
-    }
-
-    // check for sub-struct validity
-    if (common != null) {
-      common.validate();
-    }
-  }
-
-  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
-    try {
-      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
-    try {
-      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private static class BoltStandardSchemeFactory implements SchemeFactory {
-    public BoltStandardScheme getScheme() {
-      return new BoltStandardScheme();
-    }
-  }
-
-  private static class BoltStandardScheme extends StandardScheme<Bolt> {
-
-    public void read(org.apache.thrift.protocol.TProtocol iprot, Bolt struct) throws org.apache.thrift.TException {
-      org.apache.thrift.protocol.TField schemeField;
-      iprot.readStructBegin();
-      while (true)
-      {
-        schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
-          break;
-        }
-        switch (schemeField.id) {
-          case 1: // BOLT_OBJECT
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-              struct.bolt_object = new ComponentObject();
-              struct.bolt_object.read(iprot);
-              struct.set_bolt_object_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 2: // COMMON
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-              struct.common = new ComponentCommon();
-              struct.common.read(iprot);
-              struct.set_common_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          default:
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-        }
-        iprot.readFieldEnd();
-      }
-      iprot.readStructEnd();
-      struct.validate();
-    }
-
-    public void write(org.apache.thrift.protocol.TProtocol oprot, Bolt struct) throws org.apache.thrift.TException {
-      struct.validate();
-
-      oprot.writeStructBegin(STRUCT_DESC);
-      if (struct.bolt_object != null) {
-        oprot.writeFieldBegin(BOLT_OBJECT_FIELD_DESC);
-        struct.bolt_object.write(oprot);
-        oprot.writeFieldEnd();
-      }
-      if (struct.common != null) {
-        oprot.writeFieldBegin(COMMON_FIELD_DESC);
-        struct.common.write(oprot);
-        oprot.writeFieldEnd();
-      }
-      oprot.writeFieldStop();
-      oprot.writeStructEnd();
-    }
-
-  }
-
-  private static class BoltTupleSchemeFactory implements SchemeFactory {
-    public BoltTupleScheme getScheme() {
-      return new BoltTupleScheme();
-    }
-  }
-
-  private static class BoltTupleScheme extends TupleScheme<Bolt> {
-
-    @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, Bolt struct) throws org.apache.thrift.TException {
-      TTupleProtocol oprot = (TTupleProtocol) prot;
-      struct.bolt_object.write(oprot);
-      struct.common.write(oprot);
-    }
-
-    @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, Bolt struct) throws org.apache.thrift.TException {
-      TTupleProtocol iprot = (TTupleProtocol) prot;
-      struct.bolt_object = new ComponentObject();
-      struct.bolt_object.read(iprot);
-      struct.set_bolt_object_isSet(true);
-      struct.common = new ComponentCommon();
-      struct.common.read(iprot);
-      struct.set_common_isSet(true);
-    }
-  }
-
-}
-
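
The deleted Bolt struct above is Thrift-generated, with bolt_object and common both marked required; validate() enforces that before serialization. A minimal, hedged sketch of that contract follows, assuming the class now lives under org.apache.storm.generated after this migration (it is otherwise identical to the backtype.storm.generated version shown).

    // Sketch only: demonstrates the required-field check in Bolt.validate()
    // exactly as implemented in the generated code above.
    import org.apache.storm.generated.Bolt;
    import org.apache.storm.generated.ComponentObject;

    public class BoltValidationSketch {
        public static void main(String[] args) {
            Bolt bolt = new Bolt();
            bolt.set_bolt_object(new ComponentObject());
            try {
                bolt.validate(); // 'common' is still unset, so this throws
            } catch (org.apache.thrift.TException expected) {
                // "Required field 'common' is unset! Struct:Bolt(...)"
                System.out.println(expected.getMessage());
            }
        }
    }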

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/generated/BoltAggregateStats.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/generated/BoltAggregateStats.java b/storm-core/src/jvm/backtype/storm/generated/BoltAggregateStats.java
deleted file mode 100644
index 061f3fb..0000000
--- a/storm-core/src/jvm/backtype/storm/generated/BoltAggregateStats.java
+++ /dev/null
@@ -1,704 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Autogenerated by Thrift Compiler (0.9.3)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- *  @generated
- */
-package backtype.storm.generated;
-
-import org.apache.thrift.scheme.IScheme;
-import org.apache.thrift.scheme.SchemeFactory;
-import org.apache.thrift.scheme.StandardScheme;
-
-import org.apache.thrift.scheme.TupleScheme;
-import org.apache.thrift.protocol.TTupleProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.TException;
-import org.apache.thrift.async.AsyncMethodCallback;
-import org.apache.thrift.server.AbstractNonblockingServer.*;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.EnumMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.EnumSet;
-import java.util.Collections;
-import java.util.BitSet;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import javax.annotation.Generated;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
-public class BoltAggregateStats implements org.apache.thrift.TBase<BoltAggregateStats, BoltAggregateStats._Fields>, java.io.Serializable, Cloneable, Comparable<BoltAggregateStats> {
-  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("BoltAggregateStats");
-
-  private static final org.apache.thrift.protocol.TField EXECUTE_LATENCY_MS_FIELD_DESC = new org.apache.thrift.protocol.TField("execute_latency_ms", org.apache.thrift.protocol.TType.DOUBLE, (short)1);
-  private static final org.apache.thrift.protocol.TField PROCESS_LATENCY_MS_FIELD_DESC = new org.apache.thrift.protocol.TField("process_latency_ms", org.apache.thrift.protocol.TType.DOUBLE, (short)2);
-  private static final org.apache.thrift.protocol.TField EXECUTED_FIELD_DESC = new org.apache.thrift.protocol.TField("executed", org.apache.thrift.protocol.TType.I64, (short)3);
-  private static final org.apache.thrift.protocol.TField CAPACITY_FIELD_DESC = new org.apache.thrift.protocol.TField("capacity", org.apache.thrift.protocol.TType.DOUBLE, (short)4);
-
-  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
-  static {
-    schemes.put(StandardScheme.class, new BoltAggregateStatsStandardSchemeFactory());
-    schemes.put(TupleScheme.class, new BoltAggregateStatsTupleSchemeFactory());
-  }
-
-  private double execute_latency_ms; // optional
-  private double process_latency_ms; // optional
-  private long executed; // optional
-  private double capacity; // optional
-
-  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    EXECUTE_LATENCY_MS((short)1, "execute_latency_ms"),
-    PROCESS_LATENCY_MS((short)2, "process_latency_ms"),
-    EXECUTED((short)3, "executed"),
-    CAPACITY((short)4, "capacity");
-
-    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
-
-    static {
-      for (_Fields field : EnumSet.allOf(_Fields.class)) {
-        byName.put(field.getFieldName(), field);
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, or null if its not found.
-     */
-    public static _Fields findByThriftId(int fieldId) {
-      switch(fieldId) {
-        case 1: // EXECUTE_LATENCY_MS
-          return EXECUTE_LATENCY_MS;
-        case 2: // PROCESS_LATENCY_MS
-          return PROCESS_LATENCY_MS;
-        case 3: // EXECUTED
-          return EXECUTED;
-        case 4: // CAPACITY
-          return CAPACITY;
-        default:
-          return null;
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, throwing an exception
-     * if it is not found.
-     */
-    public static _Fields findByThriftIdOrThrow(int fieldId) {
-      _Fields fields = findByThriftId(fieldId);
-      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-      return fields;
-    }
-
-    /**
-     * Find the _Fields constant that matches name, or null if its not found.
-     */
-    public static _Fields findByName(String name) {
-      return byName.get(name);
-    }
-
-    private final short _thriftId;
-    private final String _fieldName;
-
-    _Fields(short thriftId, String fieldName) {
-      _thriftId = thriftId;
-      _fieldName = fieldName;
-    }
-
-    public short getThriftFieldId() {
-      return _thriftId;
-    }
-
-    public String getFieldName() {
-      return _fieldName;
-    }
-  }
-
-  // isset id assignments
-  private static final int __EXECUTE_LATENCY_MS_ISSET_ID = 0;
-  private static final int __PROCESS_LATENCY_MS_ISSET_ID = 1;
-  private static final int __EXECUTED_ISSET_ID = 2;
-  private static final int __CAPACITY_ISSET_ID = 3;
-  private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = {_Fields.EXECUTE_LATENCY_MS,_Fields.PROCESS_LATENCY_MS,_Fields.EXECUTED,_Fields.CAPACITY};
-  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
-  static {
-    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.EXECUTE_LATENCY_MS, new org.apache.thrift.meta_data.FieldMetaData("execute_latency_ms", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
-    tmpMap.put(_Fields.PROCESS_LATENCY_MS, new org.apache.thrift.meta_data.FieldMetaData("process_latency_ms", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
-    tmpMap.put(_Fields.EXECUTED, new org.apache.thrift.meta_data.FieldMetaData("executed", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
-    tmpMap.put(_Fields.CAPACITY, new org.apache.thrift.meta_data.FieldMetaData("capacity", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
-    metaDataMap = Collections.unmodifiableMap(tmpMap);
-    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(BoltAggregateStats.class, metaDataMap);
-  }
-
-  public BoltAggregateStats() {
-  }
-
-  /**
-   * Performs a deep copy on <i>other</i>.
-   */
-  public BoltAggregateStats(BoltAggregateStats other) {
-    __isset_bitfield = other.__isset_bitfield;
-    this.execute_latency_ms = other.execute_latency_ms;
-    this.process_latency_ms = other.process_latency_ms;
-    this.executed = other.executed;
-    this.capacity = other.capacity;
-  }
-
-  public BoltAggregateStats deepCopy() {
-    return new BoltAggregateStats(this);
-  }
-
-  @Override
-  public void clear() {
-    set_execute_latency_ms_isSet(false);
-    this.execute_latency_ms = 0.0;
-    set_process_latency_ms_isSet(false);
-    this.process_latency_ms = 0.0;
-    set_executed_isSet(false);
-    this.executed = 0;
-    set_capacity_isSet(false);
-    this.capacity = 0.0;
-  }
-
-  public double get_execute_latency_ms() {
-    return this.execute_latency_ms;
-  }
-
-  public void set_execute_latency_ms(double execute_latency_ms) {
-    this.execute_latency_ms = execute_latency_ms;
-    set_execute_latency_ms_isSet(true);
-  }
-
-  public void unset_execute_latency_ms() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __EXECUTE_LATENCY_MS_ISSET_ID);
-  }
-
-  /** Returns true if field execute_latency_ms is set (has been assigned a value) and false otherwise */
-  public boolean is_set_execute_latency_ms() {
-    return EncodingUtils.testBit(__isset_bitfield, __EXECUTE_LATENCY_MS_ISSET_ID);
-  }
-
-  public void set_execute_latency_ms_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __EXECUTE_LATENCY_MS_ISSET_ID, value);
-  }
-
-  public double get_process_latency_ms() {
-    return this.process_latency_ms;
-  }
-
-  public void set_process_latency_ms(double process_latency_ms) {
-    this.process_latency_ms = process_latency_ms;
-    set_process_latency_ms_isSet(true);
-  }
-
-  public void unset_process_latency_ms() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __PROCESS_LATENCY_MS_ISSET_ID);
-  }
-
-  /** Returns true if field process_latency_ms is set (has been assigned a value) and false otherwise */
-  public boolean is_set_process_latency_ms() {
-    return EncodingUtils.testBit(__isset_bitfield, __PROCESS_LATENCY_MS_ISSET_ID);
-  }
-
-  public void set_process_latency_ms_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __PROCESS_LATENCY_MS_ISSET_ID, value);
-  }
-
-  public long get_executed() {
-    return this.executed;
-  }
-
-  public void set_executed(long executed) {
-    this.executed = executed;
-    set_executed_isSet(true);
-  }
-
-  public void unset_executed() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __EXECUTED_ISSET_ID);
-  }
-
-  /** Returns true if field executed is set (has been assigned a value) and false otherwise */
-  public boolean is_set_executed() {
-    return EncodingUtils.testBit(__isset_bitfield, __EXECUTED_ISSET_ID);
-  }
-
-  public void set_executed_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __EXECUTED_ISSET_ID, value);
-  }
-
-  public double get_capacity() {
-    return this.capacity;
-  }
-
-  public void set_capacity(double capacity) {
-    this.capacity = capacity;
-    set_capacity_isSet(true);
-  }
-
-  public void unset_capacity() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __CAPACITY_ISSET_ID);
-  }
-
-  /** Returns true if field capacity is set (has been assigned a value) and false otherwise */
-  public boolean is_set_capacity() {
-    return EncodingUtils.testBit(__isset_bitfield, __CAPACITY_ISSET_ID);
-  }
-
-  public void set_capacity_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __CAPACITY_ISSET_ID, value);
-  }
-
-  public void setFieldValue(_Fields field, Object value) {
-    switch (field) {
-    case EXECUTE_LATENCY_MS:
-      if (value == null) {
-        unset_execute_latency_ms();
-      } else {
-        set_execute_latency_ms((Double)value);
-      }
-      break;
-
-    case PROCESS_LATENCY_MS:
-      if (value == null) {
-        unset_process_latency_ms();
-      } else {
-        set_process_latency_ms((Double)value);
-      }
-      break;
-
-    case EXECUTED:
-      if (value == null) {
-        unset_executed();
-      } else {
-        set_executed((Long)value);
-      }
-      break;
-
-    case CAPACITY:
-      if (value == null) {
-        unset_capacity();
-      } else {
-        set_capacity((Double)value);
-      }
-      break;
-
-    }
-  }
-
-  public Object getFieldValue(_Fields field) {
-    switch (field) {
-    case EXECUTE_LATENCY_MS:
-      return get_execute_latency_ms();
-
-    case PROCESS_LATENCY_MS:
-      return get_process_latency_ms();
-
-    case EXECUTED:
-      return get_executed();
-
-    case CAPACITY:
-      return get_capacity();
-
-    }
-    throw new IllegalStateException();
-  }
-
-  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
-  public boolean isSet(_Fields field) {
-    if (field == null) {
-      throw new IllegalArgumentException();
-    }
-
-    switch (field) {
-    case EXECUTE_LATENCY_MS:
-      return is_set_execute_latency_ms();
-    case PROCESS_LATENCY_MS:
-      return is_set_process_latency_ms();
-    case EXECUTED:
-      return is_set_executed();
-    case CAPACITY:
-      return is_set_capacity();
-    }
-    throw new IllegalStateException();
-  }
-
-  @Override
-  public boolean equals(Object that) {
-    if (that == null)
-      return false;
-    if (that instanceof BoltAggregateStats)
-      return this.equals((BoltAggregateStats)that);
-    return false;
-  }
-
-  public boolean equals(BoltAggregateStats that) {
-    if (that == null)
-      return false;
-
-    boolean this_present_execute_latency_ms = true && this.is_set_execute_latency_ms();
-    boolean that_present_execute_latency_ms = true && that.is_set_execute_latency_ms();
-    if (this_present_execute_latency_ms || that_present_execute_latency_ms) {
-      if (!(this_present_execute_latency_ms && that_present_execute_latency_ms))
-        return false;
-      if (this.execute_latency_ms != that.execute_latency_ms)
-        return false;
-    }
-
-    boolean this_present_process_latency_ms = true && this.is_set_process_latency_ms();
-    boolean that_present_process_latency_ms = true && that.is_set_process_latency_ms();
-    if (this_present_process_latency_ms || that_present_process_latency_ms) {
-      if (!(this_present_process_latency_ms && that_present_process_latency_ms))
-        return false;
-      if (this.process_latency_ms != that.process_latency_ms)
-        return false;
-    }
-
-    boolean this_present_executed = true && this.is_set_executed();
-    boolean that_present_executed = true && that.is_set_executed();
-    if (this_present_executed || that_present_executed) {
-      if (!(this_present_executed && that_present_executed))
-        return false;
-      if (this.executed != that.executed)
-        return false;
-    }
-
-    boolean this_present_capacity = true && this.is_set_capacity();
-    boolean that_present_capacity = true && that.is_set_capacity();
-    if (this_present_capacity || that_present_capacity) {
-      if (!(this_present_capacity && that_present_capacity))
-        return false;
-      if (this.capacity != that.capacity)
-        return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    List<Object> list = new ArrayList<Object>();
-
-    boolean present_execute_latency_ms = true && (is_set_execute_latency_ms());
-    list.add(present_execute_latency_ms);
-    if (present_execute_latency_ms)
-      list.add(execute_latency_ms);
-
-    boolean present_process_latency_ms = true && (is_set_process_latency_ms());
-    list.add(present_process_latency_ms);
-    if (present_process_latency_ms)
-      list.add(process_latency_ms);
-
-    boolean present_executed = true && (is_set_executed());
-    list.add(present_executed);
-    if (present_executed)
-      list.add(executed);
-
-    boolean present_capacity = true && (is_set_capacity());
-    list.add(present_capacity);
-    if (present_capacity)
-      list.add(capacity);
-
-    return list.hashCode();
-  }
-
-  @Override
-  public int compareTo(BoltAggregateStats other) {
-    if (!getClass().equals(other.getClass())) {
-      return getClass().getName().compareTo(other.getClass().getName());
-    }
-
-    int lastComparison = 0;
-
-    lastComparison = Boolean.valueOf(is_set_execute_latency_ms()).compareTo(other.is_set_execute_latency_ms());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_execute_latency_ms()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.execute_latency_ms, other.execute_latency_ms);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_process_latency_ms()).compareTo(other.is_set_process_latency_ms());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_process_latency_ms()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.process_latency_ms, other.process_latency_ms);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_executed()).compareTo(other.is_set_executed());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_executed()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.executed, other.executed);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_capacity()).compareTo(other.is_set_capacity());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_capacity()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.capacity, other.capacity);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    return 0;
-  }
-
-  public _Fields fieldForId(int fieldId) {
-    return _Fields.findByThriftId(fieldId);
-  }
-
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
-  }
-
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("BoltAggregateStats(");
-    boolean first = true;
-
-    if (is_set_execute_latency_ms()) {
-      sb.append("execute_latency_ms:");
-      sb.append(this.execute_latency_ms);
-      first = false;
-    }
-    if (is_set_process_latency_ms()) {
-      if (!first) sb.append(", ");
-      sb.append("process_latency_ms:");
-      sb.append(this.process_latency_ms);
-      first = false;
-    }
-    if (is_set_executed()) {
-      if (!first) sb.append(", ");
-      sb.append("executed:");
-      sb.append(this.executed);
-      first = false;
-    }
-    if (is_set_capacity()) {
-      if (!first) sb.append(", ");
-      sb.append("capacity:");
-      sb.append(this.capacity);
-      first = false;
-    }
-    sb.append(")");
-    return sb.toString();
-  }
-
-  public void validate() throws org.apache.thrift.TException {
-    // check for required fields
-    // check for sub-struct validity
-  }
-
-  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
-    try {
-      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
-    try {
-      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
-      __isset_bitfield = 0;
-      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private static class BoltAggregateStatsStandardSchemeFactory implements SchemeFactory {
-    public BoltAggregateStatsStandardScheme getScheme() {
-      return new BoltAggregateStatsStandardScheme();
-    }
-  }
-
-  private static class BoltAggregateStatsStandardScheme extends StandardScheme<BoltAggregateStats> {
-
-    public void read(org.apache.thrift.protocol.TProtocol iprot, BoltAggregateStats struct) throws org.apache.thrift.TException {
-      org.apache.thrift.protocol.TField schemeField;
-      iprot.readStructBegin();
-      while (true)
-      {
-        schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
-          break;
-        }
-        switch (schemeField.id) {
-          case 1: // EXECUTE_LATENCY_MS
-            if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
-              struct.execute_latency_ms = iprot.readDouble();
-              struct.set_execute_latency_ms_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 2: // PROCESS_LATENCY_MS
-            if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
-              struct.process_latency_ms = iprot.readDouble();
-              struct.set_process_latency_ms_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 3: // EXECUTED
-            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
-              struct.executed = iprot.readI64();
-              struct.set_executed_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 4: // CAPACITY
-            if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
-              struct.capacity = iprot.readDouble();
-              struct.set_capacity_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          default:
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-        }
-        iprot.readFieldEnd();
-      }
-      iprot.readStructEnd();
-      struct.validate();
-    }
-
-    public void write(org.apache.thrift.protocol.TProtocol oprot, BoltAggregateStats struct) throws org.apache.thrift.TException {
-      struct.validate();
-
-      oprot.writeStructBegin(STRUCT_DESC);
-      if (struct.is_set_execute_latency_ms()) {
-        oprot.writeFieldBegin(EXECUTE_LATENCY_MS_FIELD_DESC);
-        oprot.writeDouble(struct.execute_latency_ms);
-        oprot.writeFieldEnd();
-      }
-      if (struct.is_set_process_latency_ms()) {
-        oprot.writeFieldBegin(PROCESS_LATENCY_MS_FIELD_DESC);
-        oprot.writeDouble(struct.process_latency_ms);
-        oprot.writeFieldEnd();
-      }
-      if (struct.is_set_executed()) {
-        oprot.writeFieldBegin(EXECUTED_FIELD_DESC);
-        oprot.writeI64(struct.executed);
-        oprot.writeFieldEnd();
-      }
-      if (struct.is_set_capacity()) {
-        oprot.writeFieldBegin(CAPACITY_FIELD_DESC);
-        oprot.writeDouble(struct.capacity);
-        oprot.writeFieldEnd();
-      }
-      oprot.writeFieldStop();
-      oprot.writeStructEnd();
-    }
-
-  }
-
-  private static class BoltAggregateStatsTupleSchemeFactory implements SchemeFactory {
-    public BoltAggregateStatsTupleScheme getScheme() {
-      return new BoltAggregateStatsTupleScheme();
-    }
-  }
-
-  private static class BoltAggregateStatsTupleScheme extends TupleScheme<BoltAggregateStats> {
-
-    @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, BoltAggregateStats struct) throws org.apache.thrift.TException {
-      TTupleProtocol oprot = (TTupleProtocol) prot;
-      BitSet optionals = new BitSet();
-      if (struct.is_set_execute_latency_ms()) {
-        optionals.set(0);
-      }
-      if (struct.is_set_process_latency_ms()) {
-        optionals.set(1);
-      }
-      if (struct.is_set_executed()) {
-        optionals.set(2);
-      }
-      if (struct.is_set_capacity()) {
-        optionals.set(3);
-      }
-      oprot.writeBitSet(optionals, 4);
-      if (struct.is_set_execute_latency_ms()) {
-        oprot.writeDouble(struct.execute_latency_ms);
-      }
-      if (struct.is_set_process_latency_ms()) {
-        oprot.writeDouble(struct.process_latency_ms);
-      }
-      if (struct.is_set_executed()) {
-        oprot.writeI64(struct.executed);
-      }
-      if (struct.is_set_capacity()) {
-        oprot.writeDouble(struct.capacity);
-      }
-    }
-
-    @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, BoltAggregateStats struct) throws org.apache.thrift.TException {
-      TTupleProtocol iprot = (TTupleProtocol) prot;
-      BitSet incoming = iprot.readBitSet(4);
-      if (incoming.get(0)) {
-        struct.execute_latency_ms = iprot.readDouble();
-        struct.set_execute_latency_ms_isSet(true);
-      }
-      if (incoming.get(1)) {
-        struct.process_latency_ms = iprot.readDouble();
-        struct.set_process_latency_ms_isSet(true);
-      }
-      if (incoming.get(2)) {
-        struct.executed = iprot.readI64();
-        struct.set_executed_isSet(true);
-      }
-      if (incoming.get(3)) {
-        struct.capacity = iprot.readDouble();
-        struct.set_capacity_isSet(true);
-      }
-    }
-  }
-
-}
-
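
In contrast to Bolt, every field of the deleted BoltAggregateStats struct above is optional and tracked through an isset bitfield, so equals(), hashCode(), and toString() all consult the bits before the values. The hedged sketch below exercises that pattern, again assuming the post-migration org.apache.storm.generated package.

    // Sketch only: shows the optional-field bookkeeping generated for
    // BoltAggregateStats (isset bits are flipped by the setters).
    import org.apache.storm.generated.BoltAggregateStats;

    public class OptionalFieldsSketch {
        public static void main(String[] args) {
            BoltAggregateStats stats = new BoltAggregateStats();
            stats.set_execute_latency_ms(1.5); // sets the value and its isset bit
            stats.set_executed(42L);

            System.out.println(stats.is_set_execute_latency_ms()); // true
            System.out.println(stats.is_set_capacity());           // false: never assigned
            System.out.println(stats); // toString() prints only the fields that are set
        }
    }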


[22/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatibility

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/cluster.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/cluster.clj b/storm-core/src/clj/org/apache/storm/cluster.clj
new file mode 100644
index 0000000..5aef266
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/cluster.clj
@@ -0,0 +1,691 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+
+(ns org.apache.storm.cluster
+  (:import [org.apache.zookeeper.data Stat ACL Id]
+           [org.apache.storm.generated SupervisorInfo Assignment StormBase ClusterWorkerHeartbeat ErrorInfo Credentials NimbusSummary
+            LogConfig ProfileAction ProfileRequest NodeInfo]
+           [java.io Serializable])
+  (:import [org.apache.zookeeper KeeperException KeeperException$NoNodeException ZooDefs ZooDefs$Ids ZooDefs$Perms])
+  (:import [org.apache.curator.framework CuratorFramework])
+  (:import [org.apache.storm.utils Utils])
+  (:import [org.apache.storm.cluster ClusterState ClusterStateContext ClusterStateListener ConnectionState])
+  (:import [java.security MessageDigest])
+  (:import [org.apache.zookeeper.server.auth DigestAuthenticationProvider])
+  (:import [org.apache.storm.nimbus NimbusInfo])
+  (:use [org.apache.storm util log config converter])
+  (:require [org.apache.storm [zookeeper :as zk]])
+  (:require [org.apache.storm.daemon [common :as common]]))
+
+(defn mk-topo-only-acls
+  [topo-conf]
+  (let [payload (.get topo-conf STORM-ZOOKEEPER-TOPOLOGY-AUTH-PAYLOAD)]
+    (when (Utils/isZkAuthenticationConfiguredTopology topo-conf)
+      [(first ZooDefs$Ids/CREATOR_ALL_ACL)
+       (ACL. ZooDefs$Perms/READ (Id. "digest" (DigestAuthenticationProvider/generateDigest payload)))])))
+ 
+(defnk mk-distributed-cluster-state
+  [conf :auth-conf nil :acls nil :context (ClusterStateContext.)]
+  (let [clazz (Class/forName (or (conf STORM-CLUSTER-STATE-STORE)
+                                 "org.apache.storm.cluster_state.zookeeper_state_factory"))
+        state-instance (.newInstance clazz)]
+    (log-debug "Creating cluster state: " (.toString clazz))
+    (or (.mkState state-instance conf auth-conf acls context)
+        nil)))
+
+(defprotocol StormClusterState
+  (assignments [this callback])
+  (assignment-info [this storm-id callback])
+  (assignment-info-with-version [this storm-id callback])
+  (assignment-version [this storm-id callback])
+  ;returns key information under /storm/blobstore/key
+  (blobstore-info [this blob-key])
+  ;returns list of nimbus summaries stored under /stormroot/nimbuses/<nimbus-ids> -> <data>
+  (nimbuses [this])
+  ;adds the NimbusSummary to /stormroot/nimbuses/nimbus-id
+  (add-nimbus-host! [this nimbus-id nimbus-summary])
+
+  (active-storms [this])
+  (storm-base [this storm-id callback])
+  (get-worker-heartbeat [this storm-id node port])
+  (get-worker-profile-requests [this storm-id nodeinfo thrift?])
+  (get-topology-profile-requests [this storm-id thrift?])
+  (set-worker-profile-request [this storm-id profile-request])
+  (delete-topology-profile-requests [this storm-id profile-request])
+  (executor-beats [this storm-id executor->node+port])
+  (supervisors [this callback])
+  (supervisor-info [this supervisor-id]) ;; returns nil if it doesn't exist
+  (setup-heartbeats! [this storm-id])
+  (teardown-heartbeats! [this storm-id])
+  (teardown-topology-errors! [this storm-id])
+  (heartbeat-storms [this])
+  (error-topologies [this])
+  (set-topology-log-config! [this storm-id log-config])
+  (topology-log-config [this storm-id cb])
+  (worker-heartbeat! [this storm-id node port info])
+  (remove-worker-heartbeat! [this storm-id node port])
+  (supervisor-heartbeat! [this supervisor-id info])
+  (worker-backpressure! [this storm-id node port info])
+  (topology-backpressure [this storm-id callback])
+  (setup-backpressure! [this storm-id])
+  (remove-worker-backpressure! [this storm-id node port])
+  (activate-storm! [this storm-id storm-base])
+  (update-storm! [this storm-id new-elems])
+  (remove-storm-base! [this storm-id])
+  (set-assignment! [this storm-id info])
+  ;; sets up information related to key consisting of nimbus
+  ;; host:port and version info of the blob
+  (setup-blobstore! [this key nimbusInfo versionInfo])
+  (active-keys [this])
+  (blobstore [this callback])
+  (remove-storm! [this storm-id])
+  (remove-blobstore-key! [this blob-key])
+  (remove-key-version! [this blob-key])
+  (report-error [this storm-id component-id node port error])
+  (errors [this storm-id component-id])
+  (last-error [this storm-id component-id])
+  (set-credentials! [this storm-id creds topo-conf])
+  (credentials [this storm-id callback])
+  (disconnect [this]))
+
+(def ASSIGNMENTS-ROOT "assignments")
+(def CODE-ROOT "code")
+(def STORMS-ROOT "storms")
+(def SUPERVISORS-ROOT "supervisors")
+(def WORKERBEATS-ROOT "workerbeats")
+(def BACKPRESSURE-ROOT "backpressure")
+(def ERRORS-ROOT "errors")
+(def BLOBSTORE-ROOT "blobstore")
+; Stores the latest update sequence for a blob
+(def BLOBSTORE-MAX-KEY-SEQUENCE-NUMBER-ROOT "blobstoremaxkeysequencenumber")
+(def NIMBUSES-ROOT "nimbuses")
+(def CREDENTIALS-ROOT "credentials")
+(def LOGCONFIG-ROOT "logconfigs")
+(def PROFILERCONFIG-ROOT "profilerconfigs")
+
+(def ASSIGNMENTS-SUBTREE (str "/" ASSIGNMENTS-ROOT))
+(def STORMS-SUBTREE (str "/" STORMS-ROOT))
+(def SUPERVISORS-SUBTREE (str "/" SUPERVISORS-ROOT))
+(def WORKERBEATS-SUBTREE (str "/" WORKERBEATS-ROOT))
+(def BACKPRESSURE-SUBTREE (str "/" BACKPRESSURE-ROOT))
+(def ERRORS-SUBTREE (str "/" ERRORS-ROOT))
+;; Blobstore subtree /storm/blobstore
+(def BLOBSTORE-SUBTREE (str "/" BLOBSTORE-ROOT))
+(def BLOBSTORE-MAX-KEY-SEQUENCE-NUMBER-SUBTREE (str "/" BLOBSTORE-MAX-KEY-SEQUENCE-NUMBER-ROOT))
+(def NIMBUSES-SUBTREE (str "/" NIMBUSES-ROOT))
+(def CREDENTIALS-SUBTREE (str "/" CREDENTIALS-ROOT))
+(def LOGCONFIG-SUBTREE (str "/" LOGCONFIG-ROOT))
+(def PROFILERCONFIG-SUBTREE (str "/" PROFILERCONFIG-ROOT))
+
+(defn supervisor-path
+  [id]
+  (str SUPERVISORS-SUBTREE "/" id))
+
+(defn assignment-path
+  [id]
+  (str ASSIGNMENTS-SUBTREE "/" id))
+
+(defn blobstore-path
+  [key]
+  (str BLOBSTORE-SUBTREE "/" key))
+
+(defn blobstore-max-key-sequence-number-path
+  [key]
+  (str BLOBSTORE-MAX-KEY-SEQUENCE-NUMBER-SUBTREE "/" key))
+
+(defn nimbus-path
+  [id]
+  (str NIMBUSES-SUBTREE "/" id))
+
+(defn storm-path
+  [id]
+  (str STORMS-SUBTREE "/" id))
+
+(defn workerbeat-storm-root
+  [storm-id]
+  (str WORKERBEATS-SUBTREE "/" storm-id))
+
+(defn workerbeat-path
+  [storm-id node port]
+  (str (workerbeat-storm-root storm-id) "/" node "-" port))
+
+(defn backpressure-storm-root
+  [storm-id]
+  (str BACKPRESSURE-SUBTREE "/" storm-id))
+
+(defn backpressure-path
+  [storm-id node port]
+  (str (backpressure-storm-root storm-id) "/" node "-" port))
+
+(defn error-storm-root
+  [storm-id]
+  (str ERRORS-SUBTREE "/" storm-id))
+
+(defn error-path
+  [storm-id component-id]
+  (str (error-storm-root storm-id) "/" (url-encode component-id)))
+
+(def last-error-path-seg "last-error")
+
+(defn last-error-path
+  [storm-id component-id]
+  (str (error-storm-root storm-id)
+       "/"
+       (url-encode component-id)
+       "-"
+       last-error-path-seg))
+
+(defn credentials-path
+  [storm-id]
+  (str CREDENTIALS-SUBTREE "/" storm-id))
+
+(defn log-config-path
+  [storm-id]
+  (str LOGCONFIG-SUBTREE "/" storm-id))
+
+(defn profiler-config-path
+  ([storm-id]
+   (str PROFILERCONFIG-SUBTREE "/" storm-id))
+  ([storm-id host port request-type]
+   (str (profiler-config-path storm-id) "/" host "_" port "_" request-type)))
+
+(defn- issue-callback!
+  [cb-atom]
+  (let [cb @cb-atom]
+    (reset! cb-atom nil)
+    (when cb
+      (cb))))
+
+(defn- issue-map-callback!
+  [cb-atom id]
+  (let [cb (@cb-atom id)]
+    (swap! cb-atom dissoc id)
+    (when cb
+      (cb id))))
+
+(defn- maybe-deserialize
+  [ser clazz]
+  (when ser
+    (Utils/deserialize ser clazz)))
+
+(defrecord TaskError [error time-secs host port])
+
+(defn- parse-error-path
+  [^String p]
+  (Long/parseLong (.substring p 1)))
+
+(defn convert-executor-beats
+  "Ensures that we only return heartbeats for executors assigned to
+  this worker."
+  [executors worker-hb]
+  (let [executor-stats (:executor-stats worker-hb)]
+    (->> executors
+         (map (fn [t]
+                (if (contains? executor-stats t)
+                  {t {:time-secs (:time-secs worker-hb)
+                      :uptime (:uptime worker-hb)
+                      :stats (get executor-stats t)}})))
+         (into {}))))
+
+;; Watches should be used for optimization. When ZK is reconnecting, they're not guaranteed to be called.
+(defnk mk-storm-cluster-state
+  [cluster-state-spec :acls nil :context (ClusterStateContext.)]
+  (let [[solo? cluster-state] (if (instance? ClusterState cluster-state-spec)
+                                [false cluster-state-spec]
+                                [true (mk-distributed-cluster-state cluster-state-spec :auth-conf cluster-state-spec :acls acls :context context)])
+        assignment-info-callback (atom {})
+        assignment-info-with-version-callback (atom {})
+        assignment-version-callback (atom {})
+        supervisors-callback (atom nil)
+        backpressure-callback (atom {})   ;; we want to register a topo directory getChildren callback for all workers of this dir
+        assignments-callback (atom nil)
+        storm-base-callback (atom {})
+        blobstore-callback (atom nil)
+        credentials-callback (atom {})
+        log-config-callback (atom {})
+        state-id (.register
+                  cluster-state
+                  (fn [type path]
+                    (let [[subtree & args] (tokenize-path path)]
+                      (condp = subtree
+                         ASSIGNMENTS-ROOT (if (empty? args)
+                                             (issue-callback! assignments-callback)
+                                             (do
+                                               (issue-map-callback! assignment-info-callback (first args))
+                                               (issue-map-callback! assignment-version-callback (first args))
+                                               (issue-map-callback! assignment-info-with-version-callback (first args))))
+                         SUPERVISORS-ROOT (issue-callback! supervisors-callback)
+                         BLOBSTORE-ROOT (issue-callback! blobstore-callback) ;; callback register for blobstore
+                         STORMS-ROOT (issue-map-callback! storm-base-callback (first args))
+                         CREDENTIALS-ROOT (issue-map-callback! credentials-callback (first args))
+                         LOGCONFIG-ROOT (issue-map-callback! log-config-callback (first args))
+                         BACKPRESSURE-ROOT (issue-map-callback! backpressure-callback (first args))
+                         ;; this should never happen
+                         (exit-process! 30 "Unknown callback for subtree " subtree args)))))]
+    (doseq [p [ASSIGNMENTS-SUBTREE STORMS-SUBTREE SUPERVISORS-SUBTREE WORKERBEATS-SUBTREE ERRORS-SUBTREE BLOBSTORE-SUBTREE NIMBUSES-SUBTREE
+               LOGCONFIG-SUBTREE]]
+      (.mkdirs cluster-state p acls))
+    (reify
+      StormClusterState
+
+      (assignments
+        [this callback]
+        (when callback
+          (reset! assignments-callback callback))
+        (.get_children cluster-state ASSIGNMENTS-SUBTREE (not-nil? callback)))
+
+      (assignment-info
+        [this storm-id callback]
+        (when callback
+          (swap! assignment-info-callback assoc storm-id callback))
+        (clojurify-assignment (maybe-deserialize (.get_data cluster-state (assignment-path storm-id) (not-nil? callback)) Assignment)))
+
+      (assignment-info-with-version 
+        [this storm-id callback]
+        (when callback
+          (swap! assignment-info-with-version-callback assoc storm-id callback))
+        (let [{data :data version :version} 
+              (.get_data_with_version cluster-state (assignment-path storm-id) (not-nil? callback))]
+        {:data (clojurify-assignment (maybe-deserialize data Assignment))
+         :version version}))
+
+      (assignment-version 
+        [this storm-id callback]
+        (when callback
+          (swap! assignment-version-callback assoc storm-id callback))
+        (.get_version cluster-state (assignment-path storm-id) (not-nil? callback)))
+
+      ;; blobstore state
+      (blobstore
+        [this callback]
+        (when callback
+          (reset! blobstore-callback callback))
+        (.sync_path cluster-state BLOBSTORE-SUBTREE)
+        (.get_children cluster-state BLOBSTORE-SUBTREE (not-nil? callback)))
+
+      (nimbuses
+        [this]
+        (map #(maybe-deserialize (.get_data cluster-state (nimbus-path %1) false) NimbusSummary)
+          (.get_children cluster-state NIMBUSES-SUBTREE false)))
+
+      (add-nimbus-host!
+        [this nimbus-id nimbus-summary]
+        ;explicit delete for ephemeral node to ensure this session creates the entry.
+        (.delete_node cluster-state (nimbus-path nimbus-id))
+
+        (.add_listener cluster-state (reify ClusterStateListener
+                        (^void stateChanged[this ^ConnectionState newState]
+                          (log-message "Connection state listener invoked, zookeeper connection state has changed to " newState)
+                          (if (.equals newState ConnectionState/RECONNECTED)
+                            (do
+                              (log-message "Connection state has changed to reconnected so setting nimbuses entry one more time")
+                              (.set_ephemeral_node cluster-state (nimbus-path nimbus-id) (Utils/serialize nimbus-summary) acls))))))
+        
+        (.set_ephemeral_node cluster-state (nimbus-path nimbus-id) (Utils/serialize nimbus-summary) acls))
+
+      (setup-blobstore!
+        [this key nimbusInfo versionInfo]
+        (let [path (str (blobstore-path key) "/" (.toHostPortString nimbusInfo) "-" versionInfo)]
+          (log-message "setup-path " path)
+          (.mkdirs cluster-state (blobstore-path key) acls)
+          ;we delete the node first to ensure the node gets created as part of this session only.
+          (.delete_node_blobstore cluster-state (str (blobstore-path key)) (.toHostPortString nimbusInfo))
+          (.set_ephemeral_node cluster-state path nil acls)))
+
+      (blobstore-info
+        [this blob-key]
+        (let [path (blobstore-path blob-key)]
+          (.sync_path cluster-state path)
+          (.get_children cluster-state path false)))
+
+      (active-storms
+        [this]
+        (.get_children cluster-state STORMS-SUBTREE false))
+
+      (active-keys
+        [this]
+        (.get_children cluster-state BLOBSTORE-SUBTREE false))
+
+      (heartbeat-storms
+        [this]
+        (.get_worker_hb_children cluster-state WORKERBEATS-SUBTREE false))
+
+      (error-topologies
+        [this]
+        (.get_children cluster-state ERRORS-SUBTREE false))
+
+      (get-worker-heartbeat
+        [this storm-id node port]
+        (let [worker-hb (.get_worker_hb cluster-state (workerbeat-path storm-id node port) false)]
+          (if worker-hb
+            (-> worker-hb
+              (maybe-deserialize ClusterWorkerHeartbeat)
+              clojurify-zk-worker-hb))))
+
+      (executor-beats
+        [this storm-id executor->node+port]
+        ;; need to take executor->node+port in explicitly so that we don't run into a situation where a
+        ;; long dead worker with a skewed clock overrides all the timestamps. By only checking heartbeats
+        ;; with an assigned node+port, and only reading executors from that heartbeat that are actually assigned,
+        ;; we avoid situations like that
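+        ;; Illustrative example only (values invented, not part of the original code):
+        ;; given executor->node+port = {[1 1] ["node1" 6700], [2 2] ["node1" 6700], [5 5] ["node2" 6701]}
+        ;; (reverse-map executor->node+port) groups executors by worker, roughly
+        ;;   {["node1" 6700] ([1 1] [2 2]), ["node2" 6701] ([5 5])}
+        ;; so only the heartbeats of those two assigned workers are read and merged below.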
+        (let [node+port->executors (reverse-map executor->node+port)
+              all-heartbeats (for [[[node port] executors] node+port->executors]
+                               (->> (get-worker-heartbeat this storm-id node port)
+                                    (convert-executor-beats executors)
+                                    ))]
+          (apply merge all-heartbeats)))
+
+      (supervisors
+        [this callback]
+        (when callback
+          (reset! supervisors-callback callback))
+        (.get_children cluster-state SUPERVISORS-SUBTREE (not-nil? callback)))
+
+      (supervisor-info
+        [this supervisor-id]
+        (clojurify-supervisor-info (maybe-deserialize (.get_data cluster-state (supervisor-path supervisor-id) false) SupervisorInfo)))
+
+      (topology-log-config
+        [this storm-id cb]
+        (when cb
+          (swap! log-config-callback assoc storm-id cb))
+        (maybe-deserialize (.get_data cluster-state (log-config-path storm-id) (not-nil? cb)) LogConfig))
+
+      (set-topology-log-config!
+        [this storm-id log-config]
+        (.set_data cluster-state (log-config-path storm-id) (Utils/serialize log-config) acls))
+
+      (set-worker-profile-request
+        [this storm-id profile-request]
+        (let [request-type (.get_action profile-request)
+              host (.get_node (.get_nodeInfo profile-request))
+              port (first (.get_port (.get_nodeInfo profile-request)))]
+          (.set_data cluster-state
+                     (profiler-config-path storm-id host port request-type)
+                     (Utils/serialize profile-request)
+                     acls)))
+
+      (get-topology-profile-requests
+        [this storm-id thrift?]
+        (let [path (profiler-config-path storm-id)
+              requests (if (.node_exists cluster-state path false)
+                         (dofor [c (.get_children cluster-state path false)]
+                                (let [raw (.get_data cluster-state (str path "/" c) false)
+                                      request (maybe-deserialize raw ProfileRequest)]
+                                      (if thrift?
+                                        request
+                                        (clojurify-profile-request request)))))]
+          requests))
+
+      (delete-topology-profile-requests
+        [this storm-id profile-request]
+        (let [profile-request-inst (thriftify-profile-request profile-request)
+              action (:action profile-request)
+              host (:host profile-request)
+              port (:port profile-request)]
+          (.delete_node cluster-state
+           (profiler-config-path storm-id host port action))))
+          
+      (get-worker-profile-requests
+        [this storm-id node-info thrift?]
+        (let [host (:host node-info)
+              port (:port node-info)
+              profile-requests (get-topology-profile-requests this storm-id thrift?)]
+          (if thrift?
+            (filter #(and (= host (.get_node (.get_nodeInfo %))) (= port (first (.get_port (.get_nodeInfo  %)))))
+                    profile-requests)
+            (filter #(and (= host (:host %)) (= port (:port %)))
+                    profile-requests))))
+      
+      (worker-heartbeat!
+        [this storm-id node port info]
+        (let [thrift-worker-hb (thriftify-zk-worker-hb info)]
+          (if thrift-worker-hb
+            (.set_worker_hb cluster-state (workerbeat-path storm-id node port) (Utils/serialize thrift-worker-hb) acls))))
+
+      (remove-worker-heartbeat!
+        [this storm-id node port]
+        (.delete_worker_hb cluster-state (workerbeat-path storm-id node port)))
+
+      (setup-heartbeats!
+        [this storm-id]
+        (.mkdirs cluster-state (workerbeat-storm-root storm-id) acls))
+
+      (teardown-heartbeats!
+        [this storm-id]
+        (try-cause
+          (.delete_worker_hb cluster-state (workerbeat-storm-root storm-id))
+          (catch KeeperException e
+            (log-warn-error e "Could not teardown heartbeats for " storm-id))))
+
+      (worker-backpressure!
+        [this storm-id node port on?]
+        "if znode exists and to be not on?, delete; if exists and on?, do nothing;
+        if not exists and to be on?, create; if not exists and not on?, do nothing"
+        (let [path (backpressure-path storm-id node port)
+              existed (.node_exists cluster-state path false)]
+          (if existed
+            (if (not on?)
+              (.delete_node cluster-state path))   ;; delete the znode since the worker is not congested
+            (if on?
+              (.set_ephemeral_node cluster-state path nil acls))))) ;; create the znode since worker is congested
+    
+      (topology-backpressure
+        [this storm-id callback]
+        "if the backpresure/storm-id dir is empty, this topology has throttle-on, otherwise not."
+        (when callback
+          (swap! backpressure-callback assoc storm-id callback))
+        (let [path (backpressure-storm-root storm-id)
+              children (.get_children cluster-state path (not-nil? callback))]
+              (> (count children) 0)))
+      
+      (setup-backpressure!
+        [this storm-id]
+        (.mkdirs cluster-state (backpressure-storm-root storm-id) acls))
+
+      (remove-worker-backpressure!
+        [this storm-id node port]
+        (.delete_node cluster-state (backpressure-path storm-id node port)))
+
+      (teardown-topology-errors!
+        [this storm-id]
+        (try-cause
+          (.delete_node cluster-state (error-storm-root storm-id))
+          (catch KeeperException e
+            (log-warn-error e "Could not teardown errors for " storm-id))))
+
+      (supervisor-heartbeat!
+        [this supervisor-id info]
+        (let [thrift-supervisor-info (thriftify-supervisor-info info)]
+          (.set_ephemeral_node cluster-state (supervisor-path supervisor-id) (Utils/serialize thrift-supervisor-info) acls)))
+
+      (activate-storm!
+        [this storm-id storm-base]
+        (let [thrift-storm-base (thriftify-storm-base storm-base)]
+          (.set_data cluster-state (storm-path storm-id) (Utils/serialize thrift-storm-base) acls)))
+
+      (update-storm!
+        [this storm-id new-elems]
+        (let [base (storm-base this storm-id nil)
+              executors (:component->executors base)
+              component->debug (:component->debug base)
+              new-elems (update new-elems :component->executors (partial merge executors))
+              new-elems (update new-elems :component->debug (partial merge-with merge component->debug))]
+          (.set_data cluster-state (storm-path storm-id)
+                    (-> base
+                        (merge new-elems)
+                        thriftify-storm-base
+                        Utils/serialize)
+                    acls)))
+
+      (storm-base
+        [this storm-id callback]
+        (when callback
+          (swap! storm-base-callback assoc storm-id callback))
+        (clojurify-storm-base (maybe-deserialize (.get_data cluster-state (storm-path storm-id) (not-nil? callback)) StormBase)))
+
+      (remove-storm-base!
+        [this storm-id]
+        (.delete_node cluster-state (storm-path storm-id)))
+
+      (set-assignment!
+        [this storm-id info]
+        (let [thrift-assignment (thriftify-assignment info)]
+          (.set_data cluster-state (assignment-path storm-id) (Utils/serialize thrift-assignment) acls)))
+
+      (remove-blobstore-key!
+        [this blob-key]
+        (log-debug "removing key" blob-key)
+        (.delete_node cluster-state (blobstore-path blob-key)))
+
+      (remove-key-version!
+        [this blob-key]
+        (.delete_node cluster-state (blobstore-max-key-sequence-number-path blob-key)))
+
+      (remove-storm!
+        [this storm-id]
+        (.delete_node cluster-state (assignment-path storm-id))
+        (.delete_node cluster-state (credentials-path storm-id))
+        (.delete_node cluster-state (log-config-path storm-id))
+        (.delete_node cluster-state (profiler-config-path storm-id))
+        (remove-storm-base! this storm-id))
+
+      (set-credentials!
+         [this storm-id creds topo-conf]
+         (let [topo-acls (mk-topo-only-acls topo-conf)
+               path (credentials-path storm-id)
+               thriftified-creds (thriftify-credentials creds)]
+           (.set_data cluster-state path (Utils/serialize thriftified-creds) topo-acls)))
+
+      (credentials
+        [this storm-id callback]
+        (when callback
+          (swap! credentials-callback assoc storm-id callback))
+        (clojurify-crdentials (maybe-deserialize (.get_data cluster-state (credentials-path storm-id) (not-nil? callback)) Credentials)))
+
+      (report-error
+         [this storm-id component-id node port error]
+         (let [path (error-path storm-id component-id)
+               last-error-path (last-error-path storm-id component-id)
+               data (thriftify-error {:time-secs (current-time-secs) :error (stringify-error error) :host node :port port})
+               _ (.mkdirs cluster-state path acls)
+               ser-data (Utils/serialize data)
+               _ (.mkdirs cluster-state path acls)
+               _ (.create_sequential cluster-state (str path "/e") ser-data acls)
+               _ (.set_data cluster-state last-error-path ser-data acls)
+               to-kill (->> (.get_children cluster-state path false)
+                            (sort-by parse-error-path)
+                            reverse
+                            (drop 10))]
+           (doseq [k to-kill]
+             (.delete_node cluster-state (str path "/" k)))))
+
+      (errors
+         [this storm-id component-id]
+         (let [path (error-path storm-id component-id)
+               errors (if (.node_exists cluster-state path false)
+                        (dofor [c (.get_children cluster-state path false)]
+                          (if-let [data (-> (.get_data cluster-state
+                                                      (str path "/" c)
+                                                      false)
+                                          (maybe-deserialize ErrorInfo)
+                                          clojurify-error)]
+                            (map->TaskError data)))
+                        ())]
+           (->> (filter not-nil? errors)
+                (sort-by (comp - :time-secs)))))
+
+      (last-error
+        [this storm-id component-id]
+        (let [path (last-error-path storm-id component-id)]
+          (if (.node_exists cluster-state path false)
+            (if-let [data (-> (.get_data cluster-state path false)
+                              (maybe-deserialize ErrorInfo)
+                              clojurify-error)]
+              (map->TaskError data)))))
+      
+      (disconnect
+         [this]
+        (.unregister cluster-state state-id)
+        (when solo?
+          (.close cluster-state))))))
+
+;; daemons have a single thread that will respond to events
+;; start with initialize event
+;; callbacks add events to the thread's queue
+
+;; keeps an in-memory cache of the state, only for what the client subscribes to. Any subscription is automatically kept in sync, and when there are changes, the client is notified.
+;; master gives orders through state, and client records status in state (ephemerally)
+
+;; master tells nodes what workers to launch
+
+;; master writes this. supervisors and workers subscribe to this to understand the complete topology. each storm is a map from nodes to workers to tasks to ports; whenever the topology changes, everyone will be notified
+;; master includes timestamp of each assignment so that appropriate time can be given to each worker to start up
+;; /assignments/{storm id}
+
+;; which tasks they talk to, etc. (immutable until shutdown)
+;; everyone reads this in full to understand structure
+;; /tasks/{storm id}/{task id} ; just contains bolt id
+
+;; supervisors send heartbeats here, master doesn't subscribe but checks asynchronously
+;; /supervisors/status/{ephemeral node ids}  ;; node metadata such as port ranges are kept here
+
+;; tasks send heartbeats here, master doesn't subscribe, just checks asynchronously
+;; /taskbeats/{storm id}/{ephemeral task id}
+
+;; contains data about whether it's started or not, tasks and workers subscribe to specific storm here to know when to shutdown
+;; master manipulates
+;; /storms/{storm id}
+
+;; Zookeeper flows:
+
+;; Master:
+;; job submit:
+;; 1. read which nodes are available
+;; 2. set up the worker/{storm}/{task} stuff (static)
+;; 3. set assignments
+;; 4. start storm - necessary in case the master goes down; when it comes back up it can remember to take down the storm (2 states: on or off)
+
+;; Monitoring (or by checking when nodes go down or heartbeats aren't received):
+;; 1. read assignment
+;; 2. see which tasks/nodes are up
+;; 3. make new assignment to fix any problems
+;; 4. if a storm exists but is not taken down fully, ensure that storm takedown is launched (step by step remove tasks and finally remove assignments)
+
+;; the master's only possible watches are on ephemeral nodes and tasks, and maybe not even those
+
+;; Supervisor:
+;; 1. monitor /storms/* and assignments
+;; 2. local state about which workers are local
+;; 3. when storm is on, check that workers are running locally & start/kill if different than assignments
+;; 4. when storm is off, monitor tasks for workers - when they all die or don't heartbeat, kill the process and clean up
+
+;; Worker:
+;; 1. On startup, start the tasks if the storm is on
+
+;; Task:
+;; 1. monitor assignments, reroute when assignments change
+;; 2. monitor storm (when storm turns off, error if assignments change) - take down tasks as master turns them off
+
+;; locally on supervisor: workers write pids locally on startup, supervisor deletes it on shutdown (associates pid with worker name)
+;; supervisor periodically checks to make sure processes are alive
+;; {rootdir}/workers/{storm id}/{worker id}   ;; contains pid inside
+
+;; all tasks in a worker share the same cluster state
+;; workers, supervisors, and tasks subscribe to the storm to know when it's started or stopped
+;; on stopped, master removes records in order (tasks need to subscribe to themselves to see if they disappear)
+;; when a master removes a worker, the supervisor should kill it (and escalate to kill -9)
+;; on shutdown, tasks subscribe to tasks that send data to them to wait for them to die. when node disappears, they can die
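+
+;; Illustrative sketch only (not part of this commit): the subscribe/notify flow the
+;; notes above describe looks roughly like this from a consumer's point of view --
+;; pass a callback when reading, and re-read whenever the callback fires. The name
+;; follow-assignments is invented; storm-cluster-state is assumed to be the reified
+;; StormClusterState built above.
+;;
+;;   (defn follow-assignments [storm-cluster-state]
+;;     (letfn [(cb [& _] (follow-assignments storm-cluster-state))]
+;;       (doseq [storm-id (assignments storm-cluster-state cb)]
+;;         (assignment-info storm-cluster-state storm-id cb))))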

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/cluster_state/zookeeper_state_factory.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/cluster_state/zookeeper_state_factory.clj b/storm-core/src/clj/org/apache/storm/cluster_state/zookeeper_state_factory.clj
new file mode 100644
index 0000000..3104c52
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/cluster_state/zookeeper_state_factory.clj
@@ -0,0 +1,161 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+
+(ns org.apache.storm.cluster-state.zookeeper-state-factory
+  (:import [org.apache.curator.framework.state ConnectionStateListener])
+  (:import [org.apache.zookeeper KeeperException$NoNodeException]
+           [org.apache.storm.cluster ClusterState DaemonType])
+  (:use [org.apache.storm cluster config log util])
+  (:require [org.apache.storm [zookeeper :as zk]])
+  (:gen-class
+   :implements [org.apache.storm.cluster.ClusterStateFactory]))
+
+(defn -mkState [this conf auth-conf acls context]
+  (let [zk (zk/mk-client conf (conf STORM-ZOOKEEPER-SERVERS) (conf STORM-ZOOKEEPER-PORT) :auth-conf auth-conf)]
+    (zk/mkdirs zk (conf STORM-ZOOKEEPER-ROOT) acls)
+    (.close zk))
+  (let [callbacks (atom {})
+        active (atom true)
+        zk-writer (zk/mk-client conf
+                         (conf STORM-ZOOKEEPER-SERVERS)
+                         (conf STORM-ZOOKEEPER-PORT)
+                         :auth-conf auth-conf
+                         :root (conf STORM-ZOOKEEPER-ROOT)
+                         :watcher (fn [state type path]
+                                    (when @active
+                                      (when-not (= :connected state)
+                                        (log-warn "Received event " state ":" type ":" path " with disconnected Writer Zookeeper."))
+                                      (when-not (= :none type)
+                                        (doseq [callback (vals @callbacks)]
+                                          (callback type path))))))
+        is-nimbus? (= (.getDaemonType context) DaemonType/NIMBUS)
+        zk-reader (if is-nimbus?
+                    (zk/mk-client conf
+                         (conf STORM-ZOOKEEPER-SERVERS)
+                         (conf STORM-ZOOKEEPER-PORT)
+                         :auth-conf auth-conf
+                         :root (conf STORM-ZOOKEEPER-ROOT)
+                         :watcher (fn [state type path]
+                                    (when @active
+                                      (when-not (= :connected state)
+                                        (log-warn "Received event " state ":" type ":" path " with disconnected Reader Zookeeper."))
+                                      (when-not (= :none type)
+                                        (doseq [callback (vals @callbacks)]
+                                          (callback type path))))))
+                    zk-writer)]
+    (reify
+     ClusterState
+
+     (register
+       [this callback]
+       (let [id (uuid)]
+         (swap! callbacks assoc id callback)
+         id))
+
+     (unregister
+       [this id]
+       (swap! callbacks dissoc id))
+
+     (set-ephemeral-node
+       [this path data acls]
+       (zk/mkdirs zk-writer (parent-path path) acls)
+       (if (zk/exists zk-writer path false)
+         (try-cause
+           (zk/set-data zk-writer path data) ; should verify that it's ephemeral
+           (catch KeeperException$NoNodeException e
+             (log-warn-error e "Ephemeral node disappeared between checking for existing and setting data")
+             (zk/create-node zk-writer path data :ephemeral acls)))
+         (zk/create-node zk-writer path data :ephemeral acls)))
+
+     (create-sequential
+       [this path data acls]
+       (zk/create-node zk-writer path data :sequential acls))
+
+     (set-data
+       [this path data acls]
+       ;; note: this does not turn off any existing watches
+       (if (zk/exists zk-writer path false)
+         (zk/set-data zk-writer path data)
+         (do
+           (zk/mkdirs zk-writer (parent-path path) acls)
+           (zk/create-node zk-writer path data :persistent acls))))
+
+     (set-worker-hb
+       [this path data acls]
+       (.set_data this path data acls))
+
+     (delete-node
+       [this path]
+       (zk/delete-node zk-writer path))
+
+     (delete-worker-hb
+       [this path]
+       (.delete_node this path))
+
+     (get-data
+       [this path watch?]
+       (zk/get-data zk-reader path watch?))
+
+     (get-data-with-version
+       [this path watch?]
+       (zk/get-data-with-version zk-reader path watch?))
+
+     (get-version
+       [this path watch?]
+       (zk/get-version zk-reader path watch?))
+
+     (get-worker-hb
+       [this path watch?]
+       (.get_data this path watch?))
+
+     (get-children
+       [this path watch?]
+       (zk/get-children zk-reader path watch?))
+
+     (get-worker-hb-children
+       [this path watch?]
+       (.get_children this path watch?))
+
+     (mkdirs
+       [this path acls]
+       (zk/mkdirs zk-writer path acls))
+
+     (node-exists
+       [this path watch?]
+       (zk/exists-node? zk-reader path watch?))
+
+     (add-listener
+       [this listener]
+       (let [curator-listener (reify ConnectionStateListener
+                                (stateChanged
+                                  [this client newState]
+                                  (.stateChanged listener client newState)))]
+         (zk/add-listener zk-reader curator-listener)))
+
+     (sync-path
+       [this path]
+       (zk/sync-path zk-writer path))
+
+      (delete-node-blobstore
+        [this path nimbus-host-port-info]
+        (zk/delete-node-blobstore zk-writer path nimbus-host-port-info))
+
+     (close
+       [this]
+       (reset! active false)
+       (.close zk-writer)
+       (if is-nimbus?
+         (.close zk-reader))))))

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/command/activate.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/command/activate.clj b/storm-core/src/clj/org/apache/storm/command/activate.clj
new file mode 100644
index 0000000..dc452e8
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/command/activate.clj
@@ -0,0 +1,24 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.command.activate
+  (:use [org.apache.storm thrift log])
+  (:gen-class))
+
+(defn -main [name] 
+  (with-configured-nimbus-connection nimbus
+    (.activate nimbus name)
+    (log-message "Activated topology: " name)
+    ))

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/command/blobstore.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/command/blobstore.clj b/storm-core/src/clj/org/apache/storm/command/blobstore.clj
new file mode 100644
index 0000000..b1496db
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/command/blobstore.clj
@@ -0,0 +1,162 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.command.blobstore
+  (:import [java.io InputStream OutputStream]
+           [org.apache.storm.generated SettableBlobMeta AccessControl AuthorizationException
+            KeyNotFoundException]
+           [org.apache.storm.blobstore BlobStoreAclHandler])
+  (:use [org.apache.storm config]
+        [clojure.string :only [split]]
+        [clojure.tools.cli :only [cli]]
+        [clojure.java.io :only [copy input-stream output-stream]]
+        [org.apache.storm blobstore log util])
+  (:gen-class))
+
+(defn update-blob-from-stream
+  "Update a blob in the blob store from an InputStream"
+  [key ^InputStream in]
+  (with-configured-blob-client blobstore
+    (let [out (.updateBlob blobstore key)]
+      (try 
+        (copy in out)
+        (.close out)
+        (catch Exception e
+          (log-message e)
+          (.cancel out)
+          (throw e))))))
+
+(defn create-blob-from-stream
+  "Create a blob in the blob store from an InputStream"
+  [key ^InputStream in ^SettableBlobMeta meta]
+  (with-configured-blob-client blobstore
+    (let [out (.createBlob blobstore key meta)]
+      (try 
+        (copy in out)
+        (.close out)
+        (catch Exception e
+          (.cancel out)
+          (throw e))))))
+
+(defn read-blob
+  "Read a blob in the blob store and write to an OutputStream"
+  [key ^OutputStream out]
+  (with-configured-blob-client blobstore
+    (with-open [in (.getBlob blobstore key)]
+      (copy in out))))
+
+(defn as-access-control
+  "Convert a parameter to an AccessControl object"
+  [param]
+  (BlobStoreAclHandler/parseAccessControl (str param)))
+
+(defn as-acl
+  [param]
+  (map as-access-control (split param #",")))
+
+(defn access-control-str
+  [^AccessControl acl]
+  (BlobStoreAclHandler/accessControlToString acl))
+
+(defn read-cli [args]
+  (let [[{file :file} [key] _] (cli args ["-f" "--file" :default nil])]
+    (if file
+      (with-open [f (output-stream file)]
+        (read-blob key f))
+      (read-blob key System/out))))
+
+(defn update-cli [args]
+  (let [[{file :file} [key] _] (cli args ["-f" "--file" :default nil])]
+    (if file
+      (with-open [f (input-stream file)]
+        (update-blob-from-stream key f))
+      (update-blob-from-stream key System/in))
+    (log-message "Successfully updated " key)))
+
+(defn create-cli [args]
+  (let [[{file :file acl :acl replication-factor :replication-factor} [key] _] (cli args ["-f" "--file" :default nil]
+                                                  ["-a" "--acl" :default [] :parse-fn as-acl]
+                                                  ["-r" "--replication-factor" :default -1 :parse-fn parse-int])
+        meta (doto (SettableBlobMeta. acl)
+                   (.set_replication_factor replication-factor))]
+    (validate-key-name! key)
+    (log-message "Creating " key " with ACL " (pr-str (map access-control-str acl)))
+    (if file
+      (with-open [f (input-stream file)]
+        (create-blob-from-stream key f meta))
+      (create-blob-from-stream key System/in meta))
+    (log-message "Successfully created " key)))
+
+(defn delete-cli [args]
+  (with-configured-blob-client blobstore
+    (doseq [key args]
+      (.deleteBlob blobstore key)
+      (log-message "deleted " key))))
+
+(defn list-cli [args]
+  (with-configured-blob-client blobstore
+    (let [keys (if (empty? args) (iterator-seq (.listKeys blobstore)) args)]
+      (doseq [key keys]
+        (try
+          (let [meta (.getBlobMeta blobstore key)
+                version (.get_version meta)
+                acl (.get_acl (.get_settable meta))]
+            (log-message key " " version " " (pr-str (map access-control-str acl))))
+          (catch AuthorizationException ae
+            (if-not (empty? args) (log-error "ACCESS DENIED to key: " key)))
+          (catch KeyNotFoundException knf
+            (if-not (empty? args) (log-error key " NOT FOUND"))))))))
+
+(defn set-acl-cli [args]
+  (let [[{set-acl :set} [key] _]
+           (cli args ["-s" "--set" :default [] :parse-fn as-acl])]
+    (with-configured-blob-client blobstore
+      (let [meta (.getBlobMeta blobstore key)
+            acl (.get_acl (.get_settable meta))
+            new-acl (if set-acl set-acl acl)
+            new-meta (SettableBlobMeta. new-acl)]
+        (log-message "Setting ACL for " key " to " (pr-str (map access-control-str new-acl)))
+        (.setBlobMeta blobstore key new-meta)))))
+
+(defn rep-cli [args]
+  (let [sub-command (first args)
+        new-args (rest args)]
+    (with-configured-blob-client blobstore
+      (condp = sub-command
+      "--read" (let [key (first new-args)
+                     blob-replication (.getBlobReplication blobstore key)]
+                 (log-message "Current replication factor " blob-replication)
+                 blob-replication)
+      "--update" (let [[{replication-factor :replication-factor} [key] _]
+                        (cli new-args ["-r" "--replication-factor" :parse-fn parse-int])]
+                   (if (nil? replication-factor)
+                     (throw (RuntimeException. (str "Please set the replication factor")))
+                     (let [blob-replication (.updateBlobReplication blobstore key replication-factor)]
+                       (log-message "Replication factor is set to " blob-replication)
+                       blob-replication)))
+      (throw (RuntimeException. (str sub-command " is not a supported blobstore command")))))))
+
+(defn -main [& args]
+  (let [command (first args)
+        new-args (rest args)]
+    (condp = command
+      "cat" (read-cli new-args)
+      "create" (create-cli new-args)
+      "update" (update-cli new-args)
+      "delete" (delete-cli new-args)
+      "list" (list-cli new-args)
+      "set-acl" (set-acl-cli new-args)
+      "replication" (rep-cli new-args)
+      (throw (RuntimeException. (str command " is not a supported blobstore command"))))))
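+
+;; Illustrative usage only (not part of this commit): assuming the "storm blobstore"
+;; launcher dispatches to -main above, invocations map onto these handlers roughly as
+;;   storm blobstore list
+;;   storm blobstore create mykey -f ./settings.yaml -r 3
+;;   storm blobstore cat mykey -f ./settings-copy.yaml
+;;   storm blobstore replication --read mykey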

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/command/config_value.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/command/config_value.clj b/storm-core/src/clj/org/apache/storm/command/config_value.clj
new file mode 100644
index 0000000..9bc3e92
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/command/config_value.clj
@@ -0,0 +1,24 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.command.config-value
+  (:use [org.apache.storm config log])
+  (:gen-class))
+
+
+(defn -main [^String name]
+  (let [conf (read-storm-config)]
+    (println "VALUE:" (conf name))
+    ))

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/command/deactivate.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/command/deactivate.clj b/storm-core/src/clj/org/apache/storm/command/deactivate.clj
new file mode 100644
index 0000000..4fd2c85
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/command/deactivate.clj
@@ -0,0 +1,24 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.command.deactivate
+  (:use [org.apache.storm thrift log])
+  (:gen-class))
+
+(defn -main [name] 
+  (with-configured-nimbus-connection nimbus
+    (.deactivate nimbus name)
+    (log-message "Deactivated topology: " name)
+    ))

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/command/dev_zookeeper.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/command/dev_zookeeper.clj b/storm-core/src/clj/org/apache/storm/command/dev_zookeeper.clj
new file mode 100644
index 0000000..96de02d
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/command/dev_zookeeper.clj
@@ -0,0 +1,26 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.command.dev-zookeeper
+  (:use [org.apache.storm zookeeper util config])
+  (:gen-class))
+
+(defn -main [& args]
+  (let [conf (read-storm-config)
+        port (conf STORM-ZOOKEEPER-PORT)
+        localpath (conf DEV-ZOOKEEPER-PATH)]
+    (rmr localpath)
+    (mk-inprocess-zookeeper localpath :port port)
+    ))

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/command/get_errors.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/command/get_errors.clj b/storm-core/src/clj/org/apache/storm/command/get_errors.clj
new file mode 100644
index 0000000..c267390
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/command/get_errors.clj
@@ -0,0 +1,52 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.command.get-errors
+  (:use [clojure.tools.cli :only [cli]])
+  (:use [org.apache.storm thrift log])
+  (:use [org.apache.storm util])
+  (:require [org.apache.storm.daemon
+             [nimbus :as nimbus]
+             [common :as common]])
+  (:import [org.apache.storm.generated GetInfoOptions NumErrorsChoice
+            TopologySummary ErrorInfo])
+  (:gen-class))
+
+(defn get-topology-id [name topologies]
+  (let [topology (first (filter #(= (.get_name %1) name) topologies))]
+    (when (not-nil? topology) (.get_id topology))))
+
+(defn get-component-errors
+  [topology-errors]
+  (apply hash-map (remove nil?
+                    (flatten (for [[comp-name comp-errors] topology-errors]
+                               (let [latest-error (when (not (empty? comp-errors)) (first comp-errors))]
+                                 (if latest-error [comp-name (.get_error ^ErrorInfo latest-error)])))))))
+
+(defn -main [name]
+  (with-configured-nimbus-connection nimbus
+    (let [opts (doto (GetInfoOptions.)
+                 (.set_num_err_choice NumErrorsChoice/ONE))
+          cluster-info (.getClusterInfo nimbus)
+          topologies (.get_topologies cluster-info)
+          topo-id (get-topology-id name topologies)
+          topo-info (when (not-nil? topo-id) (.getTopologyInfoWithOpts nimbus topo-id opts))]
+      (if (or (nil? topo-id) (nil? topo-info))
+        (println (to-json {"Failure" (str "No topologies running with name " name)}))
+        (let [topology-name (.get_name topo-info)
+              topology-errors (.get_errors topo-info)]
+          (println (to-json (hash-map
+                              "Topology Name" topology-name
+                              "Comp-Errors" (get-component-errors topology-errors)))))))))

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/command/healthcheck.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/command/healthcheck.clj b/storm-core/src/clj/org/apache/storm/command/healthcheck.clj
new file mode 100644
index 0000000..d96d7b3
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/command/healthcheck.clj
@@ -0,0 +1,88 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.command.healthcheck
+  (:require [org.apache.storm
+             [config :refer :all]
+             [log :refer :all]]
+            [clojure.java [io :as io]]
+            [clojure [string :refer [split]]])
+  (:gen-class))
+
+(defn interrupter
+  "Interrupt a given thread after ms milliseconds."
+  [thread ms]
+  (let [interrupter (Thread.
+                     (fn []
+                       (try
+                         (Thread/sleep ms)
+                         (.interrupt thread)
+                         (catch InterruptedException e))))]
+    (.start interrupter)
+    interrupter))
+
+(defn check-output [lines]
+  (if (some #(.startsWith % "ERROR") lines)
+    :failed
+    :success))
+
+(defn process-script [conf script]
+  (let [script-proc (. (Runtime/getRuntime) (exec script))
+        curthread (Thread/currentThread)
+        interrupter-thread (interrupter curthread
+                                        (conf STORM-HEALTH-CHECK-TIMEOUT-MS))]
+    (try
+      (.waitFor script-proc)
+      (.interrupt interrupter-thread)
+      (if (not (= (.exitValue script-proc) 0))
+        :failed_with_exit_code
+        (check-output (split
+                       (slurp (.getInputStream script-proc))
+                       #"\n+")))
+      (catch InterruptedException e
+        (println "Script" script "timed out.")
+        :timeout)
+      (catch Exception e
+        (println "Script failed with exception: " e)
+        :failed_with_exception)
+      (finally (.interrupt interrupter-thread)))))
+
+(defn health-check [conf]
+  (let [health-dir (absolute-healthcheck-dir conf)
+        health-files (file-seq (io/file health-dir))
+        health-scripts (filter #(and (.canExecute %)
+                                     (not (.isDirectory %)))
+                               health-files)
+        results (->> health-scripts
+                     (map #(.getAbsolutePath %))
+                     (map (partial process-script conf)))]
+    (log-message
+     (pr-str (map #'vector
+                  (map #(.getAbsolutePath %) health-scripts)
+                  results)))
+    ; failed_with_exit_code is OK. We're mimicking Hadoop's health checks.
+    ; We treat non-zero exit codes as indicators that the scripts failed
+    ; to execute properly, not that the system is unhealthy, in which case
+    ; we don't want to start killing things.
+    (if (every? #(or (= % :failed_with_exit_code)
+                     (= % :success))
+                results)
+      0
+      1)))
+
+(defn -main [& args]
+  (let [conf (read-storm-config)]
+    (System/exit
+     (health-check conf))))
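+
+;; Illustrative only (not part of this commit): a health check script placed in the
+;; configured health check directory reports an unhealthy node by printing a line
+;; starting with "ERROR" (and exiting 0), e.g. an executable script containing
+;;   #!/bin/bash
+;;   df -h / | awk 'NR==2 && $5+0 > 95 {print "ERROR disk nearly full"}'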

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/command/heartbeats.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/command/heartbeats.clj b/storm-core/src/clj/org/apache/storm/command/heartbeats.clj
new file mode 100644
index 0000000..ff28cba
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/command/heartbeats.clj
@@ -0,0 +1,52 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.command.heartbeats
+  (:require [org.apache.storm
+             [config :refer :all]
+             [log :refer :all]
+             [cluster :refer :all]
+             [converter :refer :all]]
+        [clojure.string :refer :all])
+  (:import [org.apache.storm.generated ClusterWorkerHeartbeat]
+           [org.apache.storm.utils Utils])
+  (:gen-class))
+
+(defn -main [command path & args]
+  (let [conf (read-storm-config)
+        cluster (mk-distributed-cluster-state conf :auth-conf conf)]
+    (println "Command: [" command "]")
+    (condp = command
+      "list"
+      (let [message (join " \n" (.get_worker_hb_children cluster path false))]
+        (log-message "list " path ":\n"
+                     message "\n"))
+      "get"
+      (log-message 
+       (if-let [hb (.get_worker_hb cluster path false)]
+         (clojurify-zk-worker-hb
+          (Utils/deserialize
+           hb
+           ClusterWorkerHeartbeat))
+         "Nothing"))
+      
+      (log-message "Usage: heartbeats [list|get] path"))
+    
+    (try
+      (.close cluster)
+      (catch Exception e
+        (log-message "Caught exception: " e " on close."))))
+  (System/exit 0))
+         

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/command/kill_topology.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/command/kill_topology.clj b/storm-core/src/clj/org/apache/storm/command/kill_topology.clj
new file mode 100644
index 0000000..84e0a64
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/command/kill_topology.clj
@@ -0,0 +1,29 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.command.kill-topology
+  (:use [clojure.tools.cli :only [cli]])
+  (:use [org.apache.storm thrift config log])
+  (:import [org.apache.storm.generated KillOptions])
+  (:gen-class))
+
+(defn -main [& args]
+  (let [[{wait :wait} [name] _] (cli args ["-w" "--wait" :default nil :parse-fn #(Integer/parseInt %)])
+        opts (KillOptions.)]
+    (if wait (.set_wait_secs opts wait))
+    (with-configured-nimbus-connection nimbus
+      (.killTopologyWithOpts nimbus name opts)
+      (log-message "Killed topology: " name)
+      )))

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/command/kill_workers.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/command/kill_workers.clj b/storm-core/src/clj/org/apache/storm/command/kill_workers.clj
new file mode 100644
index 0000000..2670735
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/command/kill_workers.clj
@@ -0,0 +1,33 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.command.kill-workers
+  (:import [java.io File])
+  (:use [org.apache.storm.daemon common])
+  (:use [org.apache.storm util config])
+  (:require [org.apache.storm.daemon
+             [supervisor :as supervisor]])
+  (:gen-class))
+
+(defn -main 
+  "Construct the supervisor-data from scratch and kill the workers on this supervisor"
+  [& args]
+  (let [conf (read-storm-config)
+        conf (assoc conf STORM-LOCAL-DIR (. (File. (conf STORM-LOCAL-DIR)) getCanonicalPath))
+        isupervisor (supervisor/standalone-supervisor)
+        supervisor-data (supervisor/supervisor-data conf nil isupervisor)
+        ids (supervisor/my-worker-ids conf)]
+    (doseq [id ids]
+      (supervisor/shutdown-worker supervisor-data id))))

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/command/list.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/command/list.clj b/storm-core/src/clj/org/apache/storm/command/list.clj
new file mode 100644
index 0000000..87975cd
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/command/list.clj
@@ -0,0 +1,38 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.command.list
+  (:use [org.apache.storm thrift log])
+  (:import [org.apache.storm.generated TopologySummary])
+  (:gen-class))
+
+(defn -main []
+  (with-configured-nimbus-connection nimbus
+    (let [cluster-info (.getClusterInfo nimbus)
+          topologies (.get_topologies cluster-info)
+          msg-format "%-20s %-10s %-10s %-12s %-10s"]
+      (if (or (nil? topologies) (empty? topologies))
+        (println "No topologies running.")
+        (do
+          (println (format msg-format "Topology_name" "Status" "Num_tasks" "Num_workers" "Uptime_secs"))
+          (println "-------------------------------------------------------------------")
+          (doseq [^TopologySummary topology topologies]
+            (let [topology-name (.get_name topology)
+                  topology-status (.get_status topology)
+                  topology-num-tasks (.get_num_tasks topology)
+                  topology-num-workers (.get_num_workers topology)
+                  topology-uptime-secs (.get_uptime_secs topology)]
+              (println (format msg-format  topology-name topology-status topology-num-tasks
+                               topology-num-workers topology-uptime-secs)))))))))

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/command/monitor.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/command/monitor.clj b/storm-core/src/clj/org/apache/storm/command/monitor.clj
new file mode 100644
index 0000000..7fa9b2a
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/command/monitor.clj
@@ -0,0 +1,37 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.command.monitor
+  (:use [clojure.tools.cli :only [cli]])
+  (:use [org.apache.storm.thrift :only [with-configured-nimbus-connection]])
+  (:import [org.apache.storm.utils Monitor])
+  (:gen-class)
+ )
+
+(defn -main [& args]
+  (let [[{interval :interval component :component stream :stream watch :watch} [name] _]
+        (cli args ["-i" "--interval" :default 4 :parse-fn #(Integer/parseInt %)]
+          ["-m" "--component" :default nil]
+          ["-s" "--stream" :default "default"]
+          ["-w" "--watch" :default "emitted"])
+        mon (Monitor.)]
+    (if interval (.set_interval mon interval))
+    (if name (.set_topology mon name))
+    (if component (.set_component mon component))
+    (if stream (.set_stream mon stream))
+    (if watch (.set_watch mon watch))
+    (with-configured-nimbus-connection nimbus
+      (.metrics mon nimbus)
+      )))

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/command/rebalance.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/command/rebalance.clj b/storm-core/src/clj/org/apache/storm/command/rebalance.clj
new file mode 100644
index 0000000..3868091
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/command/rebalance.clj
@@ -0,0 +1,46 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.command.rebalance
+  (:use [clojure.tools.cli :only [cli]])
+  (:use [org.apache.storm thrift config log])
+  (:import [org.apache.storm.generated RebalanceOptions])
+  (:gen-class))
+
+(defn- parse-executor [^String s]
+  (let [eq-pos (.lastIndexOf s "=")
+        name (.substring s 0 eq-pos)
+        amt (.substring s (inc eq-pos))]
+    {name (Integer/parseInt amt)}
+    ))
+
+(defn -main [& args] 
+  (let [[{wait :wait executor :executor num-workers :num-workers} [name] _]
+                  (cli args ["-w" "--wait" :default nil :parse-fn #(Integer/parseInt %)]
+                            ["-n" "--num-workers" :default nil :parse-fn #(Integer/parseInt %)]
+                            ["-e" "--executor"  :parse-fn parse-executor
+                             :assoc-fn (fn [previous key val]
+                                         (assoc previous key
+                                                (if-let [oldval (get previous key)]
+                                                  (merge oldval val)
+                                                  val)))])
+        opts (RebalanceOptions.)]
+    (if wait (.set_wait_secs opts wait))
+    (if executor (.set_num_executors opts executor))
+    (if num-workers (.set_num_workers opts num-workers))
+    (with-configured-nimbus-connection nimbus
+      (.rebalance nimbus name opts)
+      (log-message "Topology " name " is rebalancing")
+      )))

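The -e flag deserves a short illustration: each occurrence is parsed by parse-executor into a single-entry map, and the :assoc-fn merges repeated flags, so several components can be resized in one call. A sketch evaluated inside the namespace, with placeholder names:

    ;; Each "-e" value is parsed into {component executor-count}, and repeats are merged:
    (parse-executor "mybolt=8")            ;=> {"mybolt" 8}
    ;; so `storm rebalance mytopo -w 30 -n 5 -e mybolt=8 -e myspout=2` asks Nimbus to wait
    ;; 30 seconds, then move to 5 workers with executors {"mybolt" 8, "myspout" 2}.
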
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/command/set_log_level.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/command/set_log_level.clj b/storm-core/src/clj/org/apache/storm/command/set_log_level.clj
new file mode 100644
index 0000000..7e1c3c5
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/command/set_log_level.clj
@@ -0,0 +1,75 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.command.set-log-level
+  (:use [clojure.tools.cli :only [cli]])
+  (:use [org.apache.storm thrift log])
+  (:import [org.apache.logging.log4j Level])
+  (:import [org.apache.storm.generated LogConfig LogLevel LogLevelAction])
+  (:gen-class))
+
+(defn- get-storm-id
+  "Get topology id for a running topology from the topology name."
+  [nimbus name]
+  (let [info (.getClusterInfo nimbus)
+        topologies (.get_topologies info)
+        topology (first (filter (fn [topo] (= name (.get_name topo))) topologies))]
+    (if topology 
+      (.get_id topology)
+      (throw (IllegalArgumentException. (str name " is not a running topology"))))))
+
+(defn- parse-named-log-levels [action]
+  "Parses [logger name]=[level string]:[optional timeout],[logger name2]...
+
+   e.g. ROOT=DEBUG:30
+        root logger, debug for 30 seconds
+
+        org.apache.foo=WARN
+        org.apache.foo set to WARN indefinitely"
+  (fn [^String s]
+    (let [log-args (re-find #"(.*)=([A-Z]+):?(\d*)" s)
+          name (if (= action LogLevelAction/REMOVE) s (nth log-args 1))
+          level (Level/toLevel (nth log-args 2))
+          timeout-str (nth log-args 3)
+          log-level (LogLevel.)]
+      (if (= action LogLevelAction/REMOVE)
+        (.set_action log-level action)
+        (do
+          (.set_action log-level action)
+          (.set_target_log_level log-level (.toString level))
+          (.set_reset_log_level_timeout_secs log-level
+            (Integer. (if (= timeout-str "") "0" timeout-str)))))
+      {name log-level})))
+
+(defn- merge-together [previous key val]
+   (assoc previous key
+      (if-let [oldval (get previous key)]
+         (merge oldval val)
+         val)))
+
+(defn -main [& args]
+  (let [[{log-setting :log-setting remove-log-setting :remove-log-setting} [name] _]
+        (cli args ["-l" "--log-setting"
+                   :parse-fn (parse-named-log-levels LogLevelAction/UPDATE)
+                   :assoc-fn merge-together]
+                  ["-r" "--remove-log-setting"
+                   :parse-fn (parse-named-log-levels LogLevelAction/REMOVE)
+                   :assoc-fn merge-together])
+        log-config (LogConfig.)]
+    (doseq [[log-name log-val] (merge log-setting remove-log-setting)]
+      (.put_to_named_logger_level log-config log-name log-val))
+    (log-message "Sent log config " log-config " for topology " name)
+    (with-configured-nimbus-connection nimbus
+      (.setLogConfig nimbus (get-storm-id nimbus name) log-config))))

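The -l/-r argument format is [logger]=[LEVEL]:[optional timeout in seconds]; a REMOVE entry is keyed by the bare logger name. A sketch of what the parser yields, evaluated inside the namespace and using illustrative logger names:

    ;; The UPDATE parser turns one -l argument into a {logger-name LogLevel} entry
    ;; destined for LogConfig's named_logger_level map:
    ((parse-named-log-levels LogLevelAction/UPDATE) "ROOT=DEBUG:30")
    ;;   => {"ROOT" ...}           action UPDATE, target level DEBUG, reset after 30 seconds
    ((parse-named-log-levels LogLevelAction/UPDATE) "org.apache.foo=WARN")
    ;;   => {"org.apache.foo" ...} no timeout given, so the reset timeout defaults to 0
    ((parse-named-log-levels LogLevelAction/REMOVE) "org.apache.foo")
    ;;   => {"org.apache.foo" ...} action REMOVE only, keyed by the bare logger name
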
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/command/shell_submission.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/command/shell_submission.clj b/storm-core/src/clj/org/apache/storm/command/shell_submission.clj
new file mode 100644
index 0000000..b09c4f7
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/command/shell_submission.clj
@@ -0,0 +1,33 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.command.shell-submission
+  (:import [org.apache.storm StormSubmitter])
+  (:use [org.apache.storm thrift util config log zookeeper])
+  (:require [clojure.string :as str])
+  (:gen-class))
+
+
+(defn -main [^String tmpjarpath & args]
+  (let [conf (read-storm-config)
+        zk-leader-elector (zk-leader-elector conf)
+        leader-nimbus (.getLeader zk-leader-elector)
+        host (.getHost leader-nimbus)
+        port (.getPort leader-nimbus)
+        no-op (.close zk-leader-elector)
+        jarpath (StormSubmitter/submitJar conf tmpjarpath)
+        args (concat args [host port jarpath])]
+    (exec-command! (str/join " " args))
+    ))

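shell_submission backs the `storm shell` launcher: it locates the leader Nimbus through the ZooKeeper leader elector, uploads the temporary jar, and then execs the remaining command line with host, port, and the uploaded jar path appended. Roughly, and with placeholder paths:

    ;; Sketch only: something like `storm shell resources/ python topology.py` ends up calling
    ;; (-main "/tmp/<packaged-resources>.jar" "python" "topology.py")
    ;; which finally execs: python topology.py <nimbus-host> <nimbus-port> <uploaded-jar-path>
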
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/org/apache/storm/command/upload_credentials.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/command/upload_credentials.clj b/storm-core/src/clj/org/apache/storm/command/upload_credentials.clj
new file mode 100644
index 0000000..f63bde4
--- /dev/null
+++ b/storm-core/src/clj/org/apache/storm/command/upload_credentials.clj
@@ -0,0 +1,35 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.command.upload-credentials
+  (:use [clojure.tools.cli :only [cli]])
+  (:use [org.apache.storm log util])
+  (:import [org.apache.storm StormSubmitter])
+  (:import [java.util Properties])
+  (:import [java.io FileReader])
+  (:gen-class))
+
+(defn read-map [file-name]
+  (let [props (Properties. )
+        _ (.load props (FileReader. file-name))]
+    (clojurify-structure props)))
+
+(defn -main [& args]
+  (let [[{cred-file :file} [name & rawCreds]] (cli args ["-f" "--file" :default nil])
+        _ (when (and rawCreds (not (even? (.size rawCreds)))) (throw (RuntimeException.  "Need an even number of arguments to make a map")))
+        mapping (if rawCreds (apply assoc {} rawCreds) {})
+        file-mapping (if (nil? cred-file) {} (read-map cred-file))]
+      (StormSubmitter/pushCredentials name {} (merge file-mapping mapping))
+      (log-message "Uploaded new creds to topology: " name)))

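upload_credentials accepts credentials either as key/value pairs on the command line or from a Java properties file via -f; because the command-line map is merged over the file map, command-line pairs win when both supply the same key. A usage sketch with placeholder names:

    ;; Roughly equivalent to `storm upload_credentials mytopo -f my.properties user secret`:
    (require '[org.apache.storm.command.upload-credentials :as creds])
    (creds/-main "-f" "my.properties" "mytopo" "user" "secret")
    ;; pushes (merge <my.properties as a map> {"user" "secret"}) to the running topology mytopo.
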

[36/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/trident/DefaultCoordinator.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/DefaultCoordinator.java b/external/storm-kafka/src/jvm/storm/kafka/trident/DefaultCoordinator.java
deleted file mode 100644
index 04e1396..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/DefaultCoordinator.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka.trident;
-
-public class DefaultCoordinator implements IBatchCoordinator {
-
-    @Override
-    public boolean isReady(long txid) {
-        return true;
-    }
-
-    @Override
-    public void close() {
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java b/external/storm-kafka/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java
deleted file mode 100644
index b0d97fc..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/GlobalPartitionInformation.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka.trident;
-
-import com.google.common.base.Objects;
-import storm.kafka.Broker;
-import storm.kafka.Partition;
-
-import java.io.Serializable;
-import java.util.*;
-
-
-public class GlobalPartitionInformation implements Iterable<Partition>, Serializable {
-
-    private Map<Integer, Broker> partitionMap;
-    public String topic;
-
-    //Flag to keep the Partition Path Id backward compatible with Old implementation of Partition.getId() == "partition_" + partition
-    private Boolean bUseTopicNameForPartitionPathId;
-
-    public GlobalPartitionInformation(String topic, Boolean bUseTopicNameForPartitionPathId) {
-        this.topic = topic;
-        this.partitionMap = new TreeMap<Integer, Broker>();
-        this.bUseTopicNameForPartitionPathId = bUseTopicNameForPartitionPathId;
-    }
-
-    public GlobalPartitionInformation(String topic) {
-        this.topic = topic;
-        this.partitionMap = new TreeMap<Integer, Broker>();
-        this.bUseTopicNameForPartitionPathId = false;
-    }
-
-    public void addPartition(int partitionId, Broker broker) {
-        partitionMap.put(partitionId, broker);
-    }
-
-    @Override
-    public String toString() {
-        return "GlobalPartitionInformation{" +
-                "topic=" + topic +
-                ", partitionMap=" + partitionMap +
-                '}';
-    }
-
-    public Broker getBrokerFor(Integer partitionId) {
-        return partitionMap.get(partitionId);
-    }
-
-    public List<Partition> getOrderedPartitions() {
-        List<Partition> partitions = new LinkedList<Partition>();
-        for (Map.Entry<Integer, Broker> partition : partitionMap.entrySet()) {
-            partitions.add(new Partition(partition.getValue(), this.topic, partition.getKey(), this.bUseTopicNameForPartitionPathId));
-        }
-        return partitions;
-    }
-
-    @Override
-    public Iterator<Partition> iterator() {
-        final Iterator<Map.Entry<Integer, Broker>> iterator = partitionMap.entrySet().iterator();
-        final String topic = this.topic;
-        final Boolean bUseTopicNameForPartitionPathId = this.bUseTopicNameForPartitionPathId;
-        return new Iterator<Partition>() {
-            @Override
-            public boolean hasNext() {
-                return iterator.hasNext();
-            }
-
-            @Override
-            public Partition next() {
-                Map.Entry<Integer, Broker> next = iterator.next();
-                return new Partition(next.getValue(), topic , next.getKey(), bUseTopicNameForPartitionPathId);
-            }
-
-            @Override
-            public void remove() {
-                iterator.remove();
-            }
-        };
-    }
-
-    @Override
-    public int hashCode() {
-        return Objects.hashCode(partitionMap);
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-        if (this == obj) {
-            return true;
-        }
-        if (obj == null || getClass() != obj.getClass()) {
-            return false;
-        }
-        final GlobalPartitionInformation other = (GlobalPartitionInformation) obj;
-        return Objects.equal(this.partitionMap, other.partitionMap);
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/trident/IBatchCoordinator.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/IBatchCoordinator.java b/external/storm-kafka/src/jvm/storm/kafka/trident/IBatchCoordinator.java
deleted file mode 100644
index 04231f4..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/IBatchCoordinator.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka.trident;
-
-import java.io.Serializable;
-
-public interface IBatchCoordinator extends Serializable {
-    boolean isReady(long txid);
-
-    void close();
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/trident/IBrokerReader.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/IBrokerReader.java b/external/storm-kafka/src/jvm/storm/kafka/trident/IBrokerReader.java
deleted file mode 100644
index afba659..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/IBrokerReader.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka.trident;
-
-import java.util.List;
-import java.util.Map;
-
-public interface IBrokerReader {
-
-    GlobalPartitionInformation getBrokerForTopic(String topic);
-
-    List<GlobalPartitionInformation> getAllBrokers();
-
-    void close();
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/trident/MaxMetric.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/MaxMetric.java b/external/storm-kafka/src/jvm/storm/kafka/trident/MaxMetric.java
deleted file mode 100644
index 60d7c7b..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/MaxMetric.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka.trident;
-
-
-import backtype.storm.metric.api.ICombiner;
-
-public class MaxMetric implements ICombiner<Long> {
-    @Override
-    public Long identity() {
-        return null;
-    }
-
-    @Override
-    public Long combine(Long l1, Long l2) {
-        if (l1 == null) {
-            return l2;
-        }
-        if (l2 == null) {
-            return l1;
-        }
-        return Math.max(l1, l2);
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/trident/OpaqueTridentKafkaSpout.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/OpaqueTridentKafkaSpout.java b/external/storm-kafka/src/jvm/storm/kafka/trident/OpaqueTridentKafkaSpout.java
deleted file mode 100644
index fbd1d7a..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/OpaqueTridentKafkaSpout.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka.trident;
-
-import backtype.storm.task.TopologyContext;
-import backtype.storm.tuple.Fields;
-import storm.kafka.Partition;
-import storm.trident.spout.IOpaquePartitionedTridentSpout;
-
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-
-
-public class OpaqueTridentKafkaSpout implements IOpaquePartitionedTridentSpout<List<GlobalPartitionInformation>, Partition, Map> {
-
-
-    TridentKafkaConfig _config;
-
-    public OpaqueTridentKafkaSpout(TridentKafkaConfig config) {
-        _config = config;
-    }
-
-    @Override
-    public IOpaquePartitionedTridentSpout.Emitter<List<GlobalPartitionInformation>, Partition, Map> getEmitter(Map conf, TopologyContext context) {
-        return new TridentKafkaEmitter(conf, context, _config, context
-                .getStormId()).asOpaqueEmitter();
-    }
-
-    @Override
-    public IOpaquePartitionedTridentSpout.Coordinator getCoordinator(Map conf, TopologyContext tc) {
-        return new storm.kafka.trident.Coordinator(conf, _config);
-    }
-
-    @Override
-    public Fields getOutputFields() {
-        return _config.scheme.getOutputFields();
-    }
-
-    @Override
-    public Map<String, Object> getComponentConfiguration() {
-        return null;
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/trident/StaticBrokerReader.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/StaticBrokerReader.java b/external/storm-kafka/src/jvm/storm/kafka/trident/StaticBrokerReader.java
deleted file mode 100644
index ca83c06..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/StaticBrokerReader.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka.trident;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
-public class StaticBrokerReader implements IBrokerReader {
-
-    private Map<String,GlobalPartitionInformation> brokers = new TreeMap<String,GlobalPartitionInformation>();
-
-    public StaticBrokerReader(String topic, GlobalPartitionInformation partitionInformation) {
-        this.brokers.put(topic, partitionInformation);
-    }
-
-    @Override
-    public GlobalPartitionInformation getBrokerForTopic(String topic) {
-        if (brokers.containsKey(topic)) return brokers.get(topic);
-        return null;
-    }
-
-    @Override
-    public List<GlobalPartitionInformation> getAllBrokers () {
-        List<GlobalPartitionInformation> list = new ArrayList<GlobalPartitionInformation>();
-        list.addAll(brokers.values());
-        return list;
-    }
-
-    @Override
-    public void close() {
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/trident/TransactionalTridentKafkaSpout.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/TransactionalTridentKafkaSpout.java b/external/storm-kafka/src/jvm/storm/kafka/trident/TransactionalTridentKafkaSpout.java
deleted file mode 100644
index 9feffc8..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/TransactionalTridentKafkaSpout.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka.trident;
-
-import backtype.storm.task.TopologyContext;
-import backtype.storm.tuple.Fields;
-import storm.kafka.Partition;
-import storm.trident.spout.IPartitionedTridentSpout;
-
-import java.util.Map;
-import java.util.UUID;
-
-
-public class TransactionalTridentKafkaSpout implements IPartitionedTridentSpout<GlobalPartitionInformation, Partition, Map> {
-
-    TridentKafkaConfig _config;
-
-    public TransactionalTridentKafkaSpout(TridentKafkaConfig config) {
-        _config = config;
-    }
-
-
-    @Override
-    public IPartitionedTridentSpout.Coordinator getCoordinator(Map conf, TopologyContext context) {
-        return new storm.kafka.trident.Coordinator(conf, _config);
-    }
-
-    @Override
-    public IPartitionedTridentSpout.Emitter getEmitter(Map conf, TopologyContext context) {
-        return new TridentKafkaEmitter(conf, context, _config, context
-                .getStormId()).asTransactionalEmitter();
-    }
-
-    @Override
-    public Fields getOutputFields() {
-        return _config.scheme.getOutputFields();
-    }
-
-    @Override
-    public Map<String, Object> getComponentConfiguration() {
-        return null;
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaConfig.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaConfig.java b/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaConfig.java
deleted file mode 100644
index 3878cc8..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaConfig.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka.trident;
-
-import storm.kafka.BrokerHosts;
-import storm.kafka.KafkaConfig;
-
-
-public class TridentKafkaConfig extends KafkaConfig {
-
-
-    public final IBatchCoordinator coordinator = new DefaultCoordinator();
-
-    public TridentKafkaConfig(BrokerHosts hosts, String topic) {
-        super(hosts, topic);
-    }
-
-    public TridentKafkaConfig(BrokerHosts hosts, String topic, String clientId) {
-        super(hosts, topic, clientId);
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java b/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java
deleted file mode 100644
index a97d2cb..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaEmitter.java
+++ /dev/null
@@ -1,287 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka.trident;
-
-import backtype.storm.Config;
-import backtype.storm.metric.api.CombinedMetric;
-import backtype.storm.metric.api.MeanReducer;
-import backtype.storm.metric.api.ReducedMetric;
-import backtype.storm.task.TopologyContext;
-import com.google.common.collect.ImmutableMap;
-import kafka.javaapi.consumer.SimpleConsumer;
-import kafka.javaapi.message.ByteBufferMessageSet;
-import kafka.message.Message;
-import kafka.message.MessageAndOffset;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import storm.kafka.*;
-import storm.kafka.TopicOffsetOutOfRangeException;
-import storm.trident.operation.TridentCollector;
-import storm.trident.spout.IOpaquePartitionedTridentSpout;
-import storm.trident.spout.IPartitionedTridentSpout;
-import storm.trident.topology.TransactionAttempt;
-
-import java.util.*;
-
-public class TridentKafkaEmitter {
-
-    public static final Logger LOG = LoggerFactory.getLogger(TridentKafkaEmitter.class);
-
-    private DynamicPartitionConnections _connections;
-    private String _topologyName;
-    private KafkaUtils.KafkaOffsetMetric _kafkaOffsetMetric;
-    private ReducedMetric _kafkaMeanFetchLatencyMetric;
-    private CombinedMetric _kafkaMaxFetchLatencyMetric;
-    private TridentKafkaConfig _config;
-    private String _topologyInstanceId;
-
-    public TridentKafkaEmitter(Map conf, TopologyContext context, TridentKafkaConfig config, String topologyInstanceId) {
-        _config = config;
-        _topologyInstanceId = topologyInstanceId;
-        _connections = new DynamicPartitionConnections(_config, KafkaUtils.makeBrokerReader(conf, _config));
-        _topologyName = (String) conf.get(Config.TOPOLOGY_NAME);
-        _kafkaOffsetMetric = new KafkaUtils.KafkaOffsetMetric(_connections);
-        context.registerMetric("kafkaOffset", _kafkaOffsetMetric, _config.metricsTimeBucketSizeInSecs);
-        _kafkaMeanFetchLatencyMetric = context.registerMetric("kafkaFetchAvg", new MeanReducer(), _config.metricsTimeBucketSizeInSecs);
-        _kafkaMaxFetchLatencyMetric = context.registerMetric("kafkaFetchMax", new MaxMetric(), _config.metricsTimeBucketSizeInSecs);
-    }
-
-
-    private Map failFastEmitNewPartitionBatch(TransactionAttempt attempt, TridentCollector collector, Partition partition, Map lastMeta) {
-        SimpleConsumer consumer = _connections.register(partition);
-        Map ret = doEmitNewPartitionBatch(consumer, partition, collector, lastMeta);
-        _kafkaOffsetMetric.setLatestEmittedOffset(partition, (Long) ret.get("offset"));
-        return ret;
-    }
-
-    private Map emitNewPartitionBatch(TransactionAttempt attempt, TridentCollector collector, Partition partition, Map lastMeta) {
-        try {
-            return failFastEmitNewPartitionBatch(attempt, collector, partition, lastMeta);
-        } catch (FailedFetchException e) {
-            LOG.warn("Failed to fetch from partition " + partition);
-            if (lastMeta == null) {
-                return null;
-            } else {
-                Map ret = new HashMap();
-                ret.put("offset", lastMeta.get("nextOffset"));
-                ret.put("nextOffset", lastMeta.get("nextOffset"));
-                ret.put("partition", partition.partition);
-                ret.put("broker", ImmutableMap.of("host", partition.host.host, "port", partition.host.port));
-                ret.put("topic", partition.topic);
-                ret.put("topology", ImmutableMap.of("name", _topologyName, "id", _topologyInstanceId));
-                return ret;
-            }
-        }
-    }
-
-    private Map doEmitNewPartitionBatch(SimpleConsumer consumer, Partition partition, TridentCollector collector, Map lastMeta) {
-        long offset;
-        if (lastMeta != null) {
-            String lastInstanceId = null;
-            Map lastTopoMeta = (Map) lastMeta.get("topology");
-            if (lastTopoMeta != null) {
-                lastInstanceId = (String) lastTopoMeta.get("id");
-            }
-            if (_config.ignoreZkOffsets && !_topologyInstanceId.equals(lastInstanceId)) {
-                offset = KafkaUtils.getOffset(consumer, partition.topic, partition.partition, _config.startOffsetTime);
-            } else {
-                offset = (Long) lastMeta.get("nextOffset");
-            }
-        } else {
-            offset = KafkaUtils.getOffset(consumer, partition.topic, partition.partition, _config);
-        }
-
-        ByteBufferMessageSet msgs = null;
-        try {
-            msgs = fetchMessages(consumer, partition, offset);
-        } catch (TopicOffsetOutOfRangeException e) {
-            long newOffset = KafkaUtils.getOffset(consumer, partition.topic, partition.partition, kafka.api.OffsetRequest.EarliestTime());
-            LOG.warn("OffsetOutOfRange: Updating offset from offset = " + offset + " to offset = " + newOffset);
-            offset = newOffset;
-            msgs = KafkaUtils.fetchMessages(_config, consumer, partition, offset);
-        }
-
-        long endoffset = offset;
-        for (MessageAndOffset msg : msgs) {
-            emit(collector, msg.message(), partition, msg.offset());
-            endoffset = msg.nextOffset();
-        }
-        Map newMeta = new HashMap();
-        newMeta.put("offset", offset);
-        newMeta.put("nextOffset", endoffset);
-        newMeta.put("instanceId", _topologyInstanceId);
-        newMeta.put("partition", partition.partition);
-        newMeta.put("broker", ImmutableMap.of("host", partition.host.host, "port", partition.host.port));
-        newMeta.put("topic", partition.topic);
-        newMeta.put("topology", ImmutableMap.of("name", _topologyName, "id", _topologyInstanceId));
-        return newMeta;
-    }
-
-    private ByteBufferMessageSet fetchMessages(SimpleConsumer consumer, Partition partition, long offset) {
-        long start = System.nanoTime();
-        ByteBufferMessageSet msgs = null;
-        msgs = KafkaUtils.fetchMessages(_config, consumer, partition, offset);
-        long end = System.nanoTime();
-        long millis = (end - start) / 1000000;
-        _kafkaMeanFetchLatencyMetric.update(millis);
-        _kafkaMaxFetchLatencyMetric.update(millis);
-        return msgs;
-    }
-
-    /**
-     * re-emit the batch described by the meta data provided
-     *
-     * @param attempt
-     * @param collector
-     * @param partition
-     * @param meta
-     */
-    private void reEmitPartitionBatch(TransactionAttempt attempt, TridentCollector collector, Partition partition, Map meta) {
-        LOG.info("re-emitting batch, attempt " + attempt);
-        String instanceId = (String) meta.get("instanceId");
-        if (!_config.ignoreZkOffsets || instanceId.equals(_topologyInstanceId)) {
-            SimpleConsumer consumer = _connections.register(partition);
-            long offset = (Long) meta.get("offset");
-            long nextOffset = (Long) meta.get("nextOffset");
-            ByteBufferMessageSet msgs = null;
-            msgs = fetchMessages(consumer, partition, offset);
-
-            if(msgs != null) {
-                for (MessageAndOffset msg : msgs) {
-                    if (offset == nextOffset) {
-                        break;
-                    }
-                    if (offset > nextOffset) {
-                        throw new RuntimeException("Error when re-emitting batch. overshot the end offset");
-                    }
-                    emit(collector, msg.message(), partition, msg.offset());
-                    offset = msg.nextOffset();
-                }
-            }
-        }
-    }
-
-    private void emit(TridentCollector collector, Message msg, Partition partition, long offset) {
-        Iterable<List<Object>> values;
-        if (_config.scheme instanceof MessageMetadataSchemeAsMultiScheme) {
-            values = KafkaUtils.generateTuples((MessageMetadataSchemeAsMultiScheme) _config.scheme, msg, partition, offset);
-        } else {
-            values = KafkaUtils.generateTuples(_config, msg, partition.topic);
-        }
-
-        if (values != null) {
-            for (List<Object> value : values) {
-                collector.emit(value);
-            }
-        }
-    }
-
-    private void clear() {
-        _connections.clear();
-    }
-
-    private List<Partition> orderPartitions(List<GlobalPartitionInformation> partitions) {
-        List<Partition> part = new ArrayList<Partition>();
-        for (GlobalPartitionInformation globalPartitionInformation : partitions)
-            part.addAll(globalPartitionInformation.getOrderedPartitions());
-        return part;
-    }
-
-    private void refresh(List<Partition> list) {
-        _connections.clear();
-        _kafkaOffsetMetric.refreshPartitions(new HashSet<Partition>(list));
-    }
-
-
-    public IOpaquePartitionedTridentSpout.Emitter<List<GlobalPartitionInformation>, Partition, Map> asOpaqueEmitter() {
-
-        return new IOpaquePartitionedTridentSpout.Emitter<List<GlobalPartitionInformation>, Partition, Map>() {
-
-            /**
-             * Emit a batch of tuples for a partition/transaction.
-             *
-             * Return the metadata describing this batch that will be used as lastPartitionMeta
-             * for defining the parameters of the next batch.
-             */
-            @Override
-            public Map emitPartitionBatch(TransactionAttempt transactionAttempt, TridentCollector tridentCollector, Partition partition, Map map) {
-                return emitNewPartitionBatch(transactionAttempt, tridentCollector, partition, map);
-            }
-
-            @Override
-            public void refreshPartitions(List<Partition> partitions) {
-                refresh(partitions);
-            }
-
-            @Override
-            public List<Partition> getOrderedPartitions(List<GlobalPartitionInformation> partitionInformation) {
-                return orderPartitions(partitionInformation);
-            }
-
-            @Override
-            public void close() {
-                clear();
-            }
-        };
-    }
-
-    public IPartitionedTridentSpout.Emitter asTransactionalEmitter() {
-        return new IPartitionedTridentSpout.Emitter<List<GlobalPartitionInformation>, Partition, Map>() {
-
-            /**
-             * Emit a batch of tuples for a partition/transaction that's never been emitted before.
-             * Return the metadata that can be used to reconstruct this partition/batch in the future.
-             */
-            @Override
-            public Map emitPartitionBatchNew(TransactionAttempt transactionAttempt, TridentCollector tridentCollector, Partition partition, Map map) {
-                return failFastEmitNewPartitionBatch(transactionAttempt, tridentCollector, partition, map);
-            }
-
-            /**
-             * Emit a batch of tuples for a partition/transaction that has been emitted before, using
-             * the metadata created when it was first emitted.
-             */
-            @Override
-            public void emitPartitionBatch(TransactionAttempt transactionAttempt, TridentCollector tridentCollector, Partition partition, Map map) {
-                reEmitPartitionBatch(transactionAttempt, tridentCollector, partition, map);
-            }
-
-            /**
-             * This method is called when this task is responsible for a new set of partitions. Should be used
-             * to manage things like connections to brokers.
-             */
-            @Override
-            public void refreshPartitions(List<Partition> partitions) {
-                refresh(partitions);
-            }
-
-            @Override
-            public List<Partition> getOrderedPartitions(List<GlobalPartitionInformation> partitionInformation) {
-                return orderPartitions(partitionInformation);
-            }
-
-            @Override
-            public void close() {
-                clear();
-            }
-        };
-
-    }
-
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaState.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaState.java b/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaState.java
deleted file mode 100644
index 84b6a6a..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaState.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka.trident;
-
-import backtype.storm.task.OutputCollector;
-import backtype.storm.topology.FailedException;
-import org.apache.commons.lang.Validate;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.clients.producer.RecordMetadata;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import storm.kafka.trident.mapper.TridentTupleToKafkaMapper;
-import storm.kafka.trident.selector.KafkaTopicSelector;
-import storm.trident.operation.TridentCollector;
-import storm.trident.state.State;
-import storm.trident.tuple.TridentTuple;
-
-import java.util.List;
-import java.util.Properties;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-
-public class TridentKafkaState implements State {
-    private static final Logger LOG = LoggerFactory.getLogger(TridentKafkaState.class);
-
-    private KafkaProducer producer;
-    private OutputCollector collector;
-
-    private TridentTupleToKafkaMapper mapper;
-    private KafkaTopicSelector topicSelector;
-
-    public TridentKafkaState withTridentTupleToKafkaMapper(TridentTupleToKafkaMapper mapper) {
-        this.mapper = mapper;
-        return this;
-    }
-
-    public TridentKafkaState withKafkaTopicSelector(KafkaTopicSelector selector) {
-        this.topicSelector = selector;
-        return this;
-    }
-
-    @Override
-    public void beginCommit(Long txid) {
-        LOG.debug("beginCommit is Noop.");
-    }
-
-    @Override
-    public void commit(Long txid) {
-        LOG.debug("commit is Noop.");
-    }
-
-    public void prepare(Properties options) {
-        Validate.notNull(mapper, "mapper can not be null");
-        Validate.notNull(topicSelector, "topicSelector can not be null");
-        producer = new KafkaProducer(options);
-    }
-
-    public void updateState(List<TridentTuple> tuples, TridentCollector collector) {
-        String topic = null;
-        for (TridentTuple tuple : tuples) {
-            try {
-                topic = topicSelector.getTopic(tuple);
-
-                if(topic != null) {
-                    Future<RecordMetadata> result = producer.send(new ProducerRecord(topic,
-                            mapper.getKeyFromTuple(tuple), mapper.getMessageFromTuple(tuple)));
-                    try {
-                        result.get();
-                    } catch (ExecutionException e) {
-                        String errorMsg = "Could not retrieve result for message with key = "
-                                + mapper.getKeyFromTuple(tuple) + " from topic = " + topic;
-                        LOG.error(errorMsg, e);
-                        throw new FailedException(errorMsg, e);
-                    }
-                } else {
-                    LOG.warn("skipping key = " + mapper.getKeyFromTuple(tuple) + ", topic selector returned null.");
-                }
-            } catch (Exception ex) {
-                String errorMsg = "Could not send message with key = " + mapper.getKeyFromTuple(tuple)
-                        + " to topic = " + topic;
-                LOG.warn(errorMsg, ex);
-                throw new FailedException(errorMsg, ex);
-            }
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaStateFactory.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaStateFactory.java b/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaStateFactory.java
deleted file mode 100644
index a5d9d42..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaStateFactory.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka.trident;
-
-import backtype.storm.task.IMetricsContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import storm.kafka.trident.mapper.TridentTupleToKafkaMapper;
-import storm.kafka.trident.selector.KafkaTopicSelector;
-import storm.trident.state.State;
-import storm.trident.state.StateFactory;
-
-import java.util.Map;
-import java.util.Properties;
-
-public class TridentKafkaStateFactory implements StateFactory {
-
-    private static final Logger LOG = LoggerFactory.getLogger(TridentKafkaStateFactory.class);
-
-    private TridentTupleToKafkaMapper mapper;
-    private KafkaTopicSelector topicSelector;
-    private Properties producerProperties = new Properties();
-
-    public TridentKafkaStateFactory withTridentTupleToKafkaMapper(TridentTupleToKafkaMapper mapper) {
-        this.mapper = mapper;
-        return this;
-    }
-
-    public TridentKafkaStateFactory withKafkaTopicSelector(KafkaTopicSelector selector) {
-        this.topicSelector = selector;
-        return this;
-    }
-
-    public TridentKafkaStateFactory withProducerProperties(Properties props) {
-        this.producerProperties = props;
-        return this;
-    }
-
-    @Override
-    public State makeState(Map conf, IMetricsContext metrics, int partitionIndex, int numPartitions) {
-        LOG.info("makeState(partitonIndex={}, numpartitions={}", partitionIndex, numPartitions);
-        TridentKafkaState state = new TridentKafkaState()
-                .withKafkaTopicSelector(this.topicSelector)
-                .withTridentTupleToKafkaMapper(this.mapper);
-        state.prepare(producerProperties);
-        return state;
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaUpdater.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaUpdater.java b/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaUpdater.java
deleted file mode 100644
index 6639b36..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/TridentKafkaUpdater.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka.trident;
-
-import storm.trident.operation.TridentCollector;
-import storm.trident.state.BaseStateUpdater;
-import storm.trident.tuple.TridentTuple;
-
-import java.util.List;
-
-public class TridentKafkaUpdater extends BaseStateUpdater<TridentKafkaState> {
-    @Override
-    public void updateState(TridentKafkaState state, List<TridentTuple> tuples, TridentCollector collector) {
-        state.updateState(tuples, collector);
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/trident/ZkBrokerReader.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/ZkBrokerReader.java b/external/storm-kafka/src/jvm/storm/kafka/trident/ZkBrokerReader.java
deleted file mode 100644
index b480bdd..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/ZkBrokerReader.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka.trident;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import storm.kafka.DynamicBrokersReader;
-import storm.kafka.ZkHosts;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-
-public class ZkBrokerReader implements IBrokerReader {
-
-	public static final Logger LOG = LoggerFactory.getLogger(ZkBrokerReader.class);
-
-	List<GlobalPartitionInformation> cachedBrokers = new ArrayList<GlobalPartitionInformation>();
-	DynamicBrokersReader reader;
-	long lastRefreshTimeMs;
-
-
-	long refreshMillis;
-
-	public ZkBrokerReader(Map conf, String topic, ZkHosts hosts) {
-		try {
-			reader = new DynamicBrokersReader(conf, hosts.brokerZkStr, hosts.brokerZkPath, topic);
-			cachedBrokers = reader.getBrokerInfo();
-			lastRefreshTimeMs = System.currentTimeMillis();
-			refreshMillis = hosts.refreshFreqSecs * 1000L;
-		} catch (java.net.SocketTimeoutException e) {
-			LOG.warn("Failed to update brokers", e);
-		}
-
-	}
-
-	private void refresh() {
-		long currTime = System.currentTimeMillis();
-		if (currTime > lastRefreshTimeMs + refreshMillis) {
-			try {
-				LOG.info("brokers need refreshing because " + refreshMillis + "ms have expired");
-				cachedBrokers = reader.getBrokerInfo();
-				lastRefreshTimeMs = currTime;
-			} catch (java.net.SocketTimeoutException e) {
-				LOG.warn("Failed to update brokers", e);
-			}
-		}
-	}
-	@Override
-	public GlobalPartitionInformation getBrokerForTopic(String topic) {
-		refresh();
-        for(GlobalPartitionInformation partitionInformation : cachedBrokers) {
-            if (partitionInformation.topic.equals(topic)) return partitionInformation;
-        }
-		return null;
-	}
-
-	@Override
-	public List<GlobalPartitionInformation> getAllBrokers() {
-		refresh();
-		return cachedBrokers;
-	}
-
-	@Override
-	public void close() {
-		reader.close();
-	}
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/trident/mapper/FieldNameBasedTupleToKafkaMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/mapper/FieldNameBasedTupleToKafkaMapper.java b/external/storm-kafka/src/jvm/storm/kafka/trident/mapper/FieldNameBasedTupleToKafkaMapper.java
deleted file mode 100644
index 29a49d1..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/mapper/FieldNameBasedTupleToKafkaMapper.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka.trident.mapper;
-
-import storm.trident.tuple.TridentTuple;
-
-public class FieldNameBasedTupleToKafkaMapper<K, V> implements TridentTupleToKafkaMapper {
-
-    public final String keyFieldName;
-    public final String msgFieldName;
-
-    public FieldNameBasedTupleToKafkaMapper(String keyFieldName, String msgFieldName) {
-        this.keyFieldName = keyFieldName;
-        this.msgFieldName = msgFieldName;
-    }
-
-    @Override
-    public K getKeyFromTuple(TridentTuple tuple) {
-        return (K) tuple.getValueByField(keyFieldName);
-    }
-
-    @Override
-    public V getMessageFromTuple(TridentTuple tuple) {
-        return (V) tuple.getValueByField(msgFieldName);
-    }
-}
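
The mapper above just pulls the Kafka key and message out of a Trident tuple by field name. A short wiring sketch (not part of this diff); the field names "key" and "message" are hypothetical:

    import storm.kafka.trident.mapper.FieldNameBasedTupleToKafkaMapper;
    import storm.kafka.trident.mapper.TridentTupleToKafkaMapper;

    public class MapperExample {
        public static void main(String[] args) {
            // Tuples are expected to carry a "key" field and a "message" field.
            TridentTupleToKafkaMapper<String, String> mapper =
                    new FieldNameBasedTupleToKafkaMapper<String, String>("key", "message");
            // At emit time the Trident Kafka state calls:
            //   mapper.getKeyFromTuple(tuple)     -> tuple.getValueByField("key")
            //   mapper.getMessageFromTuple(tuple) -> tuple.getValueByField("message")
        }
    }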

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/trident/mapper/TridentTupleToKafkaMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/mapper/TridentTupleToKafkaMapper.java b/external/storm-kafka/src/jvm/storm/kafka/trident/mapper/TridentTupleToKafkaMapper.java
deleted file mode 100644
index 9759ba3..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/mapper/TridentTupleToKafkaMapper.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka.trident.mapper;
-
-import backtype.storm.tuple.Tuple;
-import storm.trident.tuple.TridentTuple;
-
-import java.io.Serializable;
-
-public interface TridentTupleToKafkaMapper<K,V>  extends Serializable {
-    K getKeyFromTuple(TridentTuple tuple);
-    V getMessageFromTuple(TridentTuple tuple);
-}
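
A custom mapper only has to implement the two methods of this interface. As a sketch (not part of this diff), a mapper that uses a fixed key and renders the tuple's first field as the message might look like:

    import storm.kafka.trident.mapper.TridentTupleToKafkaMapper;
    import storm.trident.tuple.TridentTuple;

    // Sketch: constant key, message is the tuple's first value rendered as a String.
    public class FirstValueTupleToKafkaMapper implements TridentTupleToKafkaMapper<String, String> {
        @Override
        public String getKeyFromTuple(TridentTuple tuple) {
            return "fixed-key";                           // hypothetical constant key
        }

        @Override
        public String getMessageFromTuple(TridentTuple tuple) {
            return String.valueOf(tuple.getValue(0));     // first field of the tuple
        }
    }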

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/trident/selector/DefaultTopicSelector.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/selector/DefaultTopicSelector.java b/external/storm-kafka/src/jvm/storm/kafka/trident/selector/DefaultTopicSelector.java
deleted file mode 100644
index 473a38d..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/selector/DefaultTopicSelector.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka.trident.selector;
-
-import storm.trident.tuple.TridentTuple;
-
-public class DefaultTopicSelector implements KafkaTopicSelector {
-
-    private final String topicName;
-
-    public DefaultTopicSelector(final String topicName) {
-        this.topicName = topicName;
-    }
-
-    @Override
-    public String getTopic(TridentTuple tuple) {
-        return topicName;
-    }
-}
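
DefaultTopicSelector ignores the tuple entirely and always answers with the topic it was constructed with; a tiny usage sketch (topic name hypothetical, not part of this diff):

    import storm.kafka.trident.selector.DefaultTopicSelector;
    import storm.kafka.trident.selector.KafkaTopicSelector;

    public class TopicSelectorExample {
        public static void main(String[] args) {
            KafkaTopicSelector selector = new DefaultTopicSelector("events");
            // Returns "events" regardless of the tuple; null is fine for this particular selector.
            System.out.println(selector.getTopic(null));
        }
    }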

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/storm/kafka/trident/selector/KafkaTopicSelector.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/storm/kafka/trident/selector/KafkaTopicSelector.java b/external/storm-kafka/src/jvm/storm/kafka/trident/selector/KafkaTopicSelector.java
deleted file mode 100644
index f6c5d82..0000000
--- a/external/storm-kafka/src/jvm/storm/kafka/trident/selector/KafkaTopicSelector.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package storm.kafka.trident.selector;
-
-import storm.trident.tuple.TridentTuple;
-
-import java.io.Serializable;
-
-public interface KafkaTopicSelector extends Serializable {
-    String getTopic(TridentTuple tuple);
-}
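
The other common shape is a selector that routes each tuple to a topic taken from one of its own fields. A hedged sketch (not part of this diff), assuming the tuple carries a field holding the destination topic:

    import storm.kafka.trident.selector.KafkaTopicSelector;
    import storm.trident.tuple.TridentTuple;

    // Sketch: pick the destination topic from a field on the tuple itself.
    public class FieldBasedTopicSelector implements KafkaTopicSelector {
        private final String fieldName;

        public FieldBasedTopicSelector(String fieldName) {
            this.fieldName = fieldName;                   // e.g. new FieldBasedTopicSelector("topic")
        }

        @Override
        public String getTopic(TridentTuple tuple) {
            return tuple.getStringByField(fieldName);
        }
    }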

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/test/org/apache/storm/kafka/DynamicBrokersReaderTest.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/test/org/apache/storm/kafka/DynamicBrokersReaderTest.java b/external/storm-kafka/src/test/org/apache/storm/kafka/DynamicBrokersReaderTest.java
new file mode 100644
index 0000000..3363252
--- /dev/null
+++ b/external/storm-kafka/src/test/org/apache/storm/kafka/DynamicBrokersReaderTest.java
@@ -0,0 +1,252 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import org.apache.storm.Config;
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.CuratorFrameworkFactory;
+import org.apache.curator.retry.ExponentialBackoffRetry;
+import org.apache.curator.test.TestingServer;
+import org.apache.curator.utils.ZKPaths;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.apache.storm.kafka.trident.GlobalPartitionInformation;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Date: 16/05/2013
+ * Time: 20:35
+ */
+public class DynamicBrokersReaderTest {
+    private DynamicBrokersReader dynamicBrokersReader, wildCardBrokerReader;
+    private String masterPath = "/brokers";
+    private String topic = "testing1";
+    private String secondTopic = "testing2";
+    private String thirdTopic = "testing3";
+
+    private CuratorFramework zookeeper;
+    private TestingServer server;
+
+    @Before
+    public void setUp() throws Exception {
+        server = new TestingServer();
+        String connectionString = server.getConnectString();
+        Map conf = new HashMap();
+        conf.put(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT, 1000);
+        conf.put(Config.STORM_ZOOKEEPER_CONNECTION_TIMEOUT, 1000);
+        conf.put(Config.STORM_ZOOKEEPER_RETRY_TIMES, 4);
+        conf.put(Config.STORM_ZOOKEEPER_RETRY_INTERVAL, 5);
+
+        ExponentialBackoffRetry retryPolicy = new ExponentialBackoffRetry(1000, 3);
+        zookeeper = CuratorFrameworkFactory.newClient(connectionString, retryPolicy);
+        dynamicBrokersReader = new DynamicBrokersReader(conf, connectionString, masterPath, topic);
+
+        Map conf2 = new HashMap();
+        conf2.putAll(conf);
+        conf2.put("kafka.topic.wildcard.match",true);
+
+        wildCardBrokerReader = new DynamicBrokersReader(conf2, connectionString, masterPath, "^test.*$");
+        zookeeper.start();
+    }
+
+    @After
+    public void tearDown() throws Exception {
+        dynamicBrokersReader.close();
+        zookeeper.close();
+        server.close();
+    }
+
+    private void addPartition(int id, String host, int port, String topic) throws Exception {
+        writePartitionId(id, topic);
+        writeLeader(id, 0, topic);
+        writeLeaderDetails(0, host, port);
+    }
+
+    private void addPartition(int id, int leader, String host, int port, String topic) throws Exception {
+        writePartitionId(id, topic);
+        writeLeader(id, leader, topic);
+        writeLeaderDetails(leader, host, port);
+    }
+
+    private void writePartitionId(int id, String topic) throws Exception {
+        String path = dynamicBrokersReader.partitionPath(topic);
+        writeDataToPath(path, ("" + id));
+    }
+
+    private void writeDataToPath(String path, String data) throws Exception {
+        ZKPaths.mkdirs(zookeeper.getZookeeperClient().getZooKeeper(), path);
+        zookeeper.setData().forPath(path, data.getBytes());
+    }
+
+    private void writeLeader(int id, int leaderId, String topic) throws Exception {
+        String path = dynamicBrokersReader.partitionPath(topic) + "/" + id + "/state";
+        String value = " { \"controller_epoch\":4, \"isr\":[ 1, 0 ], \"leader\":" + leaderId + ", \"leader_epoch\":1, \"version\":1 }";
+        writeDataToPath(path, value);
+    }
+
+    private void writeLeaderDetails(int leaderId, String host, int port) throws Exception {
+        String path = dynamicBrokersReader.brokerPath() + "/" + leaderId;
+        String value = "{ \"host\":\"" + host + "\", \"jmx_port\":9999, \"port\":" + port + ", \"version\":1 }";
+        writeDataToPath(path, value);
+    }
+
+
+    private GlobalPartitionInformation getByTopic(List<GlobalPartitionInformation> partitions, String topic){
+        for(GlobalPartitionInformation partitionInformation : partitions) {
+            if (partitionInformation.topic.equals(topic)) return partitionInformation;
+        }
+        return null;
+    }
+
+    @Test
+    public void testGetBrokerInfo() throws Exception {
+        String host = "localhost";
+        int port = 9092;
+        int partition = 0;
+        addPartition(partition, host, port, topic);
+        List<GlobalPartitionInformation> partitions = dynamicBrokersReader.getBrokerInfo();
+
+        GlobalPartitionInformation brokerInfo = getByTopic(partitions, topic);
+        assertNotNull(brokerInfo);
+        assertEquals(1, brokerInfo.getOrderedPartitions().size());
+        assertEquals(port, brokerInfo.getBrokerFor(partition).port);
+        assertEquals(host, brokerInfo.getBrokerFor(partition).host);
+    }
+
+    @Test
+    public void testGetBrokerInfoWildcardMatch() throws Exception {
+        String host = "localhost";
+        int port = 9092;
+        int partition = 0;
+        addPartition(partition, host, port, topic);
+        addPartition(partition, host, port, secondTopic);
+
+        List<GlobalPartitionInformation> partitions = wildCardBrokerReader.getBrokerInfo();
+
+        GlobalPartitionInformation brokerInfo = getByTopic(partitions, topic);
+        assertNotNull(brokerInfo);
+        assertEquals(1, brokerInfo.getOrderedPartitions().size());
+        assertEquals(port, brokerInfo.getBrokerFor(partition).port);
+        assertEquals(host, brokerInfo.getBrokerFor(partition).host);
+
+        brokerInfo = getByTopic(partitions, secondTopic);
+        assertNotNull(brokerInfo);
+        assertEquals(1, brokerInfo.getOrderedPartitions().size());
+        assertEquals(port, brokerInfo.getBrokerFor(partition).port);
+        assertEquals(host, brokerInfo.getBrokerFor(partition).host);
+
+        addPartition(partition, host, port, thirdTopic);
+        //Discover newly added topic
+        partitions = wildCardBrokerReader.getBrokerInfo();
+        assertNotNull(getByTopic(partitions, topic));
+        assertNotNull(getByTopic(partitions, secondTopic));
+        assertNotNull(getByTopic(partitions, thirdTopic));
+    }
+
+
+    @Test
+    public void testMultiplePartitionsOnDifferentHosts() throws Exception {
+        String host = "localhost";
+        int port = 9092;
+        int secondPort = 9093;
+        int partition = 0;
+        int secondPartition = partition + 1;
+        addPartition(partition, 0, host, port, topic);
+        addPartition(secondPartition, 1, host, secondPort, topic);
+
+        List<GlobalPartitionInformation> partitions = dynamicBrokersReader.getBrokerInfo();
+
+        GlobalPartitionInformation brokerInfo = getByTopic(partitions, topic);
+        assertNotNull(brokerInfo);
+        assertEquals(2, brokerInfo.getOrderedPartitions().size());
+
+        assertEquals(port, brokerInfo.getBrokerFor(partition).port);
+        assertEquals(host, brokerInfo.getBrokerFor(partition).host);
+
+        assertEquals(secondPort, brokerInfo.getBrokerFor(secondPartition).port);
+        assertEquals(host, brokerInfo.getBrokerFor(secondPartition).host);
+    }
+
+
+    @Test
+    public void testMultiplePartitionsOnSameHost() throws Exception {
+        String host = "localhost";
+        int port = 9092;
+        int partition = 0;
+        int secondPartition = partition + 1;
+        addPartition(partition, 0, host, port, topic);
+        addPartition(secondPartition, 0, host, port, topic);
+
+        List<GlobalPartitionInformation> partitions = dynamicBrokersReader.getBrokerInfo();
+
+        GlobalPartitionInformation brokerInfo = getByTopic(partitions, topic);
+        assertNotNull(brokerInfo);
+        assertEquals(2, brokerInfo.getOrderedPartitions().size());
+
+        assertEquals(port, brokerInfo.getBrokerFor(partition).port);
+        assertEquals(host, brokerInfo.getBrokerFor(partition).host);
+
+        assertEquals(port, brokerInfo.getBrokerFor(secondPartition).port);
+        assertEquals(host, brokerInfo.getBrokerFor(secondPartition).host);
+    }
+
+    @Test
+    public void testSwitchHostForPartition() throws Exception {
+        String host = "localhost";
+        int port = 9092;
+        int partition = 0;
+        addPartition(partition, host, port, topic);
+        List<GlobalPartitionInformation> partitions = dynamicBrokersReader.getBrokerInfo();
+
+        GlobalPartitionInformation brokerInfo = getByTopic(partitions, topic);
+        assertNotNull(brokerInfo);
+        assertEquals(port, brokerInfo.getBrokerFor(partition).port);
+        assertEquals(host, brokerInfo.getBrokerFor(partition).host);
+
+        String newHost = host + "switch";
+        int newPort = port + 1;
+        addPartition(partition, newHost, newPort, topic);
+        partitions = dynamicBrokersReader.getBrokerInfo();
+
+        brokerInfo = getByTopic(partitions, topic);
+        assertNotNull(brokerInfo);
+        assertEquals(newPort, brokerInfo.getBrokerFor(partition).port);
+        assertEquals(newHost, brokerInfo.getBrokerFor(partition).host);
+    }
+
+    @Test(expected = NullPointerException.class)
+    public void testErrorLogsWhenConfigIsMissing() throws Exception {
+        String connectionString = server.getConnectString();
+        Map conf = new HashMap();
+        conf.put(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT, 1000);
+//        conf.put(Config.STORM_ZOOKEEPER_CONNECTION_TIMEOUT, 1000);
+        conf.put(Config.STORM_ZOOKEEPER_RETRY_TIMES, 4);
+        conf.put(Config.STORM_ZOOKEEPER_RETRY_INTERVAL, 5);
+
+        DynamicBrokersReader dynamicBrokersReader1 = new DynamicBrokersReader(conf, connectionString, masterPath, topic);
+
+    }
+}
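
One detail testGetBrokerInfoWildcardMatch depends on: when "kafka.topic.wildcard.match" is set to true in the config, the topic argument passed to DynamicBrokersReader is treated as a regular expression and getBrokerInfo() returns one GlobalPartitionInformation per matching topic. A sketch of that configuration outside the test harness (connect string and pattern are hypothetical; the ZooKeeper settings mirror setUp() above):

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    import org.apache.storm.Config;
    import org.apache.storm.kafka.DynamicBrokersReader;
    import org.apache.storm.kafka.trident.GlobalPartitionInformation;

    public class WildcardReaderExample {
        public static void main(String[] args) throws Exception {
            Map conf = new HashMap();
            conf.put(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT, 1000);
            conf.put(Config.STORM_ZOOKEEPER_CONNECTION_TIMEOUT, 1000);
            conf.put(Config.STORM_ZOOKEEPER_RETRY_TIMES, 4);
            conf.put(Config.STORM_ZOOKEEPER_RETRY_INTERVAL, 5);
            conf.put("kafka.topic.wildcard.match", true);          // treat the topic argument as a regex

            DynamicBrokersReader reader =
                    new DynamicBrokersReader(conf, "zk1:2181", "/brokers", "^events\\..*$");
            List<GlobalPartitionInformation> perTopic = reader.getBrokerInfo();   // one entry per matching topic
            System.out.println("matched topics: " + perTopic.size());
            reader.close();
        }
    }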

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/test/org/apache/storm/kafka/ExponentialBackoffMsgRetryManagerTest.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/test/org/apache/storm/kafka/ExponentialBackoffMsgRetryManagerTest.java b/external/storm-kafka/src/test/org/apache/storm/kafka/ExponentialBackoffMsgRetryManagerTest.java
new file mode 100644
index 0000000..8fa6564
--- /dev/null
+++ b/external/storm-kafka/src/test/org/apache/storm/kafka/ExponentialBackoffMsgRetryManagerTest.java
@@ -0,0 +1,235 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import org.junit.Test;
+
+public class ExponentialBackoffMsgRetryManagerTest {
+
+    private static final Long TEST_OFFSET = 101L;
+    private static final Long TEST_OFFSET2 = 102L;
+    private static final Long TEST_OFFSET3 = 105L;
+    private static final Long TEST_NEW_OFFSET = 103L;
+
+    @Test
+    public void testImmediateRetry() throws Exception {
+        ExponentialBackoffMsgRetryManager manager = new ExponentialBackoffMsgRetryManager(0, 0d, 0);
+        manager.failed(TEST_OFFSET);
+        Long next = manager.nextFailedMessageToRetry();
+        assertEquals("expect test offset next available for retry", TEST_OFFSET, next);
+        assertTrue("message should be ready for retry immediately", manager.shouldRetryMsg(TEST_OFFSET));
+
+        manager.retryStarted(TEST_OFFSET);
+
+        manager.failed(TEST_OFFSET);
+        next = manager.nextFailedMessageToRetry();
+        assertEquals("expect test offset next available for retry", TEST_OFFSET, next);
+        assertTrue("message should be ready for retry immediately", manager.shouldRetryMsg(TEST_OFFSET));
+    }
+
+    @Test
+    public void testSingleDelay() throws Exception {
+        ExponentialBackoffMsgRetryManager manager = new ExponentialBackoffMsgRetryManager(100, 1d, 1000);
+        manager.failed(TEST_OFFSET);
+        Thread.sleep(5);
+        Long next = manager.nextFailedMessageToRetry();
+        assertNull("expect no message ready for retry yet", next);
+        assertFalse("message should not be ready for retry yet", manager.shouldRetryMsg(TEST_OFFSET));
+
+        Thread.sleep(100);
+        next = manager.nextFailedMessageToRetry();
+        assertEquals("expect test offset next available for retry", TEST_OFFSET, next);
+        assertTrue("message should be ready for retry", manager.shouldRetryMsg(TEST_OFFSET));
+    }
+
+    @Test
+    public void testExponentialBackoff() throws Exception {
+        final long initial = 10;
+        final double mult = 2d;
+        ExponentialBackoffMsgRetryManager manager = new ExponentialBackoffMsgRetryManager(initial, mult, initial * 10);
+
+        long expectedWaitTime = initial;
+        for (long i = 0L; i < 3L; ++i) {
+            manager.failed(TEST_OFFSET);
+
+            Thread.sleep((expectedWaitTime + 1L) / 2L);
+            assertFalse("message should not be ready for retry yet", manager.shouldRetryMsg(TEST_OFFSET));
+
+            Thread.sleep((expectedWaitTime + 1L) / 2L);
+            Long next = manager.nextFailedMessageToRetry();
+            assertEquals("expect test offset next available for retry", TEST_OFFSET, next);
+            assertTrue("message should be ready for retry", manager.shouldRetryMsg(TEST_OFFSET));
+
+            manager.retryStarted(TEST_OFFSET);
+            expectedWaitTime *= mult;
+        }
+    }
+
+    @Test
+    public void testRetryOrder() throws Exception {
+        final long initial = 10;
+        final double mult = 2d;
+        final long max = 20;
+        ExponentialBackoffMsgRetryManager manager = new ExponentialBackoffMsgRetryManager(initial, mult, max);
+
+        manager.failed(TEST_OFFSET);
+        Thread.sleep(initial);
+
+        manager.retryStarted(TEST_OFFSET);
+        manager.failed(TEST_OFFSET);
+        manager.failed(TEST_OFFSET2);
+
+        // although TEST_OFFSET failed first, its retry delay is longer because this is its second retry
+        // so TEST_OFFSET2 should come first
+
+        Thread.sleep(initial * 2);
+        assertTrue("message "+TEST_OFFSET+"should be ready for retry", manager.shouldRetryMsg(TEST_OFFSET));
+        assertTrue("message "+TEST_OFFSET2+"should be ready for retry", manager.shouldRetryMsg(TEST_OFFSET2));
+
+        Long next = manager.nextFailedMessageToRetry();
+        assertEquals("expect first message to retry is "+TEST_OFFSET2, TEST_OFFSET2, next);
+
+        Thread.sleep(initial);
+
+        // haven't retried yet, so first should still be TEST_OFFSET2
+        next = manager.nextFailedMessageToRetry();
+        assertEquals("expect first message to retry is "+TEST_OFFSET2, TEST_OFFSET2, next);
+        manager.retryStarted(next);
+
+        // now it should be TEST_OFFSET
+        next = manager.nextFailedMessageToRetry();
+        assertEquals("expect message to retry is now "+TEST_OFFSET, TEST_OFFSET, next);
+        manager.retryStarted(next);
+
+        // now none left
+        next = manager.nextFailedMessageToRetry();
+        assertNull("expect no message to retry now", next);
+    }
+
+    @Test
+    public void testQueriesAfterRetriedAlready() throws Exception {
+        ExponentialBackoffMsgRetryManager manager = new ExponentialBackoffMsgRetryManager(0, 0d, 0);
+        manager.failed(TEST_OFFSET);
+        Long next = manager.nextFailedMessageToRetry();
+        assertEquals("expect test offset next available for retry", TEST_OFFSET, next);
+        assertTrue("message should be ready for retry immediately", manager.shouldRetryMsg(TEST_OFFSET));
+
+        manager.retryStarted(TEST_OFFSET);
+        next = manager.nextFailedMessageToRetry();
+        assertNull("expect no message ready after retried", next);
+        assertFalse("message should not be ready after retried", manager.shouldRetryMsg(TEST_OFFSET));
+    }
+
+    @Test(expected = IllegalStateException.class)
+    public void testRetryWithoutFail() throws Exception {
+        ExponentialBackoffMsgRetryManager manager = new ExponentialBackoffMsgRetryManager(0, 0d, 0);
+        manager.retryStarted(TEST_OFFSET);
+    }
+
+    @Test(expected = IllegalStateException.class)
+    public void testFailRetryRetry() throws Exception {
+        ExponentialBackoffMsgRetryManager manager = new ExponentialBackoffMsgRetryManager(0, 0d, 0);
+        manager.failed(TEST_OFFSET);
+        try {
+            manager.retryStarted(TEST_OFFSET);
+        } catch (IllegalStateException ise) {
+            fail("IllegalStateException unexpected here: " + ise);
+        }
+
+        assertFalse("message should not be ready for retry", manager.shouldRetryMsg(TEST_OFFSET));
+        manager.retryStarted(TEST_OFFSET);
+    }
+
+    @Test
+    public void testMaxBackoff() throws Exception {
+        final long initial = 100;
+        final double mult = 2d;
+        final long max = 2000;
+        ExponentialBackoffMsgRetryManager manager = new ExponentialBackoffMsgRetryManager(initial, mult, max);
+
+        long expectedWaitTime = initial;
+        for (long i = 0L; i < 4L; ++i) {
+            manager.failed(TEST_OFFSET);
+
+            Thread.sleep((expectedWaitTime + 1L) / 2L);
+            assertFalse("message should not be ready for retry yet", manager.shouldRetryMsg(TEST_OFFSET));
+
+            Thread.sleep((expectedWaitTime + 1L) / 2L);
+            Long next = manager.nextFailedMessageToRetry();
+            assertEquals("expect test offset next available for retry", TEST_OFFSET, next);
+            assertTrue("message should be ready for retry", manager.shouldRetryMsg(TEST_OFFSET));
+
+            manager.retryStarted(TEST_OFFSET);
+            expectedWaitTime = Math.min((long) (expectedWaitTime * mult), max);
+        }
+    }
+
+    @Test
+    public void testFailThenAck() throws Exception {
+        ExponentialBackoffMsgRetryManager manager = new ExponentialBackoffMsgRetryManager(0, 0d, 0);
+        manager.failed(TEST_OFFSET);
+        assertTrue("message should be ready for retry", manager.shouldRetryMsg(TEST_OFFSET));
+
+        manager.acked(TEST_OFFSET);
+
+        Long next = manager.nextFailedMessageToRetry();
+        assertNull("expect no message ready after acked", next);
+        assertFalse("message should not be ready after acked", manager.shouldRetryMsg(TEST_OFFSET));
+    }
+
+    @Test
+    public void testAckThenFail() throws Exception {
+        ExponentialBackoffMsgRetryManager manager = new ExponentialBackoffMsgRetryManager(0, 0d, 0);
+        manager.acked(TEST_OFFSET);
+        assertFalse("message should not be ready after acked", manager.shouldRetryMsg(TEST_OFFSET));
+
+        manager.failed(TEST_OFFSET);
+
+        Long next = manager.nextFailedMessageToRetry();
+        assertEquals("expect test offset next available for retry", TEST_OFFSET, next);
+        assertTrue("message should be ready for retry", manager.shouldRetryMsg(TEST_OFFSET));
+    }
+    
+    @Test
+    public void testClearInvalidMessages() throws Exception {
+        ExponentialBackoffMsgRetryManager manager = new ExponentialBackoffMsgRetryManager(0, 0d, 0);
+        manager.failed(TEST_OFFSET);
+        manager.failed(TEST_OFFSET2);
+        manager.failed(TEST_OFFSET3);
+        
+        assertTrue("message should be ready for retry", manager.shouldRetryMsg(TEST_OFFSET));
+        assertTrue("message should be ready for retry", manager.shouldRetryMsg(TEST_OFFSET2));
+        assertTrue("message should be ready for retry", manager.shouldRetryMsg(TEST_OFFSET3));
+
+        manager.clearInvalidMessages(TEST_NEW_OFFSET);
+
+        Long next = manager.nextFailedMessageToRetry();
+        assertEquals("expect test offset next available for retry", TEST_OFFSET3, next);
+        
+        manager.acked(TEST_OFFSET3);
+        next = manager.nextFailedMessageToRetry();
+        assertNull("expect no message ready after acked", next);
+    }
+
+}
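
The arithmetic these tests rely on: the wait before the i-th retry of an offset is initialDelay * multiplier^i, capped at the maximum delay, which is why testMaxBackoff advances expectedWaitTime with Math.min((long) (expectedWaitTime * mult), max). A stand-alone sketch of that schedule (not the manager's actual implementation):

    // Sketch: reproduce the delay schedule the tests assume.
    public class BackoffScheduleSketch {
        // initialMs = first retry delay, mult = per-retry multiplier, maxMs = upper bound on the delay.
        static long delayForRetry(long initialMs, double mult, long maxMs, int retryNumber) {
            double delay = initialMs * Math.pow(mult, retryNumber);
            return Math.min((long) delay, maxMs);
        }

        public static void main(String[] args) {
            // Same parameters as testMaxBackoff: initial=100ms, mult=2, max=2000ms.
            for (int retry = 0; retry < 6; retry++) {
                System.out.println("retry " + retry + " -> " + delayForRetry(100, 2d, 2000, retry) + " ms");
            }
            // Prints 100, 200, 400, 800, 1600, 2000 (capped) ms.
        }
    }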

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/test/org/apache/storm/kafka/KafkaErrorTest.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/test/org/apache/storm/kafka/KafkaErrorTest.java b/external/storm-kafka/src/test/org/apache/storm/kafka/KafkaErrorTest.java
new file mode 100644
index 0000000..e38bc1e
--- /dev/null
+++ b/external/storm-kafka/src/test/org/apache/storm/kafka/KafkaErrorTest.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import org.junit.Test;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.is;
+import static org.junit.Assert.assertThat;
+
+/**
+ * Date: 12/01/2014
+ * Time: 18:09
+ */
+public class KafkaErrorTest {
+
+    @Test
+    public void getError() {
+        assertThat(KafkaError.getError(0), is(equalTo(KafkaError.NO_ERROR)));
+    }
+
+    @Test
+    public void offsetMetaDataTooLarge() {
+        assertThat(KafkaError.getError(12), is(equalTo(KafkaError.OFFSET_METADATA_TOO_LARGE)));
+    }
+
+    @Test
+    public void unknownNegative() {
+        assertThat(KafkaError.getError(-1), is(equalTo(KafkaError.UNKNOWN)));
+    }
+
+    @Test
+    public void unknownPositive() {
+        assertThat(KafkaError.getError(75), is(equalTo(KafkaError.UNKNOWN)));
+    }
+
+    @Test
+    public void unknown() {
+        assertThat(KafkaError.getError(13), is(equalTo(KafkaError.UNKNOWN)));
+    }
+}
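
The behaviour these cases pin down: KafkaError.getError(code) resolves a numeric Kafka error code to the matching enum constant, and anything outside the known range (negative or too large) falls back to UNKNOWN rather than throwing. A one-line-per-case sketch (not part of this diff):

    import org.apache.storm.kafka.KafkaError;

    public class KafkaErrorExample {
        public static void main(String[] args) {
            System.out.println(KafkaError.getError(0));    // NO_ERROR
            System.out.println(KafkaError.getError(12));   // OFFSET_METADATA_TOO_LARGE
            System.out.println(KafkaError.getError(75));   // UNKNOWN (out of range)
        }
    }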

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/test/org/apache/storm/kafka/KafkaTestBroker.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/test/org/apache/storm/kafka/KafkaTestBroker.java b/external/storm-kafka/src/test/org/apache/storm/kafka/KafkaTestBroker.java
new file mode 100644
index 0000000..e2fb60f
--- /dev/null
+++ b/external/storm-kafka/src/test/org/apache/storm/kafka/KafkaTestBroker.java
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.CuratorFrameworkFactory;
+import org.apache.curator.framework.imps.CuratorFrameworkState;
+import org.apache.curator.retry.ExponentialBackoffRetry;
+import org.apache.curator.test.InstanceSpec;
+import org.apache.curator.test.TestingServer;
+
+import kafka.server.KafkaConfig;
+import kafka.server.KafkaServerStartable;
+import org.apache.commons.io.FileUtils;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Properties;
+
+/**
+ * Date: 11/01/2014
+ * Time: 13:15
+ */
+public class KafkaTestBroker {
+
+    private int port;
+    private KafkaServerStartable kafka;
+    private TestingServer server;
+    private CuratorFramework zookeeper;
+    private File logDir;
+
+    public KafkaTestBroker() {
+        try {
+            server = new TestingServer();
+            String zookeeperConnectionString = server.getConnectString();
+            ExponentialBackoffRetry retryPolicy = new ExponentialBackoffRetry(1000, 3);
+            zookeeper = CuratorFrameworkFactory.newClient(zookeeperConnectionString, retryPolicy);
+            zookeeper.start();
+            port = InstanceSpec.getRandomPort();
+            logDir = new File(System.getProperty("java.io.tmpdir"), "kafka/logs/kafka-test-" + port);
+            KafkaConfig config = buildKafkaConfig(zookeeperConnectionString);
+            kafka = new KafkaServerStartable(config);
+            kafka.startup();
+        } catch (Exception ex) {
+            throw new RuntimeException("Could not start test broker", ex);
+        }
+    }
+
+    private kafka.server.KafkaConfig buildKafkaConfig(String zookeeperConnectionString) {
+        Properties p = new Properties();
+        p.setProperty("zookeeper.connect", zookeeperConnectionString);
+        p.setProperty("broker.id", "0");
+        p.setProperty("port", "" + port);
+        p.setProperty("log.dirs", logDir.getAbsolutePath());
+        return new KafkaConfig(p);
+    }
+
+    public String getBrokerConnectionString() {
+        return "localhost:" + port;
+    }
+
+    public int getPort() {
+        return port;
+    }
+    public void shutdown() {
+        kafka.shutdown();
+        if (zookeeper.getState().equals(CuratorFrameworkState.STARTED)) {
+            zookeeper.close();
+        }
+        try {
+            server.close();
+        } catch (IOException e) {
+            e.printStackTrace();
+        }
+        FileUtils.deleteQuietly(logDir);
+    }
+}
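
KafkaTestBroker above spins up an in-process ZooKeeper (Curator TestingServer) plus a single Kafka broker on a random port, which the storm-kafka integration tests point their spouts and producers at. A usage sketch (not part of this diff):

    import org.apache.storm.kafka.KafkaTestBroker;

    public class EmbeddedBrokerExample {
        public static void main(String[] args) {
            KafkaTestBroker broker = new KafkaTestBroker();    // starts ZooKeeper + one Kafka broker
            try {
                // e.g. "localhost:53412" - the port is random per run
                System.out.println("broker at " + broker.getBrokerConnectionString());
                // ... produce/consume against the broker here ...
            } finally {
                broker.shutdown();                             // stops Kafka and ZooKeeper, deletes the log dir
            }
        }
    }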


[06/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatibility

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/generated/Assignment.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/generated/Assignment.java b/storm-core/src/jvm/backtype/storm/generated/Assignment.java
deleted file mode 100644
index cf59c05..0000000
--- a/storm-core/src/jvm/backtype/storm/generated/Assignment.java
+++ /dev/null
@@ -1,1159 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Autogenerated by Thrift Compiler (0.9.3)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- *  @generated
- */
-package backtype.storm.generated;
-
-import org.apache.thrift.scheme.IScheme;
-import org.apache.thrift.scheme.SchemeFactory;
-import org.apache.thrift.scheme.StandardScheme;
-
-import org.apache.thrift.scheme.TupleScheme;
-import org.apache.thrift.protocol.TTupleProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.TException;
-import org.apache.thrift.async.AsyncMethodCallback;
-import org.apache.thrift.server.AbstractNonblockingServer.*;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.EnumMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.EnumSet;
-import java.util.Collections;
-import java.util.BitSet;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import javax.annotation.Generated;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
-public class Assignment implements org.apache.thrift.TBase<Assignment, Assignment._Fields>, java.io.Serializable, Cloneable, Comparable<Assignment> {
-  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Assignment");
-
-  private static final org.apache.thrift.protocol.TField MASTER_CODE_DIR_FIELD_DESC = new org.apache.thrift.protocol.TField("master_code_dir", org.apache.thrift.protocol.TType.STRING, (short)1);
-  private static final org.apache.thrift.protocol.TField NODE_HOST_FIELD_DESC = new org.apache.thrift.protocol.TField("node_host", org.apache.thrift.protocol.TType.MAP, (short)2);
-  private static final org.apache.thrift.protocol.TField EXECUTOR_NODE_PORT_FIELD_DESC = new org.apache.thrift.protocol.TField("executor_node_port", org.apache.thrift.protocol.TType.MAP, (short)3);
-  private static final org.apache.thrift.protocol.TField EXECUTOR_START_TIME_SECS_FIELD_DESC = new org.apache.thrift.protocol.TField("executor_start_time_secs", org.apache.thrift.protocol.TType.MAP, (short)4);
-  private static final org.apache.thrift.protocol.TField WORKER_RESOURCES_FIELD_DESC = new org.apache.thrift.protocol.TField("worker_resources", org.apache.thrift.protocol.TType.MAP, (short)5);
-
-  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
-  static {
-    schemes.put(StandardScheme.class, new AssignmentStandardSchemeFactory());
-    schemes.put(TupleScheme.class, new AssignmentTupleSchemeFactory());
-  }
-
-  private String master_code_dir; // required
-  private Map<String,String> node_host; // optional
-  private Map<List<Long>,NodeInfo> executor_node_port; // optional
-  private Map<List<Long>,Long> executor_start_time_secs; // optional
-  private Map<NodeInfo,WorkerResources> worker_resources; // optional
-
-  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    MASTER_CODE_DIR((short)1, "master_code_dir"),
-    NODE_HOST((short)2, "node_host"),
-    EXECUTOR_NODE_PORT((short)3, "executor_node_port"),
-    EXECUTOR_START_TIME_SECS((short)4, "executor_start_time_secs"),
-    WORKER_RESOURCES((short)5, "worker_resources");
-
-    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
-
-    static {
-      for (_Fields field : EnumSet.allOf(_Fields.class)) {
-        byName.put(field.getFieldName(), field);
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, or null if its not found.
-     */
-    public static _Fields findByThriftId(int fieldId) {
-      switch(fieldId) {
-        case 1: // MASTER_CODE_DIR
-          return MASTER_CODE_DIR;
-        case 2: // NODE_HOST
-          return NODE_HOST;
-        case 3: // EXECUTOR_NODE_PORT
-          return EXECUTOR_NODE_PORT;
-        case 4: // EXECUTOR_START_TIME_SECS
-          return EXECUTOR_START_TIME_SECS;
-        case 5: // WORKER_RESOURCES
-          return WORKER_RESOURCES;
-        default:
-          return null;
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, throwing an exception
-     * if it is not found.
-     */
-    public static _Fields findByThriftIdOrThrow(int fieldId) {
-      _Fields fields = findByThriftId(fieldId);
-      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-      return fields;
-    }
-
-    /**
-     * Find the _Fields constant that matches name, or null if its not found.
-     */
-    public static _Fields findByName(String name) {
-      return byName.get(name);
-    }
-
-    private final short _thriftId;
-    private final String _fieldName;
-
-    _Fields(short thriftId, String fieldName) {
-      _thriftId = thriftId;
-      _fieldName = fieldName;
-    }
-
-    public short getThriftFieldId() {
-      return _thriftId;
-    }
-
-    public String getFieldName() {
-      return _fieldName;
-    }
-  }
-
-  // isset id assignments
-  private static final _Fields optionals[] = {_Fields.NODE_HOST,_Fields.EXECUTOR_NODE_PORT,_Fields.EXECUTOR_START_TIME_SECS,_Fields.WORKER_RESOURCES};
-  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
-  static {
-    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.MASTER_CODE_DIR, new org.apache.thrift.meta_data.FieldMetaData("master_code_dir", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.NODE_HOST, new org.apache.thrift.meta_data.FieldMetaData("node_host", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
-            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
-            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
-    tmpMap.put(_Fields.EXECUTOR_NODE_PORT, new org.apache.thrift.meta_data.FieldMetaData("executor_node_port", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
-            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
-                new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)), 
-            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, NodeInfo.class))));
-    tmpMap.put(_Fields.EXECUTOR_START_TIME_SECS, new org.apache.thrift.meta_data.FieldMetaData("executor_start_time_secs", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
-            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
-                new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)), 
-            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))));
-    tmpMap.put(_Fields.WORKER_RESOURCES, new org.apache.thrift.meta_data.FieldMetaData("worker_resources", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
-            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, NodeInfo.class), 
-            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WorkerResources.class))));
-    metaDataMap = Collections.unmodifiableMap(tmpMap);
-    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Assignment.class, metaDataMap);
-  }
-
-  public Assignment() {
-    this.node_host = new HashMap<String,String>();
-
-    this.executor_node_port = new HashMap<List<Long>,NodeInfo>();
-
-    this.executor_start_time_secs = new HashMap<List<Long>,Long>();
-
-    this.worker_resources = new HashMap<NodeInfo,WorkerResources>();
-
-  }
-
-  public Assignment(
-    String master_code_dir)
-  {
-    this();
-    this.master_code_dir = master_code_dir;
-  }
-
-  /**
-   * Performs a deep copy on <i>other</i>.
-   */
-  public Assignment(Assignment other) {
-    if (other.is_set_master_code_dir()) {
-      this.master_code_dir = other.master_code_dir;
-    }
-    if (other.is_set_node_host()) {
-      Map<String,String> __this__node_host = new HashMap<String,String>(other.node_host);
-      this.node_host = __this__node_host;
-    }
-    if (other.is_set_executor_node_port()) {
-      Map<List<Long>,NodeInfo> __this__executor_node_port = new HashMap<List<Long>,NodeInfo>(other.executor_node_port.size());
-      for (Map.Entry<List<Long>, NodeInfo> other_element : other.executor_node_port.entrySet()) {
-
-        List<Long> other_element_key = other_element.getKey();
-        NodeInfo other_element_value = other_element.getValue();
-
-        List<Long> __this__executor_node_port_copy_key = new ArrayList<Long>(other_element_key);
-
-        NodeInfo __this__executor_node_port_copy_value = new NodeInfo(other_element_value);
-
-        __this__executor_node_port.put(__this__executor_node_port_copy_key, __this__executor_node_port_copy_value);
-      }
-      this.executor_node_port = __this__executor_node_port;
-    }
-    if (other.is_set_executor_start_time_secs()) {
-      Map<List<Long>,Long> __this__executor_start_time_secs = new HashMap<List<Long>,Long>(other.executor_start_time_secs.size());
-      for (Map.Entry<List<Long>, Long> other_element : other.executor_start_time_secs.entrySet()) {
-
-        List<Long> other_element_key = other_element.getKey();
-        Long other_element_value = other_element.getValue();
-
-        List<Long> __this__executor_start_time_secs_copy_key = new ArrayList<Long>(other_element_key);
-
-        Long __this__executor_start_time_secs_copy_value = other_element_value;
-
-        __this__executor_start_time_secs.put(__this__executor_start_time_secs_copy_key, __this__executor_start_time_secs_copy_value);
-      }
-      this.executor_start_time_secs = __this__executor_start_time_secs;
-    }
-    if (other.is_set_worker_resources()) {
-      Map<NodeInfo,WorkerResources> __this__worker_resources = new HashMap<NodeInfo,WorkerResources>(other.worker_resources.size());
-      for (Map.Entry<NodeInfo, WorkerResources> other_element : other.worker_resources.entrySet()) {
-
-        NodeInfo other_element_key = other_element.getKey();
-        WorkerResources other_element_value = other_element.getValue();
-
-        NodeInfo __this__worker_resources_copy_key = new NodeInfo(other_element_key);
-
-        WorkerResources __this__worker_resources_copy_value = new WorkerResources(other_element_value);
-
-        __this__worker_resources.put(__this__worker_resources_copy_key, __this__worker_resources_copy_value);
-      }
-      this.worker_resources = __this__worker_resources;
-    }
-  }
-
-  public Assignment deepCopy() {
-    return new Assignment(this);
-  }
-
-  @Override
-  public void clear() {
-    this.master_code_dir = null;
-    this.node_host = new HashMap<String,String>();
-
-    this.executor_node_port = new HashMap<List<Long>,NodeInfo>();
-
-    this.executor_start_time_secs = new HashMap<List<Long>,Long>();
-
-    this.worker_resources = new HashMap<NodeInfo,WorkerResources>();
-
-  }
-
-  public String get_master_code_dir() {
-    return this.master_code_dir;
-  }
-
-  public void set_master_code_dir(String master_code_dir) {
-    this.master_code_dir = master_code_dir;
-  }
-
-  public void unset_master_code_dir() {
-    this.master_code_dir = null;
-  }
-
-  /** Returns true if field master_code_dir is set (has been assigned a value) and false otherwise */
-  public boolean is_set_master_code_dir() {
-    return this.master_code_dir != null;
-  }
-
-  public void set_master_code_dir_isSet(boolean value) {
-    if (!value) {
-      this.master_code_dir = null;
-    }
-  }
-
-  public int get_node_host_size() {
-    return (this.node_host == null) ? 0 : this.node_host.size();
-  }
-
-  public void put_to_node_host(String key, String val) {
-    if (this.node_host == null) {
-      this.node_host = new HashMap<String,String>();
-    }
-    this.node_host.put(key, val);
-  }
-
-  public Map<String,String> get_node_host() {
-    return this.node_host;
-  }
-
-  public void set_node_host(Map<String,String> node_host) {
-    this.node_host = node_host;
-  }
-
-  public void unset_node_host() {
-    this.node_host = null;
-  }
-
-  /** Returns true if field node_host is set (has been assigned a value) and false otherwise */
-  public boolean is_set_node_host() {
-    return this.node_host != null;
-  }
-
-  public void set_node_host_isSet(boolean value) {
-    if (!value) {
-      this.node_host = null;
-    }
-  }
-
-  public int get_executor_node_port_size() {
-    return (this.executor_node_port == null) ? 0 : this.executor_node_port.size();
-  }
-
-  public void put_to_executor_node_port(List<Long> key, NodeInfo val) {
-    if (this.executor_node_port == null) {
-      this.executor_node_port = new HashMap<List<Long>,NodeInfo>();
-    }
-    this.executor_node_port.put(key, val);
-  }
-
-  public Map<List<Long>,NodeInfo> get_executor_node_port() {
-    return this.executor_node_port;
-  }
-
-  public void set_executor_node_port(Map<List<Long>,NodeInfo> executor_node_port) {
-    this.executor_node_port = executor_node_port;
-  }
-
-  public void unset_executor_node_port() {
-    this.executor_node_port = null;
-  }
-
-  /** Returns true if field executor_node_port is set (has been assigned a value) and false otherwise */
-  public boolean is_set_executor_node_port() {
-    return this.executor_node_port != null;
-  }
-
-  public void set_executor_node_port_isSet(boolean value) {
-    if (!value) {
-      this.executor_node_port = null;
-    }
-  }
-
-  public int get_executor_start_time_secs_size() {
-    return (this.executor_start_time_secs == null) ? 0 : this.executor_start_time_secs.size();
-  }
-
-  public void put_to_executor_start_time_secs(List<Long> key, long val) {
-    if (this.executor_start_time_secs == null) {
-      this.executor_start_time_secs = new HashMap<List<Long>,Long>();
-    }
-    this.executor_start_time_secs.put(key, val);
-  }
-
-  public Map<List<Long>,Long> get_executor_start_time_secs() {
-    return this.executor_start_time_secs;
-  }
-
-  public void set_executor_start_time_secs(Map<List<Long>,Long> executor_start_time_secs) {
-    this.executor_start_time_secs = executor_start_time_secs;
-  }
-
-  public void unset_executor_start_time_secs() {
-    this.executor_start_time_secs = null;
-  }
-
-  /** Returns true if field executor_start_time_secs is set (has been assigned a value) and false otherwise */
-  public boolean is_set_executor_start_time_secs() {
-    return this.executor_start_time_secs != null;
-  }
-
-  public void set_executor_start_time_secs_isSet(boolean value) {
-    if (!value) {
-      this.executor_start_time_secs = null;
-    }
-  }
-
-  public int get_worker_resources_size() {
-    return (this.worker_resources == null) ? 0 : this.worker_resources.size();
-  }
-
-  public void put_to_worker_resources(NodeInfo key, WorkerResources val) {
-    if (this.worker_resources == null) {
-      this.worker_resources = new HashMap<NodeInfo,WorkerResources>();
-    }
-    this.worker_resources.put(key, val);
-  }
-
-  public Map<NodeInfo,WorkerResources> get_worker_resources() {
-    return this.worker_resources;
-  }
-
-  public void set_worker_resources(Map<NodeInfo,WorkerResources> worker_resources) {
-    this.worker_resources = worker_resources;
-  }
-
-  public void unset_worker_resources() {
-    this.worker_resources = null;
-  }
-
-  /** Returns true if field worker_resources is set (has been assigned a value) and false otherwise */
-  public boolean is_set_worker_resources() {
-    return this.worker_resources != null;
-  }
-
-  public void set_worker_resources_isSet(boolean value) {
-    if (!value) {
-      this.worker_resources = null;
-    }
-  }
-
-  public void setFieldValue(_Fields field, Object value) {
-    switch (field) {
-    case MASTER_CODE_DIR:
-      if (value == null) {
-        unset_master_code_dir();
-      } else {
-        set_master_code_dir((String)value);
-      }
-      break;
-
-    case NODE_HOST:
-      if (value == null) {
-        unset_node_host();
-      } else {
-        set_node_host((Map<String,String>)value);
-      }
-      break;
-
-    case EXECUTOR_NODE_PORT:
-      if (value == null) {
-        unset_executor_node_port();
-      } else {
-        set_executor_node_port((Map<List<Long>,NodeInfo>)value);
-      }
-      break;
-
-    case EXECUTOR_START_TIME_SECS:
-      if (value == null) {
-        unset_executor_start_time_secs();
-      } else {
-        set_executor_start_time_secs((Map<List<Long>,Long>)value);
-      }
-      break;
-
-    case WORKER_RESOURCES:
-      if (value == null) {
-        unset_worker_resources();
-      } else {
-        set_worker_resources((Map<NodeInfo,WorkerResources>)value);
-      }
-      break;
-
-    }
-  }
-
-  public Object getFieldValue(_Fields field) {
-    switch (field) {
-    case MASTER_CODE_DIR:
-      return get_master_code_dir();
-
-    case NODE_HOST:
-      return get_node_host();
-
-    case EXECUTOR_NODE_PORT:
-      return get_executor_node_port();
-
-    case EXECUTOR_START_TIME_SECS:
-      return get_executor_start_time_secs();
-
-    case WORKER_RESOURCES:
-      return get_worker_resources();
-
-    }
-    throw new IllegalStateException();
-  }
-
-  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
-  public boolean isSet(_Fields field) {
-    if (field == null) {
-      throw new IllegalArgumentException();
-    }
-
-    switch (field) {
-    case MASTER_CODE_DIR:
-      return is_set_master_code_dir();
-    case NODE_HOST:
-      return is_set_node_host();
-    case EXECUTOR_NODE_PORT:
-      return is_set_executor_node_port();
-    case EXECUTOR_START_TIME_SECS:
-      return is_set_executor_start_time_secs();
-    case WORKER_RESOURCES:
-      return is_set_worker_resources();
-    }
-    throw new IllegalStateException();
-  }
-
-  @Override
-  public boolean equals(Object that) {
-    if (that == null)
-      return false;
-    if (that instanceof Assignment)
-      return this.equals((Assignment)that);
-    return false;
-  }
-
-  public boolean equals(Assignment that) {
-    if (that == null)
-      return false;
-
-    boolean this_present_master_code_dir = true && this.is_set_master_code_dir();
-    boolean that_present_master_code_dir = true && that.is_set_master_code_dir();
-    if (this_present_master_code_dir || that_present_master_code_dir) {
-      if (!(this_present_master_code_dir && that_present_master_code_dir))
-        return false;
-      if (!this.master_code_dir.equals(that.master_code_dir))
-        return false;
-    }
-
-    boolean this_present_node_host = true && this.is_set_node_host();
-    boolean that_present_node_host = true && that.is_set_node_host();
-    if (this_present_node_host || that_present_node_host) {
-      if (!(this_present_node_host && that_present_node_host))
-        return false;
-      if (!this.node_host.equals(that.node_host))
-        return false;
-    }
-
-    boolean this_present_executor_node_port = true && this.is_set_executor_node_port();
-    boolean that_present_executor_node_port = true && that.is_set_executor_node_port();
-    if (this_present_executor_node_port || that_present_executor_node_port) {
-      if (!(this_present_executor_node_port && that_present_executor_node_port))
-        return false;
-      if (!this.executor_node_port.equals(that.executor_node_port))
-        return false;
-    }
-
-    boolean this_present_executor_start_time_secs = true && this.is_set_executor_start_time_secs();
-    boolean that_present_executor_start_time_secs = true && that.is_set_executor_start_time_secs();
-    if (this_present_executor_start_time_secs || that_present_executor_start_time_secs) {
-      if (!(this_present_executor_start_time_secs && that_present_executor_start_time_secs))
-        return false;
-      if (!this.executor_start_time_secs.equals(that.executor_start_time_secs))
-        return false;
-    }
-
-    boolean this_present_worker_resources = true && this.is_set_worker_resources();
-    boolean that_present_worker_resources = true && that.is_set_worker_resources();
-    if (this_present_worker_resources || that_present_worker_resources) {
-      if (!(this_present_worker_resources && that_present_worker_resources))
-        return false;
-      if (!this.worker_resources.equals(that.worker_resources))
-        return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    List<Object> list = new ArrayList<Object>();
-
-    boolean present_master_code_dir = true && (is_set_master_code_dir());
-    list.add(present_master_code_dir);
-    if (present_master_code_dir)
-      list.add(master_code_dir);
-
-    boolean present_node_host = true && (is_set_node_host());
-    list.add(present_node_host);
-    if (present_node_host)
-      list.add(node_host);
-
-    boolean present_executor_node_port = true && (is_set_executor_node_port());
-    list.add(present_executor_node_port);
-    if (present_executor_node_port)
-      list.add(executor_node_port);
-
-    boolean present_executor_start_time_secs = true && (is_set_executor_start_time_secs());
-    list.add(present_executor_start_time_secs);
-    if (present_executor_start_time_secs)
-      list.add(executor_start_time_secs);
-
-    boolean present_worker_resources = true && (is_set_worker_resources());
-    list.add(present_worker_resources);
-    if (present_worker_resources)
-      list.add(worker_resources);
-
-    return list.hashCode();
-  }
-
-  @Override
-  public int compareTo(Assignment other) {
-    if (!getClass().equals(other.getClass())) {
-      return getClass().getName().compareTo(other.getClass().getName());
-    }
-
-    int lastComparison = 0;
-
-    lastComparison = Boolean.valueOf(is_set_master_code_dir()).compareTo(other.is_set_master_code_dir());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_master_code_dir()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.master_code_dir, other.master_code_dir);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_node_host()).compareTo(other.is_set_node_host());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_node_host()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.node_host, other.node_host);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_executor_node_port()).compareTo(other.is_set_executor_node_port());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_executor_node_port()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.executor_node_port, other.executor_node_port);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_executor_start_time_secs()).compareTo(other.is_set_executor_start_time_secs());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_executor_start_time_secs()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.executor_start_time_secs, other.executor_start_time_secs);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_worker_resources()).compareTo(other.is_set_worker_resources());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_worker_resources()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.worker_resources, other.worker_resources);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    return 0;
-  }
-
-  public _Fields fieldForId(int fieldId) {
-    return _Fields.findByThriftId(fieldId);
-  }
-
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
-  }
-
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("Assignment(");
-    boolean first = true;
-
-    sb.append("master_code_dir:");
-    if (this.master_code_dir == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.master_code_dir);
-    }
-    first = false;
-    if (is_set_node_host()) {
-      if (!first) sb.append(", ");
-      sb.append("node_host:");
-      if (this.node_host == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.node_host);
-      }
-      first = false;
-    }
-    if (is_set_executor_node_port()) {
-      if (!first) sb.append(", ");
-      sb.append("executor_node_port:");
-      if (this.executor_node_port == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.executor_node_port);
-      }
-      first = false;
-    }
-    if (is_set_executor_start_time_secs()) {
-      if (!first) sb.append(", ");
-      sb.append("executor_start_time_secs:");
-      if (this.executor_start_time_secs == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.executor_start_time_secs);
-      }
-      first = false;
-    }
-    if (is_set_worker_resources()) {
-      if (!first) sb.append(", ");
-      sb.append("worker_resources:");
-      if (this.worker_resources == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.worker_resources);
-      }
-      first = false;
-    }
-    sb.append(")");
-    return sb.toString();
-  }
-
-  public void validate() throws org.apache.thrift.TException {
-    // check for required fields
-    if (!is_set_master_code_dir()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'master_code_dir' is unset! Struct:" + toString());
-    }
-
-    // check for sub-struct validity
-  }
-
-  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
-    try {
-      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
-    try {
-      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private static class AssignmentStandardSchemeFactory implements SchemeFactory {
-    public AssignmentStandardScheme getScheme() {
-      return new AssignmentStandardScheme();
-    }
-  }
-
-  private static class AssignmentStandardScheme extends StandardScheme<Assignment> {
-
-    public void read(org.apache.thrift.protocol.TProtocol iprot, Assignment struct) throws org.apache.thrift.TException {
-      org.apache.thrift.protocol.TField schemeField;
-      iprot.readStructBegin();
-      while (true)
-      {
-        schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
-          break;
-        }
-        switch (schemeField.id) {
-          case 1: // MASTER_CODE_DIR
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.master_code_dir = iprot.readString();
-              struct.set_master_code_dir_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 2: // NODE_HOST
-            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
-              {
-                org.apache.thrift.protocol.TMap _map548 = iprot.readMapBegin();
-                struct.node_host = new HashMap<String,String>(2*_map548.size);
-                String _key549;
-                String _val550;
-                for (int _i551 = 0; _i551 < _map548.size; ++_i551)
-                {
-                  _key549 = iprot.readString();
-                  _val550 = iprot.readString();
-                  struct.node_host.put(_key549, _val550);
-                }
-                iprot.readMapEnd();
-              }
-              struct.set_node_host_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 3: // EXECUTOR_NODE_PORT
-            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
-              {
-                org.apache.thrift.protocol.TMap _map552 = iprot.readMapBegin();
-                struct.executor_node_port = new HashMap<List<Long>,NodeInfo>(2*_map552.size);
-                List<Long> _key553;
-                NodeInfo _val554;
-                for (int _i555 = 0; _i555 < _map552.size; ++_i555)
-                {
-                  {
-                    org.apache.thrift.protocol.TList _list556 = iprot.readListBegin();
-                    _key553 = new ArrayList<Long>(_list556.size);
-                    long _elem557;
-                    for (int _i558 = 0; _i558 < _list556.size; ++_i558)
-                    {
-                      _elem557 = iprot.readI64();
-                      _key553.add(_elem557);
-                    }
-                    iprot.readListEnd();
-                  }
-                  _val554 = new NodeInfo();
-                  _val554.read(iprot);
-                  struct.executor_node_port.put(_key553, _val554);
-                }
-                iprot.readMapEnd();
-              }
-              struct.set_executor_node_port_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 4: // EXECUTOR_START_TIME_SECS
-            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
-              {
-                org.apache.thrift.protocol.TMap _map559 = iprot.readMapBegin();
-                struct.executor_start_time_secs = new HashMap<List<Long>,Long>(2*_map559.size);
-                List<Long> _key560;
-                long _val561;
-                for (int _i562 = 0; _i562 < _map559.size; ++_i562)
-                {
-                  {
-                    org.apache.thrift.protocol.TList _list563 = iprot.readListBegin();
-                    _key560 = new ArrayList<Long>(_list563.size);
-                    long _elem564;
-                    for (int _i565 = 0; _i565 < _list563.size; ++_i565)
-                    {
-                      _elem564 = iprot.readI64();
-                      _key560.add(_elem564);
-                    }
-                    iprot.readListEnd();
-                  }
-                  _val561 = iprot.readI64();
-                  struct.executor_start_time_secs.put(_key560, _val561);
-                }
-                iprot.readMapEnd();
-              }
-              struct.set_executor_start_time_secs_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 5: // WORKER_RESOURCES
-            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
-              {
-                org.apache.thrift.protocol.TMap _map566 = iprot.readMapBegin();
-                struct.worker_resources = new HashMap<NodeInfo,WorkerResources>(2*_map566.size);
-                NodeInfo _key567;
-                WorkerResources _val568;
-                for (int _i569 = 0; _i569 < _map566.size; ++_i569)
-                {
-                  _key567 = new NodeInfo();
-                  _key567.read(iprot);
-                  _val568 = new WorkerResources();
-                  _val568.read(iprot);
-                  struct.worker_resources.put(_key567, _val568);
-                }
-                iprot.readMapEnd();
-              }
-              struct.set_worker_resources_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          default:
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-        }
-        iprot.readFieldEnd();
-      }
-      iprot.readStructEnd();
-      struct.validate();
-    }
-
-    public void write(org.apache.thrift.protocol.TProtocol oprot, Assignment struct) throws org.apache.thrift.TException {
-      struct.validate();
-
-      oprot.writeStructBegin(STRUCT_DESC);
-      if (struct.master_code_dir != null) {
-        oprot.writeFieldBegin(MASTER_CODE_DIR_FIELD_DESC);
-        oprot.writeString(struct.master_code_dir);
-        oprot.writeFieldEnd();
-      }
-      if (struct.node_host != null) {
-        if (struct.is_set_node_host()) {
-          oprot.writeFieldBegin(NODE_HOST_FIELD_DESC);
-          {
-            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.node_host.size()));
-            for (Map.Entry<String, String> _iter570 : struct.node_host.entrySet())
-            {
-              oprot.writeString(_iter570.getKey());
-              oprot.writeString(_iter570.getValue());
-            }
-            oprot.writeMapEnd();
-          }
-          oprot.writeFieldEnd();
-        }
-      }
-      if (struct.executor_node_port != null) {
-        if (struct.is_set_executor_node_port()) {
-          oprot.writeFieldBegin(EXECUTOR_NODE_PORT_FIELD_DESC);
-          {
-            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.LIST, org.apache.thrift.protocol.TType.STRUCT, struct.executor_node_port.size()));
-            for (Map.Entry<List<Long>, NodeInfo> _iter571 : struct.executor_node_port.entrySet())
-            {
-              {
-                oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, _iter571.getKey().size()));
-                for (long _iter572 : _iter571.getKey())
-                {
-                  oprot.writeI64(_iter572);
-                }
-                oprot.writeListEnd();
-              }
-              _iter571.getValue().write(oprot);
-            }
-            oprot.writeMapEnd();
-          }
-          oprot.writeFieldEnd();
-        }
-      }
-      if (struct.executor_start_time_secs != null) {
-        if (struct.is_set_executor_start_time_secs()) {
-          oprot.writeFieldBegin(EXECUTOR_START_TIME_SECS_FIELD_DESC);
-          {
-            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.LIST, org.apache.thrift.protocol.TType.I64, struct.executor_start_time_secs.size()));
-            for (Map.Entry<List<Long>, Long> _iter573 : struct.executor_start_time_secs.entrySet())
-            {
-              {
-                oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, _iter573.getKey().size()));
-                for (long _iter574 : _iter573.getKey())
-                {
-                  oprot.writeI64(_iter574);
-                }
-                oprot.writeListEnd();
-              }
-              oprot.writeI64(_iter573.getValue());
-            }
-            oprot.writeMapEnd();
-          }
-          oprot.writeFieldEnd();
-        }
-      }
-      if (struct.worker_resources != null) {
-        if (struct.is_set_worker_resources()) {
-          oprot.writeFieldBegin(WORKER_RESOURCES_FIELD_DESC);
-          {
-            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.STRUCT, struct.worker_resources.size()));
-            for (Map.Entry<NodeInfo, WorkerResources> _iter575 : struct.worker_resources.entrySet())
-            {
-              _iter575.getKey().write(oprot);
-              _iter575.getValue().write(oprot);
-            }
-            oprot.writeMapEnd();
-          }
-          oprot.writeFieldEnd();
-        }
-      }
-      oprot.writeFieldStop();
-      oprot.writeStructEnd();
-    }
-
-  }
-
-  private static class AssignmentTupleSchemeFactory implements SchemeFactory {
-    public AssignmentTupleScheme getScheme() {
-      return new AssignmentTupleScheme();
-    }
-  }
-
-  private static class AssignmentTupleScheme extends TupleScheme<Assignment> {
-
-    @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, Assignment struct) throws org.apache.thrift.TException {
-      TTupleProtocol oprot = (TTupleProtocol) prot;
-      oprot.writeString(struct.master_code_dir);
-      BitSet optionals = new BitSet();
-      if (struct.is_set_node_host()) {
-        optionals.set(0);
-      }
-      if (struct.is_set_executor_node_port()) {
-        optionals.set(1);
-      }
-      if (struct.is_set_executor_start_time_secs()) {
-        optionals.set(2);
-      }
-      if (struct.is_set_worker_resources()) {
-        optionals.set(3);
-      }
-      oprot.writeBitSet(optionals, 4);
-      if (struct.is_set_node_host()) {
-        {
-          oprot.writeI32(struct.node_host.size());
-          for (Map.Entry<String, String> _iter576 : struct.node_host.entrySet())
-          {
-            oprot.writeString(_iter576.getKey());
-            oprot.writeString(_iter576.getValue());
-          }
-        }
-      }
-      if (struct.is_set_executor_node_port()) {
-        {
-          oprot.writeI32(struct.executor_node_port.size());
-          for (Map.Entry<List<Long>, NodeInfo> _iter577 : struct.executor_node_port.entrySet())
-          {
-            {
-              oprot.writeI32(_iter577.getKey().size());
-              for (long _iter578 : _iter577.getKey())
-              {
-                oprot.writeI64(_iter578);
-              }
-            }
-            _iter577.getValue().write(oprot);
-          }
-        }
-      }
-      if (struct.is_set_executor_start_time_secs()) {
-        {
-          oprot.writeI32(struct.executor_start_time_secs.size());
-          for (Map.Entry<List<Long>, Long> _iter579 : struct.executor_start_time_secs.entrySet())
-          {
-            {
-              oprot.writeI32(_iter579.getKey().size());
-              for (long _iter580 : _iter579.getKey())
-              {
-                oprot.writeI64(_iter580);
-              }
-            }
-            oprot.writeI64(_iter579.getValue());
-          }
-        }
-      }
-      if (struct.is_set_worker_resources()) {
-        {
-          oprot.writeI32(struct.worker_resources.size());
-          for (Map.Entry<NodeInfo, WorkerResources> _iter581 : struct.worker_resources.entrySet())
-          {
-            _iter581.getKey().write(oprot);
-            _iter581.getValue().write(oprot);
-          }
-        }
-      }
-    }
-
-    @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, Assignment struct) throws org.apache.thrift.TException {
-      TTupleProtocol iprot = (TTupleProtocol) prot;
-      struct.master_code_dir = iprot.readString();
-      struct.set_master_code_dir_isSet(true);
-      BitSet incoming = iprot.readBitSet(4);
-      if (incoming.get(0)) {
-        {
-          org.apache.thrift.protocol.TMap _map582 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.node_host = new HashMap<String,String>(2*_map582.size);
-          String _key583;
-          String _val584;
-          for (int _i585 = 0; _i585 < _map582.size; ++_i585)
-          {
-            _key583 = iprot.readString();
-            _val584 = iprot.readString();
-            struct.node_host.put(_key583, _val584);
-          }
-        }
-        struct.set_node_host_isSet(true);
-      }
-      if (incoming.get(1)) {
-        {
-          org.apache.thrift.protocol.TMap _map586 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.LIST, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.executor_node_port = new HashMap<List<Long>,NodeInfo>(2*_map586.size);
-          List<Long> _key587;
-          NodeInfo _val588;
-          for (int _i589 = 0; _i589 < _map586.size; ++_i589)
-          {
-            {
-              org.apache.thrift.protocol.TList _list590 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
-              _key587 = new ArrayList<Long>(_list590.size);
-              long _elem591;
-              for (int _i592 = 0; _i592 < _list590.size; ++_i592)
-              {
-                _elem591 = iprot.readI64();
-                _key587.add(_elem591);
-              }
-            }
-            _val588 = new NodeInfo();
-            _val588.read(iprot);
-            struct.executor_node_port.put(_key587, _val588);
-          }
-        }
-        struct.set_executor_node_port_isSet(true);
-      }
-      if (incoming.get(2)) {
-        {
-          org.apache.thrift.protocol.TMap _map593 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.LIST, org.apache.thrift.protocol.TType.I64, iprot.readI32());
-          struct.executor_start_time_secs = new HashMap<List<Long>,Long>(2*_map593.size);
-          List<Long> _key594;
-          long _val595;
-          for (int _i596 = 0; _i596 < _map593.size; ++_i596)
-          {
-            {
-              org.apache.thrift.protocol.TList _list597 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
-              _key594 = new ArrayList<Long>(_list597.size);
-              long _elem598;
-              for (int _i599 = 0; _i599 < _list597.size; ++_i599)
-              {
-                _elem598 = iprot.readI64();
-                _key594.add(_elem598);
-              }
-            }
-            _val595 = iprot.readI64();
-            struct.executor_start_time_secs.put(_key594, _val595);
-          }
-        }
-        struct.set_executor_start_time_secs_isSet(true);
-      }
-      if (incoming.get(3)) {
-        {
-          org.apache.thrift.protocol.TMap _map600 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.worker_resources = new HashMap<NodeInfo,WorkerResources>(2*_map600.size);
-          NodeInfo _key601;
-          WorkerResources _val602;
-          for (int _i603 = 0; _i603 < _map600.size; ++_i603)
-          {
-            _key601 = new NodeInfo();
-            _key601.read(iprot);
-            _val602 = new WorkerResources();
-            _val602.read(iprot);
-            struct.worker_resources.put(_key601, _val602);
-          }
-        }
-        struct.set_worker_resources_isSet(true);
-      }
-    }
-  }
-
-}
-

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/generated/AuthorizationException.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/generated/AuthorizationException.java b/storm-core/src/jvm/backtype/storm/generated/AuthorizationException.java
deleted file mode 100644
index c4bf053..0000000
--- a/storm-core/src/jvm/backtype/storm/generated/AuthorizationException.java
+++ /dev/null
@@ -1,406 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Autogenerated by Thrift Compiler (0.9.3)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- *  @generated
- */
-package backtype.storm.generated;
-
-import org.apache.thrift.scheme.IScheme;
-import org.apache.thrift.scheme.SchemeFactory;
-import org.apache.thrift.scheme.StandardScheme;
-
-import org.apache.thrift.scheme.TupleScheme;
-import org.apache.thrift.protocol.TTupleProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.TException;
-import org.apache.thrift.async.AsyncMethodCallback;
-import org.apache.thrift.server.AbstractNonblockingServer.*;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.EnumMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.EnumSet;
-import java.util.Collections;
-import java.util.BitSet;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import javax.annotation.Generated;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
-public class AuthorizationException extends TException implements org.apache.thrift.TBase<AuthorizationException, AuthorizationException._Fields>, java.io.Serializable, Cloneable, Comparable<AuthorizationException> {
-  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AuthorizationException");
-
-  private static final org.apache.thrift.protocol.TField MSG_FIELD_DESC = new org.apache.thrift.protocol.TField("msg", org.apache.thrift.protocol.TType.STRING, (short)1);
-
-  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
-  static {
-    schemes.put(StandardScheme.class, new AuthorizationExceptionStandardSchemeFactory());
-    schemes.put(TupleScheme.class, new AuthorizationExceptionTupleSchemeFactory());
-  }
-
-  private String msg; // required
-
-  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    MSG((short)1, "msg");
-
-    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
-
-    static {
-      for (_Fields field : EnumSet.allOf(_Fields.class)) {
-        byName.put(field.getFieldName(), field);
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, or null if its not found.
-     */
-    public static _Fields findByThriftId(int fieldId) {
-      switch(fieldId) {
-        case 1: // MSG
-          return MSG;
-        default:
-          return null;
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, throwing an exception
-     * if it is not found.
-     */
-    public static _Fields findByThriftIdOrThrow(int fieldId) {
-      _Fields fields = findByThriftId(fieldId);
-      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-      return fields;
-    }
-
-    /**
-     * Find the _Fields constant that matches name, or null if its not found.
-     */
-    public static _Fields findByName(String name) {
-      return byName.get(name);
-    }
-
-    private final short _thriftId;
-    private final String _fieldName;
-
-    _Fields(short thriftId, String fieldName) {
-      _thriftId = thriftId;
-      _fieldName = fieldName;
-    }
-
-    public short getThriftFieldId() {
-      return _thriftId;
-    }
-
-    public String getFieldName() {
-      return _fieldName;
-    }
-  }
-
-  // isset id assignments
-  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
-  static {
-    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.MSG, new org.apache.thrift.meta_data.FieldMetaData("msg", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    metaDataMap = Collections.unmodifiableMap(tmpMap);
-    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AuthorizationException.class, metaDataMap);
-  }
-
-  public AuthorizationException() {
-  }
-
-  public AuthorizationException(
-    String msg)
-  {
-    this();
-    this.msg = msg;
-  }
-
-  /**
-   * Performs a deep copy on <i>other</i>.
-   */
-  public AuthorizationException(AuthorizationException other) {
-    if (other.is_set_msg()) {
-      this.msg = other.msg;
-    }
-  }
-
-  public AuthorizationException deepCopy() {
-    return new AuthorizationException(this);
-  }
-
-  @Override
-  public void clear() {
-    this.msg = null;
-  }
-
-  public String get_msg() {
-    return this.msg;
-  }
-
-  public void set_msg(String msg) {
-    this.msg = msg;
-  }
-
-  public void unset_msg() {
-    this.msg = null;
-  }
-
-  /** Returns true if field msg is set (has been assigned a value) and false otherwise */
-  public boolean is_set_msg() {
-    return this.msg != null;
-  }
-
-  public void set_msg_isSet(boolean value) {
-    if (!value) {
-      this.msg = null;
-    }
-  }
-
-  public void setFieldValue(_Fields field, Object value) {
-    switch (field) {
-    case MSG:
-      if (value == null) {
-        unset_msg();
-      } else {
-        set_msg((String)value);
-      }
-      break;
-
-    }
-  }
-
-  public Object getFieldValue(_Fields field) {
-    switch (field) {
-    case MSG:
-      return get_msg();
-
-    }
-    throw new IllegalStateException();
-  }
-
-  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
-  public boolean isSet(_Fields field) {
-    if (field == null) {
-      throw new IllegalArgumentException();
-    }
-
-    switch (field) {
-    case MSG:
-      return is_set_msg();
-    }
-    throw new IllegalStateException();
-  }
-
-  @Override
-  public boolean equals(Object that) {
-    if (that == null)
-      return false;
-    if (that instanceof AuthorizationException)
-      return this.equals((AuthorizationException)that);
-    return false;
-  }
-
-  public boolean equals(AuthorizationException that) {
-    if (that == null)
-      return false;
-
-    boolean this_present_msg = true && this.is_set_msg();
-    boolean that_present_msg = true && that.is_set_msg();
-    if (this_present_msg || that_present_msg) {
-      if (!(this_present_msg && that_present_msg))
-        return false;
-      if (!this.msg.equals(that.msg))
-        return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    List<Object> list = new ArrayList<Object>();
-
-    boolean present_msg = true && (is_set_msg());
-    list.add(present_msg);
-    if (present_msg)
-      list.add(msg);
-
-    return list.hashCode();
-  }
-
-  @Override
-  public int compareTo(AuthorizationException other) {
-    if (!getClass().equals(other.getClass())) {
-      return getClass().getName().compareTo(other.getClass().getName());
-    }
-
-    int lastComparison = 0;
-
-    lastComparison = Boolean.valueOf(is_set_msg()).compareTo(other.is_set_msg());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_msg()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.msg, other.msg);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    return 0;
-  }
-
-  public _Fields fieldForId(int fieldId) {
-    return _Fields.findByThriftId(fieldId);
-  }
-
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
-  }
-
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("AuthorizationException(");
-    boolean first = true;
-
-    sb.append("msg:");
-    if (this.msg == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.msg);
-    }
-    first = false;
-    sb.append(")");
-    return sb.toString();
-  }
-
-  public void validate() throws org.apache.thrift.TException {
-    // check for required fields
-    if (!is_set_msg()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'msg' is unset! Struct:" + toString());
-    }
-
-    // check for sub-struct validity
-  }
-
-  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
-    try {
-      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
-    try {
-      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private static class AuthorizationExceptionStandardSchemeFactory implements SchemeFactory {
-    public AuthorizationExceptionStandardScheme getScheme() {
-      return new AuthorizationExceptionStandardScheme();
-    }
-  }
-
-  private static class AuthorizationExceptionStandardScheme extends StandardScheme<AuthorizationException> {
-
-    public void read(org.apache.thrift.protocol.TProtocol iprot, AuthorizationException struct) throws org.apache.thrift.TException {
-      org.apache.thrift.protocol.TField schemeField;
-      iprot.readStructBegin();
-      while (true)
-      {
-        schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
-          break;
-        }
-        switch (schemeField.id) {
-          case 1: // MSG
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.msg = iprot.readString();
-              struct.set_msg_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          default:
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-        }
-        iprot.readFieldEnd();
-      }
-      iprot.readStructEnd();
-      struct.validate();
-    }
-
-    public void write(org.apache.thrift.protocol.TProtocol oprot, AuthorizationException struct) throws org.apache.thrift.TException {
-      struct.validate();
-
-      oprot.writeStructBegin(STRUCT_DESC);
-      if (struct.msg != null) {
-        oprot.writeFieldBegin(MSG_FIELD_DESC);
-        oprot.writeString(struct.msg);
-        oprot.writeFieldEnd();
-      }
-      oprot.writeFieldStop();
-      oprot.writeStructEnd();
-    }
-
-  }
-
-  private static class AuthorizationExceptionTupleSchemeFactory implements SchemeFactory {
-    public AuthorizationExceptionTupleScheme getScheme() {
-      return new AuthorizationExceptionTupleScheme();
-    }
-  }
-
-  private static class AuthorizationExceptionTupleScheme extends TupleScheme<AuthorizationException> {
-
-    @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, AuthorizationException struct) throws org.apache.thrift.TException {
-      TTupleProtocol oprot = (TTupleProtocol) prot;
-      oprot.writeString(struct.msg);
-    }
-
-    @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, AuthorizationException struct) throws org.apache.thrift.TException {
-      TTupleProtocol iprot = (TTupleProtocol) prot;
-      struct.msg = iprot.readString();
-      struct.set_msg_isSet(true);
-    }
-  }
-
-}
-
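
The classes removed in this part of the diff all follow the same Thrift 0.9.3 layout: a StandardScheme for field-tagged protocols, a TupleScheme for the denser tuple encoding, and writeObject/readObject hooks that route plain Java serialization through TCompactProtocol. As a rough illustration only (not part of this commit; it assumes a stock libthrift 0.9.x on the classpath), the AuthorizationException struct defined above can be round-tripped like this:

import org.apache.thrift.TDeserializer;
import org.apache.thrift.TException;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TCompactProtocol;

import backtype.storm.generated.AuthorizationException;

public class AuthorizationExceptionRoundTrip {
    public static void main(String[] args) throws TException {
        // 'msg' is the struct's only (required) field.
        AuthorizationException original = new AuthorizationException("access denied");

        // Serialize with the compact protocol, mirroring the generated writeObject hook.
        TSerializer serializer = new TSerializer(new TCompactProtocol.Factory());
        byte[] bytes = serializer.serialize(original);

        // Deserialize into a fresh instance, mirroring the generated readObject hook.
        AuthorizationException copy = new AuthorizationException();
        TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());
        deserializer.deserialize(copy, bytes);

        // The generated equals() compares the msg field, so the round trip is lossless.
        System.out.println(original.equals(copy)); // prints: true
    }
}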

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/generated/BeginDownloadResult.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/generated/BeginDownloadResult.java b/storm-core/src/jvm/backtype/storm/generated/BeginDownloadResult.java
deleted file mode 100644
index f01c4eb..0000000
--- a/storm-core/src/jvm/backtype/storm/generated/BeginDownloadResult.java
+++ /dev/null
@@ -1,608 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Autogenerated by Thrift Compiler (0.9.3)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- *  @generated
- */
-package backtype.storm.generated;
-
-import org.apache.thrift.scheme.IScheme;
-import org.apache.thrift.scheme.SchemeFactory;
-import org.apache.thrift.scheme.StandardScheme;
-
-import org.apache.thrift.scheme.TupleScheme;
-import org.apache.thrift.protocol.TTupleProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.TException;
-import org.apache.thrift.async.AsyncMethodCallback;
-import org.apache.thrift.server.AbstractNonblockingServer.*;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.EnumMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.EnumSet;
-import java.util.Collections;
-import java.util.BitSet;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import javax.annotation.Generated;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
-public class BeginDownloadResult implements org.apache.thrift.TBase<BeginDownloadResult, BeginDownloadResult._Fields>, java.io.Serializable, Cloneable, Comparable<BeginDownloadResult> {
-  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("BeginDownloadResult");
-
-  private static final org.apache.thrift.protocol.TField VERSION_FIELD_DESC = new org.apache.thrift.protocol.TField("version", org.apache.thrift.protocol.TType.I64, (short)1);
-  private static final org.apache.thrift.protocol.TField SESSION_FIELD_DESC = new org.apache.thrift.protocol.TField("session", org.apache.thrift.protocol.TType.STRING, (short)2);
-  private static final org.apache.thrift.protocol.TField DATA_SIZE_FIELD_DESC = new org.apache.thrift.protocol.TField("data_size", org.apache.thrift.protocol.TType.I64, (short)3);
-
-  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
-  static {
-    schemes.put(StandardScheme.class, new BeginDownloadResultStandardSchemeFactory());
-    schemes.put(TupleScheme.class, new BeginDownloadResultTupleSchemeFactory());
-  }
-
-  private long version; // required
-  private String session; // required
-  private long data_size; // optional
-
-  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    VERSION((short)1, "version"),
-    SESSION((short)2, "session"),
-    DATA_SIZE((short)3, "data_size");
-
-    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
-
-    static {
-      for (_Fields field : EnumSet.allOf(_Fields.class)) {
-        byName.put(field.getFieldName(), field);
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, or null if its not found.
-     */
-    public static _Fields findByThriftId(int fieldId) {
-      switch(fieldId) {
-        case 1: // VERSION
-          return VERSION;
-        case 2: // SESSION
-          return SESSION;
-        case 3: // DATA_SIZE
-          return DATA_SIZE;
-        default:
-          return null;
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, throwing an exception
-     * if it is not found.
-     */
-    public static _Fields findByThriftIdOrThrow(int fieldId) {
-      _Fields fields = findByThriftId(fieldId);
-      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-      return fields;
-    }
-
-    /**
-     * Find the _Fields constant that matches name, or null if its not found.
-     */
-    public static _Fields findByName(String name) {
-      return byName.get(name);
-    }
-
-    private final short _thriftId;
-    private final String _fieldName;
-
-    _Fields(short thriftId, String fieldName) {
-      _thriftId = thriftId;
-      _fieldName = fieldName;
-    }
-
-    public short getThriftFieldId() {
-      return _thriftId;
-    }
-
-    public String getFieldName() {
-      return _fieldName;
-    }
-  }
-
-  // isset id assignments
-  private static final int __VERSION_ISSET_ID = 0;
-  private static final int __DATA_SIZE_ISSET_ID = 1;
-  private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = {_Fields.DATA_SIZE};
-  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
-  static {
-    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.VERSION, new org.apache.thrift.meta_data.FieldMetaData("version", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
-    tmpMap.put(_Fields.SESSION, new org.apache.thrift.meta_data.FieldMetaData("session", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.DATA_SIZE, new org.apache.thrift.meta_data.FieldMetaData("data_size", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
-    metaDataMap = Collections.unmodifiableMap(tmpMap);
-    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(BeginDownloadResult.class, metaDataMap);
-  }
-
-  public BeginDownloadResult() {
-  }
-
-  public BeginDownloadResult(
-    long version,
-    String session)
-  {
-    this();
-    this.version = version;
-    set_version_isSet(true);
-    this.session = session;
-  }
-
-  /**
-   * Performs a deep copy on <i>other</i>.
-   */
-  public BeginDownloadResult(BeginDownloadResult other) {
-    __isset_bitfield = other.__isset_bitfield;
-    this.version = other.version;
-    if (other.is_set_session()) {
-      this.session = other.session;
-    }
-    this.data_size = other.data_size;
-  }
-
-  public BeginDownloadResult deepCopy() {
-    return new BeginDownloadResult(this);
-  }
-
-  @Override
-  public void clear() {
-    set_version_isSet(false);
-    this.version = 0;
-    this.session = null;
-    set_data_size_isSet(false);
-    this.data_size = 0;
-  }
-
-  public long get_version() {
-    return this.version;
-  }
-
-  public void set_version(long version) {
-    this.version = version;
-    set_version_isSet(true);
-  }
-
-  public void unset_version() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __VERSION_ISSET_ID);
-  }
-
-  /** Returns true if field version is set (has been assigned a value) and false otherwise */
-  public boolean is_set_version() {
-    return EncodingUtils.testBit(__isset_bitfield, __VERSION_ISSET_ID);
-  }
-
-  public void set_version_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __VERSION_ISSET_ID, value);
-  }
-
-  public String get_session() {
-    return this.session;
-  }
-
-  public void set_session(String session) {
-    this.session = session;
-  }
-
-  public void unset_session() {
-    this.session = null;
-  }
-
-  /** Returns true if field session is set (has been assigned a value) and false otherwise */
-  public boolean is_set_session() {
-    return this.session != null;
-  }
-
-  public void set_session_isSet(boolean value) {
-    if (!value) {
-      this.session = null;
-    }
-  }
-
-  public long get_data_size() {
-    return this.data_size;
-  }
-
-  public void set_data_size(long data_size) {
-    this.data_size = data_size;
-    set_data_size_isSet(true);
-  }
-
-  public void unset_data_size() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __DATA_SIZE_ISSET_ID);
-  }
-
-  /** Returns true if field data_size is set (has been assigned a value) and false otherwise */
-  public boolean is_set_data_size() {
-    return EncodingUtils.testBit(__isset_bitfield, __DATA_SIZE_ISSET_ID);
-  }
-
-  public void set_data_size_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __DATA_SIZE_ISSET_ID, value);
-  }
-
-  public void setFieldValue(_Fields field, Object value) {
-    switch (field) {
-    case VERSION:
-      if (value == null) {
-        unset_version();
-      } else {
-        set_version((Long)value);
-      }
-      break;
-
-    case SESSION:
-      if (value == null) {
-        unset_session();
-      } else {
-        set_session((String)value);
-      }
-      break;
-
-    case DATA_SIZE:
-      if (value == null) {
-        unset_data_size();
-      } else {
-        set_data_size((Long)value);
-      }
-      break;
-
-    }
-  }
-
-  public Object getFieldValue(_Fields field) {
-    switch (field) {
-    case VERSION:
-      return get_version();
-
-    case SESSION:
-      return get_session();
-
-    case DATA_SIZE:
-      return get_data_size();
-
-    }
-    throw new IllegalStateException();
-  }
-
-  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
-  public boolean isSet(_Fields field) {
-    if (field == null) {
-      throw new IllegalArgumentException();
-    }
-
-    switch (field) {
-    case VERSION:
-      return is_set_version();
-    case SESSION:
-      return is_set_session();
-    case DATA_SIZE:
-      return is_set_data_size();
-    }
-    throw new IllegalStateException();
-  }
-
-  @Override
-  public boolean equals(Object that) {
-    if (that == null)
-      return false;
-    if (that instanceof BeginDownloadResult)
-      return this.equals((BeginDownloadResult)that);
-    return false;
-  }
-
-  public boolean equals(BeginDownloadResult that) {
-    if (that == null)
-      return false;
-
-    boolean this_present_version = true;
-    boolean that_present_version = true;
-    if (this_present_version || that_present_version) {
-      if (!(this_present_version && that_present_version))
-        return false;
-      if (this.version != that.version)
-        return false;
-    }
-
-    boolean this_present_session = true && this.is_set_session();
-    boolean that_present_session = true && that.is_set_session();
-    if (this_present_session || that_present_session) {
-      if (!(this_present_session && that_present_session))
-        return false;
-      if (!this.session.equals(that.session))
-        return false;
-    }
-
-    boolean this_present_data_size = true && this.is_set_data_size();
-    boolean that_present_data_size = true && that.is_set_data_size();
-    if (this_present_data_size || that_present_data_size) {
-      if (!(this_present_data_size && that_present_data_size))
-        return false;
-      if (this.data_size != that.data_size)
-        return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    List<Object> list = new ArrayList<Object>();
-
-    boolean present_version = true;
-    list.add(present_version);
-    if (present_version)
-      list.add(version);
-
-    boolean present_session = true && (is_set_session());
-    list.add(present_session);
-    if (present_session)
-      list.add(session);
-
-    boolean present_data_size = true && (is_set_data_size());
-    list.add(present_data_size);
-    if (present_data_size)
-      list.add(data_size);
-
-    return list.hashCode();
-  }
-
-  @Override
-  public int compareTo(BeginDownloadResult other) {
-    if (!getClass().equals(other.getClass())) {
-      return getClass().getName().compareTo(other.getClass().getName());
-    }
-
-    int lastComparison = 0;
-
-    lastComparison = Boolean.valueOf(is_set_version()).compareTo(other.is_set_version());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_version()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.version, other.version);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_session()).compareTo(other.is_set_session());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_session()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.session, other.session);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_data_size()).compareTo(other.is_set_data_size());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_data_size()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.data_size, other.data_size);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    return 0;
-  }
-
-  public _Fields fieldForId(int fieldId) {
-    return _Fields.findByThriftId(fieldId);
-  }
-
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
-  }
-
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("BeginDownloadResult(");
-    boolean first = true;
-
-    sb.append("version:");
-    sb.append(this.version);
-    first = false;
-    if (!first) sb.append(", ");
-    sb.append("session:");
-    if (this.session == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.session);
-    }
-    first = false;
-    if (is_set_data_size()) {
-      if (!first) sb.append(", ");
-      sb.append("data_size:");
-      sb.append(this.data_size);
-      first = false;
-    }
-    sb.append(")");
-    return sb.toString();
-  }
-
-  public void validate() throws org.apache.thrift.TException {
-    // check for required fields
-    if (!is_set_version()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'version' is unset! Struct:" + toString());
-    }
-
-    if (!is_set_session()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'session' is unset! Struct:" + toString());
-    }
-
-    // check for sub-struct validity
-  }
-
-  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
-    try {
-      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
-    try {
-      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
-      __isset_bitfield = 0;
-      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private static class BeginDownloadResultStandardSchemeFactory implements SchemeFactory {
-    public BeginDownloadResultStandardScheme getScheme() {
-      return new BeginDownloadResultStandardScheme();
-    }
-  }
-
-  private static class BeginDownloadResultStandardScheme extends StandardScheme<BeginDownloadResult> {
-
-    public void read(org.apache.thrift.protocol.TProtocol iprot, BeginDownloadResult struct) throws org.apache.thrift.TException {
-      org.apache.thrift.protocol.TField schemeField;
-      iprot.readStructBegin();
-      while (true)
-      {
-        schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
-          break;
-        }
-        switch (schemeField.id) {
-          case 1: // VERSION
-            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
-              struct.version = iprot.readI64();
-              struct.set_version_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 2: // SESSION
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.session = iprot.readString();
-              struct.set_session_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 3: // DATA_SIZE
-            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
-              struct.data_size = iprot.readI64();
-              struct.set_data_size_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          default:
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-        }
-        iprot.readFieldEnd();
-      }
-      iprot.readStructEnd();
-      struct.validate();
-    }
-
-    public void write(org.apache.thrift.protocol.TProtocol oprot, BeginDownloadResult struct) throws org.apache.thrift.TException {
-      struct.validate();
-
-      oprot.writeStructBegin(STRUCT_DESC);
-      oprot.writeFieldBegin(VERSION_FIELD_DESC);
-      oprot.writeI64(struct.version);
-      oprot.writeFieldEnd();
-      if (struct.session != null) {
-        oprot.writeFieldBegin(SESSION_FIELD_DESC);
-        oprot.writeString(struct.session);
-        oprot.writeFieldEnd();
-      }
-      if (struct.is_set_data_size()) {
-        oprot.writeFieldBegin(DATA_SIZE_FIELD_DESC);
-        oprot.writeI64(struct.data_size);
-        oprot.writeFieldEnd();
-      }
-      oprot.writeFieldStop();
-      oprot.writeStructEnd();
-    }
-
-  }
-
-  private static class BeginDownloadResultTupleSchemeFactory implements SchemeFactory {
-    public BeginDownloadResultTupleScheme getScheme() {
-      return new BeginDownloadResultTupleScheme();
-    }
-  }
-
-  private static class BeginDownloadResultTupleScheme extends TupleScheme<BeginDownloadResult> {
-
-    @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, BeginDownloadResult struct) throws org.apache.thrift.TException {
-      TTupleProtocol oprot = (TTupleProtocol) prot;
-      oprot.writeI64(struct.version);
-      oprot.writeString(struct.session);
-      BitSet optionals = new BitSet();
-      if (struct.is_set_data_size()) {
-        optionals.set(0);
-      }
-      oprot.writeBitSet(optionals, 1);
-      if (struct.is_set_data_size()) {
-        oprot.writeI64(struct.data_size);
-      }
-    }
-
-    @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, BeginDownloadResult struct) throws org.apache.thrift.TException {
-      TTupleProtocol iprot = (TTupleProtocol) prot;
-      struct.version = iprot.readI64();
-      struct.set_version_isSet(true);
-      struct.session = iprot.readString();
-      struct.set_session_isSet(true);
-      BitSet incoming = iprot.readBitSet(1);
-      if (incoming.get(0)) {
-        struct.data_size = iprot.readI64();
-        struct.set_data_size_isSet(true);
-      }
-    }
-  }
-
-}
-


[02/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/generated/ComponentCommon.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/generated/ComponentCommon.java b/storm-core/src/jvm/backtype/storm/generated/ComponentCommon.java
deleted file mode 100644
index 9950756..0000000
--- a/storm-core/src/jvm/backtype/storm/generated/ComponentCommon.java
+++ /dev/null
@@ -1,852 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Autogenerated by Thrift Compiler (0.9.3)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- *  @generated
- */
-package backtype.storm.generated;
-
-import org.apache.thrift.scheme.IScheme;
-import org.apache.thrift.scheme.SchemeFactory;
-import org.apache.thrift.scheme.StandardScheme;
-
-import org.apache.thrift.scheme.TupleScheme;
-import org.apache.thrift.protocol.TTupleProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.TException;
-import org.apache.thrift.async.AsyncMethodCallback;
-import org.apache.thrift.server.AbstractNonblockingServer.*;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.EnumMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.EnumSet;
-import java.util.Collections;
-import java.util.BitSet;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import javax.annotation.Generated;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
-public class ComponentCommon implements org.apache.thrift.TBase<ComponentCommon, ComponentCommon._Fields>, java.io.Serializable, Cloneable, Comparable<ComponentCommon> {
-  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ComponentCommon");
-
-  private static final org.apache.thrift.protocol.TField INPUTS_FIELD_DESC = new org.apache.thrift.protocol.TField("inputs", org.apache.thrift.protocol.TType.MAP, (short)1);
-  private static final org.apache.thrift.protocol.TField STREAMS_FIELD_DESC = new org.apache.thrift.protocol.TField("streams", org.apache.thrift.protocol.TType.MAP, (short)2);
-  private static final org.apache.thrift.protocol.TField PARALLELISM_HINT_FIELD_DESC = new org.apache.thrift.protocol.TField("parallelism_hint", org.apache.thrift.protocol.TType.I32, (short)3);
-  private static final org.apache.thrift.protocol.TField JSON_CONF_FIELD_DESC = new org.apache.thrift.protocol.TField("json_conf", org.apache.thrift.protocol.TType.STRING, (short)4);
-
-  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
-  static {
-    schemes.put(StandardScheme.class, new ComponentCommonStandardSchemeFactory());
-    schemes.put(TupleScheme.class, new ComponentCommonTupleSchemeFactory());
-  }
-
-  private Map<GlobalStreamId,Grouping> inputs; // required
-  private Map<String,StreamInfo> streams; // required
-  private int parallelism_hint; // optional
-  private String json_conf; // optional
-
-  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    INPUTS((short)1, "inputs"),
-    STREAMS((short)2, "streams"),
-    PARALLELISM_HINT((short)3, "parallelism_hint"),
-    JSON_CONF((short)4, "json_conf");
-
-    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
-
-    static {
-      for (_Fields field : EnumSet.allOf(_Fields.class)) {
-        byName.put(field.getFieldName(), field);
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, or null if its not found.
-     */
-    public static _Fields findByThriftId(int fieldId) {
-      switch(fieldId) {
-        case 1: // INPUTS
-          return INPUTS;
-        case 2: // STREAMS
-          return STREAMS;
-        case 3: // PARALLELISM_HINT
-          return PARALLELISM_HINT;
-        case 4: // JSON_CONF
-          return JSON_CONF;
-        default:
-          return null;
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, throwing an exception
-     * if it is not found.
-     */
-    public static _Fields findByThriftIdOrThrow(int fieldId) {
-      _Fields fields = findByThriftId(fieldId);
-      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-      return fields;
-    }
-
-    /**
-     * Find the _Fields constant that matches name, or null if its not found.
-     */
-    public static _Fields findByName(String name) {
-      return byName.get(name);
-    }
-
-    private final short _thriftId;
-    private final String _fieldName;
-
-    _Fields(short thriftId, String fieldName) {
-      _thriftId = thriftId;
-      _fieldName = fieldName;
-    }
-
-    public short getThriftFieldId() {
-      return _thriftId;
-    }
-
-    public String getFieldName() {
-      return _fieldName;
-    }
-  }
-
-  // isset id assignments
-  private static final int __PARALLELISM_HINT_ISSET_ID = 0;
-  private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = {_Fields.PARALLELISM_HINT,_Fields.JSON_CONF};
-  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
-  static {
-    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.INPUTS, new org.apache.thrift.meta_data.FieldMetaData("inputs", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
-            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GlobalStreamId.class), 
-            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Grouping.class))));
-    tmpMap.put(_Fields.STREAMS, new org.apache.thrift.meta_data.FieldMetaData("streams", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
-            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
-            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, StreamInfo.class))));
-    tmpMap.put(_Fields.PARALLELISM_HINT, new org.apache.thrift.meta_data.FieldMetaData("parallelism_hint", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
-    tmpMap.put(_Fields.JSON_CONF, new org.apache.thrift.meta_data.FieldMetaData("json_conf", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    metaDataMap = Collections.unmodifiableMap(tmpMap);
-    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ComponentCommon.class, metaDataMap);
-  }
-
-  public ComponentCommon() {
-  }
-
-  public ComponentCommon(
-    Map<GlobalStreamId,Grouping> inputs,
-    Map<String,StreamInfo> streams)
-  {
-    this();
-    this.inputs = inputs;
-    this.streams = streams;
-  }
-
-  /**
-   * Performs a deep copy on <i>other</i>.
-   */
-  public ComponentCommon(ComponentCommon other) {
-    __isset_bitfield = other.__isset_bitfield;
-    if (other.is_set_inputs()) {
-      Map<GlobalStreamId,Grouping> __this__inputs = new HashMap<GlobalStreamId,Grouping>(other.inputs.size());
-      for (Map.Entry<GlobalStreamId, Grouping> other_element : other.inputs.entrySet()) {
-
-        GlobalStreamId other_element_key = other_element.getKey();
-        Grouping other_element_value = other_element.getValue();
-
-        GlobalStreamId __this__inputs_copy_key = new GlobalStreamId(other_element_key);
-
-        Grouping __this__inputs_copy_value = new Grouping(other_element_value);
-
-        __this__inputs.put(__this__inputs_copy_key, __this__inputs_copy_value);
-      }
-      this.inputs = __this__inputs;
-    }
-    if (other.is_set_streams()) {
-      Map<String,StreamInfo> __this__streams = new HashMap<String,StreamInfo>(other.streams.size());
-      for (Map.Entry<String, StreamInfo> other_element : other.streams.entrySet()) {
-
-        String other_element_key = other_element.getKey();
-        StreamInfo other_element_value = other_element.getValue();
-
-        String __this__streams_copy_key = other_element_key;
-
-        StreamInfo __this__streams_copy_value = new StreamInfo(other_element_value);
-
-        __this__streams.put(__this__streams_copy_key, __this__streams_copy_value);
-      }
-      this.streams = __this__streams;
-    }
-    this.parallelism_hint = other.parallelism_hint;
-    if (other.is_set_json_conf()) {
-      this.json_conf = other.json_conf;
-    }
-  }
-
-  public ComponentCommon deepCopy() {
-    return new ComponentCommon(this);
-  }
-
-  @Override
-  public void clear() {
-    this.inputs = null;
-    this.streams = null;
-    set_parallelism_hint_isSet(false);
-    this.parallelism_hint = 0;
-    this.json_conf = null;
-  }
-
-  public int get_inputs_size() {
-    return (this.inputs == null) ? 0 : this.inputs.size();
-  }
-
-  public void put_to_inputs(GlobalStreamId key, Grouping val) {
-    if (this.inputs == null) {
-      this.inputs = new HashMap<GlobalStreamId,Grouping>();
-    }
-    this.inputs.put(key, val);
-  }
-
-  public Map<GlobalStreamId,Grouping> get_inputs() {
-    return this.inputs;
-  }
-
-  public void set_inputs(Map<GlobalStreamId,Grouping> inputs) {
-    this.inputs = inputs;
-  }
-
-  public void unset_inputs() {
-    this.inputs = null;
-  }
-
-  /** Returns true if field inputs is set (has been assigned a value) and false otherwise */
-  public boolean is_set_inputs() {
-    return this.inputs != null;
-  }
-
-  public void set_inputs_isSet(boolean value) {
-    if (!value) {
-      this.inputs = null;
-    }
-  }
-
-  public int get_streams_size() {
-    return (this.streams == null) ? 0 : this.streams.size();
-  }
-
-  public void put_to_streams(String key, StreamInfo val) {
-    if (this.streams == null) {
-      this.streams = new HashMap<String,StreamInfo>();
-    }
-    this.streams.put(key, val);
-  }
-
-  public Map<String,StreamInfo> get_streams() {
-    return this.streams;
-  }
-
-  public void set_streams(Map<String,StreamInfo> streams) {
-    this.streams = streams;
-  }
-
-  public void unset_streams() {
-    this.streams = null;
-  }
-
-  /** Returns true if field streams is set (has been assigned a value) and false otherwise */
-  public boolean is_set_streams() {
-    return this.streams != null;
-  }
-
-  public void set_streams_isSet(boolean value) {
-    if (!value) {
-      this.streams = null;
-    }
-  }
-
-  public int get_parallelism_hint() {
-    return this.parallelism_hint;
-  }
-
-  public void set_parallelism_hint(int parallelism_hint) {
-    this.parallelism_hint = parallelism_hint;
-    set_parallelism_hint_isSet(true);
-  }
-
-  public void unset_parallelism_hint() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __PARALLELISM_HINT_ISSET_ID);
-  }
-
-  /** Returns true if field parallelism_hint is set (has been assigned a value) and false otherwise */
-  public boolean is_set_parallelism_hint() {
-    return EncodingUtils.testBit(__isset_bitfield, __PARALLELISM_HINT_ISSET_ID);
-  }
-
-  public void set_parallelism_hint_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __PARALLELISM_HINT_ISSET_ID, value);
-  }
-
-  public String get_json_conf() {
-    return this.json_conf;
-  }
-
-  public void set_json_conf(String json_conf) {
-    this.json_conf = json_conf;
-  }
-
-  public void unset_json_conf() {
-    this.json_conf = null;
-  }
-
-  /** Returns true if field json_conf is set (has been assigned a value) and false otherwise */
-  public boolean is_set_json_conf() {
-    return this.json_conf != null;
-  }
-
-  public void set_json_conf_isSet(boolean value) {
-    if (!value) {
-      this.json_conf = null;
-    }
-  }
-
-  public void setFieldValue(_Fields field, Object value) {
-    switch (field) {
-    case INPUTS:
-      if (value == null) {
-        unset_inputs();
-      } else {
-        set_inputs((Map<GlobalStreamId,Grouping>)value);
-      }
-      break;
-
-    case STREAMS:
-      if (value == null) {
-        unset_streams();
-      } else {
-        set_streams((Map<String,StreamInfo>)value);
-      }
-      break;
-
-    case PARALLELISM_HINT:
-      if (value == null) {
-        unset_parallelism_hint();
-      } else {
-        set_parallelism_hint((Integer)value);
-      }
-      break;
-
-    case JSON_CONF:
-      if (value == null) {
-        unset_json_conf();
-      } else {
-        set_json_conf((String)value);
-      }
-      break;
-
-    }
-  }
-
-  public Object getFieldValue(_Fields field) {
-    switch (field) {
-    case INPUTS:
-      return get_inputs();
-
-    case STREAMS:
-      return get_streams();
-
-    case PARALLELISM_HINT:
-      return get_parallelism_hint();
-
-    case JSON_CONF:
-      return get_json_conf();
-
-    }
-    throw new IllegalStateException();
-  }
-
-  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
-  public boolean isSet(_Fields field) {
-    if (field == null) {
-      throw new IllegalArgumentException();
-    }
-
-    switch (field) {
-    case INPUTS:
-      return is_set_inputs();
-    case STREAMS:
-      return is_set_streams();
-    case PARALLELISM_HINT:
-      return is_set_parallelism_hint();
-    case JSON_CONF:
-      return is_set_json_conf();
-    }
-    throw new IllegalStateException();
-  }
-
-  @Override
-  public boolean equals(Object that) {
-    if (that == null)
-      return false;
-    if (that instanceof ComponentCommon)
-      return this.equals((ComponentCommon)that);
-    return false;
-  }
-
-  public boolean equals(ComponentCommon that) {
-    if (that == null)
-      return false;
-
-    boolean this_present_inputs = true && this.is_set_inputs();
-    boolean that_present_inputs = true && that.is_set_inputs();
-    if (this_present_inputs || that_present_inputs) {
-      if (!(this_present_inputs && that_present_inputs))
-        return false;
-      if (!this.inputs.equals(that.inputs))
-        return false;
-    }
-
-    boolean this_present_streams = true && this.is_set_streams();
-    boolean that_present_streams = true && that.is_set_streams();
-    if (this_present_streams || that_present_streams) {
-      if (!(this_present_streams && that_present_streams))
-        return false;
-      if (!this.streams.equals(that.streams))
-        return false;
-    }
-
-    boolean this_present_parallelism_hint = true && this.is_set_parallelism_hint();
-    boolean that_present_parallelism_hint = true && that.is_set_parallelism_hint();
-    if (this_present_parallelism_hint || that_present_parallelism_hint) {
-      if (!(this_present_parallelism_hint && that_present_parallelism_hint))
-        return false;
-      if (this.parallelism_hint != that.parallelism_hint)
-        return false;
-    }
-
-    boolean this_present_json_conf = true && this.is_set_json_conf();
-    boolean that_present_json_conf = true && that.is_set_json_conf();
-    if (this_present_json_conf || that_present_json_conf) {
-      if (!(this_present_json_conf && that_present_json_conf))
-        return false;
-      if (!this.json_conf.equals(that.json_conf))
-        return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    List<Object> list = new ArrayList<Object>();
-
-    boolean present_inputs = true && (is_set_inputs());
-    list.add(present_inputs);
-    if (present_inputs)
-      list.add(inputs);
-
-    boolean present_streams = true && (is_set_streams());
-    list.add(present_streams);
-    if (present_streams)
-      list.add(streams);
-
-    boolean present_parallelism_hint = true && (is_set_parallelism_hint());
-    list.add(present_parallelism_hint);
-    if (present_parallelism_hint)
-      list.add(parallelism_hint);
-
-    boolean present_json_conf = true && (is_set_json_conf());
-    list.add(present_json_conf);
-    if (present_json_conf)
-      list.add(json_conf);
-
-    return list.hashCode();
-  }
-
-  @Override
-  public int compareTo(ComponentCommon other) {
-    if (!getClass().equals(other.getClass())) {
-      return getClass().getName().compareTo(other.getClass().getName());
-    }
-
-    int lastComparison = 0;
-
-    lastComparison = Boolean.valueOf(is_set_inputs()).compareTo(other.is_set_inputs());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_inputs()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.inputs, other.inputs);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_streams()).compareTo(other.is_set_streams());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_streams()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.streams, other.streams);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_parallelism_hint()).compareTo(other.is_set_parallelism_hint());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_parallelism_hint()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.parallelism_hint, other.parallelism_hint);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_json_conf()).compareTo(other.is_set_json_conf());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_json_conf()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.json_conf, other.json_conf);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    return 0;
-  }
-
-  public _Fields fieldForId(int fieldId) {
-    return _Fields.findByThriftId(fieldId);
-  }
-
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
-  }
-
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("ComponentCommon(");
-    boolean first = true;
-
-    sb.append("inputs:");
-    if (this.inputs == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.inputs);
-    }
-    first = false;
-    if (!first) sb.append(", ");
-    sb.append("streams:");
-    if (this.streams == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.streams);
-    }
-    first = false;
-    if (is_set_parallelism_hint()) {
-      if (!first) sb.append(", ");
-      sb.append("parallelism_hint:");
-      sb.append(this.parallelism_hint);
-      first = false;
-    }
-    if (is_set_json_conf()) {
-      if (!first) sb.append(", ");
-      sb.append("json_conf:");
-      if (this.json_conf == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.json_conf);
-      }
-      first = false;
-    }
-    sb.append(")");
-    return sb.toString();
-  }
-
-  public void validate() throws org.apache.thrift.TException {
-    // check for required fields
-    if (!is_set_inputs()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'inputs' is unset! Struct:" + toString());
-    }
-
-    if (!is_set_streams()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'streams' is unset! Struct:" + toString());
-    }
-
-    // check for sub-struct validity
-  }
-
-  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
-    try {
-      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
-    try {
-      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
-      __isset_bitfield = 0;
-      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private static class ComponentCommonStandardSchemeFactory implements SchemeFactory {
-    public ComponentCommonStandardScheme getScheme() {
-      return new ComponentCommonStandardScheme();
-    }
-  }
-
-  private static class ComponentCommonStandardScheme extends StandardScheme<ComponentCommon> {
-
-    public void read(org.apache.thrift.protocol.TProtocol iprot, ComponentCommon struct) throws org.apache.thrift.TException {
-      org.apache.thrift.protocol.TField schemeField;
-      iprot.readStructBegin();
-      while (true)
-      {
-        schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
-          break;
-        }
-        switch (schemeField.id) {
-          case 1: // INPUTS
-            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
-              {
-                org.apache.thrift.protocol.TMap _map24 = iprot.readMapBegin();
-                struct.inputs = new HashMap<GlobalStreamId,Grouping>(2*_map24.size);
-                GlobalStreamId _key25;
-                Grouping _val26;
-                for (int _i27 = 0; _i27 < _map24.size; ++_i27)
-                {
-                  _key25 = new GlobalStreamId();
-                  _key25.read(iprot);
-                  _val26 = new Grouping();
-                  _val26.read(iprot);
-                  struct.inputs.put(_key25, _val26);
-                }
-                iprot.readMapEnd();
-              }
-              struct.set_inputs_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 2: // STREAMS
-            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
-              {
-                org.apache.thrift.protocol.TMap _map28 = iprot.readMapBegin();
-                struct.streams = new HashMap<String,StreamInfo>(2*_map28.size);
-                String _key29;
-                StreamInfo _val30;
-                for (int _i31 = 0; _i31 < _map28.size; ++_i31)
-                {
-                  _key29 = iprot.readString();
-                  _val30 = new StreamInfo();
-                  _val30.read(iprot);
-                  struct.streams.put(_key29, _val30);
-                }
-                iprot.readMapEnd();
-              }
-              struct.set_streams_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 3: // PARALLELISM_HINT
-            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
-              struct.parallelism_hint = iprot.readI32();
-              struct.set_parallelism_hint_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 4: // JSON_CONF
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.json_conf = iprot.readString();
-              struct.set_json_conf_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          default:
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-        }
-        iprot.readFieldEnd();
-      }
-      iprot.readStructEnd();
-      struct.validate();
-    }
-
-    public void write(org.apache.thrift.protocol.TProtocol oprot, ComponentCommon struct) throws org.apache.thrift.TException {
-      struct.validate();
-
-      oprot.writeStructBegin(STRUCT_DESC);
-      if (struct.inputs != null) {
-        oprot.writeFieldBegin(INPUTS_FIELD_DESC);
-        {
-          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.STRUCT, struct.inputs.size()));
-          for (Map.Entry<GlobalStreamId, Grouping> _iter32 : struct.inputs.entrySet())
-          {
-            _iter32.getKey().write(oprot);
-            _iter32.getValue().write(oprot);
-          }
-          oprot.writeMapEnd();
-        }
-        oprot.writeFieldEnd();
-      }
-      if (struct.streams != null) {
-        oprot.writeFieldBegin(STREAMS_FIELD_DESC);
-        {
-          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.streams.size()));
-          for (Map.Entry<String, StreamInfo> _iter33 : struct.streams.entrySet())
-          {
-            oprot.writeString(_iter33.getKey());
-            _iter33.getValue().write(oprot);
-          }
-          oprot.writeMapEnd();
-        }
-        oprot.writeFieldEnd();
-      }
-      if (struct.is_set_parallelism_hint()) {
-        oprot.writeFieldBegin(PARALLELISM_HINT_FIELD_DESC);
-        oprot.writeI32(struct.parallelism_hint);
-        oprot.writeFieldEnd();
-      }
-      if (struct.json_conf != null) {
-        if (struct.is_set_json_conf()) {
-          oprot.writeFieldBegin(JSON_CONF_FIELD_DESC);
-          oprot.writeString(struct.json_conf);
-          oprot.writeFieldEnd();
-        }
-      }
-      oprot.writeFieldStop();
-      oprot.writeStructEnd();
-    }
-
-  }
-
-  private static class ComponentCommonTupleSchemeFactory implements SchemeFactory {
-    public ComponentCommonTupleScheme getScheme() {
-      return new ComponentCommonTupleScheme();
-    }
-  }
-
-  private static class ComponentCommonTupleScheme extends TupleScheme<ComponentCommon> {
-
-    @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, ComponentCommon struct) throws org.apache.thrift.TException {
-      TTupleProtocol oprot = (TTupleProtocol) prot;
-      {
-        oprot.writeI32(struct.inputs.size());
-        for (Map.Entry<GlobalStreamId, Grouping> _iter34 : struct.inputs.entrySet())
-        {
-          _iter34.getKey().write(oprot);
-          _iter34.getValue().write(oprot);
-        }
-      }
-      {
-        oprot.writeI32(struct.streams.size());
-        for (Map.Entry<String, StreamInfo> _iter35 : struct.streams.entrySet())
-        {
-          oprot.writeString(_iter35.getKey());
-          _iter35.getValue().write(oprot);
-        }
-      }
-      BitSet optionals = new BitSet();
-      if (struct.is_set_parallelism_hint()) {
-        optionals.set(0);
-      }
-      if (struct.is_set_json_conf()) {
-        optionals.set(1);
-      }
-      oprot.writeBitSet(optionals, 2);
-      if (struct.is_set_parallelism_hint()) {
-        oprot.writeI32(struct.parallelism_hint);
-      }
-      if (struct.is_set_json_conf()) {
-        oprot.writeString(struct.json_conf);
-      }
-    }
-
-    @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, ComponentCommon struct) throws org.apache.thrift.TException {
-      TTupleProtocol iprot = (TTupleProtocol) prot;
-      {
-        org.apache.thrift.protocol.TMap _map36 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.inputs = new HashMap<GlobalStreamId,Grouping>(2*_map36.size);
-        GlobalStreamId _key37;
-        Grouping _val38;
-        for (int _i39 = 0; _i39 < _map36.size; ++_i39)
-        {
-          _key37 = new GlobalStreamId();
-          _key37.read(iprot);
-          _val38 = new Grouping();
-          _val38.read(iprot);
-          struct.inputs.put(_key37, _val38);
-        }
-      }
-      struct.set_inputs_isSet(true);
-      {
-        org.apache.thrift.protocol.TMap _map40 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.streams = new HashMap<String,StreamInfo>(2*_map40.size);
-        String _key41;
-        StreamInfo _val42;
-        for (int _i43 = 0; _i43 < _map40.size; ++_i43)
-        {
-          _key41 = iprot.readString();
-          _val42 = new StreamInfo();
-          _val42.read(iprot);
-          struct.streams.put(_key41, _val42);
-        }
-      }
-      struct.set_streams_isSet(true);
-      BitSet incoming = iprot.readBitSet(2);
-      if (incoming.get(0)) {
-        struct.parallelism_hint = iprot.readI32();
-        struct.set_parallelism_hint_isSet(true);
-      }
-      if (incoming.get(1)) {
-        struct.json_conf = iprot.readString();
-        struct.set_json_conf_isSet(true);
-      }
-    }
-  }
-
-}
-
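The ComponentCommon struct deleted above carries a component's wiring: the required inputs and streams maps, plus the optional parallelism_hint and json_conf fields that the tuple scheme only serializes when set. A minimal usage sketch follows; it assumes the GlobalStreamId, Grouping, StreamInfo and NullStruct classes generated from the same storm.thrift definitions, and the component and stream names are illustrative only.

    import backtype.storm.generated.ComponentCommon;
    import backtype.storm.generated.GlobalStreamId;
    import backtype.storm.generated.Grouping;
    import backtype.storm.generated.NullStruct;
    import backtype.storm.generated.StreamInfo;

    import java.util.Arrays;
    import java.util.HashMap;

    public class ComponentCommonSketch {
        public static void main(String[] args) {
            // The two-argument constructor covers the required fields (inputs and streams).
            ComponentCommon common = new ComponentCommon(
                new HashMap<GlobalStreamId, Grouping>(),
                new HashMap<String, StreamInfo>());

            // Subscribe to the "default" stream of a hypothetical "words" component with shuffle grouping
            // (GlobalStreamId(componentId, streamId) and Grouping.shuffle(...) are assumed from storm.thrift).
            common.put_to_inputs(new GlobalStreamId("words", "default"),
                                 Grouping.shuffle(new NullStruct()));

            // Declare one output stream with two fields; 'false' means it is not a direct stream.
            common.put_to_streams("default", new StreamInfo(Arrays.asList("word", "count"), false));

            // The remaining fields are optional and are only written when their isSet flags are true.
            common.set_parallelism_hint(4);
            common.set_json_conf("{\"topology.tick.tuple.freq.secs\": 10}");

            System.out.println(common);
        }
    }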

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/generated/ComponentObject.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/generated/ComponentObject.java b/storm-core/src/jvm/backtype/storm/generated/ComponentObject.java
deleted file mode 100644
index 722e7db..0000000
--- a/storm-core/src/jvm/backtype/storm/generated/ComponentObject.java
+++ /dev/null
@@ -1,462 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Autogenerated by Thrift Compiler (0.9.3)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- *  @generated
- */
-package backtype.storm.generated;
-
-import org.apache.thrift.scheme.IScheme;
-import org.apache.thrift.scheme.SchemeFactory;
-import org.apache.thrift.scheme.StandardScheme;
-
-import org.apache.thrift.scheme.TupleScheme;
-import org.apache.thrift.protocol.TTupleProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.TException;
-import org.apache.thrift.async.AsyncMethodCallback;
-import org.apache.thrift.server.AbstractNonblockingServer.*;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.EnumMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.EnumSet;
-import java.util.Collections;
-import java.util.BitSet;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import javax.annotation.Generated;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-public class ComponentObject extends org.apache.thrift.TUnion<ComponentObject, ComponentObject._Fields> {
-  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ComponentObject");
-  private static final org.apache.thrift.protocol.TField SERIALIZED_JAVA_FIELD_DESC = new org.apache.thrift.protocol.TField("serialized_java", org.apache.thrift.protocol.TType.STRING, (short)1);
-  private static final org.apache.thrift.protocol.TField SHELL_FIELD_DESC = new org.apache.thrift.protocol.TField("shell", org.apache.thrift.protocol.TType.STRUCT, (short)2);
-  private static final org.apache.thrift.protocol.TField JAVA_OBJECT_FIELD_DESC = new org.apache.thrift.protocol.TField("java_object", org.apache.thrift.protocol.TType.STRUCT, (short)3);
-
-  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    SERIALIZED_JAVA((short)1, "serialized_java"),
-    SHELL((short)2, "shell"),
-    JAVA_OBJECT((short)3, "java_object");
-
-    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
-
-    static {
-      for (_Fields field : EnumSet.allOf(_Fields.class)) {
-        byName.put(field.getFieldName(), field);
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, or null if its not found.
-     */
-    public static _Fields findByThriftId(int fieldId) {
-      switch(fieldId) {
-        case 1: // SERIALIZED_JAVA
-          return SERIALIZED_JAVA;
-        case 2: // SHELL
-          return SHELL;
-        case 3: // JAVA_OBJECT
-          return JAVA_OBJECT;
-        default:
-          return null;
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, throwing an exception
-     * if it is not found.
-     */
-    public static _Fields findByThriftIdOrThrow(int fieldId) {
-      _Fields fields = findByThriftId(fieldId);
-      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-      return fields;
-    }
-
-    /**
-     * Find the _Fields constant that matches name, or null if its not found.
-     */
-    public static _Fields findByName(String name) {
-      return byName.get(name);
-    }
-
-    private final short _thriftId;
-    private final String _fieldName;
-
-    _Fields(short thriftId, String fieldName) {
-      _thriftId = thriftId;
-      _fieldName = fieldName;
-    }
-
-    public short getThriftFieldId() {
-      return _thriftId;
-    }
-
-    public String getFieldName() {
-      return _fieldName;
-    }
-  }
-
-  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
-  static {
-    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.SERIALIZED_JAVA, new org.apache.thrift.meta_data.FieldMetaData("serialized_java", org.apache.thrift.TFieldRequirementType.DEFAULT, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
-    tmpMap.put(_Fields.SHELL, new org.apache.thrift.meta_data.FieldMetaData("shell", org.apache.thrift.TFieldRequirementType.DEFAULT, 
-        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ShellComponent.class)));
-    tmpMap.put(_Fields.JAVA_OBJECT, new org.apache.thrift.meta_data.FieldMetaData("java_object", org.apache.thrift.TFieldRequirementType.DEFAULT, 
-        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, JavaObject.class)));
-    metaDataMap = Collections.unmodifiableMap(tmpMap);
-    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ComponentObject.class, metaDataMap);
-  }
-
-  public ComponentObject() {
-    super();
-  }
-
-  public ComponentObject(_Fields setField, Object value) {
-    super(setField, value);
-  }
-
-  public ComponentObject(ComponentObject other) {
-    super(other);
-  }
-  public ComponentObject deepCopy() {
-    return new ComponentObject(this);
-  }
-
-  public static ComponentObject serialized_java(ByteBuffer value) {
-    ComponentObject x = new ComponentObject();
-    x.set_serialized_java(value);
-    return x;
-  }
-
-  public static ComponentObject serialized_java(byte[] value) {
-    ComponentObject x = new ComponentObject();
-    x.set_serialized_java(ByteBuffer.wrap(Arrays.copyOf(value, value.length)));
-    return x;
-  }
-
-  public static ComponentObject shell(ShellComponent value) {
-    ComponentObject x = new ComponentObject();
-    x.set_shell(value);
-    return x;
-  }
-
-  public static ComponentObject java_object(JavaObject value) {
-    ComponentObject x = new ComponentObject();
-    x.set_java_object(value);
-    return x;
-  }
-
-
-  @Override
-  protected void checkType(_Fields setField, Object value) throws ClassCastException {
-    switch (setField) {
-      case SERIALIZED_JAVA:
-        if (value instanceof ByteBuffer) {
-          break;
-        }
-        throw new ClassCastException("Was expecting value of type ByteBuffer for field 'serialized_java', but got " + value.getClass().getSimpleName());
-      case SHELL:
-        if (value instanceof ShellComponent) {
-          break;
-        }
-        throw new ClassCastException("Was expecting value of type ShellComponent for field 'shell', but got " + value.getClass().getSimpleName());
-      case JAVA_OBJECT:
-        if (value instanceof JavaObject) {
-          break;
-        }
-        throw new ClassCastException("Was expecting value of type JavaObject for field 'java_object', but got " + value.getClass().getSimpleName());
-      default:
-        throw new IllegalArgumentException("Unknown field id " + setField);
-    }
-  }
-
-  @Override
-  protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TField field) throws org.apache.thrift.TException {
-    _Fields setField = _Fields.findByThriftId(field.id);
-    if (setField != null) {
-      switch (setField) {
-        case SERIALIZED_JAVA:
-          if (field.type == SERIALIZED_JAVA_FIELD_DESC.type) {
-            ByteBuffer serialized_java;
-            serialized_java = iprot.readBinary();
-            return serialized_java;
-          } else {
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
-            return null;
-          }
-        case SHELL:
-          if (field.type == SHELL_FIELD_DESC.type) {
-            ShellComponent shell;
-            shell = new ShellComponent();
-            shell.read(iprot);
-            return shell;
-          } else {
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
-            return null;
-          }
-        case JAVA_OBJECT:
-          if (field.type == JAVA_OBJECT_FIELD_DESC.type) {
-            JavaObject java_object;
-            java_object = new JavaObject();
-            java_object.read(iprot);
-            return java_object;
-          } else {
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
-            return null;
-          }
-        default:
-          throw new IllegalStateException("setField wasn't null, but didn't match any of the case statements!");
-      }
-    } else {
-      org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
-      return null;
-    }
-  }
-
-  @Override
-  protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    switch (setField_) {
-      case SERIALIZED_JAVA:
-        ByteBuffer serialized_java = (ByteBuffer)value_;
-        oprot.writeBinary(serialized_java);
-        return;
-      case SHELL:
-        ShellComponent shell = (ShellComponent)value_;
-        shell.write(oprot);
-        return;
-      case JAVA_OBJECT:
-        JavaObject java_object = (JavaObject)value_;
-        java_object.write(oprot);
-        return;
-      default:
-        throw new IllegalStateException("Cannot write union with unknown field " + setField_);
-    }
-  }
-
-  @Override
-  protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, short fieldID) throws org.apache.thrift.TException {
-    _Fields setField = _Fields.findByThriftId(fieldID);
-    if (setField != null) {
-      switch (setField) {
-        case SERIALIZED_JAVA:
-          ByteBuffer serialized_java;
-          serialized_java = iprot.readBinary();
-          return serialized_java;
-        case SHELL:
-          ShellComponent shell;
-          shell = new ShellComponent();
-          shell.read(iprot);
-          return shell;
-        case JAVA_OBJECT:
-          JavaObject java_object;
-          java_object = new JavaObject();
-          java_object.read(iprot);
-          return java_object;
-        default:
-          throw new IllegalStateException("setField wasn't null, but didn't match any of the case statements!");
-      }
-    } else {
-      throw new TProtocolException("Couldn't find a field with field id " + fieldID);
-    }
-  }
-
-  @Override
-  protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    switch (setField_) {
-      case SERIALIZED_JAVA:
-        ByteBuffer serialized_java = (ByteBuffer)value_;
-        oprot.writeBinary(serialized_java);
-        return;
-      case SHELL:
-        ShellComponent shell = (ShellComponent)value_;
-        shell.write(oprot);
-        return;
-      case JAVA_OBJECT:
-        JavaObject java_object = (JavaObject)value_;
-        java_object.write(oprot);
-        return;
-      default:
-        throw new IllegalStateException("Cannot write union with unknown field " + setField_);
-    }
-  }
-
-  @Override
-  protected org.apache.thrift.protocol.TField getFieldDesc(_Fields setField) {
-    switch (setField) {
-      case SERIALIZED_JAVA:
-        return SERIALIZED_JAVA_FIELD_DESC;
-      case SHELL:
-        return SHELL_FIELD_DESC;
-      case JAVA_OBJECT:
-        return JAVA_OBJECT_FIELD_DESC;
-      default:
-        throw new IllegalArgumentException("Unknown field id " + setField);
-    }
-  }
-
-  @Override
-  protected org.apache.thrift.protocol.TStruct getStructDesc() {
-    return STRUCT_DESC;
-  }
-
-  @Override
-  protected _Fields enumForId(short id) {
-    return _Fields.findByThriftIdOrThrow(id);
-  }
-
-  public _Fields fieldForId(int fieldId) {
-    return _Fields.findByThriftId(fieldId);
-  }
-
-
-  public byte[] get_serialized_java() {
-    set_serialized_java(org.apache.thrift.TBaseHelper.rightSize(buffer_for_serialized_java()));
-    ByteBuffer b = buffer_for_serialized_java();
-    return b == null ? null : b.array();
-  }
-
-  public ByteBuffer buffer_for_serialized_java() {
-    if (getSetField() == _Fields.SERIALIZED_JAVA) {
-      return org.apache.thrift.TBaseHelper.copyBinary((ByteBuffer)getFieldValue());
-    } else {
-      throw new RuntimeException("Cannot get field 'serialized_java' because union is currently set to " + getFieldDesc(getSetField()).name);
-    }
-  }
-
-  public void set_serialized_java(byte[] value) {
-    set_serialized_java(ByteBuffer.wrap(Arrays.copyOf(value, value.length)));
-  }
-
-  public void set_serialized_java(ByteBuffer value) {
-    if (value == null) throw new NullPointerException();
-    setField_ = _Fields.SERIALIZED_JAVA;
-    value_ = value;
-  }
-
-  public ShellComponent get_shell() {
-    if (getSetField() == _Fields.SHELL) {
-      return (ShellComponent)getFieldValue();
-    } else {
-      throw new RuntimeException("Cannot get field 'shell' because union is currently set to " + getFieldDesc(getSetField()).name);
-    }
-  }
-
-  public void set_shell(ShellComponent value) {
-    if (value == null) throw new NullPointerException();
-    setField_ = _Fields.SHELL;
-    value_ = value;
-  }
-
-  public JavaObject get_java_object() {
-    if (getSetField() == _Fields.JAVA_OBJECT) {
-      return (JavaObject)getFieldValue();
-    } else {
-      throw new RuntimeException("Cannot get field 'java_object' because union is currently set to " + getFieldDesc(getSetField()).name);
-    }
-  }
-
-  public void set_java_object(JavaObject value) {
-    if (value == null) throw new NullPointerException();
-    setField_ = _Fields.JAVA_OBJECT;
-    value_ = value;
-  }
-
-  public boolean is_set_serialized_java() {
-    return setField_ == _Fields.SERIALIZED_JAVA;
-  }
-
-
-  public boolean is_set_shell() {
-    return setField_ == _Fields.SHELL;
-  }
-
-
-  public boolean is_set_java_object() {
-    return setField_ == _Fields.JAVA_OBJECT;
-  }
-
-
-  public boolean equals(Object other) {
-    if (other instanceof ComponentObject) {
-      return equals((ComponentObject)other);
-    } else {
-      return false;
-    }
-  }
-
-  public boolean equals(ComponentObject other) {
-    return other != null && getSetField() == other.getSetField() && getFieldValue().equals(other.getFieldValue());
-  }
-
-  @Override
-  public int compareTo(ComponentObject other) {
-    int lastComparison = org.apache.thrift.TBaseHelper.compareTo(getSetField(), other.getSetField());
-    if (lastComparison == 0) {
-      return org.apache.thrift.TBaseHelper.compareTo(getFieldValue(), other.getFieldValue());
-    }
-    return lastComparison;
-  }
-
-
-  @Override
-  public int hashCode() {
-    List<Object> list = new ArrayList<Object>();
-    list.add(this.getClass().getName());
-    org.apache.thrift.TFieldIdEnum setField = getSetField();
-    if (setField != null) {
-      list.add(setField.getThriftFieldId());
-      Object value = getFieldValue();
-      if (value instanceof org.apache.thrift.TEnum) {
-        list.add(((org.apache.thrift.TEnum)getFieldValue()).getValue());
-      } else {
-        list.add(value);
-      }
-    }
-    return list.hashCode();
-  }
-  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
-    try {
-      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-
-  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
-    try {
-      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-
-}
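ComponentObject, deleted above, is a Thrift union: exactly one of serialized_java, shell or java_object is set at a time, the static factory methods build each variant, and the get_* accessors throw if a different variant is set. A small illustrative sketch, using only the generated classes referenced in this diff plus placeholder data; the ShellComponent(execution_command, script) constructor is assumed from the corresponding storm.thrift struct.

    import backtype.storm.generated.ComponentObject;
    import backtype.storm.generated.ShellComponent;

    public class ComponentObjectSketch {
        public static void main(String[] args) {
            // Variant 1: an opaque, Java-serialized component (the byte[] here is placeholder data).
            ComponentObject serialized = ComponentObject.serialized_java(new byte[] {1, 2, 3});

            // Variant 2: a multilang component described by a ShellComponent.
            ComponentObject shell =
                ComponentObject.shell(new ShellComponent("python", "splitsentence.py"));

            // Only the variant that was set may be read back; reading any other field throws.
            if (serialized.is_set_serialized_java()) {
                byte[] payload = serialized.get_serialized_java();
                System.out.println("serialized_java has " + payload.length + " bytes");
            }
            System.out.println("shell variant set: " + shell.is_set_shell());
        }
    }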


[09/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/blobstore/BlobSynchronizer.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/blobstore/BlobSynchronizer.java b/storm-core/src/jvm/backtype/storm/blobstore/BlobSynchronizer.java
deleted file mode 100644
index 1f20d7c..0000000
--- a/storm-core/src/jvm/backtype/storm/blobstore/BlobSynchronizer.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.blobstore;
-
-import backtype.storm.nimbus.NimbusInfo;
-import org.apache.curator.framework.CuratorFramework;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Called periodically on a non-leader nimbus to update its blobs from the state stored in ZooKeeper,
- * so that it stays in sync with the operations performed on the leader nimbus.
- */
-public class BlobSynchronizer {
-    private static final Logger LOG = LoggerFactory.getLogger(BlobSynchronizer.class);
-    private CuratorFramework zkClient;
-    private Map conf;
-    private BlobStore blobStore;
-    private Set<String> blobStoreKeySet = new HashSet<String>();
-    private Set<String> zookeeperKeySet = new HashSet<String>();
-    private NimbusInfo nimbusInfo;
-
-    public BlobSynchronizer(BlobStore blobStore, Map conf) {
-        this.blobStore = blobStore;
-        this.conf = conf;
-    }
-
-    public void setNimbusInfo(NimbusInfo nimbusInfo) {
-        this.nimbusInfo = nimbusInfo;
-    }
-
-    public void setZookeeperKeySet(Set<String> zookeeperKeySet) {
-        this.zookeeperKeySet = zookeeperKeySet;
-    }
-
-    public void setBlobStoreKeySet(Set<String> blobStoreKeySet) {
-        this.blobStoreKeySet = blobStoreKeySet;
-    }
-
-    public Set<String> getBlobStoreKeySet() {
-        Set<String> keySet = new HashSet<String>();
-        keySet.addAll(blobStoreKeySet);
-        return keySet;
-    }
-
-    public Set<String> getZookeeperKeySet() {
-        Set<String> keySet = new HashSet<String>();
-        keySet.addAll(zookeeperKeySet);
-        return keySet;
-    }
-
-    public synchronized void syncBlobs() {
-        try {
-            LOG.debug("Sync blobs - blobstore keys {}, zookeeper keys {}",getBlobStoreKeySet(), getZookeeperKeySet());
-            zkClient = BlobStoreUtils.createZKClient(conf);
-            deleteKeySetFromBlobStoreNotOnZookeeper(getBlobStoreKeySet(), getZookeeperKeySet());
-            updateKeySetForBlobStore(getBlobStoreKeySet());
-            Set<String> keySetToDownload = getKeySetToDownload(getBlobStoreKeySet(), getZookeeperKeySet());
-            LOG.debug("Key set Blobstore-> Zookeeper-> DownloadSet {}-> {}-> {}", getBlobStoreKeySet(), getZookeeperKeySet(), keySetToDownload);
-
-            for (String key : keySetToDownload) {
-                Set<NimbusInfo> nimbusInfoSet = BlobStoreUtils.getNimbodesWithLatestSequenceNumberOfBlob(zkClient, key);
-                if(BlobStoreUtils.downloadMissingBlob(conf, blobStore, key, nimbusInfoSet)) {
-                    BlobStoreUtils.createStateInZookeeper(conf, key, nimbusInfo);
-                }
-            }
-            if (zkClient !=null) {
-                zkClient.close();
-            }
-        } catch(InterruptedException exp) {
-            LOG.error("InterruptedException {}", exp);
-        } catch(Exception exp) {
-            throw new RuntimeException(exp);
-        }
-    }
-
-    public void deleteKeySetFromBlobStoreNotOnZookeeper(Set<String> keySetBlobStore, Set<String> keySetZookeeper) throws Exception {
-        if (keySetBlobStore.removeAll(keySetZookeeper)
-                || (keySetZookeeper.isEmpty() && !keySetBlobStore.isEmpty())) {
-            LOG.debug("Key set to delete in blobstore {}", keySetBlobStore);
-            for (String key : keySetBlobStore) {
-                blobStore.deleteBlob(key, BlobStoreUtils.getNimbusSubject());
-            }
-        }
-    }
-
-    // Update current key list inside the blobstore if the version changes
-    public void updateKeySetForBlobStore(Set<String> keySetBlobStore) {
-        try {
-            for (String key : keySetBlobStore) {
-                LOG.debug("updating blob");
-                BlobStoreUtils.updateKeyForBlobStore(conf, blobStore, zkClient, key, nimbusInfo);
-            }
-        } catch (Exception exp) {
-            throw new RuntimeException(exp);
-        }
-    }
-
-    // Make a key list to download
-    public Set<String> getKeySetToDownload(Set<String> blobStoreKeySet, Set<String> zookeeperKeySet) {
-        zookeeperKeySet.removeAll(blobStoreKeySet);
-        LOG.debug("Key list to download {}", zookeeperKeySet);
-        return zookeeperKeySet;
-    }
-}
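The class above is the periodic sync driver for a non-leader nimbus: keys recorded in ZooKeeper but missing from the local blob store are downloaded from a nimbus holding the latest sequence number, stale local keys are refreshed, and keys no longer tracked in ZooKeeper are deleted locally. A hedged sketch of how a caller might drive one pass, assuming the BlobStore instance, storm conf map, NimbusInfo and the two key sets are supplied by the surrounding nimbus code (for example from blobStore.listKeys() and the ZooKeeper blobstore nodes):

    import backtype.storm.blobstore.BlobStore;
    import backtype.storm.blobstore.BlobSynchronizer;
    import backtype.storm.nimbus.NimbusInfo;

    import java.util.Map;
    import java.util.Set;

    public class BlobSyncSketch {
        /** One sync pass on a non-leader nimbus; all arguments are assumed to come from the caller. */
        public static void syncOnce(BlobStore blobStore, Map conf, NimbusInfo nimbusInfo,
                                    Set<String> localKeys, Set<String> zookeeperKeys) {
            BlobSynchronizer synchronizer = new BlobSynchronizer(blobStore, conf);
            synchronizer.setNimbusInfo(nimbusInfo);
            synchronizer.setBlobStoreKeySet(localKeys);     // keys currently in the local blob store
            synchronizer.setZookeeperKeySet(zookeeperKeys); // keys recorded in ZooKeeper by the leader
            // Deletes local keys missing from ZooKeeper, updates stale versions, and
            // downloads keys that exist in ZooKeeper but not locally.
            synchronizer.syncBlobs();
        }
    }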

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/blobstore/ClientBlobStore.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/blobstore/ClientBlobStore.java b/storm-core/src/jvm/backtype/storm/blobstore/ClientBlobStore.java
deleted file mode 100644
index 6408469..0000000
--- a/storm-core/src/jvm/backtype/storm/blobstore/ClientBlobStore.java
+++ /dev/null
@@ -1,184 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.blobstore;
-
-import backtype.storm.daemon.Shutdownable;
-import backtype.storm.generated.AuthorizationException;
-import backtype.storm.generated.ReadableBlobMeta;
-import backtype.storm.generated.SettableBlobMeta;
-import backtype.storm.generated.KeyAlreadyExistsException;
-import backtype.storm.generated.KeyNotFoundException;
-import backtype.storm.utils.NimbusClient;
-
-import java.util.Iterator;
-import java.util.Map;
-
-/**
- * The ClientBlobStore has two concrete implementations
- * 1. NimbusBlobStore
- * 2. HdfsClientBlobStore
- *
- * Create, update, read and delete are some of the basic operations defined by this interface.
- * Each operation is validated for permissions against a user. We currently have NIMBUS_ADMINS and SUPERVISOR_ADMINS
- * configuration. NIMBUS_ADMINS are given READ, WRITE and ADMIN access whereas the SUPERVISOR_ADMINS are given READ
- * access in order to read and download the blobs from the nimbus.
- *
- * The ACLs for the blob store are validated against whether the subject is a NIMBUS_ADMIN, SUPERVISOR_ADMIN or USER
- * who has read, write or admin privileges in order to perform respective operations on the blob.
- *
- * For more detailed implementations, see:
- * @see backtype.storm.blobstore.NimbusBlobStore
- * @see backtype.storm.blobstore.LocalFsBlobStore
- * @see org.apache.storm.hdfs.blobstore.HdfsClientBlobStore
- * @see org.apache.storm.hdfs.blobstore.HdfsBlobStore
- */
-public abstract class ClientBlobStore implements Shutdownable {
-    protected Map conf;
-
-    /**
-     * Sets up the client API by parsing the configs.
-     * @param conf The storm conf containing the config details.
-     */
-    public abstract void prepare(Map conf);
-
-    /**
-     * Client facing API to create a blob.
-     * @param key blob key name.
-     * @param meta contains ACL information.
-     * @return AtomicOutputStream returns an output stream into which data can be written.
-     * @throws AuthorizationException
-     * @throws KeyAlreadyExistsException
-     */
-    protected abstract AtomicOutputStream createBlobToExtend(String key, SettableBlobMeta meta) throws AuthorizationException, KeyAlreadyExistsException;
-
-    /**
-     * Client facing API to update a blob.
-     * @param key blob key name.
-     * @return AtomicOutputStream returns an output stream into which data can be written.
-     * @throws AuthorizationException
-     * @throws KeyNotFoundException
-     */
-    public abstract AtomicOutputStream updateBlob(String key) throws AuthorizationException, KeyNotFoundException;
-
-    /**
-     * Client facing API to read the metadata information.
-     * @param key blob key name.
-     * @return ReadableBlobMeta containing the metadata, including ACLs, for the blob.
-     * @throws AuthorizationException
-     * @throws KeyNotFoundException
-     */
-    public abstract ReadableBlobMeta getBlobMeta(String key) throws AuthorizationException, KeyNotFoundException;
-
-    /**
-     * Client facing API to set the metadata for a blob.
-     * @param key blob key name.
-     * @param meta contains ACL information.
-     * @throws AuthorizationException
-     * @throws KeyNotFoundException
-     */
-    protected abstract void setBlobMetaToExtend(String key, SettableBlobMeta meta) throws AuthorizationException, KeyNotFoundException;
-
-    /**
-     * Client facing API to delete a blob.
-     * @param key blob key name.
-     * @throws AuthorizationException
-     * @throws KeyNotFoundException
-     */
-    public abstract void deleteBlob(String key) throws AuthorizationException, KeyNotFoundException;
-
-    /**
-     * Client facing API to read a blob.
-     * @param key blob key name.
-     * @return an InputStreamWithMeta to read the blob data.
-     * @throws AuthorizationException
-     * @throws KeyNotFoundException
-     */
-    public abstract InputStreamWithMeta getBlob(String key) throws AuthorizationException, KeyNotFoundException;
-
-    /**
-     * @return Iterator for a list of keys currently present in the blob store.
-     */
-    public abstract Iterator<String> listKeys();
-
-    /**
-     * Client facing API to read the replication of a blob.
-     * @param key blob key name.
-     * @return int indicates the replication factor of a blob.
-     * @throws AuthorizationException
-     * @throws KeyNotFoundException
-     */
-    public abstract int getBlobReplication(String key) throws AuthorizationException, KeyNotFoundException;
-
-    /**
-     * Client facing API to update the replication of a blob.
-     * @param key blob key name.
-     * @param replication int indicates the replication factor to be set for the blob.
-     * @return int indicates the replication factor of a blob.
-     * @throws AuthorizationException
-     * @throws KeyNotFoundException
-     */
-    public abstract int updateBlobReplication(String key, int replication) throws AuthorizationException, KeyNotFoundException;
-
-    /**
-     * Client facing API to set a nimbus client.
-     * @param conf storm conf
-     * @param client NimbusClient
-     * @return indicates whether the client connection has been set up.
-     */
-    public abstract boolean setClient(Map conf, NimbusClient client);
-
-    /**
-     * Creates state inside ZooKeeper.
-     * Required for the blobstore to write to ZooKeeper
-     * when Nimbus HA is turned on in order to maintain
-     * state consistency.
-     * @param key
-     */
-    public abstract void createStateInZookeeper(String key);
-
-    /**
-     * Client facing API to create a blob.
-     * @param key blob key name.
-     * @param meta contains ACL information.
-     * @return AtomicOutputStream returns an output stream into which data can be written.
-     * @throws AuthorizationException
-     * @throws KeyAlreadyExistsException
-     */
-    public final AtomicOutputStream createBlob(String key, SettableBlobMeta meta) throws AuthorizationException, KeyAlreadyExistsException {
-        if (meta != null && meta.is_set_acl()) {
-            BlobStoreAclHandler.validateSettableACLs(key, meta.get_acl());
-        }
-        return createBlobToExtend(key, meta);
-    }
-
-    /**
-     * Client facing API to set the metadata for a blob.
-     * @param key blob key name.
-     * @param meta contains ACL information.
-     * @throws AuthorizationException
-     * @throws KeyNotFoundException
-     */
-    public final void setBlobMeta(String key, SettableBlobMeta meta) throws AuthorizationException, KeyNotFoundException {
-        if (meta != null && meta.is_set_acl()) {
-            BlobStoreAclHandler.validateSettableACLs(key, meta.get_acl());
-        }
-        setBlobMetaToExtend(key, meta);
-    }
-
-
-}

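As a rough illustration of the client-facing API shown above, here is a minimal sketch of creating and then reading back a blob through a ClientBlobStore. The key name, payload and world-readable ACL are illustrative assumptions, not taken from the patch:

import backtype.storm.blobstore.AtomicOutputStream;
import backtype.storm.blobstore.BlobStoreAclHandler;
import backtype.storm.blobstore.ClientBlobStore;
import backtype.storm.blobstore.InputStreamWithMeta;
import backtype.storm.blobstore.NimbusBlobStore;
import backtype.storm.generated.AccessControl;
import backtype.storm.generated.AccessControlType;
import backtype.storm.generated.SettableBlobMeta;
import backtype.storm.utils.Utils;

import java.util.Arrays;

public class ClientBlobStoreSketch {
    public static void main(String[] args) throws Exception {
        ClientBlobStore store = new NimbusBlobStore();
        store.prepare(Utils.readStormConfig());

        // A world-readable ACL; "example-key" is only an illustrative key name.
        AccessControl worldRead = new AccessControl();
        worldRead.set_type(AccessControlType.OTHER);
        worldRead.set_access(BlobStoreAclHandler.READ);
        SettableBlobMeta meta = new SettableBlobMeta(Arrays.asList(worldRead));

        // createBlob validates the ACLs and hands back an atomic output stream;
        // close() commits the blob.
        AtomicOutputStream out = store.createBlob("example-key", meta);
        out.write("hello blob".getBytes("UTF-8"));
        out.close();

        // Read the blob back through the same client.
        try (InputStreamWithMeta in = store.getBlob("example-key")) {
            byte[] buf = new byte[(int) in.getFileLength()];
            int read = in.read(buf);
            System.out.println(read + " bytes: " + new String(buf, "UTF-8"));
        }
        store.shutdown();
    }
}
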
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/blobstore/FileBlobStoreImpl.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/blobstore/FileBlobStoreImpl.java b/storm-core/src/jvm/backtype/storm/blobstore/FileBlobStoreImpl.java
deleted file mode 100644
index b789335..0000000
--- a/storm-core/src/jvm/backtype/storm/blobstore/FileBlobStoreImpl.java
+++ /dev/null
@@ -1,248 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.blobstore;
-
-import backtype.storm.Config;
-import backtype.storm.utils.Utils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.nio.file.Files;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.Map;
-import java.util.NoSuchElementException;
-import java.util.Timer;
-import java.util.TimerTask;
-
-/**
- * Very basic blob store impl with no ACL handling.
- */
-public class FileBlobStoreImpl {
-    private static final long FULL_CLEANUP_FREQ = 60 * 60 * 1000L;
-    private static final int BUCKETS = 1024;
-    private static final Logger LOG = LoggerFactory.getLogger(FileBlobStoreImpl.class);
-    private static final Timer timer = new Timer("FileBlobStore cleanup thread", true);
-
-    public class KeyInHashDirIterator implements Iterator<String> {
-        private int currentBucket = 0;
-        private Iterator<String> it = null;
-        private String next = null;
-
-        public KeyInHashDirIterator() throws IOException {
-            primeNext();
-        }
-
-        private void primeNext() throws IOException {
-            while (it == null && currentBucket < BUCKETS) {
-                String name = String.valueOf(currentBucket);
-                File dir = new File(fullPath, name);
-                try {
-                    it = listKeys(dir);
-                } catch (FileNotFoundException e) {
-                    it = null;
-                }
-                if (it == null || !it.hasNext()) {
-                    it = null;
-                    currentBucket++;
-                } else {
-                    next = it.next();
-                }
-            }
-        }
-
-        @Override
-        public boolean hasNext() {
-            return next != null;
-        }
-
-        @Override
-        public String next() {
-            if (!hasNext()) {
-                throw new NoSuchElementException();
-            }
-            String current = next;
-            next = null;
-            if (it != null) {
-                if (!it.hasNext()) {
-                    it = null;
-                    currentBucket++;
-                    try {
-                        primeNext();
-                    } catch (IOException e) {
-                        throw new RuntimeException(e);
-                    }
-                } else {
-                    next = it.next();
-                }
-            }
-            return current;
-        }
-
-        @Override
-        public void remove() {
-            throw new UnsupportedOperationException("Delete Not Supported");
-        }
-    }
-
-    private File fullPath;
-    private TimerTask cleanup = null;
-
-    public FileBlobStoreImpl(File path, Map<String, Object> conf) throws IOException {
-        LOG.info("Creating new blob store based in {}", path);
-        fullPath = path;
-        fullPath.mkdirs();
-        Object shouldCleanup = conf.get(Config.BLOBSTORE_CLEANUP_ENABLE);
-        if (Utils.getBoolean(shouldCleanup, false)) {
-            LOG.debug("Starting File blobstore cleaner");
-            cleanup = new TimerTask() {
-                @Override
-                public void run() {
-                    try {
-                        fullCleanup(FULL_CLEANUP_FREQ);
-                    } catch (IOException e) {
-                        LOG.error("Error trying to cleanup", e);
-                    }
-                }
-            };
-            timer.scheduleAtFixedRate(cleanup, 0, FULL_CLEANUP_FREQ);
-        }
-    }
-
-    /**
-     * @return all keys that are available for reading.
-     * @throws IOException on any error.
-     */
-    public Iterator<String> listKeys() throws IOException {
-        return new KeyInHashDirIterator();
-    }
-
-    /**
-     * Get an input stream for reading a part.
-     * @param key the key of the part to read.
-     * @return the file to read the data from.
-     * @throws IOException on any error
-     */
-    public LocalFsBlobStoreFile read(String key) throws IOException {
-        return new LocalFsBlobStoreFile(getKeyDir(key), BlobStoreFile.BLOBSTORE_DATA_FILE);
-    }
-
-    /**
-     * Get an object tied to writing the data.
-     * @param key the key of the part to write to.
-     * @return an object that can be used to write the data and to commit or cancel the operation.
-     * @throws IOException on any error
-     */
-    public LocalFsBlobStoreFile write(String key, boolean create) throws IOException {
-        return new LocalFsBlobStoreFile(getKeyDir(key), true, create);
-    }
-
-    /**
-     * Check if the key exists in the blob store.
-     * @param key the key to check for
-     * @return true if it exists else false.
-     */
-    public boolean exists(String key) {
-        return getKeyDir(key).exists();
-    }
-
-    /**
-     * Delete a key from the blob store
-     * @param key the key to delete
-     * @throws IOException on any error
-     */
-    public void deleteKey(String key) throws IOException {
-        File keyDir = getKeyDir(key);
-        LocalFsBlobStoreFile pf = new LocalFsBlobStoreFile(keyDir, BlobStoreFile.BLOBSTORE_DATA_FILE);
-        pf.delete();
-        delete(keyDir);
-    }
-
-    private File getKeyDir(String key) {
-        String hash = String.valueOf(Math.abs((long)key.hashCode()) % BUCKETS);
-        File ret = new File(new File(fullPath, hash), key);
-        LOG.debug("{} Looking for {} in {}", new Object[]{fullPath, key, hash});
-        return ret;
-    }
-
-    public void fullCleanup(long age) throws IOException {
-        long cleanUpIfBefore = System.currentTimeMillis() - age;
-        Iterator<String> keys = new KeyInHashDirIterator();
-        while (keys.hasNext()) {
-            String key = keys.next();
-            File keyDir = getKeyDir(key);
-            Iterator<LocalFsBlobStoreFile> i = listBlobStoreFiles(keyDir);
-            if (!i.hasNext()) {
-                //The dir is empty, so try to delete it, may fail, but that is OK
-                try {
-                    keyDir.delete();
-                } catch (Exception e) {
-                    LOG.warn("Could not delete "+keyDir+" will try again later");
-                }
-            }
-            while (i.hasNext()) {
-                LocalFsBlobStoreFile f = i.next();
-                if (f.isTmp()) {
-                    if (f.getModTime() <= cleanUpIfBefore) {
-                        f.delete();
-                    }
-                }
-            }
-        }
-    }
-
-    protected Iterator<LocalFsBlobStoreFile> listBlobStoreFiles(File path) throws IOException {
-        ArrayList<LocalFsBlobStoreFile> ret = new ArrayList<LocalFsBlobStoreFile>();
-        File[] files = path.listFiles();
-        if (files != null) {
-            for (File sub: files) {
-                try {
-                    ret.add(new LocalFsBlobStoreFile(sub.getParentFile(), sub.getName()));
-                } catch (IllegalArgumentException e) {
-                    //Ignored the file did not match
-                    LOG.warn("Found an unexpected file in {} {}",path, sub.getName());
-                }
-            }
-        }
-        return ret.iterator();
-    }
-
-    protected Iterator<String> listKeys(File path) throws IOException {
-        String[] files = path.list();
-        if (files != null) {
-            return Arrays.asList(files).iterator();
-        }
-        return new LinkedList<String>().iterator();
-    }
-
-    protected void delete(File path) throws IOException {
-        Files.deleteIfExists(path.toPath());
-    }
-
-    public void shutdown() {
-        if (cleanup != null) {
-            cleanup.cancel();
-            cleanup = null;
-        }
-    }
-}

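The layout used by FileBlobStoreImpl hashes each key into one of BUCKETS numbered subdirectories under the base path; a small self-contained sketch of that mapping, mirroring getKeyDir above (the base path is illustrative):

import java.io.File;

public class BucketPathSketch {
    private static final int BUCKETS = 1024;

    // Mirrors FileBlobStoreImpl.getKeyDir: key -> numbered bucket dir -> per-key dir.
    static File keyDir(File basePath, String key) {
        String bucket = String.valueOf(Math.abs((long) key.hashCode()) % BUCKETS);
        return new File(new File(basePath, bucket), key);
    }

    public static void main(String[] args) {
        File base = new File("/tmp/blobstore-sketch");  // illustrative location
        System.out.println(keyDir(base, "data_example-key"));
    }
}
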
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/blobstore/InputStreamWithMeta.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/blobstore/InputStreamWithMeta.java b/storm-core/src/jvm/backtype/storm/blobstore/InputStreamWithMeta.java
deleted file mode 100644
index 1d29fda..0000000
--- a/storm-core/src/jvm/backtype/storm/blobstore/InputStreamWithMeta.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.blobstore;
-
-import java.io.IOException;
-import java.io.InputStream;
-
-public abstract class InputStreamWithMeta extends InputStream {
-    public abstract long getVersion() throws IOException;
-    public abstract long getFileLength() throws IOException;
-}

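Since InputStreamWithMeta reports the blob size alongside the stream, callers can size their buffers up front; a minimal helper sketch (not part of the patch):

import backtype.storm.blobstore.InputStreamWithMeta;

import java.io.IOException;

public class ReadBlobSketch {
    // Drains an InputStreamWithMeta into a byte array sized from getFileLength().
    static byte[] readFully(InputStreamWithMeta in) throws IOException {
        byte[] buf = new byte[(int) in.getFileLength()];
        int read = 0;
        while (read < buf.length) {
            int n = in.read(buf, read, buf.length - read);
            if (n < 0) {
                break;  // stream ended early
            }
            read += n;
        }
        return buf;
    }
}
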
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/blobstore/KeyFilter.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/blobstore/KeyFilter.java b/storm-core/src/jvm/backtype/storm/blobstore/KeyFilter.java
deleted file mode 100644
index 32bb9fd..0000000
--- a/storm-core/src/jvm/backtype/storm/blobstore/KeyFilter.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.blobstore;
-
-public interface KeyFilter<R> {
-    R filter(String key);
-}

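KeyFilter is a simple map-or-drop hook over key names; a hypothetical implementation that keeps only keys with a given prefix and strips the prefix from matches:

import backtype.storm.blobstore.KeyFilter;

public class PrefixKeyFilter implements KeyFilter<String> {
    private final String prefix;

    public PrefixKeyFilter(String prefix) {
        this.prefix = prefix;
    }

    // Returns the key with the prefix stripped, or null so the key is filtered out.
    @Override
    public String filter(String key) {
        return key.startsWith(prefix) ? key.substring(prefix.length()) : null;
    }
}
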
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/blobstore/KeySequenceNumber.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/blobstore/KeySequenceNumber.java b/storm-core/src/jvm/backtype/storm/blobstore/KeySequenceNumber.java
deleted file mode 100644
index 2a53828..0000000
--- a/storm-core/src/jvm/backtype/storm/blobstore/KeySequenceNumber.java
+++ /dev/null
@@ -1,229 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package backtype.storm.blobstore;
-
-import backtype.storm.nimbus.NimbusInfo;
-import backtype.storm.utils.Utils;
-import org.apache.curator.framework.CuratorFramework;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.ZooDefs;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.nio.ByteBuffer;
-import java.util.TreeSet;
-import java.util.Map;
-import java.util.List;
-
-/**
- * Class hands over the key sequence number, which implies the number of updates made to a blob.
- * The information regarding the keys and the sequence number, which represents the number of updates, is
- * stored within the zookeeper in the following format:
- * /storm/blobstore/key_name/nimbushostport-sequencenumber
- * Example:
- * If there are two nimbodes and nimbus.seeds is set to leader,non-leader,
- * then the state inside the zookeeper is eventually stored as:
- * /storm/blobstore/key1/leader:8080-1
- * /storm/blobstore/key1/non-leader:8080-1
- * This indicates that a new blob with the name key1 has been created on the leader
- * nimbus; the non-leader nimbus syncs after a callback is triggered by attempting
- * to download the blob and finally updates its state inside the zookeeper.
- *
- * A watch is placed on /storm/blobstore/key1, and the znodes leader:8080-1 and
- * non-leader:8080-1 are ephemeral, which implies that these nodes exist only while the
- * connection between the corresponding nimbus and the zookeeper persists. If the
- * nimbus crashes, its node disappears from under /storm/blobstore/key1.
- *
- * The sequence number for a key is handed out based on the following scenarios.
- * Let's assume there are two nimbodes up and running, one being the leader and the other
- * being the non-leader.
- *
- * 1. Create is straightforward.
- * Check whether the znode /storm/blobstore/key1 has been created or not. If it has not,
- * the blob does not exist yet, so it is created and the zookeeper state is updated
- * under /storm/blobstore/key1 and /storm/blobstoremaxkeysequencenumber/key1.
- * The znodes created under these nodes are /storm/blobstore/key1/leader:8080-1,
- * /storm/blobstore/key1/non-leader:8080-1 and /storm/blobstoremaxkeysequencenumber/key1/1.
- * The latter holds the global sequence number across all nimbodes, more like a static variable
- * indicating the true number of updates for a blob. This node helps to maintain sanity in case
- * leadership changes due to crashing.
- *
- * 2. Delete does not require handing over a sequence number.
- *
- * 3. Finally, update has a few scenarios.
- *
- *  The class uses a TreeSet. The basic idea is that if all the nimbodes have the same
- *  sequence number for the blob, then the set contains a single element, which holds
- *  the latest sequence number. If the number of elements is greater than 1, it
- *  implies that there is a sequence mismatch and the blobs need to be synced across
- *  nimbodes.
- *
- *  The logic for handing over sequence numbers based on the state is described as follows.
- *  Here Nimbus-1 is aliased as N1 and Nimbus-2 as N2.
- *  Scenario 1:
- *  Example: Normal create/update scenario
- *  Operation     Nimbus-1:state     Nimbus-2:state     Seq-Num-Nimbus-1  Seq-Num-Nimbus-2          Max-Seq-Num
- *  Create-Key1   alive - Leader     alive              1                                           1
- *  Sync          alive - Leader     alive              1                 1 (callback -> download)  1
- *  Update-Key1   alive - Leader     alive              2                 1                         2
- *  Sync          alive - Leader     alive              2                 2 (callback -> download)  2
- *
- *  Scenario 2:
- *  Example: Leader nimbus crash followed by leader election, update and ex-leader restored again
- *  Operation     Nimbus-1:state     Nimbus-2:state     Seq-Num-Nimbus-1  Seq-Num-Nimbus-2          Max-Seq-Num
- *  Create        alive - Leader     alive              1                                           1
- *  Sync          alive - Leader     alive              1                 1 (callback -> download)  1
- *  Update        alive - Leader     alive              2                 1                         2
- *  Sync          alive - Leader     alive              2                 2 (callback -> download)  2
- *  Update        alive - Leader     alive              3                 2                         3
- *  Crash         crash - Leader     alive              3                 2                         3
- *  New - Leader  crash              alive - Leader     3 (Invalid)       2                         3
- *  Update        crash              alive - Leader     3 (Invalid)       4 (max-seq-num + 1)       4
- *  N1-Restored   alive              alive - Leader     0                 4                         4
- *  Sync          alive              alive - Leader     4                 4                         4
- *
- *  Scenario 3:
- *  Example: Leader nimbus crash followed by leader election, update and ex-leader restored again
- *  Operation     Nimbus-1:state     Nimbus-2:state     Seq-Num-Nimbus-1  Seq-Num-Nimbus-2          Max-Seq-Num
- *  Create        alive - Leader     alive              1                                           1
- *  Sync          alive - Leader     alive              1                 1 (callback -> download)  1
- *  Update        alive - Leader     alive              2                 1                         2
- *  Sync          alive - Leader     alive              2                 2 (callback -> download)  2
- *  Update        alive - Leader     alive              3                 2                         3
- *  Crash         crash - Leader     alive              3                 2                         3
- *  Elect Leader  crash              alive - Leader     3 (Invalid)       2                         3
- *  N1-Restored   alive              alive - Leader     3                 2                         3
- *  Read/Update   alive              alive - Leader     3                 4 (Downloads from N1)     4
- *  Sync          alive              alive - Leader     4 (callback)      4                         4
- *  Here the download is triggered whenever an operation corresponding to the blob, such as a read or
- *  update, is performed on the nimbus. Within the read/update call it is hard to know which of the two
- *  it is. Hence, by incrementing the sequence number to max-seq-num + 1 we ensure that the
- *  synchronization happens appropriately and all nimbodes end up with the same blob.
- */
-public class KeySequenceNumber {
-    private static final Logger LOG = LoggerFactory.getLogger(KeySequenceNumber.class);
-    private final String BLOBSTORE_SUBTREE="/blobstore";
-    private final String BLOBSTORE_MAX_KEY_SEQUENCE_SUBTREE="/blobstoremaxkeysequencenumber";
-    private final String key;
-    private final NimbusInfo nimbusInfo;
-    private final int INT_CAPACITY = 4;
-    private final int INITIAL_SEQUENCE_NUMBER = 1;
-
-    public KeySequenceNumber(String key, NimbusInfo nimbusInfo) {
-        this.key = key;
-        this.nimbusInfo = nimbusInfo;
-    }
-
-    public synchronized int getKeySequenceNumber(Map conf) {
-        TreeSet<Integer> sequenceNumbers = new TreeSet<Integer>();
-        CuratorFramework zkClient = BlobStoreUtils.createZKClient(conf);
-        try {
-            // Key has not been created yet and it is the first time it is being created
-            if(zkClient.checkExists().forPath(BLOBSTORE_SUBTREE + "/" + key) == null) {
-                zkClient.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT)
-                        .withACL(ZooDefs.Ids.OPEN_ACL_UNSAFE).forPath(BLOBSTORE_MAX_KEY_SEQUENCE_SUBTREE + "/" + key);
-                zkClient.setData().forPath(BLOBSTORE_MAX_KEY_SEQUENCE_SUBTREE + "/" + key,
-                        ByteBuffer.allocate(INT_CAPACITY).putInt(INITIAL_SEQUENCE_NUMBER).array());
-                return INITIAL_SEQUENCE_NUMBER;
-            }
-
-            // When all nimbodes go down and one or a few of them come back up,
-            // there unfortunately may be no exact way to know which one contains the most up-to-date blob
-            // (all going down is unlikely). Hence the blob might need to be updated if all go down.
-            List<String> stateInfoList = zkClient.getChildren().forPath(BLOBSTORE_SUBTREE + "/" + key);
-            LOG.debug("stateInfoList-size {} stateInfoList-data {}", stateInfoList.size(), stateInfoList);
-            if(stateInfoList.isEmpty()) {
-                return getMaxSequenceNumber(zkClient);
-            }
-
-            LOG.debug("stateInfoSize {}", stateInfoList.size());
-            // In all other cases check for the latest update sequence of the blob on the nimbus
-            // and assign the appropriate number. Check if all have the same sequence number;
-            // if not, assign the highest sequence number.
-            for (String stateInfo:stateInfoList) {
-                sequenceNumbers.add(Integer.parseInt(BlobStoreUtils.normalizeNimbusHostPortSequenceNumberInfo(stateInfo)
-                        .getSequenceNumber()));
-            }
-
-            // Update scenario 2 and 3 explain the code logic written here
-            // especially when nimbus crashes and comes up after and before update
-            // respectively.
-            int currentSeqNumber = getMaxSequenceNumber(zkClient);
-            if (!checkIfStateContainsCurrentNimbusHost(stateInfoList, nimbusInfo) && !nimbusInfo.isLeader()) {
-                if (sequenceNumbers.last() < currentSeqNumber) {
-                    return currentSeqNumber;
-                } else {
-                    return INITIAL_SEQUENCE_NUMBER - 1;
-                }
-            }
-
-            // It covers the case explained in scenario 3 where nimbus-1, holding the latest
-            // update, goes down before it is downloaded by nimbus-2. Nimbus-2 gets elected as the leader,
-            // after which nimbus-1 comes back up and a read or update is performed.
-            if (!checkIfStateContainsCurrentNimbusHost(stateInfoList, nimbusInfo) && nimbusInfo.isLeader()) {
-                incrementMaxSequenceNumber(zkClient, currentSeqNumber);
-                return currentSeqNumber + 1;
-            }
-
-            // This code logic covers the update scenarios in 2 when the nimbus-1 goes down
-            // before syncing the blob to nimbus-2 and an update happens.
-            // If seq-num for nimbus-2 is 2 and max-seq-number is 3 then next sequence number is 4
-            // (max-seq-number + 1).
-            // Other scenario it covers is when max-seq-number and nimbus seq number are equal.
-            if (sequenceNumbers.size() == 1) {
-                if (sequenceNumbers.first() < currentSeqNumber) {
-                    incrementMaxSequenceNumber(zkClient, currentSeqNumber);
-                    return currentSeqNumber + 1;
-                } else {
-                    incrementMaxSequenceNumber(zkClient, currentSeqNumber);
-                    return sequenceNumbers.first() + 1;
-                }
-            }
-        } catch(Exception e) {
-            LOG.error("Exception", e);
-        } finally {
-            if (zkClient != null) {
-                zkClient.close();
-            }
-        }
-        // Normal create update sync scenario returns the greatest sequence number in the set
-        return sequenceNumbers.last();
-    }
-
-    private boolean checkIfStateContainsCurrentNimbusHost(List<String> stateInfoList, NimbusInfo nimbusInfo) {
-        boolean containsNimbusHost = false;
-        for(String stateInfo:stateInfoList) {
-            if(stateInfo.contains(nimbusInfo.getHost())) {
-                containsNimbusHost = true;
-                break;
-            }
-        }
-        return containsNimbusHost;
-    }
-
-    private void incrementMaxSequenceNumber(CuratorFramework zkClient, int count) throws Exception {
-        zkClient.setData().forPath(BLOBSTORE_MAX_KEY_SEQUENCE_SUBTREE + "/" + key,
-                ByteBuffer.allocate(INT_CAPACITY).putInt(count + 1).array());
-    }
-
-    private int getMaxSequenceNumber(CuratorFramework zkClient) throws Exception {
-        return ByteBuffer.wrap(zkClient.getData()
-                .forPath(BLOBSTORE_MAX_KEY_SEQUENCE_SUBTREE + "/" + key)).getInt();
-    }
-}

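The max-sequence-number bookkeeping above stores a plain 4-byte big-endian int as the payload of the /storm/blobstoremaxkeysequencenumber znode for the key; a minimal sketch of just that encode/decode step, mirroring incrementMaxSequenceNumber and getMaxSequenceNumber (values are illustrative):

import java.nio.ByteBuffer;

public class SequenceNumberEncodingSketch {
    private static final int INT_CAPACITY = 4;

    // How the sequence number is serialized before being written to the znode.
    static byte[] encode(int sequenceNumber) {
        return ByteBuffer.allocate(INT_CAPACITY).putInt(sequenceNumber).array();
    }

    // How the znode payload is read back as a big-endian int.
    static int decode(byte[] znodeData) {
        return ByteBuffer.wrap(znodeData).getInt();
    }

    public static void main(String[] args) {
        System.out.println(decode(encode(3)));  // prints 3
    }
}
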
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/blobstore/LocalFsBlobStore.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/blobstore/LocalFsBlobStore.java b/storm-core/src/jvm/backtype/storm/blobstore/LocalFsBlobStore.java
deleted file mode 100644
index b8daad2..0000000
--- a/storm-core/src/jvm/backtype/storm/blobstore/LocalFsBlobStore.java
+++ /dev/null
@@ -1,311 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.blobstore;
-
-import backtype.storm.Config;
-import backtype.storm.generated.SettableBlobMeta;
-import backtype.storm.generated.AuthorizationException;
-import backtype.storm.generated.KeyAlreadyExistsException;
-import backtype.storm.generated.KeyNotFoundException;
-import backtype.storm.generated.ReadableBlobMeta;
-
-import backtype.storm.nimbus.NimbusInfo;
-import backtype.storm.utils.Utils;
-import org.apache.curator.framework.CuratorFramework;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.security.auth.Subject;
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.IOException;
-import java.io.FileNotFoundException;
-import java.io.InputStream;
-
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import static backtype.storm.blobstore.BlobStoreAclHandler.ADMIN;
-import static backtype.storm.blobstore.BlobStoreAclHandler.READ;
-import static backtype.storm.blobstore.BlobStoreAclHandler.WRITE;
-
-/**
- * Provides a local file system backed blob store implementation for Nimbus.
- *
- * For a local blob store the user and the supervisor use NimbusBlobStore Client API in order to talk to nimbus through thrift.
- * The authentication and authorization here is based on the subject.
- * We currently have NIMBUS_ADMINS and SUPERVISOR_ADMINS configurations. NIMBUS_ADMINS are given READ, WRITE and ADMIN
- * access whereas the SUPERVISOR_ADMINS are given READ access in order to read and download the blobs from nimbus.
- *
- * The ACLs for the blob store are validated against whether the subject is a NIMBUS_ADMIN, SUPERVISOR_ADMIN or USER
- * who has read, write or admin privileges in order to perform respective operations on the blob.
- *
- * For local blob store
- * 1. The USER interacts with nimbus to upload and access blobs through NimbusBlobStore Client API.
- * 2. The USER sets the ACLs, and the blob access is validated against these ACLs.
- * 3. The SUPERVISOR interacts with nimbus through the NimbusBlobStore Client API to download the blobs.
- * The supervisor's principal should match the set of users configured in SUPERVISOR_ADMINS.
- * Here, the PrincipalToLocalPlugin takes care of mapping the principal to user name before the ACL validation.
- */
-public class LocalFsBlobStore extends BlobStore {
-    public static final Logger LOG = LoggerFactory.getLogger(LocalFsBlobStore.class);
-    private static final String DATA_PREFIX = "data_";
-    private static final String META_PREFIX = "meta_";
-    protected BlobStoreAclHandler _aclHandler;
-    private final String BLOBSTORE_SUBTREE = "/blobstore/";
-    private NimbusInfo nimbusInfo;
-    private FileBlobStoreImpl fbs;
-    private final int allPermissions = READ | WRITE | ADMIN;
-    private Map conf;
-    private CuratorFramework zkClient;
-
-    @Override
-    public void prepare(Map conf, String overrideBase, NimbusInfo nimbusInfo) {
-        this.conf = conf;
-        this.nimbusInfo = nimbusInfo;
-        zkClient = BlobStoreUtils.createZKClient(conf);
-        if (overrideBase == null) {
-            overrideBase = (String)conf.get(Config.BLOBSTORE_DIR);
-            if (overrideBase == null) {
-                overrideBase = (String) conf.get(Config.STORM_LOCAL_DIR);
-            }
-        }
-        File baseDir = new File(overrideBase, BASE_BLOBS_DIR_NAME);
-        try {
-            fbs = new FileBlobStoreImpl(baseDir, conf);
-        } catch (IOException e) {
-            throw new RuntimeException(e);
-        }
-        _aclHandler = new BlobStoreAclHandler(conf);
-    }
-
-    @Override
-    public AtomicOutputStream createBlob(String key, SettableBlobMeta meta, Subject who) throws AuthorizationException, KeyAlreadyExistsException {
-        LOG.debug("Creating Blob for key {}", key);
-        validateKey(key);
-        _aclHandler.normalizeSettableBlobMeta(key, meta, who, allPermissions);
-        BlobStoreAclHandler.validateSettableACLs(key, meta.get_acl());
-        _aclHandler.hasPermissions(meta.get_acl(), allPermissions, who, key);
-        if (fbs.exists(DATA_PREFIX+key)) {
-            throw new KeyAlreadyExistsException(key);
-        }
-        BlobStoreFileOutputStream mOut = null;
-        try {
-            mOut = new BlobStoreFileOutputStream(fbs.write(META_PREFIX+key, true));
-            mOut.write(Utils.thriftSerialize(meta));
-            mOut.close();
-            mOut = null;
-            return new BlobStoreFileOutputStream(fbs.write(DATA_PREFIX+key, true));
-        } catch (IOException e) {
-            throw new RuntimeException(e);
-        } finally {
-            if (mOut != null) {
-                try {
-                    mOut.cancel();
-                } catch (IOException e) {
-                    //Ignored
-                }
-            }
-        }
-    }
-
-    @Override
-    public AtomicOutputStream updateBlob(String key, Subject who) throws AuthorizationException, KeyNotFoundException {
-        validateKey(key);
-        checkForBlobOrDownload(key);
-        SettableBlobMeta meta = getStoredBlobMeta(key);
-        _aclHandler.hasPermissions(meta.get_acl(), WRITE, who, key);
-        try {
-            return new BlobStoreFileOutputStream(fbs.write(DATA_PREFIX+key, false));
-        } catch (IOException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    private SettableBlobMeta getStoredBlobMeta(String key) throws KeyNotFoundException {
-        InputStream in = null;
-        try {
-            LocalFsBlobStoreFile pf = fbs.read(META_PREFIX+key);
-            try {
-                in = pf.getInputStream();
-            } catch (FileNotFoundException fnf) {
-                throw new KeyNotFoundException(key);
-            }
-            ByteArrayOutputStream out = new ByteArrayOutputStream();
-            byte [] buffer = new byte[2048];
-            int len;
-            while ((len = in.read(buffer)) > 0) {
-                out.write(buffer, 0, len);
-            }
-            in.close();
-            in = null;
-            return Utils.thriftDeserialize(SettableBlobMeta.class, out.toByteArray());
-        } catch (IOException e) {
-            throw new RuntimeException(e);
-        } finally {
-            if (in != null) {
-                try {
-                    in.close();
-                } catch (IOException e) {
-                    //Ignored
-                }
-            }
-        }
-    }
-
-    @Override
-    public ReadableBlobMeta getBlobMeta(String key, Subject who) throws AuthorizationException, KeyNotFoundException {
-        validateKey(key);
-        if(!checkForBlobOrDownload(key)) {
-            checkForBlobUpdate(key);
-        }
-        SettableBlobMeta meta = getStoredBlobMeta(key);
-        _aclHandler.validateUserCanReadMeta(meta.get_acl(), who, key);
-        ReadableBlobMeta rbm = new ReadableBlobMeta();
-        rbm.set_settable(meta);
-        try {
-            LocalFsBlobStoreFile pf = fbs.read(DATA_PREFIX+key);
-            rbm.set_version(pf.getModTime());
-        } catch (IOException e) {
-            throw new RuntimeException(e);
-        }
-        return rbm;
-    }
-
-    @Override
-    public void setBlobMeta(String key, SettableBlobMeta meta, Subject who) throws AuthorizationException, KeyNotFoundException {
-        validateKey(key);
-        checkForBlobOrDownload(key);
-        _aclHandler.normalizeSettableBlobMeta(key, meta, who, ADMIN);
-        BlobStoreAclHandler.validateSettableACLs(key, meta.get_acl());
-        SettableBlobMeta orig = getStoredBlobMeta(key);
-        _aclHandler.hasPermissions(orig.get_acl(), ADMIN, who, key);
-        BlobStoreFileOutputStream mOut = null;
-        try {
-            mOut = new BlobStoreFileOutputStream(fbs.write(META_PREFIX+key, false));
-            mOut.write(Utils.thriftSerialize(meta));
-            mOut.close();
-            mOut = null;
-        } catch (IOException e) {
-            throw new RuntimeException(e);
-        } finally {
-            if (mOut != null) {
-                try {
-                    mOut.cancel();
-                } catch (IOException e) {
-                    //Ignored
-                }
-            }
-        }
-    }
-
-    @Override
-    public void deleteBlob(String key, Subject who) throws AuthorizationException, KeyNotFoundException {
-        validateKey(key);
-        checkForBlobOrDownload(key);
-        SettableBlobMeta meta = getStoredBlobMeta(key);
-        _aclHandler.hasPermissions(meta.get_acl(), WRITE, who, key);
-        try {
-            fbs.deleteKey(DATA_PREFIX+key);
-            fbs.deleteKey(META_PREFIX+key);
-        } catch (IOException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    @Override
-    public InputStreamWithMeta getBlob(String key, Subject who) throws AuthorizationException, KeyNotFoundException {
-        validateKey(key);
-        if(!checkForBlobOrDownload(key)) {
-            checkForBlobUpdate(key);
-        }
-        SettableBlobMeta meta = getStoredBlobMeta(key);
-        _aclHandler.hasPermissions(meta.get_acl(), READ, who, key);
-        try {
-            return new BlobStoreFileInputStream(fbs.read(DATA_PREFIX+key));
-        } catch (IOException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    @Override
-    public Iterator<String> listKeys() {
-        try {
-            return new KeyTranslationIterator(fbs.listKeys(), DATA_PREFIX);
-        } catch (IOException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    @Override
-    public void shutdown() {
-        if (zkClient != null) {
-            zkClient.close();
-        }
-    }
-
-    @Override
-    public int getBlobReplication(String key, Subject who) throws Exception {
-        int replicationCount = 0;
-        validateKey(key);
-        SettableBlobMeta meta = getStoredBlobMeta(key);
-        _aclHandler.hasPermissions(meta.get_acl(), READ, who, key);
-        if (zkClient.checkExists().forPath(BLOBSTORE_SUBTREE + key) == null) {
-            return 0;
-        }
-        replicationCount = zkClient.getChildren().forPath(BLOBSTORE_SUBTREE + key).size();
-        return replicationCount;
-    }
-
-    @Override
-    public int updateBlobReplication(String key, int replication, Subject who) throws AuthorizationException, KeyNotFoundException {
-        throw new UnsupportedOperationException("Updating the replication of a blob is not supported by the local file system blob store. " +
-                "Please use the HDFS blob store to make this feature available.");
-    }
-
-    //This additional check and download is for nimbus high availability in case you have more than one nimbus
-    public synchronized boolean checkForBlobOrDownload(String key) {
-        boolean checkBlobDownload = false;
-        try {
-            List<String> keyList = BlobStoreUtils.getKeyListFromBlobStore(this);
-            if (!keyList.contains(key)) {
-                if (zkClient.checkExists().forPath(BLOBSTORE_SUBTREE + key) != null) {
-                    Set<NimbusInfo> nimbusSet = BlobStoreUtils.getNimbodesWithLatestSequenceNumberOfBlob(zkClient, key);
-                    if (BlobStoreUtils.downloadMissingBlob(conf, this, key, nimbusSet)) {
-                        LOG.debug("Updating blobs state");
-                        BlobStoreUtils.createStateInZookeeper(conf, key, nimbusInfo);
-                        checkBlobDownload = true;
-                    }
-                }
-            }
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-        return checkBlobDownload;
-    }
-
-    public synchronized void checkForBlobUpdate(String key) {
-        BlobStoreUtils.updateKeyForBlobStore(conf, this, zkClient, key, nimbusInfo);
-    }
-
-    public void fullCleanup(long age) throws IOException {
-        fbs.fullCleanup(age);
-    }
-}

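One detail worth pulling out of the code above: the replication count reported by getBlobReplication is simply the number of nimbus znodes registered under /blobstore/<key>. A minimal sketch of that lookup with Curator, mirroring the method minus its ACL check (the path prefix follows BLOBSTORE_SUBTREE above):

import org.apache.curator.framework.CuratorFramework;

public class ReplicationCountSketch {
    // Counts the nimbus znodes registered for a key, i.e. how many nimbodes hold the blob.
    static int replicationCount(CuratorFramework zkClient, String key) throws Exception {
        String path = "/blobstore/" + key;
        if (zkClient.checkExists().forPath(path) == null) {
            return 0;  // no nimbus has registered this blob yet
        }
        return zkClient.getChildren().forPath(path).size();
    }
}
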
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/blobstore/LocalFsBlobStoreFile.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/blobstore/LocalFsBlobStoreFile.java b/storm-core/src/jvm/backtype/storm/blobstore/LocalFsBlobStoreFile.java
deleted file mode 100644
index fb11fa6..0000000
--- a/storm-core/src/jvm/backtype/storm/blobstore/LocalFsBlobStoreFile.java
+++ /dev/null
@@ -1,159 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.blobstore;
-
-import backtype.storm.generated.SettableBlobMeta;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.file.Files;
-import java.nio.file.StandardCopyOption;
-import java.util.regex.Matcher;
-
-public class LocalFsBlobStoreFile extends BlobStoreFile {
-
-    private final String _key;
-    private final boolean _isTmp;
-    private final File _path;
-    private Long _modTime = null;
-    private final boolean _mustBeNew;
-    private SettableBlobMeta meta;
-
-    public LocalFsBlobStoreFile(File base, String name) {
-        if (BlobStoreFile.BLOBSTORE_DATA_FILE.equals(name)) {
-            _isTmp = false;
-        } else {
-            Matcher m = TMP_NAME_PATTERN.matcher(name);
-            if (!m.matches()) {
-                throw new IllegalArgumentException("File name does not match '"+name+"' !~ "+TMP_NAME_PATTERN);
-            }
-            _isTmp = true;
-        }
-        _key = base.getName();
-        _path = new File(base, name);
-        _mustBeNew = false;
-    }
-
-    public LocalFsBlobStoreFile(File base, boolean isTmp, boolean mustBeNew) {
-        _key = base.getName();
-        _isTmp = isTmp;
-        _mustBeNew = mustBeNew;
-        if (_isTmp) {
-            _path = new File(base, System.currentTimeMillis()+TMP_EXT);
-        } else {
-            _path = new File(base, BlobStoreFile.BLOBSTORE_DATA_FILE);
-        }
-    }
-
-    @Override
-    public void delete() throws IOException {
-        _path.delete();
-    }
-
-    @Override
-    public boolean isTmp() {
-        return _isTmp;
-    }
-
-    @Override
-    public String getKey() {
-        return _key;
-    }
-
-    @Override
-    public long getModTime() throws IOException {
-        if (_modTime == null) {
-            _modTime = _path.lastModified();
-        }
-        return _modTime;
-    }
-
-    @Override
-    public InputStream getInputStream() throws IOException {
-        if (isTmp()) {
-            throw new IllegalStateException("Cannot read from a temporary part file.");
-        }
-        return new FileInputStream(_path);
-    }
-
-    @Override
-    public OutputStream getOutputStream() throws IOException {
-        if (!isTmp()) {
-            throw new IllegalStateException("Can only write to a temporary part file.");
-        }
-        boolean success = false;
-        try {
-            success = _path.createNewFile();
-        } catch (IOException e) {
-            //Try to create the parent directory, may not work
-            _path.getParentFile().mkdirs();
-            success = _path.createNewFile();
-        }
-        if (!success) {
-            throw new IOException(_path+" already exists");
-        }
-        return new FileOutputStream(_path);
-    }
-
-    @Override
-    public void commit() throws IOException {
-        if (!isTmp()) {
-            throw new IllegalStateException("Can only write to a temporary part file.");
-        }
-
-        File dest = new File(_path.getParentFile(), BlobStoreFile.BLOBSTORE_DATA_FILE);
-        if (_mustBeNew) {
-            Files.move(_path.toPath(), dest.toPath(), StandardCopyOption.ATOMIC_MOVE);
-        } else {
-            Files.move(_path.toPath(), dest.toPath(), StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
-        }
-    }
-
-    @Override
-    public void cancel() throws IOException {
-        if (!isTmp()) {
-            throw new IllegalStateException("Can only write to a temporary part file.");
-        }
-        delete();
-    }
-
-    @Override
-    public SettableBlobMeta getMetadata () {
-        return meta;
-    }
-
-    @Override
-    public void setMetadata (SettableBlobMeta meta) {
-        this.meta = meta;
-    }
-
-    @Override
-    public String toString() {
-        return _path+":"+(_isTmp ? "tmp": BlobStoreFile.BLOBSTORE_DATA_FILE)+":"+_key;
-    }
-
-    @Override
-    public long getFileLength() {
-        return _path.length();
-    }
-}
-

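The write path above follows a write-to-tmp-then-atomic-move pattern so that readers never observe a partially written data file; a stand-alone sketch of the same idea (paths and file names are illustrative, not the real TMP_EXT or BLOBSTORE_DATA_FILE constants):

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.StandardCopyOption;

public class TmpThenCommitSketch {
    public static void main(String[] args) throws IOException {
        File keyDir = new File("/tmp/blobstore-sketch/42/example-key");  // illustrative
        keyDir.mkdirs();

        // 1. Write the new contents to a timestamped temporary part file.
        File tmp = new File(keyDir, System.currentTimeMillis() + ".tmp");
        try (OutputStream out = new FileOutputStream(tmp)) {
            out.write("new blob contents".getBytes("UTF-8"));
        }

        // 2. Commit by atomically moving the tmp file over the data file.
        File dest = new File(keyDir, "data");
        Files.move(tmp.toPath(), dest.toPath(),
                StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
    }
}
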
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/blobstore/NimbusBlobStore.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/blobstore/NimbusBlobStore.java b/storm-core/src/jvm/backtype/storm/blobstore/NimbusBlobStore.java
deleted file mode 100644
index 334e6bb..0000000
--- a/storm-core/src/jvm/backtype/storm/blobstore/NimbusBlobStore.java
+++ /dev/null
@@ -1,420 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.blobstore;
-
-import backtype.storm.Config;
-import backtype.storm.generated.AuthorizationException;
-import backtype.storm.generated.BeginDownloadResult;
-import backtype.storm.generated.ListBlobsResult;
-import backtype.storm.generated.ReadableBlobMeta;
-import backtype.storm.generated.SettableBlobMeta;
-import backtype.storm.generated.KeyAlreadyExistsException;
-import backtype.storm.generated.KeyNotFoundException;
-import backtype.storm.utils.NimbusClient;
-import backtype.storm.utils.Utils;
-import org.apache.thrift.TException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.NoSuchElementException;
-
-/**
- * NimbusBlobStore is a USER-facing client API to perform
- * basic operations such as create, update, delete and read
- * against the local and HDFS blob stores.
- *
- * For the local blob store it is also the client-facing API used by the
- * supervisor to download blobs from nimbus.
- */
-public class NimbusBlobStore extends ClientBlobStore {
-    private static final Logger LOG = LoggerFactory.getLogger(NimbusBlobStore.class);
-
-    public class NimbusKeyIterator implements Iterator<String> {
-        private ListBlobsResult listBlobs = null;
-        private int offset = 0;
-        private boolean eof = false;
-
-        public NimbusKeyIterator(ListBlobsResult listBlobs) {
-            this.listBlobs = listBlobs;
-            this.eof = (listBlobs.get_keys_size() == 0);
-        }
-
-        private boolean isCacheEmpty() {
-            return listBlobs.get_keys_size() <= offset;
-        }
-
-        private void readMore() throws TException {
-            if (!eof) {
-                offset = 0;
-                synchronized(client) {
-                    listBlobs = client.getClient().listBlobs(listBlobs.get_session());
-                }
-                if (listBlobs.get_keys_size() == 0) {
-                    eof = true;
-                }
-            }
-        }
-
-        @Override
-        public synchronized boolean hasNext() {
-            try {
-                if (isCacheEmpty()) {
-                    readMore();
-                }
-            } catch (TException e) {
-                throw new RuntimeException(e);
-            }
-            return !eof;
-        }
-
-        @Override
-        public synchronized String next() {
-            if (!hasNext()) {
-                throw new NoSuchElementException();
-            }
-            String ret = listBlobs.get_keys().get(offset);
-            offset++;
-            return ret;
-        }
-
-        @Override
-        public void remove() {
-            throw new UnsupportedOperationException("Delete Not Supported");
-        }
-    }
-
-    public class NimbusDownloadInputStream extends InputStreamWithMeta {
-        private BeginDownloadResult beginBlobDownload;
-        private byte[] buffer = null;
-        private int offset = 0;
-        private int end = 0;
-        private boolean eof = false;
-
-        public NimbusDownloadInputStream(BeginDownloadResult beginBlobDownload) {
-            this.beginBlobDownload = beginBlobDownload;
-        }
-
-        @Override
-        public long getVersion() throws IOException {
-            return beginBlobDownload.get_version();
-        }
-
-        @Override
-        public synchronized int read() throws IOException {
-            try {
-                if (isEmpty()) {
-                    readMore();
-                    if (eof) {
-                        return -1;
-                    }
-                }
-                int length = Math.min(1, available());
-                if (length == 0) {
-                    return -1;
-                }
-                int ret = buffer[offset];
-                offset += length;
-                return ret;
-            } catch(TException exp) {
-                throw new IOException(exp);
-            }
-        }
-
-        @Override
-        public synchronized int read(byte[] b, int off, int len) throws IOException {
-            try {
-                if (isEmpty()) {
-                    readMore();
-                    if (eof) {
-                        return -1;
-                    }
-                }
-                int length = Math.min(len, available());
-                System.arraycopy(buffer, offset, b, off, length);
-                offset += length;
-                return length;
-            } catch(TException exp) {
-                throw new IOException(exp);
-            }
-        }
-
-        private boolean isEmpty() {
-            return buffer == null || offset >= end;
-        }
-
-        private void readMore() throws TException {
-            if (!eof) {
-                ByteBuffer buff;
-                synchronized(client) {
-                    buff = client.getClient().downloadBlobChunk(beginBlobDownload.get_session());
-                }
-                buffer = buff.array();
-                offset = buff.arrayOffset() + buff.position();
-                int length = buff.remaining();
-                end = offset + length;
-                if (length == 0) {
-                    eof = true;
-                }
-            }
-        }
-
-        @Override
-        public synchronized int read(byte[] b) throws IOException {
-            return read(b, 0, b.length);
-        }
-
-        @Override
-        public synchronized int available() {
-            return buffer == null ? 0 : (end - offset);
-        }
-
-        @Override
-        public long getFileLength() {
-            return beginBlobDownload.get_data_size();
-        }
-    }
-
-    public class NimbusUploadAtomicOutputStream extends AtomicOutputStream {
-        private String session;
-        private int maxChunkSize = 4096;
-        private String key;
-
-        public NimbusUploadAtomicOutputStream(String session, int bufferSize, String key) {
-            this.session = session;
-            this.maxChunkSize = bufferSize;
-            this.key = key;
-        }
-
-        @Override
-        public void cancel() throws IOException {
-            try {
-                synchronized(client) {
-                    client.getClient().cancelBlobUpload(session);
-                }
-            } catch (TException e) {
-                throw new RuntimeException(e);
-            }
-        }
-
-        @Override
-        public void write(int b) throws IOException {
-            try {
-                synchronized(client) {
-                    client.getClient().uploadBlobChunk(session, ByteBuffer.wrap(new byte[] {(byte)b}));
-                }
-            } catch (TException e) {
-                throw new RuntimeException(e);
-            }
-        }
-
-        @Override
-        public void write(byte []b) throws IOException {
-            write(b, 0, b.length);
-        }
-
-        @Override
-        public void write(byte []b, int offset, int len) throws IOException {
-            try {
-                int end = offset + len;
-                for (int realOffset = offset; realOffset < end; realOffset += maxChunkSize) {
-                    int realLen = Math.min(end - realOffset, maxChunkSize);
-                    LOG.debug("Writing {} bytes of {} remaining",realLen,(end-realOffset));
-                    synchronized(client) {
-                        client.getClient().uploadBlobChunk(session, ByteBuffer.wrap(b, realOffset, realLen));
-                    }
-                }
-            } catch (TException e) {
-                throw new RuntimeException(e);
-            }
-        }
-
-        @Override
-        public void close() throws IOException {
-            try {
-                synchronized(client) {
-                    client.getClient().finishBlobUpload(session);
-                    client.getClient().createStateInZookeeper(key);
-                }
-            } catch (TException e) {
-                throw new RuntimeException(e);
-            }
-        }
-    }
-
-    private NimbusClient client;
-    private int bufferSize = 4096;
-
-    @Override
-    public void prepare(Map conf) {
-        this.client = NimbusClient.getConfiguredClient(conf);
-        if (conf != null) {
-            this.bufferSize = Utils.getInt(conf.get(Config.STORM_BLOBSTORE_INPUTSTREAM_BUFFER_SIZE_BYTES), bufferSize);
-        }
-    }
-
-    @Override
-    protected AtomicOutputStream createBlobToExtend(String key, SettableBlobMeta meta)
-            throws AuthorizationException, KeyAlreadyExistsException {
-        try {
-            synchronized(client) {
-                return new NimbusUploadAtomicOutputStream(client.getClient().beginCreateBlob(key, meta), this.bufferSize, key);
-            }
-        } catch (AuthorizationException | KeyAlreadyExistsException exp) {
-            throw exp;
-        } catch (TException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    @Override
-    public AtomicOutputStream updateBlob(String key)
-            throws AuthorizationException, KeyNotFoundException {
-        try {
-            synchronized(client) {
-                return new NimbusUploadAtomicOutputStream(client.getClient().beginUpdateBlob(key), this.bufferSize, key);
-            }
-        } catch (AuthorizationException | KeyNotFoundException exp) {
-            throw exp;
-        } catch (TException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    @Override
-    public ReadableBlobMeta getBlobMeta(String key) throws AuthorizationException, KeyNotFoundException {
-        try {
-            synchronized(client) {
-                return client.getClient().getBlobMeta(key);
-            }
-        } catch (AuthorizationException | KeyNotFoundException exp) {
-            throw exp;
-        } catch (TException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    @Override
-    protected void setBlobMetaToExtend(String key, SettableBlobMeta meta)
-            throws AuthorizationException, KeyNotFoundException {
-        try {
-            synchronized(client) {
-                client.getClient().setBlobMeta(key, meta);
-            }
-        } catch (AuthorizationException | KeyNotFoundException exp) {
-            throw exp;
-        } catch (TException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    @Override
-    public void deleteBlob(String key) throws AuthorizationException, KeyNotFoundException {
-        try {
-            synchronized(client) {
-                client.getClient().deleteBlob(key);
-            }
-        } catch (AuthorizationException | KeyNotFoundException exp) {
-            throw exp;
-        } catch (TException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    @Override
-    public void createStateInZookeeper(String key) {
-        try {
-            synchronized(client) {
-                client.getClient().createStateInZookeeper(key);
-            }
-        } catch (TException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    @Override
-    public InputStreamWithMeta getBlob(String key) throws AuthorizationException, KeyNotFoundException {
-        try {
-            synchronized(client) {
-                return new NimbusDownloadInputStream(client.getClient().beginBlobDownload(key));
-            }
-        } catch (AuthorizationException | KeyNotFoundException exp) {
-            throw exp;
-        } catch (TException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    @Override
-    public Iterator<String> listKeys() {
-        try {
-            synchronized(client) {
-                return new NimbusKeyIterator(client.getClient().listBlobs(""));
-            }
-        } catch (TException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    @Override
-    public int getBlobReplication(String key) throws AuthorizationException, KeyNotFoundException {
-        try {
-            return client.getClient().getBlobReplication(key);
-        } catch (AuthorizationException | KeyNotFoundException exp) {
-            throw exp;
-        } catch (TException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    @Override
-    public int updateBlobReplication(String key, int replication) throws AuthorizationException, KeyNotFoundException {
-        try {
-            return client.getClient().updateBlobReplication(key, replication);
-        } catch (AuthorizationException | KeyNotFoundException exp) {
-            throw exp;
-        } catch (TException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    @Override
-    public boolean setClient(Map conf, NimbusClient client) {
-        this.client = client;
-        if (conf != null) {
-            this.bufferSize = Utils.getInt(conf.get(Config.STORM_BLOBSTORE_INPUTSTREAM_BUFFER_SIZE_BYTES), bufferSize);
-        }
-        return true;
-    }
-
-    @Override
-    protected void finalize() {
-        shutdown();
-    }
-
-    @Override
-    public void shutdown() {
-        if (client != null) {
-            client.close();
-            client = null;
-        }
-    }
-}
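
For orientation, here is a minimal sketch of how the client-side blob store removed above might be driven. The enclosing class name (assumed here to be backtype.storm.blobstore.NimbusBlobStore) and the use of Utils.readStormConfig() are assumptions for illustration; prepare(), listKeys(), getBlob() and shutdown() are the methods shown in the diff.

    import java.io.InputStream;
    import java.util.Iterator;
    import java.util.Map;

    import backtype.storm.blobstore.NimbusBlobStore; // assumed enclosing class of the streams above
    import backtype.storm.utils.Utils;

    public class BlobStoreClientSketch {
        public static void main(String[] args) throws Exception {
            NimbusBlobStore store = new NimbusBlobStore();
            Map conf = Utils.readStormConfig();
            store.prepare(conf);                          // opens the NimbusClient used by the streams above
            try {
                Iterator<String> keys = store.listKeys(); // backed by NimbusKeyIterator
                while (keys.hasNext()) {
                    String key = keys.next();
                    try (InputStream in = store.getBlob(key)) { // a NimbusDownloadInputStream
                        byte[] buf = new byte[4096];
                        int n;
                        while ((n = in.read(buf)) != -1) {
                            // consume n bytes of the blob here
                        }
                    }
                }
            } finally {
                store.shutdown();                         // closes the underlying NimbusClient
            }
        }
    }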

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/clojure/ClojureBolt.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/clojure/ClojureBolt.java b/storm-core/src/jvm/backtype/storm/clojure/ClojureBolt.java
deleted file mode 100644
index 5de9bde..0000000
--- a/storm-core/src/jvm/backtype/storm/clojure/ClojureBolt.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.clojure;
-
-import backtype.storm.coordination.CoordinatedBolt.FinishedCallback;
-import backtype.storm.generated.StreamInfo;
-import backtype.storm.task.IBolt;
-import backtype.storm.task.OutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.IRichBolt;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Fields;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.utils.Utils;
-import clojure.lang.IFn;
-import clojure.lang.PersistentArrayMap;
-import clojure.lang.Keyword;
-import clojure.lang.Symbol;
-import clojure.lang.RT;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-
-public class ClojureBolt implements IRichBolt, FinishedCallback {
-    Map<String, StreamInfo> _fields;
-    List<String> _fnSpec;
-    List<String> _confSpec;
-    List<Object> _params;
-    
-    IBolt _bolt;
-    
-    public ClojureBolt(List fnSpec, List confSpec, List<Object> params, Map<String, StreamInfo> fields) {
-        _fnSpec = fnSpec;
-        _confSpec = confSpec;
-        _params = params;
-        _fields = fields;
-    }
-
-    @Override
-    public void prepare(final Map stormConf, final TopologyContext context, final OutputCollector collector) {
-        IFn hof = Utils.loadClojureFn(_fnSpec.get(0), _fnSpec.get(1));
-        try {
-            IFn preparer = (IFn) hof.applyTo(RT.seq(_params));
-            final Map<Keyword,Object> collectorMap = new PersistentArrayMap( new Object[] {
-                Keyword.intern(Symbol.create("output-collector")), collector,
-                Keyword.intern(Symbol.create("context")), context});
-            List<Object> args = new ArrayList<Object>() {{
-                add(stormConf);
-                add(context);
-                add(collectorMap);
-            }};
-            
-            _bolt = (IBolt) preparer.applyTo(RT.seq(args));
-            //this is kind of unnecessary for clojure
-            try {
-                _bolt.prepare(stormConf, context, collector);
-            } catch(AbstractMethodError ame) {
-                
-            }
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    @Override
-    public void execute(Tuple input) {
-        _bolt.execute(input);
-    }
-
-    @Override
-    public void cleanup() {
-            try {
-                _bolt.cleanup();
-            } catch(AbstractMethodError ame) {
-                
-            }
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-        for(String stream: _fields.keySet()) {
-            StreamInfo info = _fields.get(stream);
-            declarer.declareStream(stream, info.is_direct(), new Fields(info.get_output_fields()));
-        }
-    }
-
-    @Override
-    public void finishedId(Object id) {
-        if(_bolt instanceof FinishedCallback) {
-            ((FinishedCallback) _bolt).finishedId(id);
-        }
-    }
-
-    @Override
-    public Map<String, Object> getComponentConfiguration() {
-        IFn hof = Utils.loadClojureFn(_confSpec.get(0), _confSpec.get(1));
-        try {
-            return (Map) hof.applyTo(RT.seq(_params));
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/clojure/ClojureSpout.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/clojure/ClojureSpout.java b/storm-core/src/jvm/backtype/storm/clojure/ClojureSpout.java
deleted file mode 100644
index f6422e3..0000000
--- a/storm-core/src/jvm/backtype/storm/clojure/ClojureSpout.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.clojure;
-
-import backtype.storm.generated.StreamInfo;
-import backtype.storm.spout.ISpout;
-import backtype.storm.spout.SpoutOutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.topology.IRichSpout;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Fields;
-import backtype.storm.utils.Utils;
-import clojure.lang.IFn;
-import clojure.lang.PersistentArrayMap;
-import clojure.lang.Keyword;
-import clojure.lang.Symbol;
-import clojure.lang.RT;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-public class ClojureSpout implements IRichSpout {
-    Map<String, StreamInfo> _fields;
-    List<String> _fnSpec;
-    List<String> _confSpec;
-    List<Object> _params;
-    
-    ISpout _spout;
-    
-    public ClojureSpout(List fnSpec, List confSpec, List<Object> params, Map<String, StreamInfo> fields) {
-        _fnSpec = fnSpec;
-        _confSpec = confSpec;
-        _params = params;
-        _fields = fields;
-    }
-    
-
-    @Override
-    public void open(final Map conf, final TopologyContext context, final SpoutOutputCollector collector) {
-        IFn hof = Utils.loadClojureFn(_fnSpec.get(0), _fnSpec.get(1));
-        try {
-            IFn preparer = (IFn) hof.applyTo(RT.seq(_params));
-            final Map<Keyword,Object> collectorMap = new PersistentArrayMap( new Object[] {
-                Keyword.intern(Symbol.create("output-collector")), collector,
-                Keyword.intern(Symbol.create("context")), context});
-            List<Object> args = new ArrayList<Object>() {{
-                add(conf);
-                add(context);
-                add(collectorMap);
-            }};
-            
-            _spout = (ISpout) preparer.applyTo(RT.seq(args));
-            //this is kind of unnecessary for clojure
-            try {
-                _spout.open(conf, context, collector);
-            } catch(AbstractMethodError ame) {
-                
-            }
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    @Override
-    public void close() {
-        try {
-            _spout.close();
-        } catch(AbstractMethodError ame) {
-                
-        }
-    }
-
-    @Override
-    public void nextTuple() {
-        try {
-            _spout.nextTuple();
-        } catch(AbstractMethodError ame) {
-                
-        }
-
-    }
-
-    @Override
-    public void ack(Object msgId) {
-        try {
-            _spout.ack(msgId);
-        } catch(AbstractMethodError ame) {
-                
-        }
-
-    }
-
-    @Override
-    public void fail(Object msgId) {
-        try {
-            _spout.fail(msgId);
-        } catch(AbstractMethodError ame) {
-                
-        }
-
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-        for(String stream: _fields.keySet()) {
-            StreamInfo info = _fields.get(stream);
-            declarer.declareStream(stream, info.is_direct(), new Fields(info.get_output_fields()));
-        }
-    }
-    
-    @Override
-    public Map<String, Object> getComponentConfiguration() {
-        IFn hof = Utils.loadClojureFn(_confSpec.get(0), _confSpec.get(1));
-        try {
-            return (Map) hof.applyTo(RT.seq(_params));
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    @Override
-    public void activate() {
-        try {
-            _spout.activate();
-        } catch(AbstractMethodError ame) {
-                
-        }
-    }
-
-    @Override
-    public void deactivate() {
-        try {
-            _spout.deactivate();
-        } catch(AbstractMethodError ame) {
-                
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/clojure/RichShellBolt.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/clojure/RichShellBolt.java b/storm-core/src/jvm/backtype/storm/clojure/RichShellBolt.java
deleted file mode 100644
index a155008..0000000
--- a/storm-core/src/jvm/backtype/storm/clojure/RichShellBolt.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.clojure;
-
-import backtype.storm.generated.StreamInfo;
-import backtype.storm.task.ShellBolt;
-import backtype.storm.topology.IRichBolt;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Fields;
-import java.util.Map;
-
-public class RichShellBolt extends ShellBolt implements IRichBolt {
-    private Map<String, StreamInfo> _outputs;
-    
-    public RichShellBolt(String[] command, Map<String, StreamInfo> outputs) {
-        super(command);
-        _outputs = outputs;
-    }
-    
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-        for(String stream: _outputs.keySet()) {
-            StreamInfo def = _outputs.get(stream);
-            if(def.is_direct()) {
-                declarer.declareStream(stream, true, new Fields(def.get_output_fields()));
-            } else {
-                declarer.declareStream(stream, new Fields(def.get_output_fields()));                
-            }
-        }
-    }
-
-    @Override
-    public Map<String, Object> getComponentConfiguration() {
-        return null;
-    }    
-}
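
As a quick orientation for the shell adapters being removed here, the sketch below shows how RichShellBolt could be wired into a topology: one StreamInfo per declared stream, keyed by stream id. The stream name "default", the python command and script, the component ids, and the TopologyBuilder wiring are illustrative assumptions, not taken from the diff.

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.Map;

    import backtype.storm.clojure.RichShellBolt;
    import backtype.storm.generated.StreamInfo;
    import backtype.storm.topology.TopologyBuilder;

    public class ShellBoltSketch {
        public static void main(String[] args) {
            // One declared output stream with two fields on a non-direct stream.
            StreamInfo info = new StreamInfo();
            info.set_output_fields(Arrays.asList("word", "count"));
            info.set_direct(false);

            Map<String, StreamInfo> outputs = new HashMap<String, StreamInfo>();
            outputs.put("default", info);

            // The command and script name are hypothetical; RichShellBolt just forwards them to ShellBolt.
            RichShellBolt bolt = new RichShellBolt(new String[] {"python", "splitsentence.py"}, outputs);

            TopologyBuilder builder = new TopologyBuilder();
            builder.setBolt("split", bolt, 2).shuffleGrouping("words"); // "words" is an assumed upstream spout id
        }
    }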

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/jvm/backtype/storm/clojure/RichShellSpout.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/clojure/RichShellSpout.java b/storm-core/src/jvm/backtype/storm/clojure/RichShellSpout.java
deleted file mode 100644
index b49fbef..0000000
--- a/storm-core/src/jvm/backtype/storm/clojure/RichShellSpout.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.clojure;
-
-import backtype.storm.generated.StreamInfo;
-import backtype.storm.spout.ShellSpout;
-import backtype.storm.topology.IRichSpout;
-import backtype.storm.topology.OutputFieldsDeclarer;
-import backtype.storm.tuple.Fields;
-import java.util.Map;
-
-public class RichShellSpout extends ShellSpout implements IRichSpout {
-    private Map<String, StreamInfo> _outputs;
-
-    public RichShellSpout(String[] command, Map<String, StreamInfo> outputs) {
-        super(command);
-        _outputs = outputs;
-    }
-
-    @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) {
-        for(String stream: _outputs.keySet()) {
-            StreamInfo def = _outputs.get(stream);
-            if(def.is_direct()) {
-                declarer.declareStream(stream, true, new Fields(def.get_output_fields()));
-            } else {
-                declarer.declareStream(stream, new Fields(def.get_output_fields()));
-            }
-        }
-    }
-
-    @Override
-    public Map<String, Object> getComponentConfiguration() {
-        return null;
-    }
-}


[27/53] [abbrv] [partial] storm git commit: STORM-1202: Migrate APIs to org.apache.storm, but try to provide some form of backwards compatability

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/daemon/worker.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/daemon/worker.clj b/storm-core/src/clj/backtype/storm/daemon/worker.clj
deleted file mode 100644
index f265bb2..0000000
--- a/storm-core/src/clj/backtype/storm/daemon/worker.clj
+++ /dev/null
@@ -1,763 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.daemon.worker
-  (:use [backtype.storm.daemon common])
-  (:use [backtype.storm config log util timer local-state])
-  (:require [clj-time.core :as time])
-  (:require [clj-time.coerce :as coerce])
-  (:require [backtype.storm.daemon [executor :as executor]])
-  (:require [backtype.storm [disruptor :as disruptor] [cluster :as cluster]])
-  (:require [clojure.set :as set])
-  (:require [backtype.storm.messaging.loader :as msg-loader])
-  (:import [java.util.concurrent Executors]
-           [backtype.storm.hooks IWorkerHook BaseWorkerHook])
-  (:import [java.util ArrayList HashMap])
-  (:import [backtype.storm.utils Utils TransferDrainer ThriftTopologyUtils WorkerBackpressureThread DisruptorQueue])
-  (:import [backtype.storm.grouping LoadMapping])
-  (:import [backtype.storm.messaging TransportFactory])
-  (:import [backtype.storm.messaging TaskMessage IContext IConnection ConnectionWithStatus ConnectionWithStatus$Status])
-  (:import [backtype.storm.daemon Shutdownable])
-  (:import [backtype.storm.serialization KryoTupleSerializer])
-  (:import [backtype.storm.generated StormTopology])
-  (:import [backtype.storm.tuple AddressedTuple Fields])
-  (:import [backtype.storm.task WorkerTopologyContext])
-  (:import [backtype.storm Constants])
-  (:import [backtype.storm.security.auth AuthUtils])
-  (:import [backtype.storm.cluster ClusterStateContext DaemonType])
-  (:import [javax.security.auth Subject])
-  (:import [java.security PrivilegedExceptionAction])
-  (:import [org.apache.logging.log4j LogManager])
-  (:import [org.apache.logging.log4j Level])
-  (:import [org.apache.logging.log4j.core.config LoggerConfig])
-  (:import [backtype.storm.generated LogConfig LogLevelAction])
-  (:gen-class))
-
-(defmulti mk-suicide-fn cluster-mode)
-
-(defn read-worker-executors [storm-conf storm-cluster-state storm-id assignment-id port assignment-versions]
-  (log-message "Reading Assignments.")
-  (let [assignment (:executor->node+port (.assignment-info storm-cluster-state storm-id nil))]
-    (doall
-     (concat
-      [Constants/SYSTEM_EXECUTOR_ID]
-      (mapcat (fn [[executor loc]]
-                (if (= loc [assignment-id port])
-                  [executor]
-                  ))
-              assignment)))))
-
-(defnk do-executor-heartbeats [worker :executors nil]
-  ;; stats is how we know what executors are assigned to this worker 
-  (let [stats (if-not executors
-                  (into {} (map (fn [e] {e nil}) (:executors worker)))
-                  (->> executors
-                    (map (fn [e] {(executor/get-executor-id e) (executor/render-stats e)}))
-                    (apply merge)))
-        zk-hb {:storm-id (:storm-id worker)
-               :executor-stats stats
-               :uptime ((:uptime worker))
-               :time-secs (current-time-secs)
-               }]
-    ;; do the zookeeper heartbeat
-    (.worker-heartbeat! (:storm-cluster-state worker) (:storm-id worker) (:assignment-id worker) (:port worker) zk-hb)
-    ))
-
-(defn do-heartbeat [worker]
-  (let [conf (:conf worker)
-        state (worker-state conf (:worker-id worker))]
-    ;; do the local-file-system heartbeat.
-    (ls-worker-heartbeat! state (current-time-secs) (:storm-id worker) (:executors worker) (:port worker))
-    (.cleanup state 60) ; this is just in case supervisor is down so that disk doesn't fill up.
-                         ; it shouldn't take supervisor 120 seconds between listing dir and reading it
-
-    ))
-
-(defn worker-outbound-tasks
-  "Returns seq of task-ids that receive messages from this worker"
-  [worker]
-  (let [context (worker-context worker)
-        components (mapcat
-                     (fn [task-id]
-                       (->> (.getComponentId context (int task-id))
-                            (.getTargets context)
-                            vals
-                            (map keys)
-                            (apply concat)))
-                     (:task-ids worker))]
-    (-> worker
-        :task->component
-        reverse-map
-        (select-keys components)
-        vals
-        flatten
-        set )))
-
-(defn get-dest
-  "get the destination for an AddressedTuple"
-  [^AddressedTuple addressed-tuple]
-  (.getDest addressed-tuple))
-
-(defn mk-transfer-local-fn [worker]
-  (let [short-executor-receive-queue-map (:short-executor-receive-queue-map worker)
-        task->short-executor (:task->short-executor worker)
-        task-getter (comp #(get task->short-executor %) get-dest)]
-    (fn [tuple-batch]
-      (let [grouped (fast-group-by task-getter tuple-batch)]
-        (fast-map-iter [[short-executor pairs] grouped]
-          (let [q (short-executor-receive-queue-map short-executor)]
-            (if q
-              (disruptor/publish q pairs)
-              (log-warn "Received invalid messages for unknown tasks. Dropping... ")
-              )))))))
-
-(defn- assert-can-serialize
-  "Check that all of the tuples can be serialized by serializing them."
-  [^KryoTupleSerializer serializer tuple-batch]
-  (fast-list-iter [[task tuple :as pair] tuple-batch]
-    (.serialize serializer tuple)))
-
-(defn- mk-backpressure-handler
-  "make a handler that checks and updates worker's backpressure flag"
-  [executors]
-  (disruptor/worker-backpressure-handler
-    (fn [worker]
-      (let [storm-id (:storm-id worker)
-            assignment-id (:assignment-id worker)
-            port (:port worker)
-            storm-cluster-state (:storm-cluster-state worker)
-            prev-backpressure-flag @(:backpressure worker)]
-        (when executors
-          (reset! (:backpressure worker)
-                  (or @(:transfer-backpressure worker)
-                      (reduce #(or %1 %2) (map #(.get-backpressure-flag %1) executors)))))
-        ;; update the worker's backpressure flag to zookeeper only when it has changed
-        (log-debug "BP " @(:backpressure worker) " WAS " prev-backpressure-flag)
-        (when (not= prev-backpressure-flag @(:backpressure worker))
-          (.worker-backpressure! storm-cluster-state storm-id assignment-id port @(:backpressure worker)))
-        ))))
-
-(defn- mk-disruptor-backpressure-handler
-  "make a handler for the worker's send disruptor queue to
-  check highWaterMark and lowWaterMark for backpressure"
-  [worker]
-  (disruptor/disruptor-backpressure-handler
-    (fn []
-      (reset! (:transfer-backpressure worker) true)
-      (WorkerBackpressureThread/notifyBackpressureChecker (:backpressure-trigger worker)))
-    (fn []
-      (reset! (:transfer-backpressure worker) false)
-      (WorkerBackpressureThread/notifyBackpressureChecker (:backpressure-trigger worker)))))
-
-(defn mk-transfer-fn [worker]
-  (let [local-tasks (-> worker :task-ids set)
-        local-transfer (:transfer-local-fn worker)
-        ^DisruptorQueue transfer-queue (:transfer-queue worker)
-        task->node+port (:cached-task->node+port worker)
-        try-serialize-local ((:storm-conf worker) TOPOLOGY-TESTING-ALWAYS-TRY-SERIALIZE)
-
-        transfer-fn
-          (fn [^KryoTupleSerializer serializer tuple-batch]
-            (let [^ArrayList local (ArrayList.)
-                  ^HashMap remoteMap (HashMap.)]
-              (fast-list-iter [^AddressedTuple addressed-tuple tuple-batch]
-                (let [task (.getDest addressed-tuple)
-                      tuple (.getTuple addressed-tuple)]
-                  (if (local-tasks task)
-                    (.add local addressed-tuple)
-
-                    ;;Using java objects directly to avoid performance issues in java code
-                    (do
-                      (when (not (.get remoteMap task))
-                        (.put remoteMap task (ArrayList.)))
-                      (let [^ArrayList remote (.get remoteMap task)]
-                        (if (not-nil? task)
-                          (.add remote (TaskMessage. task ^bytes (.serialize serializer tuple)))
-                          (log-warn "Can't transfer tuple - task value is nil. tuple type: " (pr-str (type tuple)) " and information: " (pr-str tuple)))
-                       )))))
-
-              (when (not (.isEmpty local)) (local-transfer local))
-              (when (not (.isEmpty remoteMap)) (disruptor/publish transfer-queue remoteMap))))]
-    (if try-serialize-local
-      (do
-        (log-warn "WILL TRY TO SERIALIZE ALL TUPLES (Turn off " TOPOLOGY-TESTING-ALWAYS-TRY-SERIALIZE " for production)")
-        (fn [^KryoTupleSerializer serializer tuple-batch]
-          (assert-can-serialize serializer tuple-batch)
-          (transfer-fn serializer tuple-batch)))
-      transfer-fn)))
-
-(defn- mk-receive-queue-map [storm-conf executors]
-  (->> executors
-       ;; TODO: this depends on the type of executor
-       (map (fn [e] [e (disruptor/disruptor-queue (str "receive-queue" e)
-                                                  (storm-conf TOPOLOGY-EXECUTOR-RECEIVE-BUFFER-SIZE)
-                                                  (storm-conf TOPOLOGY-DISRUPTOR-WAIT-TIMEOUT-MILLIS)
-                                                  :batch-size (storm-conf TOPOLOGY-DISRUPTOR-BATCH-SIZE)
-                                                  :batch-timeout (storm-conf TOPOLOGY-DISRUPTOR-BATCH-TIMEOUT-MILLIS))]))
-       (into {})
-       ))
-
-(defn- stream->fields [^StormTopology topology component]
-  (->> (ThriftTopologyUtils/getComponentCommon topology component)
-       .get_streams
-       (map (fn [[s info]] [s (Fields. (.get_output_fields info))]))
-       (into {})
-       (HashMap.)))
-
-(defn component->stream->fields [^StormTopology topology]
-  (->> (ThriftTopologyUtils/getComponentIds topology)
-       (map (fn [c] [c (stream->fields topology c)]))
-       (into {})
-       (HashMap.)))
-
-(defn- mk-default-resources [worker]
-  (let [conf (:conf worker)
-        thread-pool-size (int (conf TOPOLOGY-WORKER-SHARED-THREAD-POOL-SIZE))]
-    {WorkerTopologyContext/SHARED_EXECUTOR (Executors/newFixedThreadPool thread-pool-size)}
-    ))
-
-(defn- mk-user-resources [worker]
-  ;;TODO: need to invoke a hook provided by the topology, giving it a chance to create user resources.
-  ;; this would be part of the initialization hook
-  ;; need to separate workertopologycontext into WorkerContext and WorkerUserContext.
-  ;; actually just do it via interfaces. just need to make sure to hide setResource from tasks
-  {})
-
-(defn mk-halting-timer [timer-name]
-  (mk-timer :kill-fn (fn [t]
-                       (log-error t "Error when processing event")
-                       (exit-process! 20 "Error when processing an event")
-                       )
-            :timer-name timer-name))
-
-(defn worker-data [conf mq-context storm-id assignment-id port worker-id storm-conf cluster-state storm-cluster-state]
-  (let [assignment-versions (atom {})
-        executors (set (read-worker-executors storm-conf storm-cluster-state storm-id assignment-id port assignment-versions))
-        transfer-queue (disruptor/disruptor-queue "worker-transfer-queue" (storm-conf TOPOLOGY-TRANSFER-BUFFER-SIZE)
-                                                  (storm-conf TOPOLOGY-DISRUPTOR-WAIT-TIMEOUT-MILLIS)
-                                                  :batch-size (storm-conf TOPOLOGY-DISRUPTOR-BATCH-SIZE)
-                                                  :batch-timeout (storm-conf TOPOLOGY-DISRUPTOR-BATCH-TIMEOUT-MILLIS))
-        executor-receive-queue-map (mk-receive-queue-map storm-conf executors)
-
-        receive-queue-map (->> executor-receive-queue-map
-                               (mapcat (fn [[e queue]] (for [t (executor-id->tasks e)] [t queue])))
-                               (into {}))
-
-        topology (read-supervisor-topology conf storm-id)
-        mq-context  (if mq-context
-                      mq-context
-                      (TransportFactory/makeContext storm-conf))]
-
-    (recursive-map
-      :conf conf
-      :mq-context mq-context
-      :receiver (.bind ^IContext mq-context storm-id port)
-      :storm-id storm-id
-      :assignment-id assignment-id
-      :port port
-      :worker-id worker-id
-      :cluster-state cluster-state
-      :storm-cluster-state storm-cluster-state
-      ;; when the worker boots up, it will start to set up initial connections to
-      ;; other workers. When all connections are ready, we will enable this flag
-      ;; and the spouts and bolts will be activated.
-      :worker-active-flag (atom false)
-      :storm-active-atom (atom false)
-      :storm-component->debug-atom (atom {})
-      :executors executors
-      :task-ids (->> receive-queue-map keys (map int) sort)
-      :storm-conf storm-conf
-      :topology topology
-      :system-topology (system-topology! storm-conf topology)
-      :heartbeat-timer (mk-halting-timer "heartbeat-timer")
-      :refresh-load-timer (mk-halting-timer "refresh-load-timer")
-      :refresh-connections-timer (mk-halting-timer "refresh-connections-timer")
-      :refresh-credentials-timer (mk-halting-timer "refresh-credentials-timer")
-      :reset-log-levels-timer (mk-halting-timer "reset-log-levels-timer")
-      :refresh-active-timer (mk-halting-timer "refresh-active-timer")
-      :executor-heartbeat-timer (mk-halting-timer "executor-heartbeat-timer")
-      :user-timer (mk-halting-timer "user-timer")
-      :task->component (HashMap. (storm-task-info topology storm-conf)) ; for optimized access when used in tasks later on
-      :component->stream->fields (component->stream->fields (:system-topology <>))
-      :component->sorted-tasks (->> (:task->component <>) reverse-map (map-val sort))
-      :endpoint-socket-lock (mk-rw-lock)
-      :cached-node+port->socket (atom {})
-      :cached-task->node+port (atom {})
-      :transfer-queue transfer-queue
-      :executor-receive-queue-map executor-receive-queue-map
-      :short-executor-receive-queue-map (map-key first executor-receive-queue-map)
-      :task->short-executor (->> executors
-                                 (mapcat (fn [e] (for [t (executor-id->tasks e)] [t (first e)])))
-                                 (into {})
-                                 (HashMap.))
-      :suicide-fn (mk-suicide-fn conf)
-      :uptime (uptime-computer)
-      :default-shared-resources (mk-default-resources <>)
-      :user-shared-resources (mk-user-resources <>)
-      :transfer-local-fn (mk-transfer-local-fn <>)
-      :transfer-fn (mk-transfer-fn <>)
-      :load-mapping (LoadMapping.)
-      :assignment-versions assignment-versions
-      :backpressure (atom false) ;; whether this worker is going slow
-      :transfer-backpressure (atom false) ;; if the transfer queue is backed-up
-      :backpressure-trigger (atom false) ;; a trigger for synchronization with executors
-      :throttle-on (atom false) ;; whether throttle is activated for spouts
-      )))
-
-(defn- endpoint->string [[node port]]
-  (str port "/" node))
-
-(defn string->endpoint [^String s]
-  (let [[port-str node] (.split s "/" 2)]
-    [node (Integer/valueOf port-str)]
-    ))
-
-(def LOAD-REFRESH-INTERVAL-MS 5000)
-
-(defn mk-refresh-load [worker]
-  (let [local-tasks (set (:task-ids worker))
-        remote-tasks (set/difference (worker-outbound-tasks worker) local-tasks)
-        short-executor-receive-queue-map (:short-executor-receive-queue-map worker)
-        next-update (atom 0)]
-    (fn this
-      ([]
-        (let [^LoadMapping load-mapping (:load-mapping worker)
-              local-pop (map-val (fn [queue]
-                                   (let [q-metrics (.getMetrics queue)]
-                                     (/ (double (.population q-metrics)) (.capacity q-metrics))))
-                                 short-executor-receive-queue-map)
-              remote-load (reduce merge (for [[np conn] @(:cached-node+port->socket worker)] (into {} (.getLoad conn remote-tasks))))
-              now (System/currentTimeMillis)]
-          (.setLocal load-mapping local-pop)
-          (.setRemote load-mapping remote-load)
-          (when (> now @next-update)
-            (.sendLoadMetrics (:receiver worker) local-pop)
-            (reset! next-update (+ LOAD-REFRESH-INTERVAL-MS now))))))))
-
-(defn mk-refresh-connections [worker]
-  (let [outbound-tasks (worker-outbound-tasks worker)
-        conf (:conf worker)
-        storm-cluster-state (:storm-cluster-state worker)
-        storm-id (:storm-id worker)]
-    (fn this
-      ([]
-        (this (fn [& ignored] (schedule (:refresh-connections-timer worker) 0 this))))
-      ([callback]
-         (let [version (.assignment-version storm-cluster-state storm-id callback)
-               assignment (if (= version (:version (get @(:assignment-versions worker) storm-id)))
-                            (:data (get @(:assignment-versions worker) storm-id))
-                            (let [new-assignment (.assignment-info-with-version storm-cluster-state storm-id callback)]
-                              (swap! (:assignment-versions worker) assoc storm-id new-assignment)
-                              (:data new-assignment)))
-              my-assignment (-> assignment
-                                :executor->node+port
-                                to-task->node+port
-                                (select-keys outbound-tasks)
-                                (#(map-val endpoint->string %)))
-              ;; we don't need a connection for the local tasks anymore
-              needed-assignment (->> my-assignment
-                                      (filter-key (complement (-> worker :task-ids set))))
-              needed-connections (-> needed-assignment vals set)
-              needed-tasks (-> needed-assignment keys)
-
-              current-connections (set (keys @(:cached-node+port->socket worker)))
-              new-connections (set/difference needed-connections current-connections)
-              remove-connections (set/difference current-connections needed-connections)]
-              (swap! (:cached-node+port->socket worker)
-                     #(HashMap. (merge (into {} %1) %2))
-                     (into {}
-                       (dofor [endpoint-str new-connections
-                               :let [[node port] (string->endpoint endpoint-str)]]
-                         [endpoint-str
-                          (.connect
-                           ^IContext (:mq-context worker)
-                           storm-id
-                           ((:node->host assignment) node)
-                           port)
-                          ]
-                         )))
-              (write-locked (:endpoint-socket-lock worker)
-                (reset! (:cached-task->node+port worker)
-                        (HashMap. my-assignment)))
-              (doseq [endpoint remove-connections]
-                (.close (get @(:cached-node+port->socket worker) endpoint)))
-              (apply swap!
-                     (:cached-node+port->socket worker)
-                     #(HashMap. (apply dissoc (into {} %1) %&))
-                     remove-connections)
-
-           )))))
-
-(defn refresh-storm-active
-  ([worker]
-    (refresh-storm-active worker (fn [& ignored] (schedule (:refresh-active-timer worker) 0 (partial refresh-storm-active worker)))))
-  ([worker callback]
-    (let [base (.storm-base (:storm-cluster-state worker) (:storm-id worker) callback)]
-      (reset!
-        (:storm-active-atom worker)
-        (and (= :active (-> base :status :type)) @(:worker-active-flag worker)))
-      (reset! (:storm-component->debug-atom worker) (-> base :component->debug))
-      (log-debug "Event debug options " @(:storm-component->debug-atom worker)))))
-
-;; TODO: consider having a max batch size besides what disruptor does automagically to prevent latency issues
-(defn mk-transfer-tuples-handler [worker]
-  (let [^DisruptorQueue transfer-queue (:transfer-queue worker)
-        drainer (TransferDrainer.)
-        node+port->socket (:cached-node+port->socket worker)
-        task->node+port (:cached-task->node+port worker)
-        endpoint-socket-lock (:endpoint-socket-lock worker)
-        ]
-    (disruptor/clojure-handler
-      (fn [packets _ batch-end?]
-        (.add drainer packets)
-
-        (when batch-end?
-          (read-locked endpoint-socket-lock
-             (let [node+port->socket @node+port->socket
-                   task->node+port @task->node+port]
-               (.send drainer task->node+port node+port->socket)))
-          (.clear drainer))))))
-
-;; Check whether this messaging connection is ready to send data
-(defn is-connection-ready [^IConnection connection]
-  (if (instance?  ConnectionWithStatus connection)
-    (let [^ConnectionWithStatus connection connection
-          status (.status connection)]
-      (= status ConnectionWithStatus$Status/Ready))
-    true))
-
-;; all connections are ready
-(defn all-connections-ready [worker]
-    (let [connections (vals @(:cached-node+port->socket worker))]
-      (every? is-connection-ready connections)))
-
-;; we will wait for all connections to be ready and then activate the spouts/bolts
-;; when the worker boots up
-(defn activate-worker-when-all-connections-ready
-  [worker]
-  (let [timer (:refresh-active-timer worker)
-        delay-secs 0
-        recur-secs 1]
-    (schedule timer
-      delay-secs
-      (fn this []
-        (if (all-connections-ready worker)
-          (do
-            (log-message "All connections are ready for worker " (:assignment-id worker) ":" (:port worker)
-              " with id "(:worker-id worker))
-            (reset! (:worker-active-flag worker) true))
-          (schedule timer recur-secs this :check-active false)
-            )))))
-
-(defn register-callbacks [worker]
-  (log-message "Registering IConnectionCallbacks for " (:assignment-id worker) ":" (:port worker))
-  (msg-loader/register-callback (:transfer-local-fn worker)
-                                (:receiver worker)
-                                (:storm-conf worker)
-                                (worker-context worker)))
-
-(defn- close-resources [worker]
-  (let [dr (:default-shared-resources worker)]
-    (log-message "Shutting down default resources")
-    (.shutdownNow (get dr WorkerTopologyContext/SHARED_EXECUTOR))
-    (log-message "Shut down default resources")))
-
-(defn- get-logger-levels []
-  (into {}
-    (let [logger-config (.getConfiguration (LogManager/getContext false))]
-      (for [[logger-name logger] (.getLoggers logger-config)]
-        {logger-name (.getLevel logger)}))))
-
-(defn set-logger-level [logger-context logger-name new-level]
-  (let [config (.getConfiguration logger-context)
-        logger-config (.getLoggerConfig config logger-name)]
-    (if (not (= (.getName logger-config) logger-name))
-      ;; create a new config. Make it additive (true) so that it inherits
-      ;; the parent's appenders
-      (let [new-logger-config (LoggerConfig. logger-name new-level true)]
-        (log-message "Adding config for: " new-logger-config " with level: " new-level)
-        (.addLogger config logger-name new-logger-config))
-      (do
-        (log-message "Setting " logger-config " log level to: " new-level)
-        (.setLevel logger-config new-level)))))
-
-;; function called on timer to reset log levels last set to DEBUG
-;; also called from process-log-config-change
-(defn reset-log-levels [latest-log-config-atom]
-  (let [latest-log-config @latest-log-config-atom
-        logger-context (LogManager/getContext false)]
-    (doseq [[logger-name logger-setting] (sort latest-log-config)]
-      (let [timeout (:timeout logger-setting)
-            target-log-level (:target-log-level logger-setting)
-            reset-log-level (:reset-log-level logger-setting)]
-        (when (> (coerce/to-long (time/now)) timeout)
-          (log-message logger-name ": Resetting level to " reset-log-level) 
-          (set-logger-level logger-context logger-name reset-log-level)
-          (swap! latest-log-config-atom
-            (fn [prev]
-              (dissoc prev logger-name))))))
-    (.updateLoggers logger-context)))
-
-;; when a new log level is received from zookeeper, this function is called
-(defn process-log-config-change [latest-log-config original-log-levels log-config]
-  (when log-config
-    (log-debug "Processing received log config: " log-config)
-    ;; merge log configs together
-    (let [loggers (.get_named_logger_level log-config)
-          logger-context (LogManager/getContext false)]
-      (def new-log-configs
-        (into {}
-         ;; merge named log levels
-         (for [[msg-logger-name logger-level] loggers]
-           (let [logger-name (if (= msg-logger-name "ROOT")
-                                LogManager/ROOT_LOGGER_NAME
-                                msg-logger-name)]
-             ;; the new-timeouts map now contains logger => timeout 
-             (when (.is_set_reset_log_level_timeout_epoch logger-level)
-               {logger-name {:action (.get_action logger-level)
-                             :target-log-level (Level/toLevel (.get_target_log_level logger-level))
-                             :reset-log-level (or (.get @original-log-levels logger-name) (Level/INFO))
-                             :timeout (.get_reset_log_level_timeout_epoch logger-level)}})))))
-
-      ;; look for deleted log timeouts
-      (doseq [[logger-name logger-val] (sort @latest-log-config)]
-        (when (not (contains? new-log-configs logger-name))
-          ;; if we had a timeout, but the timeout is no longer active
-          (set-logger-level
-            logger-context logger-name (:reset-log-level logger-val))))
-
-      ;; apply new log settings we just received
-      ;; the merged configs are only for the reset logic
-      (doseq [[msg-logger-name logger-level] (sort (into {} (.get_named_logger_level log-config)))]
-        (let [logger-name (if (= msg-logger-name "ROOT")
-                                LogManager/ROOT_LOGGER_NAME
-                                msg-logger-name)
-              level (Level/toLevel (.get_target_log_level logger-level))
-              action (.get_action logger-level)]
-          (if (= action LogLevelAction/UPDATE)
-            (set-logger-level logger-context logger-name level))))
-   
-      (.updateLoggers logger-context)
-      (reset! latest-log-config new-log-configs)
-      (log-debug "New merged log config is " @latest-log-config))))
-
-(defn run-worker-start-hooks [worker]
-  (let [topology (:topology worker)
-        topo-conf (:storm-conf worker)
-        worker-topology-context (worker-context worker)
-        hooks (.get_worker_hooks topology)]
-    (dofor [hook hooks]
-      (let [hook-bytes (Utils/toByteArray hook)
-            deser-hook (Utils/javaDeserialize hook-bytes BaseWorkerHook)]
-        (.start deser-hook topo-conf worker-topology-context)))))
-
-(defn run-worker-shutdown-hooks [worker]
-  (let [topology (:topology worker)
-        hooks (.get_worker_hooks topology)]
-    (dofor [hook hooks]
-      (let [hook-bytes (Utils/toByteArray hook)
-            deser-hook (Utils/javaDeserialize hook-bytes BaseWorkerHook)]
-        (.shutdown deser-hook)))))
-
-;; TODO: should the worker even take the storm-id as input? this should be
-;; deducible from cluster state (by searching through assignments)
-;; what about if there's inconsistency in assignments? -> but nimbus
-;; should guarantee this consistency
-(defserverfn mk-worker [conf shared-mq-context storm-id assignment-id port worker-id]
-  (log-message "Launching worker for " storm-id " on " assignment-id ":" port " with id " worker-id
-               " and conf " conf)
-  (if-not (local-mode? conf)
-    (redirect-stdio-to-slf4j!))
-  ;; because in local mode, it's not a separate
-  ;; process; the supervisor will register it in this case
-  (when (= :distributed (cluster-mode conf))
-    (let [pid (process-pid)]
-      (touch (worker-pid-path conf worker-id pid))
-      (spit (worker-artifacts-pid-path conf storm-id port) pid)))
-
-  (declare establish-log-setting-callback)
-
-  ;; start out with empty list of timeouts 
-  (def latest-log-config (atom {}))
-  (def original-log-levels (atom {}))
-
-  (let [storm-conf (read-supervisor-storm-conf conf storm-id)
-        storm-conf (override-login-config-with-system-property storm-conf)
-        acls (Utils/getWorkerACL storm-conf)
-        cluster-state (cluster/mk-distributed-cluster-state conf :auth-conf storm-conf :acls acls :context (ClusterStateContext. DaemonType/WORKER))
-        storm-cluster-state (cluster/mk-storm-cluster-state cluster-state :acls acls)
-        initial-credentials (.credentials storm-cluster-state storm-id nil)
-        auto-creds (AuthUtils/GetAutoCredentials storm-conf)
-        subject (AuthUtils/populateSubject nil auto-creds initial-credentials)]
-      (Subject/doAs subject (reify PrivilegedExceptionAction
-        (run [this]
-          (let [worker (worker-data conf shared-mq-context storm-id assignment-id port worker-id storm-conf cluster-state storm-cluster-state)
-        heartbeat-fn #(do-heartbeat worker)
-
-        ;; do this here so that the worker process dies if this fails
-        ;; it's important that worker heartbeat to supervisor ASAP when launching so that the supervisor knows it's running (and can move on)
-        _ (heartbeat-fn)
-
-        executors (atom nil)
-        ;; launch heartbeat threads immediately so that slow-loading tasks don't cause the worker to timeout
-        ;; to the supervisor
-        _ (schedule-recurring (:heartbeat-timer worker) 0 (conf WORKER-HEARTBEAT-FREQUENCY-SECS) heartbeat-fn)
-        _ (schedule-recurring (:executor-heartbeat-timer worker) 0 (conf TASK-HEARTBEAT-FREQUENCY-SECS) #(do-executor-heartbeats worker :executors @executors))
-
-        _ (register-callbacks worker)
-
-        refresh-connections (mk-refresh-connections worker)
-        refresh-load (mk-refresh-load worker)
-
-        _ (refresh-connections nil)
-
-        _ (activate-worker-when-all-connections-ready worker)
-
-        _ (refresh-storm-active worker nil)
-
-        _ (run-worker-start-hooks worker)
-
-        _ (reset! executors (dofor [e (:executors worker)] (executor/mk-executor worker e initial-credentials)))
-
-        transfer-tuples (mk-transfer-tuples-handler worker)
-        
-        transfer-thread (disruptor/consume-loop* (:transfer-queue worker) transfer-tuples)               
-
-        disruptor-handler (mk-disruptor-backpressure-handler worker)
-        _ (.registerBackpressureCallback (:transfer-queue worker) disruptor-handler)
-        _ (-> (.setHighWaterMark (:transfer-queue worker) ((:storm-conf worker) BACKPRESSURE-DISRUPTOR-HIGH-WATERMARK))
-              (.setLowWaterMark ((:storm-conf worker) BACKPRESSURE-DISRUPTOR-LOW-WATERMARK))
-              (.setEnableBackpressure ((:storm-conf worker) TOPOLOGY-BACKPRESSURE-ENABLE)))
-        backpressure-handler (mk-backpressure-handler @executors)        
-        backpressure-thread (WorkerBackpressureThread. (:backpressure-trigger worker) worker backpressure-handler)
-        _ (if ((:storm-conf worker) TOPOLOGY-BACKPRESSURE-ENABLE) 
-            (.start backpressure-thread))
-        callback (fn cb [& ignored]
-                   (let [throttle-on (.topology-backpressure storm-cluster-state storm-id cb)]
-                     (reset! (:throttle-on worker) throttle-on)))
-        _ (if ((:storm-conf worker) TOPOLOGY-BACKPRESSURE-ENABLE)
-            (.topology-backpressure storm-cluster-state storm-id callback))
-
-        shutdown* (fn []
-                    (log-message "Shutting down worker " storm-id " " assignment-id " " port)
-                    (doseq [[_ socket] @(:cached-node+port->socket worker)]
-                      ;; this will do best effort flushing since the linger period
-                      ;; was set on creation
-                      (.close socket))
-                    (log-message "Terminating messaging context")
-                    (log-message "Shutting down executors")
-                    (doseq [executor @executors] (.shutdown executor))
-                    (log-message "Shut down executors")
-
-                    ;;this is fine because the only time this is shared is when it's a local context,
-                    ;;in which case it's a noop
-                    (.term ^IContext (:mq-context worker))
-                    (log-message "Shutting down transfer thread")
-                    (disruptor/halt-with-interrupt! (:transfer-queue worker))
-
-                    (.interrupt transfer-thread)
-                    (.join transfer-thread)
-                    (log-message "Shut down transfer thread")
-                    (.interrupt backpressure-thread)
-                    (.join backpressure-thread)
-                    (log-message "Shut down backpressure thread")
-                    (cancel-timer (:heartbeat-timer worker))
-                    (cancel-timer (:refresh-connections-timer worker))
-                    (cancel-timer (:refresh-credentials-timer worker))
-                    (cancel-timer (:refresh-active-timer worker))
-                    (cancel-timer (:executor-heartbeat-timer worker))
-                    (cancel-timer (:user-timer worker))
-                    (cancel-timer (:refresh-load-timer worker))
-
-                    (close-resources worker)
-
-                    (log-message "Trigger any worker shutdown hooks")
-                    (run-worker-shutdown-hooks worker)
-
-                    (.remove-worker-heartbeat! (:storm-cluster-state worker) storm-id assignment-id port)
-                    (log-message "Disconnecting from storm cluster state context")
-                    (.disconnect (:storm-cluster-state worker))
-                    (.close (:cluster-state worker))
-                    (log-message "Shut down worker " storm-id " " assignment-id " " port))
-        ret (reify
-             Shutdownable
-             (shutdown
-              [this]
-              (shutdown*))
-             DaemonCommon
-             (waiting? [this]
-               (and
-                 (timer-waiting? (:heartbeat-timer worker))
-                 (timer-waiting? (:refresh-connections-timer worker))
-                 (timer-waiting? (:refresh-load-timer worker))
-                 (timer-waiting? (:refresh-credentials-timer worker))
-                 (timer-waiting? (:refresh-active-timer worker))
-                 (timer-waiting? (:executor-heartbeat-timer worker))
-                 (timer-waiting? (:user-timer worker))
-                 ))
-             )
-        credentials (atom initial-credentials)
-        check-credentials-changed (fn []
-                                    (let [new-creds (.credentials (:storm-cluster-state worker) storm-id nil)]
-                                      (when-not (= new-creds @credentials) ;;This does not have to be atomic, worst case we update when one is not needed
-                                        (AuthUtils/updateSubject subject auto-creds new-creds)
-                                        (dofor [e @executors] (.credentials-changed e new-creds))
-                                        (reset! credentials new-creds))))
-       check-throttle-changed (fn []
-                                (let [callback (fn cb [& ignored]
-                                                 (let [throttle-on (.topology-backpressure (:storm-cluster-state worker) storm-id cb)]
-                                                   (reset! (:throttle-on worker) throttle-on)))
-                                      new-throttle-on (.topology-backpressure (:storm-cluster-state worker) storm-id callback)]
-                                    (reset! (:throttle-on worker) new-throttle-on)))
-        check-log-config-changed (fn []
-                                  (let [log-config (.topology-log-config (:storm-cluster-state worker) storm-id nil)]
-                                    (process-log-config-change latest-log-config original-log-levels log-config)
-                                    (establish-log-setting-callback)))]
-    (reset! original-log-levels (get-logger-levels))
-    (log-message "Started with log levels: " @original-log-levels)
-  
-    (defn establish-log-setting-callback []
-      (.topology-log-config (:storm-cluster-state worker) storm-id (fn [args] (check-log-config-changed))))
-
-    (establish-log-setting-callback)
-    (.credentials (:storm-cluster-state worker) storm-id (fn [args] (check-credentials-changed)))
-    (schedule-recurring (:refresh-credentials-timer worker) 0 (conf TASK-CREDENTIALS-POLL-SECS)
-                        (fn [& args]
-                          (check-credentials-changed)
-                          (if ((:storm-conf worker) TOPOLOGY-BACKPRESSURE-ENABLE)
-                            (check-throttle-changed))))
-    ;; The jitter allows the clients to get the data at different times, and avoids thundering herd
-    (when-not (.get conf TOPOLOGY-DISABLE-LOADAWARE-MESSAGING)
-      (schedule-recurring-with-jitter (:refresh-load-timer worker) 0 1 500 refresh-load))
-    (schedule-recurring (:refresh-connections-timer worker) 0 (conf TASK-REFRESH-POLL-SECS) refresh-connections)
-    (schedule-recurring (:reset-log-levels-timer worker) 0 (conf WORKER-LOG-LEVEL-RESET-POLL-SECS) (fn [] (reset-log-levels latest-log-config)))
-    (schedule-recurring (:refresh-active-timer worker) 0 (conf TASK-REFRESH-POLL-SECS) (partial refresh-storm-active worker))
-
-    (log-message "Worker has topology config " (redact-value (:storm-conf worker) STORM-ZOOKEEPER-TOPOLOGY-AUTH-PAYLOAD))
-    (log-message "Worker " worker-id " for storm " storm-id " on " assignment-id ":" port " has finished loading")
-    ret
-    ))))))
-
-(defmethod mk-suicide-fn
-  :local [conf]
-  (fn [] (exit-process! 1 "Worker died")))
-
-(defmethod mk-suicide-fn
-  :distributed [conf]
-  (fn [] (exit-process! 1 "Worker died")))
-
-(defn -main [storm-id assignment-id port-str worker-id]
-  (let [conf (read-storm-config)]
-    (setup-default-uncaught-exception-handler)
-    (validate-distributed-mode! conf)
-    (let [worker (mk-worker conf nil storm-id assignment-id (Integer/parseInt port-str) worker-id)]
-      (add-shutdown-hook-with-force-kill-in-1-sec #(.shutdown worker)))))

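The throttle callback in worker.clj above re-arms itself: each invocation passes the callback back into the cluster-state read so the next backpressure change triggers it again (see the `callback` binding and `check-throttle-changed`). A minimal, self-contained sketch of that pattern follows; `read-throttle-fn` and the `throttle-on` atom are illustrative stand-ins for the `(.topology-backpressure storm-cluster-state storm-id cb)` call and the worker's `:throttle-on` atom.

(defn arm-throttle-watch
  "Reads the current throttle flag and re-registers itself as the watcher."
  [read-throttle-fn throttle-on]
  (letfn [(cb [& _]
            (reset! throttle-on (read-throttle-fn cb)))]
    (cb)
    throttle-on))

;; usage sketch; a real caller would pass a fn wrapping
;; (.topology-backpressure storm-cluster-state storm-id cb)
(def throttle-on (atom false))
(arm-throttle-watch (fn [cb] false) throttle-on)
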
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/disruptor.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/disruptor.clj b/storm-core/src/clj/backtype/storm/disruptor.clj
deleted file mode 100644
index bbfe048..0000000
--- a/storm-core/src/clj/backtype/storm/disruptor.clj
+++ /dev/null
@@ -1,89 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-
-(ns backtype.storm.disruptor
-  (:import [backtype.storm.utils DisruptorQueue WorkerBackpressureCallback DisruptorBackpressureCallback])
-  (:import [com.lmax.disruptor.dsl ProducerType])
-  (:require [clojure [string :as str]])
-  (:require [clojure [set :as set]])
-  (:use [clojure walk])
-  (:use [backtype.storm util log]))
-
-(def PRODUCER-TYPE
-  {:multi-threaded ProducerType/MULTI
-   :single-threaded ProducerType/SINGLE})
-
-(defnk disruptor-queue
-  [^String queue-name buffer-size timeout :producer-type :multi-threaded :batch-size 100 :batch-timeout 1]
-  (DisruptorQueue. queue-name
-                   (PRODUCER-TYPE producer-type) buffer-size
-                   timeout batch-size batch-timeout))
-
-(defn clojure-handler
-  [afn]
-  (reify com.lmax.disruptor.EventHandler
-    (onEvent
-      [this o seq-id batchEnd?]
-      (afn o seq-id batchEnd?))))
-
-(defn disruptor-backpressure-handler
-  [afn-high-wm afn-low-wm]
-  (reify DisruptorBackpressureCallback
-    (highWaterMark
-      [this]
-      (afn-high-wm))
-    (lowWaterMark
-      [this]
-      (afn-low-wm))))
-
-(defn worker-backpressure-handler
-  [afn]
-  (reify WorkerBackpressureCallback
-    (onEvent
-      [this o]
-      (afn o))))
-
-(defmacro handler
-  [& args]
-  `(clojure-handler (fn ~@args)))
-
-(defn publish
-  [^DisruptorQueue q o]
-  (.publish q o))
-
-(defn consume-batch
-  [^DisruptorQueue queue handler]
-  (.consumeBatch queue handler))
-
-(defn consume-batch-when-available
-  [^DisruptorQueue queue handler]
-  (.consumeBatchWhenAvailable queue handler))
-
-(defn halt-with-interrupt!
-  [^DisruptorQueue queue]
-  (.haltWithInterrupt queue))
-
-(defnk consume-loop*
-  [^DisruptorQueue queue handler
-   :kill-fn (fn [error] (exit-process! 1 "Async loop died!"))]
-  (async-loop
-          (fn [] (consume-batch-when-available queue handler) 0)
-          :kill-fn kill-fn
-          :thread-name (.getName queue)))
-
-(defmacro consume-loop [queue & handler-args]
-  `(let [handler# (handler ~@handler-args)]
-     (consume-loop* ~queue handler#)))

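A minimal usage sketch of the helpers above, assuming the backtype.storm.disruptor namespace is required and DisruptorQueue is on the classpath; the queue name, buffer size, and timeout are illustrative only.

(def q (disruptor-queue "example-queue" 1024 1000 :producer-type :single-threaded))

;; consume-loop wraps the body in a clojure-handler and starts an async loop
(def consumer (consume-loop q
                [obj seq-id batch-end?]
                (println "consumed" obj)))

(publish q "hello")
(halt-with-interrupt! q)
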
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/event.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/event.clj b/storm-core/src/clj/backtype/storm/event.clj
deleted file mode 100644
index c1dfb1b..0000000
--- a/storm-core/src/clj/backtype/storm/event.clj
+++ /dev/null
@@ -1,71 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-
-(ns backtype.storm.event
-  (:use [backtype.storm log util])
-  (:import [backtype.storm.utils Time Utils])
-  (:import [java.io InterruptedIOException])
-  (:import [java.util.concurrent LinkedBlockingQueue TimeUnit]))
-
-(defprotocol EventManager
-  (add [this event-fn])
-  (waiting? [this])
-  (shutdown [this]))
-
-(defn event-manager
-  "Creates a thread to respond to events. Any error will cause process to halt"
-  [daemon?]
-  (let [added (atom 0)
-        processed (atom 0)
-        ^LinkedBlockingQueue queue (LinkedBlockingQueue.)
-        running (atom true)
-        runner (Thread.
-                 (fn []
-                   (try-cause
-                     (while @running
-                       (let [r (.take queue)]
-                         (r)
-                         (swap! processed inc)))
-                     (catch InterruptedIOException t
-                       (log-message "Event manager interrupted while doing IO"))
-                     (catch InterruptedException t
-                       (log-message "Event manager interrupted"))
-                     (catch Throwable t
-                       (log-error t "Error when processing event")
-                       (exit-process! 20 "Error when processing an event")))))]
-    (.setDaemon runner daemon?)
-    (.start runner)
-    (reify
-      EventManager
-
-      (add
-        [this event-fn]
-        ;; should keep track of total added and processed to know if this is finished yet
-        (when-not @running
-          (throw (RuntimeException. "Cannot add events to a shutdown event manager")))
-        (swap! added inc)
-        (.put queue event-fn))
-
-      (waiting?
-        [this]
-        (or (Time/isThreadWaiting runner)
-            (= @processed @added)))
-
-      (shutdown
-        [this]
-        (reset! running false)
-        (.interrupt runner)
-        (.join runner)))))

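A minimal usage sketch of the EventManager protocol defined above; the submitted function and the daemon flag are illustrative.

(let [em (event-manager true)]           ;; run the consumer on a daemon thread
  (add em #(println "handling an event"))
  ;; waiting? is true once the runner thread is parked or everything added
  ;; has been processed
  (while (not (waiting? em))
    (Thread/sleep 10))
  (shutdown em))
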
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/local_state.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/local_state.clj b/storm-core/src/clj/backtype/storm/local_state.clj
deleted file mode 100644
index bf9567d..0000000
--- a/storm-core/src/clj/backtype/storm/local_state.clj
+++ /dev/null
@@ -1,131 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.local-state
-  (:use [backtype.storm log util])
-  (:import [backtype.storm.generated StormTopology
-            InvalidTopologyException GlobalStreamId
-            LSSupervisorId LSApprovedWorkers
-            LSSupervisorAssignments LocalAssignment
-            ExecutorInfo LSWorkerHeartbeat
-            LSTopoHistory LSTopoHistoryList
-            WorkerResources])
-  (:import [backtype.storm.utils LocalState]))
-
-(def LS-WORKER-HEARTBEAT "worker-heartbeat")
-(def LS-ID "supervisor-id")
-(def LS-LOCAL-ASSIGNMENTS "local-assignments")
-(def LS-APPROVED-WORKERS "approved-workers")
-(def LS-TOPO-HISTORY "topo-hist")
-
-(defn ->LSTopoHistory
-  [{topoid :topoid timestamp :timestamp users :users groups :groups}]
-  (LSTopoHistory. topoid timestamp users groups))
-
-(defn ->topo-history
-  [thrift-topo-hist]
-  {
-    :topoid (.get_topology_id thrift-topo-hist)
-    :timestamp (.get_time_stamp thrift-topo-hist)
-    :users (.get_users thrift-topo-hist)
-    :groups (.get_groups thrift-topo-hist)})
-
-(defn ls-topo-hist!
-  [^LocalState local-state hist-list]
-  (.put local-state LS-TOPO-HISTORY
-    (LSTopoHistoryList. (map ->LSTopoHistory hist-list))))
-
-(defn ls-topo-hist
-  [^LocalState local-state]
-  (if-let [thrift-hist-list (.get local-state LS-TOPO-HISTORY)]
-    (map ->topo-history (.get_topo_history thrift-hist-list))))
-
-(defn ls-supervisor-id!
-  [^LocalState local-state ^String id]
-    (.put local-state LS-ID (LSSupervisorId. id)))
-
-(defn ls-supervisor-id
-  [^LocalState local-state]
-  (if-let [super-id (.get local-state LS-ID)]
-    (.get_supervisor_id super-id)))
-
-(defn ls-approved-workers!
-  [^LocalState local-state workers]
-    (.put local-state LS-APPROVED-WORKERS (LSApprovedWorkers. workers)))
-
-(defn ls-approved-workers
-  [^LocalState local-state]
-  (if-let [tmp (.get local-state LS-APPROVED-WORKERS)]
-    (into {} (.get_approved_workers tmp))))
-
-(defn ->ExecutorInfo
-  [[low high]] (ExecutorInfo. low high))
-
-(defn ->ExecutorInfo-list
-  [executors]
-  (map ->ExecutorInfo executors))
-
-(defn ->executor-list
-  [executors]
-  (into [] 
-    (for [exec-info executors] 
-      [(.get_task_start exec-info) (.get_task_end exec-info)])))
-
-(defn ->LocalAssignment
-  [{storm-id :storm-id executors :executors resources :resources}]
-  (let [assignment (LocalAssignment. storm-id (->ExecutorInfo-list executors))]
-    (if resources (.set_resources assignment
-                                  (doto (WorkerResources. )
-                                    (.set_mem_on_heap (first resources))
-                                    (.set_mem_off_heap (second resources))
-                                    (.set_cpu (last resources)))))
-    assignment))
-
-(defn mk-local-assignment
-  [storm-id executors resources]
-  {:storm-id storm-id :executors executors :resources resources})
-
-(defn ->local-assignment
-  [^LocalAssignment thrift-local-assignment]
-    (mk-local-assignment
-      (.get_topology_id thrift-local-assignment)
-      (->executor-list (.get_executors thrift-local-assignment))
-      (.get_resources thrift-local-assignment)))
-
-(defn ls-local-assignments!
-  [^LocalState local-state assignments]
-    (let [local-assignment-map (map-val ->LocalAssignment assignments)]
-    (.put local-state LS-LOCAL-ASSIGNMENTS 
-          (LSSupervisorAssignments. local-assignment-map))))
-
-(defn ls-local-assignments
-  [^LocalState local-state]
-    (if-let [thrift-local-assignments (.get local-state LS-LOCAL-ASSIGNMENTS)]
-      (map-val
-        ->local-assignment
-        (.get_assignments thrift-local-assignments))))
-
-(defn ls-worker-heartbeat!
-  [^LocalState local-state time-secs storm-id executors port]
-  (.put local-state LS-WORKER-HEARTBEAT (LSWorkerHeartbeat. time-secs storm-id (->ExecutorInfo-list executors) port) false))
-
-(defn ls-worker-heartbeat 
-  [^LocalState local-state]
-  (if-let [worker-hb (.get local-state LS-WORKER-HEARTBEAT)]
-    {:time-secs (.get_time_secs worker-hb)
-     :storm-id (.get_topology_id worker-hb)
-     :executors (->executor-list (.get_executors worker-hb))
-     :port (.get_port worker-hb)}))
-

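A minimal sketch of round-tripping the supervisor id through the helpers above, assuming backtype.storm.utils.LocalState exposes a constructor taking a root directory path; the path used here is illustrative.

(let [state (backtype.storm.utils.LocalState. "/tmp/example-local-state")]
  (ls-supervisor-id! state "supervisor-1234")
  (ls-supervisor-id state))   ;; => "supervisor-1234"
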
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/log.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/log.clj b/storm-core/src/clj/backtype/storm/log.clj
deleted file mode 100644
index abe9b32..0000000
--- a/storm-core/src/clj/backtype/storm/log.clj
+++ /dev/null
@@ -1,56 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-
-(ns backtype.storm.log
-  (:require [clojure.tools.logging :as log])
-  (:use [clojure pprint])
-  (:import [java.io StringWriter]))
-
-(defmacro log-message
-  [& args]
-  `(log/info (str ~@args)))
-
-(defmacro log-error
-  [e & args]
-  `(log/log :error ~e (str ~@args)))
-
-(defmacro log-debug
-  [& args]
-  `(log/debug (str ~@args)))
-
-(defmacro log-warn-error
-  [e & args]
-  `(log/warn (str ~@args) ~e))
-
-(defmacro log-warn
-  [& args]
-  `(log/warn (str ~@args)))
-
-(defn log-capture!
-  [& args]
-  (apply log/log-capture! args))
-
-(defn log-stream
-  [& args]
-  (apply log/log-stream args))
-
-(defmacro log-pprint
-  [& args]
-  `(let [^StringWriter writer# (StringWriter.)]
-     (doall
-       (for [object# [~@args]]
-         (pprint object# writer#)))
-     (log-message "\n" writer#)))

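A short usage sketch of the macros above: log-message str-concatenates its arguments before delegating to clojure.tools.logging/info, and log-warn-error takes the throwable first.

(log-message "worker on port " 6701 " started in " 250 " ms")
(log-warn-error (ex-info "refresh failed" {:port 6701})
                "error refreshing connections, will retry")
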
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/messaging/loader.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/messaging/loader.clj b/storm-core/src/clj/backtype/storm/messaging/loader.clj
deleted file mode 100644
index 72dd382..0000000
--- a/storm-core/src/clj/backtype/storm/messaging/loader.clj
+++ /dev/null
@@ -1,34 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.messaging.loader
-  (:import [backtype.storm.messaging IConnection DeserializingConnectionCallback])
-  (:require [backtype.storm.messaging [local :as local]]))
-
-(defn mk-local-context []
-  (local/mk-context))
-
-(defn- mk-connection-callback
-  "make an IConnectionCallback"
-  [transfer-local-fn storm-conf worker-context]
-  (DeserializingConnectionCallback. storm-conf
-                                    worker-context
-                                    (fn [batch]
-                                      (transfer-local-fn batch))))
-
-(defn register-callback
-  "register the local-transfer-fn with the server"
-  [transfer-local-fn ^IConnection socket storm-conf worker-context]
-  (.registerRecv socket (mk-connection-callback transfer-local-fn storm-conf worker-context)))

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/messaging/local.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/messaging/local.clj b/storm-core/src/clj/backtype/storm/messaging/local.clj
deleted file mode 100644
index b99a77a..0000000
--- a/storm-core/src/clj/backtype/storm/messaging/local.clj
+++ /dev/null
@@ -1,23 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.messaging.local
-  (:import [backtype.storm.messaging IContext])
-  (:import [backtype.storm.messaging.local Context]))
-
-(defn mk-context [] 
-  (let [context  (Context.)]
-    (.prepare ^IContext context nil)
-    context))

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/metric/testing.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/metric/testing.clj b/storm-core/src/clj/backtype/storm/metric/testing.clj
deleted file mode 100644
index a05dfee..0000000
--- a/storm-core/src/clj/backtype/storm/metric/testing.clj
+++ /dev/null
@@ -1,68 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.metric.testing
-  "This namespace is for AOT dependent metrics testing code."
-  (:gen-class))
-
-(letfn [(for- [threader arg seq-exprs body]
-          `(reduce #(%2 %1)
-                   ~arg
-                   (for ~seq-exprs
-                     (fn [arg#] (~threader arg# ~@body)))))]
-  (defmacro for->
-    "Apply a thread expression to a sequence.
-   eg.
-      (-> 1
-        (for-> [x [1 2 3]]
-          (+ x)))
-   => 7"
-    {:indent 1}
-    [arg seq-exprs & body]
-    (for- 'clojure.core/-> arg seq-exprs body)))
-
-(gen-class
- :name clojure.storm.metric.testing.FakeMetricConsumer
- :implements [backtype.storm.metric.api.IMetricsConsumer]
- :prefix "impl-")
-
-(def buffer (atom nil))
-
-(defn impl-prepare [this conf argument ctx error-reporter]
-  (reset! buffer {}))
-
-(defn impl-cleanup [this]
-  (reset! buffer {}))
-
-(defn vec-conj [coll x] (if coll
-                          (conj coll x)
-                          [x]))
-
-(defn expand-complex-datapoint [dp]
-  (if (or (map? (.value dp))
-          (instance? java.util.AbstractMap (.value dp)))
-    (into [] (for [[k v] (.value dp)]
-               [(str (.name dp) "/" k) v]))
-    [[(.name dp) (.value dp)]]))
-
-(defn impl-handleDataPoints [this task-info data-points]  
-  (swap! buffer
-         (fn [old]
-           (-> old
-            (for-> [dp data-points
-                    [name val] (expand-complex-datapoint dp)]
-                   (update-in [(.srcComponentId task-info) name (.srcTaskId task-info)] vec-conj val))))))
- 
-

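A small sketch of the for-> threading helper defined above with two bindings, mirroring how impl-handleDataPoints threads the buffer map through nested updates; the keys and values are illustrative.

(-> {}
    (for-> [k [:a :b]
            v [1 2]]
      (update-in [k] (fnil + 0) v)))
;; => {:a 3, :b 3}
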
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/process_simulator.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/process_simulator.clj b/storm-core/src/clj/backtype/storm/process_simulator.clj
deleted file mode 100644
index e0cf6ed..0000000
--- a/storm-core/src/clj/backtype/storm/process_simulator.clj
+++ /dev/null
@@ -1,51 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-
-(ns backtype.storm.process-simulator
-  (:use [backtype.storm log util]))
-
-(def pid-counter (mk-counter))
-
-(def process-map (atom {}))
-
-(def kill-lock (Object.))
-
-(defn register-process [pid shutdownable]
-  (swap! process-map assoc pid shutdownable))
-
-(defn process-handle
-  [pid]
-  (@process-map pid))
-
-(defn all-processes
-  []
-  (vals @process-map))
-
-(defn kill-process
-  "Uses `locking` in case cluster shuts down while supervisor is
-  killing a task"
-  [pid]
-  (locking kill-lock
-    (log-message "Killing process " pid)
-    (let [shutdownable (process-handle pid)]
-      (swap! process-map dissoc pid)
-      (when shutdownable
-        (.shutdown shutdownable)))))
-
-(defn kill-all-processes
-  []
-  (doseq [pid (keys @process-map)]
-    (kill-process pid)))

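A minimal sketch of registering and killing a simulated process, assuming the Shutdownable protocol reified by the worker code above (defined elsewhere in storm-core) is referred in; the pid is illustrative.

(register-process "pid-1"
                  (reify Shutdownable
                    (shutdown [this] (println "pid-1 shut down"))))
(kill-process "pid-1")       ;; logs, removes the handle, and calls .shutdown
(kill-all-processes)         ;; no-op here, the map is already empty
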
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/scheduler/DefaultScheduler.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/scheduler/DefaultScheduler.clj b/storm-core/src/clj/backtype/storm/scheduler/DefaultScheduler.clj
deleted file mode 100644
index 1198eb6..0000000
--- a/storm-core/src/clj/backtype/storm/scheduler/DefaultScheduler.clj
+++ /dev/null
@@ -1,77 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.scheduler.DefaultScheduler
-  (:use [backtype.storm util config])
-  (:require [backtype.storm.scheduler.EvenScheduler :as EvenScheduler])
-  (:import [backtype.storm.scheduler IScheduler Topologies
-            Cluster TopologyDetails WorkerSlot SchedulerAssignment
-            EvenScheduler ExecutorDetails])
-  (:gen-class
-    :implements [backtype.storm.scheduler.IScheduler]))
-
-(defn- bad-slots [existing-slots num-executors num-workers]
-  (if (= 0 num-workers)
-    '()
-    (let [distribution (atom (integer-divided num-executors num-workers))
-          keepers (atom {})]
-      (doseq [[node+port executor-list] existing-slots :let [executor-count (count executor-list)]]
-        (when (pos? (get @distribution executor-count 0))
-          (swap! keepers assoc node+port executor-list)
-          (swap! distribution update-in [executor-count] dec)
-          ))
-      (->> @keepers
-           keys
-           (apply dissoc existing-slots)
-           keys
-           (map (fn [[node port]]
-                  (WorkerSlot. node port)))))))
-
-(defn slots-can-reassign [^Cluster cluster slots]
-  (->> slots
-      (filter
-        (fn [[node port]]
-          (if-not (.isBlackListed cluster node)
-            (if-let [supervisor (.getSupervisorById cluster node)]
-              (.contains (.getAllPorts supervisor) (int port))
-              ))))))
-
-(defn -prepare [this conf]
-  )
-
-(defn default-schedule [^Topologies topologies ^Cluster cluster]
-  (let [needs-scheduling-topologies (.needsSchedulingTopologies cluster topologies)]
-    (doseq [^TopologyDetails topology needs-scheduling-topologies
-            :let [topology-id (.getId topology)
-                  available-slots (->> (.getAvailableSlots cluster)
-                                       (map #(vector (.getNodeId %) (.getPort %))))
-                  all-executors (->> topology
-                                     .getExecutors
-                                     (map #(vector (.getStartTask %) (.getEndTask %)))
-                                     set)
-                  alive-assigned (EvenScheduler/get-alive-assigned-node+port->executors cluster topology-id)
-                  alive-executors (->> alive-assigned vals (apply concat) set)
-                  can-reassign-slots (slots-can-reassign cluster (keys alive-assigned))
-                  total-slots-to-use (min (.getNumWorkers topology)
-                                          (+ (count can-reassign-slots) (count available-slots)))
-                  bad-slots (if (or (> total-slots-to-use (count alive-assigned)) 
-                                    (not= alive-executors all-executors))
-                                (bad-slots alive-assigned (count all-executors) total-slots-to-use)
-                                [])]]
-      (.freeSlots cluster bad-slots)
-      (EvenScheduler/schedule-topologies-evenly (Topologies. {topology-id topology}) cluster))))
-
-(defn -schedule [this ^Topologies topologies ^Cluster cluster]
-  (default-schedule topologies cluster))

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/scheduler/EvenScheduler.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/scheduler/EvenScheduler.clj b/storm-core/src/clj/backtype/storm/scheduler/EvenScheduler.clj
deleted file mode 100644
index 25ba03b..0000000
--- a/storm-core/src/clj/backtype/storm/scheduler/EvenScheduler.clj
+++ /dev/null
@@ -1,81 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.scheduler.EvenScheduler
-  (:use [backtype.storm util log config])
-  (:require [clojure.set :as set])
-  (:import [backtype.storm.scheduler IScheduler Topologies
-            Cluster TopologyDetails WorkerSlot ExecutorDetails])
-  (:gen-class
-    :implements [backtype.storm.scheduler.IScheduler]))
-
-(defn sort-slots [all-slots]
-  (let [split-up (sort-by count > (vals (group-by first all-slots)))]
-    (apply interleave-all split-up)
-    ))
-
-(defn get-alive-assigned-node+port->executors [cluster topology-id]
-  (let [existing-assignment (.getAssignmentById cluster topology-id)
-        executor->slot (if existing-assignment
-                         (.getExecutorToSlot existing-assignment)
-                         {}) 
-        executor->node+port (into {} (for [[^ExecutorDetails executor ^WorkerSlot slot] executor->slot
-                                           :let [executor [(.getStartTask executor) (.getEndTask executor)]
-                                                 node+port [(.getNodeId slot) (.getPort slot)]]]
-                                       {executor node+port}))
-        alive-assigned (reverse-map executor->node+port)]
-    alive-assigned))
-
-(defn- schedule-topology [^TopologyDetails topology ^Cluster cluster]
-  (let [topology-id (.getId topology)
-        available-slots (->> (.getAvailableSlots cluster)
-                             (map #(vector (.getNodeId %) (.getPort %))))
-        all-executors (->> topology
-                          .getExecutors
-                          (map #(vector (.getStartTask %) (.getEndTask %)))
-                          set)
-        alive-assigned (get-alive-assigned-node+port->executors cluster topology-id)
-        total-slots-to-use (min (.getNumWorkers topology)
-                                (+ (count available-slots) (count alive-assigned)))
-        reassign-slots (take (- total-slots-to-use (count alive-assigned))
-                             (sort-slots available-slots))
-        reassign-executors (sort (set/difference all-executors (set (apply concat (vals alive-assigned)))))
-        reassignment (into {}
-                           (map vector
-                                reassign-executors
-                                ;; for some reason it goes into an infinite loop without limiting the repeat-seq
-                                (repeat-seq (count reassign-executors) reassign-slots)))]
-    (when-not (empty? reassignment)
-      (log-message "Available slots: " (pr-str available-slots))
-      )
-    reassignment))
-
-(defn schedule-topologies-evenly [^Topologies topologies ^Cluster cluster]
-  (let [needs-scheduling-topologies (.needsSchedulingTopologies cluster topologies)]
-    (doseq [^TopologyDetails topology needs-scheduling-topologies
-            :let [topology-id (.getId topology)
-                  new-assignment (schedule-topology topology cluster)
-                  node+port->executors (reverse-map new-assignment)]]
-      (doseq [[node+port executors] node+port->executors
-              :let [^WorkerSlot slot (WorkerSlot. (first node+port) (last node+port))
-                    executors (for [[start-task end-task] executors]
-                                (ExecutorDetails. start-task end-task))]]
-        (.assign cluster slot topology-id executors)))))
-
-(defn -prepare [this conf]
-  )
-
-(defn -schedule [this ^Topologies topologies ^Cluster cluster]
-  (schedule-topologies-evenly topologies cluster))

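A small sketch of what sort-slots above produces for plain [node port] pairs: slots are grouped per node, the groups are ordered largest first, and consecutive picks rotate across nodes, assuming interleave-all (from backtype.storm.util) keeps interleaving after shorter groups run out.

(sort-slots [["node-a" 1] ["node-a" 2] ["node-a" 3]
             ["node-b" 1] ["node-b" 2]
             ["node-c" 1]])
;; => (["node-a" 1] ["node-b" 1] ["node-c" 1] ["node-a" 2] ["node-b" 2] ["node-a" 3])
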
http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/storm-core/src/clj/backtype/storm/scheduler/IsolationScheduler.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/scheduler/IsolationScheduler.clj b/storm-core/src/clj/backtype/storm/scheduler/IsolationScheduler.clj
deleted file mode 100644
index c6cf8d9..0000000
--- a/storm-core/src/clj/backtype/storm/scheduler/IsolationScheduler.clj
+++ /dev/null
@@ -1,219 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one
-;; or more contributor license agreements.  See the NOTICE file
-;; distributed with this work for additional information
-;; regarding copyright ownership.  The ASF licenses this file
-;; to you under the Apache License, Version 2.0 (the
-;; "License"); you may not use this file except in compliance
-;; with the License.  You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-(ns backtype.storm.scheduler.IsolationScheduler
-  (:use [backtype.storm util config log])
-  (:require [backtype.storm.scheduler.DefaultScheduler :as DefaultScheduler])
-  (:import [java.util HashSet Set List LinkedList ArrayList Map HashMap])
-  (:import [backtype.storm.scheduler IScheduler Topologies
-            Cluster TopologyDetails WorkerSlot SchedulerAssignment
-            EvenScheduler ExecutorDetails])
-  (:gen-class
-    :init init
-    :constructors {[] []}
-    :state state 
-    :implements [backtype.storm.scheduler.IScheduler]))
-
-(defn -init []
-  [[] (container)])
-
-(defn -prepare [this conf]
-  (container-set! (.state this) conf))
-
-(defn- compute-worker-specs "Returns mutable set of sets of executors"
-  [^TopologyDetails details]
-  (->> (.getExecutorToComponent details)
-       reverse-map
-       (map second)
-       (apply concat)
-       (map vector (repeat-seq (range (.getNumWorkers details))))
-       (group-by first)
-       (map-val #(map second %))
-       vals
-       (map set)
-       (HashSet.)
-       ))
-
-(defn isolated-topologies [conf topologies]
-  (let [tset (-> conf (get ISOLATION-SCHEDULER-MACHINES) keys set)]
-    (filter (fn [^TopologyDetails t] (contains? tset (.getName t))) topologies)
-    ))
-
-;; map from topology id -> set of sets of executors
-(defn topology-worker-specs [iso-topologies]
-  (->> iso-topologies
-       (map (fn [t] {(.getId t) (compute-worker-specs t)}))
-       (apply merge)))
-
-(defn machine-distribution [conf ^TopologyDetails topology]
-  (let [name->machines (get conf ISOLATION-SCHEDULER-MACHINES)
-        machines (get name->machines (.getName topology))
-        workers (.getNumWorkers topology)]
-    (-> (integer-divided workers machines)
-        (dissoc 0)
-        (HashMap.)
-        )))
-
-(defn topology-machine-distribution [conf iso-topologies]
-  (->> iso-topologies
-       (map (fn [t] {(.getId t) (machine-distribution conf t)}))
-       (apply merge)))
-
-(defn host-assignments [^Cluster cluster]
-  (letfn [(to-slot-specs [^SchedulerAssignment ass]
-            (->> ass
-                 .getExecutorToSlot
-                 reverse-map
-                 (map (fn [[slot executors]]
-                        [slot (.getTopologyId ass) (set executors)]))))]
-  (->> cluster
-       .getAssignments
-       vals
-       (mapcat to-slot-specs)
-       (group-by (fn [[^WorkerSlot slot & _]] (.getHost cluster (.getNodeId slot))))
-       )))
-
-(defn- decrement-distribution! [^Map distribution value]
-  (let [v (-> distribution (get value) dec)]
-    (if (zero? v)
-      (.remove distribution value)
-      (.put distribution value v))))
-
-;; returns list of list of slots, reverse sorted by number of slots
-(defn- host-assignable-slots [^Cluster cluster]
-  (-<> cluster
-       .getAssignableSlots
-       (group-by #(.getHost cluster (.getNodeId ^WorkerSlot %)) <>)
-       (dissoc <> nil)
-       (sort-by #(-> % second count -) <>)
-       shuffle
-       (LinkedList. <>)
-       ))
-
-(defn- host->used-slots [^Cluster cluster]
-  (->> cluster
-       .getUsedSlots
-       (group-by #(.getHost cluster (.getNodeId ^WorkerSlot %)))
-       ))
-
-(defn- distribution->sorted-amts [distribution]
-  (->> distribution
-       (mapcat (fn [[val amt]] (repeat amt val)))
-       (sort-by -)
-       ))
-
-(defn- allocated-topologies [topology-worker-specs]
-  (->> topology-worker-specs
-    (filter (fn [[_ worker-specs]] (empty? worker-specs)))
-    (map first)
-    set
-    ))
-
-(defn- leftover-topologies [^Topologies topologies filter-ids-set]
-  (->> topologies
-       .getTopologies
-       (filter (fn [^TopologyDetails t] (not (contains? filter-ids-set (.getId t)))))
-       (map (fn [^TopologyDetails t] {(.getId t) t}))
-       (apply merge)
-       (Topologies.)
-       ))
-
-;; for each isolated topology:
-;;   compute even distribution of executors -> workers on the number of workers specified for the topology
-;;   compute distribution of workers to machines
-;; determine host -> list of [slot, topology id, executors]
-;; iterate through hosts and: a machine is good if:
-;;   1. only running workers from one isolated topology
-;;   2. all workers running on it match one of the distributions of executors for that topology
-;;   3. matches one of the # of workers
-;; blacklist the good hosts and remove those workers from the list of need to be assigned workers
-;; otherwise unassign all other workers for isolated topologies if assigned
-
-(defn remove-elem-from-set! [^Set aset]
-  (let [elem (-> aset .iterator .next)]
-    (.remove aset elem)
-    elem
-    ))
-
-;; get host -> all assignable worker slots for non-blacklisted machines (assigned or not assigned)
-;; will then have a list of machines that need to be assigned (machine -> [topology, list of list of executors])
-;; match each spec to a machine (who has the right number of workers), free everything else on that machine and assign those slots (do one topology at a time)
-;; blacklist all machines who had production slots defined
-;; log isolated topologies who weren't able to get enough slots / machines
-;; run default scheduler on isolated topologies that didn't have enough slots + non-isolated topologies on remaining machines
-;; set blacklist to what it was initially
-(defn -schedule [this ^Topologies topologies ^Cluster cluster]
-  (let [conf (container-get (.state this))        
-        orig-blacklist (HashSet. (.getBlacklistedHosts cluster))
-        iso-topologies (isolated-topologies conf (.getTopologies topologies))
-        iso-ids-set (->> iso-topologies (map #(.getId ^TopologyDetails %)) set)
-        topology-worker-specs (topology-worker-specs iso-topologies)
-        topology-machine-distribution (topology-machine-distribution conf iso-topologies)
-        host-assignments (host-assignments cluster)]
-    (doseq [[host assignments] host-assignments]
-      (let [top-id (-> assignments first second)
-            distribution (get topology-machine-distribution top-id)
-            ^Set worker-specs (get topology-worker-specs top-id)
-            num-workers (count assignments)
-            ]
-        (if (and (contains? iso-ids-set top-id)
-                 (every? #(= (second %) top-id) assignments)
-                 (contains? distribution num-workers)
-                 (every? #(contains? worker-specs (nth % 2)) assignments))
-          (do (decrement-distribution! distribution num-workers)
-              (doseq [[_ _ executors] assignments] (.remove worker-specs executors))
-              (.blacklistHost cluster host))
-          (doseq [[slot top-id _] assignments]
-            (when (contains? iso-ids-set top-id)
-              (.freeSlot cluster slot)
-              ))
-          )))
-    
-    (let [host->used-slots (host->used-slots cluster)
-          ^LinkedList sorted-assignable-hosts (host-assignable-slots cluster)]
-      ;; TODO: can improve things further by ordering topologies in terms of who needs the least workers
-      (doseq [[top-id worker-specs] topology-worker-specs
-              :let [amts (distribution->sorted-amts (get topology-machine-distribution top-id))]]
-        (doseq [amt amts
-                :let [[host host-slots] (.peek sorted-assignable-hosts)]]
-          (when (and host-slots (>= (count host-slots) amt))
-            (.poll sorted-assignable-hosts)
-            (.freeSlots cluster (get host->used-slots host))
-            (doseq [slot (take amt host-slots)
-                    :let [executors-set (remove-elem-from-set! worker-specs)]]
-              (.assign cluster slot top-id executors-set))
-            (.blacklistHost cluster host))
-          )))
-    
-    (let [failed-iso-topologies (->> topology-worker-specs
-                                  (mapcat (fn [[top-id worker-specs]]
-                                    (if-not (empty? worker-specs) [top-id])
-                                    )))]
-      (if (empty? failed-iso-topologies)
-        ;; run default scheduler on non-isolated topologies
-        (-<> topology-worker-specs
-             allocated-topologies
-             (leftover-topologies topologies <>)
-             (DefaultScheduler/default-schedule <> cluster))
-        (do
-          (log-warn "Unable to isolate topologies " (pr-str failed-iso-topologies) ". No machine had enough worker slots to run the remaining workers for these topologies. Clearing all other resources and will wait for enough resources for isolated topologies before allocating any other resources.")
-          ;; clear workers off all hosts that are not blacklisted
-          (doseq [[host slots] (host->used-slots cluster)]
-            (if-not (.isBlacklistedHost cluster host)
-              (.freeSlots cluster slots)
-              )))
-        ))
-    (.setBlacklistedHosts cluster orig-blacklist)
-    ))

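The comment blocks above describe when a host may be blacklisted as "good" for an isolated topology. The sketch below extracts that per-host test from -schedule into a standalone predicate; the argument names are illustrative, and assignments are the [slot topology-id executor-set] triples built by host-assignments.

(defn good-isolated-host?
  [assignments top-id iso-ids-set distribution worker-specs]
  (and (contains? iso-ids-set top-id)
       (every? #(= (second %) top-id) assignments)
       (contains? distribution (count assignments))
       (every? #(contains? worker-specs (nth % 2)) assignments)))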

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/PartitionManager.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/PartitionManager.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/PartitionManager.java
new file mode 100644
index 0000000..0f5aaa0
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/PartitionManager.java
@@ -0,0 +1,316 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import org.apache.storm.Config;
+import org.apache.storm.metric.api.CombinedMetric;
+import org.apache.storm.metric.api.CountMetric;
+import org.apache.storm.metric.api.MeanReducer;
+import org.apache.storm.metric.api.ReducedMetric;
+import org.apache.storm.spout.SpoutOutputCollector;
+
+import com.google.common.base.Strings;
+import com.google.common.collect.ImmutableMap;
+
+import kafka.javaapi.consumer.SimpleConsumer;
+import kafka.javaapi.message.ByteBufferMessageSet;
+import kafka.message.MessageAndOffset;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.storm.kafka.KafkaSpout.EmitState;
+import org.apache.storm.kafka.trident.MaxMetric;
+
+import java.util.*;
+
+public class PartitionManager {
+    public static final Logger LOG = LoggerFactory.getLogger(PartitionManager.class);
+
+    private final CombinedMetric _fetchAPILatencyMax;
+    private final ReducedMetric _fetchAPILatencyMean;
+    private final CountMetric _fetchAPICallCount;
+    private final CountMetric _fetchAPIMessageCount;
+    Long _emittedToOffset;
+    // _pending key = Kafka offset, value = time at which the message was first submitted to the topology
+    private SortedMap<Long,Long> _pending = new TreeMap<Long,Long>();
+    private final FailedMsgRetryManager _failedMsgRetryManager;
+
+    // retryRecords key = Kafka offset, value = retry info for the given message
+    Long _committedTo;
+    LinkedList<MessageAndOffset> _waitingToEmit = new LinkedList<MessageAndOffset>();
+    Partition _partition;
+    SpoutConfig _spoutConfig;
+    String _topologyInstanceId;
+    SimpleConsumer _consumer;
+    DynamicPartitionConnections _connections;
+    ZkState _state;
+    Map _stormConf;
+    long numberFailed, numberAcked;
+    public PartitionManager(DynamicPartitionConnections connections, String topologyInstanceId, ZkState state, Map stormConf, SpoutConfig spoutConfig, Partition id) {
+        _partition = id;
+        _connections = connections;
+        _spoutConfig = spoutConfig;
+        _topologyInstanceId = topologyInstanceId;
+        _consumer = connections.register(id.host, id.topic, id.partition);
+        _state = state;
+        _stormConf = stormConf;
+        numberAcked = numberFailed = 0;
+
+        _failedMsgRetryManager = new ExponentialBackoffMsgRetryManager(_spoutConfig.retryInitialDelayMs,
+                                                                           _spoutConfig.retryDelayMultiplier,
+                                                                           _spoutConfig.retryDelayMaxMs);
+
+        String jsonTopologyId = null;
+        Long jsonOffset = null;
+        String path = committedPath();
+        try {
+            Map<Object, Object> json = _state.readJSON(path);
+            LOG.info("Read partition information from: " + path +  "  --> " + json );
+            if (json != null) {
+                jsonTopologyId = (String) ((Map<Object, Object>) json.get("topology")).get("id");
+                jsonOffset = (Long) json.get("offset");
+            }
+        } catch (Throwable e) {
+            LOG.warn("Error reading and/or parsing at ZkNode: " + path, e);
+        }
+
+        String topic = _partition.topic;
+        Long currentOffset = KafkaUtils.getOffset(_consumer, topic, id.partition, spoutConfig);
+
+        if (jsonTopologyId == null || jsonOffset == null) { // failed to parse JSON?
+            _committedTo = currentOffset;
+            LOG.info("No partition information found, using configuration to determine offset");
+        } else if (!topologyInstanceId.equals(jsonTopologyId) && spoutConfig.ignoreZkOffsets) {
+            _committedTo = KafkaUtils.getOffset(_consumer, topic, id.partition, spoutConfig.startOffsetTime);
+            LOG.info("Topology change detected and ignore zookeeper offsets set to true, using configuration to determine offset");
+        } else {
+            _committedTo = jsonOffset;
+            LOG.info("Read last commit offset from zookeeper: " + _committedTo + "; old topology_id: " + jsonTopologyId + " - new topology_id: " + topologyInstanceId );
+        }
+
+        if (currentOffset - _committedTo > spoutConfig.maxOffsetBehind || _committedTo <= 0) {
+            LOG.info("Last commit offset from zookeeper: " + _committedTo);
+            Long lastCommittedOffset = _committedTo;
+            _committedTo = currentOffset;
+            LOG.info("Commit offset " + lastCommittedOffset + " is more than " +
+                    spoutConfig.maxOffsetBehind + " behind latest offset " + currentOffset + ", resetting to startOffsetTime=" + spoutConfig.startOffsetTime);
+        }
+
+        LOG.info("Starting Kafka " + _consumer.host() + ":" + id.partition + " from offset " + _committedTo);
+        _emittedToOffset = _committedTo;
+
+        _fetchAPILatencyMax = new CombinedMetric(new MaxMetric());
+        _fetchAPILatencyMean = new ReducedMetric(new MeanReducer());
+        _fetchAPICallCount = new CountMetric();
+        _fetchAPIMessageCount = new CountMetric();
+    }
+
+    public Map getMetricsDataMap() {
+        Map ret = new HashMap();
+        ret.put(_partition + "/fetchAPILatencyMax", _fetchAPILatencyMax.getValueAndReset());
+        ret.put(_partition + "/fetchAPILatencyMean", _fetchAPILatencyMean.getValueAndReset());
+        ret.put(_partition + "/fetchAPICallCount", _fetchAPICallCount.getValueAndReset());
+        ret.put(_partition + "/fetchAPIMessageCount", _fetchAPIMessageCount.getValueAndReset());
+        return ret;
+    }
+
+    // returns NO_EMITTED if there was nothing left to emit, EMITTED_END if the current batch is now exhausted, and EMITTED_MORE_LEFT otherwise
+    public EmitState next(SpoutOutputCollector collector) {
+        if (_waitingToEmit.isEmpty()) {
+            fill();
+        }
+        while (true) {
+            MessageAndOffset toEmit = _waitingToEmit.pollFirst();
+            if (toEmit == null) {
+                return EmitState.NO_EMITTED;
+            }
+
+            Iterable<List<Object>> tups;
+            if (_spoutConfig.scheme instanceof MessageMetadataSchemeAsMultiScheme) {
+                tups = KafkaUtils.generateTuples((MessageMetadataSchemeAsMultiScheme) _spoutConfig.scheme, toEmit.message(), _partition, toEmit.offset());
+            } else {
+                tups = KafkaUtils.generateTuples(_spoutConfig, toEmit.message(), _partition.topic);
+            }
+            
+            if ((tups != null) && tups.iterator().hasNext()) {
+               if (!Strings.isNullOrEmpty(_spoutConfig.outputStreamId)) {
+                    for (List<Object> tup : tups) {
+                        collector.emit(_spoutConfig.topic, tup, new KafkaMessageId(_partition, toEmit.offset()));
+                    }
+                } else {
+                    for (List<Object> tup : tups) {
+                        collector.emit(tup, new KafkaMessageId(_partition, toEmit.offset()));
+                    }
+                }
+                break;
+            } else {
+                ack(toEmit.offset());
+            }
+        }
+        if (!_waitingToEmit.isEmpty()) {
+            return EmitState.EMITTED_MORE_LEFT;
+        } else {
+            return EmitState.EMITTED_END;
+        }
+    }
+
+
+    private void fill() {
+        long start = System.nanoTime();
+        Long offset;
+
+        // Are there failed tuples? If so, fetch those first.
+        offset = this._failedMsgRetryManager.nextFailedMessageToRetry();
+        final boolean processingNewTuples = (offset == null);
+        if (processingNewTuples) {
+            offset = _emittedToOffset;
+        }
+
+        ByteBufferMessageSet msgs = null;
+        try {
+            msgs = KafkaUtils.fetchMessages(_spoutConfig, _consumer, _partition, offset);
+        } catch (TopicOffsetOutOfRangeException e) {
+            _emittedToOffset = KafkaUtils.getOffset(_consumer, _partition.topic, _partition.partition, kafka.api.OffsetRequest.EarliestTime());
+            LOG.warn("{} Using new offset: {}", _partition.partition, _emittedToOffset);
+            // fetch failed, so don't update the metrics
+            
+            // fix for [STORM-643]: remove outdated failed offsets
+            if (!processingNewTuples) {
+                // When we fall back to EarliestTime, discard any failed offsets
+                // that are earlier than the actual EarliestTime offset, since they
+                // no longer exist on the broker; this also saves the broker API
+                // calls that would otherwise be spent retrying them.
+                Set<Long> omitted = this._failedMsgRetryManager.clearInvalidMessages(_emittedToOffset);
+                
+                LOG.warn("Removing the failed offsets that are out of range: {}", omitted);
+            }
+            
+            return;
+        }
+        long end = System.nanoTime();
+        long millis = (end - start) / 1000000;
+        _fetchAPILatencyMax.update(millis);
+        _fetchAPILatencyMean.update(millis);
+        _fetchAPICallCount.incr();
+        if (msgs != null) {
+            int numMessages = 0;
+
+            for (MessageAndOffset msg : msgs) {
+                final Long cur_offset = msg.offset();
+                if (cur_offset < offset) {
+                    // Skip any old offsets.
+                    continue;
+                }
+                if (processingNewTuples || this._failedMsgRetryManager.shouldRetryMsg(cur_offset)) {
+                    numMessages += 1;
+                    if (!_pending.containsKey(cur_offset)) {
+                        _pending.put(cur_offset, System.currentTimeMillis());
+                    }
+                    _waitingToEmit.add(msg);
+                    _emittedToOffset = Math.max(msg.nextOffset(), _emittedToOffset);
+                    if (_failedMsgRetryManager.shouldRetryMsg(cur_offset)) {
+                        this._failedMsgRetryManager.retryStarted(cur_offset);
+                    }
+                }
+            }
+            _fetchAPIMessageCount.incrBy(numMessages);
+        }
+    }
+
+    public void ack(Long offset) {
+        if (!_pending.isEmpty() && _pending.firstKey() < offset - _spoutConfig.maxOffsetBehind) {
+            // Too many things pending!
+            _pending.headMap(offset - _spoutConfig.maxOffsetBehind).clear();
+        }
+        _pending.remove(offset);
+        this._failedMsgRetryManager.acked(offset);
+        numberAcked++;
+    }
+
+    public void fail(Long offset) {
+        if (offset < _emittedToOffset - _spoutConfig.maxOffsetBehind) {
+            LOG.info(
+                    "Skipping failed tuple at offset=" + offset +
+                            " because it's more than maxOffsetBehind=" + _spoutConfig.maxOffsetBehind +
+                            " behind _emittedToOffset=" + _emittedToOffset
+            );
+        } else {
+            LOG.debug("failing at offset={} with _pending.size()={} pending and _emittedToOffset={}", offset, _pending.size(), _emittedToOffset);
+            numberFailed++;
+            if (numberAcked == 0 && numberFailed > _spoutConfig.maxOffsetBehind) {
+                throw new RuntimeException("Too many tuple failures");
+            }
+
+            this._failedMsgRetryManager.failed(offset);
+        }
+    }
+
+    public void commit() {
+        long lastCompletedOffset = lastCompletedOffset();
+        if (_committedTo != lastCompletedOffset) {
+            LOG.debug("Writing last completed offset ({}) to ZK for {} for topology: {}", lastCompletedOffset, _partition, _topologyInstanceId);
+            Map<Object, Object> data = (Map<Object, Object>) ImmutableMap.builder()
+                    .put("topology", ImmutableMap.of("id", _topologyInstanceId,
+                            "name", _stormConf.get(Config.TOPOLOGY_NAME)))
+                    .put("offset", lastCompletedOffset)
+                    .put("partition", _partition.partition)
+                    .put("broker", ImmutableMap.of("host", _partition.host.host,
+                            "port", _partition.host.port))
+                    .put("topic", _partition.topic).build();
+            _state.writeJSON(committedPath(), data);
+
+            _committedTo = lastCompletedOffset;
+            LOG.debug("Wrote last completed offset ({}) to ZK for {} for topology: {}", lastCompletedOffset, _partition, _topologyInstanceId);
+        } else {
+            LOG.debug("No new offset for {} for topology: {}", _partition, _topologyInstanceId);
+        }
+    }
+
+    private String committedPath() {
+        return _spoutConfig.zkRoot + "/" + _spoutConfig.id + "/" + _partition.getId();
+    }
+
+    public long lastCompletedOffset() {
+        if (_pending.isEmpty()) {
+            return _emittedToOffset;
+        } else {
+            return _pending.firstKey();
+        }
+    }
+
+    public Partition getPartition() {
+        return _partition;
+    }
+
+    public void close() {
+        commit();
+        _connections.unregister(_partition.host, _partition.topic , _partition.partition);
+    }
+
+    static class KafkaMessageId {
+        public Partition partition;
+        public long offset;
+
+
+        public KafkaMessageId(Partition partition, long offset) {
+            this.partition = partition;
+            this.offset = offset;
+        }
+    }
+}
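
For context, a minimal sketch of how a spout's nextTuple() typically consumes the EmitState values returned by PartitionManager.next() above (NO_EMITTED, EMITTED_MORE_LEFT, EMITTED_END). The _coordinator and _currIndex fields are assumed members of the enclosing spout and are placeholders, not code from this patch:

    private PartitionCoordinator _coordinator;   // assumed: built in open(), e.g. a ZkCoordinator
    private int _currIndex = 0;                  // assumed: round-robin cursor over partitions

    public void nextTuple(SpoutOutputCollector collector) {
        List<PartitionManager> managers = _coordinator.getMyManagedPartitions();
        for (int i = 0; i < managers.size(); i++) {
            _currIndex = (_currIndex + 1) % managers.size();
            EmitState state = managers.get(_currIndex).next(collector);
            if (state == EmitState.EMITTED_MORE_LEFT) {
                // this partition still has buffered messages; keep the cursor here
                break;
            }
            // NO_EMITTED or EMITTED_END: move on and try the next partition
        }
    }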

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/SpoutConfig.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/SpoutConfig.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/SpoutConfig.java
new file mode 100644
index 0000000..1ac41c8
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/SpoutConfig.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import java.io.Serializable;
+import java.util.List;
+
+
+public class SpoutConfig extends KafkaConfig implements Serializable {
+    public List<String> zkServers = null;
+    public Integer zkPort = null;
+    public String zkRoot = null;
+    public String id = null;
+
+    public String outputStreamId;
+
+    // setting for how often to save the current kafka offset to ZooKeeper
+    public long stateUpdateIntervalMs = 2000;
+
+    // Exponential back-off retry settings.  These are used when retrying messages after a bolt
+    // calls OutputCollector.fail().
+    public long retryInitialDelayMs = 0;
+    public double retryDelayMultiplier = 1.0;
+    public long retryDelayMaxMs = 60 * 1000;
+
+    public SpoutConfig(BrokerHosts hosts, String topic, String zkRoot, String id) {
+        super(hosts, topic);
+        this.zkRoot = zkRoot;
+        this.id = id;
+    }
+}
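
A typical construction of this config, as an illustrative sketch only (ZooKeeper connect string, topic and ids are placeholders; assumes the usual org.apache.storm.kafka and org.apache.storm.spout imports):

    BrokerHosts hosts = new ZkHosts("zkhost1:2181,zkhost2:2181");
    SpoutConfig spoutConfig = new SpoutConfig(hosts, "clicks", "/kafka-spout", "click-spout");
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());

    // optional tuning of the fields declared above
    spoutConfig.stateUpdateIntervalMs = 5000;      // commit offsets to ZooKeeper every 5 s
    spoutConfig.retryInitialDelayMs = 100;         // first retry 100 ms after a failure
    spoutConfig.retryDelayMultiplier = 2.0;        // exponential back-off between retries
    spoutConfig.retryDelayMaxMs = 30 * 1000;       // cap the retry delay at 30 s

    KafkaSpout spout = new KafkaSpout(spoutConfig);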

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/StaticCoordinator.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/StaticCoordinator.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/StaticCoordinator.java
new file mode 100644
index 0000000..bdbc44d
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/StaticCoordinator.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import org.apache.storm.kafka.trident.GlobalPartitionInformation;
+
+import java.util.*;
+
+
+public class StaticCoordinator implements PartitionCoordinator {
+    Map<Partition, PartitionManager> _managers = new HashMap<Partition, PartitionManager>();
+    List<PartitionManager> _allManagers = new ArrayList<PartitionManager>();
+
+    public StaticCoordinator(DynamicPartitionConnections connections, Map stormConf, SpoutConfig config, ZkState state, int taskIndex, int totalTasks, String topologyInstanceId) {
+        StaticHosts hosts = (StaticHosts) config.hosts;
+        List<GlobalPartitionInformation> partitions = new ArrayList<GlobalPartitionInformation>();
+        partitions.add(hosts.getPartitionInformation());
+        List<Partition> myPartitions = KafkaUtils.calculatePartitionsForTask(partitions, totalTasks, taskIndex);
+        for (Partition myPartition : myPartitions) {
+            _managers.put(myPartition, new PartitionManager(connections, topologyInstanceId, state, stormConf, config, myPartition));
+        }
+        _allManagers = new ArrayList<PartitionManager>(_managers.values());
+    }
+
+    @Override
+    public List<PartitionManager> getMyManagedPartitions() {
+        return _allManagers;
+    }
+
+    public PartitionManager getManager(Partition partition) {
+        return _managers.get(partition);
+    }
+
+    @Override
+    public void refresh() { return; }
+
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/StaticHosts.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/StaticHosts.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/StaticHosts.java
new file mode 100644
index 0000000..33d5c16
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/StaticHosts.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import org.apache.storm.kafka.trident.GlobalPartitionInformation;
+
+/**
+ * Date: 11/05/2013
+ * Time: 14:43
+ */
+public class StaticHosts implements BrokerHosts {
+
+
+    private GlobalPartitionInformation partitionInformation;
+
+    public StaticHosts(GlobalPartitionInformation partitionInformation) {
+        this.partitionInformation = partitionInformation;
+    }
+
+    public GlobalPartitionInformation getPartitionInformation() {
+        return partitionInformation;
+    }
+}
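
An illustrative sketch of building StaticHosts from a fixed broker list (host names, ports and topic are placeholders); this is mainly useful for tests or clusters without ZooKeeper-based broker discovery:

    GlobalPartitionInformation partitionInfo = new GlobalPartitionInformation("clicks");
    partitionInfo.addPartition(0, new Broker("kafka-broker1", 9092));
    partitionInfo.addPartition(1, new Broker("kafka-broker2", 9092));

    StaticHosts hosts = new StaticHosts(partitionInfo);
    SpoutConfig spoutConfig = new SpoutConfig(hosts, "clicks", "/kafka-spout", "click-spout");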

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/StaticPartitionConnections.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/StaticPartitionConnections.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/StaticPartitionConnections.java
new file mode 100644
index 0000000..77a7211
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/StaticPartitionConnections.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import kafka.javaapi.consumer.SimpleConsumer;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class StaticPartitionConnections {
+    Map<Integer, SimpleConsumer> _kafka = new HashMap<Integer, SimpleConsumer>();
+    KafkaConfig _config;
+    StaticHosts hosts;
+
+    public StaticPartitionConnections(KafkaConfig conf) {
+        _config = conf;
+        if (!(conf.hosts instanceof StaticHosts)) {
+            throw new RuntimeException("Must configure with static hosts");
+        }
+        this.hosts = (StaticHosts) conf.hosts;
+    }
+
+    public SimpleConsumer getConsumer(int partition) {
+        if (!_kafka.containsKey(partition)) {
+            Broker hp = hosts.getPartitionInformation().getBrokerFor(partition);
+            _kafka.put(partition, new SimpleConsumer(hp.host, hp.port, _config.socketTimeoutMs, _config.bufferSizeBytes, _config.clientId));
+
+        }
+        return _kafka.get(partition);
+    }
+
+    public void close() {
+        for (SimpleConsumer consumer : _kafka.values()) {
+            consumer.close();
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/StringKeyValueScheme.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/StringKeyValueScheme.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/StringKeyValueScheme.java
new file mode 100644
index 0000000..9ef7f74
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/StringKeyValueScheme.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import org.apache.storm.tuple.Values;
+import com.google.common.collect.ImmutableMap;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+
+public class StringKeyValueScheme extends StringScheme implements KeyValueScheme {
+
+    @Override
+    public List<Object> deserializeKeyAndValue(ByteBuffer key, ByteBuffer value) {
+        if ( key == null ) {
+            return deserialize(value);
+        }
+        String keyString = StringScheme.deserializeString(key);
+        String valueString = StringScheme.deserializeString(value);
+        return new Values(ImmutableMap.of(keyString, valueString));
+    }
+
+}
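
To have the spout hand the Kafka message key to this scheme, it is wrapped in KeyValueSchemeAsMultiScheme from the same package; a minimal sketch, reusing the placeholder spoutConfig from earlier:

    spoutConfig.scheme = new KeyValueSchemeAsMultiScheme(new StringKeyValueScheme());
    // with a non-null key the single "str" field carries ImmutableMap.of(key, value);
    // with a null key it carries just the message string, as in plain StringScheme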

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/StringMessageAndMetadataScheme.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/StringMessageAndMetadataScheme.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/StringMessageAndMetadataScheme.java
new file mode 100644
index 0000000..e57738d
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/StringMessageAndMetadataScheme.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+
+public class StringMessageAndMetadataScheme extends StringScheme implements MessageMetadataScheme {
+    private static final long serialVersionUID = -5441841920447947374L;
+
+    public static final String STRING_SCHEME_PARTITION_KEY = "partition";
+    public static final String STRING_SCHEME_OFFSET = "offset";
+
+    @Override
+    public List<Object> deserializeMessageWithMetadata(ByteBuffer message, Partition partition, long offset) {
+        String stringMessage = StringScheme.deserializeString(message);
+        return new Values(stringMessage, partition.partition, offset);
+    }
+
+    @Override
+    public Fields getOutputFields() {
+        return new Fields(STRING_SCHEME_KEY, STRING_SCHEME_PARTITION_KEY, STRING_SCHEME_OFFSET);
+    }
+
+}
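
This scheme is wired in through MessageMetadataSchemeAsMultiScheme, the wrapper that PartitionManager.next() checks for above, so each tuple carries the message plus its partition and offset; a minimal sketch, again using the placeholder spoutConfig:

    spoutConfig.scheme = new MessageMetadataSchemeAsMultiScheme(new StringMessageAndMetadataScheme());
    // declared output fields: ("str", "partition", "offset")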

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/StringMultiSchemeWithTopic.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/StringMultiSchemeWithTopic.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/StringMultiSchemeWithTopic.java
new file mode 100644
index 0000000..d92a879
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/StringMultiSchemeWithTopic.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import org.apache.storm.spout.MultiScheme;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+import sun.reflect.generics.reflectiveObjects.NotImplementedException;
+
+import java.nio.ByteBuffer;
+import java.util.Collections;
+import java.util.List;
+
+public class StringMultiSchemeWithTopic
+        implements MultiScheme {
+    public static final String STRING_SCHEME_KEY = "str";
+
+    public static final String TOPIC_KEY = "topic";
+
+    @Override
+    public Iterable<List<Object>> deserialize(ByteBuffer bytes) {
+        throw new NotImplementedException();
+    }
+
+    public Iterable<List<Object>> deserializeWithTopic(String topic, ByteBuffer bytes) {
+        List<Object> items = new Values(StringScheme.deserializeString(bytes), topic);
+        return Collections.singletonList(items);
+    }
+
+    public Fields getOutputFields() {
+        return new Fields(STRING_SCHEME_KEY, TOPIC_KEY);
+    }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/StringScheme.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/StringScheme.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/StringScheme.java
new file mode 100644
index 0000000..e2a2c22
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/StringScheme.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import org.apache.storm.spout.Scheme;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+import org.apache.storm.utils.Utils;
+
+import java.nio.ByteBuffer;
+import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
+import java.util.List;
+
+public class StringScheme implements Scheme {
+    private static final Charset UTF8_CHARSET = StandardCharsets.UTF_8;
+    public static final String STRING_SCHEME_KEY = "str";
+
+    public List<Object> deserialize(ByteBuffer bytes) {
+        return new Values(deserializeString(bytes));
+    }
+
+    public static String deserializeString(ByteBuffer string) {
+        if (string.hasArray()) {
+            int base = string.arrayOffset();
+            return new String(string.array(), base + string.position(), string.remaining(), UTF8_CHARSET);
+        } else {
+            return new String(Utils.toByteArray(string), UTF8_CHARSET);
+        }
+    }
+
+    public Fields getOutputFields() {
+        return new Fields(STRING_SCHEME_KEY);
+    }
+}
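
A small sketch of the two paths through deserializeString(): a heap-backed buffer is decoded straight from its backing array, while a direct buffer is copied first via Utils.toByteArray (assumes java.nio.ByteBuffer and java.nio.charset.StandardCharsets imports):

    ByteBuffer heapBacked = ByteBuffer.wrap("hello".getBytes(StandardCharsets.UTF_8));
    String a = StringScheme.deserializeString(heapBacked);   // array-backed branch

    ByteBuffer direct = ByteBuffer.allocateDirect(5);
    direct.put("hello".getBytes(StandardCharsets.UTF_8));
    direct.flip();
    String b = StringScheme.deserializeString(direct);        // Utils.toByteArray branch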

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/TopicOffsetOutOfRangeException.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/TopicOffsetOutOfRangeException.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/TopicOffsetOutOfRangeException.java
new file mode 100644
index 0000000..8e1c98f
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/TopicOffsetOutOfRangeException.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+public class TopicOffsetOutOfRangeException extends RuntimeException {
+
+    public TopicOffsetOutOfRangeException(String message) {
+        super(message);
+    }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/ZkCoordinator.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/ZkCoordinator.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/ZkCoordinator.java
new file mode 100644
index 0000000..a53d566
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/ZkCoordinator.java
@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.storm.kafka.trident.GlobalPartitionInformation;
+
+import java.util.*;
+
+import static org.apache.storm.kafka.KafkaUtils.taskId;
+
+public class ZkCoordinator implements PartitionCoordinator {
+    public static final Logger LOG = LoggerFactory.getLogger(ZkCoordinator.class);
+
+    SpoutConfig _spoutConfig;
+    int _taskIndex;
+    int _totalTasks;
+    String _topologyInstanceId;
+    Map<Partition, PartitionManager> _managers = new HashMap<Partition, PartitionManager>();
+    List<PartitionManager> _cachedList = new ArrayList<PartitionManager>();
+    Long _lastRefreshTime = null;
+    int _refreshFreqMs;
+    DynamicPartitionConnections _connections;
+    DynamicBrokersReader _reader;
+    ZkState _state;
+    Map _stormConf;
+
+    public ZkCoordinator(DynamicPartitionConnections connections, Map stormConf, SpoutConfig spoutConfig, ZkState state, int taskIndex, int totalTasks, String topologyInstanceId) {
+        this(connections, stormConf, spoutConfig, state, taskIndex, totalTasks, topologyInstanceId, buildReader(stormConf, spoutConfig));
+    }
+
+    public ZkCoordinator(DynamicPartitionConnections connections, Map stormConf, SpoutConfig spoutConfig, ZkState state, int taskIndex, int totalTasks, String topologyInstanceId, DynamicBrokersReader reader) {
+        _spoutConfig = spoutConfig;
+        _connections = connections;
+        _taskIndex = taskIndex;
+        _totalTasks = totalTasks;
+        _topologyInstanceId = topologyInstanceId;
+        _stormConf = stormConf;
+        _state = state;
+        ZkHosts brokerConf = (ZkHosts) spoutConfig.hosts;
+        _refreshFreqMs = brokerConf.refreshFreqSecs * 1000;
+        _reader = reader;
+    }
+
+    private static DynamicBrokersReader buildReader(Map stormConf, SpoutConfig spoutConfig) {
+        ZkHosts hosts = (ZkHosts) spoutConfig.hosts;
+        return new DynamicBrokersReader(stormConf, hosts.brokerZkStr, hosts.brokerZkPath, spoutConfig.topic);
+    }
+
+    @Override
+    public List<PartitionManager> getMyManagedPartitions() {
+        if (_lastRefreshTime == null || (System.currentTimeMillis() - _lastRefreshTime) > _refreshFreqMs) {
+            refresh();
+            _lastRefreshTime = System.currentTimeMillis();
+        }
+        return _cachedList;
+    }
+
+    @Override
+    public void refresh() {
+        try {
+            LOG.info(taskId(_taskIndex, _totalTasks) + "Refreshing partition manager connections");
+            List<GlobalPartitionInformation> brokerInfo = _reader.getBrokerInfo();
+            List<Partition> mine = KafkaUtils.calculatePartitionsForTask(brokerInfo, _totalTasks, _taskIndex);
+
+            Set<Partition> curr = _managers.keySet();
+            Set<Partition> newPartitions = new HashSet<Partition>(mine);
+            newPartitions.removeAll(curr);
+
+            Set<Partition> deletedPartitions = new HashSet<Partition>(curr);
+            deletedPartitions.removeAll(mine);
+
+            LOG.info(taskId(_taskIndex, _totalTasks) + "Deleted partition managers: " + deletedPartitions.toString());
+
+            for (Partition id : deletedPartitions) {
+                PartitionManager man = _managers.remove(id);
+                man.close();
+            }
+            LOG.info(taskId(_taskIndex, _totalTasks) + "New partition managers: " + newPartitions.toString());
+
+            for (Partition id : newPartitions) {
+                PartitionManager man = new PartitionManager(_connections, _topologyInstanceId, _state, _stormConf, _spoutConfig, id);
+                _managers.put(id, man);
+            }
+
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+        _cachedList = new ArrayList<PartitionManager>(_managers.values());
+        LOG.info(taskId(_taskIndex, _totalTasks) + "Finished refreshing");
+    }
+
+    @Override
+    public PartitionManager getManager(Partition partition) {
+        return _managers.get(partition);
+    }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/ZkHosts.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/ZkHosts.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/ZkHosts.java
new file mode 100644
index 0000000..2c2a26f
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/ZkHosts.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+
+public class ZkHosts implements BrokerHosts {
+    private static final String DEFAULT_ZK_PATH = "/brokers";
+
+    public String brokerZkStr = null;
+    public String brokerZkPath = null; // e.g., /kafka/brokers
+    public int refreshFreqSecs = 60;
+
+    public ZkHosts(String brokerZkStr, String brokerZkPath) {
+        this.brokerZkStr = brokerZkStr;
+        this.brokerZkPath = brokerZkPath;
+    }
+
+    public ZkHosts(String brokerZkStr) {
+        this(brokerZkStr, DEFAULT_ZK_PATH);
+    }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/ZkState.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/ZkState.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/ZkState.java
new file mode 100644
index 0000000..d12016b
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/ZkState.java
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka;
+
+import org.apache.storm.Config;
+import org.apache.storm.utils.Utils;
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.CuratorFrameworkFactory;
+import org.apache.curator.retry.RetryNTimes;
+import org.apache.zookeeper.CreateMode;
+import org.json.simple.JSONValue;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.nio.charset.Charset;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class ZkState {
+    public static final Logger LOG = LoggerFactory.getLogger(ZkState.class);
+    CuratorFramework _curator;
+
+    private CuratorFramework newCurator(Map stateConf) throws Exception {
+        Integer port = (Integer) stateConf.get(Config.TRANSACTIONAL_ZOOKEEPER_PORT);
+        String serverPorts = "";
+        for (String server : (List<String>) stateConf.get(Config.TRANSACTIONAL_ZOOKEEPER_SERVERS)) {
+            serverPorts = serverPorts + server + ":" + port + ",";
+        }
+        return CuratorFrameworkFactory.newClient(serverPorts,
+                Utils.getInt(stateConf.get(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT)),
+                Utils.getInt(stateConf.get(Config.STORM_ZOOKEEPER_CONNECTION_TIMEOUT)),
+                new RetryNTimes(Utils.getInt(stateConf.get(Config.STORM_ZOOKEEPER_RETRY_TIMES)),
+                        Utils.getInt(stateConf.get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL))));
+    }
+
+    public CuratorFramework getCurator() {
+        assert _curator != null;
+        return _curator;
+    }
+
+    public ZkState(Map stateConf) {
+        stateConf = new HashMap(stateConf);
+
+        try {
+            _curator = newCurator(stateConf);
+            _curator.start();
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    public void writeJSON(String path, Map<Object, Object> data) {
+        LOG.debug("Writing {} the data {}", path, data.toString());
+        writeBytes(path, JSONValue.toJSONString(data).getBytes(Charset.forName("UTF-8")));
+    }
+
+    public void writeBytes(String path, byte[] bytes) {
+        try {
+            if (_curator.checkExists().forPath(path) == null) {
+                _curator.create()
+                        .creatingParentsIfNeeded()
+                        .withMode(CreateMode.PERSISTENT)
+                        .forPath(path, bytes);
+            } else {
+                _curator.setData().forPath(path, bytes);
+            }
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    public Map<Object, Object> readJSON(String path) {
+        try {
+            byte[] b = readBytes(path);
+            if (b == null) {
+                return null;
+            }
+            return (Map<Object, Object>) JSONValue.parse(new String(b, "UTF-8"));
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    public byte[] readBytes(String path) {
+        try {
+            if (_curator.checkExists().forPath(path) != null) {
+                return _curator.getData().forPath(path);
+            } else {
+                return null;
+            }
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    public void close() {
+        _curator.close();
+        _curator = null;
+    }
+}
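
An illustrative sketch of standalone ZkState usage, supplying the transactional ZooKeeper settings that newCurator() reads; server names, path and values are placeholders, and Guava's ImmutableMap is assumed on the classpath:

    Map<String, Object> conf = new HashMap<>();
    conf.put(Config.TRANSACTIONAL_ZOOKEEPER_SERVERS, Arrays.asList("zk1", "zk2"));
    conf.put(Config.TRANSACTIONAL_ZOOKEEPER_PORT, 2181);
    conf.put(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT, 20000);
    conf.put(Config.STORM_ZOOKEEPER_CONNECTION_TIMEOUT, 15000);
    conf.put(Config.STORM_ZOOKEEPER_RETRY_TIMES, 5);
    conf.put(Config.STORM_ZOOKEEPER_RETRY_INTERVAL, 1000);

    ZkState state = new ZkState(conf);
    state.writeJSON("/kafka-spout/click-spout/partition_0",
            ImmutableMap.<Object, Object>of("offset", 42L, "topic", "clicks"));
    Map<Object, Object> stored = state.readJSON("/kafka-spout/click-spout/partition_0");
    state.close();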

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/bolt/KafkaBolt.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/bolt/KafkaBolt.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/bolt/KafkaBolt.java
new file mode 100644
index 0000000..0ceac3a
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/bolt/KafkaBolt.java
@@ -0,0 +1,178 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka.bolt;
+
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseRichBolt;
+import org.apache.storm.tuple.Tuple;
+import org.apache.storm.utils.TupleUtils;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.clients.producer.RecordMetadata;
+import org.apache.kafka.clients.producer.Callback;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.storm.kafka.bolt.mapper.FieldNameBasedTupleToKafkaMapper;
+import org.apache.storm.kafka.bolt.mapper.TupleToKafkaMapper;
+import org.apache.storm.kafka.bolt.selector.DefaultTopicSelector;
+import org.apache.storm.kafka.bolt.selector.KafkaTopicSelector;
+import java.util.concurrent.Future;
+import java.util.concurrent.ExecutionException;
+import java.util.Map;
+import java.util.Properties;
+
+
+/**
+ * Bolt implementation that can send Tuple data to Kafka.
+ * <p/>
+ * It expects the producer configuration and the topic to be supplied in the storm config
+ * <p/>
+ * under 'kafka.broker.properties' and 'topic'
+ * <p/>
+ * respectively.
+ * <p/>
+ * This bolt uses the 0.8.2 Kafka Producer API.
+ * <p/>
+ * It also works for sending tuples to older Kafka versions (0.8.1).
+ */
+public class KafkaBolt<K, V> extends BaseRichBolt {
+
+    private static final Logger LOG = LoggerFactory.getLogger(KafkaBolt.class);
+
+    public static final String TOPIC = "topic";
+
+    private KafkaProducer<K, V> producer;
+    private OutputCollector collector;
+    private TupleToKafkaMapper<K,V> mapper;
+    private KafkaTopicSelector topicSelector;
+    private Properties boltSpecfiedProperties = new Properties();
+    /**
+     * With the default settings (fireAndForget = false, async = true), a callback acks or fails the tuple
+     * once Kafka reports the outcome of the send. Setting fireAndForget to true makes the send not wait
+     * for Kafka to ack at all, so the producer "acks" setting (0.8.2 Producer API) then has no effect.
+     * Setting async to false uses synchronous sending, acking or failing the tuple when the send returns.
+     */
+    private boolean fireAndForget = false;
+    private boolean async = true;
+
+    public KafkaBolt() {}
+
+    public KafkaBolt<K,V> withTupleToKafkaMapper(TupleToKafkaMapper<K,V> mapper) {
+        this.mapper = mapper;
+        return this;
+    }
+
+    public KafkaBolt<K,V> withTopicSelector(KafkaTopicSelector selector) {
+        this.topicSelector = selector;
+        return this;
+    }
+
+    public KafkaBolt<K,V> withProducerProperties(Properties producerProperties) {
+        this.boltSpecfiedProperties = producerProperties;
+        return this;
+    }
+
+    @Override
+    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
+        //for backward compatibility.
+        if(mapper == null) {
+            this.mapper = new FieldNameBasedTupleToKafkaMapper<K,V>();
+        }
+
+        //for backward compatibility.
+        if(topicSelector == null) {
+            this.topicSelector = new DefaultTopicSelector((String) stormConf.get(TOPIC));
+        }
+
+        producer = new KafkaProducer<>(boltSpecfiedProperties);
+        this.collector = collector;
+    }
+
+    @Override
+    public void execute(final Tuple input) {
+        if (TupleUtils.isTick(input)) {
+          collector.ack(input);
+          return; // Do not try to send ticks to Kafka
+        }
+        K key = null;
+        V message = null;
+        String topic = null;
+        try {
+            key = mapper.getKeyFromTuple(input);
+            message = mapper.getMessageFromTuple(input);
+            topic = topicSelector.getTopic(input);
+            if (topic != null ) {
+                Callback callback = null;
+
+                if (!fireAndForget && async) {
+                    callback = new Callback() {
+                        @Override
+                        public void onCompletion(RecordMetadata ignored, Exception e) {
+                            synchronized (collector) {
+                                if (e != null) {
+                                    collector.reportError(e);
+                                    collector.fail(input);
+                                } else {
+                                    collector.ack(input);
+                                }
+                            }
+                        }
+                    };
+                }
+                Future<RecordMetadata> result = producer.send(new ProducerRecord<K, V>(topic, key, message), callback);
+                if (!async) {
+                    try {
+                        result.get();
+                        collector.ack(input);
+                    } catch (ExecutionException err) {
+                        collector.reportError(err);
+                        collector.fail(input);
+                    }
+                } else if (fireAndForget) {
+                    collector.ack(input);
+                }
+            } else {
+                LOG.warn("skipping key = " + key + ", topic selector returned null.");
+                collector.ack(input);
+            }
+        } catch (Exception ex) {
+            collector.reportError(ex);
+            collector.fail(input);
+        }
+    }
+
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+
+    }
+
+    @Override
+    public void cleanup() {
+        producer.close();
+    }
+
+    public void setFireAndForget(boolean fireAndForget) {
+        this.fireAndForget = fireAndForget;
+    }
+
+    public void setAsync(boolean async) {
+        this.async = async;
+    }
+}
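
A typical topology wiring for this bolt, shown as an illustrative sketch; the broker address, topic and the upstream spout are placeholders, and the property keys are the standard 0.8.2 new-producer settings:

    Properties props = new Properties();
    props.put("bootstrap.servers", "kafka-broker1:9092");
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

    KafkaBolt<String, String> kafkaBolt = new KafkaBolt<String, String>()
            .withProducerProperties(props)
            .withTopicSelector(new DefaultTopicSelector("clicks"))
            .withTupleToKafkaMapper(new FieldNameBasedTupleToKafkaMapper<String, String>());

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("events", new EventSpout());   // hypothetical upstream spout emitting "key"/"message" fields
    builder.setBolt("to-kafka", kafkaBolt, 2).shuffleGrouping("events");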

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/bolt/mapper/FieldNameBasedTupleToKafkaMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/bolt/mapper/FieldNameBasedTupleToKafkaMapper.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/bolt/mapper/FieldNameBasedTupleToKafkaMapper.java
new file mode 100644
index 0000000..672da8e
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/bolt/mapper/FieldNameBasedTupleToKafkaMapper.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka.bolt.mapper;
+
+import org.apache.storm.tuple.Tuple;
+
+public class FieldNameBasedTupleToKafkaMapper<K,V> implements TupleToKafkaMapper<K, V> {
+
+    public static final String BOLT_KEY = "key";
+    public static final String BOLT_MESSAGE = "message";
+    public String boltKeyField;
+    public String boltMessageField;
+
+    public FieldNameBasedTupleToKafkaMapper() {
+        this(BOLT_KEY, BOLT_MESSAGE);
+    }
+
+    public FieldNameBasedTupleToKafkaMapper(String boltKeyField, String boltMessageField) {
+        this.boltKeyField = boltKeyField;
+        this.boltMessageField = boltMessageField;
+    }
+
+    @Override
+    public K getKeyFromTuple(Tuple tuple) {
+        //for backward compatibility, we return null when key is not present.
+        return tuple.contains(boltKeyField) ? (K) tuple.getValueByField(boltKeyField) : null;
+    }
+
+    @Override
+    public V getMessageFromTuple(Tuple tuple) {
+        return (V) tuple.getValueByField(boltMessageField);
+    }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/bolt/mapper/TupleToKafkaMapper.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/bolt/mapper/TupleToKafkaMapper.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/bolt/mapper/TupleToKafkaMapper.java
new file mode 100644
index 0000000..3890413
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/bolt/mapper/TupleToKafkaMapper.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka.bolt.mapper;
+
+import org.apache.storm.tuple.Tuple;
+
+import java.io.Serializable;
+
+/**
+ * As the rather verbose name suggests, this interface maps a Storm tuple to a Kafka key and message.
+ * @param <K> type of key.
+ * @param <V> type of value.
+ */
+public interface TupleToKafkaMapper<K,V> extends Serializable {
+    K getKeyFromTuple(Tuple tuple);
+    V getMessageFromTuple(Tuple tuple);
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/bolt/selector/DefaultTopicSelector.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/bolt/selector/DefaultTopicSelector.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/bolt/selector/DefaultTopicSelector.java
new file mode 100644
index 0000000..2aafc78
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/bolt/selector/DefaultTopicSelector.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka.bolt.selector;
+
+import org.apache.storm.tuple.Tuple;
+
+public class DefaultTopicSelector implements KafkaTopicSelector {
+
+    private final String topicName;
+
+    public DefaultTopicSelector(final String topicName) {
+        this.topicName = topicName;
+    }
+
+    @Override
+    public String getTopic(Tuple tuple) {
+        return topicName;
+    }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/bolt/selector/KafkaTopicSelector.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/bolt/selector/KafkaTopicSelector.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/bolt/selector/KafkaTopicSelector.java
new file mode 100644
index 0000000..cb7fb44
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/bolt/selector/KafkaTopicSelector.java
@@ -0,0 +1,26 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka.bolt.selector;
+
+import org.apache.storm.tuple.Tuple;
+
+import java.io.Serializable;
+
+public interface KafkaTopicSelector extends Serializable {
+    String getTopic(Tuple tuple);
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/Coordinator.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/Coordinator.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/Coordinator.java
new file mode 100644
index 0000000..baec8cb
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/Coordinator.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka.trident;
+
+import org.apache.storm.kafka.KafkaUtils;
+import org.apache.storm.trident.spout.IOpaquePartitionedTridentSpout;
+import org.apache.storm.trident.spout.IPartitionedTridentSpout;
+
+import java.util.List;
+import java.util.Map;
+
+class Coordinator implements IPartitionedTridentSpout.Coordinator<List<GlobalPartitionInformation>>, IOpaquePartitionedTridentSpout.Coordinator<List<GlobalPartitionInformation>> {
+
+    private IBrokerReader reader;
+    private TridentKafkaConfig config;
+
+    public Coordinator(Map conf, TridentKafkaConfig tridentKafkaConfig) {
+        config = tridentKafkaConfig;
+        reader = KafkaUtils.makeBrokerReader(conf, config);
+    }
+
+    @Override
+    public void close() {
+        config.coordinator.close();
+    }
+
+    @Override
+    public boolean isReady(long txid) {
+        return config.coordinator.isReady(txid);
+    }
+
+    @Override
+    public List<GlobalPartitionInformation> getPartitionsForBatch() {
+        return reader.getAllBrokers();
+    }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/DefaultCoordinator.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/DefaultCoordinator.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/DefaultCoordinator.java
new file mode 100644
index 0000000..7a7e32c
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/DefaultCoordinator.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka.trident;
+
+public class DefaultCoordinator implements IBatchCoordinator {
+
+    @Override
+    public boolean isReady(long txid) {
+        return true;
+    }
+
+    @Override
+    public void close() {
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/GlobalPartitionInformation.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/GlobalPartitionInformation.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/GlobalPartitionInformation.java
new file mode 100644
index 0000000..3108ff8
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/GlobalPartitionInformation.java
@@ -0,0 +1,112 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka.trident;
+
+import com.google.common.base.Objects;
+import org.apache.storm.kafka.Broker;
+import org.apache.storm.kafka.Partition;
+
+import java.io.Serializable;
+import java.util.*;
+
+
+public class GlobalPartitionInformation implements Iterable<Partition>, Serializable {
+
+    private Map<Integer, Broker> partitionMap;
+    public String topic;
+
+    // Flag to keep the partition path id backward compatible with the old implementation, where Partition.getId() returned "partition_" + partition
+    private Boolean bUseTopicNameForPartitionPathId;
+
+    public GlobalPartitionInformation(String topic, Boolean bUseTopicNameForPartitionPathId) {
+        this.topic = topic;
+        this.partitionMap = new TreeMap<Integer, Broker>();
+        this.bUseTopicNameForPartitionPathId = bUseTopicNameForPartitionPathId;
+    }
+
+    public GlobalPartitionInformation(String topic) {
+        this.topic = topic;
+        this.partitionMap = new TreeMap<Integer, Broker>();
+        this.bUseTopicNameForPartitionPathId = false;
+    }
+
+    public void addPartition(int partitionId, Broker broker) {
+        partitionMap.put(partitionId, broker);
+    }
+
+    @Override
+    public String toString() {
+        return "GlobalPartitionInformation{" +
+                "topic=" + topic +
+                ", partitionMap=" + partitionMap +
+                '}';
+    }
+
+    public Broker getBrokerFor(Integer partitionId) {
+        return partitionMap.get(partitionId);
+    }
+
+    public List<Partition> getOrderedPartitions() {
+        List<Partition> partitions = new LinkedList<Partition>();
+        for (Map.Entry<Integer, Broker> partition : partitionMap.entrySet()) {
+            partitions.add(new Partition(partition.getValue(), this.topic, partition.getKey(), this.bUseTopicNameForPartitionPathId));
+        }
+        return partitions;
+    }
+
+    @Override
+    public Iterator<Partition> iterator() {
+        final Iterator<Map.Entry<Integer, Broker>> iterator = partitionMap.entrySet().iterator();
+        final String topic = this.topic;
+        final Boolean bUseTopicNameForPartitionPathId = this.bUseTopicNameForPartitionPathId;
+        return new Iterator<Partition>() {
+            @Override
+            public boolean hasNext() {
+                return iterator.hasNext();
+            }
+
+            @Override
+            public Partition next() {
+                Map.Entry<Integer, Broker> next = iterator.next();
+                return new Partition(next.getValue(), topic, next.getKey(), bUseTopicNameForPartitionPathId);
+            }
+
+            @Override
+            public void remove() {
+                iterator.remove();
+            }
+        };
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hashCode(partitionMap);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) {
+            return true;
+        }
+        if (obj == null || getClass() != obj.getClass()) {
+            return false;
+        }
+        final GlobalPartitionInformation other = (GlobalPartitionInformation) obj;
+        return Objects.equal(this.partitionMap, other.partitionMap);
+    }
+}
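
Not part of this diff: a minimal usage sketch of GlobalPartitionInformation. It is populated with one Broker per partition id and hands partitions back in partition-id order, since the backing map is a TreeMap. The topic, host names and ports below are hypothetical.

    GlobalPartitionInformation info = new GlobalPartitionInformation("test-topic");
    info.addPartition(0, new Broker("kafka-host-1", 9092));   // hypothetical broker
    info.addPartition(1, new Broker("kafka-host-2", 9092));   // hypothetical broker

    // Iteration follows partition-id order because partitionMap is a TreeMap.
    for (Partition p : info.getOrderedPartitions()) {
        Broker leader = info.getBrokerFor(p.partition);
        System.out.println(p.getId() + " -> " + leader);
    }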

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/IBatchCoordinator.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/IBatchCoordinator.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/IBatchCoordinator.java
new file mode 100644
index 0000000..41369ba
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/IBatchCoordinator.java
@@ -0,0 +1,26 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka.trident;
+
+import java.io.Serializable;
+
+public interface IBatchCoordinator extends Serializable {
+    boolean isReady(long txid);
+
+    void close();
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/IBrokerReader.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/IBrokerReader.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/IBrokerReader.java
new file mode 100644
index 0000000..904d8c9
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/IBrokerReader.java
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka.trident;
+
+import java.util.List;
+import java.util.Map;
+
+public interface IBrokerReader {
+
+    GlobalPartitionInformation getBrokerForTopic(String topic);
+
+    List<GlobalPartitionInformation> getAllBrokers();
+
+    void close();
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/MaxMetric.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/MaxMetric.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/MaxMetric.java
new file mode 100644
index 0000000..2332205
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/MaxMetric.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka.trident;
+
+
+import org.apache.storm.metric.api.ICombiner;
+
+public class MaxMetric implements ICombiner<Long> {
+    @Override
+    public Long identity() {
+        return null;
+    }
+
+    @Override
+    public Long combine(Long l1, Long l2) {
+        if (l1 == null) {
+            return l2;
+        }
+        if (l2 == null) {
+            return l1;
+        }
+        return Math.max(l1, l2);
+    }
+
+}
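
Not part of this diff: MaxMetric is an ICombiner, so it is normally wrapped in an org.apache.storm.metric.api.CombinedMetric and registered with the TopologyContext; the Trident emitter uses the same pattern for its fetch-latency metrics. The metric name and reporting interval below are hypothetical.

    CombinedMetric maxFetchLatency = new CombinedMetric(new MaxMetric());
    context.registerMetric("kafkaFetchMax", maxFetchLatency, 60); // report the max once per minute
    // ... later, on each fetch:
    maxFetchLatency.update(fetchLatencyMs); // combine() keeps the largest value seen in the window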

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/OpaqueTridentKafkaSpout.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/OpaqueTridentKafkaSpout.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/OpaqueTridentKafkaSpout.java
new file mode 100644
index 0000000..f540c87
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/OpaqueTridentKafkaSpout.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka.trident;
+
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.kafka.Partition;
+import org.apache.storm.trident.spout.IOpaquePartitionedTridentSpout;
+
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+
+
+public class OpaqueTridentKafkaSpout implements IOpaquePartitionedTridentSpout<List<GlobalPartitionInformation>, Partition, Map> {
+
+
+    TridentKafkaConfig _config;
+
+    public OpaqueTridentKafkaSpout(TridentKafkaConfig config) {
+        _config = config;
+    }
+
+    @Override
+    public IOpaquePartitionedTridentSpout.Emitter<List<GlobalPartitionInformation>, Partition, Map> getEmitter(Map conf, TopologyContext context) {
+        return new TridentKafkaEmitter(conf, context, _config, context
+                .getStormId()).asOpaqueEmitter();
+    }
+
+    @Override
+    public IOpaquePartitionedTridentSpout.Coordinator getCoordinator(Map conf, TopologyContext tc) {
+        return new org.apache.storm.kafka.trident.Coordinator(conf, _config);
+    }
+
+    @Override
+    public Fields getOutputFields() {
+        return _config.scheme.getOutputFields();
+    }
+
+    @Override
+    public Map<String, Object> getComponentConfiguration() {
+        return null;
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/StaticBrokerReader.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/StaticBrokerReader.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/StaticBrokerReader.java
new file mode 100644
index 0000000..ba27651
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/StaticBrokerReader.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka.trident;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+public class StaticBrokerReader implements IBrokerReader {
+
+    private Map<String,GlobalPartitionInformation> brokers = new TreeMap<String,GlobalPartitionInformation>();
+
+    public StaticBrokerReader(String topic, GlobalPartitionInformation partitionInformation) {
+        this.brokers.put(topic, partitionInformation);
+    }
+
+    @Override
+    public GlobalPartitionInformation getBrokerForTopic(String topic) {
+        if (brokers.containsKey(topic)) return brokers.get(topic);
+        return null;
+    }
+
+    @Override
+    public List<GlobalPartitionInformation> getAllBrokers() {
+        List<GlobalPartitionInformation> list = new ArrayList<GlobalPartitionInformation>();
+        list.addAll(brokers.values());
+        return list;
+    }
+
+    @Override
+    public void close() {
+    }
+}
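
Not part of this diff: StaticBrokerReader simply replays the fixed partition mapping it was built with; KafkaUtils.makeBrokerReader returns one when the spout is configured with a static broker list. The topic and broker below are hypothetical.

    GlobalPartitionInformation info = new GlobalPartitionInformation("test-topic");
    info.addPartition(0, new Broker("kafka-host-1", 9092));

    IBrokerReader reader = new StaticBrokerReader("test-topic", info);
    List<GlobalPartitionInformation> all = reader.getAllBrokers(); // single entry, for "test-topic"
    reader.close();                                                // no-op for the static reader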

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/TransactionalTridentKafkaSpout.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/TransactionalTridentKafkaSpout.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/TransactionalTridentKafkaSpout.java
new file mode 100644
index 0000000..ac5b49f
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/TransactionalTridentKafkaSpout.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka.trident;
+
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.kafka.Partition;
+import org.apache.storm.trident.spout.IPartitionedTridentSpout;
+
+import java.util.Map;
+import java.util.UUID;
+
+
+public class TransactionalTridentKafkaSpout implements IPartitionedTridentSpout<GlobalPartitionInformation, Partition, Map> {
+
+    TridentKafkaConfig _config;
+
+    public TransactionalTridentKafkaSpout(TridentKafkaConfig config) {
+        _config = config;
+    }
+
+
+    @Override
+    public IPartitionedTridentSpout.Coordinator getCoordinator(Map conf, TopologyContext context) {
+        return new org.apache.storm.kafka.trident.Coordinator(conf, _config);
+    }
+
+    @Override
+    public IPartitionedTridentSpout.Emitter getEmitter(Map conf, TopologyContext context) {
+        return new TridentKafkaEmitter(conf, context, _config, context
+                .getStormId()).asTransactionalEmitter();
+    }
+
+    @Override
+    public Fields getOutputFields() {
+        return _config.scheme.getOutputFields();
+    }
+
+    @Override
+    public Map<String, Object> getComponentConfiguration() {
+        return null;
+    }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/d839d1bf/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/TridentKafkaConfig.java
----------------------------------------------------------------------
diff --git a/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/TridentKafkaConfig.java b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/TridentKafkaConfig.java
new file mode 100644
index 0000000..b225e9a
--- /dev/null
+++ b/external/storm-kafka/src/jvm/org/apache/storm/kafka/trident/TridentKafkaConfig.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.storm.kafka.trident;
+
+import org.apache.storm.kafka.BrokerHosts;
+import org.apache.storm.kafka.KafkaConfig;
+
+
+public class TridentKafkaConfig extends KafkaConfig {
+
+
+    public final IBatchCoordinator coordinator = new DefaultCoordinator();
+
+    public TridentKafkaConfig(BrokerHosts hosts, String topic) {
+        super(hosts, topic);
+    }
+
+    public TridentKafkaConfig(BrokerHosts hosts, String topic, String clientId) {
+        super(hosts, topic, clientId);
+    }
+
+}
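
Not part of this diff: a minimal sketch of how these classes are typically wired into a Trident topology, assuming the org.apache.storm.kafka ZkHosts/StringScheme, org.apache.storm.spout.SchemeAsMultiScheme and org.apache.storm.trident classes migrated elsewhere in this patch. The ZooKeeper connect string, topic and client id are hypothetical, and the Debug filter is just a stand-in for real processing.

    BrokerHosts hosts = new ZkHosts("zk-host:2181");
    TridentKafkaConfig spoutConf = new TridentKafkaConfig(hosts, "test-topic", "client-id");
    spoutConf.scheme = new SchemeAsMultiScheme(new StringScheme()); // emit tuples with a single "str" field

    OpaqueTridentKafkaSpout spout = new OpaqueTridentKafkaSpout(spoutConf);

    TridentTopology topology = new TridentTopology();
    topology.newStream("kafka-spout", spout)
            .each(spout.getOutputFields(), new Debug());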