You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@storm.apache.org by bo...@apache.org on 2015/11/04 18:18:44 UTC

[01/10] storm git commit: Adding dynamic profiling for worker

Repository: storm
Updated Branches:
  refs/heads/master f3568d73c -> f3ed08b9f


http://git-wip-us.apache.org/repos/asf/storm/blob/0c2021e6/storm-core/src/jvm/backtype/storm/generated/ProfileRequest.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/generated/ProfileRequest.java b/storm-core/src/jvm/backtype/storm/generated/ProfileRequest.java
new file mode 100644
index 0000000..a3a98ae
--- /dev/null
+++ b/storm-core/src/jvm/backtype/storm/generated/ProfileRequest.java
@@ -0,0 +1,631 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Autogenerated by Thrift Compiler (0.9.2)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package backtype.storm.generated;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-24")
+public class ProfileRequest implements org.apache.thrift.TBase<ProfileRequest, ProfileRequest._Fields>, java.io.Serializable, Cloneable, Comparable<ProfileRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ProfileRequest");
+
+  private static final org.apache.thrift.protocol.TField NODE_INFO_FIELD_DESC = new org.apache.thrift.protocol.TField("nodeInfo", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+  private static final org.apache.thrift.protocol.TField ACTION_FIELD_DESC = new org.apache.thrift.protocol.TField("action", org.apache.thrift.protocol.TType.I32, (short)2);
+  private static final org.apache.thrift.protocol.TField TIME_STAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("time_stamp", org.apache.thrift.protocol.TType.I64, (short)3);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new ProfileRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new ProfileRequestTupleSchemeFactory());
+  }
+
+  private NodeInfo nodeInfo; // required
+  private ProfileAction action; // required
+  private long time_stamp; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    NODE_INFO((short)1, "nodeInfo"),
+    /**
+     * 
+     * @see ProfileAction
+     */
+    ACTION((short)2, "action"),
+    TIME_STAMP((short)3, "time_stamp");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // NODE_INFO
+          return NODE_INFO;
+        case 2: // ACTION
+          return ACTION;
+        case 3: // TIME_STAMP
+          return TIME_STAMP;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __TIME_STAMP_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.TIME_STAMP};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.NODE_INFO, new org.apache.thrift.meta_data.FieldMetaData("nodeInfo", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, NodeInfo.class)));
+    tmpMap.put(_Fields.ACTION, new org.apache.thrift.meta_data.FieldMetaData("action", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ProfileAction.class)));
+    tmpMap.put(_Fields.TIME_STAMP, new org.apache.thrift.meta_data.FieldMetaData("time_stamp", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ProfileRequest.class, metaDataMap);
+  }
+
+  public ProfileRequest() {
+  }
+
+  public ProfileRequest(
+    NodeInfo nodeInfo,
+    ProfileAction action)
+  {
+    this();
+    this.nodeInfo = nodeInfo;
+    this.action = action;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public ProfileRequest(ProfileRequest other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.is_set_nodeInfo()) {
+      this.nodeInfo = new NodeInfo(other.nodeInfo);
+    }
+    if (other.is_set_action()) {
+      this.action = other.action;
+    }
+    this.time_stamp = other.time_stamp;
+  }
+
+  public ProfileRequest deepCopy() {
+    return new ProfileRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.nodeInfo = null;
+    this.action = null;
+    set_time_stamp_isSet(false);
+    this.time_stamp = 0;
+  }
+
+  public NodeInfo get_nodeInfo() {
+    return this.nodeInfo;
+  }
+
+  public void set_nodeInfo(NodeInfo nodeInfo) {
+    this.nodeInfo = nodeInfo;
+  }
+
+  public void unset_nodeInfo() {
+    this.nodeInfo = null;
+  }
+
+  /** Returns true if field nodeInfo is set (has been assigned a value) and false otherwise */
+  public boolean is_set_nodeInfo() {
+    return this.nodeInfo != null;
+  }
+
+  public void set_nodeInfo_isSet(boolean value) {
+    if (!value) {
+      this.nodeInfo = null;
+    }
+  }
+
+  /**
+   * 
+   * @see ProfileAction
+   */
+  public ProfileAction get_action() {
+    return this.action;
+  }
+
+  /**
+   * 
+   * @see ProfileAction
+   */
+  public void set_action(ProfileAction action) {
+    this.action = action;
+  }
+
+  public void unset_action() {
+    this.action = null;
+  }
+
+  /** Returns true if field action is set (has been assigned a value) and false otherwise */
+  public boolean is_set_action() {
+    return this.action != null;
+  }
+
+  public void set_action_isSet(boolean value) {
+    if (!value) {
+      this.action = null;
+    }
+  }
+
+  public long get_time_stamp() {
+    return this.time_stamp;
+  }
+
+  public void set_time_stamp(long time_stamp) {
+    this.time_stamp = time_stamp;
+    set_time_stamp_isSet(true);
+  }
+
+  public void unset_time_stamp() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TIME_STAMP_ISSET_ID);
+  }
+
+  /** Returns true if field time_stamp is set (has been assigned a value) and false otherwise */
+  public boolean is_set_time_stamp() {
+    return EncodingUtils.testBit(__isset_bitfield, __TIME_STAMP_ISSET_ID);
+  }
+
+  public void set_time_stamp_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TIME_STAMP_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case NODE_INFO:
+      if (value == null) {
+        unset_nodeInfo();
+      } else {
+        set_nodeInfo((NodeInfo)value);
+      }
+      break;
+
+    case ACTION:
+      if (value == null) {
+        unset_action();
+      } else {
+        set_action((ProfileAction)value);
+      }
+      break;
+
+    case TIME_STAMP:
+      if (value == null) {
+        unset_time_stamp();
+      } else {
+        set_time_stamp((Long)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case NODE_INFO:
+      return get_nodeInfo();
+
+    case ACTION:
+      return get_action();
+
+    case TIME_STAMP:
+      return Long.valueOf(get_time_stamp());
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case NODE_INFO:
+      return is_set_nodeInfo();
+    case ACTION:
+      return is_set_action();
+    case TIME_STAMP:
+      return is_set_time_stamp();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof ProfileRequest)
+      return this.equals((ProfileRequest)that);
+    return false;
+  }
+
+  public boolean equals(ProfileRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_nodeInfo = true && this.is_set_nodeInfo();
+    boolean that_present_nodeInfo = true && that.is_set_nodeInfo();
+    if (this_present_nodeInfo || that_present_nodeInfo) {
+      if (!(this_present_nodeInfo && that_present_nodeInfo))
+        return false;
+      if (!this.nodeInfo.equals(that.nodeInfo))
+        return false;
+    }
+
+    boolean this_present_action = true && this.is_set_action();
+    boolean that_present_action = true && that.is_set_action();
+    if (this_present_action || that_present_action) {
+      if (!(this_present_action && that_present_action))
+        return false;
+      if (!this.action.equals(that.action))
+        return false;
+    }
+
+    boolean this_present_time_stamp = true && this.is_set_time_stamp();
+    boolean that_present_time_stamp = true && that.is_set_time_stamp();
+    if (this_present_time_stamp || that_present_time_stamp) {
+      if (!(this_present_time_stamp && that_present_time_stamp))
+        return false;
+      if (this.time_stamp != that.time_stamp)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_nodeInfo = true && (is_set_nodeInfo());
+    list.add(present_nodeInfo);
+    if (present_nodeInfo)
+      list.add(nodeInfo);
+
+    boolean present_action = true && (is_set_action());
+    list.add(present_action);
+    if (present_action)
+      list.add(action.getValue());
+
+    boolean present_time_stamp = true && (is_set_time_stamp());
+    list.add(present_time_stamp);
+    if (present_time_stamp)
+      list.add(time_stamp);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(ProfileRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(is_set_nodeInfo()).compareTo(other.is_set_nodeInfo());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_nodeInfo()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nodeInfo, other.nodeInfo);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_action()).compareTo(other.is_set_action());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_action()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.action, other.action);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_time_stamp()).compareTo(other.is_set_time_stamp());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_time_stamp()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.time_stamp, other.time_stamp);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("ProfileRequest(");
+    boolean first = true;
+
+    sb.append("nodeInfo:");
+    if (this.nodeInfo == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.nodeInfo);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("action:");
+    if (this.action == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.action);
+    }
+    first = false;
+    if (is_set_time_stamp()) {
+      if (!first) sb.append(", ");
+      sb.append("time_stamp:");
+      sb.append(this.time_stamp);
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!is_set_nodeInfo()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'nodeInfo' is unset! Struct:" + toString());
+    }
+
+    if (!is_set_action()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'action' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+    if (nodeInfo != null) {
+      nodeInfo.validate();
+    }
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class ProfileRequestStandardSchemeFactory implements SchemeFactory {
+    public ProfileRequestStandardScheme getScheme() {
+      return new ProfileRequestStandardScheme();
+    }
+  }
+
+  private static class ProfileRequestStandardScheme extends StandardScheme<ProfileRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, ProfileRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // NODE_INFO
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.nodeInfo = new NodeInfo();
+              struct.nodeInfo.read(iprot);
+              struct.set_nodeInfo_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // ACTION
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.action = backtype.storm.generated.ProfileAction.findByValue(iprot.readI32());
+              struct.set_action_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // TIME_STAMP
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.time_stamp = iprot.readI64();
+              struct.set_time_stamp_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, ProfileRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.nodeInfo != null) {
+        oprot.writeFieldBegin(NODE_INFO_FIELD_DESC);
+        struct.nodeInfo.write(oprot);
+        oprot.writeFieldEnd();
+      }
+      if (struct.action != null) {
+        oprot.writeFieldBegin(ACTION_FIELD_DESC);
+        oprot.writeI32(struct.action.getValue());
+        oprot.writeFieldEnd();
+      }
+      if (struct.is_set_time_stamp()) {
+        oprot.writeFieldBegin(TIME_STAMP_FIELD_DESC);
+        oprot.writeI64(struct.time_stamp);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class ProfileRequestTupleSchemeFactory implements SchemeFactory {
+    public ProfileRequestTupleScheme getScheme() {
+      return new ProfileRequestTupleScheme();
+    }
+  }
+
+  private static class ProfileRequestTupleScheme extends TupleScheme<ProfileRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, ProfileRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      struct.nodeInfo.write(oprot);
+      oprot.writeI32(struct.action.getValue());
+      BitSet optionals = new BitSet();
+      if (struct.is_set_time_stamp()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.is_set_time_stamp()) {
+        oprot.writeI64(struct.time_stamp);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, ProfileRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.nodeInfo = new NodeInfo();
+      struct.nodeInfo.read(iprot);
+      struct.set_nodeInfo_isSet(true);
+      struct.action = backtype.storm.generated.ProfileAction.findByValue(iprot.readI32());
+      struct.set_action_isSet(true);
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.time_stamp = iprot.readI64();
+        struct.set_time_stamp_isSet(true);
+      }
+    }
+  }
+
+}
+

http://git-wip-us.apache.org/repos/asf/storm/blob/0c2021e6/storm-core/src/jvm/backtype/storm/security/auth/authorizer/SimpleACLAuthorizer.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/security/auth/authorizer/SimpleACLAuthorizer.java b/storm-core/src/jvm/backtype/storm/security/auth/authorizer/SimpleACLAuthorizer.java
index e856578..a2549a5 100644
--- a/storm-core/src/jvm/backtype/storm/security/auth/authorizer/SimpleACLAuthorizer.java
+++ b/storm-core/src/jvm/backtype/storm/security/auth/authorizer/SimpleACLAuthorizer.java
@@ -57,6 +57,14 @@ public class SimpleACLAuthorizer implements IAuthorizer {
             "getComponentPageInfo",
             "uploadNewCredentials",
             "setLogConfig",
+            "setWorkerProfiler",
+            "getWorkerProfileActionExpiry",
+            "getComponentPendingProfileActions",
+            "startProfiling",
+            "stopProfiling",
+            "dumpProfile",
+            "dumpJstack",
+            "dumpHeap",
             "getLogConfig"));
 
     protected Set<String> _admins;

http://git-wip-us.apache.org/repos/asf/storm/blob/0c2021e6/storm-core/src/native/worker-launcher/impl/main.c
----------------------------------------------------------------------
diff --git a/storm-core/src/native/worker-launcher/impl/main.c b/storm-core/src/native/worker-launcher/impl/main.c
index 7067cf9..a51f9f9 100644
--- a/storm-core/src/native/worker-launcher/impl/main.c
+++ b/storm-core/src/native/worker-launcher/impl/main.c
@@ -47,6 +47,7 @@ void display_usage(FILE *stream) {
   fprintf(stream, "   initialize stormdist dir: code-dir <code-directory>\n");
   fprintf(stream, "   remove a file/directory: rmr <directory>\n");
   fprintf(stream, "   launch a worker: worker <working-directory> <script-to-run>\n");
+  fprintf(stream, "   launch a profiler: profiler <working-directory> <script-to-run>\n");
   fprintf(stream, "   signal a worker: signal <pid> <signal>\n");
 }
 
@@ -176,6 +177,15 @@ int main(int argc, char **argv) {
     if (exit_code == 0) {
       exit_code = exec_as_user(working_dir, argv[optind]);
     }
+   } else if (strcasecmp("profiler", command) == 0) {
+    if (argc != 5) {
+      fprintf(ERRORFILE, "Incorrect number of arguments (%d vs 5) for profiler\n",
+	      argc);
+      fflush(ERRORFILE);
+      return INVALID_ARGUMENT_NUMBER;
+    }
+    working_dir = argv[optind++];
+    exit_code = exec_as_user(working_dir, argv[optind]);
   } else if (strcasecmp("signal", command) == 0) {
     if (argc != 5) {
       fprintf(ERRORFILE, "Incorrect number of arguments (%d vs 5) for signal\n",

http://git-wip-us.apache.org/repos/asf/storm/blob/0c2021e6/storm-core/src/native/worker-launcher/impl/worker-launcher.c
----------------------------------------------------------------------
diff --git a/storm-core/src/native/worker-launcher/impl/worker-launcher.c b/storm-core/src/native/worker-launcher/impl/worker-launcher.c
index 3a6c4b8..1c4b36a 100644
--- a/storm-core/src/native/worker-launcher/impl/worker-launcher.c
+++ b/storm-core/src/native/worker-launcher/impl/worker-launcher.c
@@ -743,6 +743,53 @@ int exec_as_user(const char * working_dir, const char * script_file) {
   return -1;
 }
 
+int fork_as_user(const char * working_dir, const char * script_file) {
+  char *script_file_dest = NULL;
+  script_file_dest = get_container_launcher_file(working_dir);
+  if (script_file_dest == NULL) {
+    return OUT_OF_MEMORY;
+  }
+
+  // open launch script
+  int script_file_source = open_file_as_nm(script_file);
+  if (script_file_source == -1) {
+    return -1;
+  }
+
+  setsid();
+
+  // give up root privs
+  if (change_user(user_detail->pw_uid, user_detail->pw_gid) != 0) {
+    return SETUID_OPER_FAILED;
+  }
+
+  if (copy_file(script_file_source, script_file, script_file_dest, S_IRWXU) != 0) {
+    return -1;
+  }
+
+  fcloseall();
+  umask(0027);
+  if (chdir(working_dir) != 0) {
+    fprintf(LOGFILE, "Can't change directory to %s -%s\n", working_dir,
+	    strerror(errno));
+    return -1;
+  }
+
+  int pid = fork();
+  if (pid == 0 && execlp(script_file_dest, script_file_dest, NULL) != 0) {
+    fprintf(LOGFILE, "Couldn't execute the container launch file %s - %s",
+            script_file_dest, strerror(errno));
+    return UNABLE_TO_EXECUTE_CONTAINER_SCRIPT;
+  } else {
+    fprintf(LOGFILE, "Launched the process from the container launch file %s - with pid %d",
+            script_file_dest, pid);
+    return 0;
+  }
+
+  //Unreachable
+  return -1;
+}
+
 /**
  * Delete the given directory as the user from each of the directories
  * user: the user doing the delete

http://git-wip-us.apache.org/repos/asf/storm/blob/0c2021e6/storm-core/src/native/worker-launcher/impl/worker-launcher.h
----------------------------------------------------------------------
diff --git a/storm-core/src/native/worker-launcher/impl/worker-launcher.h b/storm-core/src/native/worker-launcher/impl/worker-launcher.h
index 59ab998..3b1ec24 100644
--- a/storm-core/src/native/worker-launcher/impl/worker-launcher.h
+++ b/storm-core/src/native/worker-launcher/impl/worker-launcher.h
@@ -70,6 +70,8 @@ int setup_stormdist_dir(const char* local_dir);
 
 int exec_as_user(const char * working_dir, const char * args);
 
+int fork_as_user(const char * working_dir, const char * args);
+
 // delete a directory (or file) recursively as the user. The directory
 // could optionally be relative to the baseDir set of directories (if the same
 // directory appears on multiple disk volumes, the disk volumes should be passed

http://git-wip-us.apache.org/repos/asf/storm/blob/0c2021e6/storm-core/src/py/storm/Nimbus-remote
----------------------------------------------------------------------
diff --git a/storm-core/src/py/storm/Nimbus-remote b/storm-core/src/py/storm/Nimbus-remote
index d05a4b2..14acdc9 100644
--- a/storm-core/src/py/storm/Nimbus-remote
+++ b/storm-core/src/py/storm/Nimbus-remote
@@ -52,6 +52,8 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help':
   print('  void setLogConfig(string name, LogConfig config)')
   print('  LogConfig getLogConfig(string name)')
   print('  void debug(string name, string component, bool enable, double samplingPercentage)')
+  print('  void setWorkerProfiler(string id, ProfileRequest profileRequest)')
+  print('   getComponentPendingProfileActions(string id, string component_id, ProfileAction action)')
   print('  void uploadNewCredentials(string name, Credentials creds)')
   print('  string beginFileUpload()')
   print('  void uploadChunk(string location, string chunk)')
@@ -183,6 +185,18 @@ elif cmd == 'debug':
     sys.exit(1)
   pp.pprint(client.debug(args[0],args[1],eval(args[2]),eval(args[3]),))
 
+elif cmd == 'setWorkerProfiler':
+  if len(args) != 2:
+    print('setWorkerProfiler requires 2 args')
+    sys.exit(1)
+  pp.pprint(client.setWorkerProfiler(args[0],eval(args[1]),))
+
+elif cmd == 'getComponentPendingProfileActions':
+  if len(args) != 3:
+    print('getComponentPendingProfileActions requires 3 args')
+    sys.exit(1)
+  pp.pprint(client.getComponentPendingProfileActions(args[0],args[1],eval(args[2]),))
+
 elif cmd == 'uploadNewCredentials':
   if len(args) != 2:
     print('uploadNewCredentials requires 2 args')

http://git-wip-us.apache.org/repos/asf/storm/blob/0c2021e6/storm-core/src/py/storm/Nimbus.py
----------------------------------------------------------------------
diff --git a/storm-core/src/py/storm/Nimbus.py b/storm-core/src/py/storm/Nimbus.py
index 3c26a16..a446654 100644
--- a/storm-core/src/py/storm/Nimbus.py
+++ b/storm-core/src/py/storm/Nimbus.py
@@ -125,6 +125,23 @@ class Iface:
     """
     pass
 
+  def setWorkerProfiler(self, id, profileRequest):
+    """
+    Parameters:
+     - id
+     - profileRequest
+    """
+    pass
+
+  def getComponentPendingProfileActions(self, id, component_id, action):
+    """
+    Parameters:
+     - id
+     - component_id
+     - action
+    """
+    pass
+
   def uploadNewCredentials(self, name, creds):
     """
     Parameters:
@@ -600,6 +617,72 @@ class Client(Iface):
       raise result.aze
     return
 
+  def setWorkerProfiler(self, id, profileRequest):
+    """
+    Parameters:
+     - id
+     - profileRequest
+    """
+    self.send_setWorkerProfiler(id, profileRequest)
+    self.recv_setWorkerProfiler()
+
+  def send_setWorkerProfiler(self, id, profileRequest):
+    self._oprot.writeMessageBegin('setWorkerProfiler', TMessageType.CALL, self._seqid)
+    args = setWorkerProfiler_args()
+    args.id = id
+    args.profileRequest = profileRequest
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_setWorkerProfiler(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = setWorkerProfiler_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    return
+
+  def getComponentPendingProfileActions(self, id, component_id, action):
+    """
+    Parameters:
+     - id
+     - component_id
+     - action
+    """
+    self.send_getComponentPendingProfileActions(id, component_id, action)
+    return self.recv_getComponentPendingProfileActions()
+
+  def send_getComponentPendingProfileActions(self, id, component_id, action):
+    self._oprot.writeMessageBegin('getComponentPendingProfileActions', TMessageType.CALL, self._seqid)
+    args = getComponentPendingProfileActions_args()
+    args.id = id
+    args.component_id = component_id
+    args.action = action
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_getComponentPendingProfileActions(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = getComponentPendingProfileActions_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.success is not None:
+      return result.success
+    raise TApplicationException(TApplicationException.MISSING_RESULT, "getComponentPendingProfileActions failed: unknown result");
+
   def uploadNewCredentials(self, name, creds):
     """
     Parameters:
@@ -1127,6 +1210,8 @@ class Processor(Iface, TProcessor):
     self._processMap["setLogConfig"] = Processor.process_setLogConfig
     self._processMap["getLogConfig"] = Processor.process_getLogConfig
     self._processMap["debug"] = Processor.process_debug
+    self._processMap["setWorkerProfiler"] = Processor.process_setWorkerProfiler
+    self._processMap["getComponentPendingProfileActions"] = Processor.process_getComponentPendingProfileActions
     self._processMap["uploadNewCredentials"] = Processor.process_uploadNewCredentials
     self._processMap["beginFileUpload"] = Processor.process_beginFileUpload
     self._processMap["uploadChunk"] = Processor.process_uploadChunk
@@ -1314,6 +1399,28 @@ class Processor(Iface, TProcessor):
     oprot.writeMessageEnd()
     oprot.trans.flush()
 
+  def process_setWorkerProfiler(self, seqid, iprot, oprot):
+    args = setWorkerProfiler_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = setWorkerProfiler_result()
+    self._handler.setWorkerProfiler(args.id, args.profileRequest)
+    oprot.writeMessageBegin("setWorkerProfiler", TMessageType.REPLY, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
+  def process_getComponentPendingProfileActions(self, seqid, iprot, oprot):
+    args = getComponentPendingProfileActions_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = getComponentPendingProfileActions_result()
+    result.success = self._handler.getComponentPendingProfileActions(args.id, args.component_id, args.action)
+    oprot.writeMessageBegin("getComponentPendingProfileActions", TMessageType.REPLY, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
   def process_uploadNewCredentials(self, seqid, iprot, oprot):
     args = uploadNewCredentials_args()
     args.read(iprot)
@@ -3163,6 +3270,295 @@ class debug_result:
   def __ne__(self, other):
     return not (self == other)
 
+class setWorkerProfiler_args:
+  """
+  Attributes:
+   - id
+   - profileRequest
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'id', None, None, ), # 1
+    (2, TType.STRUCT, 'profileRequest', (ProfileRequest, ProfileRequest.thrift_spec), None, ), # 2
+  )
+
+  def __init__(self, id=None, profileRequest=None,):
+    self.id = id
+    self.profileRequest = profileRequest
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.id = iprot.readString().decode('utf-8')
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRUCT:
+          self.profileRequest = ProfileRequest()
+          self.profileRequest.read(iprot)
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('setWorkerProfiler_args')
+    if self.id is not None:
+      oprot.writeFieldBegin('id', TType.STRING, 1)
+      oprot.writeString(self.id.encode('utf-8'))
+      oprot.writeFieldEnd()
+    if self.profileRequest is not None:
+      oprot.writeFieldBegin('profileRequest', TType.STRUCT, 2)
+      self.profileRequest.write(oprot)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.id)
+    value = (value * 31) ^ hash(self.profileRequest)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class setWorkerProfiler_result:
+
+  thrift_spec = (
+  )
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('setWorkerProfiler_result')
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class getComponentPendingProfileActions_args:
+  """
+  Attributes:
+   - id
+   - component_id
+   - action
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'id', None, None, ), # 1
+    (2, TType.STRING, 'component_id', None, None, ), # 2
+    (3, TType.I32, 'action', None, None, ), # 3
+  )
+
+  def __init__(self, id=None, component_id=None, action=None,):
+    self.id = id
+    self.component_id = component_id
+    self.action = action
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.id = iprot.readString().decode('utf-8')
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRING:
+          self.component_id = iprot.readString().decode('utf-8')
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.I32:
+          self.action = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('getComponentPendingProfileActions_args')
+    if self.id is not None:
+      oprot.writeFieldBegin('id', TType.STRING, 1)
+      oprot.writeString(self.id.encode('utf-8'))
+      oprot.writeFieldEnd()
+    if self.component_id is not None:
+      oprot.writeFieldBegin('component_id', TType.STRING, 2)
+      oprot.writeString(self.component_id.encode('utf-8'))
+      oprot.writeFieldEnd()
+    if self.action is not None:
+      oprot.writeFieldBegin('action', TType.I32, 3)
+      oprot.writeI32(self.action)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.id)
+    value = (value * 31) ^ hash(self.component_id)
+    value = (value * 31) ^ hash(self.action)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class getComponentPendingProfileActions_result:
+  """
+  Attributes:
+   - success
+  """
+
+  thrift_spec = (
+    (0, TType.LIST, 'success', (TType.STRUCT,(ProfileRequest, ProfileRequest.thrift_spec)), None, ), # 0
+  )
+
+  def __init__(self, success=None,):
+    self.success = success
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 0:
+        if ftype == TType.LIST:
+          self.success = []
+          (_etype599, _size596) = iprot.readListBegin()
+          for _i600 in xrange(_size596):
+            _elem601 = ProfileRequest()
+            _elem601.read(iprot)
+            self.success.append(_elem601)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('getComponentPendingProfileActions_result')
+    if self.success is not None:
+      oprot.writeFieldBegin('success', TType.LIST, 0)
+      oprot.writeListBegin(TType.STRUCT, len(self.success))
+      for iter602 in self.success:
+        iter602.write(oprot)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.success)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
 class uploadNewCredentials_args:
   """
   Attributes:

http://git-wip-us.apache.org/repos/asf/storm/blob/0c2021e6/storm-core/src/py/storm/ttypes.py
----------------------------------------------------------------------
diff --git a/storm-core/src/py/storm/ttypes.py b/storm-core/src/py/storm/ttypes.py
index 7f8adea..a14d62d 100644
--- a/storm-core/src/py/storm/ttypes.py
+++ b/storm-core/src/py/storm/ttypes.py
@@ -99,6 +99,32 @@ class NumErrorsChoice:
     "ONE": 2,
   }
 
+class ProfileAction:
+  JPROFILE_STOP = 0
+  JPROFILE_START = 1
+  JPROFILE_DUMP = 2
+  JMAP_DUMP = 3
+  JSTACK_DUMP = 4
+  JVM_RESTART = 5
+
+  _VALUES_TO_NAMES = {
+    0: "JPROFILE_STOP",
+    1: "JPROFILE_START",
+    2: "JPROFILE_DUMP",
+    3: "JMAP_DUMP",
+    4: "JSTACK_DUMP",
+    5: "JVM_RESTART",
+  }
+
+  _NAMES_TO_VALUES = {
+    "JPROFILE_STOP": 0,
+    "JPROFILE_START": 1,
+    "JPROFILE_DUMP": 2,
+    "JMAP_DUMP": 3,
+    "JSTACK_DUMP": 4,
+    "JVM_RESTART": 5,
+  }
+
 class LogLevelAction:
   UNCHANGED = 1
   UPDATE = 2
@@ -8553,6 +8579,102 @@ class LSWorkerHeartbeat:
   def __ne__(self, other):
     return not (self == other)
 
+class ProfileRequest:
+  """
+  Attributes:
+   - nodeInfo
+   - action
+   - time_stamp
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRUCT, 'nodeInfo', (NodeInfo, NodeInfo.thrift_spec), None, ), # 1
+    (2, TType.I32, 'action', None, None, ), # 2
+    (3, TType.I64, 'time_stamp', None, None, ), # 3
+  )
+
+  def __init__(self, nodeInfo=None, action=None, time_stamp=None,):
+    self.nodeInfo = nodeInfo
+    self.action = action
+    self.time_stamp = time_stamp
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRUCT:
+          self.nodeInfo = NodeInfo()
+          self.nodeInfo.read(iprot)
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.I32:
+          self.action = iprot.readI32();
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.I64:
+          self.time_stamp = iprot.readI64();
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('ProfileRequest')
+    if self.nodeInfo is not None:
+      oprot.writeFieldBegin('nodeInfo', TType.STRUCT, 1)
+      self.nodeInfo.write(oprot)
+      oprot.writeFieldEnd()
+    if self.action is not None:
+      oprot.writeFieldBegin('action', TType.I32, 2)
+      oprot.writeI32(self.action)
+      oprot.writeFieldEnd()
+    if self.time_stamp is not None:
+      oprot.writeFieldBegin('time_stamp', TType.I64, 3)
+      oprot.writeI64(self.time_stamp)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    if self.nodeInfo is None:
+      raise TProtocol.TProtocolException(message='Required field nodeInfo is unset!')
+    if self.action is None:
+      raise TProtocol.TProtocolException(message='Required field action is unset!')
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.nodeInfo)
+    value = (value * 31) ^ hash(self.action)
+    value = (value * 31) ^ hash(self.time_stamp)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
 class GetInfoOptions:
   """
   Attributes:

http://git-wip-us.apache.org/repos/asf/storm/blob/0c2021e6/storm-core/src/storm.thrift
----------------------------------------------------------------------
diff --git a/storm-core/src/storm.thrift b/storm-core/src/storm.thrift
index 3c07cac..51e1236 100644
--- a/storm-core/src/storm.thrift
+++ b/storm-core/src/storm.thrift
@@ -467,6 +467,21 @@ enum NumErrorsChoice {
   ONE
 }
 
+enum ProfileAction {
+  JPROFILE_STOP,
+  JPROFILE_START,
+  JPROFILE_DUMP,
+  JMAP_DUMP,
+  JSTACK_DUMP,
+  JVM_RESTART
+}
+
+struct ProfileRequest {
+  1: required NodeInfo nodeInfo,
+  2: required ProfileAction action,
+  3: optional i64 time_stamp; 
+}
+
 struct GetInfoOptions {
   1: optional NumErrorsChoice num_err_choice;
 }
@@ -523,6 +538,11 @@ service Nimbus {
  * The 'samplingPercentage' will limit logging to a percentage of generated tuples.
   **/
   void debug(1: string name, 2: string component, 3: bool enable, 4: double samplingPercentage) throws (1: NotAliveException e, 2: AuthorizationException aze);
+
+  // dynamic profile actions
+  void setWorkerProfiler(1: string id, 2: ProfileRequest  profileRequest);
+  list<ProfileRequest> getComponentPendingProfileActions(1: string id, 2: string component_id, 3: ProfileAction action);
+
   void uploadNewCredentials(1: string name, 2: Credentials creds) throws (1: NotAliveException e, 2: InvalidTopologyException ite, 3: AuthorizationException aze);
 
   // need to add functions for asking about status of storms, what nodes they're running on, looking at task logs

http://git-wip-us.apache.org/repos/asf/storm/blob/0c2021e6/storm-core/src/ui/public/component.html
----------------------------------------------------------------------
diff --git a/storm-core/src/ui/public/component.html b/storm-core/src/ui/public/component.html
index 4be5860..60a85cf 100644
--- a/storm-core/src/ui/public/component.html
+++ b/storm-core/src/ui/public/component.html
@@ -58,6 +58,9 @@
     <div id="component-output-stats" class="col-md-12"></div>
   </div>
   <div class="row">
+    <div id="profiler-control" class="col-md-12"></div>
+  </div>
+  <div class="row">
     <div id="component-executor-stats" class="col-md-12"></div>
   </div>
   <div class="row">
@@ -76,6 +79,15 @@ $(document).ajaxStop($.unblockUI);
 $(document).ajaxStart(function(){
     $.blockUI({ message: '<img src="images/spinner.gif" /> <h3>Loading component summary...</h3>'});
 });
+function jsError(other) {
+    try {
+      other();
+    } catch (err) {
+      $.get("/templates/json-error-template.html", function(template) {
+        $("#json-response-error").append(Mustache.render($(template).filter("#json-error-template").html(),{error: "JS Error", errorMessage: err}));
+      });
+    }
+}
 $(document).ready(function() {
     var componentId = $.url("?id");
     var topologyId = $.url("?topology_id");
@@ -121,11 +133,45 @@ $(document).ready(function() {
         var componentStatsDetail = $("#component-stats-detail")
         var inputStats = $("#component-input-stats");
         var outputStats = $("#component-output-stats");
+        var profilerControl = $("#profiler-control");
         var executorStats = $("#component-executor-stats");
         var componentErrors = $("#component-errors");
         $.get("/templates/component-page-template.html", function(template) {
-            componentSummary.append(Mustache.render($(template).filter("#component-summary-template").html(),response));
-            componentActions.append(Mustache.render($(template).filter("#component-actions-template").html(),buttonJsonData));
+            response["hosts"] = [];
+            for(var comp_index in response["executorStats"]) {
+                var comp = response["executorStats"][comp_index];
+                var host_port = comp["host"] + ":" + comp["port"];
+                if($.inArray(host_port, response["hosts"]) == -1) {
+                    response["hosts"].push(host_port);
+                }
+            }
+
+            response["hosts"] = $.map(response["hosts"], function(host_port) {
+                return {"name": host_port};
+            });
+
+            response["profilerActive"] = $.map(response["profilerActive"], function(active_map) {
+                var date = new Date();
+                var millis = date.getTime() + parseInt(active_map["timestamp"]);
+                date = new Date(millis);
+                active_map["timestamp"] = date.toTimeString();
+                return active_map;
+            });
+
+            jsError(function() {
+              componentSummary.append(Mustache.render($(template).filter("#component-summary-template").html(),response));
+            });
+
+            jsError(function() {
+              componentActions.append(Mustache.render($(template).filter("#component-actions-template").html(),buttonJsonData));
+            });
+
+            jsError(function() {
+                var part = $(template).filter('#profiler-active-partial').html();
+                var partials = {"profilerActive": part};
+                profilerControl.append(Mustache.render($(template).filter("#profiling-template").html(), response, partials));
+            });
+
             if(response["componentType"] == "spout") {
                 componentStatsDetail.append(Mustache.render($(template).filter("#spout-stats-detail-template").html(),response));
                 //window, emitted, transferred, complete latency, acked, failed
@@ -213,6 +259,121 @@ $(document).ready(function() {
         });
     });
 });
+
+function profiler_selected_worker() {
+    return $("#selected_worker").val();
+}
+
+function start_profiling(id) {
+    var topologyId = $.url("?topology_id");
+    var timeout = $("#timeout").val();
+
+    if(timeout == "") { timeout = 10; }
+    if(isNaN(parseFloat(timeout)) || !isFinite(timeout)) {
+        alert("Must specify a numeric timeout");
+        return;
+    }
+
+    var url = "/api/v1/topology/"+topologyId+"/profiling/start/" + id + "/" + timeout;
+    $.get(url, function(response,status,jqXHR) {
+        jsError(function() {
+            $.get("/templates/component-page-template.html", function(template) {
+                var host_port_split = id.split(":");
+                var host = host_port_split[0];
+                var port = host_port_split[1];
+                var millis = new Date().getTime() + (timeout * 60000);
+                var timestamp = new Date(millis).toTimeString();
+                
+                var mustache = Mustache.render($(template).filter("#profiler-active-partial").html(), {"profilerActive": [{
+                    "host": host,
+                    "port": port,
+                    "timestamp": timestamp,
+                    "dumplink": response["dumplink"]}]});
+                $("#profiler-table-body").append(mustache);
+            });
+        });
+    })
+    .fail(function(response) {
+        alert( "Starting profiler for " + id + " failed: \n" + JSON.stringify(response));
+    });
+}
+
+function stop_profiling(id) {
+    var topologyId = $.url("?topology_id");
+    var url = "/api/v1/topology/"+topologyId+"/profiling/stop/" + id;
+
+    $("#stop_" + id).prop('disabled', true);
+    setTimeout(function(){ $("#stop_" + id).prop('disabled', false); }, 5000);
+    
+    $.get(url, function(response,status,jqXHR) {
+        alert("Submitted request to stop profiling...");
+    })
+    .fail(function(response) {
+        alert( "Stopping profiler for " + id + " failed: \n" + JSON.stringify(response));
+    });
+    
+}
+
+function dump_profile(id) {
+    var topologyId = $.url("?topology_id");
+    var url = "/api/v1/topology/"+topologyId+"/profiling/dumpprofile/" + id;
+
+    $("#dump_profile_" + id).prop('disabled', true);
+    setTimeout(function(){ $("#dump_profile_" + id).prop('disabled', false); }, 5000);
+    
+    $.get(url, function(response,status,jqXHR) {
+        alert("Submitted request to dump profile snapshot...");
+    })
+    .fail(function(response) {
+        alert( "Dumping profile data for " + id + " failed: \n" + JSON.stringify(response));
+    });
+}
+
+function dump_jstack(id) {
+    var topologyId = $.url("?topology_id");
+    var url = "/api/v1/topology/"+topologyId+"/profiling/dumpjstack/" + id;
+
+    $("#dump_jstack_" + id).prop('disabled', true);
+    setTimeout(function(){ $("#dump_jstack_" + id).prop('disabled', false); }, 5000);
+    
+    $.get(url, function(response,status,jqXHR) {
+        alert("Submitted request for jstack dump...");
+    })
+    .fail(function(response) {
+        alert( "Dumping JStack for " + id + " failed: \n" + JSON.stringify(response));
+    });
+}
+
+function restart_worker_jvm(id) {
+    var topologyId = $.url("?topology_id");
+    var url = "/api/v1/topology/"+topologyId+"/profiling/restartworker/" + id;
+
+    $("#restart_worker_jvm_" + id).prop('disabled', true);
+    setTimeout(function(){ $("#restart_worker_jvm_" + id).prop('disabled', false); }, 5000);
+
+    $.get(url, function(response,status,jqXHR) {
+        alert("Submitted request for restarting worker...");
+    })
+    .fail(function(response) {
+        alert( "Failed to restart worker for " + id + " failed: \n" + JSON.stringify(response));
+    });
+}
+
+function dump_heap(id) {
+    var topologyId = $.url("?topology_id");
+    var url = "/api/v1/topology/"+topologyId+"/profiling/dumpheap/" + id;
+    var heap = $("#dump_heap_" + id);
+    $("#dump_heap_" + id).prop('disabled', true);
+    setTimeout(function(){ $("#dump_heap_" + id).prop('disabled', false); }, 5000);
+    
+    $.get(url, function(response,status,jqXHR) {
+        alert("Submitted request for jmap dump...");
+    })
+    .fail(function(response) {
+        alert( "Dumping Heap for " + id + " failed: \n" + JSON.stringify(response));
+    });
+}
+
 </script>
 </div>
 </body>

http://git-wip-us.apache.org/repos/asf/storm/blob/0c2021e6/storm-core/src/ui/public/templates/component-page-template.html
----------------------------------------------------------------------
diff --git a/storm-core/src/ui/public/templates/component-page-template.html b/storm-core/src/ui/public/templates/component-page-template.html
index 7a4c894..f21b30d 100644
--- a/storm-core/src/ui/public/templates/component-page-template.html
+++ b/storm-core/src/ui/public/templates/component-page-template.html
@@ -227,6 +227,59 @@
     </tbody>
   </table>
 </script>
+<script id="profiling-template" type="text/html">
+  <h2>Profiling and Debugging</h2>
+  Use the following controls to profile and debug the components on this page.
+  <table class="table table-striped compact">
+    <thead>
+      <tr>
+        <th class="header">Component</th>
+        <th class="header">
+          <span data-original-title="The status of a running profiler or the timeout for one you're starting (in minutes)" data-toggle="tooltip">
+            Status / Timeout (Minutes)
+          </span>
+        </th>
+        <th class="header">Actions</th>
+      </tr>
+    </thead>
+    <tbody id="profiler-table-body">
+      <tr>
+        <td>
+          <select id="selected_worker">
+            {{#hosts}}
+            <option value="{{name}}">{{name}}</option>
+            {{/hosts}}
+          </select>
+        </td>
+        <td>
+          <input id="timeout" class="timeout_input" type="text" value="" placeholder="10"/>
+        </td>
+        <td>
+          <input type="button" value="Start" name="start" onClick="start_profiling(profiler_selected_worker())" class="btn btn-secondary"/>
+          <input type="button" value="JStack" name="jstack" onClick="dump_jstack(profiler_selected_worker())" class="btn btn-secondary"/>
+          <input type="button" value="Restart Worker" name="jvmrestart" onClick="restart_worker_jvm(profiler_selected_worker())" class="btn btn-secondary"/>
+          <input type="button" value="Heap" name="heap" onClick="dump_heap(profiler_selected_worker())" class="btn btn-secondary"/>
+        </td>
+      </tr>
+      {{> profilerActive}}
+    </tbody>
+  </table>
+</script>
+<script id="profiler-active-partial" type="text/html">
+  {{#profilerActive}}
+  <tr>
+    <td>{{host}}:{{port}}</td>
+    <td>Active until {{timestamp}}</td>
+    <td>
+      <input id="stop_{{host}}:{{port}}" type="button" value="Stop" name="stop" onClick="stop_profiling('{{host}}:{{port}}')" class="btn btn-secondary"/>
+      <input id="dump_profile_{{host}}:{{port}}" type="button" value="Dump Profile" name="dumpjprofile" onClick="dump_profile('{{host}}:{{port}}')" class="btn btn-secondary"/>
+      <input id="dump_jstack_{{host}}:{{port}}" type="button" value="JStack" name="jstack" onClick="dump_jstack('{{host}}:{{port}}')" class="btn btn-secondary"/>
+      <input id="restart_worker_jvm_{{host}}:{{port}}" type="button" value="Restart Worker" name="jvmrestart" onClick="restart_worker_jvm('{{host}}:{{port}}')" class="btn btn-secondary"/>
+      <input id="dump_heap_{{host}}:{{port}}" type="button" value="Heap" name="heap" onClick="dump_heap('{{host}}:{{port}}')" class="btn btn-secondary"/> <a href="{{dumplink}}">My Dump Files</a>
+    </td>
+  </tr>
+  {{/profilerActive}}
+</script>
 <script id="bolt-stats-template" type="text/html">
   <h2>Bolt stats</h2>
   <table class="table table-striped compact" id="bolt-stats-table">


[07/10] storm git commit: Update REST API documentation for profiling and debugging endpoints.

Posted by bo...@apache.org.
Update REST API documentation for profiling and debugging endpoints.


Project: http://git-wip-us.apache.org/repos/asf/storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/storm/commit/8d91ad94
Tree: http://git-wip-us.apache.org/repos/asf/storm/tree/8d91ad94
Diff: http://git-wip-us.apache.org/repos/asf/storm/diff/8d91ad94

Branch: refs/heads/master
Commit: 8d91ad942374108fce588d3a144cb1831d505521
Parents: 25f31a0
Author: Kishor Patil <kp...@yahoo-inc.com>
Authored: Wed Nov 4 00:24:19 2015 -0600
Committer: Kishor Patil <kp...@yahoo-inc.com>
Committed: Wed Nov 4 00:24:19 2015 -0600

----------------------------------------------------------------------
 STORM-UI-REST-API.md | 195 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 195 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/storm/blob/8d91ad94/STORM-UI-REST-API.md
----------------------------------------------------------------------
diff --git a/STORM-UI-REST-API.md b/STORM-UI-REST-API.md
index ba31179..6260afa 100644
--- a/STORM-UI-REST-API.md
+++ b/STORM-UI-REST-API.md
@@ -636,6 +636,201 @@ Sample response:
 }
 ```
 
+## Profiling and Debugging GET Operations
+
+###  /api/v1/topology/:id/profiling/start/:host-port/:timeout (GET)
+
+Request to start profiler on worker with timeout. Returns status and link to profiler artifacts for worker.
+
+|Parameter |Value   |Description  |
+|----------|--------|-------------|
+|id   	   |String (required)| Topology Id  |
+|:host-port |String (required)| Worker Id |
+|:timeout |String (required)| Time out for profiler to stop in minutes |
+
+Response fields:
+
+|Field  |Value |Description|
+|-----	|----- |-----------|
+|id   | String | Worker id|
+|status | String | Response Status |
+|timeout | String | Requested timeout |
+|dumplink | String | Link to logviewer URL for worker profiler documents.|
+
+Examples:
+
+```no-highlight
+1. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/start/10.11.1.7:6701/10
+2. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/start/10.11.1.7:6701/5
+3. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/start/10.11.1.7:6701/20
+```
+
+Sample response:
+
+```json
+{
+   "status": "ok",
+   "id": "10.11.1.7:6701",
+   "timeout": "10",
+   "dumplink": "http:\/\/10.11.1.7:8000\/dumps\/wordcount-1-1446614150\/10.11.1.7%3A6701"
+}
+```
+
+###  /api/v1/topology/:id/profiling/dumpprofile/:host-port (GET)
+
+Request to dump profiler recording on worker. Returns status and worker id for the request.
+
+|Parameter |Value   |Description  |
+|----------|--------|-------------|
+|id   	   |String (required)| Topology Id  |
+|:host-port |String (required)| Worker Id |
+
+Response fields:
+
+|Field  |Value |Description|
+|-----	|----- |-----------|
+|id   | String | Worker id|
+|status | String | Response Status |
+
+Examples:
+
+```no-highlight
+1. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/dumpprofile/10.11.1.7:6701
+```
+
+Sample response:
+
+```json
+{
+   "status": "ok",
+   "id": "10.11.1.7:6701"
+}
+```
+
+###  /api/v1/topology/:id/profiling/stop/:host-port (GET)
+
+Request to stop profiler on worker. Returns status and worker id for the request.
+
+|Parameter |Value   |Description  |
+|----------|--------|-------------|
+|id   	   |String (required)| Topology Id  |
+|:host-port |String (required)| Worker Id |
+
+Response fields:
+
+|Field  |Value |Description|
+|-----	|----- |-----------|
+|id   | String | Worker id|
+|status | String | Response Status |
+
+Examples:
+
+```no-highlight
+1. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/stop/10.11.1.7:6701
+```
+
+Sample response:
+
+```json
+{
+   "status": "ok",
+   "id": "10.11.1.7:6701"
+}
+```
+
+###  /api/v1/topology/:id/profiling/dumpjstack/:host-port (GET)
+
+Request to dump jstack on worker. Returns status and worker id for the request.
+
+|Parameter |Value   |Description  |
+|----------|--------|-------------|
+|id   	   |String (required)| Topology Id  |
+|:host-port |String (required)| Worker Id |
+
+Response fields:
+
+|Field  |Value |Description|
+|-----	|----- |-----------|
+|id   | String | Worker id|
+|status | String | Response Status |
+
+Examples:
+
+```no-highlight
+1. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/dumpjstack/10.11.1.7:6701
+```
+
+Sample response:
+
+```json
+{
+   "status": "ok",
+   "id": "10.11.1.7:6701"
+}
+```
+
+###  /api/v1/topology/:id/profiling/dumpheap/:host-port (GET)
+
+Request to dump heap (jmap) on worker. Returns status and worker id for the request.
+
+|Parameter |Value   |Description  |
+|----------|--------|-------------|
+|id   	   |String (required)| Topology Id  |
+|:host-port |String (required)| Worker Id |
+
+Response fields:
+
+|Field  |Value |Description|
+|-----	|----- |-----------|
+|id   | String | Worker id|
+|status | String | Response Status |
+
+Examples:
+
+```no-highlight
+1. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/dumpheap/10.11.1.7:6701
+```
+
+Sample response:
+
+```json
+{
+   "status": "ok",
+   "id": "10.11.1.7:6701"
+}
+```
+
+###  /api/v1/topology/:id/profiling/restartworker/:host-port (GET)
+
+Request to restart the worker. Returns status and worker id for the request.
+
+|Parameter |Value   |Description  |
+|----------|--------|-------------|
+|id   	   |String (required)| Topology Id  |
+|:host-port |String (required)| Worker Id |
+
+Response fields:
+
+|Field  |Value |Description|
+|-----	|----- |-----------|
+|id   | String | Worker id|
+|status | String | Response Status |
+
+Examples:
+
+```no-highlight
+1. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/restartworker/10.11.1.7:6701
+```
+
+Sample response:
+
+```json
+{
+   "status": "ok",
+   "id": "10.11.1.7:6701"
+}
+```
+
 ## POST Operations
 
 ### /api/v1/topology/:id/activate (POST)


[05/10] storm git commit: If dynamic worker profiling is disabled, match worker childopts.

Posted by bo...@apache.org.
If dynamic worker profiling is disabled, match worker childopts.


Project: http://git-wip-us.apache.org/repos/asf/storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/storm/commit/f697044a
Tree: http://git-wip-us.apache.org/repos/asf/storm/tree/f697044a
Diff: http://git-wip-us.apache.org/repos/asf/storm/diff/f697044a

Branch: refs/heads/master
Commit: f697044a466038ee081fb668be5497c7b5080a8e
Parents: d2f9305
Author: Kishor Patil <kp...@yahoo-inc.com>
Authored: Tue Nov 3 00:19:10 2015 -0600
Committer: Kishor Patil <kp...@yahoo-inc.com>
Committed: Tue Nov 3 00:19:10 2015 -0600

----------------------------------------------------------------------
 docs/DYNAMIC_WORKER_PROFILING.md                        | 2 +-
 storm-core/src/clj/backtype/storm/daemon/supervisor.clj | 4 +++-
 2 files changed, 4 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/storm/blob/f697044a/docs/DYNAMIC_WORKER_PROFILING.md
----------------------------------------------------------------------
diff --git a/docs/DYNAMIC_WORKER_PROFILING.md b/docs/DYNAMIC_WORKER_PROFILING.md
index 4b55a80..9bc8da3 100644
--- a/docs/DYNAMIC_WORKER_PROFILING.md
+++ b/docs/DYNAMIC_WORKER_PROFILING.md
@@ -25,5 +25,5 @@ Click on "My Dump Files" to go the logviewer UI for list of worker specific dump
 Configuration
 -------------
 
-The "worker.profiler.command" can be configured to point to specific pluggable profiler, heapdump commands. The "worker.profiler.enabled" can be disabled if plugin is not available.
+The "worker.profiler.command" can be configured to point to specific pluggable profiler, heapdump commands. The "worker.profiler.enabled" can be disabled if the plugin is not available or the JDK does not support Java Flight Recorder, so that worker JVM options will not include "worker.profiler.childopts". To use a different profiler plugin, you can change these configurations.
 

http://git-wip-us.apache.org/repos/asf/storm/blob/f697044a/storm-core/src/clj/backtype/storm/daemon/supervisor.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/daemon/supervisor.clj b/storm-core/src/clj/backtype/storm/daemon/supervisor.clj
index e5740cb..c86b73f 100644
--- a/storm-core/src/clj/backtype/storm/daemon/supervisor.clj
+++ b/storm-core/src/clj/backtype/storm/daemon/supervisor.clj
@@ -844,7 +844,9 @@
                              (substitute-childopts s worker-id storm-id port mem-onheap))
           topo-worker-childopts (when-let [s (storm-conf TOPOLOGY-WORKER-CHILDOPTS)]
                                   (substitute-childopts s worker-id storm-id port mem-onheap))
-          worker--profiler-childopts (substitute-childopts (conf WORKER-PROFILER-CHILDOPTS) worker-id storm-id port mem-onheap)
+          worker--profiler-childopts (if (conf WORKER-PROFILER-ENABLED)
+                                       (substitute-childopts (conf WORKER-PROFILER-CHILDOPTS) worker-id storm-id port mem-onheap)
+                                       "")
           topology-worker-environment (if-let [env (storm-conf TOPOLOGY-ENVIRONMENT)]
                                         (merge env {"LD_LIBRARY_PATH" jlp})
                                         {"LD_LIBRARY_PATH" jlp})


[03/10] storm git commit: Adding dynamic profiling for worker

Posted by bo...@apache.org.
Adding dynamic profiling for worker


Project: http://git-wip-us.apache.org/repos/asf/storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/storm/commit/0c2021e6
Tree: http://git-wip-us.apache.org/repos/asf/storm/tree/0c2021e6
Diff: http://git-wip-us.apache.org/repos/asf/storm/diff/0c2021e6

Branch: refs/heads/master
Commit: 0c2021e6331cb5324a2803aff9783b539a05f63c
Parents: 8ba776b
Author: Kishor Patil <kp...@yahoo-inc.com>
Authored: Mon Nov 2 18:51:58 2015 +0000
Committer: Kishor Patil <kp...@yahoo-inc.com>
Committed: Mon Nov 2 18:51:58 2015 +0000

----------------------------------------------------------------------
 bin/flight.bash                                 |  139 ++
 conf/defaults.yaml                              |    1 +
 docs/DYNAMIC_WORKER_PROFILING.md                |   24 +
 docs/images/dynamic_profiling_debugging_1.png   |  Bin 0 -> 93635 bytes
 docs/images/dynamic_profiling_debugging_2.png   |  Bin 0 -> 138120 bytes
 docs/images/dynamic_profiling_debugging_3.png   |  Bin 0 -> 96974 bytes
 storm-core/src/clj/backtype/storm/cluster.clj   |   57 +-
 storm-core/src/clj/backtype/storm/config.clj    |    4 +
 storm-core/src/clj/backtype/storm/converter.clj |   19 +-
 .../src/clj/backtype/storm/daemon/logviewer.clj |   45 +
 .../src/clj/backtype/storm/daemon/nimbus.clj    |   35 +-
 .../clj/backtype/storm/daemon/supervisor.clj    |  156 +-
 .../src/clj/backtype/storm/daemon/worker.clj    |    4 +-
 storm-core/src/clj/backtype/storm/stats.clj     |    9 +
 storm-core/src/clj/backtype/storm/ui/core.clj   |  149 +-
 storm-core/src/clj/backtype/storm/util.clj      |   10 +-
 storm-core/src/jvm/backtype/storm/Config.java   |    6 +
 .../jvm/backtype/storm/generated/Nimbus.java    | 1977 +++++++++++++++++-
 .../backtype/storm/generated/ProfileAction.java |   74 +
 .../storm/generated/ProfileRequest.java         |  631 ++++++
 .../auth/authorizer/SimpleACLAuthorizer.java    |    8 +
 .../src/native/worker-launcher/impl/main.c      |   10 +
 .../worker-launcher/impl/worker-launcher.c      |   47 +
 .../worker-launcher/impl/worker-launcher.h      |    2 +
 storm-core/src/py/storm/Nimbus-remote           |   14 +
 storm-core/src/py/storm/Nimbus.py               |  396 ++++
 storm-core/src/py/storm/ttypes.py               |  122 ++
 storm-core/src/storm.thrift                     |   20 +
 storm-core/src/ui/public/component.html         |  165 +-
 .../templates/component-page-template.html      |   53 +
 30 files changed, 4149 insertions(+), 28 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/storm/blob/0c2021e6/bin/flight.bash
----------------------------------------------------------------------
diff --git a/bin/flight.bash b/bin/flight.bash
new file mode 100755
index 0000000..05068b5
--- /dev/null
+++ b/bin/flight.bash
@@ -0,0 +1,139 @@
+#!/bin/bash
+
+JDKPATH="/home/y/share/yjava_jdk/java"
+BINPATH="/usr/bin"
+USER=`whoami`
+
+#SETTINGS=/Library/Java/JavaVirtualMachines/jdk1.8.0_51.jdk/Contents/Home/jre/lib/jfr/profile.jfc
+SETTINGS=profile
+
+platform='unknown'
+unamestr=`uname`
+if [[ "$unamestr" == 'Linux' ]]; then
+    platform='linux'
+elif [[ "$unamestr" == 'Darwin' ]]; then
+    platform='darwin'
+elif [[ "$unamestr" == 'FreeBSD' ]]; then
+    platform='freebsd'
+fi
+
+if [[ $platform == 'linux' ]]; then
+    BINPATH="$JDKPATH/bin"
+elif [[ $platform == 'darwin' ]]; then
+    BINPATH="/usr/bin"
+fi
+
+function start_record {
+    # start_record pid
+    already_recording=false
+    for rid in `get_recording_ids $1`; do
+        already_recording=true
+        break;
+    done
+    if [ "$already_recording" = false ]; then
+        $BINPATH/jcmd $1 JFR.start settings=${SETTINGS}
+    fi
+}
+
+function dump_record {
+    for rid in `get_recording_ids $1`; do
+        FILENAME=recording-$1-${rid}-${NOW}.jfr
+        $BINPATH/jcmd $1 JFR.dump recording=$rid filename="$2/${FILENAME}"
+    done
+}
+
+function jstack_record {
+    FILENAME=jstack-$1-${NOW}.txt
+    $BINPATH/jstack $1 > "$2/${FILENAME}"
+}
+
+function jmap_record {
+    FILENAME=recording-$1-${NOW}.bin
+    $BINPATH/jmap -dump:format=b,file="$2/${FILENAME}" $1
+}
+
+function stop_record {
+    for rid in `get_recording_ids $1`; do
+        FILENAME=recording-$1-${rid}-${NOW}.jfr
+        $BINPATH/jcmd $1 JFR.dump recording=$rid filename="$2/${FILENAME}"
+        $BINPATH/jcmd $1 JFR.stop recording=$rid
+    done
+}
+
+function get_recording_ids {
+    $BINPATH/jcmd $1 JFR.check | perl -n -e '/recording=([0-9]+)/ && print "$1 "'
+}
+
+function usage_and_quit {
+    echo "Usage: $0 pid start [profile_settings]"
+    echo "       $0 pid dump target_dir"
+    echo "       $0 pid stop target_dir"
+    echo "       $0 pid jstack target_dir"
+    echo "       $0 pid jmap target_dir"
+    echo "       $0 pid kill"
+    exit -1
+}
+
+# Before using this script: make sure FlightRecorder is enabled
+
+if [ "$#" -le 1 ]; then
+    echo "Wrong number of arguments.."
+    usage_and_quit
+
+fi
+# call this script with the process pid, example: "./flight PID start" or "./flight PID stop"
+PID="$1"
+CMD="$2"
+
+if /bin/ps -p $PID > /dev/null
+then
+    if [[ $platform == 'linux' ]]; then
+        USER=`/bin/ps -ouser --noheader $PID`
+    elif [[ $platform == 'darwin' ]]; then
+        USER=`/bin/ps -ouser $PID`
+    fi
+else
+    echo "No such pid running: $PID"
+    usage_and_quit
+fi
+
+if [ "$CMD" != "start" ] && [ "$CMD" != "kill" ]; then
+    if [[ $3 ]] && [[ -d $3 ]]
+    then
+        TARGETDIR="$3"
+        mkdir -p ${TARGETDIR}
+    else
+        echo "Missing target directory"
+        usage_and_quit
+    fi
+fi
+
+NOW=`date +'%Y%m%d-%H%M%S'`
+if [ "$CMD" = "" ]; then
+    usage_and_quit
+elif [ "$CMD" = "kill" ]; then
+    echo "Killing process with pid: $PID"
+    kill -9 ${PID}
+elif [ "$CMD" = "start" ]; then
+    if [[ $3 ]]
+    then
+        SETTINGS=$3
+    fi
+    start_record ${PID}
+elif [ "$CMD" = "stop" ]; then
+    echo "Capturing dump before stopping in dir $TARGETDIR"
+    stop_record ${PID} ${TARGETDIR}
+elif [ "$CMD" = "jstack" ]; then
+    echo "Capturing dump in dir $TARGETDIR"
+    jstack_record ${PID} ${TARGETDIR}
+elif [ "$CMD" = "jmap" ]; then
+    echo "Capturing dump in dir $TARGETDIR"
+    jmap_record ${PID} ${TARGETDIR}
+elif [ "$CMD" = "dump" ]; then
+    echo "Capturing dump in dir $TARGETDIR"
+    dump_record ${PID} ${TARGETDIR}
+else
+    usage_and_quit
+fi
+
+

http://git-wip-us.apache.org/repos/asf/storm/blob/0c2021e6/conf/defaults.yaml
----------------------------------------------------------------------
diff --git a/conf/defaults.yaml b/conf/defaults.yaml
index 160c29f..84babc3 100644
--- a/conf/defaults.yaml
+++ b/conf/defaults.yaml
@@ -144,6 +144,7 @@ supervisor.cpu.capacity: 400.0
 worker.heap.memory.mb: 768
 worker.childopts: "-Xmx%HEAP-MEM%m -XX:+PrintGCDetails -Xloggc:artifacts/gc.log -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=1M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=artifacts/heapdump"
 worker.gc.childopts: ""
+worker.profiler.childopts: "-XX:+UnlockCommercialFeatures -XX:+FlightRecorder"
 worker.heartbeat.frequency.secs: 1
 
 # check whether dynamic log levels can be reset from DEBUG to INFO in workers

http://git-wip-us.apache.org/repos/asf/storm/blob/0c2021e6/docs/DYNAMIC_WORKER_PROFILING.md
----------------------------------------------------------------------
diff --git a/docs/DYNAMIC_WORKER_PROFILING.md b/docs/DYNAMIC_WORKER_PROFILING.md
new file mode 100644
index 0000000..727322f
--- /dev/null
+++ b/docs/DYNAMIC_WORKER_PROFILING.md
@@ -0,0 +1,24 @@
+Dynamic Worker Profiling
+==========================
+
+In multi-tenant mode, Storm launches long-running JVMs across the cluster without sudo access for users. Self-serving of Java heap-dumps, jstacks and Java profiling of these JVMs would improve users' ability to analyze and debug issues when monitoring actively.
+
+The storm dynamic profiler lets you dynamically take heap-dumps, jprofile or jstack for a worker JVM running on a stock cluster. It lets users download these dumps from the browser and use their favorite tools to analyze them. The UI component page provides a list of workers for the component and action buttons. The logviewer lets you download the dumps generated by these requests. Please see the screenshots for more information.
+
+Using the Storm UI
+-------------
+
+In order to request a heap-dump, jstack, start/stop/dump jprofile or restart a worker, click on a running topology, then click on a specific component, then you can select a worker from the dropdown for that particular component and then click on “Start", "Heap", "Jstack" or "Restart Worker" in the "Profiling and Debugging" section.
+
+![Profiling and Debugging](images/dynamic_profiling_debugging_1.png "Profiling and Debugging")
+
+To start jprofile, provide a timeout in minutes (or 10 if not needed). Then click on “Start”.
+
+![After starting jprofile for worker](images/dynamic_profiling_debugging_2.png "After jprofile for worker ")
+
+To stop the jprofile logging click on the “Stop” button. This dumps the jprofile stats and stops the profiling. Refresh the page for the line to disappear from the UI.
+
+Click on "My Dump Files" to go to the logviewer UI for the list of worker-specific dump files.
+
+![Dump Files Links for worker](images/dynamic_profiling_debugging_3.png "Dump Files Links for worker")
+

http://git-wip-us.apache.org/repos/asf/storm/blob/0c2021e6/docs/images/dynamic_profiling_debugging_1.png
----------------------------------------------------------------------
diff --git a/docs/images/dynamic_profiling_debugging_1.png b/docs/images/dynamic_profiling_debugging_1.png
new file mode 100644
index 0000000..3913e86
Binary files /dev/null and b/docs/images/dynamic_profiling_debugging_1.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/0c2021e6/docs/images/dynamic_profiling_debugging_2.png
----------------------------------------------------------------------
diff --git a/docs/images/dynamic_profiling_debugging_2.png b/docs/images/dynamic_profiling_debugging_2.png
new file mode 100644
index 0000000..66c0236
Binary files /dev/null and b/docs/images/dynamic_profiling_debugging_2.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/0c2021e6/docs/images/dynamic_profiling_debugging_3.png
----------------------------------------------------------------------
diff --git a/docs/images/dynamic_profiling_debugging_3.png b/docs/images/dynamic_profiling_debugging_3.png
new file mode 100644
index 0000000..5706d7e
Binary files /dev/null and b/docs/images/dynamic_profiling_debugging_3.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/0c2021e6/storm-core/src/clj/backtype/storm/cluster.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/cluster.clj b/storm-core/src/clj/backtype/storm/cluster.clj
index ec42da6..cf1ece6 100644
--- a/storm-core/src/clj/backtype/storm/cluster.clj
+++ b/storm-core/src/clj/backtype/storm/cluster.clj
@@ -17,7 +17,7 @@
 (ns backtype.storm.cluster
   (:import [org.apache.zookeeper.data Stat ACL Id]
            [backtype.storm.generated SupervisorInfo Assignment StormBase ClusterWorkerHeartbeat ErrorInfo Credentials NimbusSummary
-            LogConfig]
+            LogConfig ProfileAction ProfileRequest NodeInfo]
            [java.io Serializable])
   (:import [org.apache.zookeeper KeeperException KeeperException$NoNodeException ZooDefs ZooDefs$Ids ZooDefs$Perms])
   (:import [org.apache.curator.framework.state ConnectionStateListener ConnectionState])
@@ -186,6 +186,10 @@
   (active-storms [this])
   (storm-base [this storm-id callback])
   (get-worker-heartbeat [this storm-id node port])
+  (get-worker-profile-requests [this storm-id nodeinfo thrift?])
+  (get-topology-profile-requests [this storm-id thrift?])
+  (set-worker-profile-request [this storm-id profile-request])
+  (delete-topology-profile-requests [this storm-id profile-request])
   (executor-beats [this storm-id executor->node+port])
   (supervisors [this callback])
   (supervisor-info [this supervisor-id]) ;; returns nil if doesn't exist
@@ -228,6 +232,7 @@
 (def NIMBUSES-ROOT "nimbuses")
 (def CREDENTIALS-ROOT "credentials")
 (def LOGCONFIG-ROOT "logconfigs")
+(def PROFILERCONFIG-ROOT "profilerconfigs")
 
 (def ASSIGNMENTS-SUBTREE (str "/" ASSIGNMENTS-ROOT))
 (def STORMS-SUBTREE (str "/" STORMS-ROOT))
@@ -239,6 +244,7 @@
 (def NIMBUSES-SUBTREE (str "/" NIMBUSES-ROOT))
 (def CREDENTIALS-SUBTREE (str "/" CREDENTIALS-ROOT))
 (def LOGCONFIG-SUBTREE (str "/" LOGCONFIG-ROOT))
+(def PROFILERCONFIG-SUBTREE (str "/" PROFILERCONFIG-ROOT))
 
 (defn supervisor-path
   [id]
@@ -302,6 +308,12 @@
   [storm-id]
   (str LOGCONFIG-SUBTREE "/" storm-id))
 
+(defn profiler-config-path
+  ([storm-id]
+   (str PROFILERCONFIG-SUBTREE "/" storm-id))
+  ([storm-id host port request-type]
+   (str (profiler-config-path storm-id) "/" host "_" port "_" request-type)))
+
 (defn- issue-callback!
   [cb-atom]
   (let [cb @cb-atom]
@@ -497,6 +509,48 @@
         [this storm-id log-config]
         (.set_data cluster-state (log-config-path storm-id) (Utils/serialize log-config) acls))
 
+      (set-worker-profile-request
+        [this storm-id profile-request]
+        (let [request-type (.get_action profile-request)
+              host (.get_node (.get_nodeInfo profile-request))
+              port (first (.get_port (.get_nodeInfo profile-request)))]
+          (.set_data cluster-state
+                     (profiler-config-path storm-id host port request-type)
+                     (Utils/serialize profile-request)
+                     acls)))
+
+      (get-topology-profile-requests
+        [this storm-id thrift?]
+        (let [path (profiler-config-path storm-id)
+              requests (if (exists-node? cluster-state path false)
+                         (dofor [c (.get_children cluster-state path false)]
+                                (let [raw (.get_data cluster-state (str path "/" c) false)
+                                      request (maybe-deserialize raw ProfileRequest)]
+                                      (if thrift?
+                                        request
+                                        (clojurify-profile-request request)))))]
+          requests))
+
+      (delete-topology-profile-requests
+        [this storm-id profile-request]
+        (let [profile-request-inst (thriftify-profile-request profile-request)
+              action (:action profile-request)
+              host (:host profile-request)
+              port (:port profile-request)]
+          (.delete_node cluster-state
+           (profiler-config-path storm-id host port action))))
+          
+      (get-worker-profile-requests
+        [this storm-id node-info thrift?]
+        (let [host (:host node-info)
+              port (:port node-info)
+              profile-requests (get-topology-profile-requests this storm-id thrift?)]
+          (if thrift?
+            (filter #(and (= host (.get_node (.get_nodeInfo %))) (= port (first (.get_port (.get_nodeInfo  %)))))
+                    profile-requests)
+            (filter #(and (= host (:host %)) (= port (:port %)))
+                    profile-requests))))
+      
       (worker-heartbeat!
         [this storm-id node port info]
         (let [thrift-worker-hb (thriftify-zk-worker-hb info)]
@@ -607,6 +661,7 @@
         (delete-node cluster-state (code-distributor-path storm-id))
         (delete-node cluster-state (credentials-path storm-id))
         (delete-node cluster-state (log-config-path storm-id))
+        (delete-node cluster-state (profiler-config-path storm-id))
         (remove-storm-base! this storm-id))
 
       (set-credentials!

http://git-wip-us.apache.org/repos/asf/storm/blob/0c2021e6/storm-core/src/clj/backtype/storm/config.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/config.clj b/storm-core/src/clj/backtype/storm/config.clj
index f06f6e9..c9e151a 100644
--- a/storm-core/src/clj/backtype/storm/config.clj
+++ b/storm-core/src/clj/backtype/storm/config.clj
@@ -229,6 +229,10 @@
   ([conf id port]
    (str (worker-artifacts-root conf id) file-path-separator port)))
 
+(defn worker-artifacts-pid-path
+  [conf id port]
+  (str (worker-artifacts-root conf id port) file-path-separator "worker.pid"))
+
 (defn get-log-metadata-file
   ([fname]
     (let [[id port & _] (str/split fname (re-pattern file-path-separator))]

http://git-wip-us.apache.org/repos/asf/storm/blob/0c2021e6/storm-core/src/clj/backtype/storm/converter.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/converter.clj b/storm-core/src/clj/backtype/storm/converter.clj
index 21920e4..52a1817 100644
--- a/storm-core/src/clj/backtype/storm/converter.clj
+++ b/storm-core/src/clj/backtype/storm/converter.clj
@@ -16,7 +16,7 @@
 (ns backtype.storm.converter
   (:import [backtype.storm.generated SupervisorInfo NodeInfo Assignment WorkerResources
             StormBase TopologyStatus ClusterWorkerHeartbeat ExecutorInfo ErrorInfo Credentials RebalanceOptions KillOptions
-            TopologyActionOptions DebugOptions])
+            TopologyActionOptions DebugOptions ProfileRequest])
   (:use [backtype.storm util stats log])
   (:require [backtype.storm.daemon [common :as common]]))
 
@@ -249,6 +249,23 @@
     (.set_host (:host error))
     (.set_port (:port error))))
 
+(defn clojurify-profile-request
+  [^ProfileRequest request]
+  (when request
+    {:host (.get_node (.get_nodeInfo request))
+     :port (first (.get_port (.get_nodeInfo request)))
+     :action     (.get_action request)
+     :timestamp  (.get_time_stamp request)}))
+
+(defn thriftify-profile-request
+  [profile-request]
+  (let [nodeinfo (doto (NodeInfo.)
+                   (.set_node (:host profile-request))
+                   (.set_port (set [(:port profile-request)])))
+        request (ProfileRequest. nodeinfo (:action profile-request))]
+    (.set_time_stamp request (:timestamp profile-request))
+    request))
+
 (defn thriftify-credentials [credentials]
     (doto (Credentials.)
       (.set_creds (if credentials credentials {}))))

http://git-wip-us.apache.org/repos/asf/storm/blob/0c2021e6/storm-core/src/clj/backtype/storm/daemon/logviewer.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/daemon/logviewer.clj b/storm-core/src/clj/backtype/storm/daemon/logviewer.clj
index 353a58e..5303a25 100644
--- a/storm-core/src/clj/backtype/storm/daemon/logviewer.clj
+++ b/storm-core/src/clj/backtype/storm/daemon/logviewer.clj
@@ -548,6 +548,51 @@ Note that if anything goes wrong, this will throw an Error and exit."
       (catch InvalidRequestException ex
         (log-error ex)
         (ring-response-from-exception ex))))
+  (GET "/dumps/:topo-id/:host-port/:filename"
+       [:as {:keys [servlet-request servlet-response log-root]} topo-id host-port filename &m]
+     (let [port (second (split host-port #":"))]
+       (-> (resp/response (File. (str log-root
+                                      file-path-separator
+                                      topo-id
+                                      file-path-separator
+                                      port
+                                      file-path-separator
+                                      filename)))
+           (resp/content-type "application/octet-stream"))))
+  (GET "/dumps/:topo-id/:host-port"
+       [:as {:keys [servlet-request servlet-response log-root]} topo-id host-port &m]
+     (let [user (.getUserName http-creds-handler servlet-request)
+           port (second (split host-port #":"))
+           dir (File. (str log-root
+                           file-path-separator
+                           topo-id
+                           file-path-separator
+                           port))
+           files (filter (comp not nil?)
+                         (for [f (.listFiles dir)]
+                           (let [name (.getName f)]
+                             (if (or
+                                  (.endsWith name ".txt")
+                                  (.endsWith name ".jfr")
+                                  (.endsWith name ".bin"))
+                               (.getName f)))))]
+       (if (.exists dir)
+         (if (or (blank? (*STORM-CONF* UI-FILTER))
+               (authorized-log-user? user "worker.log" *STORM-CONF*))
+           (html4
+             [:head
+              [:title "File Dumps - Storm Log Viewer"]
+              (include-css "/css/bootstrap-3.3.1.min.css")
+              (include-css "/css/jquery.dataTables.1.10.4.min.css")
+              (include-css "/css/style.css")]
+             [:body
+              [:ul
+               (for [file files]
+                 [:li
+                  [:a {:href (str "/dumps/" topo-id "/" host-port "/" file)} file ]])]])
+           (unauthorized-user-html user))
+         (-> (resp/response "Page not found")
+           (resp/status 404)))))
   (GET "/daemonlog" [:as req & m]
     (try
       (let [servlet-request (:servlet-request req)

http://git-wip-us.apache.org/repos/asf/storm/blob/0c2021e6/storm-core/src/clj/backtype/storm/daemon/nimbus.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/daemon/nimbus.clj b/storm-core/src/clj/backtype/storm/daemon/nimbus.clj
index faf7cde..205c6f4 100644
--- a/storm-core/src/clj/backtype/storm/daemon/nimbus.clj
+++ b/storm-core/src/clj/backtype/storm/daemon/nimbus.clj
@@ -20,7 +20,7 @@
   (:import [org.apache.thrift.transport TNonblockingServerTransport TNonblockingServerSocket])
   (:import [org.apache.commons.io FileUtils])
   (:import [java.nio ByteBuffer]
-           [java.util Collections HashMap]
+           [java.util Collections List HashMap]
            [backtype.storm.generated NimbusSummary])
   (:import [java.io FileNotFoundException File FileOutputStream])
   (:import [java.net InetAddress])
@@ -36,7 +36,8 @@
             ExecutorInfo InvalidTopologyException Nimbus$Iface Nimbus$Processor SubmitOptions TopologyInitialStatus
             KillOptions RebalanceOptions ClusterSummary SupervisorSummary TopologySummary TopologyInfo
             ExecutorSummary AuthorizationException GetInfoOptions NumErrorsChoice
-            ComponentPageInfo TopologyPageInfo LogConfig LogLevel LogLevelAction])
+            ComponentPageInfo TopologyPageInfo LogConfig LogLevel LogLevelAction
+            ProfileRequest ProfileAction NodeInfo])
   (:import [backtype.storm.daemon Shutdownable])
   (:use [backtype.storm util config log timer zookeeper])
   (:require [backtype.storm [cluster :as cluster]
@@ -1335,6 +1336,36 @@
           (locking (:submit-lock nimbus)
             (.update-storm! storm-cluster-state storm-id storm-base-updates))))
 
+      (^void setWorkerProfiler
+        [this ^String id ^ProfileRequest profileRequest]
+        (let [topology-conf (try-read-storm-conf conf id)
+              storm-name (topology-conf TOPOLOGY-NAME)
+              _ (check-authorization! nimbus storm-name topology-conf "setWorkerProfiler")
+              storm-cluster-state (:storm-cluster-state nimbus)]
+          (.set-worker-profile-request storm-cluster-state id profileRequest)))
+
+      (^List getComponentPendingProfileActions
+        [this ^String id ^String component_id ^ProfileAction action]
+        (let [info (get-common-topo-info id "getComponentPendingProfileActions")
+              storm-cluster-state (:storm-cluster-state info)
+              task->component (:task->component info)
+              {:keys [executor->node+port node->host]} (:assignment info)
+              executor->host+port (map-val (fn [[node port]]
+                                             [(node->host node) port])
+                                    executor->node+port)
+              nodeinfos (stats/extract-nodeinfos-from-hb-for-comp executor->host+port task->component false component_id)
+              all-pending-actions-for-topology (.get-topology-profile-requests storm-cluster-state id true)
+              latest-profile-actions (remove nil? (map (fn [nodeInfo]
+                                                         (->> all-pending-actions-for-topology
+                                                              (filter #(and (= (:host nodeInfo) (.get_node (.get_nodeInfo %)))
+                                                                         (= (:port nodeInfo) (first (.get_port (.get_nodeInfo  %))))))
+                                                              (filter #(= action (.get_action %)))
+                                                              (sort-by #(.get_time_stamp %) >)
+                                                              first))
+                                                    nodeinfos))]
+          (log-message "Latest profile actions for topology " id " component " component_id " " (pr-str latest-profile-actions))
+          latest-profile-actions))
+
       (^void setLogConfig [this ^String id ^LogConfig log-config-msg]
         (let [topology-conf (try-read-storm-conf conf id)
               storm-name (topology-conf TOPOLOGY-NAME)

http://git-wip-us.apache.org/repos/asf/storm/blob/0c2021e6/storm-core/src/clj/backtype/storm/daemon/supervisor.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/daemon/supervisor.clj b/storm-core/src/clj/backtype/storm/daemon/supervisor.clj
index 52b2057..8fe6eed 100644
--- a/storm-core/src/clj/backtype/storm/daemon/supervisor.clj
+++ b/storm-core/src/clj/backtype/storm/daemon/supervisor.clj
@@ -26,7 +26,7 @@
   (:use [backtype.storm config util log timer local-state])
   (:import [backtype.storm.utils VersionInfo])
   (:import [backtype.storm Config])
-  (:import [backtype.storm.generated WorkerResources])
+  (:import [backtype.storm.generated WorkerResources ProfileAction])
   (:use [backtype.storm.daemon common])
   (:require [backtype.storm.daemon [worker :as worker]]
             [backtype.storm [process-simulator :as psim] [cluster :as cluster] [event :as event]]
@@ -34,6 +34,7 @@
   (:import [org.apache.zookeeper data.ACL ZooDefs$Ids ZooDefs$Perms])
   (:import [org.yaml.snakeyaml Yaml]
            [org.yaml.snakeyaml.constructor SafeConstructor])
+  (:import [java.util Date])
   (:gen-class
     :methods [^{:static true} [launch [backtype.storm.scheduler.ISupervisor] void]]))
 
@@ -59,9 +60,16 @@
                         {sid (.assignment-info-with-version storm-cluster-state sid callback)})
                       {sid nil})))
            (apply merge)
-           (filter-val not-nil?))]
-
+           (filter-val not-nil?))
+          new-profiler-actions
+          (->>
+            (dofor [sid (distinct storm-ids)]
+                   (if-let [topo-profile-actions (.get-topology-profile-requests storm-cluster-state sid false)]
+                      {sid topo-profile-actions}))
+           (apply merge))]
+         
       {:assignments (into {} (for [[k v] new-assignments] [k (:data v)]))
+       :profiler-actions new-profiler-actions
        :versions new-assignments})))
 
 (defn- read-my-executors [assignments-snapshot storm-id assignment-id]
@@ -318,6 +326,7 @@
    :sync-retry (atom 0)
    :code-distributor (mk-code-distributor conf)
    :download-lock (Object.)
+   :stormid->profiler-actions (atom {})
    })
 
 (defn sync-processes [supervisor]
@@ -449,9 +458,10 @@
           ^LocalState local-state (:local-state supervisor)
           sync-callback (fn [& ignored] (.add event-manager this))
           assignment-versions @(:assignment-versions supervisor)
-          {assignments-snapshot :assignments versions :versions}  (assignments-snapshot
-                                                                   storm-cluster-state sync-callback
-                                                                  assignment-versions)
+          {assignments-snapshot :assignments
+           storm-id->profiler-actions :profiler-actions
+           versions :versions}
+          (assignments-snapshot storm-cluster-state sync-callback assignment-versions)
           storm-code-map (read-storm-code-locations assignments-snapshot)
           downloaded-storm-ids (set (read-downloaded-storm-ids conf))
           existing-assignment (ls-local-assignments local-state)
@@ -467,6 +477,7 @@
       (log-debug "Downloaded storm ids: " downloaded-storm-ids)
       (log-debug "All assignment: " all-assignment)
       (log-debug "New assignment: " new-assignment)
+      (log-debug "Storm Ids Profiler Actions" storm-id->profiler-actions)
 
       ;; download code first
       ;; This might take awhile
@@ -486,6 +497,8 @@
       (ls-local-assignments! local-state
             new-assignment)
       (reset! (:assignment-versions supervisor) versions)
+      (reset! (:stormid->profiler-actions supervisor) storm-id->profiler-actions)
+
       (reset! (:curr-assignment supervisor) new-assignment)
       ;; remove any downloaded code that's no longer assigned or active
       ;; important that this happens after setting the local assignment so that
@@ -508,6 +521,121 @@
   {Config/SUPERVISOR_MEMORY_CAPACITY_MB (double (conf SUPERVISOR-MEMORY-CAPACITY-MB))
    Config/SUPERVISOR_CPU_CAPACITY (double (conf SUPERVISOR-CPU-CAPACITY))})
 
+(defn jvm-cmd [cmd]
+  (let [java-home (.get (System/getenv) "JAVA_HOME")]
+    (if (nil? java-home)
+      cmd
+      (str java-home file-path-separator "bin" file-path-separator cmd))))
+
+(defn java-cmd []
+  (jvm-cmd "java"))
+
+(def PROFILE-CMD "flight.bash")
+
+(defn jmap-dump-cmd [pid target-dir]
+  [PROFILE-CMD pid "jmap" target-dir])
+
+(defn jstack-dump-cmd [pid target-dir]
+  [PROFILE-CMD pid "jstack" target-dir])
+
+(defn jprofile-start [pid]
+  [PROFILE-CMD pid "start" ])
+
+(defn jprofile-stop [pid target-dir]
+  [PROFILE-CMD pid "stop" target-dir])
+
+(defn jprofile-dump [pid workers-artifacts-directory]
+  [PROFILE-CMD pid "dump" workers-artifacts-directory])
+
+(defn jprofile-jvm-restart [pid]
+  [PROFILE-CMD pid "kill" ])
+
+(defn- delete-topology-profiler-action [storm-cluster-state storm-id profile-action]
+  (log-message "Deleting profiler action.." profile-action)
+  (.delete-topology-profile-requests storm-cluster-state storm-id profile-action))
+
+(defnk launch-profiler-action-for-worker
+  "Launch profiler action for a worker"
+  [conf user target-dir command :environment {} :exit-code-on-profile-action nil :log-prefix nil]
+  (if-let [run-worker-as-user (conf SUPERVISOR-RUN-WORKER-AS-USER)]
+    (let [container-file (container-file-path target-dir)
+          script-file (script-file-path target-dir)]
+      (log-message "Running as user:" user " command:" (shell-cmd command))
+      (if (exists-file? container-file) (rmr-as-user conf container-file container-file))
+      (if (exists-file? script-file) (rmr-as-user conf script-file script-file))
+      (worker-launcher
+        conf
+        user
+        ["profiler" target-dir (write-script target-dir command :environment environment)]
+        :log-prefix log-prefix
+        :exit-code-callback exit-code-on-profile-action
+        :directory (File. target-dir)))
+    (launch-process
+      command
+      :environment environment
+      :log-prefix log-prefix
+      :exit-code-callback exit-code-on-profile-action
+      :directory (File. target-dir))))
+
+(defn mk-run-profiler-actions-for-all-topologies
+  "Returns a function that downloads all profile-actions listed for all topologies assigned
+  to this supervisor, executes those actions as user and deletes them from zookeeper."
+  [supervisor]
+  (fn []
+    (try
+      (let [conf (:conf supervisor)
+            stormid->profiler-actions @(:stormid->profiler-actions supervisor)
+            storm-cluster-state (:storm-cluster-state supervisor)
+            hostname (:my-hostname supervisor)
+            new-assignment @(:curr-assignment supervisor)
+            assigned-storm-ids (assigned-storm-ids-from-port-assignments new-assignment)]
+        (doseq [[storm-id profiler-actions] stormid->profiler-actions]
+          (when (not (empty? profiler-actions))
+            (doseq [pro-action profiler-actions]
+              (if (= hostname (:host pro-action))
+                (let [port (:port pro-action)
+                      action ^ProfileAction (:action pro-action)
+                      stop? (> (System/currentTimeMillis) (:timestamp pro-action))
+                      target-dir (worker-artifacts-root conf storm-id port)
+                      storm-conf (read-supervisor-storm-conf conf storm-id)
+                      user (storm-conf TOPOLOGY-SUBMITTER-USER)
+                      environment (if-let [env (storm-conf TOPOLOGY-ENVIRONMENT)] env {})
+                      worker-pid (slurp (worker-artifacts-pid-path conf storm-id port))
+                      log-prefix (str "ProfilerAction process " storm-id ":" port " PROFILER_ACTION: " action " ")
+                      ;; Until PROFILER_STOP action is invalid, keep launching profiler start in case worker restarted
+                      ;; The profiler plugin script validates if JVM is recording before starting another recording.
+                      command (cond
+                                (= action ProfileAction/JMAP_DUMP) (jmap-dump-cmd worker-pid target-dir)
+                                (= action ProfileAction/JSTACK_DUMP) (jstack-dump-cmd worker-pid target-dir)
+                                (= action ProfileAction/JPROFILE_DUMP) (jprofile-dump worker-pid target-dir)
+                                (= action ProfileAction/JVM_RESTART) (jprofile-jvm-restart worker-pid)
+                                (and (not stop?)
+                                     (= action ProfileAction/JPROFILE_STOP))
+                                  (jprofile-start worker-pid) ;; Ensure the profiler is still running
+                                (and stop? (= action ProfileAction/JPROFILE_STOP)) (jprofile-stop worker-pid target-dir))
+                      action-on-exit (fn [exit-code]
+                                       (log-message log-prefix " profile-action exited for code: " exit-code)
+                                       (if (and (= exit-code 0) stop?)
+                                         (delete-topology-profiler-action storm-cluster-state storm-id pro-action)))
+                      command (->> command (map str) (filter (complement empty?)))]
+
+                  (try
+                    (launch-profiler-action-for-worker conf
+                      user
+                      target-dir
+                      command
+                      :environment environment
+                      :exit-code-on-profile-action action-on-exit
+                      :log-prefix log-prefix)
+                    (catch IOException ioe
+                      (log-error ioe
+                        (str "Error in processing ProfilerAction '" action "' for " storm-id ":" port ", will retry later.")))
+                    (catch RuntimeException rte
+                      (log-error rte
+                        (str "Error in processing ProfilerAction '" action "' for " storm-id ":" port ", will retry later."))))))))))
+      (catch Exception e
+        (log-error e "Error running profiler actions, will retry again later")))))
+
 ;; in local state, supervisor stores who its current assignments are
 ;; another thread launches events to restart any dead processes if necessary
 (defserverfn mk-supervisor [conf shared-context ^ISupervisor isupervisor]
@@ -518,6 +646,7 @@
         [event-manager processes-event-manager :as managers] [(event/event-manager false) (event/event-manager false)]
         sync-processes (partial sync-processes supervisor)
         synchronize-supervisor (mk-synchronize-supervisor supervisor sync-processes event-manager processes-event-manager)
+        run-profiler-actions-fn (mk-run-profiler-actions-for-all-topologies supervisor)
         heartbeat-fn (fn [] (.supervisor-heartbeat!
                                (:storm-cluster-state supervisor)
                                (:supervisor-id supervisor)
@@ -545,7 +674,12 @@
       (schedule-recurring (:event-timer supervisor)
                           0
                           (conf SUPERVISOR-MONITOR-FREQUENCY-SECS)
-                          (fn [] (.add processes-event-manager sync-processes))))
+                          (fn [] (.add processes-event-manager sync-processes)))
+      ;; Launch a thread that Runs profiler commands . Starts with 30 seconds delay, every 30 seconds
+      (schedule-recurring (:event-timer supervisor)
+                          30
+                          30
+                          (fn [] (.add event-manager run-profiler-actions-fn))))
     (log-message "Starting supervisor with id " (:supervisor-id supervisor) " at host " (:my-hostname supervisor))
     (reify
      Shutdownable
@@ -662,12 +796,6 @@
       (sequential? value) (vec (map sub-fn value))
       :else (-> value sub-fn (clojure.string/split #"\s+")))))
 
-(defn java-cmd []
-  (let [java-home (.get (System/getenv) "JAVA_HOME")]
-    (if (nil? java-home)
-      "java"
-      (str java-home file-path-separator "bin" file-path-separator "java")
-      )))
 
 (defn create-artifacts-link
   "Create a symlink from workder directory to its port artifacts directory"
@@ -717,6 +845,7 @@
                              (substitute-childopts s worker-id storm-id port mem-onheap))
           topo-worker-childopts (when-let [s (storm-conf TOPOLOGY-WORKER-CHILDOPTS)]
                                   (substitute-childopts s worker-id storm-id port mem-onheap))
+          worker--profiler-childopts (substitute-childopts (conf WORKER-PROFILER-CHILDOPTS) worker-id storm-id port mem-onheap)
           topology-worker-environment (if-let [env (storm-conf TOPOLOGY-ENVIRONMENT)]
                                         (merge env {"LD_LIBRARY_PATH" jlp})
                                         {"LD_LIBRARY_PATH" jlp})
@@ -737,6 +866,7 @@
                     worker-childopts
                     topo-worker-childopts
                     gc-opts
+                    worker--profiler-childopts
                     [(str "-Djava.library.path=" jlp)
                      (str "-Dlogfile.name=" logfilename)
                      (str "-Dstorm.home=" storm-home)

http://git-wip-us.apache.org/repos/asf/storm/blob/0c2021e6/storm-core/src/clj/backtype/storm/daemon/worker.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/daemon/worker.clj b/storm-core/src/clj/backtype/storm/daemon/worker.clj
index f304bdd..2fd316c 100644
--- a/storm-core/src/clj/backtype/storm/daemon/worker.clj
+++ b/storm-core/src/clj/backtype/storm/daemon/worker.clj
@@ -538,7 +538,9 @@
   ;; because in local mode, its not a separate
   ;; process. supervisor will register it in this case
   (when (= :distributed (cluster-mode conf))
-    (touch (worker-pid-path conf worker-id (process-pid))))
+    (let [pid (process-pid)]
+      (touch (worker-pid-path conf worker-id pid))
+      (spit (worker-artifacts-pid-path conf storm-id port) pid)))
 
   (declare establish-log-setting-callback)
 

http://git-wip-us.apache.org/repos/asf/storm/blob/0c2021e6/storm-core/src/clj/backtype/storm/stats.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/stats.clj b/storm-core/src/clj/backtype/storm/stats.clj
index 16a00ec..b73fd8b 100644
--- a/storm-core/src/clj/backtype/storm/stats.clj
+++ b/storm-core/src/clj/backtype/storm/stats.clj
@@ -810,6 +810,15 @@
       (.containsKey bolts id) :bolt
       (.containsKey spouts id) :spout)))
 
+(defn extract-nodeinfos-from-hb-for-comp
+  ([exec->host+port task->component include-sys? comp-id]
+   (distinct (for [[[start end :as executor] [host port]] exec->host+port
+         :let [id (task->component start)]
+         :when (and (or (nil? comp-id) (= comp-id id))
+                 (or include-sys? (not (Utils/isSystemId id))))]
+     {:host host
+      :port port}))))
+
 (defn extract-data-from-hb
   ([exec->host+port task->component beats include-sys? topology comp-id]
    (for [[[start end :as executor] [host port]] exec->host+port

http://git-wip-us.apache.org/repos/asf/storm/blob/0c2021e6/storm-core/src/clj/backtype/storm/ui/core.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/ui/core.clj b/storm-core/src/clj/backtype/storm/ui/core.clj
index 0e12210..c6ffe22 100644
--- a/storm-core/src/clj/backtype/storm/ui/core.clj
+++ b/storm-core/src/clj/backtype/storm/ui/core.clj
@@ -21,13 +21,13 @@
         ring.middleware.multipart-params)
   (:use [ring.middleware.json :only [wrap-json-params]])
   (:use [hiccup core page-helpers])
-  (:use [backtype.storm config util log stats tuple zookeeper])
+  (:use [backtype.storm config util log stats tuple zookeeper converter])
   (:use [backtype.storm.ui helpers])
   (:use [backtype.storm.daemon [common :only [ACKER-COMPONENT-ID ACKER-INIT-STREAM-ID ACKER-ACK-STREAM-ID
                                               ACKER-FAIL-STREAM-ID mk-authorization-handler]]])
-  (:use [clojure.string :only [blank? lower-case trim]])
   (:import [backtype.storm.utils Utils]
            [backtype.storm.generated NimbusSummary])
+  (:use [clojure.string :only [blank? lower-case trim split]])
   (:import [backtype.storm.generated ExecutorSpecificStats
             ExecutorStats ExecutorSummary ExecutorInfo TopologyInfo SpoutStats BoltStats
             ErrorInfo ClusterSummary SupervisorSummary TopologySummary
@@ -38,7 +38,7 @@
             ExecutorAggregateStats SpecificAggregateStats ComponentPageInfo
             LogConfig LogLevel LogLevelAction])
   (:import [backtype.storm.security.auth AuthUtils ReqContext])
-  (:import [backtype.storm.generated AuthorizationException])
+  (:import [backtype.storm.generated AuthorizationException ProfileRequest ProfileAction NodeInfo])
   (:import [backtype.storm.security.auth AuthUtils])
   (:import [backtype.storm.utils VersionInfo])
   (:import [java.io File])
@@ -159,6 +159,13 @@
     (.get_error_time_secs ^ErrorInfo error)
     ""))
 
+(defn worker-dump-link [host port topology-id]
+  (url-format "http://%s:%s/dumps/%s/%s"
+              (url-encode host)
+              (*STORM-CONF* LOGVIEWER-PORT)
+              (url-encode topology-id)
+              (str (url-encode host) ":" (url-encode port))))
+
 (defn stats-times
   [stats-map]
   (sort-by #(Integer/parseInt %)
@@ -734,6 +741,22 @@
                           (.get_exec_stats info))}
     (-> info .get_errors (component-errors topology-id secure?))))
 
+(defn get-active-profile-actions
+  [nimbus topology-id component]
+  (let [profile-actions  (.getComponentPendingProfileActions nimbus
+                                               topology-id
+                                               component
+                                 ProfileAction/JPROFILE_STOP)
+        latest-profile-actions (map clojurify-profile-request profile-actions)
+        active-actions (map (fn [profile-action]
+                              {"host" (:host profile-action)
+                               "port" (str (:port profile-action))
+                               "dumplink" (worker-dump-link (:host profile-action) (str (:port profile-action)) topology-id)
+                               "timestamp" (str (- (:timestamp profile-action) (System/currentTimeMillis)))})
+                            latest-profile-actions)]
+    (log-message "Latest-active actions are: " (pr active-actions))
+    active-actions))
+
 (defn component-page
   [topology-id component window include-sys? user secure?]
   (thrift/with-configured-nimbus-connection nimbus
@@ -775,8 +798,9 @@
                                       component
                                       (.get_eventlog_host comp-page-info)
                                       (.get_eventlog_port comp-page-info)
-                                      secure?)))))
-
+                                      secure?)
+       "profilerActive" (get-active-profile-actions nimbus topology-id component)))))
+    
 (defn- level-to-dict [level]
   (if level
     (let [timeout (.get_reset_log_level_timeout_secs level)
@@ -986,6 +1010,121 @@
         (log-message "Setting topology " id " log config " new-log-config)
         (.setLogConfig nimbus id new-log-config)
         (json-response (log-config id) (m "callback")))))
+
+  (GET "/api/v1/topology/:id/profiling/start/:host-port/:timeout"
+       [:as {:keys [servlet-request]} id host-port timeout & m]
+       (thrift/with-configured-nimbus-connection nimbus
+         (let [user (.getUserName http-creds-handler servlet-request)
+               topology-conf (from-json
+                              (.getTopologyConf ^Nimbus$Client nimbus id))]
+           (assert-authorized-user "setWorkerProfiler" (topology-config id)))
+
+         (let [[host, port] (split host-port #":")
+               nodeinfo (NodeInfo. host (set [(Long. port)]))
+               timestamp (+ (System/currentTimeMillis) (* 60000 (Long. timeout)))
+               request (ProfileRequest. nodeinfo
+                                        ProfileAction/JPROFILE_STOP)]
+           (.set_time_stamp request timestamp)
+           (.setWorkerProfiler nimbus id request)
+           (json-response {"status" "ok"
+                           "id" host-port
+                           "timeout" timeout
+                           "dumplink" (worker-dump-link
+                                       host
+                                       port
+                                       id)}
+                          (m "callback")))))
+
+  (GET "/api/v1/topology/:id/profiling/stop/:host-port"
+       [:as {:keys [servlet-request]} id host-port & m]
+       (thrift/with-configured-nimbus-connection nimbus
+         (let [user (.getUserName http-creds-handler servlet-request)
+               topology-conf (from-json
+                              (.getTopologyConf ^Nimbus$Client nimbus id))]
+           (assert-authorized-user "setWorkerProfiler" (topology-config id)))
+         (let [[host, port] (split host-port #":")
+               nodeinfo (NodeInfo. host (set [(Long. port)]))
+               timestamp 0
+               request (ProfileRequest. nodeinfo
+                                        ProfileAction/JPROFILE_STOP)]
+           (.set_time_stamp request timestamp)
+           (.setWorkerProfiler nimbus id request)
+           (json-response {"status" "ok"
+                           "id" host-port}
+                          (m "callback")))))
+  
+  (GET "/api/v1/topology/:id/profiling/dumpprofile/:host-port"
+       [:as {:keys [servlet-request]} id host-port & m]
+       (thrift/with-configured-nimbus-connection nimbus
+         (let [user (.getUserName http-creds-handler servlet-request)
+               topology-conf (from-json
+                              (.getTopologyConf ^Nimbus$Client nimbus id))]
+           (assert-authorized-user "setWorkerProfiler" (topology-config id)))
+         (let [[host, port] (split host-port #":")
+               nodeinfo (NodeInfo. host (set [(Long. port)]))
+               timestamp (System/currentTimeMillis)
+               request (ProfileRequest. nodeinfo
+                                        ProfileAction/JPROFILE_DUMP)]
+           (.set_time_stamp request timestamp)
+           (.setWorkerProfiler nimbus id request)
+           (json-response {"status" "ok"
+                           "id" host-port}
+                          (m "callback")))))
+
+  (GET "/api/v1/topology/:id/profiling/dumpjstack/:host-port"
+       [:as {:keys [servlet-request]} id host-port & m]
+       (thrift/with-configured-nimbus-connection nimbus
+         (let [user (.getUserName http-creds-handler servlet-request)
+               topology-conf (from-json
+                              (.getTopologyConf ^Nimbus$Client nimbus id))]
+           (assert-authorized-user "setWorkerProfiler" (topology-config id)))
+         (let [[host, port] (split host-port #":")
+               nodeinfo (NodeInfo. host (set [(Long. port)]))
+               timestamp (System/currentTimeMillis)
+               request (ProfileRequest. nodeinfo
+                                        ProfileAction/JSTACK_DUMP)]
+           (.set_time_stamp request timestamp)
+           (.setWorkerProfiler nimbus id request)
+           (json-response {"status" "ok"
+                           "id" host-port}
+                          (m "callback")))))
+
+  (GET "/api/v1/topology/:id/profiling/restartworker/:host-port"
+       [:as {:keys [servlet-request]} id host-port & m]
+       (thrift/with-configured-nimbus-connection nimbus
+         (let [user (.getUserName http-creds-handler servlet-request)
+               topology-conf (from-json
+                              (.getTopologyConf ^Nimbus$Client nimbus id))]
+           (assert-authorized-user "setWorkerProfiler" (topology-config id)))
+         (let [[host, port] (split host-port #":")
+               nodeinfo (NodeInfo. host (set [(Long. port)]))
+               timestamp (System/currentTimeMillis)
+               request (ProfileRequest. nodeinfo
+                                        ProfileAction/JVM_RESTART)]
+           (.set_time_stamp request timestamp)
+           (.setWorkerProfiler nimbus id request)
+           (json-response {"status" "ok"
+                           "id" host-port}
+                          (m "callback")))))
+       
+  (GET "/api/v1/topology/:id/profiling/dumpheap/:host-port"
+       [:as {:keys [servlet-request]} id host-port & m]
+       (thrift/with-configured-nimbus-connection nimbus
+         (let [user (.getUserName http-creds-handler servlet-request)
+               topology-conf (from-json
+                              (.getTopologyConf ^Nimbus$Client nimbus id))]
+           (assert-authorized-user "setWorkerProfiler" (topology-config id)))
+         (let [[host, port] (split host-port #":")
+               nodeinfo (NodeInfo. host (set [(Long. port)]))
+               timestamp (System/currentTimeMillis)
+               request (ProfileRequest. nodeinfo
+                                        ProfileAction/JMAP_DUMP)]
+           (.set_time_stamp request timestamp)
+           (.setWorkerProfiler nimbus id request)
+           (json-response {"status" "ok"
+                           "id" host-port}
+                          (m "callback")))))
+  
   (GET "/" [:as {cookies :cookies}]
     (resp/redirect "/index.html"))
   (route/resources "/")

http://git-wip-us.apache.org/repos/asf/storm/blob/0c2021e6/storm-core/src/clj/backtype/storm/util.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/util.clj b/storm-core/src/clj/backtype/storm/util.clj
index 9ec8cd3..dc8ba2d 100644
--- a/storm-core/src/clj/backtype/storm/util.clj
+++ b/storm-core/src/clj/backtype/storm/util.clj
@@ -517,11 +517,17 @@
     (map #(str \' (clojure.string/escape % {\' "'\"'\"'"}) \'))
       (clojure.string/join " ")))
 
+(defn script-file-path [dir]
+  (str dir file-path-separator "storm-worker-script.sh"))
+
+(defn container-file-path [dir]
+  (str dir file-path-separator "launch_container.sh"))
+
 (defnk write-script
   [dir command :environment {}]
   (let [script-src (str "#!/bin/bash\n" (clojure.string/join "" (map (fn [[k v]] (str (shell-cmd ["export" (str k "=" v)]) ";\n")) environment)) "\nexec " (shell-cmd command) ";")
-        script-path (str dir "/storm-worker-script.sh")
-        - (spit script-path script-src)]
+        script-path (script-file-path dir)
+        _ (spit script-path script-src)]
     script-path
   ))
 

http://git-wip-us.apache.org/repos/asf/storm/blob/0c2021e6/storm-core/src/jvm/backtype/storm/Config.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/Config.java b/storm-core/src/jvm/backtype/storm/Config.java
index 73e41cb..303a3a0 100644
--- a/storm-core/src/jvm/backtype/storm/Config.java
+++ b/storm-core/src/jvm/backtype/storm/Config.java
@@ -1087,6 +1087,12 @@ public class Config extends HashMap<String, Object> {
     public static final String WORKER_HEAP_MEMORY_MB = "worker.heap.memory.mb";
 
     /**
+     * The jvm profiler opts provided to workers launched by this supervisor.
+     */
+    @isStringOrStringList
+    public static final String WORKER_PROFILER_CHILDOPTS = "worker.profiler.childopts";
+
+    /**
      * The jvm opts provided to workers launched by this supervisor for GC. All "%ID%" substrings are replaced
      * with an identifier for this worker.  Because the JVM complains about multiple GC opts the topology
      * can override this default value by setting topology.worker.gc.childopts.


[10/10] storm git commit: Added STORM-1157 to Changelog

Posted by bo...@apache.org.
Added STORM-1157 to Changelog


Project: http://git-wip-us.apache.org/repos/asf/storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/storm/commit/f3ed08b9
Tree: http://git-wip-us.apache.org/repos/asf/storm/tree/f3ed08b9
Diff: http://git-wip-us.apache.org/repos/asf/storm/diff/f3ed08b9

Branch: refs/heads/master
Commit: f3ed08b9ffb32f0d0c756f176756484366f37f2c
Parents: 6e0fc9e
Author: Robert (Bobby) Evans <ev...@yahoo-inc.com>
Authored: Wed Nov 4 11:10:53 2015 -0600
Committer: Robert (Bobby) Evans <ev...@yahoo-inc.com>
Committed: Wed Nov 4 11:10:53 2015 -0600

----------------------------------------------------------------------
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/storm/blob/f3ed08b9/CHANGELOG.md
----------------------------------------------------------------------
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 64e4a8a..5c9a20e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,5 @@
 ## 0.11.0
+ * STORM-1157: Adding dynamic profiling for worker, restarting worker, jstack, heap dump, and profiling
  * STORM-1123: TupleImpl - Unnecessary variable initialization.
  * STORM-1153: Use static final instead of just static for class members.
  * STORM-817: Kafka Wildcard Topic Support.


[06/10] storm git commit: Moving Documentation to more appropriate location

Posted by bo...@apache.org.
Moving Documentation to more appropriate location


Project: http://git-wip-us.apache.org/repos/asf/storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/storm/commit/25f31a0e
Tree: http://git-wip-us.apache.org/repos/asf/storm/tree/25f31a0e
Diff: http://git-wip-us.apache.org/repos/asf/storm/diff/25f31a0e

Branch: refs/heads/master
Commit: 25f31a0eee02ceee75f72a8f82f9f33e98cdd192
Parents: f697044
Author: Kishor Patil <kp...@yahoo-inc.com>
Authored: Tue Nov 3 15:16:58 2015 -0600
Committer: Kishor Patil <kp...@yahoo-inc.com>
Committed: Tue Nov 3 15:16:58 2015 -0600

----------------------------------------------------------------------
 docs/DYNAMIC_LOG_LEVEL_SETTINGS.md              |  41 -------------------
 docs/DYNAMIC_WORKER_PROFILING.md                |  29 -------------
 docs/documentation/Documentation.md             |   2 +
 .../documentation/dynamic-log-level-settings.md |  41 +++++++++++++++++++
 docs/documentation/dynamic-worker-profiling.md  |  29 +++++++++++++
 .../images/dynamic_log_level_settings_1.png     | Bin 0 -> 93689 bytes
 .../images/dynamic_log_level_settings_2.png     | Bin 0 -> 78785 bytes
 .../images/dynamic_profiling_debugging_1.png    | Bin 0 -> 93635 bytes
 .../images/dynamic_profiling_debugging_2.png    | Bin 0 -> 138120 bytes
 .../images/dynamic_profiling_debugging_3.png    | Bin 0 -> 96974 bytes
 docs/images/dynamic_log_level_settings_1.png    | Bin 93689 -> 0 bytes
 docs/images/dynamic_log_level_settings_2.png    | Bin 78785 -> 0 bytes
 docs/images/dynamic_profiling_debugging_1.png   | Bin 93635 -> 0 bytes
 docs/images/dynamic_profiling_debugging_2.png   | Bin 138120 -> 0 bytes
 docs/images/dynamic_profiling_debugging_3.png   | Bin 96974 -> 0 bytes
 15 files changed, 72 insertions(+), 70 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/storm/blob/25f31a0e/docs/DYNAMIC_LOG_LEVEL_SETTINGS.md
----------------------------------------------------------------------
diff --git a/docs/DYNAMIC_LOG_LEVEL_SETTINGS.md b/docs/DYNAMIC_LOG_LEVEL_SETTINGS.md
deleted file mode 100644
index f38b708..0000000
--- a/docs/DYNAMIC_LOG_LEVEL_SETTINGS.md
+++ /dev/null
@@ -1,41 +0,0 @@
-Dynamic Log Level Settings
-==========================
-
-We have added the ability to set log level settings for a running topology using the Storm UI and the Storm CLI. 
-
-The log level settings apply the same way as you'd expect from log4j, as all we are doing is telling log4j to set the level of the logger you provide. If you set the log level of a parent logger, the children loggers start using that level (unless the children have a more restrictive level already). A timeout can optionally be provided (except for DEBUG mode, where it’s required in the UI), if workers should reset log levels automatically.
-
-This revert action is triggered using a polling mechanism (every 30 seconds, but this is configurable), so you should expect your timeouts to be the value you provided plus anywhere between 0 and the setting's value.
-
-Using the Storm UI
--------------
-
-In order to set a level, click on a running topology, and then click on “Change Log Level” in the Topology Actions section.
-
-![Change Log Level dialog](images/dynamic_log_level_settings_1.png "Change Log Level dialog")
-
-Next, provide the logger name, select the level you expect (e.g. WARN), and a timeout in seconds (or 0 if not needed). Then click on “Add”.
-
-![After adding a log level setting](images/dynamic_log_level_settings_2.png "After adding a log level setting")
-
-To clear the log level click on the “Clear” button. This reverts the log level back to what it was before you added the setting. The log level line will disappear from the UI.
-
-While there is a delay resetting log levels back, setting the log level in the first place is immediate (or as quickly as the message can travel from the UI/CLI to the workers by way of nimbus and zookeeper).
-
-Using the CLI
--------------
-
-Using the CLI, issue the command:
-
-`./bin/storm set_log_level [topology name] -l [logger name]=[LEVEL]:[TIMEOUT]`
-
-For example:
-
-`./bin/storm set_log_level my_topology -l ROOT=DEBUG:30`
-
-Sets the ROOT logger to DEBUG for 30 seconds.
-
-`./bin/storm set_log_level my_topology -r ROOT`
-
-Clears the ROOT logger dynamic log level, resetting it to its original value.
-

http://git-wip-us.apache.org/repos/asf/storm/blob/25f31a0e/docs/DYNAMIC_WORKER_PROFILING.md
----------------------------------------------------------------------
diff --git a/docs/DYNAMIC_WORKER_PROFILING.md b/docs/DYNAMIC_WORKER_PROFILING.md
deleted file mode 100644
index 9bc8da3..0000000
--- a/docs/DYNAMIC_WORKER_PROFILING.md
+++ /dev/null
@@ -1,29 +0,0 @@
-Dynamic Worker Profiling
-==========================
-
-In multi-tenant mode, storm launches long-running JVMs across cluster without sudo access to user. Self-serving of Java heap-dumps, jstacks and java profiling of these JVMs would improve users' ability to analyze and debug issues when monitoring it actively.
-
-The storm dynamic profiler lets you dynamically take heap-dumps, jprofile or jstack for a worker jvm running on stock cluster. It let user download these dumps from the browser and use your favorite tools to analyze it  The UI component page provides list workers for the component and action buttons. The logviewer lets you download the dumps generated by these logs. Please see the screenshots for more information.
-
-Using the Storm UI
--------------
-
-In order to request for heap-dump, jstack, start/stop/dump jprofile or restart a worker, click on a running topology, then click on specific component, then you can select worker from the dropdown for that particular component and then click on “Start","Heap", "Jstack" or "Restart Worker" in the "Profiing and Debugging" section.
-
-![Profiling and Debugging](images/dynamic_profiling_debugging_1.png "Profiling and Debugging")
-
-For start jprofile, provide a timeout in minutes (or 10 if not needed). Then click on “Start”.
-
-![After starting jprofile for worker](images/dynamic_profiling_debugging_2.png "After jprofile for worker ")
-
-To stop the jprofile logging click on the “Stop” button. This dumps the jprofile stats and stops the profiling. Refresh the page for the line to disappear from the UI.
-
-Click on "My Dump Files" to go the logviewer UI for list of worker specific dump files.
-
-![Dump Files Links for worker](images/dynamic_profiling_debugging_3.png "Dump Files Links for worker")
-
-Configuration
--------------
-
-The "worker.profiler.command" can be configured to point to specific pluggable profiler, heapdump commands. The "worker.profiler.enabled" can be disabled if plugin is not available or jdk does not support Jprofile flight recording so that worker JVM options will not have "worker.profiler.childopts". To use different profiler plugin, you can change these configuration.
-

http://git-wip-us.apache.org/repos/asf/storm/blob/25f31a0e/docs/documentation/Documentation.md
----------------------------------------------------------------------
diff --git a/docs/documentation/Documentation.md b/docs/documentation/Documentation.md
index f32268c..48e18e8 100644
--- a/docs/documentation/Documentation.md
+++ b/docs/documentation/Documentation.md
@@ -44,6 +44,8 @@ Trident is an alternative interface to Storm. It provides exactly-once processin
 * [Hooks](Hooks.html)
 * [Metrics](Metrics.html)
 * [Lifecycle of a trident tuple]()
+* [Dynamic Log Level Settings](dynamic-log-level-settings.html)
+* [Dynamic Worker Profiling](dynamic-worker-profiling.html)
 
 ### Advanced
 

http://git-wip-us.apache.org/repos/asf/storm/blob/25f31a0e/docs/documentation/dynamic-log-level-settings.md
----------------------------------------------------------------------
diff --git a/docs/documentation/dynamic-log-level-settings.md b/docs/documentation/dynamic-log-level-settings.md
new file mode 100644
index 0000000..f38b708
--- /dev/null
+++ b/docs/documentation/dynamic-log-level-settings.md
@@ -0,0 +1,41 @@
+Dynamic Log Level Settings
+==========================
+
+We have added the ability to set log level settings for a running topology using the Storm UI and the Storm CLI. 
+
+The log level settings apply the same way as you'd expect from log4j, as all we are doing is telling log4j to set the level of the logger you provide. If you set the log level of a parent logger, the children loggers start using that level (unless the children have a more restrictive level already). A timeout can optionally be provided (except for DEBUG mode, where it’s required in the UI), if workers should reset log levels automatically.
+
+This revert action is triggered using a polling mechanism (every 30 seconds, but this is configurable), so you should expect your timeouts to be the value you provided plus anywhere between 0 and the setting's value.
+
+Using the Storm UI
+-------------
+
+In order to set a level, click on a running topology, and then click on “Change Log Level” in the Topology Actions section.
+
+![Change Log Level dialog](images/dynamic_log_level_settings_1.png "Change Log Level dialog")
+
+Next, provide the logger name, select the level you expect (e.g. WARN), and a timeout in seconds (or 0 if not needed). Then click on “Add”.
+
+![After adding a log level setting](images/dynamic_log_level_settings_2.png "After adding a log level setting")
+
+To clear the log level click on the “Clear” button. This reverts the log level back to what it was before you added the setting. The log level line will disappear from the UI.
+
+While there is a delay resetting log levels back, setting the log level in the first place is immediate (or as quickly as the message can travel from the UI/CLI to the workers by way of nimbus and zookeeper).
+
+Using the CLI
+-------------
+
+Using the CLI, issue the command:
+
+`./bin/storm set_log_level [topology name] -l [logger name]=[LEVEL]:[TIMEOUT]`
+
+For example:
+
+`./bin/storm set_log_level my_topology -l ROOT=DEBUG:30`
+
+Sets the ROOT logger to DEBUG for 30 seconds.
+
+`./bin/storm set_log_level my_topology -r ROOT`
+
+Clears the ROOT logger dynamic log level, resetting it to its original value.
+

http://git-wip-us.apache.org/repos/asf/storm/blob/25f31a0e/docs/documentation/dynamic-worker-profiling.md
----------------------------------------------------------------------
diff --git a/docs/documentation/dynamic-worker-profiling.md b/docs/documentation/dynamic-worker-profiling.md
new file mode 100644
index 0000000..9bc8da3
--- /dev/null
+++ b/docs/documentation/dynamic-worker-profiling.md
@@ -0,0 +1,29 @@
+Dynamic Worker Profiling
+==========================
+
+In multi-tenant mode, storm launches long-running JVMs across the cluster without sudo access for users. Self-serving of Java heap-dumps, jstacks and java profiling of these JVMs would improve users' ability to analyze and debug issues while monitoring them actively.
+
+The storm dynamic profiler lets you dynamically take heap-dumps, jprofile or jstack for a worker jvm running on a stock cluster. It lets users download these dumps from the browser and use their favorite tools to analyze them. The UI component page provides a list of workers for the component and action buttons. The logviewer lets you download the dumps generated by these actions. Please see the screenshots for more information.
+
+Using the Storm UI
+-------------
+
+In order to request a heap-dump, jstack, start/stop/dump jprofile or restart a worker, click on a running topology, then click on the specific component, then select a worker from the dropdown for that particular component and click on “Start", "Heap", "Jstack" or "Restart Worker" in the "Profiling and Debugging" section.
+
+![Profiling and Debugging](images/dynamic_profiling_debugging_1.png "Profiling and Debugging")
+
+To start jprofile, provide a timeout in minutes (or 10 if not needed). Then click on “Start”.
+
+![After starting jprofile for worker](images/dynamic_profiling_debugging_2.png "After jprofile for worker ")
+
+To stop the jprofile logging click on the “Stop” button. This dumps the jprofile stats and stops the profiling. Refresh the page for the line to disappear from the UI.
+
+Click on "My Dump Files" to go the logviewer UI for list of worker specific dump files.
+
+![Dump Files Links for worker](images/dynamic_profiling_debugging_3.png "Dump Files Links for worker")
+
+Configuration
+-------------
+
+The "worker.profiler.command" can be configured to point to specific pluggable profiler, heapdump commands. The "worker.profiler.enabled" can be disabled if plugin is not available or jdk does not support Jprofile flight recording so that worker JVM options will not have "worker.profiler.childopts". To use different profiler plugin, you can change these configuration.
+

http://git-wip-us.apache.org/repos/asf/storm/blob/25f31a0e/docs/documentation/images/dynamic_log_level_settings_1.png
----------------------------------------------------------------------
diff --git a/docs/documentation/images/dynamic_log_level_settings_1.png b/docs/documentation/images/dynamic_log_level_settings_1.png
new file mode 100644
index 0000000..71d42e7
Binary files /dev/null and b/docs/documentation/images/dynamic_log_level_settings_1.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/25f31a0e/docs/documentation/images/dynamic_log_level_settings_2.png
----------------------------------------------------------------------
diff --git a/docs/documentation/images/dynamic_log_level_settings_2.png b/docs/documentation/images/dynamic_log_level_settings_2.png
new file mode 100644
index 0000000..d0e61a7
Binary files /dev/null and b/docs/documentation/images/dynamic_log_level_settings_2.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/25f31a0e/docs/documentation/images/dynamic_profiling_debugging_1.png
----------------------------------------------------------------------
diff --git a/docs/documentation/images/dynamic_profiling_debugging_1.png b/docs/documentation/images/dynamic_profiling_debugging_1.png
new file mode 100644
index 0000000..3913e86
Binary files /dev/null and b/docs/documentation/images/dynamic_profiling_debugging_1.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/25f31a0e/docs/documentation/images/dynamic_profiling_debugging_2.png
----------------------------------------------------------------------
diff --git a/docs/documentation/images/dynamic_profiling_debugging_2.png b/docs/documentation/images/dynamic_profiling_debugging_2.png
new file mode 100644
index 0000000..66c0236
Binary files /dev/null and b/docs/documentation/images/dynamic_profiling_debugging_2.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/25f31a0e/docs/documentation/images/dynamic_profiling_debugging_3.png
----------------------------------------------------------------------
diff --git a/docs/documentation/images/dynamic_profiling_debugging_3.png b/docs/documentation/images/dynamic_profiling_debugging_3.png
new file mode 100644
index 0000000..5706d7e
Binary files /dev/null and b/docs/documentation/images/dynamic_profiling_debugging_3.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/25f31a0e/docs/images/dynamic_log_level_settings_1.png
----------------------------------------------------------------------
diff --git a/docs/images/dynamic_log_level_settings_1.png b/docs/images/dynamic_log_level_settings_1.png
deleted file mode 100644
index 71d42e7..0000000
Binary files a/docs/images/dynamic_log_level_settings_1.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/storm/blob/25f31a0e/docs/images/dynamic_log_level_settings_2.png
----------------------------------------------------------------------
diff --git a/docs/images/dynamic_log_level_settings_2.png b/docs/images/dynamic_log_level_settings_2.png
deleted file mode 100644
index d0e61a7..0000000
Binary files a/docs/images/dynamic_log_level_settings_2.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/storm/blob/25f31a0e/docs/images/dynamic_profiling_debugging_1.png
----------------------------------------------------------------------
diff --git a/docs/images/dynamic_profiling_debugging_1.png b/docs/images/dynamic_profiling_debugging_1.png
deleted file mode 100644
index 3913e86..0000000
Binary files a/docs/images/dynamic_profiling_debugging_1.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/storm/blob/25f31a0e/docs/images/dynamic_profiling_debugging_2.png
----------------------------------------------------------------------
diff --git a/docs/images/dynamic_profiling_debugging_2.png b/docs/images/dynamic_profiling_debugging_2.png
deleted file mode 100644
index 66c0236..0000000
Binary files a/docs/images/dynamic_profiling_debugging_2.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/storm/blob/25f31a0e/docs/images/dynamic_profiling_debugging_3.png
----------------------------------------------------------------------
diff --git a/docs/images/dynamic_profiling_debugging_3.png b/docs/images/dynamic_profiling_debugging_3.png
deleted file mode 100644
index 5706d7e..0000000
Binary files a/docs/images/dynamic_profiling_debugging_3.png and /dev/null differ


[02/10] storm git commit: Adding dynamic profiling for worker

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/0c2021e6/storm-core/src/jvm/backtype/storm/generated/Nimbus.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/generated/Nimbus.java b/storm-core/src/jvm/backtype/storm/generated/Nimbus.java
index 59f74fb..9b2229f 100644
--- a/storm-core/src/jvm/backtype/storm/generated/Nimbus.java
+++ b/storm-core/src/jvm/backtype/storm/generated/Nimbus.java
@@ -51,7 +51,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-9")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-11-2")
 public class Nimbus {
 
   public interface Iface {
@@ -88,6 +88,10 @@ public class Nimbus {
      */
     public void debug(String name, String component, boolean enable, double samplingPercentage) throws NotAliveException, AuthorizationException, org.apache.thrift.TException;
 
+    public void setWorkerProfiler(String id, ProfileRequest profileRequest) throws org.apache.thrift.TException;
+
+    public List<ProfileRequest> getComponentPendingProfileActions(String id, String component_id, ProfileAction action) throws org.apache.thrift.TException;
+
     public void uploadNewCredentials(String name, Credentials creds) throws NotAliveException, InvalidTopologyException, AuthorizationException, org.apache.thrift.TException;
 
     public String beginFileUpload() throws AuthorizationException, org.apache.thrift.TException;
@@ -152,6 +156,10 @@ public class Nimbus {
 
     public void debug(String name, String component, boolean enable, double samplingPercentage, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
+    public void setWorkerProfiler(String id, ProfileRequest profileRequest, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void getComponentPendingProfileActions(String id, String component_id, ProfileAction action, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
     public void uploadNewCredentials(String name, Credentials creds, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
     public void beginFileUpload(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
@@ -477,6 +485,52 @@ public class Nimbus {
       return;
     }
 
+    public void setWorkerProfiler(String id, ProfileRequest profileRequest) throws org.apache.thrift.TException
+    {
+      send_setWorkerProfiler(id, profileRequest);
+      recv_setWorkerProfiler();
+    }
+
+    public void send_setWorkerProfiler(String id, ProfileRequest profileRequest) throws org.apache.thrift.TException
+    {
+      setWorkerProfiler_args args = new setWorkerProfiler_args();
+      args.set_id(id);
+      args.set_profileRequest(profileRequest);
+      sendBase("setWorkerProfiler", args);
+    }
+
+    public void recv_setWorkerProfiler() throws org.apache.thrift.TException
+    {
+      setWorkerProfiler_result result = new setWorkerProfiler_result();
+      receiveBase(result, "setWorkerProfiler");
+      return;
+    }
+
+    public List<ProfileRequest> getComponentPendingProfileActions(String id, String component_id, ProfileAction action) throws org.apache.thrift.TException
+    {
+      send_getComponentPendingProfileActions(id, component_id, action);
+      return recv_getComponentPendingProfileActions();
+    }
+
+    public void send_getComponentPendingProfileActions(String id, String component_id, ProfileAction action) throws org.apache.thrift.TException
+    {
+      getComponentPendingProfileActions_args args = new getComponentPendingProfileActions_args();
+      args.set_id(id);
+      args.set_component_id(component_id);
+      args.set_action(action);
+      sendBase("getComponentPendingProfileActions", args);
+    }
+
+    public List<ProfileRequest> recv_getComponentPendingProfileActions() throws org.apache.thrift.TException
+    {
+      getComponentPendingProfileActions_result result = new getComponentPendingProfileActions_result();
+      receiveBase(result, "getComponentPendingProfileActions");
+      if (result.is_set_success()) {
+        return result.success;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getComponentPendingProfileActions failed: unknown result");
+    }
+
     public void uploadNewCredentials(String name, Credentials creds) throws NotAliveException, InvalidTopologyException, AuthorizationException, org.apache.thrift.TException
     {
       send_uploadNewCredentials(name, creds);
@@ -1267,6 +1321,79 @@ public class Nimbus {
       }
     }
 
+    public void setWorkerProfiler(String id, ProfileRequest profileRequest, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      setWorkerProfiler_call method_call = new setWorkerProfiler_call(id, profileRequest, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class setWorkerProfiler_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private String id;
+      private ProfileRequest profileRequest;
+      public setWorkerProfiler_call(String id, ProfileRequest profileRequest, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.id = id;
+        this.profileRequest = profileRequest;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("setWorkerProfiler", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        setWorkerProfiler_args args = new setWorkerProfiler_args();
+        args.set_id(id);
+        args.set_profileRequest(profileRequest);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public void getResult() throws org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        (new Client(prot)).recv_setWorkerProfiler();
+      }
+    }
+
+    public void getComponentPendingProfileActions(String id, String component_id, ProfileAction action, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      getComponentPendingProfileActions_call method_call = new getComponentPendingProfileActions_call(id, component_id, action, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class getComponentPendingProfileActions_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private String id;
+      private String component_id;
+      private ProfileAction action;
+      public getComponentPendingProfileActions_call(String id, String component_id, ProfileAction action, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.id = id;
+        this.component_id = component_id;
+        this.action = action;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getComponentPendingProfileActions", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        getComponentPendingProfileActions_args args = new getComponentPendingProfileActions_args();
+        args.set_id(id);
+        args.set_component_id(component_id);
+        args.set_action(action);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public List<ProfileRequest> getResult() throws org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_getComponentPendingProfileActions();
+      }
+    }
+
     public void uploadNewCredentials(String name, Credentials creds, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
       checkReady();
       uploadNewCredentials_call method_call = new uploadNewCredentials_call(name, creds, resultHandler, this, ___protocolFactory, ___transport);
@@ -1785,6 +1912,8 @@ public class Nimbus {
       processMap.put("setLogConfig", new setLogConfig());
       processMap.put("getLogConfig", new getLogConfig());
       processMap.put("debug", new debug());
+      processMap.put("setWorkerProfiler", new setWorkerProfiler());
+      processMap.put("getComponentPendingProfileActions", new getComponentPendingProfileActions());
       processMap.put("uploadNewCredentials", new uploadNewCredentials());
       processMap.put("beginFileUpload", new beginFileUpload());
       processMap.put("uploadChunk", new uploadChunk());
@@ -2057,6 +2186,46 @@ public class Nimbus {
       }
     }
 
+    public static class setWorkerProfiler<I extends Iface> extends org.apache.thrift.ProcessFunction<I, setWorkerProfiler_args> {
+      public setWorkerProfiler() {
+        super("setWorkerProfiler");
+      }
+
+      public setWorkerProfiler_args getEmptyArgsInstance() {
+        return new setWorkerProfiler_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public setWorkerProfiler_result getResult(I iface, setWorkerProfiler_args args) throws org.apache.thrift.TException {
+        setWorkerProfiler_result result = new setWorkerProfiler_result();
+        iface.setWorkerProfiler(args.id, args.profileRequest);
+        return result;
+      }
+    }
+
+    public static class getComponentPendingProfileActions<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getComponentPendingProfileActions_args> {
+      public getComponentPendingProfileActions() {
+        super("getComponentPendingProfileActions");
+      }
+
+      public getComponentPendingProfileActions_args getEmptyArgsInstance() {
+        return new getComponentPendingProfileActions_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public getComponentPendingProfileActions_result getResult(I iface, getComponentPendingProfileActions_args args) throws org.apache.thrift.TException {
+        getComponentPendingProfileActions_result result = new getComponentPendingProfileActions_result();
+        result.success = iface.getComponentPendingProfileActions(args.id, args.component_id, args.action);
+        return result;
+      }
+    }
+
     public static class uploadNewCredentials<I extends Iface> extends org.apache.thrift.ProcessFunction<I, uploadNewCredentials_args> {
       public uploadNewCredentials() {
         super("uploadNewCredentials");
@@ -2458,6 +2627,8 @@ public class Nimbus {
       processMap.put("setLogConfig", new setLogConfig());
       processMap.put("getLogConfig", new getLogConfig());
       processMap.put("debug", new debug());
+      processMap.put("setWorkerProfiler", new setWorkerProfiler());
+      processMap.put("getComponentPendingProfileActions", new getComponentPendingProfileActions());
       processMap.put("uploadNewCredentials", new uploadNewCredentials());
       processMap.put("beginFileUpload", new beginFileUpload());
       processMap.put("uploadChunk", new uploadChunk());
@@ -3080,6 +3251,107 @@ public class Nimbus {
       }
     }
 
+    /**
+     * Thrift-generated asynchronous processor for the setWorkerProfiler RPC (void return).
+     * NOTE(review): machine-generated by the Thrift compiler (see file header) --
+     * regenerate from the .thrift definition rather than hand-editing.
+     */
+    public static class setWorkerProfiler<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, setWorkerProfiler_args, Void> {
+      public setWorkerProfiler() {
+        super("setWorkerProfiler");
+      }
+
+      public setWorkerProfiler_args getEmptyArgsInstance() {
+        return new setWorkerProfiler_args();
+      }
+
+      // Builds the callback the async handler completes; it writes the response
+      // frame (or an exception frame) back through the frame buffer.
+      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<Void>() { 
+          public void onComplete(Void o) {
+            // Success: send an empty REPLY (void method, not oneway).
+            setWorkerProfiler_result result = new setWorkerProfiler_result();
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            // Writing the response failed; drop the connection's frame buffer.
+            fb.close();
+          }
+          public void onError(Exception e) {
+            // The method declares no thrift exceptions, so every handler error is
+            // surfaced to the client as an INTERNAL_ERROR TApplicationException.
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            setWorkerProfiler_result result = new setWorkerProfiler_result();
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      // Kicks off the async handler; the result handler above completes the call.
+      public void start(I iface, setWorkerProfiler_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+        iface.setWorkerProfiler(args.id, args.profileRequest,resultHandler);
+      }
+    }
+
+    /**
+     * Thrift-generated asynchronous processor for getComponentPendingProfileActions,
+     * which returns a List&lt;ProfileRequest&gt;.
+     * NOTE(review): machine-generated by the Thrift compiler (see file header) --
+     * regenerate from the .thrift definition rather than hand-editing.
+     */
+    public static class getComponentPendingProfileActions<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getComponentPendingProfileActions_args, List<ProfileRequest>> {
+      public getComponentPendingProfileActions() {
+        super("getComponentPendingProfileActions");
+      }
+
+      public getComponentPendingProfileActions_args getEmptyArgsInstance() {
+        return new getComponentPendingProfileActions_args();
+      }
+
+      // Builds the callback the async handler completes; it writes the response
+      // frame (or an exception frame) back through the frame buffer.
+      public AsyncMethodCallback<List<ProfileRequest>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<List<ProfileRequest>>() { 
+          public void onComplete(List<ProfileRequest> o) {
+            // Success: pack the handler's list into `success` and send a REPLY.
+            getComponentPendingProfileActions_result result = new getComponentPendingProfileActions_result();
+            result.success = o;
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            // Writing the response failed; drop the connection's frame buffer.
+            fb.close();
+          }
+          public void onError(Exception e) {
+            // The method declares no thrift exceptions, so every handler error is
+            // surfaced to the client as an INTERNAL_ERROR TApplicationException.
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            getComponentPendingProfileActions_result result = new getComponentPendingProfileActions_result();
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      // Kicks off the async handler; the result handler above completes the call.
+      public void start(I iface, getComponentPendingProfileActions_args args, org.apache.thrift.async.AsyncMethodCallback<List<ProfileRequest>> resultHandler) throws TException {
+        iface.getComponentPendingProfileActions(args.id, args.component_id, args.action,resultHandler);
+      }
+    }
+
     public static class uploadNewCredentials<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, uploadNewCredentials_args, Void> {
       public uploadNewCredentials() {
         super("uploadNewCredentials");
@@ -13588,6 +13860,1709 @@ public class Nimbus {
 
   }
 
+  /**
+   * Thrift-generated argument wrapper for Nimbus.setWorkerProfiler(id, profileRequest).
+   * Field 1 is the topology/worker id string; field 2 is the ProfileRequest struct.
+   * NOTE(review): machine-generated by the Thrift compiler (see file header) --
+   * regenerate from the .thrift definition rather than hand-editing.
+   */
+  public static class setWorkerProfiler_args implements org.apache.thrift.TBase<setWorkerProfiler_args, setWorkerProfiler_args._Fields>, java.io.Serializable, Cloneable, Comparable<setWorkerProfiler_args>   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("setWorkerProfiler_args");
+
+    private static final org.apache.thrift.protocol.TField ID_FIELD_DESC = new org.apache.thrift.protocol.TField("id", org.apache.thrift.protocol.TType.STRING, (short)1);
+    private static final org.apache.thrift.protocol.TField PROFILE_REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("profileRequest", org.apache.thrift.protocol.TType.STRUCT, (short)2);
+
+    // Two serialization strategies: StandardScheme (field-tagged wire format) and
+    // TupleScheme (compact, BitSet-of-optionals format); chosen per protocol.
+    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+    static {
+      schemes.put(StandardScheme.class, new setWorkerProfiler_argsStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new setWorkerProfiler_argsTupleSchemeFactory());
+    }
+
+    private String id; // required
+    private ProfileRequest profileRequest; // required
+
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+      ID((short)1, "id"),
+      PROFILE_REQUEST((short)2, "profileRequest");
+
+      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+      static {
+        for (_Fields field : EnumSet.allOf(_Fields.class)) {
+          byName.put(field.getFieldName(), field);
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, or null if its not found.
+       */
+      public static _Fields findByThriftId(int fieldId) {
+        switch(fieldId) {
+          case 1: // ID
+            return ID;
+          case 2: // PROFILE_REQUEST
+            return PROFILE_REQUEST;
+          default:
+            return null;
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, throwing an exception
+       * if it is not found.
+       */
+      public static _Fields findByThriftIdOrThrow(int fieldId) {
+        _Fields fields = findByThriftId(fieldId);
+        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+        return fields;
+      }
+
+      /**
+       * Find the _Fields constant that matches name, or null if its not found.
+       */
+      public static _Fields findByName(String name) {
+        return byName.get(name);
+      }
+
+      private final short _thriftId;
+      private final String _fieldName;
+
+      _Fields(short thriftId, String fieldName) {
+        _thriftId = thriftId;
+        _fieldName = fieldName;
+      }
+
+      public short getThriftFieldId() {
+        return _thriftId;
+      }
+
+      public String getFieldName() {
+        return _fieldName;
+      }
+    }
+
+    // isset id assignments
+    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+    static {
+      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      tmpMap.put(_Fields.ID, new org.apache.thrift.meta_data.FieldMetaData("id", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+      tmpMap.put(_Fields.PROFILE_REQUEST, new org.apache.thrift.meta_data.FieldMetaData("profileRequest", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ProfileRequest.class)));
+      metaDataMap = Collections.unmodifiableMap(tmpMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(setWorkerProfiler_args.class, metaDataMap);
+    }
+
+    public setWorkerProfiler_args() {
+    }
+
+    public setWorkerProfiler_args(
+      String id,
+      ProfileRequest profileRequest)
+    {
+      this();
+      this.id = id;
+      this.profileRequest = profileRequest;
+    }
+
+    /**
+     * Performs a deep copy on <i>other</i>.
+     */
+    public setWorkerProfiler_args(setWorkerProfiler_args other) {
+      if (other.is_set_id()) {
+        this.id = other.id;
+      }
+      if (other.is_set_profileRequest()) {
+        this.profileRequest = new ProfileRequest(other.profileRequest);
+      }
+    }
+
+    public setWorkerProfiler_args deepCopy() {
+      return new setWorkerProfiler_args(this);
+    }
+
+    @Override
+    public void clear() {
+      this.id = null;
+      this.profileRequest = null;
+    }
+
+    public String get_id() {
+      return this.id;
+    }
+
+    public void set_id(String id) {
+      this.id = id;
+    }
+
+    public void unset_id() {
+      this.id = null;
+    }
+
+    /** Returns true if field id is set (has been assigned a value) and false otherwise */
+    public boolean is_set_id() {
+      return this.id != null;
+    }
+
+    public void set_id_isSet(boolean value) {
+      if (!value) {
+        this.id = null;
+      }
+    }
+
+    public ProfileRequest get_profileRequest() {
+      return this.profileRequest;
+    }
+
+    public void set_profileRequest(ProfileRequest profileRequest) {
+      this.profileRequest = profileRequest;
+    }
+
+    public void unset_profileRequest() {
+      this.profileRequest = null;
+    }
+
+    /** Returns true if field profileRequest is set (has been assigned a value) and false otherwise */
+    public boolean is_set_profileRequest() {
+      return this.profileRequest != null;
+    }
+
+    public void set_profileRequest_isSet(boolean value) {
+      if (!value) {
+        this.profileRequest = null;
+      }
+    }
+
+    public void setFieldValue(_Fields field, Object value) {
+      switch (field) {
+      case ID:
+        if (value == null) {
+          unset_id();
+        } else {
+          set_id((String)value);
+        }
+        break;
+
+      case PROFILE_REQUEST:
+        if (value == null) {
+          unset_profileRequest();
+        } else {
+          set_profileRequest((ProfileRequest)value);
+        }
+        break;
+
+      }
+    }
+
+    public Object getFieldValue(_Fields field) {
+      switch (field) {
+      case ID:
+        return get_id();
+
+      case PROFILE_REQUEST:
+        return get_profileRequest();
+
+      }
+      throw new IllegalStateException();
+    }
+
+    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+    public boolean isSet(_Fields field) {
+      if (field == null) {
+        throw new IllegalArgumentException();
+      }
+
+      switch (field) {
+      case ID:
+        return is_set_id();
+      case PROFILE_REQUEST:
+        return is_set_profileRequest();
+      }
+      throw new IllegalStateException();
+    }
+
+    @Override
+    public boolean equals(Object that) {
+      if (that == null)
+        return false;
+      if (that instanceof setWorkerProfiler_args)
+        return this.equals((setWorkerProfiler_args)that);
+      return false;
+    }
+
+    public boolean equals(setWorkerProfiler_args that) {
+      if (that == null)
+        return false;
+
+      boolean this_present_id = true && this.is_set_id();
+      boolean that_present_id = true && that.is_set_id();
+      if (this_present_id || that_present_id) {
+        if (!(this_present_id && that_present_id))
+          return false;
+        if (!this.id.equals(that.id))
+          return false;
+      }
+
+      boolean this_present_profileRequest = true && this.is_set_profileRequest();
+      boolean that_present_profileRequest = true && that.is_set_profileRequest();
+      if (this_present_profileRequest || that_present_profileRequest) {
+        if (!(this_present_profileRequest && that_present_profileRequest))
+          return false;
+        if (!this.profileRequest.equals(that.profileRequest))
+          return false;
+      }
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      // Hash over (presence flag, value) pairs so that equal structs (per the
+      // generated equals above) hash equally.
+      List<Object> list = new ArrayList<Object>();
+
+      boolean present_id = true && (is_set_id());
+      list.add(present_id);
+      if (present_id)
+        list.add(id);
+
+      boolean present_profileRequest = true && (is_set_profileRequest());
+      list.add(present_profileRequest);
+      if (present_profileRequest)
+        list.add(profileRequest);
+
+      return list.hashCode();
+    }
+
+    @Override
+    public int compareTo(setWorkerProfiler_args other) {
+      if (!getClass().equals(other.getClass())) {
+        return getClass().getName().compareTo(other.getClass().getName());
+      }
+
+      int lastComparison = 0;
+
+      lastComparison = Boolean.valueOf(is_set_id()).compareTo(other.is_set_id());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (is_set_id()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.id, other.id);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      lastComparison = Boolean.valueOf(is_set_profileRequest()).compareTo(other.is_set_profileRequest());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (is_set_profileRequest()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.profileRequest, other.profileRequest);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      return 0;
+    }
+
+    public _Fields fieldForId(int fieldId) {
+      return _Fields.findByThriftId(fieldId);
+    }
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+    }
+
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder("setWorkerProfiler_args(");
+      boolean first = true;
+
+      sb.append("id:");
+      if (this.id == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.id);
+      }
+      first = false;
+      if (!first) sb.append(", ");
+      sb.append("profileRequest:");
+      if (this.profileRequest == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.profileRequest);
+      }
+      first = false;
+      sb.append(")");
+      return sb.toString();
+    }
+
+    public void validate() throws org.apache.thrift.TException {
+      // check for required fields
+      // check for sub-struct validity
+      if (profileRequest != null) {
+        profileRequest.validate();
+      }
+    }
+
+    // Java serialization is delegated to the Thrift compact protocol.
+    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+      try {
+        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+      try {
+        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private static class setWorkerProfiler_argsStandardSchemeFactory implements SchemeFactory {
+      public setWorkerProfiler_argsStandardScheme getScheme() {
+        return new setWorkerProfiler_argsStandardScheme();
+      }
+    }
+
+    private static class setWorkerProfiler_argsStandardScheme extends StandardScheme<setWorkerProfiler_args> {
+
+      // Field-tagged read: unknown or wrongly-typed fields are skipped so old
+      // and new schema versions remain wire-compatible.
+      public void read(org.apache.thrift.protocol.TProtocol iprot, setWorkerProfiler_args struct) throws org.apache.thrift.TException {
+        org.apache.thrift.protocol.TField schemeField;
+        iprot.readStructBegin();
+        while (true)
+        {
+          schemeField = iprot.readFieldBegin();
+          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+            break;
+          }
+          switch (schemeField.id) {
+            case 1: // ID
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+                struct.id = iprot.readString();
+                struct.set_id_isSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            case 2: // PROFILE_REQUEST
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+                struct.profileRequest = new ProfileRequest();
+                struct.profileRequest.read(iprot);
+                struct.set_profileRequest_isSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            default:
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+          }
+          iprot.readFieldEnd();
+        }
+        iprot.readStructEnd();
+        struct.validate();
+      }
+
+      // Null fields are simply omitted from the output stream.
+      public void write(org.apache.thrift.protocol.TProtocol oprot, setWorkerProfiler_args struct) throws org.apache.thrift.TException {
+        struct.validate();
+
+        oprot.writeStructBegin(STRUCT_DESC);
+        if (struct.id != null) {
+          oprot.writeFieldBegin(ID_FIELD_DESC);
+          oprot.writeString(struct.id);
+          oprot.writeFieldEnd();
+        }
+        if (struct.profileRequest != null) {
+          oprot.writeFieldBegin(PROFILE_REQUEST_FIELD_DESC);
+          struct.profileRequest.write(oprot);
+          oprot.writeFieldEnd();
+        }
+        oprot.writeFieldStop();
+        oprot.writeStructEnd();
+      }
+
+    }
+
+    private static class setWorkerProfiler_argsTupleSchemeFactory implements SchemeFactory {
+      public setWorkerProfiler_argsTupleScheme getScheme() {
+        return new setWorkerProfiler_argsTupleScheme();
+      }
+    }
+
+    private static class setWorkerProfiler_argsTupleScheme extends TupleScheme<setWorkerProfiler_args> {
+
+      @Override
+      public void write(org.apache.thrift.protocol.TProtocol prot, setWorkerProfiler_args struct) throws org.apache.thrift.TException {
+        TTupleProtocol oprot = (TTupleProtocol) prot;
+        // Presence bitmap: bit 0 = id set, bit 1 = profileRequest set; values
+        // follow in field order for the bits that are set.
+        BitSet optionals = new BitSet();
+        if (struct.is_set_id()) {
+          optionals.set(0);
+        }
+        if (struct.is_set_profileRequest()) {
+          optionals.set(1);
+        }
+        oprot.writeBitSet(optionals, 2);
+        if (struct.is_set_id()) {
+          oprot.writeString(struct.id);
+        }
+        if (struct.is_set_profileRequest()) {
+          struct.profileRequest.write(oprot);
+        }
+      }
+
+      @Override
+      public void read(org.apache.thrift.protocol.TProtocol prot, setWorkerProfiler_args struct) throws org.apache.thrift.TException {
+        TTupleProtocol iprot = (TTupleProtocol) prot;
+        BitSet incoming = iprot.readBitSet(2);
+        if (incoming.get(0)) {
+          struct.id = iprot.readString();
+          struct.set_id_isSet(true);
+        }
+        if (incoming.get(1)) {
+          struct.profileRequest = new ProfileRequest();
+          struct.profileRequest.read(iprot);
+          struct.set_profileRequest_isSet(true);
+        }
+      }
+    }
+
+  }
+
+  /**
+   * Thrift-generated result wrapper for the void setWorkerProfiler RPC: it has
+   * no fields and exists only so an empty REPLY frame can be serialized.
+   * NOTE(review): machine-generated by the Thrift compiler (see file header) --
+   * regenerate from the .thrift definition rather than hand-editing.
+   */
+  public static class setWorkerProfiler_result implements org.apache.thrift.TBase<setWorkerProfiler_result, setWorkerProfiler_result._Fields>, java.io.Serializable, Cloneable, Comparable<setWorkerProfiler_result>   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("setWorkerProfiler_result");
+
+
+    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+    static {
+      schemes.put(StandardScheme.class, new setWorkerProfiler_resultStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new setWorkerProfiler_resultTupleSchemeFactory());
+    }
+
+
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+;
+
+      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+      static {
+        for (_Fields field : EnumSet.allOf(_Fields.class)) {
+          byName.put(field.getFieldName(), field);
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, or null if its not found.
+       */
+      public static _Fields findByThriftId(int fieldId) {
+        switch(fieldId) {
+          default:
+            return null;
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, throwing an exception
+       * if it is not found.
+       */
+      public static _Fields findByThriftIdOrThrow(int fieldId) {
+        _Fields fields = findByThriftId(fieldId);
+        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+        return fields;
+      }
+
+      /**
+       * Find the _Fields constant that matches name, or null if its not found.
+       */
+      public static _Fields findByName(String name) {
+        return byName.get(name);
+      }
+
+      private final short _thriftId;
+      private final String _fieldName;
+
+      _Fields(short thriftId, String fieldName) {
+        _thriftId = thriftId;
+        _fieldName = fieldName;
+      }
+
+      public short getThriftFieldId() {
+        return _thriftId;
+      }
+
+      public String getFieldName() {
+        return _fieldName;
+      }
+    }
+    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+    static {
+      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      metaDataMap = Collections.unmodifiableMap(tmpMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(setWorkerProfiler_result.class, metaDataMap);
+    }
+
+    public setWorkerProfiler_result() {
+    }
+
+    /**
+     * Performs a deep copy on <i>other</i>.
+     */
+    public setWorkerProfiler_result(setWorkerProfiler_result other) {
+    }
+
+    public setWorkerProfiler_result deepCopy() {
+      return new setWorkerProfiler_result(this);
+    }
+
+    @Override
+    public void clear() {
+    }
+
+    public void setFieldValue(_Fields field, Object value) {
+      switch (field) {
+      }
+    }
+
+    public Object getFieldValue(_Fields field) {
+      switch (field) {
+      }
+      throw new IllegalStateException();
+    }
+
+    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+    public boolean isSet(_Fields field) {
+      if (field == null) {
+        throw new IllegalArgumentException();
+      }
+
+      switch (field) {
+      }
+      throw new IllegalStateException();
+    }
+
+    @Override
+    public boolean equals(Object that) {
+      if (that == null)
+        return false;
+      if (that instanceof setWorkerProfiler_result)
+        return this.equals((setWorkerProfiler_result)that);
+      return false;
+    }
+
+    // With no fields, any two non-null instances are equal.
+    public boolean equals(setWorkerProfiler_result that) {
+      if (that == null)
+        return false;
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      List<Object> list = new ArrayList<Object>();
+
+      return list.hashCode();
+    }
+
+    @Override
+    public int compareTo(setWorkerProfiler_result other) {
+      if (!getClass().equals(other.getClass())) {
+        return getClass().getName().compareTo(other.getClass().getName());
+      }
+
+      int lastComparison = 0;
+
+      return 0;
+    }
+
+    public _Fields fieldForId(int fieldId) {
+      return _Fields.findByThriftId(fieldId);
+    }
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+      }
+
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder("setWorkerProfiler_result(");
+      boolean first = true;
+
+      sb.append(")");
+      return sb.toString();
+    }
+
+    public void validate() throws org.apache.thrift.TException {
+      // check for required fields
+      // check for sub-struct validity
+    }
+
+    // Java serialization is delegated to the Thrift compact protocol.
+    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+      try {
+        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+      try {
+        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private static class setWorkerProfiler_resultStandardSchemeFactory implements SchemeFactory {
+      public setWorkerProfiler_resultStandardScheme getScheme() {
+        return new setWorkerProfiler_resultStandardScheme();
+      }
+    }
+
+    private static class setWorkerProfiler_resultStandardScheme extends StandardScheme<setWorkerProfiler_result> {
+
+      // Reads and discards any fields until STOP (the struct declares none).
+      public void read(org.apache.thrift.protocol.TProtocol iprot, setWorkerProfiler_result struct) throws org.apache.thrift.TException {
+        org.apache.thrift.protocol.TField schemeField;
+        iprot.readStructBegin();
+        while (true)
+        {
+          schemeField = iprot.readFieldBegin();
+          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+            break;
+          }
+          switch (schemeField.id) {
+            default:
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+          }
+          iprot.readFieldEnd();
+        }
+        iprot.readStructEnd();
+        struct.validate();
+      }
+
+      // Writes an empty struct: begin, field-stop, end.
+      public void write(org.apache.thrift.protocol.TProtocol oprot, setWorkerProfiler_result struct) throws org.apache.thrift.TException {
+        struct.validate();
+
+        oprot.writeStructBegin(STRUCT_DESC);
+        oprot.writeFieldStop();
+        oprot.writeStructEnd();
+      }
+
+    }
+
+    private static class setWorkerProfiler_resultTupleSchemeFactory implements SchemeFactory {
+      public setWorkerProfiler_resultTupleScheme getScheme() {
+        return new setWorkerProfiler_resultTupleScheme();
+      }
+    }
+
+    private static class setWorkerProfiler_resultTupleScheme extends TupleScheme<setWorkerProfiler_result> {
+
+      // No fields: the tuple encoding of this struct is zero bytes.
+      @Override
+      public void write(org.apache.thrift.protocol.TProtocol prot, setWorkerProfiler_result struct) throws org.apache.thrift.TException {
+        TTupleProtocol oprot = (TTupleProtocol) prot;
+      }
+
+      @Override
+      public void read(org.apache.thrift.protocol.TProtocol prot, setWorkerProfiler_result struct) throws org.apache.thrift.TException {
+        TTupleProtocol iprot = (TTupleProtocol) prot;
+      }
+    }
+
+  }
+
+  public static class getComponentPendingProfileActions_args implements org.apache.thrift.TBase<getComponentPendingProfileActions_args, getComponentPendingProfileActions_args._Fields>, java.io.Serializable, Cloneable, Comparable<getComponentPendingProfileActions_args>   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getComponentPendingProfileActions_args");
+
+    private static final org.apache.thrift.protocol.TField ID_FIELD_DESC = new org.apache.thrift.protocol.TField("id", org.apache.thrift.protocol.TType.STRING, (short)1);
+    private static final org.apache.thrift.protocol.TField COMPONENT_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("component_id", org.apache.thrift.protocol.TType.STRING, (short)2);
+    private static final org.apache.thrift.protocol.TField ACTION_FIELD_DESC = new org.apache.thrift.protocol.TField("action", org.apache.thrift.protocol.TType.I32, (short)3);
+
+    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+    static {
+      schemes.put(StandardScheme.class, new getComponentPendingProfileActions_argsStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new getComponentPendingProfileActions_argsTupleSchemeFactory());
+    }
+
+    private String id; // required
+    private String component_id; // required
+    private ProfileAction action; // required
+
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+      // The short constants are the Thrift field ids from the IDL, not Java ordinals.
+      ID((short)1, "id"),
+      COMPONENT_ID((short)2, "component_id"),
+      /**
+       * 
+       * @see ProfileAction
+       */
+      ACTION((short)3, "action");
+
+      // Lookup of constants by IDL field name; built once from the enum universe.
+      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+      static {
+        for (_Fields field : EnumSet.allOf(_Fields.class)) {
+          byName.put(field.getFieldName(), field);
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, or null if its not found.
+       */
+      public static _Fields findByThriftId(int fieldId) {
+        switch(fieldId) {
+          case 1: // ID
+            return ID;
+          case 2: // COMPONENT_ID
+            return COMPONENT_ID;
+          case 3: // ACTION
+            return ACTION;
+          default:
+            return null;
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, throwing an exception
+       * if it is not found.
+       */
+      public static _Fields findByThriftIdOrThrow(int fieldId) {
+        _Fields fields = findByThriftId(fieldId);
+        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+        return fields;
+      }
+
+      /**
+       * Find the _Fields constant that matches name, or null if its not found.
+       */
+      public static _Fields findByName(String name) {
+        return byName.get(name);
+      }
+
+      private final short _thriftId;
+      private final String _fieldName;
+
+      _Fields(short thriftId, String fieldName) {
+        _thriftId = thriftId;
+        _fieldName = fieldName;
+      }
+
+      public short getThriftFieldId() {
+        return _thriftId;
+      }
+
+      public String getFieldName() {
+        return _fieldName;
+      }
+    }
+
+    // isset id assignments
+    // Runtime-introspectable field metadata (name, requirement level, wire type),
+    // registered globally so generic Thrift tooling can reflect over this struct.
+    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+    static {
+      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      tmpMap.put(_Fields.ID, new org.apache.thrift.meta_data.FieldMetaData("id", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+      tmpMap.put(_Fields.COMPONENT_ID, new org.apache.thrift.meta_data.FieldMetaData("component_id", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+      tmpMap.put(_Fields.ACTION, new org.apache.thrift.meta_data.FieldMetaData("action", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ProfileAction.class)));
+      metaDataMap = Collections.unmodifiableMap(tmpMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getComponentPendingProfileActions_args.class, metaDataMap);
+    }
+
+    // No-arg constructor used by deserialization; all fields start null/unset.
+    public getComponentPendingProfileActions_args() {
+    }
+
+    public getComponentPendingProfileActions_args(
+      String id,
+      String component_id,
+      ProfileAction action)
+    {
+      this();
+      this.id = id;
+      this.component_id = component_id;
+      this.action = action;
+    }
+
+    /**
+     * Performs a deep copy on <i>other</i>.
+     */
+    public getComponentPendingProfileActions_args(getComponentPendingProfileActions_args other) {
+      // Strings and the ProfileAction enum are immutable, so reference copies
+      // are sufficient for a "deep" copy here.
+      if (other.is_set_id()) {
+        this.id = other.id;
+      }
+      if (other.is_set_component_id()) {
+        this.component_id = other.component_id;
+      }
+      if (other.is_set_action()) {
+        this.action = other.action;
+      }
+    }
+
+    public getComponentPendingProfileActions_args deepCopy() {
+      return new getComponentPendingProfileActions_args(this);
+    }
+
+    @Override
+    public void clear() {
+      // Resets the struct to the freshly-constructed (all-unset) state.
+      this.id = null;
+      this.component_id = null;
+      this.action = null;
+    }
+
+    // For object (non-primitive) fields, "set" is simply non-null, so the
+    // unset_*/is_set_*/set_*_isSet trio below all operate on nullness.
+    public String get_id() {
+      return this.id;
+    }
+
+    public void set_id(String id) {
+      this.id = id;
+    }
+
+    public void unset_id() {
+      this.id = null;
+    }
+
+    /** Returns true if field id is set (has been assigned a value) and false otherwise */
+    public boolean is_set_id() {
+      return this.id != null;
+    }
+
+    public void set_id_isSet(boolean value) {
+      if (!value) {
+        this.id = null;
+      }
+    }
+
+    public String get_component_id() {
+      return this.component_id;
+    }
+
+    public void set_component_id(String component_id) {
+      this.component_id = component_id;
+    }
+
+    public void unset_component_id() {
+      this.component_id = null;
+    }
+
+    /** Returns true if field component_id is set (has been assigned a value) and false otherwise */
+    public boolean is_set_component_id() {
+      return this.component_id != null;
+    }
+
+    public void set_component_id_isSet(boolean value) {
+      if (!value) {
+        this.component_id = null;
+      }
+    }
+
+    /**
+     * 
+     * @see ProfileAction
+     */
+    public ProfileAction get_action() {
+      return this.action;
+    }
+
+    /**
+     * 
+     * @see ProfileAction
+     */
+    public void set_action(ProfileAction action) {
+      this.action = action;
+    }
+
+    public void unset_action() {
+      this.action = null;
+    }
+
+    /** Returns true if field action is set (has been assigned a value) and false otherwise */
+    public boolean is_set_action() {
+      return this.action != null;
+    }
+
+    public void set_action_isSet(boolean value) {
+      if (!value) {
+        this.action = null;
+      }
+    }
+
+    // Generic, reflection-style field mutator keyed by the _Fields enum;
+    // a null value unsets the field rather than storing null directly.
+    public void setFieldValue(_Fields field, Object value) {
+      switch (field) {
+      case ID:
+        if (value == null) {
+          unset_id();
+        } else {
+          set_id((String)value);
+        }
+        break;
+
+      case COMPONENT_ID:
+        if (value == null) {
+          unset_component_id();
+        } else {
+          set_component_id((String)value);
+        }
+        break;
+
+      case ACTION:
+        if (value == null) {
+          unset_action();
+        } else {
+          set_action((ProfileAction)value);
+        }
+        break;
+
+      }
+    }
+
+    public Object getFieldValue(_Fields field) {
+      switch (field) {
+      case ID:
+        return get_id();
+
+      case COMPONENT_ID:
+        return get_component_id();
+
+      case ACTION:
+        return get_action();
+
+      }
+      // Unreachable for valid _Fields constants; guards against future enum additions.
+      throw new IllegalStateException();
+    }
+
+    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+    public boolean isSet(_Fields field) {
+      if (field == null) {
+        throw new IllegalArgumentException();
+      }
+
+      switch (field) {
+      case ID:
+        return is_set_id();
+      case COMPONENT_ID:
+        return is_set_component_id();
+      case ACTION:
+        return is_set_action();
+      }
+      throw new IllegalStateException();
+    }
+
+    @Override
+    public boolean equals(Object that) {
+      if (that == null)
+        return false;
+      if (that instanceof getComponentPendingProfileActions_args)
+        return this.equals((getComponentPendingProfileActions_args)that);
+      return false;
+    }
+
+    // Field-by-field equality: two structs are equal when each field is either
+    // unset in both or set in both with equal values.
+    public boolean equals(getComponentPendingProfileActions_args that) {
+      if (that == null)
+        return false;
+
+      boolean this_present_id = true && this.is_set_id();
+      boolean that_present_id = true && that.is_set_id();
+      if (this_present_id || that_present_id) {
+        if (!(this_present_id && that_present_id))
+          return false;
+        if (!this.id.equals(that.id))
+          return false;
+      }
+
+      boolean this_present_component_id = true && this.is_set_component_id();
+      boolean that_present_component_id = true && that.is_set_component_id();
+      if (this_present_component_id || that_present_component_id) {
+        if (!(this_present_component_id && that_present_component_id))
+          return false;
+        if (!this.component_id.equals(that.component_id))
+          return false;
+      }
+
+      boolean this_present_action = true && this.is_set_action();
+      boolean that_present_action = true && that.is_set_action();
+      if (this_present_action || that_present_action) {
+        if (!(this_present_action && that_present_action))
+          return false;
+        if (!this.action.equals(that.action))
+          return false;
+      }
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      // Hash derives from a list of (presence flag, value) pairs so it stays
+      // consistent with equals(); the enum contributes its stable IDL integer,
+      // not the JVM identity hash.
+      List<Object> list = new ArrayList<Object>();
+
+      boolean present_id = true && (is_set_id());
+      list.add(present_id);
+      if (present_id)
+        list.add(id);
+
+      boolean present_component_id = true && (is_set_component_id());
+      list.add(present_component_id);
+      if (present_component_id)
+        list.add(component_id);
+
+      boolean present_action = true && (is_set_action());
+      list.add(present_action);
+      if (present_action)
+        list.add(action.getValue());
+
+      return list.hashCode();
+    }
+
+    @Override
+    public int compareTo(getComponentPendingProfileActions_args other) {
+      if (!getClass().equals(other.getClass())) {
+        return getClass().getName().compareTo(other.getClass().getName());
+      }
+
+      int lastComparison = 0;
+
+      // Ordering is by field id: first on presence (unset < set), then on value.
+      lastComparison = Boolean.valueOf(is_set_id()).compareTo(other.is_set_id());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (is_set_id()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.id, other.id);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      lastComparison = Boolean.valueOf(is_set_component_id()).compareTo(other.is_set_component_id());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (is_set_component_id()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.component_id, other.component_id);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      lastComparison = Boolean.valueOf(is_set_action()).compareTo(other.is_set_action());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (is_set_action()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.action, other.action);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      return 0;
+    }
+
+    public _Fields fieldForId(int fieldId) {
+      return _Fields.findByThriftId(fieldId);
+    }
+
+    // Serialization entry points: delegate to whichever scheme matches the
+    // protocol in use (standard field-tagged vs. compact tuple encoding).
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+    }
+
+    @Override
+    public String toString() {
+      // NOTE: the "first = false; if (!first)" dance is generator boilerplate for
+      // comma separation; with all-unconditional fields it always appends ", ".
+      StringBuilder sb = new StringBuilder("getComponentPendingProfileActions_args(");
+      boolean first = true;
+
+      sb.append("id:");
+      if (this.id == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.id);
+      }
+      first = false;
+      if (!first) sb.append(", ");
+      sb.append("component_id:");
+      if (this.component_id == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.component_id);
+      }
+      first = false;
+      if (!first) sb.append(", ");
+      sb.append("action:");
+      if (this.action == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.action);
+      }
+      first = false;
+      sb.append(")");
+      return sb.toString();
+    }
+
+    public void validate() throws org.apache.thrift.TException {
+      // check for required fields
+      // check for sub-struct validity
+    }
+
+    // Java serialization is bridged through Thrift's compact protocol so the
+    // serialized form stays independent of this class's Java field layout.
+    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+      try {
+        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+      try {
+        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private static class getComponentPendingProfileActions_argsStandardSchemeFactory implements SchemeFactory {
+      public getComponentPendingProfileActions_argsStandardScheme getScheme() {
+        return new getComponentPendingProfileActions_argsStandardScheme();
+      }
+    }
+
+    // Standard scheme: self-describing encoding with per-field type/id headers;
+    // tolerant of unknown or re-typed fields (they are skipped on read).
+    private static class getComponentPendingProfileActions_argsStandardScheme extends StandardScheme<getComponentPendingProfileActions_args> {
+
+      public void read(org.apache.thrift.protocol.TProtocol iprot, getComponentPendingProfileActions_args struct) throws org.apache.thrift.TException {
+        org.apache.thrift.protocol.TField schemeField;
+        iprot.readStructBegin();
+        while (true)
+        {
+          schemeField = iprot.readFieldBegin();
+          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+            break;
+          }
+          switch (schemeField.id) {
+            case 1: // ID
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+                struct.id = iprot.readString();
+                struct.set_id_isSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            case 2: // COMPONENT_ID
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+                struct.component_id = iprot.readString();
+                struct.set_component_id_isSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            case 3: // ACTION
+              if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+                // Enums travel as i32; findByValue yields null for unknown values
+                // (e.g. when the peer runs a newer IDL).
+                struct.action = backtype.storm.generated.ProfileAction.findByValue(iprot.readI32());
+                struct.set_action_isSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            default:
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+          }
+          iprot.readFieldEnd();
+        }
+        iprot.readStructEnd();
+        struct.validate();
+      }
+
+      public void write(org.apache.thrift.protocol.TProtocol oprot, getComponentPendingProfileActions_args struct) throws org.apache.thrift.TException {
+        struct.validate();
+
+        oprot.writeStructBegin(STRUCT_DESC);
+        if (struct.id != null) {
+          oprot.writeFieldBegin(ID_FIELD_DESC);
+          oprot.writeString(struct.id);
+          oprot.writeFieldEnd();
+        }
+        if (struct.component_id != null) {
+          oprot.writeFieldBegin(COMPONENT_ID_FIELD_DESC);
+          oprot.writeString(struct.component_id);
+          oprot.writeFieldEnd();
+        }
+        if (struct.action != null) {
+          oprot.writeFieldBegin(ACTION_FIELD_DESC);
+          oprot.writeI32(struct.action.getValue());
+          oprot.writeFieldEnd();
+        }
+        oprot.writeFieldStop();
+        oprot.writeStructEnd();
+      }
+
+    }
+
+    private static class getComponentPendingProfileActions_argsTupleSchemeFactory implements SchemeFactory {
+      public getComponentPendingProfileActions_argsTupleScheme getScheme() {
+        return new getComponentPendingProfileActions_argsTupleScheme();
+      }
+    }
+
+    // Tuple scheme: compact encoding — a presence BitSet followed by the set
+    // fields' raw values, with no per-field headers. Both sides must agree on
+    // the field layout, so it is only safe between identical IDL versions.
+    private static class getComponentPendingProfileActions_argsTupleScheme extends TupleScheme<getComponentPendingProfileActions_args> {
+
+      @Override
+      public void write(org.apache.thrift.protocol.TProtocol prot, getComponentPendingProfileActions_args struct) throws org.apache.thrift.TException {
+        TTupleProtocol oprot = (TTupleProtocol) prot;
+        BitSet optionals = new BitSet();
+        if (struct.is_set_id()) {
+          optionals.set(0);
+        }
+        if (struct.is_set_component_id()) {
+          optionals.set(1);
+        }
+        if (struct.is_set_action()) {
+          optionals.set(2);
+        }
+        oprot.writeBitSet(optionals, 3);
+        if (struct.is_set_id()) {
+          oprot.writeString(struct.id);
+        }
+        if (struct.is_set_component_id()) {
+          oprot.writeString(struct.component_id);
+        }
+        if (struct.is_set_action()) {
+          oprot.writeI32(struct.action.getValue());
+        }
+      }
+
+      @Override
+      public void read(org.apache.thrift.protocol.TProtocol prot, getComponentPendingProfileActions_args struct) throws org.apache.thrift.TException {
+        TTupleProtocol iprot = (TTupleProtocol) prot;
+        BitSet incoming = iprot.readBitSet(3);
+        if (incoming.get(0)) {
+          struct.id = iprot.readString();
+          struct.set_id_isSet(true);
+        }
+        if (incoming.get(1)) {
+          struct.component_id = iprot.readString();
+          struct.set_component_id_isSet(true);
+        }
+        if (incoming.get(2)) {
+          struct.action = backtype.storm.generated.ProfileAction.findByValue(iprot.readI32());
+          struct.set_action_isSet(true);
+        }
+      }
+    }
+
+  }
+
+  // Thrift-generated wrapper for the return value of getComponentPendingProfileActions:
+  // a single "success" field (id 0) holding the list of pending ProfileRequests.
+  // NOTE(review): this class is compiler-generated ("DO NOT EDIT"); the only code
+  // change below is re-aligning the mis-indented closing brace of write(TProtocol),
+  // which deviated from the generator's output and from the sibling _args class.
+  public static class getComponentPendingProfileActions_result implements org.apache.thrift.TBase<getComponentPendingProfileActions_result, getComponentPendingProfileActions_result._Fields>, java.io.Serializable, Cloneable, Comparable<getComponentPendingProfileActions_result>   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getComponentPendingProfileActions_result");
+
+    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0);
+
+    // Protocol family -> serializer factory (standard vs. tuple encoding).
+    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+    static {
+      schemes.put(StandardScheme.class, new getComponentPendingProfileActions_resultStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new getComponentPendingProfileActions_resultTupleSchemeFactory());
+    }
+
+    private List<ProfileRequest> success; // required
+
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+      SUCCESS((short)0, "success");
+
+      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+      static {
+        for (_Fields field : EnumSet.allOf(_Fields.class)) {
+          byName.put(field.getFieldName(), field);
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, or null if its not found.
+       */
+      public static _Fields findByThriftId(int fieldId) {
+        switch(fieldId) {
+          case 0: // SUCCESS
+            return SUCCESS;
+          default:
+            return null;
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, throwing an exception
+       * if it is not found.
+       */
+      public static _Fields findByThriftIdOrThrow(int fieldId) {
+        _Fields fields = findByThriftId(fieldId);
+        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+        return fields;
+      }
+
+      /**
+       * Find the _Fields constant that matches name, or null if its not found.
+       */
+      public static _Fields findByName(String name) {
+        return byName.get(name);
+      }
+
+      private final short _thriftId;
+      private final String _fieldName;
+
+      _Fields(short thriftId, String fieldName) {
+        _thriftId = thriftId;
+        _fieldName = fieldName;
+      }
+
+      public short getThriftFieldId() {
+        return _thriftId;
+      }
+
+      public String getFieldName() {
+        return _fieldName;
+      }
+    }
+
+    // isset id assignments
+    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+    static {
+      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+              new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ProfileRequest.class))));
+      metaDataMap = Collections.unmodifiableMap(tmpMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getComponentPendingProfileActions_result.class, metaDataMap);
+    }
+
+    public getComponentPendingProfileActions_result() {
+    }
+
+    public getComponentPendingProfileActions_result(
+      List<ProfileRequest> success)
+    {
+      this();
+      this.success = success;
+    }
+
+    /**
+     * Performs a deep copy on <i>other</i>.
+     */
+    public getComponentPendingProfileActions_result(getComponentPendingProfileActions_result other) {
+      // ProfileRequest is mutable, so each element is deep-copied.
+      if (other.is_set_success()) {
+        List<ProfileRequest> __this__success = new ArrayList<ProfileRequest>(other.success.size());
+        for (ProfileRequest other_element : other.success) {
+          __this__success.add(new ProfileRequest(other_element));
+        }
+        this.success = __this__success;
+      }
+    }
+
+    public getComponentPendingProfileActions_result deepCopy() {
+      return new getComponentPendingProfileActions_result(this);
+    }
+
+    @Override
+    public void clear() {
+      this.success = null;
+    }
+
+    public int get_success_size() {
+      return (this.success == null) ? 0 : this.success.size();
+    }
+
+    public java.util.Iterator<ProfileRequest> get_success_iterator() {
+      return (this.success == null) ? null : this.success.iterator();
+    }
+
+    public void add_to_success(ProfileRequest elem) {
+      // Lazily materializes the list so callers can append without a prior set.
+      if (this.success == null) {
+        this.success = new ArrayList<ProfileRequest>();
+      }
+      this.success.add(elem);
+    }
+
+    public List<ProfileRequest> get_success() {
+      return this.success;
+    }
+
+    public void set_success(List<ProfileRequest> success) {
+      this.success = success;
+    }
+
+    public void unset_success() {
+      this.success = null;
+    }
+
+    /** Returns true if field success is set (has been assigned a value) and false otherwise */
+    public boolean is_set_success() {
+      return this.success != null;
+    }
+
+    public void set_success_isSet(boolean value) {
+      if (!value) {
+        this.success = null;
+      }
+    }
+
+    public void setFieldValue(_Fields field, Object value) {
+      switch (field) {
+      case SUCCESS:
+        if (value == null) {
+          unset_success();
+        } else {
+          set_success((List<ProfileRequest>)value);
+        }
+        break;
+
+      }
+    }
+
+    public Object getFieldValue(_Fields field) {
+      switch (field) {
+      case SUCCESS:
+        return get_success();
+
+      }
+      throw new IllegalStateException();
+    }
+
+    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+    public boolean isSet(_Fields field) {
+      if (field == null) {
+        throw new IllegalArgumentException();
+      }
+
+      switch (field) {
+      case SUCCESS:
+        return is_set_success();
+      }
+      throw new IllegalStateException();
+    }
+
+    @Override
+    public boolean equals(Object that) {
+      if (that == null)
+        return false;
+      if (that instanceof getComponentPendingProfileActions_result)
+        return this.equals((getComponentPendingProfileActions_result)that);
+      return false;
+    }
+
+    public boolean equals(getComponentPendingProfileActions_result that) {
+      if (that == null)
+        return false;
+
+      boolean this_present_success = true && this.is_set_success();
+      boolean that_present_success = true && that.is_set_success();
+      if (this_present_success || that_present_success) {
+        if (!(this_present_success && that_present_success))
+          return false;
+        if (!this.success.equals(that.success))
+          return false;
+      }
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      // Presence flag + value folded into a list hash, consistent with equals().
+      List<Object> list = new ArrayList<Object>();
+
+      boolean present_success = true && (is_set_success());
+      list.add(present_success);
+      if (present_success)
+        list.add(success);
+
+      return list.hashCode();
+    }
+
+    @Override
+    public int compareTo(getComponentPendingProfileActions_result other) {
+      if (!getClass().equals(other.getClass())) {
+        return getClass().getName().compareTo(other.getClass().getName());
+      }
+
+      int lastComparison = 0;
+
+      lastComparison = Boolean.valueOf(is_set_success()).compareTo(other.is_set_success());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (is_set_success()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      return 0;
+    }
+
+    public _Fields fieldForId(int fieldId) {
+      return _Fields.findByThriftId(fieldId);
+    }
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+    }
+
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder("getComponentPendingProfileActions_result(");
+      boolean first = true;
+
+      sb.append("success:");
+      if (this.success == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.success);
+      }
+      first = false;
+      sb.append(")");
+      return sb.toString();
+    }
+
+    public void validate() throws org.apache.thrift.TException {
+      // check for required fields
+      // check for sub-struct validity
+    }
+
+    // Java serialization delegates to Thrift's compact protocol.
+    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+      try {
+        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+      try {
+        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private static class getComponentPendingProfileActions_resultStandardSchemeFactory implements SchemeFactory {
+      public getComponentPendingProfileActions_resultStandardScheme getScheme() {
+        return new getComponentPendingProfileActions_resultStandardScheme();
+      }
+    }
+
+    // Standard scheme: field-tagged encoding; unknown fields are skipped.
+    private static class getComponentPendingProfileActions_resultStandardScheme extends StandardScheme<getComponentPendingProfileActions_result> {
+
+      public void read(org.apache.thrift.protocol.TProtocol iprot, getComponentPendingProfileActions_result struct) throws org.apache.thrift.TException {
+        org.apache.thrift.protocol.TField schemeField;
+        iprot.readStructBegin();
+        while (true)
+        {
+          schemeField = iprot.readFieldBegin();
+          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+            break;
+          }
+          switch (schemeField.id) {
+            case 0: // SUCCESS
+              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+                {
+                  org.apache.thrift.protocol.TList _list666 = iprot.readListBegin();
+                  struct.success = new ArrayList<ProfileRequest>(_list666.size);
+                  ProfileRequest _elem667;
+                  for (int _i668 = 0; _i668 < _list666.size; ++_i668)
+                  {
+                    _elem667 = new ProfileRequest();
+                    _elem667.read(iprot);
+                    struct.success.add(_elem667);
+                  }
+                  iprot.readListEnd();
+                }
+                struct.set_success_isSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            default:
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+          }
+          iprot.readFieldEnd();
+        }
+        iprot.readStructEnd();
+        struct.validate();
+      }
+
+      public void write(org.apache.thrift.protocol.TProtocol oprot, getComponentPendingProfileActions_result struct) throws org.apache.thrift.TException {
+        struct.validate();
+
+        oprot.writeStructBegin(STRUCT_DESC);
+        if (struct.success != null) {
+          oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
+          {
+            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
+            for (ProfileRequest _iter669 : struct.success)
+            {
+              _iter669.write(oprot);
+            }
+            oprot.writeListEnd();
+          }
+          oprot.writeFieldEnd();
+        }
+        oprot.writeFieldStop();
+        oprot.writeStructEnd();
+      }
+
+    }
+
+    private static class getComponentPendingProfileActions_resultTupleSchemeFactory implements SchemeFactory {
+      public getComponentPendingProfileActions_resultTupleScheme getScheme() {
+        return new getComponentPendingProfileActions_resultTupleScheme();
+      }
+    }
+
+    // Tuple scheme: presence BitSet + raw values; requires identical IDL on both ends.
+    private static class getComponentPendingProfileActions_resultTupleScheme extends TupleScheme<getComponentPendingProfileActions_result> {
+
+      @Override
+      public void write(org.apache.thrift.protocol.TProtocol prot, getComponentPendingProfileActions_result struct) throws org.apache.thrift.TException {
+        TTupleProtocol oprot = (TTupleProtocol) prot;
+        BitSet optionals = new BitSet();
+        if (struct.is_set_success()) {
+          optionals.set(0);
+        }
+        oprot.writeBitSet(optionals, 1);
+        if (struct.is_set_success()) {
+          {
+            oprot.writeI32(struct.success.size());
+            for (ProfileRequest _iter670 : struct.success)
+            {
+              _iter670.write(oprot);
+            }
+          }
+        }
+      }
+
+      @Override
+      public void read(org.apache.thrift.protocol.TProtocol prot, getComponentPendingProfileActions_result struct) throws org.apache.thrift.TException {
+        TTupleProtocol iprot = (TTupleProtocol) prot;
+        BitSet incoming = iprot.readBitSet(1);
+        if (incoming.get(0)) {
+          {
+            org.apache.thrift.protocol.TList _list671 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<ProfileRequest>(_list671.size);
+            ProfileRequest _elem672;
+            for (int _i673 = 0; _i673 < _list671.size; ++_i673)
+            {
+              _elem672 = new ProfileRequest();
+              _elem672.read(iprot);
+              struct.success.add(_elem672);
+            }
+          }
+          struct.set_success_isSet(true);
+        }
+      }
+    }
+
+  }
+
   public static class uploadNewCredentials_args implements org.apache.thrift.TBase<uploadNewCredentials_args, uploadNewCredentials_args._Fields>, java.io.Serializable, Cloneable, Comparable<uploadNewCredentials_args>   {
     private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("uploadNewCredentials_args");
 

http://git-wip-us.apache.org/repos/asf/storm/blob/0c2021e6/storm-core/src/jvm/backtype/storm/generated/ProfileAction.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/generated/ProfileAction.java b/storm-core/src/jvm/backtype/storm/generated/ProfileAction.java
new file mode 100644
index 0000000..584fd03
--- /dev/null
+++ b/storm-core/src/jvm/backtype/storm/generated/ProfileAction.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Autogenerated by Thrift Compiler (0.9.2)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package backtype.storm.generated;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum ProfileAction implements org.apache.thrift.TEnum {
+  JPROFILE_STOP(0),
+  JPROFILE_START(1),
+  JPROFILE_DUMP(2),
+  JMAP_DUMP(3),
+  JSTACK_DUMP(4),
+  JVM_RESTART(5);
+
+  private final int value;
+
+  private ProfileAction(int value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the integer value of this enum value, as defined in the Thrift IDL.
+   */
+  public int getValue() {
+    return value;
+  }
+
+  /**
+   * Find the enum type by its integer value, as defined in the Thrift IDL.
+   * @return null if the value is not found.
+   */
+  public static ProfileAction findByValue(int value) { 
+    switch (value) {
+      case 0:
+        return JPROFILE_STOP;
+      case 1:
+        return JPROFILE_START;
+      case 2:
+        return JPROFILE_DUMP;
+      case 3:
+        return JMAP_DUMP;
+      case 4:
+        return JSTACK_DUMP;
+      case 5:
+        return JVM_RESTART;
+      default:
+        return null;
+    }
+  }
+}


[04/10] storm git commit: Make worker profiling optional and pluggable

Posted by bo...@apache.org.
Make worker profiling optional and pluggable


Project: http://git-wip-us.apache.org/repos/asf/storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/storm/commit/d2f9305a
Tree: http://git-wip-us.apache.org/repos/asf/storm/tree/d2f9305a
Diff: http://git-wip-us.apache.org/repos/asf/storm/diff/d2f9305a

Branch: refs/heads/master
Commit: d2f9305abb16ee3573abaab56de7b1e673168cd5
Parents: 0c2021e
Author: Kishor Patil <kp...@yahoo-inc.com>
Authored: Mon Nov 2 23:51:33 2015 -0600
Committer: Kishor Patil <kp...@yahoo-inc.com>
Committed: Mon Nov 2 23:51:33 2015 -0600

----------------------------------------------------------------------
 conf/defaults.yaml                              |  2 +
 docs/DYNAMIC_WORKER_PROFILING.md                |  5 +++
 .../clj/backtype/storm/daemon/supervisor.clj    | 39 ++++++++++----------
 storm-core/src/clj/backtype/storm/ui/core.clj   | 31 ++++++++++++----
 storm-core/src/jvm/backtype/storm/Config.java   | 15 ++++++++
 storm-core/src/ui/public/component.html         | 12 +++---
 6 files changed, 72 insertions(+), 32 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/storm/blob/d2f9305a/conf/defaults.yaml
----------------------------------------------------------------------
diff --git a/conf/defaults.yaml b/conf/defaults.yaml
index 84babc3..322d386 100644
--- a/conf/defaults.yaml
+++ b/conf/defaults.yaml
@@ -145,6 +145,8 @@ worker.heap.memory.mb: 768
 worker.childopts: "-Xmx%HEAP-MEM%m -XX:+PrintGCDetails -Xloggc:artifacts/gc.log -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=1M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=artifacts/heapdump"
 worker.gc.childopts: ""
 worker.profiler.childopts: "-XX:+UnlockCommercialFeatures -XX:+FlightRecorder"
+worker.profiler.enabled: true
+worker.profiler.command: "flight.bash"
 worker.heartbeat.frequency.secs: 1
 
 # check whether dynamic log levels can be reset from DEBUG to INFO in workers

http://git-wip-us.apache.org/repos/asf/storm/blob/d2f9305a/docs/DYNAMIC_WORKER_PROFILING.md
----------------------------------------------------------------------
diff --git a/docs/DYNAMIC_WORKER_PROFILING.md b/docs/DYNAMIC_WORKER_PROFILING.md
index 727322f..4b55a80 100644
--- a/docs/DYNAMIC_WORKER_PROFILING.md
+++ b/docs/DYNAMIC_WORKER_PROFILING.md
@@ -22,3 +22,8 @@ Click on "My Dump Files" to go the logviewer UI for list of worker specific dump
 
 ![Dump Files Links for worker](images/dynamic_profiling_debugging_3.png "Dump Files Links for worker")
 
+Configuration
+-------------
+
+The "worker.profiler.command" can be configured to point to a specific pluggable profiler or heapdump command. Worker profiling can be turned off via "worker.profiler.enabled" if the plugin is not available.
+

http://git-wip-us.apache.org/repos/asf/storm/blob/d2f9305a/storm-core/src/clj/backtype/storm/daemon/supervisor.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/daemon/supervisor.clj b/storm-core/src/clj/backtype/storm/daemon/supervisor.clj
index 8fe6eed..e5740cb 100644
--- a/storm-core/src/clj/backtype/storm/daemon/supervisor.clj
+++ b/storm-core/src/clj/backtype/storm/daemon/supervisor.clj
@@ -530,25 +530,23 @@
 (defn java-cmd []
   (jvm-cmd "java"))
 
-(def PROFILE-CMD "flight.bash")
+(defn jmap-dump-cmd [profile-cmd pid target-dir]
+  [profile-cmd pid "jmap" target-dir])
 
-(defn jmap-dump-cmd [pid target-dir]
-  [PROFILE-CMD pid "jmap" target-dir])
+(defn jstack-dump-cmd [profile-cmd pid target-dir]
+  [profile-cmd pid "jstack" target-dir])
 
-(defn jstack-dump-cmd [pid target-dir]
-  [PROFILE-CMD pid "jstack" target-dir])
+(defn jprofile-start [profile-cmd pid]
+  [profile-cmd pid "start"])
 
-(defn jprofile-start [pid]
-  [PROFILE-CMD pid "start" ])
+(defn jprofile-stop [profile-cmd pid target-dir]
+  [profile-cmd pid "stop" target-dir])
 
-(defn jprofile-stop [pid target-dir]
-  [PROFILE-CMD pid "stop" target-dir])
+(defn jprofile-dump [profile-cmd pid workers-artifacts-directory]
+  [profile-cmd pid "dump" workers-artifacts-directory])
 
-(defn jprofile-dump [pid workers-artifacts-directory]
-  [PROFILE-CMD pid "dump" workers-artifacts-directory])
-
-(defn jprofile-jvm-restart [pid]
-  [PROFILE-CMD pid "kill" ])
+(defn jprofile-jvm-restart [profile-cmd pid]
+  [profile-cmd pid "kill"])
 
 (defn- delete-topology-profiler-action [storm-cluster-state storm-id profile-action]
   (log-message "Deleting profiler action.." profile-action)
@@ -587,6 +585,7 @@
             stormid->profiler-actions @(:stormid->profiler-actions supervisor)
             storm-cluster-state (:storm-cluster-state supervisor)
             hostname (:my-hostname supervisor)
+            profile-cmd (conf WORKER-PROFILER-COMMAND)
             new-assignment @(:curr-assignment supervisor)
             assigned-storm-ids (assigned-storm-ids-from-port-assignments new-assignment)]
         (doseq [[storm-id profiler-actions] stormid->profiler-actions]
@@ -605,14 +604,14 @@
                       ;; Until PROFILER_STOP action is invalid, keep launching profiler start in case worker restarted
                       ;; The profiler plugin script validates if JVM is recording before starting another recording.
                       command (cond
-                                (= action ProfileAction/JMAP_DUMP) (jmap-dump-cmd worker-pid target-dir)
-                                (= action ProfileAction/JSTACK_DUMP) (jstack-dump-cmd worker-pid target-dir)
-                                (= action ProfileAction/JPROFILE_DUMP) (jprofile-dump worker-pid target-dir)
-                                (= action ProfileAction/JVM_RESTART) (jprofile-jvm-restart worker-pid)
+                                (= action ProfileAction/JMAP_DUMP) (jmap-dump-cmd profile-cmd worker-pid target-dir)
+                                (= action ProfileAction/JSTACK_DUMP) (jstack-dump-cmd profile-cmd worker-pid target-dir)
+                                (= action ProfileAction/JPROFILE_DUMP) (jprofile-dump profile-cmd worker-pid target-dir)
+                                (= action ProfileAction/JVM_RESTART) (jprofile-jvm-restart profile-cmd worker-pid)
                                 (and (not stop?)
                                      (= action ProfileAction/JPROFILE_STOP))
-                                  (jprofile-start worker-pid) ;; Ensure the profiler is still running
-                                (and stop? (= action ProfileAction/JPROFILE_STOP)) (jprofile-stop worker-pid target-dir))
+                                  (jprofile-start profile-cmd worker-pid) ;; Ensure the profiler is still running
+                                (and stop? (= action ProfileAction/JPROFILE_STOP)) (jprofile-stop profile-cmd worker-pid target-dir))
                       action-on-exit (fn [exit-code]
                                        (log-message log-prefix " profile-action exited for code: " exit-code)
                                        (if (and (= exit-code 0) stop?)

http://git-wip-us.apache.org/repos/asf/storm/blob/d2f9305a/storm-core/src/clj/backtype/storm/ui/core.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/backtype/storm/ui/core.clj b/storm-core/src/clj/backtype/storm/ui/core.clj
index c6ffe22..60404f0 100644
--- a/storm-core/src/clj/backtype/storm/ui/core.clj
+++ b/storm-core/src/clj/backtype/storm/ui/core.clj
@@ -83,6 +83,14 @@
            (throw (AuthorizationException.
                    (str "UI request '" op "' for '" user "' user is not authorized")))))))))
 
+
+(defn assert-authorized-profiler-action
+  [op]
+  (if-not (*STORM-CONF* WORKER-PROFILER-ENABLED)
+    (throw (AuthorizationException.
+             (str "UI request for profiler action '" op "' is disabled.")))))
+
+
 (defn executor-summary-type
   [topology ^ExecutorSummary s]
   (component-type topology (.get_component_id s)))
@@ -799,7 +807,10 @@
                                       (.get_eventlog_host comp-page-info)
                                       (.get_eventlog_port comp-page-info)
                                       secure?)
-       "profilerActive" (get-active-profile-actions nimbus topology-id component)))))
+       "profileActionEnabled" (*STORM-CONF* WORKER-PROFILER-ENABLED)
+       "profilerActive" (if (*STORM-CONF* WORKER-PROFILER-ENABLED)
+                          (get-active-profile-actions nimbus topology-id component)
+                          [])))))
     
 (defn- level-to-dict [level]
   (if level
@@ -1017,7 +1028,8 @@
          (let [user (.getUserName http-creds-handler servlet-request)
                topology-conf (from-json
                               (.getTopologyConf ^Nimbus$Client nimbus id))]
-           (assert-authorized-user "setWorkerProfiler" (topology-config id)))
+           (assert-authorized-user "setWorkerProfiler" (topology-config id))
+           (assert-authorized-profiler-action "start"))
 
          (let [[host, port] (split host-port #":")
                nodeinfo (NodeInfo. host (set [(Long. port)]))
@@ -1041,7 +1053,8 @@
          (let [user (.getUserName http-creds-handler servlet-request)
                topology-conf (from-json
                               (.getTopologyConf ^Nimbus$Client nimbus id))]
-           (assert-authorized-user "setWorkerProfiler" (topology-config id)))
+           (assert-authorized-user "setWorkerProfiler" (topology-config id))
+           (assert-authorized-profiler-action "stop"))
          (let [[host, port] (split host-port #":")
                nodeinfo (NodeInfo. host (set [(Long. port)]))
                timestamp 0
@@ -1059,7 +1072,8 @@
          (let [user (.getUserName http-creds-handler servlet-request)
                topology-conf (from-json
                               (.getTopologyConf ^Nimbus$Client nimbus id))]
-           (assert-authorized-user "setWorkerProfiler" (topology-config id)))
+           (assert-authorized-user "setWorkerProfiler" (topology-config id))
+           (assert-authorized-profiler-action "dumpprofile"))
          (let [[host, port] (split host-port #":")
                nodeinfo (NodeInfo. host (set [(Long. port)]))
                timestamp (System/currentTimeMillis)
@@ -1077,7 +1091,8 @@
          (let [user (.getUserName http-creds-handler servlet-request)
                topology-conf (from-json
                               (.getTopologyConf ^Nimbus$Client nimbus id))]
-           (assert-authorized-user "setWorkerProfiler" (topology-config id)))
+           (assert-authorized-user "setWorkerProfiler" (topology-config id))
+           (assert-authorized-profiler-action "dumpjstack"))
          (let [[host, port] (split host-port #":")
                nodeinfo (NodeInfo. host (set [(Long. port)]))
                timestamp (System/currentTimeMillis)
@@ -1095,7 +1110,8 @@
          (let [user (.getUserName http-creds-handler servlet-request)
                topology-conf (from-json
                               (.getTopologyConf ^Nimbus$Client nimbus id))]
-           (assert-authorized-user "setWorkerProfiler" (topology-config id)))
+           (assert-authorized-user "setWorkerProfiler" (topology-config id))
+           (assert-authorized-profiler-action "restartworker"))
          (let [[host, port] (split host-port #":")
                nodeinfo (NodeInfo. host (set [(Long. port)]))
                timestamp (System/currentTimeMillis)
@@ -1113,7 +1129,8 @@
          (let [user (.getUserName http-creds-handler servlet-request)
                topology-conf (from-json
                               (.getTopologyConf ^Nimbus$Client nimbus id))]
-           (assert-authorized-user "setWorkerProfiler" (topology-config id)))
+           (assert-authorized-user "setWorkerProfiler" (topology-config id))
+           (assert-authorized-profiler-action "dumpheap"))
          (let [[host, port] (split host-port #":")
                nodeinfo (NodeInfo. host (set [(Long. port)]))
                timestamp (System/currentTimeMillis)

http://git-wip-us.apache.org/repos/asf/storm/blob/d2f9305a/storm-core/src/jvm/backtype/storm/Config.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/backtype/storm/Config.java b/storm-core/src/jvm/backtype/storm/Config.java
index 303a3a0..ee6de1b 100644
--- a/storm-core/src/jvm/backtype/storm/Config.java
+++ b/storm-core/src/jvm/backtype/storm/Config.java
@@ -1093,6 +1093,21 @@ public class Config extends HashMap<String, Object> {
     public static final String WORKER_PROFILER_CHILDOPTS = "worker.profiler.childopts";
 
     /**
+     * This configuration enables or disables component page profiling and debugging for workers.
+     */
+    @isBoolean
+    public static final String WORKER_PROFILER_ENABLED = "worker.profiler.enabled";
+
+    /**
+     * The command that the supervisor launches with the worker arguments
+     * pid, action and [target_directory],
+     * where action is one of: start profile, stop profile, jstack, heapdump,
+     * or kill against the given pid.
+     */
+    @isString
+    public static final String WORKER_PROFILER_COMMAND = "worker.profiler.command";
+
+    /**
      * The jvm opts provided to workers launched by this supervisor for GC. All "%ID%" substrings are replaced
      * with an identifier for this worker.  Because the JVM complains about multiple GC opts the topology
      * can override this default value by setting topology.worker.gc.childopts.

http://git-wip-us.apache.org/repos/asf/storm/blob/d2f9305a/storm-core/src/ui/public/component.html
----------------------------------------------------------------------
diff --git a/storm-core/src/ui/public/component.html b/storm-core/src/ui/public/component.html
index 60a85cf..ae2463c 100644
--- a/storm-core/src/ui/public/component.html
+++ b/storm-core/src/ui/public/component.html
@@ -166,11 +166,13 @@ $(document).ready(function() {
               componentActions.append(Mustache.render($(template).filter("#component-actions-template").html(),buttonJsonData));
             });
 
-            jsError(function() {
-                var part = $(template).filter('#profiler-active-partial').html();
-                var partials = {"profilerActive": part};
-                profilerControl.append(Mustache.render($(template).filter("#profiling-template").html(), response, partials));
-            });
+            if(response["profileActionEnabled"] == true) {
+                jsError(function () {
+                    var part = $(template).filter('#profiler-active-partial').html();
+                    var partials = {"profilerActive": part};
+                    profilerControl.append(Mustache.render($(template).filter("#profiling-template").html(), response, partials));
+                });
+            }
 
             if(response["componentType"] == "spout") {
                 componentStatsDetail.append(Mustache.render($(template).filter("#spout-stats-detail-template").html(),response));


[09/10] storm git commit: Merge branch 'profiling-worker' of https://github.com/kishorvpatil/incubator-storm into STORM-1157

Posted by bo...@apache.org.
Merge branch 'profiling-worker' of https://github.com/kishorvpatil/incubator-storm into STORM-1157

STORM-1157: Adding dynamic profiling for worker, restarting worker, jstack, heap dump, and profiling


Project: http://git-wip-us.apache.org/repos/asf/storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/storm/commit/6e0fc9ea
Tree: http://git-wip-us.apache.org/repos/asf/storm/tree/6e0fc9ea
Diff: http://git-wip-us.apache.org/repos/asf/storm/diff/6e0fc9ea

Branch: refs/heads/master
Commit: 6e0fc9ea4da8d3d2ae919c05000bf6f2a1c709b8
Parents: f3568d7 a4876b2
Author: Robert (Bobby) Evans <ev...@yahoo-inc.com>
Authored: Wed Nov 4 11:10:20 2015 -0600
Committer: Robert (Bobby) Evans <ev...@yahoo-inc.com>
Committed: Wed Nov 4 11:10:20 2015 -0600

----------------------------------------------------------------------
 STORM-UI-REST-API.md                            |  735 -------
 bin/flight.bash                                 |  139 ++
 conf/defaults.yaml                              |    3 +
 docs/DYNAMIC_LOG_LEVEL_SETTINGS.md              |   41 -
 docs/documentation/Documentation.md             |    3 +
 .../documentation/dynamic-log-level-settings.md |   41 +
 docs/documentation/dynamic-worker-profiling.md  |   29 +
 .../images/dynamic_log_level_settings_1.png     |  Bin 0 -> 93689 bytes
 .../images/dynamic_log_level_settings_2.png     |  Bin 0 -> 78785 bytes
 .../images/dynamic_profiling_debugging_1.png    |  Bin 0 -> 93635 bytes
 .../images/dynamic_profiling_debugging_2.png    |  Bin 0 -> 138120 bytes
 .../images/dynamic_profiling_debugging_3.png    |  Bin 0 -> 96974 bytes
 docs/documentation/ui-rest-api.md               |  930 ++++++++
 docs/images/dynamic_log_level_settings_1.png    |  Bin 93689 -> 0 bytes
 docs/images/dynamic_log_level_settings_2.png    |  Bin 78785 -> 0 bytes
 storm-core/src/clj/backtype/storm/cluster.clj   |   57 +-
 storm-core/src/clj/backtype/storm/config.clj    |    4 +
 storm-core/src/clj/backtype/storm/converter.clj |   19 +-
 .../src/clj/backtype/storm/daemon/logviewer.clj |   45 +
 .../src/clj/backtype/storm/daemon/nimbus.clj    |   35 +-
 .../clj/backtype/storm/daemon/supervisor.clj    |  157 +-
 .../src/clj/backtype/storm/daemon/worker.clj    |    4 +-
 storm-core/src/clj/backtype/storm/stats.clj     |    9 +
 storm-core/src/clj/backtype/storm/ui/core.clj   |  166 +-
 storm-core/src/clj/backtype/storm/util.clj      |   10 +-
 storm-core/src/jvm/backtype/storm/Config.java   |   21 +
 .../jvm/backtype/storm/generated/Nimbus.java    | 1977 +++++++++++++++++-
 .../backtype/storm/generated/ProfileAction.java |   74 +
 .../storm/generated/ProfileRequest.java         |  631 ++++++
 .../auth/authorizer/SimpleACLAuthorizer.java    |    8 +
 .../src/native/worker-launcher/impl/main.c      |   10 +
 .../worker-launcher/impl/worker-launcher.c      |   47 +
 .../worker-launcher/impl/worker-launcher.h      |    2 +
 storm-core/src/py/storm/Nimbus-remote           |   14 +
 storm-core/src/py/storm/Nimbus.py               |  396 ++++
 storm-core/src/py/storm/ttypes.py               |  122 ++
 storm-core/src/storm.thrift                     |   20 +
 storm-core/src/ui/public/component.html         |  167 +-
 .../templates/component-page-template.html      |   53 +
 39 files changed, 5165 insertions(+), 804 deletions(-)
----------------------------------------------------------------------



[08/10] storm git commit: Fixing rest-api documentation and relocating it.

Posted by bo...@apache.org.
Fixing rest-api documentation and relocating it.


Project: http://git-wip-us.apache.org/repos/asf/storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/storm/commit/a4876b2d
Tree: http://git-wip-us.apache.org/repos/asf/storm/tree/a4876b2d
Diff: http://git-wip-us.apache.org/repos/asf/storm/diff/a4876b2d

Branch: refs/heads/master
Commit: a4876b2dc972e4a05f820125da2b933233a217ef
Parents: 8d91ad9
Author: Kishor Patil <kp...@yahoo-inc.com>
Authored: Wed Nov 4 11:03:32 2015 -0600
Committer: Kishor Patil <kp...@yahoo-inc.com>
Committed: Wed Nov 4 11:03:32 2015 -0600

----------------------------------------------------------------------
 STORM-UI-REST-API.md                | 930 -------------------------------
 docs/documentation/Documentation.md |   1 +
 docs/documentation/ui-rest-api.md   | 930 +++++++++++++++++++++++++++++++
 3 files changed, 931 insertions(+), 930 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/storm/blob/a4876b2d/STORM-UI-REST-API.md
----------------------------------------------------------------------
diff --git a/STORM-UI-REST-API.md b/STORM-UI-REST-API.md
deleted file mode 100644
index 6260afa..0000000
--- a/STORM-UI-REST-API.md
+++ /dev/null
@@ -1,930 +0,0 @@
-# Storm UI REST API
-
-The Storm UI daemon provides a REST API that allows you to interact with a Storm cluster, which includes retrieving
-metrics data and configuration information as well as management operations such as starting or stopping topologies.
-
-
-# Data format
-
-The REST API returns JSON responses and supports JSONP.
-Clients can pass a callback query parameter to wrap JSON in the callback function.
-
-
-# Using the UI REST API
-
-_Note: It is recommended to ignore undocumented elements in the JSON response because future versions of Storm may not_
-_support those elements anymore._
-
-
-## REST API Base URL
-
-The REST API is part of the UI daemon of Storm (started by `storm ui`) and thus runs on the same host and port as the
-Storm UI (the UI daemon is often run on the same host as the Nimbus daemon).  The port is configured by `ui.port`,
-which is set to `8080` by default (see [defaults.yaml](conf/defaults.yaml)).
-
-The API base URL would thus be:
-
-    http://<ui-host>:<ui-port>/api/v1/...
-
-You can use a tool such as `curl` to talk to the REST API:
-
-    # Request the cluster configuration.
-    # Note: We assume ui.port is configured to the default value of 8080.
-    $ curl http://<ui-host>:8080/api/v1/cluster/configuration
-
-##Impersonating a user in secure environment
-In a secure environment an authenticated user can impersonate another user. To impersonate a user the caller must pass
-`doAsUser` param or header with value set to the user that the request needs to be performed as. Please see SECURITY.MD
-to learn more about how to setup impersonation ACLs and authorization. The rest API uses the same configs and acls that
-are used by nimbus.
-
-Examples:
-
-```no-highlight
- 1. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1425844354\?doAsUser=testUSer1
- 2. curl 'http://localhost:8080/api/v1/topology/wordcount-1-1425844354/activate' -X POST -H 'doAsUser:testUSer1'
-```
-
-## GET Operations
-
-### /api/v1/cluster/configuration (GET)
-
-Returns the cluster configuration.
-
-Sample response (does not include all the data fields):
-
-```json
-  {
-    "dev.zookeeper.path": "/tmp/dev-storm-zookeeper",
-    "topology.tick.tuple.freq.secs": null,
-    "topology.builtin.metrics.bucket.size.secs": 60,
-    "topology.fall.back.on.java.serialization": true,
-    "topology.max.error.report.per.interval": 5,
-    "zmq.linger.millis": 5000,
-    "topology.skip.missing.kryo.registrations": false,
-    "storm.messaging.netty.client_worker_threads": 1,
-    "ui.childopts": "-Xmx768m",
-    "storm.zookeeper.session.timeout": 20000,
-    "nimbus.reassign": true,
-    "topology.trident.batch.emit.interval.millis": 500,
-    "storm.messaging.netty.flush.check.interval.ms": 10,
-    "nimbus.monitor.freq.secs": 10,
-    "logviewer.childopts": "-Xmx128m",
-    "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib",
-    "topology.executor.send.buffer.size": 1024,
-    }
-```
-
-### /api/v1/cluster/summary (GET)
-
-Returns cluster summary information such as nimbus uptime or number of supervisors.
-
-Response fields:
-
-|Field  |Value|Description
-|---	|---	|---
-|stormVersion|String| Storm version|
-|supervisors|Integer| Number of supervisors running|
-|topologies| Integer| Number of topologies running| 
-|slotsTotal| Integer|Total number of available worker slots|
-|slotsUsed| Integer| Number of worker slots used|
-|slotsFree| Integer |Number of worker slots available|
-|executorsTotal| Integer |Total number of executors|
-|tasksTotal| Integer |Total tasks|
-
-Sample response:
-
-```json
-   {
-    "stormVersion": "0.9.2-incubating-SNAPSHOT",
-    "supervisors": 1,
-    "slotsTotal": 4,
-    "slotsUsed": 3,
-    "slotsFree": 1,
-    "executorsTotal": 28,
-    "tasksTotal": 28
-    }
-```
-
-### /api/v1/supervisor/summary (GET)
-
-Returns summary information for all supervisors.
-
-Response fields:
-
-|Field  |Value|Description|
-|---	|---	|---
-|id| String | Supervisor's id|
-|host| String| Supervisor's host name|
-|uptime| String| Shows how long the supervisor is running|
-|uptimeSeconds| Integer| Shows how long the supervisor is running in seconds|
-|slotsTotal| Integer| Total number of available worker slots for this supervisor|
-|slotsUsed| Integer| Number of worker slots used on this supervisor|
-
-Sample response:
-
-```json
-{
-    "supervisors": [
-        {
-            "id": "0b879808-2a26-442b-8f7d-23101e0c3696",
-            "host": "10.11.1.7",
-            "uptime": "5m 58s",
-            "uptimeSeconds": 358,
-            "slotsTotal": 4,
-            "slotsUsed": 3
-        }
-    ]
-}
-```
-
-### /api/v1/nimbus/summary (GET)
-
-Returns summary information for all nimbus hosts.
-
-Response fields:
-
-|Field  |Value|Description|
-|---	|---	|---
-|host| String | Nimbus' host name|
-|port| int| Nimbus' port number|
-|status| String| Possible values are Leader, Not a Leader, Dead|
-|nimbusUpTime| String| Shows since how long the nimbus has been running|
-|nimbusUpTimeSeconds| String| Shows since how long the nimbus has been running in seconds|
-|nimbusLogLink| String| Logviewer url to view the nimbus.log|
-|version| String| Version of storm this nimbus host is running|
-
-Sample response:
-
-```json
-{
-    "nimbuses":[
-        {
-            "host":"192.168.202.1",
-            "port":6627,
-            "nimbusLogLink":"http:\/\/192.168.202.1:8000\/log?file=nimbus.log",
-            "status":Leader,
-            "version":"0.10.0-SNAPSHOT",
-            "nimbusUpTime":"3m 33s",
-            "nimbusUpTimeSeconds":"213"
-        }
-    ]
-}
-```
-
-### /api/v1/topology/summary (GET)
-
-Returns summary information for all topologies.
-
-Response fields:
-
-|Field  |Value | Description|
-|---	|---	|---
-|id| String| Topology Id|
-|name| String| Topology Name|
-|status| String| Topology Status|
-|uptime| String|  Shows how long the topology is running|
-|uptimeSeconds| Integer|  Shows how long the topology is running in seconds|
-|tasksTotal| Integer |Total number of tasks for this topology|
-|workersTotal| Integer |Number of workers used for this topology|
-|executorsTotal| Integer |Number of executors used for this topology|
-|replicationCount| Integer |Number of nimbus hosts on which this topology code is replicated|
-|requestedMemOnHeap| Double|Requested On-Heap Memory by User (MB)
-|requestedMemOffHeap| Double|Requested Off-Heap Memory by User (MB)|
-|requestedTotalMem| Double|Requested Total Memory by User (MB)|
-|requestedCpu| Double|Requested CPU by User (%)|
-|assignedMemOnHeap| Double|Assigned On-Heap Memory by Scheduler (MB)|
-|assignedMemOffHeap| Double|Assigned Off-Heap Memory by Scheduler (MB)|
-|assignedTotalMem| Double|Assigned Total Memory by Scheduler (MB)|
-|assignedCpu| Double|Assigned CPU by Scheduler (%)|
-
-Sample response:
-
-```json
-{
-    "topologies": [
-        {
-            "id": "WordCount3-1-1402960825",
-            "name": "WordCount3",
-            "status": "ACTIVE",
-            "uptime": "6m 5s",
-            "uptimeSeconds": 365,
-            "tasksTotal": 28,
-            "workersTotal": 3,
-            "executorsTotal": 28,
-            "replicationCount": 1,
-            "requestedMemOnHeap": 640,
-            "requestedMemOffHeap": 128,
-            "requestedTotalMem": 768,
-            "requestedCpu": 80,
-            "assignedMemOnHeap": 640,
-            "assignedMemOffHeap": 128,
-            "assignedTotalMem": 768,
-            "assignedCpu": 80
-        }
-    ]
-}
-```
-
-### /api/v1/topology/:id (GET)
-
-Returns topology information and statistics.  Substitute id with topology id.
-
-Request parameters:
-
-|Parameter |Value   |Description  |
-|----------|--------|-------------|
-|id   	   |String (required)| Topology Id  |
-|window    |String. Default value :all-time| Window duration for metrics in seconds|
-|sys       |String. Values 1 or 0. Default value 0| Controls including sys stats part of the response|
-
-
-Response fields:
-
-|Field  |Value |Description|
-|---	|---	|---
-|id| String| Topology Id|
-|name| String |Topology Name|
-|uptime| String |How long the topology has been running|
-|uptimeSeconds| Integer |How long the topology has been running in seconds|
-|status| String |Current status of the topology, e.g. "ACTIVE"|
-|tasksTotal| Integer |Total number of tasks for this topology|
-|workersTotal| Integer |Number of workers used for this topology|
-|executorsTotal| Integer |Number of executors used for this topology|
-|msgTimeout| Integer | Number of seconds a tuple has before the spout considers it failed |
-|windowHint| String | window param value in "hh mm ss" format. Default value is "All Time"|
-|topologyStats| Array | Array of all the topology related stats per time window|
-|topologyStats.windowPretty| String |Duration passed in HH:MM:SS format|
-|topologyStats.window| String |User requested time window for metrics|
-|topologyStats.emitted| Long |Number of messages emitted in given window|
-|topologyStats.trasferred| Long |Number messages transferred in given window|
-|topologyStats.completeLatency| String (double value returned in String format) |Total latency for processing the message|
-|topologyStats.acked| Long |Number of messages acked in given window|
-|topologyStats.failed| Long |Number of messages failed in given window|
-|spouts| Array | Array of all the spout components in the topology|
-|spouts.spoutId| String |Spout id|
-|spouts.executors| Integer |Number of executors for the spout|
-|spouts.emitted| Long |Number of messages emitted in given window |
-|spouts.completeLatency| String (double value returned in String format) |Total latency for processing the message|
-|spouts.transferred| Long |Total number of messages  transferred in given window|
-|spouts.tasks| Integer |Total number of tasks for the spout|
-|spouts.lastError| String |Shows the last error happened in a spout|
-|spouts.errorLapsedSecs| Integer | Number of seconds elapsed since that last error happened in a spout|
-|spouts.errorWorkerLogLink| String | Link to the worker log that reported the exception |
-|spouts.acked| Long |Number of messages acked|
-|spouts.failed| Long |Number of messages failed|
-|bolts| Array | Array of bolt components in the topology|
-|bolts.boltId| String |Bolt id|
-|bolts.capacity| String (double value returned in String format) |This value indicates number of messages executed * average execute latency / time window|
-|bolts.processLatency| String (double value returned in String format)  |Average time of the bolt to ack a message after it was received|
-|bolts.executeLatency| String (double value returned in String format) |Average time to run the execute method of the bolt|
-|bolts.executors| Integer |Number of executor tasks in the bolt component|
-|bolts.tasks| Integer |Number of instances of bolt|
-|bolts.acked| Long |Number of tuples acked by the bolt|
-|bolts.failed| Long |Number of tuples failed by the bolt|
-|bolts.lastError| String |Shows the last error occurred in the bolt|
-|bolts.errorLapsedSecs| Integer |Number of seconds elapsed since that last error happened in a bolt|
-|bolts.errorWorkerLogLink| String | Link to the worker log that reported the exception |
-|bolts.emitted| Long |Number of tuples emitted|
-|replicationCount| Integer |Number of nimbus hosts on which this topology code is replicated|
-
-Examples:
-
-```no-highlight
- 1. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825
- 2. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825?sys=1
- 3. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825?window=600
-```
-
-Sample response:
-
-```json
- {
-    "name": "WordCount3",
-    "id": "WordCount3-1-1402960825",
-    "workersTotal": 3,
-    "window": "600",
-    "status": "ACTIVE",
-    "tasksTotal": 28,
-    "executorsTotal": 28,
-    "uptime": "29m 19s",
-    "uptimeSeconds": 1759,
-    "msgTimeout": 30,
-    "windowHint": "10m 0s",
-    "topologyStats": [
-        {
-            "windowPretty": "10m 0s",
-            "window": "600",
-            "emitted": 397960,
-            "transferred": 213380,
-            "completeLatency": "0.000",
-            "acked": 213460,
-            "failed": 0
-        },
-        {
-            "windowPretty": "3h 0m 0s",
-            "window": "10800",
-            "emitted": 1190260,
-            "transferred": 638260,
-            "completeLatency": "0.000",
-            "acked": 638280,
-            "failed": 0
-        },
-        {
-            "windowPretty": "1d 0h 0m 0s",
-            "window": "86400",
-            "emitted": 1190260,
-            "transferred": 638260,
-            "completeLatency": "0.000",
-            "acked": 638280,
-            "failed": 0
-        },
-        {
-            "windowPretty": "All time",
-            "window": ":all-time",
-            "emitted": 1190260,
-            "transferred": 638260,
-            "completeLatency": "0.000",
-            "acked": 638280,
-            "failed": 0
-        }
-    ],
-    "spouts": [
-        {
-            "executors": 5,
-            "emitted": 28880,
-            "completeLatency": "0.000",
-            "transferred": 28880,
-            "acked": 0,
-            "spoutId": "spout",
-            "tasks": 5,
-            "lastError": "",
-            "errorLapsedSecs": null,
-            "failed": 0
-        }
-    ],
-        "bolts": [
-        {
-            "executors": 12,
-            "emitted": 184580,
-            "transferred": 0,
-            "acked": 184640,
-            "executeLatency": "0.048",
-            "tasks": 12,
-            "executed": 184620,
-            "processLatency": "0.043",
-            "boltId": "count",
-            "lastError": "",
-            "errorLapsedSecs": null,
-            "capacity": "0.003",
-            "failed": 0
-        },
-        {
-            "executors": 8,
-            "emitted": 184500,
-            "transferred": 184500,
-            "acked": 28820,
-            "executeLatency": "0.024",
-            "tasks": 8,
-            "executed": 28780,
-            "processLatency": "2.112",
-            "boltId": "split",
-            "lastError": "",
-            "errorLapsedSecs": null,
-            "capacity": "0.000",
-            "failed": 0
-        }
-    ],
-    "configuration": {
-        "storm.id": "WordCount3-1-1402960825",
-        "dev.zookeeper.path": "/tmp/dev-storm-zookeeper",
-        "topology.tick.tuple.freq.secs": null,
-        "topology.builtin.metrics.bucket.size.secs": 60,
-        "topology.fall.back.on.java.serialization": true,
-        "topology.max.error.report.per.interval": 5,
-        "zmq.linger.millis": 5000,
-        "topology.skip.missing.kryo.registrations": false,
-        "storm.messaging.netty.client_worker_threads": 1,
-        "ui.childopts": "-Xmx768m",
-        "storm.zookeeper.session.timeout": 20000,
-        "nimbus.reassign": true,
-        "topology.trident.batch.emit.interval.millis": 500,
-        "storm.messaging.netty.flush.check.interval.ms": 10,
-        "nimbus.monitor.freq.secs": 10,
-        "logviewer.childopts": "-Xmx128m",
-        "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib",
-        "topology.executor.send.buffer.size": 1024,
-        "storm.local.dir": "storm-local",
-        "storm.messaging.netty.buffer_size": 5242880,
-        "supervisor.worker.start.timeout.secs": 120,
-        "topology.enable.message.timeouts": true,
-        "nimbus.cleanup.inbox.freq.secs": 600,
-        "nimbus.inbox.jar.expiration.secs": 3600,
-        "drpc.worker.threads": 64,
-        "topology.worker.shared.thread.pool.size": 4,
-        "nimbus.host": "hw10843.local",
-        "storm.messaging.netty.min_wait_ms": 100,
-        "storm.zookeeper.port": 2181,
-        "transactional.zookeeper.port": null,
-        "topology.executor.receive.buffer.size": 1024,
-        "transactional.zookeeper.servers": null,
-        "storm.zookeeper.root": "/storm",
-        "storm.zookeeper.retry.intervalceiling.millis": 30000,
-        "supervisor.enable": true,
-        "storm.messaging.netty.server_worker_threads": 1
-    },
-    "replicationCount": 1
-}
-```
-
-
-### /api/v1/topology/:id/component/:component (GET)
-
-Returns detailed metrics and executor information
-
-|Parameter |Value   |Description  |
-|----------|--------|-------------|
-|id   	   |String (required)| Topology Id  |
-|component |String (required)| Component Id |
-|window    |String. Default value :all-time| window duration for metrics in seconds|
-|sys       |String. Values 1 or 0. Default value 0| controls including sys stats part of the response|
-
-Response fields:
-
-|Field  |Value |Description|
-|---	|---	|---
-|id   | String | Component id|
-|name | String | Topology name|
-|componentType | String | component type: SPOUT or BOLT|
-|windowHint| String | window param value in "hh mm ss" format. Default value is "All Time"|
-|executors| Integer |Number of executor tasks in the component|
-|componentErrors| Array of Errors | List of component errors|
-|componentErrors.errorTime| Long | Timestamp when the exception occurred (Prior to 0.11.0, this field was named 'time'.)|
-|componentErrors.errorHost| String | host name for the error|
-|componentErrors.errorPort| String | port for the error|
-|componentErrors.error| String |Shows the error happened in a component|
-|componentErrors.errorLapsedSecs| Integer | Number of seconds elapsed since the error happened in a component |
-|componentErrors.errorWorkerLogLink| String | Link to the worker log that reported the exception |
-|topologyId| String | Topology id|
-|tasks| Integer |Number of instances of component|
-|window    |String. Default value "All Time" | window duration for metrics in seconds|
-|spoutSummary or boltStats| Array |Array of component stats. **Please note this element tag can be spoutSummary or boltStats depending on the componentType**|
-|spoutSummary.windowPretty| String |Duration passed in HH:MM:SS format|
-|spoutSummary.window| String | window duration for metrics in seconds|
-|spoutSummary.emitted| Long |Number of messages emitted in given window |
-|spoutSummary.completeLatency| String (double value returned in String format) |Total latency for processing the message|
-|spoutSummary.transferred| Long |Total number of messages  transferred in given window|
-|spoutSummary.acked| Long |Number of messages acked|
-|spoutSummary.failed| Long |Number of messages failed|
-|boltStats.windowPretty| String |Duration passed in HH:MM:SS format|
-|boltStats.window| String | window duration for metrics in seconds|
-|boltStats.transferred| Long |Total number of messages  transferred in given window|
-|boltStats.processLatency| String (double value returned in String format)  |Average time of the bolt to ack a message after it was received|
-|boltStats.acked| Long |Number of messages acked|
-|boltStats.failed| Long |Number of messages failed|
-
-Examples:
-
-```no-highlight
-1. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825/component/spout
-2. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825/component/spout?sys=1
-3. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825/component/spout?window=600
-```
-
-Sample response:
-
-```json
-{
-    "name": "WordCount3",
-    "id": "spout",
-    "componentType": "spout",
-    "windowHint": "10m 0s",
-    "executors": 5,
-    "componentErrors":[{"errorTime": 1406006074000,
-                        "errorHost": "10.11.1.70",
-                        "errorPort": 6701,
-                        "errorWorkerLogLink": "http://10.11.1.7:8000/log?file=worker-6701.log",
-                        "errorLapsedSecs": 16,
-                        "error": "java.lang.RuntimeException: java.lang.StringIndexOutOfBoundsException: Some Error\n\tat backtype.storm.utils.DisruptorQueue.consumeBatchToCursor(DisruptorQueue.java:128)\n\tat backtype.storm.utils.DisruptorQueue.consumeBatchWhenAvailable(DisruptorQueue.java:99)\n\tat backtype.storm.disruptor$consume_batch_when_available.invoke(disruptor.clj:80)\n\tat backtype...more.."
-    }],
-    "topologyId": "WordCount3-1-1402960825",
-    "tasks": 5,
-    "window": "600",
-    "spoutSummary": [
-        {
-            "windowPretty": "10m 0s",
-            "window": "600",
-            "emitted": 28500,
-            "transferred": 28460,
-            "completeLatency": "0.000",
-            "acked": 0,
-            "failed": 0
-        },
-        {
-            "windowPretty": "3h 0m 0s",
-            "window": "10800",
-            "emitted": 127640,
-            "transferred": 127440,
-            "completeLatency": "0.000",
-            "acked": 0,
-            "failed": 0
-        },
-        {
-            "windowPretty": "1d 0h 0m 0s",
-            "window": "86400",
-            "emitted": 127640,
-            "transferred": 127440,
-            "completeLatency": "0.000",
-            "acked": 0,
-            "failed": 0
-        },
-        {
-            "windowPretty": "All time",
-            "window": ":all-time",
-            "emitted": 127640,
-            "transferred": 127440,
-            "completeLatency": "0.000",
-            "acked": 0,
-            "failed": 0
-        }
-    ],
-    "outputStats": [
-        {
-            "stream": "__metrics",
-            "emitted": 40,
-            "transferred": 0,
-            "completeLatency": "0",
-            "acked": 0,
-            "failed": 0
-        },
-        {
-            "stream": "default",
-            "emitted": 28460,
-            "transferred": 28460,
-            "completeLatency": "0",
-            "acked": 0,
-            "failed": 0
-        }
-    ],
-    "executorStats": [
-        {
-            "workerLogLink": "http://10.11.1.7:8000/log?file=worker-6701.log",
-            "emitted": 5720,
-            "port": 6701,
-            "completeLatency": "0.000",
-            "transferred": 5720,
-            "host": "10.11.1.7",
-            "acked": 0,
-            "uptime": "43m 4s",
-            "uptimeSeconds": 2584,
-            "id": "[24-24]",
-            "failed": 0
-        },
-        {
-            "workerLogLink": "http://10.11.1.7:8000/log?file=worker-6703.log",
-            "emitted": 5700,
-            "port": 6703,
-            "completeLatency": "0.000",
-            "transferred": 5700,
-            "host": "10.11.1.7",
-            "acked": 0,
-            "uptime": "42m 57s",
-            "uptimeSeconds": 2577,
-            "id": "[25-25]",
-            "failed": 0
-        },
-        {
-            "workerLogLink": "http://10.11.1.7:8000/log?file=worker-6702.log",
-            "emitted": 5700,
-            "port": 6702,
-            "completeLatency": "0.000",
-            "transferred": 5680,
-            "host": "10.11.1.7",
-            "acked": 0,
-            "uptime": "42m 57s",
-            "uptimeSeconds": 2577,
-            "id": "[26-26]",
-            "failed": 0
-        },
-        {
-            "workerLogLink": "http://10.11.1.7:8000/log?file=worker-6701.log",
-            "emitted": 5700,
-            "port": 6701,
-            "completeLatency": "0.000",
-            "transferred": 5680,
-            "host": "10.11.1.7",
-            "acked": 0,
-            "uptime": "43m 4s",
-            "uptimeSeconds": 2584,
-            "id": "[27-27]",
-            "failed": 0
-        },
-        {
-            "workerLogLink": "http://10.11.1.7:8000/log?file=worker-6703.log",
-            "emitted": 5680,
-            "port": 6703,
-            "completeLatency": "0.000",
-            "transferred": 5680,
-            "host": "10.11.1.7",
-            "acked": 0,
-            "uptime": "42m 57s",
-            "uptimeSeconds": 2577,
-            "id": "[28-28]",
-            "failed": 0
-        }
-    ]
-}
-```
-
-## Profiling and Debugging GET Operations
-
-###  /api/v1/topology/:id/profiling/start/:host-port/:timeout (GET)
-
-Request to start profiler on worker with timeout. Returns status and link to profiler artifacts for worker.
-
-|Parameter |Value   |Description  |
-|----------|--------|-------------|
-|id   	   |String (required)| Topology Id  |
-|:host-port |String (required)| Worker Id |
-|:timeout |String (required)| Time out for profiler to stop in minutes |
-
-Response fields:
-
-|Field  |Value |Description|
-|-----	|----- |-----------|
-|id   | String | Worker id|
-|status | String | Response Status |
-|timeout | String | Requested timeout
-|dumplink | String | Link to logviewer URL for worker profiler documents.|
-
-Examples:
-
-```no-highlight
-1. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/start/10.11.1.7:6701/10
-2. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/start/10.11.1.7:6701/5
-3. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/start/10.11.1.7:6701/20
-```
-
-Sample response:
-
-```json
-{
-   "status": "ok",
-   "id": "10.11.1.7:6701",
-   "timeout": "10",
-   "dumplink": "http:\/\/10.11.1.7:8000\/dumps\/wordcount-1-1446614150\/10.11.1.7%3A6701"
-}
-```
-
-###  /api/v1/topology/:id/profiling/dumpprofile/:host-port (GET)
-
-Request to dump profiler recording on worker. Returns status and worker id for the request.
-
-|Parameter |Value   |Description  |
-|----------|--------|-------------|
-|id   	   |String (required)| Topology Id  |
-|:host-port |String (required)| Worker Id |
-
-Response fields:
-
-|Field  |Value |Description|
-|-----	|----- |-----------|
-|id   | String | Worker id|
-|status | String | Response Status |
-
-Examples:
-
-```no-highlight
-1. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/dumpprofile/10.11.1.7:6701
-```
-
-Sample response:
-
-```json
-{
-   "status": "ok",
-   "id": "10.11.1.7:6701",
-}
-```
-
-###  /api/v1/topology/:id/profiling/stop/:host-port (GET)
-
-Request to stop profiler on worker. Returns status and worker id for the request.
-
-|Parameter |Value   |Description  |
-|----------|--------|-------------|
-|id   	   |String (required)| Topology Id  |
-|:host-port |String (required)| Worker Id |
-
-Response fields:
-
-|Field  |Value |Description|
-|-----	|----- |-----------|
-|id   | String | Worker id|
-|status | String | Response Status |
-
-Examples:
-
-```no-highlight
-1. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/stop/10.11.1.7:6701
-```
-
-Sample response:
-
-```json
-{
-   "status": "ok",
-   "id": "10.11.1.7:6701"
-}
-```
-
-###  /api/v1/topology/:id/profiling/dumpjstack/:host-port (GET)
-
-Request to dump jstack on worker. Returns status and worker id for the request.
-
-|Parameter |Value   |Description  |
-|----------|--------|-------------|
-|id   	   |String (required)| Topology Id  |
-|:host-port |String (required)| Worker Id |
-
-Response fields:
-
-|Field  |Value |Description|
-|-----	|----- |-----------|
-|id   | String | Worker id|
-|status | String | Response Status |
-
-Examples:
-
-```no-highlight
-1. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/dumpjstack/10.11.1.7:6701
-```
-
-Sample response:
-
-```json
-{
-   "status": "ok",
-   "id": "10.11.1.7:6701"
-}
-```
-
-###  /api/v1/topology/:id/profiling/dumpheap/:host-port (GET)
-
-Request to dump heap (jmap) on worker. Returns status and worker id for the request.
-
-|Parameter |Value   |Description  |
-|----------|--------|-------------|
-|id   	   |String (required)| Topology Id  |
-|:host-port |String (required)| Worker Id |
-
-Response fields:
-
-|Field  |Value |Description|
-|-----	|----- |-----------|
-|id   | String | Worker id|
-|status | String | Response Status |
-
-Examples:
-
-```no-highlight
-1. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/dumpheap/10.11.1.7:6701
-```
-
-Sample response:
-
-```json
-{
-   "status": "ok",
-   "id": "10.11.1.7:6701"
-}
-```
-
-###  /api/v1/topology/:id/profiling/restartworker/:host-port (GET)
-
-Request to restart the worker. Returns status and worker id for the request.
-
-|Parameter |Value   |Description  |
-|----------|--------|-------------|
-|id   	   |String (required)| Topology Id  |
-|:host-port |String (required)| Worker Id |
-
-Response fields:
-
-|Field  |Value |Description|
-|-----	|----- |-----------|
-|id   | String | Worker id|
-|status | String | Response Status |
-
-Examples:
-
-```no-highlight
-1. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/restartworker/10.11.1.7:6701
-```
-
-Sample response:
-
-```json
-{
-   "status": "ok",
-   "id": "10.11.1.7:6701"
-}
-```
-
-## POST Operations
-
-### /api/v1/topology/:id/activate (POST)
-
-Activates a topology.
-
-|Parameter |Value   |Description  |
-|----------|--------|-------------|
-|id   	   |String (required)| Topology Id  |
-
-Sample Response:
-
-```json
-{"topologyOperation":"activate","topologyId":"wordcount-1-1420308665","status":"success"}
-```
-
-
-### /api/v1/topology/:id/deactivate (POST)
-
-Deactivates a topology.
-
-|Parameter |Value   |Description  |
-|----------|--------|-------------|
-|id   	   |String (required)| Topology Id  |
-
-Sample Response:
-
-```json
-{"topologyOperation":"deactivate","topologyId":"wordcount-1-1420308665","status":"success"}
-```
-
-
-### /api/v1/topology/:id/rebalance/:wait-time (POST)
-
-Rebalances a topology.
-
-|Parameter |Value   |Description  |
-|----------|--------|-------------|
-|id   	   |String (required)| Topology Id  |
-|wait-time |String (required)| Wait time before rebalance happens |
-|rebalanceOptions| Json (optional) | topology rebalance options |
-
-
-Sample rebalanceOptions json:
-
-```json
-{"rebalanceOptions" : {"numWorkers" : 2, "executors" : {"spout" :4, "count" : 10}}, "callback" : "foo"}
-```
-
-Examples:
-
-```no-highlight
-curl  -i -b ~/cookiejar.txt -c ~/cookiejar.txt -X POST  
--H "Content-Type: application/json" 
--d  '{"rebalanceOptions": {"numWorkers": 2, "executors": { "spout" : "5", "split": 7, "count": 5 }}, "callback":"foo"}' 
-http://localhost:8080/api/v1/topology/wordcount-1-1420308665/rebalance/0
-```
-
-Sample Response:
-
-```json
-{"topologyOperation":"rebalance","topologyId":"wordcount-1-1420308665","status":"success"}
-```
-
-
-
-### /api/v1/topology/:id/kill/:wait-time (POST)
-
-Kills a topology.
-
-|Parameter |Value   |Description  |
-|----------|--------|-------------|
-|id   	   |String (required)| Topology Id  |
-|wait-time |String (required)| Wait time before rebalance happens |
-
-Caution: Small wait times (0-5 seconds) may increase the probability of triggering the bug reported in
-[STORM-112](https://issues.apache.org/jira/browse/STORM-112), which may result in broken Supervisor
-daemons.
-
-Sample Response:
-
-```json
-{"topologyOperation":"kill","topologyId":"wordcount-1-1420308665","status":"success"}
-```
-
-## API errors
-
-The API returns 500 HTTP status codes in case of any errors.
-
-Sample response:
-
-```json
-{
-  "error": "Internal Server Error",
-  "errorMessage": "java.lang.NullPointerException\n\tat clojure.core$name.invoke(core.clj:1505)\n\tat backtype.storm.ui.core$component_page.invoke(core.clj:752)\n\tat backtype.storm.ui.core$fn__7766.invoke(core.clj:782)\n\tat compojure.core$make_route$fn__5755.invoke(core.clj:93)\n\tat compojure.core$if_route$fn__5743.invoke(core.clj:39)\n\tat compojure.core$if_method$fn__5736.invoke(core.clj:24)\n\tat compojure.core$routing$fn__5761.invoke(core.clj:106)\n\tat clojure.core$some.invoke(core.clj:2443)\n\tat compojure.core$routing.doInvoke(core.clj:106)\n\tat clojure.lang.RestFn.applyTo(RestFn.java:139)\n\tat clojure.core$apply.invoke(core.clj:619)\n\tat compojure.core$routes$fn__5765.invoke(core.clj:111)\n\tat ring.middleware.reload$wrap_reload$fn__6880.invoke(reload.clj:14)\n\tat backtype.storm.ui.core$catch_errors$fn__7800.invoke(core.clj:836)\n\tat ring.middleware.keyword_params$wrap_keyword_params$fn__6319.invoke(keyword_params.clj:27)\n\tat ring.middleware.nested_params$wrap_nest
 ed_params$fn__6358.invoke(nested_params.clj:65)\n\tat ring.middleware.params$wrap_params$fn__6291.invoke(params.clj:55)\n\tat ring.middleware.multipart_params$wrap_multipart_params$fn__6386.invoke(multipart_params.clj:103)\n\tat ring.middleware.flash$wrap_flash$fn__6675.invoke(flash.clj:14)\n\tat ring.middleware.session$wrap_session$fn__6664.invoke(session.clj:43)\n\tat ring.middleware.cookies$wrap_cookies$fn__6595.invoke(cookies.clj:160)\n\tat ring.adapter.jetty$proxy_handler$fn__6112.invoke(jetty.clj:16)\n\tat ring.adapter.jetty.proxy$org.mortbay.jetty.handler.AbstractHandler$0.handle(Unknown Source)\n\tat org.mortbay.jetty.handler.HandlerWrapper.handle(HandlerWrapper.java:152)\n\tat org.mortbay.jetty.Server.handle(Server.java:326)\n\tat org.mortbay.jetty.HttpConnection.handleRequest(HttpConnection.java:542)\n\tat org.mortbay.jetty.HttpConnection$RequestHandler.headerComplete(HttpConnection.java:928)\n\tat org.mortbay.jetty.HttpParser.parseNext(HttpParser.java:549)\n\tat org.mortb
 ay.jetty.HttpParser.parseAvailable(HttpParser.java:212)\n\tat org.mortbay.jetty.HttpConnection.handle(HttpConnection.java:404)\n\tat org.mortbay.jetty.bio.SocketConnector$Connection.run(SocketConnector.java:228)\n\tat org.mortbay.thread.QueuedThreadPool$PoolThread.run(QueuedThreadPool.java:582)\n"
-}
-```

http://git-wip-us.apache.org/repos/asf/storm/blob/a4876b2d/docs/documentation/Documentation.md
----------------------------------------------------------------------
diff --git a/docs/documentation/Documentation.md b/docs/documentation/Documentation.md
index 48e18e8..bcf6ec9 100644
--- a/docs/documentation/Documentation.md
+++ b/docs/documentation/Documentation.md
@@ -44,6 +44,7 @@ Trident is an alternative interface to Storm. It provides exactly-once processin
 * [Hooks](Hooks.html)
 * [Metrics](Metrics.html)
 * [Lifecycle of a trident tuple]()
+* [UI REST API](ui-rest-api.html)
 * [Dynamic Log Level Settings](dynamic-log-level-settings.html)
 * [Dynamic Worker Profiling](dynamic-worker-profiling.html)
 

http://git-wip-us.apache.org/repos/asf/storm/blob/a4876b2d/docs/documentation/ui-rest-api.md
----------------------------------------------------------------------
diff --git a/docs/documentation/ui-rest-api.md b/docs/documentation/ui-rest-api.md
new file mode 100644
index 0000000..b9b747c
--- /dev/null
+++ b/docs/documentation/ui-rest-api.md
@@ -0,0 +1,930 @@
+# Storm UI REST API
+
+The Storm UI daemon provides a REST API that allows you to interact with a Storm cluster, which includes retrieving
+metrics data and configuration information as well as management operations such as starting or stopping topologies.
+
+
+# Data format
+
+The REST API returns JSON responses and supports JSONP.
+Clients can pass a callback query parameter to wrap JSON in the callback function.
+
+
+# Using the UI REST API
+
+_Note: It is recommended to ignore undocumented elements in the JSON response because future versions of Storm may not_
+_support those elements anymore._
+
+
+## REST API Base URL
+
+The REST API is part of the UI daemon of Storm (started by `storm ui`) and thus runs on the same host and port as the
+Storm UI (the UI daemon is often run on the same host as the Nimbus daemon).  The port is configured by `ui.port`,
+which is set to `8080` by default (see [defaults.yaml](conf/defaults.yaml)).
+
+The API base URL would thus be:
+
+    http://<ui-host>:<ui-port>/api/v1/...
+
+You can use a tool such as `curl` to talk to the REST API:
+
+    # Request the cluster configuration.
+    # Note: We assume ui.port is configured to the default value of 8080.
+    $ curl http://<ui-host>:8080/api/v1/cluster/configuration
+
+## Impersonating a user in a secure environment
+In a secure environment an authenticated user can impersonate another user. To impersonate a user the caller must pass
+`doAsUser` param or header with value set to the user that the request needs to be performed as. Please see SECURITY.MD
+to learn more about how to setup impersonation ACLs and authorization. The rest API uses the same configs and acls that
+are used by nimbus.
+
+Examples:
+
+```no-highlight
+ 1. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1425844354\?doAsUser=testUSer1
+ 2. curl 'http://localhost:8080/api/v1/topology/wordcount-1-1425844354/activate' -X POST -H 'doAsUser:testUSer1'
+```
+
+## GET Operations
+
+### /api/v1/cluster/configuration (GET)
+
+Returns the cluster configuration.
+
+Sample response (does not include all the data fields):
+
+```json
+  {
+    "dev.zookeeper.path": "/tmp/dev-storm-zookeeper",
+    "topology.tick.tuple.freq.secs": null,
+    "topology.builtin.metrics.bucket.size.secs": 60,
+    "topology.fall.back.on.java.serialization": true,
+    "topology.max.error.report.per.interval": 5,
+    "zmq.linger.millis": 5000,
+    "topology.skip.missing.kryo.registrations": false,
+    "storm.messaging.netty.client_worker_threads": 1,
+    "ui.childopts": "-Xmx768m",
+    "storm.zookeeper.session.timeout": 20000,
+    "nimbus.reassign": true,
+    "topology.trident.batch.emit.interval.millis": 500,
+    "storm.messaging.netty.flush.check.interval.ms": 10,
+    "nimbus.monitor.freq.secs": 10,
+    "logviewer.childopts": "-Xmx128m",
+    "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib",
+    "topology.executor.send.buffer.size": 1024
+    }
+```
+
+### /api/v1/cluster/summary (GET)
+
+Returns cluster summary information such as nimbus uptime or number of supervisors.
+
+Response fields:
+
+|Field  |Value|Description
+|---	|---	|---
+|stormVersion|String| Storm version|
+|supervisors|Integer| Number of supervisors running|
+|topologies| Integer| Number of topologies running| 
+|slotsTotal| Integer|Total number of available worker slots|
+|slotsUsed| Integer| Number of worker slots used|
+|slotsFree| Integer |Number of worker slots available|
+|executorsTotal| Integer |Total number of executors|
+|tasksTotal| Integer |Total tasks|
+
+Sample response:
+
+```json
+   {
+    "stormVersion": "0.9.2-incubating-SNAPSHOT",
+    "supervisors": 1,
+    "slotsTotal": 4,
+    "slotsUsed": 3,
+    "slotsFree": 1,
+    "executorsTotal": 28,
+    "tasksTotal": 28
+    }
+```
+
+### /api/v1/supervisor/summary (GET)
+
+Returns summary information for all supervisors.
+
+Response fields:
+
+|Field  |Value|Description|
+|---	|---	|---
+|id| String | Supervisor's id|
+|host| String| Supervisor's host name|
+|uptime| String| Shows how long the supervisor is running|
+|uptimeSeconds| Integer| Shows how long the supervisor is running in seconds|
+|slotsTotal| Integer| Total number of available worker slots for this supervisor|
+|slotsUsed| Integer| Number of worker slots used on this supervisor|
+
+Sample response:
+
+```json
+{
+    "supervisors": [
+        {
+            "id": "0b879808-2a26-442b-8f7d-23101e0c3696",
+            "host": "10.11.1.7",
+            "uptime": "5m 58s",
+            "uptimeSeconds": 358,
+            "slotsTotal": 4,
+            "slotsUsed": 3
+        }
+    ]
+}
+```
+
+### /api/v1/nimbus/summary (GET)
+
+Returns summary information for all nimbus hosts.
+
+Response fields:
+
+|Field  |Value|Description|
+|---	|---	|---
+|host| String | Nimbus' host name|
+|port| int| Nimbus' port number|
+|status| String| Possible values are Leader, Not a Leader, Dead|
+|nimbusUpTime| String| Shows how long the nimbus has been running|
+|nimbusUpTimeSeconds| String| Shows how long the nimbus has been running in seconds|
+|nimbusLogLink| String| Logviewer url to view the nimbus.log|
+|version| String| Version of storm this nimbus host is running|
+
+Sample response:
+
+```json
+{
+    "nimbuses":[
+        {
+            "host":"192.168.202.1",
+            "port":6627,
+            "nimbusLogLink":"http:\/\/192.168.202.1:8000\/log?file=nimbus.log",
+            "status":"Leader",
+            "version":"0.10.0-SNAPSHOT",
+            "nimbusUpTime":"3m 33s",
+            "nimbusUpTimeSeconds":"213"
+        }
+    ]
+}
+```
+
+### /api/v1/topology/summary (GET)
+
+Returns summary information for all topologies.
+
+Response fields:
+
+|Field  |Value | Description|
+|---	|---	|---
+|id| String| Topology Id|
+|name| String| Topology Name|
+|status| String| Topology Status|
+|uptime| String|  Shows how long the topology is running|
+|uptimeSeconds| Integer|  Shows how long the topology is running in seconds|
+|tasksTotal| Integer |Total number of tasks for this topology|
+|workersTotal| Integer |Number of workers used for this topology|
+|executorsTotal| Integer |Number of executors used for this topology|
+|replicationCount| Integer |Number of nimbus hosts on which this topology code is replicated|
+|requestedMemOnHeap| Double|Requested On-Heap Memory by User (MB)
+|requestedMemOffHeap| Double|Requested Off-Heap Memory by User (MB)|
+|requestedTotalMem| Double|Requested Total Memory by User (MB)|
+|requestedCpu| Double|Requested CPU by User (%)|
+|assignedMemOnHeap| Double|Assigned On-Heap Memory by Scheduler (MB)|
+|assignedMemOffHeap| Double|Assigned Off-Heap Memory by Scheduler (MB)|
+|assignedTotalMem| Double|Assigned Total Memory by Scheduler (MB)|
+|assignedCpu| Double|Assigned CPU by Scheduler (%)|
+
+Sample response:
+
+```json
+{
+    "topologies": [
+        {
+            "id": "WordCount3-1-1402960825",
+            "name": "WordCount3",
+            "status": "ACTIVE",
+            "uptime": "6m 5s",
+            "uptimeSeconds": 365,
+            "tasksTotal": 28,
+            "workersTotal": 3,
+            "executorsTotal": 28,
+            "replicationCount": 1,
+            "requestedMemOnHeap": 640,
+            "requestedMemOffHeap": 128,
+            "requestedTotalMem": 768,
+            "requestedCpu": 80,
+            "assignedMemOnHeap": 640,
+            "assignedMemOffHeap": 128,
+            "assignedTotalMem": 768,
+            "assignedCpu": 80
+        }
+    ]
+}
+```
+
+### /api/v1/topology/:id (GET)
+
+Returns topology information and statistics.  Substitute id with topology id.
+
+Request parameters:
+
+|Parameter |Value   |Description  |
+|----------|--------|-------------|
+|id   	   |String (required)| Topology Id  |
+|window    |String. Default value :all-time| Window duration for metrics in seconds|
+|sys       |String. Values 1 or 0. Default value 0| Controls including sys stats part of the response|
+
+
+Response fields:
+
+|Field  |Value |Description|
+|---	|---	|---
+|id| String| Topology Id|
+|name| String |Topology Name|
+|uptime| String |How long the topology has been running|
+|uptimeSeconds| Integer |How long the topology has been running in seconds|
+|status| String |Current status of the topology, e.g. "ACTIVE"|
+|tasksTotal| Integer |Total number of tasks for this topology|
+|workersTotal| Integer |Number of workers used for this topology|
+|executorsTotal| Integer |Number of executors used for this topology|
+|msgTimeout| Integer | Number of seconds a tuple has before the spout considers it failed |
+|windowHint| String | window param value in "hh mm ss" format. Default value is "All Time"|
+|topologyStats| Array | Array of all the topology related stats per time window|
+|topologyStats.windowPretty| String |Duration passed in HH:MM:SS format|
+|topologyStats.window| String |User requested time window for metrics|
+|topologyStats.emitted| Long |Number of messages emitted in given window|
+|topologyStats.transferred| Long |Number of messages transferred in given window|
+|topologyStats.completeLatency| String (double value returned in String format) |Total latency for processing the message|
+|topologyStats.acked| Long |Number of messages acked in given window|
+|topologyStats.failed| Long |Number of messages failed in given window|
+|spouts| Array | Array of all the spout components in the topology|
+|spouts.spoutId| String |Spout id|
+|spouts.executors| Integer |Number of executors for the spout|
+|spouts.emitted| Long |Number of messages emitted in given window |
+|spouts.completeLatency| String (double value returned in String format) |Total latency for processing the message|
+|spouts.transferred| Long |Total number of messages  transferred in given window|
+|spouts.tasks| Integer |Total number of tasks for the spout|
+|spouts.lastError| String |Shows the last error happened in a spout|
+|spouts.errorLapsedSecs| Integer | Number of seconds elapsed since that last error happened in a spout|
+|spouts.errorWorkerLogLink| String | Link to the worker log that reported the exception |
+|spouts.acked| Long |Number of messages acked|
+|spouts.failed| Long |Number of messages failed|
+|bolts| Array | Array of bolt components in the topology|
+|bolts.boltId| String |Bolt id|
+|bolts.capacity| String (double value returned in String format) |This value indicates number of messages executed * average execute latency / time window|
+|bolts.processLatency| String (double value returned in String format)  |Average time of the bolt to ack a message after it was received|
+|bolts.executeLatency| String (double value returned in String format) |Average time to run the execute method of the bolt|
+|bolts.executors| Integer |Number of executor tasks in the bolt component|
+|bolts.tasks| Integer |Number of instances of bolt|
+|bolts.acked| Long |Number of tuples acked by the bolt|
+|bolts.failed| Long |Number of tuples failed by the bolt|
+|bolts.lastError| String |Shows the last error occurred in the bolt|
+|bolts.errorLapsedSecs| Integer |Number of seconds elapsed since that last error happened in a bolt|
+|bolts.errorWorkerLogLink| String | Link to the worker log that reported the exception |
+|bolts.emitted| Long |Number of tuples emitted|
+|replicationCount| Integer |Number of nimbus hosts on which this topology code is replicated|
+
+Examples:
+
+```no-highlight
+ 1. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825
+ 2. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825?sys=1
+ 3. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825?window=600
+```
+
+Sample response:
+
+```json
+ {
+    "name": "WordCount3",
+    "id": "WordCount3-1-1402960825",
+    "workersTotal": 3,
+    "window": "600",
+    "status": "ACTIVE",
+    "tasksTotal": 28,
+    "executorsTotal": 28,
+    "uptime": "29m 19s",
+    "uptimeSeconds": 1759,
+    "msgTimeout": 30,
+    "windowHint": "10m 0s",
+    "topologyStats": [
+        {
+            "windowPretty": "10m 0s",
+            "window": "600",
+            "emitted": 397960,
+            "transferred": 213380,
+            "completeLatency": "0.000",
+            "acked": 213460,
+            "failed": 0
+        },
+        {
+            "windowPretty": "3h 0m 0s",
+            "window": "10800",
+            "emitted": 1190260,
+            "transferred": 638260,
+            "completeLatency": "0.000",
+            "acked": 638280,
+            "failed": 0
+        },
+        {
+            "windowPretty": "1d 0h 0m 0s",
+            "window": "86400",
+            "emitted": 1190260,
+            "transferred": 638260,
+            "completeLatency": "0.000",
+            "acked": 638280,
+            "failed": 0
+        },
+        {
+            "windowPretty": "All time",
+            "window": ":all-time",
+            "emitted": 1190260,
+            "transferred": 638260,
+            "completeLatency": "0.000",
+            "acked": 638280,
+            "failed": 0
+        }
+    ],
+    "spouts": [
+        {
+            "executors": 5,
+            "emitted": 28880,
+            "completeLatency": "0.000",
+            "transferred": 28880,
+            "acked": 0,
+            "spoutId": "spout",
+            "tasks": 5,
+            "lastError": "",
+            "errorLapsedSecs": null,
+            "failed": 0
+        }
+    ],
+        "bolts": [
+        {
+            "executors": 12,
+            "emitted": 184580,
+            "transferred": 0,
+            "acked": 184640,
+            "executeLatency": "0.048",
+            "tasks": 12,
+            "executed": 184620,
+            "processLatency": "0.043",
+            "boltId": "count",
+            "lastError": "",
+            "errorLapsedSecs": null,
+            "capacity": "0.003",
+            "failed": 0
+        },
+        {
+            "executors": 8,
+            "emitted": 184500,
+            "transferred": 184500,
+            "acked": 28820,
+            "executeLatency": "0.024",
+            "tasks": 8,
+            "executed": 28780,
+            "processLatency": "2.112",
+            "boltId": "split",
+            "lastError": "",
+            "errorLapsedSecs": null,
+            "capacity": "0.000",
+            "failed": 0
+        }
+    ],
+    "configuration": {
+        "storm.id": "WordCount3-1-1402960825",
+        "dev.zookeeper.path": "/tmp/dev-storm-zookeeper",
+        "topology.tick.tuple.freq.secs": null,
+        "topology.builtin.metrics.bucket.size.secs": 60,
+        "topology.fall.back.on.java.serialization": true,
+        "topology.max.error.report.per.interval": 5,
+        "zmq.linger.millis": 5000,
+        "topology.skip.missing.kryo.registrations": false,
+        "storm.messaging.netty.client_worker_threads": 1,
+        "ui.childopts": "-Xmx768m",
+        "storm.zookeeper.session.timeout": 20000,
+        "nimbus.reassign": true,
+        "topology.trident.batch.emit.interval.millis": 500,
+        "storm.messaging.netty.flush.check.interval.ms": 10,
+        "nimbus.monitor.freq.secs": 10,
+        "logviewer.childopts": "-Xmx128m",
+        "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib",
+        "topology.executor.send.buffer.size": 1024,
+        "storm.local.dir": "storm-local",
+        "storm.messaging.netty.buffer_size": 5242880,
+        "supervisor.worker.start.timeout.secs": 120,
+        "topology.enable.message.timeouts": true,
+        "nimbus.cleanup.inbox.freq.secs": 600,
+        "nimbus.inbox.jar.expiration.secs": 3600,
+        "drpc.worker.threads": 64,
+        "topology.worker.shared.thread.pool.size": 4,
+        "nimbus.host": "hw10843.local",
+        "storm.messaging.netty.min_wait_ms": 100,
+        "storm.zookeeper.port": 2181,
+        "transactional.zookeeper.port": null,
+        "topology.executor.receive.buffer.size": 1024,
+        "transactional.zookeeper.servers": null,
+        "storm.zookeeper.root": "/storm",
+        "storm.zookeeper.retry.intervalceiling.millis": 30000,
+        "supervisor.enable": true,
+        "storm.messaging.netty.server_worker_threads": 1
+    },
+    "replicationCount": 1
+}
+```
+
+
+### /api/v1/topology/:id/component/:component (GET)
+
+Returns detailed metrics and executor information
+
+|Parameter |Value   |Description  |
+|----------|--------|-------------|
+|id   	   |String (required)| Topology Id  |
+|component |String (required)| Component Id |
+|window    |String. Default value :all-time| window duration for metrics in seconds|
+|sys       |String. Values 1 or 0. Default value 0| controls including sys stats part of the response|
+
+Response fields:
+
+|Field  |Value |Description|
+|---	|---	|---
+|id   | String | Component id|
+|name | String | Topology name|
+|componentType | String | component type: SPOUT or BOLT|
+|windowHint| String | window param value in "hh mm ss" format. Default value is "All Time"|
+|executors| Integer |Number of executor tasks in the component|
+|componentErrors| Array of Errors | List of component errors|
+|componentErrors.errorTime| Long | Timestamp when the exception occurred (Prior to 0.11.0, this field was named 'time'.)|
+|componentErrors.errorHost| String | host name for the error|
+|componentErrors.errorPort| String | port for the error|
+|componentErrors.error| String |Shows the error happened in a component|
+|componentErrors.errorLapsedSecs| Integer | Number of seconds elapsed since the error happened in a component |
+|componentErrors.errorWorkerLogLink| String | Link to the worker log that reported the exception |
+|topologyId| String | Topology id|
+|tasks| Integer |Number of instances of component|
+|window    |String. Default value "All Time" | window duration for metrics in seconds|
+|spoutSummary or boltStats| Array |Array of component stats. **Please note this element tag can be spoutSummary or boltStats depending on the componentType**|
+|spoutSummary.windowPretty| String |Duration passed in HH:MM:SS format|
+|spoutSummary.window| String | window duration for metrics in seconds|
+|spoutSummary.emitted| Long |Number of messages emitted in given window |
+|spoutSummary.completeLatency| String (double value returned in String format) |Total latency for processing the message|
+|spoutSummary.transferred| Long |Total number of messages  transferred in given window|
+|spoutSummary.acked| Long |Number of messages acked|
+|spoutSummary.failed| Long |Number of messages failed|
+|boltStats.windowPretty| String |Duration passed in HH:MM:SS format|
+|boltStats.window| String | window duration for metrics in seconds|
+|boltStats.transferred| Long |Total number of messages  transferred in given window|
+|boltStats.processLatency| String (double value returned in String format)  |Average time of the bolt to ack a message after it was received|
+|boltStats.acked| Long |Number of messages acked|
+|boltStats.failed| Long |Number of messages failed|
+
+Examples:
+
+```no-highlight
+1. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825/component/spout
+2. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825/component/spout?sys=1
+3. http://ui-daemon-host-name:8080/api/v1/topology/WordCount3-1-1402960825/component/spout?window=600
+```
+
+Sample response:
+
+```json
+{
+    "name": "WordCount3",
+    "id": "spout",
+    "componentType": "spout",
+    "windowHint": "10m 0s",
+    "executors": 5,
+    "componentErrors":[{"errorTime": 1406006074000,
+                        "errorHost": "10.11.1.70",
+                        "errorPort": 6701,
+                        "errorWorkerLogLink": "http://10.11.1.7:8000/log?file=worker-6701.log",
+                        "errorLapsedSecs": 16,
+                        "error": "java.lang.RuntimeException: java.lang.StringIndexOutOfBoundsException: Some Error\n\tat backtype.storm.utils.DisruptorQueue.consumeBatchToCursor(DisruptorQueue.java:128)\n\tat backtype.storm.utils.DisruptorQueue.consumeBatchWhenAvailable(DisruptorQueue.java:99)\n\tat backtype.storm.disruptor$consume_batch_when_available.invoke(disruptor.clj:80)\n\tat backtype...more.."
+    }],
+    "topologyId": "WordCount3-1-1402960825",
+    "tasks": 5,
+    "window": "600",
+    "spoutSummary": [
+        {
+            "windowPretty": "10m 0s",
+            "window": "600",
+            "emitted": 28500,
+            "transferred": 28460,
+            "completeLatency": "0.000",
+            "acked": 0,
+            "failed": 0
+        },
+        {
+            "windowPretty": "3h 0m 0s",
+            "window": "10800",
+            "emitted": 127640,
+            "transferred": 127440,
+            "completeLatency": "0.000",
+            "acked": 0,
+            "failed": 0
+        },
+        {
+            "windowPretty": "1d 0h 0m 0s",
+            "window": "86400",
+            "emitted": 127640,
+            "transferred": 127440,
+            "completeLatency": "0.000",
+            "acked": 0,
+            "failed": 0
+        },
+        {
+            "windowPretty": "All time",
+            "window": ":all-time",
+            "emitted": 127640,
+            "transferred": 127440,
+            "completeLatency": "0.000",
+            "acked": 0,
+            "failed": 0
+        }
+    ],
+    "outputStats": [
+        {
+            "stream": "__metrics",
+            "emitted": 40,
+            "transferred": 0,
+            "completeLatency": "0",
+            "acked": 0,
+            "failed": 0
+        },
+        {
+            "stream": "default",
+            "emitted": 28460,
+            "transferred": 28460,
+            "completeLatency": "0",
+            "acked": 0,
+            "failed": 0
+        }
+    ],
+    "executorStats": [
+        {
+            "workerLogLink": "http://10.11.1.7:8000/log?file=worker-6701.log",
+            "emitted": 5720,
+            "port": 6701,
+            "completeLatency": "0.000",
+            "transferred": 5720,
+            "host": "10.11.1.7",
+            "acked": 0,
+            "uptime": "43m 4s",
+            "uptimeSeconds": 2584,
+            "id": "[24-24]",
+            "failed": 0
+        },
+        {
+            "workerLogLink": "http://10.11.1.7:8000/log?file=worker-6703.log",
+            "emitted": 5700,
+            "port": 6703,
+            "completeLatency": "0.000",
+            "transferred": 5700,
+            "host": "10.11.1.7",
+            "acked": 0,
+            "uptime": "42m 57s",
+            "uptimeSeconds": 2577,
+            "id": "[25-25]",
+            "failed": 0
+        },
+        {
+            "workerLogLink": "http://10.11.1.7:8000/log?file=worker-6702.log",
+            "emitted": 5700,
+            "port": 6702,
+            "completeLatency": "0.000",
+            "transferred": 5680,
+            "host": "10.11.1.7",
+            "acked": 0,
+            "uptime": "42m 57s",
+            "uptimeSeconds": 2577,
+            "id": "[26-26]",
+            "failed": 0
+        },
+        {
+            "workerLogLink": "http://10.11.1.7:8000/log?file=worker-6701.log",
+            "emitted": 5700,
+            "port": 6701,
+            "completeLatency": "0.000",
+            "transferred": 5680,
+            "host": "10.11.1.7",
+            "acked": 0,
+            "uptime": "43m 4s",
+            "uptimeSeconds": 2584,
+            "id": "[27-27]",
+            "failed": 0
+        },
+        {
+            "workerLogLink": "http://10.11.1.7:8000/log?file=worker-6703.log",
+            "emitted": 5680,
+            "port": 6703,
+            "completeLatency": "0.000",
+            "transferred": 5680,
+            "host": "10.11.1.7",
+            "acked": 0,
+            "uptime": "42m 57s",
+            "uptimeSeconds": 2577,
+            "id": "[28-28]",
+            "failed": 0
+        }
+    ]
+}
+```
+
+## Profiling and Debugging GET Operations
+
+###  /api/v1/topology/:id/profiling/start/:host-port/:timeout (GET)
+
+Request to start profiler on worker with timeout. Returns status and link to profiler artifacts for worker.
+
+|Parameter |Value   |Description  |
+|----------|--------|-------------|
+|id   	   |String (required)| Topology Id  |
+|host-port |String (required)| Worker Id |
+|timeout |String (required)| Time out for profiler to stop in minutes |
+
+Response fields:
+
+|Field  |Value |Description|
+|-----	|----- |-----------|
+|id   | String | Worker id|
+|status | String | Response Status |
+|timeout | String | Requested timeout |
+|dumplink | String | Link to logviewer URL for worker profiler documents.|
+
+Examples:
+
+```no-highlight
+1. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/start/10.11.1.7:6701/10
+2. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/start/10.11.1.7:6701/5
+3. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/start/10.11.1.7:6701/20
+```
+
+Sample response:
+
+```json
+{
+   "status": "ok",
+   "id": "10.11.1.7:6701",
+   "timeout": "10",
+   "dumplink": "http:\/\/10.11.1.7:8000\/dumps\/wordcount-1-1446614150\/10.11.1.7%3A6701"
+}
+```
+
+###  /api/v1/topology/:id/profiling/dumpprofile/:host-port (GET)
+
+Request to dump profiler recording on worker. Returns status and worker id for the request.
+
+|Parameter |Value   |Description  |
+|----------|--------|-------------|
+|id   	   |String (required)| Topology Id  |
+|host-port |String (required)| Worker Id |
+
+Response fields:
+
+|Field  |Value |Description|
+|-----	|----- |-----------|
+|id   | String | Worker id|
+|status | String | Response Status |
+
+Examples:
+
+```no-highlight
+1. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/dumpprofile/10.11.1.7:6701
+```
+
+Sample response:
+
+```json
+{
+   "status": "ok",
+   "id": "10.11.1.7:6701"
+}
+```
+
+###  /api/v1/topology/:id/profiling/stop/:host-port (GET)
+
+Request to stop profiler on worker. Returns status and worker id for the request.
+
+|Parameter |Value   |Description  |
+|----------|--------|-------------|
+|id   	   |String (required)| Topology Id  |
+|host-port |String (required)| Worker Id |
+
+Response fields:
+
+|Field  |Value |Description|
+|-----	|----- |-----------|
+|id   | String | Worker id|
+|status | String | Response Status |
+
+Examples:
+
+```no-highlight
+1. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/stop/10.11.1.7:6701
+```
+
+Sample response:
+
+```json
+{
+   "status": "ok",
+   "id": "10.11.1.7:6701"
+}
+```
+
+###  /api/v1/topology/:id/profiling/dumpjstack/:host-port (GET)
+
+Request to dump jstack on worker. Returns status and worker id for the request.
+
+|Parameter |Value   |Description  |
+|----------|--------|-------------|
+|id   	   |String (required)| Topology Id  |
+|host-port |String (required)| Worker Id |
+
+Response fields:
+
+|Field  |Value |Description|
+|-----	|----- |-----------|
+|id   | String | Worker id|
+|status | String | Response Status |
+
+Examples:
+
+```no-highlight
+1. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/dumpjstack/10.11.1.7:6701
+```
+
+Sample response:
+
+```json
+{
+   "status": "ok",
+   "id": "10.11.1.7:6701"
+}
+```
+
+###  /api/v1/topology/:id/profiling/dumpheap/:host-port (GET)
+
+Request to dump heap (jmap) on worker. Returns status and worker id for the request.
+
+|Parameter |Value   |Description  |
+|----------|--------|-------------|
+|id   	   |String (required)| Topology Id  |
+|host-port |String (required)| Worker Id |
+
+Response fields:
+
+|Field  |Value |Description|
+|-----	|----- |-----------|
+|id   | String | Worker id|
+|status | String | Response Status |
+
+Examples:
+
+```no-highlight
+1. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/dumpheap/10.11.1.7:6701
+```
+
+Sample response:
+
+```json
+{
+   "status": "ok",
+   "id": "10.11.1.7:6701"
+}
+```
+
+###  /api/v1/topology/:id/profiling/restartworker/:host-port (GET)
+
+Request to restart the worker. Returns status and worker id for the request.
+
+|Parameter |Value   |Description  |
+|----------|--------|-------------|
+|id   	   |String (required)| Topology Id  |
+|host-port |String (required)| Worker Id |
+
+Response fields:
+
+|Field  |Value |Description|
+|-----	|----- |-----------|
+|id   | String | Worker id|
+|status | String | Response Status |
+
+Examples:
+
+```no-highlight
+1. http://ui-daemon-host-name:8080/api/v1/topology/wordcount-1-1446614150/profiling/restartworker/10.11.1.7:6701
+```
+
+Sample response:
+
+```json
+{
+   "status": "ok",
+   "id": "10.11.1.7:6701"
+}
+```
+
+## POST Operations
+
+### /api/v1/topology/:id/activate (POST)
+
+Activates a topology.
+
+|Parameter |Value   |Description  |
+|----------|--------|-------------|
+|id   	   |String (required)| Topology Id  |
+
+Sample Response:
+
+```json
+{"topologyOperation":"activate","topologyId":"wordcount-1-1420308665","status":"success"}
+```
+
+
+### /api/v1/topology/:id/deactivate (POST)
+
+Deactivates a topology.
+
+|Parameter |Value   |Description  |
+|----------|--------|-------------|
+|id   	   |String (required)| Topology Id  |
+
+Sample Response:
+
+```json
+{"topologyOperation":"deactivate","topologyId":"wordcount-1-1420308665","status":"success"}
+```
+
+
+### /api/v1/topology/:id/rebalance/:wait-time (POST)
+
+Rebalances a topology.
+
+|Parameter |Value   |Description  |
+|----------|--------|-------------|
+|id   	   |String (required)| Topology Id  |
+|wait-time |String (required)| Wait time before rebalance happens |
+|rebalanceOptions| Json (optional) | topology rebalance options |
+
+
+Sample rebalanceOptions json:
+
+```json
+{"rebalanceOptions" : {"numWorkers" : 2, "executors" : {"spout" :4, "count" : 10}}, "callback" : "foo"}
+```
+
+Examples:
+
+```no-highlight
+curl  -i -b ~/cookiejar.txt -c ~/cookiejar.txt -X POST  
+-H "Content-Type: application/json" 
+-d  '{"rebalanceOptions": {"numWorkers": 2, "executors": { "spout" : "5", "split": 7, "count": 5 }}, "callback":"foo"}' 
+http://localhost:8080/api/v1/topology/wordcount-1-1420308665/rebalance/0
+```
+
+Sample Response:
+
+```json
+{"topologyOperation":"rebalance","topologyId":"wordcount-1-1420308665","status":"success"}
+```
+
+
+
+### /api/v1/topology/:id/kill/:wait-time (POST)
+
+Kills a topology.
+
+|Parameter |Value   |Description  |
+|----------|--------|-------------|
+|id   	   |String (required)| Topology Id  |
+|wait-time |String (required)| Wait time before the topology is killed |
+
+Caution: Small wait times (0-5 seconds) may increase the probability of triggering the bug reported in
+[STORM-112](https://issues.apache.org/jira/browse/STORM-112), which may result in broken Supervisor
+daemons.
+
+Sample Response:
+
+```json
+{"topologyOperation":"kill","topologyId":"wordcount-1-1420308665","status":"success"}
+```
+
+## API errors
+
+The API returns 500 HTTP status codes in case of any errors.
+
+Sample response:
+
+```json
+{
+  "error": "Internal Server Error",
+  "errorMessage": "java.lang.NullPointerException\n\tat clojure.core$name.invoke(core.clj:1505)\n\tat backtype.storm.ui.core$component_page.invoke(core.clj:752)\n\tat backtype.storm.ui.core$fn__7766.invoke(core.clj:782)\n\tat compojure.core$make_route$fn__5755.invoke(core.clj:93)\n\tat compojure.core$if_route$fn__5743.invoke(core.clj:39)\n\tat compojure.core$if_method$fn__5736.invoke(core.clj:24)\n\tat compojure.core$routing$fn__5761.invoke(core.clj:106)\n\tat clojure.core$some.invoke(core.clj:2443)\n\tat compojure.core$routing.doInvoke(core.clj:106)\n\tat clojure.lang.RestFn.applyTo(RestFn.java:139)\n\tat clojure.core$apply.invoke(core.clj:619)\n\tat compojure.core$routes$fn__5765.invoke(core.clj:111)\n\tat ring.middleware.reload$wrap_reload$fn__6880.invoke(reload.clj:14)\n\tat backtype.storm.ui.core$catch_errors$fn__7800.invoke(core.clj:836)\n\tat ring.middleware.keyword_params$wrap_keyword_params$fn__6319.invoke(keyword_params.clj:27)\n\tat ring.middleware.nested_params$wrap_nest
 ed_params$fn__6358.invoke(nested_params.clj:65)\n\tat ring.middleware.params$wrap_params$fn__6291.invoke(params.clj:55)\n\tat ring.middleware.multipart_params$wrap_multipart_params$fn__6386.invoke(multipart_params.clj:103)\n\tat ring.middleware.flash$wrap_flash$fn__6675.invoke(flash.clj:14)\n\tat ring.middleware.session$wrap_session$fn__6664.invoke(session.clj:43)\n\tat ring.middleware.cookies$wrap_cookies$fn__6595.invoke(cookies.clj:160)\n\tat ring.adapter.jetty$proxy_handler$fn__6112.invoke(jetty.clj:16)\n\tat ring.adapter.jetty.proxy$org.mortbay.jetty.handler.AbstractHandler$0.handle(Unknown Source)\n\tat org.mortbay.jetty.handler.HandlerWrapper.handle(HandlerWrapper.java:152)\n\tat org.mortbay.jetty.Server.handle(Server.java:326)\n\tat org.mortbay.jetty.HttpConnection.handleRequest(HttpConnection.java:542)\n\tat org.mortbay.jetty.HttpConnection$RequestHandler.headerComplete(HttpConnection.java:928)\n\tat org.mortbay.jetty.HttpParser.parseNext(HttpParser.java:549)\n\tat org.mortb
 ay.jetty.HttpParser.parseAvailable(HttpParser.java:212)\n\tat org.mortbay.jetty.HttpConnection.handle(HttpConnection.java:404)\n\tat org.mortbay.jetty.bio.SocketConnector$Connection.run(SocketConnector.java:228)\n\tat org.mortbay.thread.QueuedThreadPool$PoolThread.run(QueuedThreadPool.java:582)\n"
+}
+```