Posted to commits@storm.apache.org by pt...@apache.org on 2015/12/01 23:04:39 UTC

[01/51] [partial] storm git commit: Update JStorm to latest release 2.1.0

Repository: storm
Updated Branches:
  refs/heads/jstorm-import 27fb31c1c -> 7eaf06513


http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/zk/Zookeeper.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/zk/Zookeeper.java b/jstorm-core/src/main/java/com/alibaba/jstorm/zk/Zookeeper.java
index ed9989d..ca40e16 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/zk/Zookeeper.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/zk/Zookeeper.java
@@ -53,8 +53,7 @@ public class Zookeeper {
 
     private static Logger LOG = LoggerFactory.getLogger(Zookeeper.class);
 
-    public CuratorFramework mkClient(Map conf, List<String> servers,
-            Object port, String root) {
+    public CuratorFramework mkClient(Map conf, List<String> servers, Object port, String root) {
         return mkClient(conf, servers, port, root, new DefaultWatcherCallBack());
     }
 
@@ -63,64 +62,50 @@ public class Zookeeper {
      * 
      * @return
      */
-    public CuratorFramework mkClient(Map conf, List<String> servers,
-            Object port, String root, final WatcherCallBack watcher) {
+    public CuratorFramework mkClient(Map conf, List<String> servers, Object port, String root, final WatcherCallBack watcher) {
 
         CuratorFramework fk = Utils.newCurator(conf, servers, port, root);
 
         fk.getCuratorListenable().addListener(new CuratorListener() {
             @Override
-            public void eventReceived(CuratorFramework _fk, CuratorEvent e)
-                    throws Exception {
+            public void eventReceived(CuratorFramework _fk, CuratorEvent e) throws Exception {
                 if (e.getType().equals(CuratorEventType.WATCHED)) {
                     WatchedEvent event = e.getWatchedEvent();
 
-                    watcher.execute(event.getState(), event.getType(),
-                            event.getPath());
+                    watcher.execute(event.getState(), event.getType(), event.getPath());
                 }
 
             }
         });
 
-        fk.getUnhandledErrorListenable().addListener(
-                new UnhandledErrorListener() {
-                    @Override
-                    public void unhandledError(String msg, Throwable error) {
-                        String errmsg =
-                                "Unrecoverable Zookeeper error, halting process: "
-                                        + msg;
-                        LOG.error(errmsg, error);
-                        JStormUtils.halt_process(1,
-                                "Unrecoverable Zookeeper error");
-
-                    }
-                });
+        fk.getUnhandledErrorListenable().addListener(new UnhandledErrorListener() {
+            @Override
+            public void unhandledError(String msg, Throwable error) {
+                String errmsg = "Unrecoverable Zookeeper error, halting process: " + msg;
+                LOG.error(errmsg, error);
+                JStormUtils.halt_process(1, "Unrecoverable Zookeeper error");
+
+            }
+        });
         fk.start();
         return fk;
     }
 
-    public String createNode(CuratorFramework zk, String path, byte[] data,
-            org.apache.zookeeper.CreateMode mode) throws Exception {
+    public String createNode(CuratorFramework zk, String path, byte[] data, org.apache.zookeeper.CreateMode mode) throws Exception {
 
         String npath = PathUtils.normalize_path(path);
 
-        return zk.create().withMode(mode).withACL(ZooDefs.Ids.OPEN_ACL_UNSAFE)
-                .forPath(npath, data);
+        return zk.create().withMode(mode).withACL(ZooDefs.Ids.OPEN_ACL_UNSAFE).forPath(npath, data);
     }
 
-    public String createNode(CuratorFramework zk, String path, byte[] data)
-            throws Exception {
-        return createNode(zk, path, data,
-                org.apache.zookeeper.CreateMode.PERSISTENT);
+    public String createNode(CuratorFramework zk, String path, byte[] data) throws Exception {
+        return createNode(zk, path, data, org.apache.zookeeper.CreateMode.PERSISTENT);
     }
 
-    public boolean existsNode(CuratorFramework zk, String path, boolean watch)
-            throws Exception {
+    public boolean existsNode(CuratorFramework zk, String path, boolean watch) throws Exception {
         Stat stat = null;
         if (watch) {
-            stat =
-                    zk.checkExists().watched()
-                            .forPath(PathUtils.normalize_path(path));
+            stat = zk.checkExists().watched().forPath(PathUtils.normalize_path(path));
         } else {
             stat = zk.checkExists().forPath(PathUtils.normalize_path(path));
         }
@@ -147,8 +132,7 @@ public class Zookeeper {
 
         mkdirs(zk, PathUtils.parent_path(npath));
         try {
-            createNode(zk, npath, JStormUtils.barr((byte) 7),
-                    org.apache.zookeeper.CreateMode.PERSISTENT);
+            createNode(zk, npath, JStormUtils.barr((byte) 7), org.apache.zookeeper.CreateMode.PERSISTENT);
         } catch (KeeperException e) {
             ;// this can happen when multiple clients doing mkdir at same
              // time
@@ -158,8 +142,7 @@ public class Zookeeper {
 
     }
 
-    public byte[] getData(CuratorFramework zk, String path, boolean watch)
-            throws Exception {
+    public byte[] getData(CuratorFramework zk, String path, boolean watch) throws Exception {
         String npath = PathUtils.normalize_path(path);
         try {
             if (existsNode(zk, npath, watch)) {
@@ -176,8 +159,7 @@ public class Zookeeper {
         return null;
     }
 
-    public List<String> getChildren(CuratorFramework zk, String path,
-            boolean watch) throws Exception {
+    public List<String> getChildren(CuratorFramework zk, String path, boolean watch) throws Exception {
 
         String npath = PathUtils.normalize_path(path);
 
@@ -188,41 +170,26 @@ public class Zookeeper {
         }
     }
 
-    public Stat setData(CuratorFramework zk, String path, byte[] data)
-            throws Exception {
+    public Stat setData(CuratorFramework zk, String path, byte[] data) throws Exception {
         String npath = PathUtils.normalize_path(path);
         return zk.setData().forPath(npath, data);
     }
 
-    public boolean exists(CuratorFramework zk, String path, boolean watch)
-            throws Exception {
+    public boolean exists(CuratorFramework zk, String path, boolean watch) throws Exception {
         return existsNode(zk, path, watch);
     }
 
-    public void deletereRcursive(CuratorFramework zk, String path)
-            throws Exception {
+    public void deletereRcursive(CuratorFramework zk, String path) throws Exception {
 
         String npath = PathUtils.normalize_path(path);
 
         if (existsNode(zk, npath, false)) {
-
-            List<String> childs = getChildren(zk, npath, false);
-
-            for (String child : childs) {
-
-                String childFullPath = PathUtils.full_path(npath, child);
-
-                deletereRcursive(zk, childFullPath);
-            }
-
-            deleteNode(zk, npath);
+            zk.delete().guaranteed().deletingChildrenIfNeeded().forPath(npath);
         }
     }
 
-    public static Factory mkInprocessZookeeper(String localdir, int port)
-            throws IOException, InterruptedException {
-        LOG.info("Starting inprocess zookeeper at port " + port + " and dir "
-                + localdir);
+    public static Factory mkInprocessZookeeper(String localdir, int port) throws IOException, InterruptedException {
+        LOG.info("Starting inprocess zookeeper at port " + port + " and dir " + localdir);
         File localfile = new File(localdir);
         ZooKeeperServer zk = new ZooKeeperServer(localfile, localfile, 2000);
         Factory factory = new Factory(new InetSocketAddress(port), 0);

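The only behavioral change in the Zookeeper.java diff above is in deletereRcursive: the hand-rolled recursion over getChildren() and deleteNode() is replaced by Curator's built-in guaranteed, recursive delete. A minimal sketch of that Curator call in isolation, assuming a locally started client (the connection string and path below are placeholders, not values from this commit; JStorm builds its client via Utils.newCurator() inside mkClient() instead):

    import org.apache.curator.framework.CuratorFramework;
    import org.apache.curator.framework.CuratorFrameworkFactory;
    import org.apache.curator.retry.ExponentialBackoffRetry;

    public class RecursiveDeleteSketch {
        public static void main(String[] args) throws Exception {
            // Placeholder connection string and retry policy for illustration.
            CuratorFramework zk = CuratorFrameworkFactory.newClient(
                    "localhost:2181", new ExponentialBackoffRetry(1000, 3));
            zk.start();
            try {
                // guaranteed(): Curator keeps retrying the delete in the
                // background until it succeeds, even across connection loss.
                // deletingChildrenIfNeeded(): removes the subtree bottom-up,
                // which is what the deleted manual loop used to do.
                zk.delete().guaranteed().deletingChildrenIfNeeded()
                        .forPath("/jstorm/example");
            } finally {
                zk.close();
            }
        }
    }

One caveat worth noting: guaranteed() promises eventual deletion rather than immediate deletion, so a caller that re-checks existsNode() right after a connection loss may still briefly observe the node.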

[41/51] [partial] storm git commit: Update JStorm to latest release 2.1.0

Posted by pt...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/Nimbus.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/Nimbus.java b/jstorm-core/src/main/java/backtype/storm/generated/Nimbus.java
index c25d643..b0fb528 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/Nimbus.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/Nimbus.java
@@ -34,138 +34,170 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class Nimbus {
 
   public interface Iface {
 
-    public void submitTopology(String name, String uploadedJarLocation, String jsonConf, StormTopology topology) throws AlreadyAliveException, InvalidTopologyException, TopologyAssignException, org.apache.thrift.TException;
+    public void submitTopology(String name, String uploadedJarLocation, String jsonConf, StormTopology topology) throws AlreadyAliveException, InvalidTopologyException, TopologyAssignException, TException;
 
-    public void submitTopologyWithOpts(String name, String uploadedJarLocation, String jsonConf, StormTopology topology, SubmitOptions options) throws AlreadyAliveException, InvalidTopologyException, TopologyAssignException, org.apache.thrift.TException;
+    public void submitTopologyWithOpts(String name, String uploadedJarLocation, String jsonConf, StormTopology topology, SubmitOptions options) throws AlreadyAliveException, InvalidTopologyException, TopologyAssignException, TException;
 
-    public void killTopology(String name) throws NotAliveException, org.apache.thrift.TException;
+    public void killTopology(String name) throws NotAliveException, TException;
 
-    public void killTopologyWithOpts(String name, KillOptions options) throws NotAliveException, org.apache.thrift.TException;
+    public void killTopologyWithOpts(String name, KillOptions options) throws NotAliveException, TException;
 
-    public void activate(String name) throws NotAliveException, org.apache.thrift.TException;
+    public void activate(String name) throws NotAliveException, TException;
 
-    public void deactivate(String name) throws NotAliveException, org.apache.thrift.TException;
+    public void deactivate(String name) throws NotAliveException, TException;
 
-    public void rebalance(String name, RebalanceOptions options) throws NotAliveException, InvalidTopologyException, org.apache.thrift.TException;
+    public void rebalance(String name, RebalanceOptions options) throws NotAliveException, InvalidTopologyException, TException;
 
-    public void metricMonitor(String name, MonitorOptions options) throws NotAliveException, org.apache.thrift.TException;
+    public void metricMonitor(String name, MonitorOptions options) throws NotAliveException, TException;
 
-    public void restart(String name, String jsonConf) throws NotAliveException, InvalidTopologyException, TopologyAssignException, org.apache.thrift.TException;
+    public void restart(String name, String jsonConf) throws NotAliveException, InvalidTopologyException, TopologyAssignException, TException;
 
-    public void beginLibUpload(String libName) throws org.apache.thrift.TException;
+    public void beginLibUpload(String libName) throws TException;
 
-    public String beginFileUpload() throws org.apache.thrift.TException;
+    public String beginFileUpload() throws TException;
 
-    public void uploadChunk(String location, ByteBuffer chunk) throws org.apache.thrift.TException;
+    public void uploadChunk(String location, ByteBuffer chunk) throws TException;
 
-    public void finishFileUpload(String location) throws org.apache.thrift.TException;
+    public void finishFileUpload(String location) throws TException;
 
-    public String beginFileDownload(String file) throws org.apache.thrift.TException;
+    public String beginFileDownload(String file) throws TException;
 
-    public ByteBuffer downloadChunk(String id) throws org.apache.thrift.TException;
+    public ByteBuffer downloadChunk(String id) throws TException;
 
-    public void finishFileDownload(String id) throws org.apache.thrift.TException;
+    public void finishFileDownload(String id) throws TException;
 
-    public String getNimbusConf() throws org.apache.thrift.TException;
+    public String getNimbusConf() throws TException;
 
-    public String getTopologyConf(String id) throws NotAliveException, org.apache.thrift.TException;
+    public String getTopologyConf(String id) throws NotAliveException, TException;
 
-    public String getTopologyId(String topologyName) throws NotAliveException, org.apache.thrift.TException;
+    public String getTopologyId(String topologyName) throws NotAliveException, TException;
 
-    public ClusterSummary getClusterInfo() throws org.apache.thrift.TException;
+    public ClusterSummary getClusterInfo() throws TException;
 
-    public SupervisorWorkers getSupervisorWorkers(String host) throws NotAliveException, org.apache.thrift.TException;
+    public SupervisorWorkers getSupervisorWorkers(String host) throws NotAliveException, TException;
 
-    public TopologyInfo getTopologyInfo(String id) throws NotAliveException, org.apache.thrift.TException;
+    public TopologyInfo getTopologyInfo(String id) throws NotAliveException, TException;
 
-    public TopologyInfo getTopologyInfoByName(String topologyName) throws NotAliveException, org.apache.thrift.TException;
+    public TopologyInfo getTopologyInfoByName(String topologyName) throws NotAliveException, TException;
 
-    public StormTopology getTopology(String id) throws NotAliveException, org.apache.thrift.TException;
+    public StormTopology getTopology(String id) throws NotAliveException, TException;
 
-    public StormTopology getUserTopology(String id) throws NotAliveException, org.apache.thrift.TException;
+    public StormTopology getUserTopology(String id) throws NotAliveException, TException;
 
-    public void workerUploadMetric(WorkerUploadMetrics uploadMetrics) throws org.apache.thrift.TException;
+    public void uploadTopologyMetrics(String topologyId, TopologyMetric topologyMetrics) throws TException;
 
-    public TopologyMetric getTopologyMetric(String topologyName) throws org.apache.thrift.TException;
+    public Map<String,Long> registerMetrics(String topologyId, Set<String> metrics) throws TException;
 
-    public NettyMetric getNettyMetric(String topologyName, int pos) throws org.apache.thrift.TException;
+    public TopologyMetric getTopologyMetrics(String topologyId) throws TException;
 
-    public NettyMetric getServerNettyMetric(String topologyName, String serverName) throws org.apache.thrift.TException;
+    public List<MetricInfo> getMetrics(String topologyId, int type) throws TException;
 
-    public String getVersion() throws org.apache.thrift.TException;
+    public MetricInfo getNettyMetrics(String topologyId) throws TException;
 
-    public void updateConf(String name, String conf) throws NotAliveException, InvalidTopologyException, org.apache.thrift.TException;
+    public MetricInfo getNettyMetricsByHost(String topologyId, String host) throws TException;
+
+    public MetricInfo getPagingNettyMetrics(String topologyId, String host, int page) throws TException;
+
+    public int getNettyMetricSizeByHost(String topologyId, String host) throws TException;
+
+    public MetricInfo getTaskMetrics(String topologyId, String component) throws TException;
+
+    public List<MetricInfo> getTaskAndStreamMetrics(String topologyId, int taskId) throws TException;
+
+    public List<MetricInfo> getSummarizedTopologyMetrics(String topologyId) throws TException;
+
+    public String getVersion() throws TException;
+
+    public void updateTopology(String name, String uploadedLocation, String updateConf) throws NotAliveException, InvalidTopologyException, TException;
+
+    public void updateTaskHeartbeat(TopologyTaskHbInfo taskHbs) throws TException;
 
   }
 
   public interface AsyncIface {
 
-    public void submitTopology(String name, String uploadedJarLocation, String jsonConf, StormTopology topology, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void submitTopology(String name, String uploadedJarLocation, String jsonConf, StormTopology topology, AsyncMethodCallback resultHandler) throws TException;
+
+    public void submitTopologyWithOpts(String name, String uploadedJarLocation, String jsonConf, StormTopology topology, SubmitOptions options, AsyncMethodCallback resultHandler) throws TException;
+
+    public void killTopology(String name, AsyncMethodCallback resultHandler) throws TException;
+
+    public void killTopologyWithOpts(String name, KillOptions options, AsyncMethodCallback resultHandler) throws TException;
+
+    public void activate(String name, AsyncMethodCallback resultHandler) throws TException;
+
+    public void deactivate(String name, AsyncMethodCallback resultHandler) throws TException;
+
+    public void rebalance(String name, RebalanceOptions options, AsyncMethodCallback resultHandler) throws TException;
+
+    public void metricMonitor(String name, MonitorOptions options, AsyncMethodCallback resultHandler) throws TException;
+
+    public void restart(String name, String jsonConf, AsyncMethodCallback resultHandler) throws TException;
 
-    public void submitTopologyWithOpts(String name, String uploadedJarLocation, String jsonConf, StormTopology topology, SubmitOptions options, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void beginLibUpload(String libName, AsyncMethodCallback resultHandler) throws TException;
 
-    public void killTopology(String name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void beginFileUpload(AsyncMethodCallback resultHandler) throws TException;
 
-    public void killTopologyWithOpts(String name, KillOptions options, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void uploadChunk(String location, ByteBuffer chunk, AsyncMethodCallback resultHandler) throws TException;
 
-    public void activate(String name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void finishFileUpload(String location, AsyncMethodCallback resultHandler) throws TException;
 
-    public void deactivate(String name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void beginFileDownload(String file, AsyncMethodCallback resultHandler) throws TException;
 
-    public void rebalance(String name, RebalanceOptions options, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void downloadChunk(String id, AsyncMethodCallback resultHandler) throws TException;
 
-    public void metricMonitor(String name, MonitorOptions options, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void finishFileDownload(String id, AsyncMethodCallback resultHandler) throws TException;
 
-    public void restart(String name, String jsonConf, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void getNimbusConf(AsyncMethodCallback resultHandler) throws TException;
 
-    public void beginLibUpload(String libName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void getTopologyConf(String id, AsyncMethodCallback resultHandler) throws TException;
 
-    public void beginFileUpload(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void getTopologyId(String topologyName, AsyncMethodCallback resultHandler) throws TException;
 
-    public void uploadChunk(String location, ByteBuffer chunk, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void getClusterInfo(AsyncMethodCallback resultHandler) throws TException;
 
-    public void finishFileUpload(String location, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void getSupervisorWorkers(String host, AsyncMethodCallback resultHandler) throws TException;
 
-    public void beginFileDownload(String file, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void getTopologyInfo(String id, AsyncMethodCallback resultHandler) throws TException;
 
-    public void downloadChunk(String id, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void getTopologyInfoByName(String topologyName, AsyncMethodCallback resultHandler) throws TException;
 
-    public void finishFileDownload(String id, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void getTopology(String id, AsyncMethodCallback resultHandler) throws TException;
 
-    public void getNimbusConf(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void getUserTopology(String id, AsyncMethodCallback resultHandler) throws TException;
 
-    public void getTopologyConf(String id, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void uploadTopologyMetrics(String topologyId, TopologyMetric topologyMetrics, AsyncMethodCallback resultHandler) throws TException;
 
-    public void getTopologyId(String topologyName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void registerMetrics(String topologyId, Set<String> metrics, AsyncMethodCallback resultHandler) throws TException;
 
-    public void getClusterInfo(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void getTopologyMetrics(String topologyId, AsyncMethodCallback resultHandler) throws TException;
 
-    public void getSupervisorWorkers(String host, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void getMetrics(String topologyId, int type, AsyncMethodCallback resultHandler) throws TException;
 
-    public void getTopologyInfo(String id, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void getNettyMetrics(String topologyId, AsyncMethodCallback resultHandler) throws TException;
 
-    public void getTopologyInfoByName(String topologyName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void getNettyMetricsByHost(String topologyId, String host, AsyncMethodCallback resultHandler) throws TException;
 
-    public void getTopology(String id, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void getPagingNettyMetrics(String topologyId, String host, int page, AsyncMethodCallback resultHandler) throws TException;
 
-    public void getUserTopology(String id, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void getNettyMetricSizeByHost(String topologyId, String host, AsyncMethodCallback resultHandler) throws TException;
 
-    public void workerUploadMetric(WorkerUploadMetrics uploadMetrics, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void getTaskMetrics(String topologyId, String component, AsyncMethodCallback resultHandler) throws TException;
 
-    public void getTopologyMetric(String topologyName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void getTaskAndStreamMetrics(String topologyId, int taskId, AsyncMethodCallback resultHandler) throws TException;
 
-    public void getNettyMetric(String topologyName, int pos, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void getSummarizedTopologyMetrics(String topologyId, AsyncMethodCallback resultHandler) throws TException;
 
-    public void getServerNettyMetric(String topologyName, String serverName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void getVersion(AsyncMethodCallback resultHandler) throws TException;
 
-    public void getVersion(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void updateTopology(String name, String uploadedLocation, String updateConf, AsyncMethodCallback resultHandler) throws TException;
 
-    public void updateConf(String name, String conf, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void updateTaskHeartbeat(TopologyTaskHbInfo taskHbs, AsyncMethodCallback resultHandler) throws TException;
 
   }
 
@@ -189,13 +221,13 @@ public class Nimbus {
       super(iprot, oprot);
     }
 
-    public void submitTopology(String name, String uploadedJarLocation, String jsonConf, StormTopology topology) throws AlreadyAliveException, InvalidTopologyException, TopologyAssignException, org.apache.thrift.TException
+    public void submitTopology(String name, String uploadedJarLocation, String jsonConf, StormTopology topology) throws AlreadyAliveException, InvalidTopologyException, TopologyAssignException, TException
     {
       send_submitTopology(name, uploadedJarLocation, jsonConf, topology);
       recv_submitTopology();
     }
 
-    public void send_submitTopology(String name, String uploadedJarLocation, String jsonConf, StormTopology topology) throws org.apache.thrift.TException
+    public void send_submitTopology(String name, String uploadedJarLocation, String jsonConf, StormTopology topology) throws TException
     {
       submitTopology_args args = new submitTopology_args();
       args.set_name(name);
@@ -205,7 +237,7 @@ public class Nimbus {
       sendBase("submitTopology", args);
     }
 
-    public void recv_submitTopology() throws AlreadyAliveException, InvalidTopologyException, TopologyAssignException, org.apache.thrift.TException
+    public void recv_submitTopology() throws AlreadyAliveException, InvalidTopologyException, TopologyAssignException, TException
     {
       submitTopology_result result = new submitTopology_result();
       receiveBase(result, "submitTopology");
@@ -221,13 +253,13 @@ public class Nimbus {
       return;
     }
 
-    public void submitTopologyWithOpts(String name, String uploadedJarLocation, String jsonConf, StormTopology topology, SubmitOptions options) throws AlreadyAliveException, InvalidTopologyException, TopologyAssignException, org.apache.thrift.TException
+    public void submitTopologyWithOpts(String name, String uploadedJarLocation, String jsonConf, StormTopology topology, SubmitOptions options) throws AlreadyAliveException, InvalidTopologyException, TopologyAssignException, TException
     {
       send_submitTopologyWithOpts(name, uploadedJarLocation, jsonConf, topology, options);
       recv_submitTopologyWithOpts();
     }
 
-    public void send_submitTopologyWithOpts(String name, String uploadedJarLocation, String jsonConf, StormTopology topology, SubmitOptions options) throws org.apache.thrift.TException
+    public void send_submitTopologyWithOpts(String name, String uploadedJarLocation, String jsonConf, StormTopology topology, SubmitOptions options) throws TException
     {
       submitTopologyWithOpts_args args = new submitTopologyWithOpts_args();
       args.set_name(name);
@@ -238,7 +270,7 @@ public class Nimbus {
       sendBase("submitTopologyWithOpts", args);
     }
 
-    public void recv_submitTopologyWithOpts() throws AlreadyAliveException, InvalidTopologyException, TopologyAssignException, org.apache.thrift.TException
+    public void recv_submitTopologyWithOpts() throws AlreadyAliveException, InvalidTopologyException, TopologyAssignException, TException
     {
       submitTopologyWithOpts_result result = new submitTopologyWithOpts_result();
       receiveBase(result, "submitTopologyWithOpts");
@@ -254,20 +286,20 @@ public class Nimbus {
       return;
     }
 
-    public void killTopology(String name) throws NotAliveException, org.apache.thrift.TException
+    public void killTopology(String name) throws NotAliveException, TException
     {
       send_killTopology(name);
       recv_killTopology();
     }
 
-    public void send_killTopology(String name) throws org.apache.thrift.TException
+    public void send_killTopology(String name) throws TException
     {
       killTopology_args args = new killTopology_args();
       args.set_name(name);
       sendBase("killTopology", args);
     }
 
-    public void recv_killTopology() throws NotAliveException, org.apache.thrift.TException
+    public void recv_killTopology() throws NotAliveException, TException
     {
       killTopology_result result = new killTopology_result();
       receiveBase(result, "killTopology");
@@ -277,13 +309,13 @@ public class Nimbus {
       return;
     }
 
-    public void killTopologyWithOpts(String name, KillOptions options) throws NotAliveException, org.apache.thrift.TException
+    public void killTopologyWithOpts(String name, KillOptions options) throws NotAliveException, TException
     {
       send_killTopologyWithOpts(name, options);
       recv_killTopologyWithOpts();
     }
 
-    public void send_killTopologyWithOpts(String name, KillOptions options) throws org.apache.thrift.TException
+    public void send_killTopologyWithOpts(String name, KillOptions options) throws TException
     {
       killTopologyWithOpts_args args = new killTopologyWithOpts_args();
       args.set_name(name);
@@ -291,7 +323,7 @@ public class Nimbus {
       sendBase("killTopologyWithOpts", args);
     }
 
-    public void recv_killTopologyWithOpts() throws NotAliveException, org.apache.thrift.TException
+    public void recv_killTopologyWithOpts() throws NotAliveException, TException
     {
       killTopologyWithOpts_result result = new killTopologyWithOpts_result();
       receiveBase(result, "killTopologyWithOpts");
@@ -301,20 +333,20 @@ public class Nimbus {
       return;
     }
 
-    public void activate(String name) throws NotAliveException, org.apache.thrift.TException
+    public void activate(String name) throws NotAliveException, TException
     {
       send_activate(name);
       recv_activate();
     }
 
-    public void send_activate(String name) throws org.apache.thrift.TException
+    public void send_activate(String name) throws TException
     {
       activate_args args = new activate_args();
       args.set_name(name);
       sendBase("activate", args);
     }
 
-    public void recv_activate() throws NotAliveException, org.apache.thrift.TException
+    public void recv_activate() throws NotAliveException, TException
     {
       activate_result result = new activate_result();
       receiveBase(result, "activate");
@@ -324,20 +356,20 @@ public class Nimbus {
       return;
     }
 
-    public void deactivate(String name) throws NotAliveException, org.apache.thrift.TException
+    public void deactivate(String name) throws NotAliveException, TException
     {
       send_deactivate(name);
       recv_deactivate();
     }
 
-    public void send_deactivate(String name) throws org.apache.thrift.TException
+    public void send_deactivate(String name) throws TException
     {
       deactivate_args args = new deactivate_args();
       args.set_name(name);
       sendBase("deactivate", args);
     }
 
-    public void recv_deactivate() throws NotAliveException, org.apache.thrift.TException
+    public void recv_deactivate() throws NotAliveException, TException
     {
       deactivate_result result = new deactivate_result();
       receiveBase(result, "deactivate");
@@ -347,13 +379,13 @@ public class Nimbus {
       return;
     }
 
-    public void rebalance(String name, RebalanceOptions options) throws NotAliveException, InvalidTopologyException, org.apache.thrift.TException
+    public void rebalance(String name, RebalanceOptions options) throws NotAliveException, InvalidTopologyException, TException
     {
       send_rebalance(name, options);
       recv_rebalance();
     }
 
-    public void send_rebalance(String name, RebalanceOptions options) throws org.apache.thrift.TException
+    public void send_rebalance(String name, RebalanceOptions options) throws TException
     {
       rebalance_args args = new rebalance_args();
       args.set_name(name);
@@ -361,7 +393,7 @@ public class Nimbus {
       sendBase("rebalance", args);
     }
 
-    public void recv_rebalance() throws NotAliveException, InvalidTopologyException, org.apache.thrift.TException
+    public void recv_rebalance() throws NotAliveException, InvalidTopologyException, TException
     {
       rebalance_result result = new rebalance_result();
       receiveBase(result, "rebalance");
@@ -374,13 +406,13 @@ public class Nimbus {
       return;
     }
 
-    public void metricMonitor(String name, MonitorOptions options) throws NotAliveException, org.apache.thrift.TException
+    public void metricMonitor(String name, MonitorOptions options) throws NotAliveException, TException
     {
       send_metricMonitor(name, options);
       recv_metricMonitor();
     }
 
-    public void send_metricMonitor(String name, MonitorOptions options) throws org.apache.thrift.TException
+    public void send_metricMonitor(String name, MonitorOptions options) throws TException
     {
       metricMonitor_args args = new metricMonitor_args();
       args.set_name(name);
@@ -388,7 +420,7 @@ public class Nimbus {
       sendBase("metricMonitor", args);
     }
 
-    public void recv_metricMonitor() throws NotAliveException, org.apache.thrift.TException
+    public void recv_metricMonitor() throws NotAliveException, TException
     {
       metricMonitor_result result = new metricMonitor_result();
       receiveBase(result, "metricMonitor");
@@ -398,13 +430,13 @@ public class Nimbus {
       return;
     }
 
-    public void restart(String name, String jsonConf) throws NotAliveException, InvalidTopologyException, TopologyAssignException, org.apache.thrift.TException
+    public void restart(String name, String jsonConf) throws NotAliveException, InvalidTopologyException, TopologyAssignException, TException
     {
       send_restart(name, jsonConf);
       recv_restart();
     }
 
-    public void send_restart(String name, String jsonConf) throws org.apache.thrift.TException
+    public void send_restart(String name, String jsonConf) throws TException
     {
       restart_args args = new restart_args();
       args.set_name(name);
@@ -412,7 +444,7 @@ public class Nimbus {
       sendBase("restart", args);
     }
 
-    public void recv_restart() throws NotAliveException, InvalidTopologyException, TopologyAssignException, org.apache.thrift.TException
+    public void recv_restart() throws NotAliveException, InvalidTopologyException, TopologyAssignException, TException
     {
       restart_result result = new restart_result();
       receiveBase(result, "restart");
@@ -428,39 +460,39 @@ public class Nimbus {
       return;
     }
 
-    public void beginLibUpload(String libName) throws org.apache.thrift.TException
+    public void beginLibUpload(String libName) throws TException
     {
       send_beginLibUpload(libName);
       recv_beginLibUpload();
     }
 
-    public void send_beginLibUpload(String libName) throws org.apache.thrift.TException
+    public void send_beginLibUpload(String libName) throws TException
     {
       beginLibUpload_args args = new beginLibUpload_args();
       args.set_libName(libName);
       sendBase("beginLibUpload", args);
     }
 
-    public void recv_beginLibUpload() throws org.apache.thrift.TException
+    public void recv_beginLibUpload() throws TException
     {
       beginLibUpload_result result = new beginLibUpload_result();
       receiveBase(result, "beginLibUpload");
       return;
     }
 
-    public String beginFileUpload() throws org.apache.thrift.TException
+    public String beginFileUpload() throws TException
     {
       send_beginFileUpload();
       return recv_beginFileUpload();
     }
 
-    public void send_beginFileUpload() throws org.apache.thrift.TException
+    public void send_beginFileUpload() throws TException
     {
       beginFileUpload_args args = new beginFileUpload_args();
       sendBase("beginFileUpload", args);
     }
 
-    public String recv_beginFileUpload() throws org.apache.thrift.TException
+    public String recv_beginFileUpload() throws TException
     {
       beginFileUpload_result result = new beginFileUpload_result();
       receiveBase(result, "beginFileUpload");
@@ -470,13 +502,13 @@ public class Nimbus {
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "beginFileUpload failed: unknown result");
     }
 
-    public void uploadChunk(String location, ByteBuffer chunk) throws org.apache.thrift.TException
+    public void uploadChunk(String location, ByteBuffer chunk) throws TException
     {
       send_uploadChunk(location, chunk);
       recv_uploadChunk();
     }
 
-    public void send_uploadChunk(String location, ByteBuffer chunk) throws org.apache.thrift.TException
+    public void send_uploadChunk(String location, ByteBuffer chunk) throws TException
     {
       uploadChunk_args args = new uploadChunk_args();
       args.set_location(location);
@@ -484,47 +516,47 @@ public class Nimbus {
       sendBase("uploadChunk", args);
     }
 
-    public void recv_uploadChunk() throws org.apache.thrift.TException
+    public void recv_uploadChunk() throws TException
     {
       uploadChunk_result result = new uploadChunk_result();
       receiveBase(result, "uploadChunk");
       return;
     }
 
-    public void finishFileUpload(String location) throws org.apache.thrift.TException
+    public void finishFileUpload(String location) throws TException
     {
       send_finishFileUpload(location);
       recv_finishFileUpload();
     }
 
-    public void send_finishFileUpload(String location) throws org.apache.thrift.TException
+    public void send_finishFileUpload(String location) throws TException
     {
       finishFileUpload_args args = new finishFileUpload_args();
       args.set_location(location);
       sendBase("finishFileUpload", args);
     }
 
-    public void recv_finishFileUpload() throws org.apache.thrift.TException
+    public void recv_finishFileUpload() throws TException
     {
       finishFileUpload_result result = new finishFileUpload_result();
       receiveBase(result, "finishFileUpload");
       return;
     }
 
-    public String beginFileDownload(String file) throws org.apache.thrift.TException
+    public String beginFileDownload(String file) throws TException
     {
       send_beginFileDownload(file);
       return recv_beginFileDownload();
     }
 
-    public void send_beginFileDownload(String file) throws org.apache.thrift.TException
+    public void send_beginFileDownload(String file) throws TException
     {
       beginFileDownload_args args = new beginFileDownload_args();
       args.set_file(file);
       sendBase("beginFileDownload", args);
     }
 
-    public String recv_beginFileDownload() throws org.apache.thrift.TException
+    public String recv_beginFileDownload() throws TException
     {
       beginFileDownload_result result = new beginFileDownload_result();
       receiveBase(result, "beginFileDownload");
@@ -534,20 +566,20 @@ public class Nimbus {
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "beginFileDownload failed: unknown result");
     }
 
-    public ByteBuffer downloadChunk(String id) throws org.apache.thrift.TException
+    public ByteBuffer downloadChunk(String id) throws TException
     {
       send_downloadChunk(id);
       return recv_downloadChunk();
     }
 
-    public void send_downloadChunk(String id) throws org.apache.thrift.TException
+    public void send_downloadChunk(String id) throws TException
     {
       downloadChunk_args args = new downloadChunk_args();
       args.set_id(id);
       sendBase("downloadChunk", args);
     }
 
-    public ByteBuffer recv_downloadChunk() throws org.apache.thrift.TException
+    public ByteBuffer recv_downloadChunk() throws TException
     {
       downloadChunk_result result = new downloadChunk_result();
       receiveBase(result, "downloadChunk");
@@ -557,39 +589,39 @@ public class Nimbus {
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "downloadChunk failed: unknown result");
     }
 
-    public void finishFileDownload(String id) throws org.apache.thrift.TException
+    public void finishFileDownload(String id) throws TException
     {
       send_finishFileDownload(id);
       recv_finishFileDownload();
     }
 
-    public void send_finishFileDownload(String id) throws org.apache.thrift.TException
+    public void send_finishFileDownload(String id) throws TException
     {
       finishFileDownload_args args = new finishFileDownload_args();
       args.set_id(id);
       sendBase("finishFileDownload", args);
     }
 
-    public void recv_finishFileDownload() throws org.apache.thrift.TException
+    public void recv_finishFileDownload() throws TException
     {
       finishFileDownload_result result = new finishFileDownload_result();
       receiveBase(result, "finishFileDownload");
       return;
     }
 
-    public String getNimbusConf() throws org.apache.thrift.TException
+    public String getNimbusConf() throws TException
     {
       send_getNimbusConf();
       return recv_getNimbusConf();
     }
 
-    public void send_getNimbusConf() throws org.apache.thrift.TException
+    public void send_getNimbusConf() throws TException
     {
       getNimbusConf_args args = new getNimbusConf_args();
       sendBase("getNimbusConf", args);
     }
 
-    public String recv_getNimbusConf() throws org.apache.thrift.TException
+    public String recv_getNimbusConf() throws TException
     {
       getNimbusConf_result result = new getNimbusConf_result();
       receiveBase(result, "getNimbusConf");
@@ -599,20 +631,20 @@ public class Nimbus {
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getNimbusConf failed: unknown result");
     }
 
-    public String getTopologyConf(String id) throws NotAliveException, org.apache.thrift.TException
+    public String getTopologyConf(String id) throws NotAliveException, TException
     {
       send_getTopologyConf(id);
       return recv_getTopologyConf();
     }
 
-    public void send_getTopologyConf(String id) throws org.apache.thrift.TException
+    public void send_getTopologyConf(String id) throws TException
     {
       getTopologyConf_args args = new getTopologyConf_args();
       args.set_id(id);
       sendBase("getTopologyConf", args);
     }
 
-    public String recv_getTopologyConf() throws NotAliveException, org.apache.thrift.TException
+    public String recv_getTopologyConf() throws NotAliveException, TException
     {
       getTopologyConf_result result = new getTopologyConf_result();
       receiveBase(result, "getTopologyConf");
@@ -625,20 +657,20 @@ public class Nimbus {
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getTopologyConf failed: unknown result");
     }
 
-    public String getTopologyId(String topologyName) throws NotAliveException, org.apache.thrift.TException
+    public String getTopologyId(String topologyName) throws NotAliveException, TException
     {
       send_getTopologyId(topologyName);
       return recv_getTopologyId();
     }
 
-    public void send_getTopologyId(String topologyName) throws org.apache.thrift.TException
+    public void send_getTopologyId(String topologyName) throws TException
     {
       getTopologyId_args args = new getTopologyId_args();
       args.set_topologyName(topologyName);
       sendBase("getTopologyId", args);
     }
 
-    public String recv_getTopologyId() throws NotAliveException, org.apache.thrift.TException
+    public String recv_getTopologyId() throws NotAliveException, TException
     {
       getTopologyId_result result = new getTopologyId_result();
       receiveBase(result, "getTopologyId");
@@ -651,19 +683,19 @@ public class Nimbus {
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getTopologyId failed: unknown result");
     }
 
-    public ClusterSummary getClusterInfo() throws org.apache.thrift.TException
+    public ClusterSummary getClusterInfo() throws TException
     {
       send_getClusterInfo();
       return recv_getClusterInfo();
     }
 
-    public void send_getClusterInfo() throws org.apache.thrift.TException
+    public void send_getClusterInfo() throws TException
     {
       getClusterInfo_args args = new getClusterInfo_args();
       sendBase("getClusterInfo", args);
     }
 
-    public ClusterSummary recv_getClusterInfo() throws org.apache.thrift.TException
+    public ClusterSummary recv_getClusterInfo() throws TException
     {
       getClusterInfo_result result = new getClusterInfo_result();
       receiveBase(result, "getClusterInfo");
@@ -673,20 +705,20 @@ public class Nimbus {
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getClusterInfo failed: unknown result");
     }
 
-    public SupervisorWorkers getSupervisorWorkers(String host) throws NotAliveException, org.apache.thrift.TException
+    public SupervisorWorkers getSupervisorWorkers(String host) throws NotAliveException, TException
     {
       send_getSupervisorWorkers(host);
       return recv_getSupervisorWorkers();
     }
 
-    public void send_getSupervisorWorkers(String host) throws org.apache.thrift.TException
+    public void send_getSupervisorWorkers(String host) throws TException
     {
       getSupervisorWorkers_args args = new getSupervisorWorkers_args();
       args.set_host(host);
       sendBase("getSupervisorWorkers", args);
     }
 
-    public SupervisorWorkers recv_getSupervisorWorkers() throws NotAliveException, org.apache.thrift.TException
+    public SupervisorWorkers recv_getSupervisorWorkers() throws NotAliveException, TException
     {
       getSupervisorWorkers_result result = new getSupervisorWorkers_result();
       receiveBase(result, "getSupervisorWorkers");
@@ -699,20 +731,20 @@ public class Nimbus {
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getSupervisorWorkers failed: unknown result");
     }
 
-    public TopologyInfo getTopologyInfo(String id) throws NotAliveException, org.apache.thrift.TException
+    public TopologyInfo getTopologyInfo(String id) throws NotAliveException, TException
     {
       send_getTopologyInfo(id);
       return recv_getTopologyInfo();
     }
 
-    public void send_getTopologyInfo(String id) throws org.apache.thrift.TException
+    public void send_getTopologyInfo(String id) throws TException
     {
       getTopologyInfo_args args = new getTopologyInfo_args();
       args.set_id(id);
       sendBase("getTopologyInfo", args);
     }
 
-    public TopologyInfo recv_getTopologyInfo() throws NotAliveException, org.apache.thrift.TException
+    public TopologyInfo recv_getTopologyInfo() throws NotAliveException, TException
     {
       getTopologyInfo_result result = new getTopologyInfo_result();
       receiveBase(result, "getTopologyInfo");
@@ -725,20 +757,20 @@ public class Nimbus {
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getTopologyInfo failed: unknown result");
     }
 
-    public TopologyInfo getTopologyInfoByName(String topologyName) throws NotAliveException, org.apache.thrift.TException
+    public TopologyInfo getTopologyInfoByName(String topologyName) throws NotAliveException, TException
     {
       send_getTopologyInfoByName(topologyName);
       return recv_getTopologyInfoByName();
     }
 
-    public void send_getTopologyInfoByName(String topologyName) throws org.apache.thrift.TException
+    public void send_getTopologyInfoByName(String topologyName) throws TException
     {
       getTopologyInfoByName_args args = new getTopologyInfoByName_args();
       args.set_topologyName(topologyName);
       sendBase("getTopologyInfoByName", args);
     }
 
-    public TopologyInfo recv_getTopologyInfoByName() throws NotAliveException, org.apache.thrift.TException
+    public TopologyInfo recv_getTopologyInfoByName() throws NotAliveException, TException
     {
       getTopologyInfoByName_result result = new getTopologyInfoByName_result();
       receiveBase(result, "getTopologyInfoByName");
@@ -751,20 +783,20 @@ public class Nimbus {
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getTopologyInfoByName failed: unknown result");
     }
 
-    public StormTopology getTopology(String id) throws NotAliveException, org.apache.thrift.TException
+    public StormTopology getTopology(String id) throws NotAliveException, TException
     {
       send_getTopology(id);
       return recv_getTopology();
     }
 
-    public void send_getTopology(String id) throws org.apache.thrift.TException
+    public void send_getTopology(String id) throws TException
     {
       getTopology_args args = new getTopology_args();
       args.set_id(id);
       sendBase("getTopology", args);
     }
 
-    public StormTopology recv_getTopology() throws NotAliveException, org.apache.thrift.TException
+    public StormTopology recv_getTopology() throws NotAliveException, TException
     {
       getTopology_result result = new getTopology_result();
       receiveBase(result, "getTopology");
@@ -777,20 +809,20 @@ public class Nimbus {
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getTopology failed: unknown result");
     }
 
-    public StormTopology getUserTopology(String id) throws NotAliveException, org.apache.thrift.TException
+    public StormTopology getUserTopology(String id) throws NotAliveException, TException
     {
       send_getUserTopology(id);
       return recv_getUserTopology();
     }
 
-    public void send_getUserTopology(String id) throws org.apache.thrift.TException
+    public void send_getUserTopology(String id) throws TException
     {
       getUserTopology_args args = new getUserTopology_args();
       args.set_id(id);
       sendBase("getUserTopology", args);
     }
 
-    public StormTopology recv_getUserTopology() throws NotAliveException, org.apache.thrift.TException
+    public StormTopology recv_getUserTopology() throws NotAliveException, TException
     {
       getUserTopology_result result = new getUserTopology_result();
       receiveBase(result, "getUserTopology");
@@ -803,110 +835,278 @@ public class Nimbus {
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getUserTopology failed: unknown result");
     }
 
-    public void workerUploadMetric(WorkerUploadMetrics uploadMetrics) throws org.apache.thrift.TException
+    public void uploadTopologyMetrics(String topologyId, TopologyMetric topologyMetrics) throws TException
     {
-      send_workerUploadMetric(uploadMetrics);
-      recv_workerUploadMetric();
+      send_uploadTopologyMetrics(topologyId, topologyMetrics);
+      recv_uploadTopologyMetrics();
     }
 
-    public void send_workerUploadMetric(WorkerUploadMetrics uploadMetrics) throws org.apache.thrift.TException
+    public void send_uploadTopologyMetrics(String topologyId, TopologyMetric topologyMetrics) throws TException
     {
-      workerUploadMetric_args args = new workerUploadMetric_args();
-      args.set_uploadMetrics(uploadMetrics);
-      sendBase("workerUploadMetric", args);
+      uploadTopologyMetrics_args args = new uploadTopologyMetrics_args();
+      args.set_topologyId(topologyId);
+      args.set_topologyMetrics(topologyMetrics);
+      sendBase("uploadTopologyMetrics", args);
     }
 
-    public void recv_workerUploadMetric() throws org.apache.thrift.TException
+    public void recv_uploadTopologyMetrics() throws TException
     {
-      workerUploadMetric_result result = new workerUploadMetric_result();
-      receiveBase(result, "workerUploadMetric");
+      uploadTopologyMetrics_result result = new uploadTopologyMetrics_result();
+      receiveBase(result, "uploadTopologyMetrics");
       return;
     }
 
-    public TopologyMetric getTopologyMetric(String topologyName) throws org.apache.thrift.TException
+    public Map<String,Long> registerMetrics(String topologyId, Set<String> metrics) throws TException
     {
-      send_getTopologyMetric(topologyName);
-      return recv_getTopologyMetric();
+      send_registerMetrics(topologyId, metrics);
+      return recv_registerMetrics();
     }
 
-    public void send_getTopologyMetric(String topologyName) throws org.apache.thrift.TException
+    public void send_registerMetrics(String topologyId, Set<String> metrics) throws TException
     {
-      getTopologyMetric_args args = new getTopologyMetric_args();
-      args.set_topologyName(topologyName);
-      sendBase("getTopologyMetric", args);
+      registerMetrics_args args = new registerMetrics_args();
+      args.set_topologyId(topologyId);
+      args.set_metrics(metrics);
+      sendBase("registerMetrics", args);
     }
 
-    public TopologyMetric recv_getTopologyMetric() throws org.apache.thrift.TException
+    public Map<String,Long> recv_registerMetrics() throws TException
     {
-      getTopologyMetric_result result = new getTopologyMetric_result();
-      receiveBase(result, "getTopologyMetric");
+      registerMetrics_result result = new registerMetrics_result();
+      receiveBase(result, "registerMetrics");
       if (result.is_set_success()) {
         return result.success;
       }
-      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getTopologyMetric failed: unknown result");
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "registerMetrics failed: unknown result");
     }
 
-    public NettyMetric getNettyMetric(String topologyName, int pos) throws org.apache.thrift.TException
+    public TopologyMetric getTopologyMetrics(String topologyId) throws TException
     {
-      send_getNettyMetric(topologyName, pos);
-      return recv_getNettyMetric();
+      send_getTopologyMetrics(topologyId);
+      return recv_getTopologyMetrics();
     }
 
-    public void send_getNettyMetric(String topologyName, int pos) throws org.apache.thrift.TException
+    public void send_getTopologyMetrics(String topologyId) throws TException
     {
-      getNettyMetric_args args = new getNettyMetric_args();
-      args.set_topologyName(topologyName);
-      args.set_pos(pos);
-      sendBase("getNettyMetric", args);
+      getTopologyMetrics_args args = new getTopologyMetrics_args();
+      args.set_topologyId(topologyId);
+      sendBase("getTopologyMetrics", args);
     }
 
-    public NettyMetric recv_getNettyMetric() throws org.apache.thrift.TException
+    public TopologyMetric recv_getTopologyMetrics() throws TException
     {
-      getNettyMetric_result result = new getNettyMetric_result();
-      receiveBase(result, "getNettyMetric");
+      getTopologyMetrics_result result = new getTopologyMetrics_result();
+      receiveBase(result, "getTopologyMetrics");
       if (result.is_set_success()) {
         return result.success;
       }
-      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getNettyMetric failed: unknown result");
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getTopologyMetrics failed: unknown result");
     }
 
-    public NettyMetric getServerNettyMetric(String topologyName, String serverName) throws org.apache.thrift.TException
+    public List<MetricInfo> getMetrics(String topologyId, int type) throws TException
     {
-      send_getServerNettyMetric(topologyName, serverName);
-      return recv_getServerNettyMetric();
+      send_getMetrics(topologyId, type);
+      return recv_getMetrics();
     }
 
-    public void send_getServerNettyMetric(String topologyName, String serverName) throws org.apache.thrift.TException
+    public void send_getMetrics(String topologyId, int type) throws TException
     {
-      getServerNettyMetric_args args = new getServerNettyMetric_args();
-      args.set_topologyName(topologyName);
-      args.set_serverName(serverName);
-      sendBase("getServerNettyMetric", args);
+      getMetrics_args args = new getMetrics_args();
+      args.set_topologyId(topologyId);
+      args.set_type(type);
+      sendBase("getMetrics", args);
+    }
+
+    public List<MetricInfo> recv_getMetrics() throws TException
+    {
+      getMetrics_result result = new getMetrics_result();
+      receiveBase(result, "getMetrics");
+      if (result.is_set_success()) {
+        return result.success;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getMetrics failed: unknown result");
+    }
+
+    public MetricInfo getNettyMetrics(String topologyId) throws TException
+    {
+      send_getNettyMetrics(topologyId);
+      return recv_getNettyMetrics();
+    }
+
+    public void send_getNettyMetrics(String topologyId) throws TException
+    {
+      getNettyMetrics_args args = new getNettyMetrics_args();
+      args.set_topologyId(topologyId);
+      sendBase("getNettyMetrics", args);
+    }
+
+    public MetricInfo recv_getNettyMetrics() throws TException
+    {
+      getNettyMetrics_result result = new getNettyMetrics_result();
+      receiveBase(result, "getNettyMetrics");
+      if (result.is_set_success()) {
+        return result.success;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getNettyMetrics failed: unknown result");
+    }
+
+    public MetricInfo getNettyMetricsByHost(String topologyId, String host) throws TException
+    {
+      send_getNettyMetricsByHost(topologyId, host);
+      return recv_getNettyMetricsByHost();
+    }
+
+    public void send_getNettyMetricsByHost(String topologyId, String host) throws TException
+    {
+      getNettyMetricsByHost_args args = new getNettyMetricsByHost_args();
+      args.set_topologyId(topologyId);
+      args.set_host(host);
+      sendBase("getNettyMetricsByHost", args);
+    }
+
+    public MetricInfo recv_getNettyMetricsByHost() throws TException
+    {
+      getNettyMetricsByHost_result result = new getNettyMetricsByHost_result();
+      receiveBase(result, "getNettyMetricsByHost");
+      if (result.is_set_success()) {
+        return result.success;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getNettyMetricsByHost failed: unknown result");
+    }
+
+    public MetricInfo getPagingNettyMetrics(String topologyId, String host, int page) throws TException
+    {
+      send_getPagingNettyMetrics(topologyId, host, page);
+      return recv_getPagingNettyMetrics();
+    }
+
+    public void send_getPagingNettyMetrics(String topologyId, String host, int page) throws TException
+    {
+      getPagingNettyMetrics_args args = new getPagingNettyMetrics_args();
+      args.set_topologyId(topologyId);
+      args.set_host(host);
+      args.set_page(page);
+      sendBase("getPagingNettyMetrics", args);
+    }
+
+    public MetricInfo recv_getPagingNettyMetrics() throws TException
+    {
+      getPagingNettyMetrics_result result = new getPagingNettyMetrics_result();
+      receiveBase(result, "getPagingNettyMetrics");
+      if (result.is_set_success()) {
+        return result.success;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getPagingNettyMetrics failed: unknown result");
+    }
+
+    public int getNettyMetricSizeByHost(String topologyId, String host) throws TException
+    {
+      send_getNettyMetricSizeByHost(topologyId, host);
+      return recv_getNettyMetricSizeByHost();
+    }
+
+    public void send_getNettyMetricSizeByHost(String topologyId, String host) throws TException
+    {
+      getNettyMetricSizeByHost_args args = new getNettyMetricSizeByHost_args();
+      args.set_topologyId(topologyId);
+      args.set_host(host);
+      sendBase("getNettyMetricSizeByHost", args);
+    }
+
+    public int recv_getNettyMetricSizeByHost() throws TException
+    {
+      getNettyMetricSizeByHost_result result = new getNettyMetricSizeByHost_result();
+      receiveBase(result, "getNettyMetricSizeByHost");
+      if (result.is_set_success()) {
+        return result.success;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getNettyMetricSizeByHost failed: unknown result");
+    }
+
+    public MetricInfo getTaskMetrics(String topologyId, String component) throws TException
+    {
+      send_getTaskMetrics(topologyId, component);
+      return recv_getTaskMetrics();
+    }
+
+    public void send_getTaskMetrics(String topologyId, String component) throws TException
+    {
+      getTaskMetrics_args args = new getTaskMetrics_args();
+      args.set_topologyId(topologyId);
+      args.set_component(component);
+      sendBase("getTaskMetrics", args);
+    }
+
+    public MetricInfo recv_getTaskMetrics() throws TException
+    {
+      getTaskMetrics_result result = new getTaskMetrics_result();
+      receiveBase(result, "getTaskMetrics");
+      if (result.is_set_success()) {
+        return result.success;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getTaskMetrics failed: unknown result");
+    }
+
+    public List<MetricInfo> getTaskAndStreamMetrics(String topologyId, int taskId) throws TException
+    {
+      send_getTaskAndStreamMetrics(topologyId, taskId);
+      return recv_getTaskAndStreamMetrics();
+    }
+
+    public void send_getTaskAndStreamMetrics(String topologyId, int taskId) throws TException
+    {
+      getTaskAndStreamMetrics_args args = new getTaskAndStreamMetrics_args();
+      args.set_topologyId(topologyId);
+      args.set_taskId(taskId);
+      sendBase("getTaskAndStreamMetrics", args);
+    }
+
+    public List<MetricInfo> recv_getTaskAndStreamMetrics() throws TException
+    {
+      getTaskAndStreamMetrics_result result = new getTaskAndStreamMetrics_result();
+      receiveBase(result, "getTaskAndStreamMetrics");
+      if (result.is_set_success()) {
+        return result.success;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getTaskAndStreamMetrics failed: unknown result");
+    }
+
+    public List<MetricInfo> getSummarizedTopologyMetrics(String topologyId) throws TException
+    {
+      send_getSummarizedTopologyMetrics(topologyId);
+      return recv_getSummarizedTopologyMetrics();
+    }
+
+    public void send_getSummarizedTopologyMetrics(String topologyId) throws TException
+    {
+      getSummarizedTopologyMetrics_args args = new getSummarizedTopologyMetrics_args();
+      args.set_topologyId(topologyId);
+      sendBase("getSummarizedTopologyMetrics", args);
     }
 
-    public NettyMetric recv_getServerNettyMetric() throws org.apache.thrift.TException
+    public List<MetricInfo> recv_getSummarizedTopologyMetrics() throws TException
     {
-      getServerNettyMetric_result result = new getServerNettyMetric_result();
-      receiveBase(result, "getServerNettyMetric");
+      getSummarizedTopologyMetrics_result result = new getSummarizedTopologyMetrics_result();
+      receiveBase(result, "getSummarizedTopologyMetrics");
       if (result.is_set_success()) {
         return result.success;
       }
-      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getServerNettyMetric failed: unknown result");
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getSummarizedTopologyMetrics failed: unknown result");
     }
 
-    public String getVersion() throws org.apache.thrift.TException
+    public String getVersion() throws TException
     {
       send_getVersion();
       return recv_getVersion();
     }
 
-    public void send_getVersion() throws org.apache.thrift.TException
+    public void send_getVersion() throws TException
     {
       getVersion_args args = new getVersion_args();
       sendBase("getVersion", args);
     }
 
-    public String recv_getVersion() throws org.apache.thrift.TException
+    public String recv_getVersion() throws TException
     {
       getVersion_result result = new getVersion_result();
       receiveBase(result, "getVersion");
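The hunk above swaps the old per-worker metric upload and the getTopologyMetric/getNettyMetric trio for a finer-grained family of metrics RPCs, all cut from the same synchronous template: the public method delegates to a send_*/recv_* pair, sendBase writes the *_args struct, and receiveBase fills the *_result struct or raises TApplicationException(MISSING_RESULT). A minimal caller sketch, with stated assumptions: host, port, topology id, component and page number are placeholders, the framed-transport/binary-protocol pairing is the conventional Nimbus setup rather than something this patch pins down, and the backtype.storm.generated package is inferred from the companion file later in this commit.

    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TFramedTransport;
    import org.apache.thrift.transport.TSocket;
    import org.apache.thrift.transport.TTransport;

    import backtype.storm.generated.MetricInfo;
    import backtype.storm.generated.Nimbus;
    import backtype.storm.generated.TopologyMetric;

    public class MetricsClientSketch {
        public static void main(String[] args) throws TException {
            // "nimbus-host", 7627 and all ids below are illustrative placeholders.
            TTransport transport = new TFramedTransport(new TSocket("nimbus-host", 7627));
            transport.open();
            try {
                Nimbus.Client client = new Nimbus.Client(new TBinaryProtocol(transport));
                String topologyId = "SequenceTest-1-1445000000";
                // One round trip each: send_xxx() writes the args struct,
                // recv_xxx() reads the result struct or throws MISSING_RESULT.
                TopologyMetric topology = client.getTopologyMetrics(topologyId);
                MetricInfo component = client.getTaskMetrics(topologyId, "SequenceSpout");
                // Netty metrics are paged per host; the size call bounds the paging.
                int size = client.getNettyMetricSizeByHost(topologyId, "10.1.1.1");
                MetricInfo firstPage = client.getPagingNettyMetrics(topologyId, "10.1.1.1", 1);
            } finally {
                transport.close();
            }
        }
    }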
@@ -916,24 +1116,25 @@ public class Nimbus {
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getVersion failed: unknown result");
     }
 
-    public void updateConf(String name, String conf) throws NotAliveException, InvalidTopologyException, org.apache.thrift.TException
+    public void updateTopology(String name, String uploadedLocation, String updateConf) throws NotAliveException, InvalidTopologyException, TException
     {
-      send_updateConf(name, conf);
-      recv_updateConf();
+      send_updateTopology(name, uploadedLocation, updateConf);
+      recv_updateTopology();
     }
 
-    public void send_updateConf(String name, String conf) throws org.apache.thrift.TException
+    public void send_updateTopology(String name, String uploadedLocation, String updateConf) throws TException
     {
-      updateConf_args args = new updateConf_args();
+      updateTopology_args args = new updateTopology_args();
       args.set_name(name);
-      args.set_conf(conf);
-      sendBase("updateConf", args);
+      args.set_uploadedLocation(uploadedLocation);
+      args.set_updateConf(updateConf);
+      sendBase("updateTopology", args);
     }
 
-    public void recv_updateConf() throws NotAliveException, InvalidTopologyException, org.apache.thrift.TException
+    public void recv_updateTopology() throws NotAliveException, InvalidTopologyException, TException
     {
-      updateConf_result result = new updateConf_result();
-      receiveBase(result, "updateConf");
+      updateTopology_result result = new updateTopology_result();
+      receiveBase(result, "updateTopology");
       if (result.e != null) {
         throw result.e;
       }
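updateConf, which could only push a new JSON conf, becomes updateTopology, which also carries an uploadedLocation so a running topology's jar and configuration can be replaced in one call; the recv_ side still rethrows the declared NotAliveException/InvalidTopologyException out of the result struct, as shown just above. A hedged call-site sketch in which every argument is an illustrative placeholder:

    // Assumes a connected Nimbus.Client as in the earlier sketch; both thrift
    // exceptions declared by updateTopology extend TException.
    static void pushUpdate(Nimbus.Client client) throws TException {
        client.updateTopology("SequenceTest",                    // topology name
                "/home/jstorm/upload/SequenceTest-update.jar",   // uploadedLocation
                "{\"topology.workers\": 8}");                    // updateConf (JSON)
    }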
@@ -943,6 +1144,26 @@ public class Nimbus {
       return;
     }
 
+    public void updateTaskHeartbeat(TopologyTaskHbInfo taskHbs) throws TException
+    {
+      send_updateTaskHeartbeat(taskHbs);
+      recv_updateTaskHeartbeat();
+    }
+
+    public void send_updateTaskHeartbeat(TopologyTaskHbInfo taskHbs) throws TException
+    {
+      updateTaskHeartbeat_args args = new updateTaskHeartbeat_args();
+      args.set_taskHbs(taskHbs);
+      sendBase("updateTaskHeartbeat", args);
+    }
+
+    public void recv_updateTaskHeartbeat() throws TException
+    {
+      updateTaskHeartbeat_result result = new updateTaskHeartbeat_result();
+      receiveBase(result, "updateTaskHeartbeat");
+      return;
+    }
+
   }
   public static class AsyncClient extends org.apache.thrift.async.TAsyncClient implements AsyncIface {
     public static class Factory implements org.apache.thrift.async.TAsyncClientFactory<AsyncClient> {
@@ -961,7 +1182,7 @@ public class Nimbus {
       super(protocolFactory, clientManager, transport);
     }
 
-    public void submitTopology(String name, String uploadedJarLocation, String jsonConf, StormTopology topology, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+    public void submitTopology(String name, String uploadedJarLocation, String jsonConf, StormTopology topology, AsyncMethodCallback resultHandler) throws TException {
       checkReady();
       submitTopology_call method_call = new submitTopology_call(name, uploadedJarLocation, jsonConf, topology, resultHandler, this, ___protocolFactory, ___transport);
       this.___currentMethod = method_call;
@@ -973,7 +1194,7 @@ public class Nimbus {
       private String uploadedJarLocation;
       private String jsonConf;
       private StormTopology topology;
-      public submitTopology_call(String name, String uploadedJarLocation, String jsonConf, StormTopology topology, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+      public submitTopology_call(String name, String uploadedJarLocation, String jsonConf, StormTopology topology, AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws TException {
         super(client, protocolFactory, transport, resultHandler, false);
         this.name = name;
         this.uploadedJarLocation = uploadedJarLocation;
@@ -981,7 +1202,7 @@ public class Nimbus {
         this.topology = topology;
       }
 
-      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws TException {
         prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("submitTopology", org.apache.thrift.protocol.TMessageType.CALL, 0));
         submitTopology_args args = new submitTopology_args();
         args.set_name(name);
@@ -992,8 +1213,8 @@ public class Nimbus {
         prot.writeMessageEnd();
       }
 
-      public void getResult() throws AlreadyAliveException, InvalidTopologyException, TopologyAssignException, org.apache.thrift.TException {
-        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+      public void getResult() throws AlreadyAliveException, InvalidTopologyException, TopologyAssignException, TException {
+        if (getState() != State.RESPONSE_READ) {
           throw new IllegalStateException("Method call not finished!");
         }
         org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
@@ -1002,7 +1223,7 @@ public class Nimbus {
       }
     }
 
-    public void submitTopologyWithOpts(String name, String uploadedJarLocation, String jsonConf, StormTopology topology, SubmitOptions options, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+    public void submitTopologyWithOpts(String name, String uploadedJarLocation, String jsonConf, StormTopology topology, SubmitOptions options, AsyncMethodCallback resultHandler) throws TException {
       checkReady();
       submitTopologyWithOpts_call method_call = new submitTopologyWithOpts_call(name, uploadedJarLocation, jsonConf, topology, options, resultHandler, this, ___protocolFactory, ___transport);
       this.___currentMethod = method_call;
@@ -1015,7 +1236,7 @@ public class Nimbus {
       private String jsonConf;
       private StormTopology topology;
       private SubmitOptions options;
-      public submitTopologyWithOpts_call(String name, String uploadedJarLocation, String jsonConf, StormTopology topology, SubmitOptions options, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+      public submitTopologyWithOpts_call(String name, String uploadedJarLocation, String jsonConf, StormTopology topology, SubmitOptions options, AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws TException {
         super(client, protocolFactory, transport, resultHandler, false);
         this.name = name;
         this.uploadedJarLocation = uploadedJarLocation;
@@ -1024,7 +1245,7 @@ public class Nimbus {
         this.options = options;
       }
 
-      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws TException {
         prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("submitTopologyWithOpts", org.apache.thrift.protocol.TMessageType.CALL, 0));
         submitTopologyWithOpts_args args = new submitTopologyWithOpts_args();
         args.set_name(name);
@@ -1036,8 +1257,8 @@ public class Nimbus {
         prot.writeMessageEnd();
       }
 
-      public void getResult() throws AlreadyAliveException, InvalidTopologyException, TopologyAssignException, org.apache.thrift.TException {
-        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+      public void getResult() throws AlreadyAliveException, InvalidTopologyException, TopologyAssignException, TException {
+        if (getState() != State.RESPONSE_READ) {
           throw new IllegalStateException("Method call not finished!");
         }
         org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
@@ -1046,7 +1267,7 @@ public class Nimbus {
       }
     }
 
-    public void killTopology(String name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+    public void killTopology(String name, AsyncMethodCallback resultHandler) throws TException {
       checkReady();
       killTopology_call method_call = new killTopology_call(name, resultHandler, this, ___protocolFactory, ___transport);
       this.___currentMethod = method_call;
@@ -1055,12 +1276,12 @@ public class Nimbus {
 
     public static class killTopology_call extends org.apache.thrift.async.TAsyncMethodCall {
       private String name;
-      public killTopology_call(String name, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+      public killTopology_call(String name, AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws TException {
         super(client, protocolFactory, transport, resultHandler, false);
         this.name = name;
       }
 
-      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws TException {
         prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("killTopology", org.apache.thrift.protocol.TMessageType.CALL, 0));
         killTopology_args args = new killTopology_args();
         args.set_name(name);
@@ -1068,8 +1289,8 @@ public class Nimbus {
         prot.writeMessageEnd();
       }
 
-      public void getResult() throws NotAliveException, org.apache.thrift.TException {
-        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+      public void getResult() throws NotAliveException, TException {
+        if (getState() != State.RESPONSE_READ) {
           throw new IllegalStateException("Method call not finished!");
         }
         org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
@@ -1078,7 +1299,7 @@ public class Nimbus {
       }
     }
 
-    public void killTopologyWithOpts(String name, KillOptions options, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+    public void killTopologyWithOpts(String name, KillOptions options, AsyncMethodCallback resultHandler) throws TException {
       checkReady();
       killTopologyWithOpts_call method_call = new killTopologyWithOpts_call(name, options, resultHandler, this, ___protocolFactory, ___transport);
       this.___currentMethod = method_call;
@@ -1088,13 +1309,13 @@ public class Nimbus {
     public static class killTopologyWithOpts_call extends org.apache.thrift.async.TAsyncMethodCall {
       private String name;
       private KillOptions options;
-      public killTopologyWithOpts_call(String name, KillOptions options, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+      public killTopologyWithOpts_call(String name, KillOptions options, AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws TException {
         super(client, protocolFactory, transport, resultHandler, false);
         this.name = name;
         this.options = options;
       }
 
-      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws TException {
         prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("killTopologyWithOpts", org.apache.thrift.protocol.TMessageType.CALL, 0));
         killTopologyWithOpts_args args = new killTopologyWithOpts_args();
         args.set_name(name);
@@ -1103,8 +1324,8 @@ public class Nimbus {
         prot.writeMessageEnd();
       }
 
-      public void getResult() throws NotAliveException, org.apache.thrift.TException {
-        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+      public void getResult() throws NotAliveException, TException {
+        if (getState() != State.RESPONSE_READ) {
           throw new IllegalStateException("Method call not finished!");
         }
         org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
@@ -1113,7 +1334,7 @@ public class Nimbus {
       }
     }
 
-    public void activate(String name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+    public void activate(String name, AsyncMethodCallback resultHandler) throws TException {
       checkReady();
       activate_call method_call = new activate_call(name, resultHandler, this, ___protocolFactory, ___transport);
       this.___currentMethod = method_call;
@@ -1122,12 +1343,12 @@ public class Nimbus {
 
     public static class activate_call extends org.apache.thrift.async.TAsyncMethodCall {
       private String name;
-      public activate_call(String name, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+      public activate_call(String name, AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws TException {
         super(client, protocolFactory, transport, resultHandler, false);
         this.name = name;
       }
 
-      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws TException {
         prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("activate", org.apache.thrift.protocol.TMessageType.CALL, 0));
         activate_args args = new activate_args();
         args.set_name(name);
@@ -1135,8 +1356,8 @@ public class Nimbus {
         prot.writeMessageEnd();
       }
 
-      public void getResult() throws NotAliveException, org.apache.thrift.TException {
-        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+      public void getResult() throws NotAliveException, TException {
+        if (getState() != State.RESPONSE_READ) {
           throw new IllegalStateException("Method call not finished!");
         }
         org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
@@ -1145,7 +1366,7 @@ public class Nimbus {
       }
     }
 
-    public void deactivate(String name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+    public void deactivate(String name, AsyncMethodCallback resultHandler) throws TException {
       checkReady();
       deactivate_call method_call = new deactivate_call(name, resultHandler, this, ___protocolFactory, ___transport);
       this.___currentMethod = method_call;
@@ -1154,12 +1375,12 @@ public class Nimbus {
 
     public static class deactivate_call extends org.apache.thrift.async.TAsyncMethodCall {
       private String name;
-      public deactivate_call(String name, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+      public deactivate_call(String name, AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws TException {
         super(client, protocolFactory, transport, resultHandler, false);
         this.name = name;
       }
 
-      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws TException {
         prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("deactivate", org.apache.thrift.protocol.TMessageType.CALL, 0));
         deactivate_args args = new deactivate_args();
         args.set_name(name);
@@ -1167,8 +1388,8 @@ public class Nimbus {
         prot.writeMessageEnd();
       }
 
-      public void getResult() throws NotAliveException, org.apache.thrift.TException {
-        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+      public void getResult() throws NotAliveException, TException {
+        if (getState() != State.RESPONSE_READ) {
           throw new IllegalStateException("Method call not finished!");
         }
         org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
@@ -1177,7 +1398,7 @@ public class Nimbus {
       }
     }
 
-    public void rebalance(String name, RebalanceOptions options, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+    public void rebalance(String name, RebalanceOptions options, AsyncMethodCallback resultHandler) throws TException {
       checkReady();
       rebalance_call method_call = new rebalance_call(name, options, resultHandler, this, ___protocolFactory, ___transport);
       this.___currentMethod = method_call;
@@ -1187,13 +1408,13 @@ public class Nimbus {
     public static class rebalance_call extends org.apache.thrift.async.TAsyncMethodCall {
       private String name;
       private RebalanceOptions options;
-      public rebalance_call(String name, RebalanceOptions options, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+      public rebalance_call(String name, RebalanceOptions options, AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws TException {
         super(client, protocolFactory, transport, resultHandler, false);
         this.name = name;
         this.options = options;
       }
 
-      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws TException {
         prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("rebalance", org.apache.thrift.protocol.TMessageType.CALL, 0));
         rebalance_args args = new rebalance_args();
         args.set_name(name);
@@ -1202,8 +1423,8 @@ public class Nimbus {
         prot.writeMessageEnd();
       }
 
-      public void getResult() throws NotAliveException, InvalidTopologyException, org.apache.thrift.TException {
-        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+      public void getResult() throws NotAliveException, InvalidTopologyException, TException {
+        if (getState() != State.RESPONSE_READ) {
           throw new IllegalStateException("Method call not finished!");
         }
         org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
@@ -1212,7 +1433,7 @@ public class Nimbus {
       }
     }
 
-    public void metricMonitor(String name, MonitorOptions options, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+    public void metricMonitor(String name, MonitorOptions options, AsyncMethodCallback resultHandler) throws TException {
       checkReady();
       metricMonitor_call method_call = new metricMonitor_call(name, options, resultHandler, this, ___protocolFactory, ___transport);
       this.___currentMethod = method_call;
@@ -1222,13 +1443,13 @@ public class Nimbus {
     public static class metricMonitor_call extends org.apache.thrift.async.TAsyncMethodCall {
       private String name;
       private MonitorOptions options;
-      public metricMonitor_call(String name, MonitorOptions options, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+      public metricMonitor_call(String name, MonitorOptions options, AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws TException {
         super(client, protocolFactory, transport, resultHandler, false);
         this.name = name;
         this.options = options;
       }
 
-      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws TException {
         prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("metricMonitor", org.apache.thrift.protocol.TMessageType.CALL, 0));
         metricMonitor_args args = new metricMonitor_args();
         args.set_name(name);
@@ -1237,8 +1458,8 @@ public class Nimbus {
         prot.writeMessageEnd();
       }
 
-      public void getResult() throws NotAliveException, org.apache.thrift.TException {
-        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+      public void getResult() throws NotAliveException, TException {
+        if (getState() != State.RESPONSE_READ) {
           throw new IllegalStateException("Method call not finished!");
         }
         org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
@@ -1247,7 +1468,7 @@ public class Nimbus {
       }
     }
 
-    public void restart(String name, String jsonConf, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+    public void restart(String name, String jsonConf, AsyncMethodCallback resultHandler) throws TException {
       checkReady();
       restart_call method_call = new restart_call(name, jsonConf, resultHandler, this, ___protocolFactory, ___transport);
       this.___currentMethod = method_call;
@@ -1257,13 +1478,13 @@ public class Nimbus {
     public static class restart_call extends org.apache.thrift.async.TAsyncMethodCall {
       private String name;
       private String jsonConf;
-      public restart_call(String name, String jsonConf, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+      public restart_call(String name, String jsonConf, AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws TException {
         super(client, protocolFactory, transport, resultHandler, false);
         this.name = name;
         this.jsonConf = jsonConf;
       }
 
-      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws TException {
         prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("restart", org.apache.thrift.protocol.TMessageType.CALL, 0));
         restart_args args = new restart_args();
         args.set_name(name);
@@ -1272,8 +1493,8 @@ public class Nimbus {
         prot.writeMessageEnd();
       }
 
-      public void getResult() throws NotAliveException, InvalidTopologyException, TopologyAssignException, org.apache.thrift.TException {
-        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+      public void getResult() throws NotAliveException, InvalidTopologyException, TopologyAssignException, TException {
+        if (getState() != State.RESPONSE_READ) {
           throw new IllegalStateException("Method call not finished!");
         }
         org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
@@ -1282,7 +1503,7 @@ public class Nimbus {
       }
     }
 
-    public void beginLibUpload(String libName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+    public void beginLibUpload(String libName, AsyncMethodCallback resultHandler) throws TException {
       checkReady();
       beginLibUpload_call method_call = new beginLibUpload_call(libName, resultHandler, this, ___protocolFactory, ___transport);
       this.___currentMethod = method_call;
@@ -1291,12 +1512,12 @@ public class Nimbus {
 
     public static class beginLibUpload_call extends org.apache.thrift.async.TAsyncMethodCall {
       private String libName;
-      public beginLibUpload_call(String libName, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+      public beginLibUpload_call(String libName, AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws TException {
         super(client, protocolFactory, transport, resultHandler, false);
         this.libName = libName;
       }
 
-      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws TException {
         prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("beginLibUpload", org.apache.thrift.protocol.TMessageType.CALL, 0));
         beginLibUpload_args args = new beginLibUpload_args();
         args.set_libName(libName);
@@ -1304,8 +1525,8 @@ public class Nimbus {
         prot.writeMessageEnd();
       }
 
-      public void getResult() throws org.apache.thrift.TException {
-        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+      public void getResult() throws TException {
+        if (getState() != State.RESPONSE_READ) {
           throw new IllegalStateException("Method call not finished!");
         }
         org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
@@ -1314,7 +1535,7 @@ public class Nimbus {
       }
     }
 
-    public void beginFileUpload(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+    public void beginFileUpload(AsyncMethodCallback resultHandler) throws TException {
       checkReady();
       beginFileUpload_call method_call = new beginFileUpload_call(resultHandler, this, ___protocolFactory, ___transport);
       this.___currentMethod = method_call;
@@ -1322,19 +1543,19 @@ public class Nimbus {
     }
 
     public static class beginFileUpload_call extends org.apache.thrift.async.TAsyncMethodCall {
-      public beginFileUpload_call(org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+      public beginFileUpload_call(AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws TException {
         super(client, protocolFactory, transport, resultHandler, false);
       }
 
-      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws TException {
         prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("beginFileUpload", org.apache.thrift.protocol.TMessageType.CALL, 0));
         beginFileUpload_args args = new beginFileUpload_args();
         args.write(prot);
         prot.writeMessageEnd();
       }
 
-      public String getResult() throws org.apache.thrift.TException {
-        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+      public String getResult() throws TException {
+        if (getState() != State.RESPONSE_READ) {
           throw new IllegalStateException("Method call not finished!");
         }
         org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
@@ -1343,7 +1564,7 @@ public class Nimbus {
       }
     }
 
-    public void uploadChunk(String location, ByteBuffer chunk, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+    public void uploadChunk(String location, ByteBuffer chunk, AsyncMethodCallback resultHandler) throws TException {

<TRUNCATED>
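Every async method in this file follows the single template visible above: the public method instantiates a *_call, write_args frames the request onto the nonblocking transport, and getResult decodes the buffered response once the selector thread has moved the call to State.RESPONSE_READ. A usage sketch under the Thrift 0.9.2 async stack; the host, port and the callback's type parameter are assumptions, not taken from this patch.

    import org.apache.thrift.TException;
    import org.apache.thrift.async.AsyncMethodCallback;
    import org.apache.thrift.async.TAsyncClientManager;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TNonblockingSocket;

    import backtype.storm.generated.Nimbus;

    public class AsyncVersionSketch {
        public static void main(String[] args) throws Exception {
            // "nimbus-host" and 7627 are placeholders.
            Nimbus.AsyncClient client = new Nimbus.AsyncClient(
                    new TBinaryProtocol.Factory(),
                    new TAsyncClientManager(),                   // owns the selector thread
                    new TNonblockingSocket("nimbus-host", 7627));
            client.getVersion(new AsyncMethodCallback<Nimbus.AsyncClient.getVersion_call>() {
                public void onComplete(Nimbus.AsyncClient.getVersion_call call) {
                    try {
                        // getResult() rethrows anything the server declared.
                        System.out.println("nimbus version: " + call.getResult());
                    } catch (TException e) {
                        e.printStackTrace();
                    }
                }
                public void onError(Exception e) {
                    e.printStackTrace();
                }
            });
            // The callback fires on the manager's selector thread, not here, so a
            // real caller would block on a latch before letting main() return.
        }
    }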

[45/51] [partial] storm git commit: Update JStorm to latest release 2.1.0

Posted by pt...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/DistributedRPCInvocations.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/DistributedRPCInvocations.java b/jstorm-core/src/main/java/backtype/storm/generated/DistributedRPCInvocations.java
index 08aab63..6795e5f 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/DistributedRPCInvocations.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/DistributedRPCInvocations.java
@@ -34,26 +34,26 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class DistributedRPCInvocations {
 
   public interface Iface {
 
-    public void result(String id, String result) throws org.apache.thrift.TException;
+    public void result(String id, String result) throws AuthorizationException, TException;
 
-    public DRPCRequest fetchRequest(String functionName) throws org.apache.thrift.TException;
+    public DRPCRequest fetchRequest(String functionName) throws AuthorizationException, TException;
 
-    public void failRequest(String id) throws org.apache.thrift.TException;
+    public void failRequest(String id) throws AuthorizationException, TException;
 
   }
 
   public interface AsyncIface {
 
-    public void result(String id, String result, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void result(String id, String result, AsyncMethodCallback resultHandler) throws TException;
 
-    public void fetchRequest(String functionName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void fetchRequest(String functionName, AsyncMethodCallback resultHandler) throws TException;
 
-    public void failRequest(String id, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void failRequest(String id, AsyncMethodCallback resultHandler) throws TException;
 
   }
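The synchronous Iface now declares AuthorizationException on every method, so a handler can reject a caller with a typed, wire-visible exception instead of an opaque internal error. A minimal server-side sketch; the always-deny check is purely illustrative and the empty DRPCRequest stands in for real dequeue logic:

    import org.apache.thrift.TException;

    import backtype.storm.generated.AuthorizationException;
    import backtype.storm.generated.DRPCRequest;
    import backtype.storm.generated.DistributedRPCInvocations;

    public class SecuredInvocations implements DistributedRPCInvocations.Iface {
        public void result(String id, String result) throws AuthorizationException, TException {
            checkAcl();
            // ... hand the result to the waiting DRPC request ...
        }

        public DRPCRequest fetchRequest(String functionName) throws AuthorizationException, TException {
            checkAcl();
            return new DRPCRequest();  // placeholder; a real handler dequeues a pending request
        }

        public void failRequest(String id) throws AuthorizationException, TException {
            checkAcl();
        }

        private void checkAcl() throws AuthorizationException {
            // Illustrative only: a real check would inspect the caller's credentials.
            throw new AuthorizationException("caller is not authorized");
        }
    }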
 
@@ -77,13 +77,13 @@ public class DistributedRPCInvocations {
       super(iprot, oprot);
     }
 
-    public void result(String id, String result) throws org.apache.thrift.TException
+    public void result(String id, String result) throws AuthorizationException, TException
     {
       send_result(id, result);
       recv_result();
     }
 
-    public void send_result(String id, String result) throws org.apache.thrift.TException
+    public void send_result(String id, String result) throws TException
     {
       result_args args = new result_args();
       args.set_id(id);
@@ -91,53 +91,62 @@ public class DistributedRPCInvocations {
       sendBase("result", args);
     }
 
-    public void recv_result() throws org.apache.thrift.TException
+    public void recv_result() throws AuthorizationException, TException
     {
       result_result result = new result_result();
       receiveBase(result, "result");
+      if (result.aze != null) {
+        throw result.aze;
+      }
       return;
     }
 
-    public DRPCRequest fetchRequest(String functionName) throws org.apache.thrift.TException
+    public DRPCRequest fetchRequest(String functionName) throws AuthorizationException, TException
     {
       send_fetchRequest(functionName);
       return recv_fetchRequest();
     }
 
-    public void send_fetchRequest(String functionName) throws org.apache.thrift.TException
+    public void send_fetchRequest(String functionName) throws TException
     {
       fetchRequest_args args = new fetchRequest_args();
       args.set_functionName(functionName);
       sendBase("fetchRequest", args);
     }
 
-    public DRPCRequest recv_fetchRequest() throws org.apache.thrift.TException
+    public DRPCRequest recv_fetchRequest() throws AuthorizationException, TException
     {
       fetchRequest_result result = new fetchRequest_result();
       receiveBase(result, "fetchRequest");
       if (result.is_set_success()) {
         return result.success;
       }
+      if (result.aze != null) {
+        throw result.aze;
+      }
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "fetchRequest failed: unknown result");
     }
 
-    public void failRequest(String id) throws org.apache.thrift.TException
+    public void failRequest(String id) throws AuthorizationException, TException
     {
       send_failRequest(id);
       recv_failRequest();
     }
 
-    public void send_failRequest(String id) throws org.apache.thrift.TException
+    public void send_failRequest(String id) throws TException
     {
       failRequest_args args = new failRequest_args();
       args.set_id(id);
       sendBase("failRequest", args);
     }
 
-    public void recv_failRequest() throws org.apache.thrift.TException
+    public void recv_failRequest() throws AuthorizationException, TException
     {
       failRequest_result result = new failRequest_result();
       receiveBase(result, "failRequest");
+      if (result.aze != null) {
+        throw result.aze;
+      }
       return;
     }
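On the client side each recv_ method now inspects result.aze ahead of the MISSING_RESULT fallback, so an authorization failure surfaces as the declared exception rather than a generic TApplicationException. A caller can tell the two apart; this sketch assumes a connected DistributedRPCInvocations.Client and an illustrative function name:

    try {
        DRPCRequest req = client.fetchRequest("exclamation");
        // ... run the function against req.get_func_args() ...
        client.result(req.get_request_id(), "ok");
    } catch (AuthorizationException aze) {
        // Carried across the wire in the result struct's aze field.
        System.err.println("not authorized: " + aze.get_msg());
    } catch (TException te) {
        // Transport/protocol failures and undeclared server-side errors.
        te.printStackTrace();
    }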
 
@@ -159,7 +168,7 @@ public class DistributedRPCInvocations {
       super(protocolFactory, clientManager, transport);
     }
 
-    public void result(String id, String result, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+    public void result(String id, String result, AsyncMethodCallback resultHandler) throws TException {
       checkReady();
       result_call method_call = new result_call(id, result, resultHandler, this, ___protocolFactory, ___transport);
       this.___currentMethod = method_call;
@@ -169,13 +178,13 @@ public class DistributedRPCInvocations {
     public static class result_call extends org.apache.thrift.async.TAsyncMethodCall {
       private String id;
       private String result;
-      public result_call(String id, String result, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+      public result_call(String id, String result, AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws TException {
         super(client, protocolFactory, transport, resultHandler, false);
         this.id = id;
         this.result = result;
       }
 
-      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws TException {
         prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("result", org.apache.thrift.protocol.TMessageType.CALL, 0));
         result_args args = new result_args();
         args.set_id(id);
@@ -184,8 +193,8 @@ public class DistributedRPCInvocations {
         prot.writeMessageEnd();
       }
 
-      public void getResult() throws org.apache.thrift.TException {
-        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+      public void getResult() throws AuthorizationException, TException {
+        if (getState() != State.RESPONSE_READ) {
           throw new IllegalStateException("Method call not finished!");
         }
         org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
@@ -194,7 +203,7 @@ public class DistributedRPCInvocations {
       }
     }
 
-    public void fetchRequest(String functionName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+    public void fetchRequest(String functionName, AsyncMethodCallback resultHandler) throws TException {
       checkReady();
       fetchRequest_call method_call = new fetchRequest_call(functionName, resultHandler, this, ___protocolFactory, ___transport);
       this.___currentMethod = method_call;
@@ -203,12 +212,12 @@ public class DistributedRPCInvocations {
 
     public static class fetchRequest_call extends org.apache.thrift.async.TAsyncMethodCall {
       private String functionName;
-      public fetchRequest_call(String functionName, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+      public fetchRequest_call(String functionName, AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws TException {
         super(client, protocolFactory, transport, resultHandler, false);
         this.functionName = functionName;
       }
 
-      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws TException {
         prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("fetchRequest", org.apache.thrift.protocol.TMessageType.CALL, 0));
         fetchRequest_args args = new fetchRequest_args();
         args.set_functionName(functionName);
@@ -216,8 +225,8 @@ public class DistributedRPCInvocations {
         prot.writeMessageEnd();
       }
 
-      public DRPCRequest getResult() throws org.apache.thrift.TException {
-        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+      public DRPCRequest getResult() throws AuthorizationException, TException {
+        if (getState() != State.RESPONSE_READ) {
           throw new IllegalStateException("Method call not finished!");
         }
         org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
@@ -226,7 +235,7 @@ public class DistributedRPCInvocations {
       }
     }
 
-    public void failRequest(String id, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+    public void failRequest(String id, AsyncMethodCallback resultHandler) throws TException {
       checkReady();
       failRequest_call method_call = new failRequest_call(id, resultHandler, this, ___protocolFactory, ___transport);
       this.___currentMethod = method_call;
@@ -235,12 +244,12 @@ public class DistributedRPCInvocations {
 
     public static class failRequest_call extends org.apache.thrift.async.TAsyncMethodCall {
       private String id;
-      public failRequest_call(String id, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+      public failRequest_call(String id, AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws TException {
         super(client, protocolFactory, transport, resultHandler, false);
         this.id = id;
       }
 
-      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws TException {
         prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("failRequest", org.apache.thrift.protocol.TMessageType.CALL, 0));
         failRequest_args args = new failRequest_args();
         args.set_id(id);
@@ -248,8 +257,8 @@ public class DistributedRPCInvocations {
         prot.writeMessageEnd();
       }
 
-      public void getResult() throws org.apache.thrift.TException {
-        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+      public void getResult() throws AuthorizationException, TException {
+        if (getState() != State.RESPONSE_READ) {
           throw new IllegalStateException("Method call not finished!");
         }
         org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
@@ -290,9 +299,13 @@ public class DistributedRPCInvocations {
         return false;
       }
 
-      public result_result getResult(I iface, result_args args) throws org.apache.thrift.TException {
+      public result_result getResult(I iface, result_args args) throws TException {
         result_result result = new result_result();
-        iface.result(args.id, args.result);
+        try {
+          iface.result(args.id, args.result);
+        } catch (AuthorizationException aze) {
+          result.aze = aze;
+        }
         return result;
       }
     }
@@ -310,9 +323,13 @@ public class DistributedRPCInvocations {
         return false;
       }
 
-      public fetchRequest_result getResult(I iface, fetchRequest_args args) throws org.apache.thrift.TException {
+      public fetchRequest_result getResult(I iface, fetchRequest_args args) throws TException {
         fetchRequest_result result = new fetchRequest_result();
-        result.success = iface.fetchRequest(args.functionName);
+        try {
+          result.success = iface.fetchRequest(args.functionName);
+        } catch (AuthorizationException aze) {
+          result.aze = aze;
+        }
         return result;
       }
     }
@@ -330,9 +347,13 @@ public class DistributedRPCInvocations {
         return false;
       }
 
-      public failRequest_result getResult(I iface, failRequest_args args) throws org.apache.thrift.TException {
+      public failRequest_result getResult(I iface, failRequest_args args) throws TException {
         failRequest_result result = new failRequest_result();
-        iface.failRequest(args.id);
+        try {
+          iface.failRequest(args.id);
+        } catch (AuthorizationException aze) {
+          result.aze = aze;
+        }
         return result;
       }
     }
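In the synchronous Processor, getResult now traps AuthorizationException and stores it in the result struct, so it travels back as an ordinary REPLY with the aze field set; only undeclared throwables still become TApplicationException frames, as the next hunks show. Wiring the handler sketched earlier into a server might look like this; the port mirrors Storm's usual drpc.invocations.port of 3773, an assumption this patch does not pin down:

    import org.apache.thrift.server.TServer;
    import org.apache.thrift.server.TThreadPoolServer;
    import org.apache.thrift.transport.TServerSocket;
    import org.apache.thrift.transport.TServerTransport;

    import backtype.storm.generated.DistributedRPCInvocations;

    public class InvocationsServerSketch {
        public static void main(String[] args) throws Exception {
            DistributedRPCInvocations.Processor<DistributedRPCInvocations.Iface> processor =
                    new DistributedRPCInvocations.Processor<DistributedRPCInvocations.Iface>(
                            new SecuredInvocations());
            TServerTransport serverTransport = new TServerSocket(3773);  // port is an assumption
            TServer server = new TThreadPoolServer(
                    new TThreadPoolServer.Args(serverTransport).processor(processor));
            // serve() blocks; an AuthorizationException thrown by the handler is
            // caught in getResult() and shipped back in result.aze.
            server.serve();
        }
    }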
@@ -382,6 +403,12 @@ public class DistributedRPCInvocations {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
             result_result result = new result_result();
+            if (e instanceof AuthorizationException) {
+                        result.aze = (AuthorizationException) e;
+                        result.set_aze_isSet(true);
+                        msg = result;
+            }
+             else 
             {
               msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
               msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
@@ -401,7 +428,7 @@ public class DistributedRPCInvocations {
         return false;
       }
 
-      public void start(I iface, result_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+      public void start(I iface, result_args args, AsyncMethodCallback<Void> resultHandler) throws TException {
         iface.result(args.id, args.result,resultHandler);
       }
     }
@@ -433,6 +460,12 @@ public class DistributedRPCInvocations {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
             fetchRequest_result result = new fetchRequest_result();
+            if (e instanceof AuthorizationException) {
+              result.aze = (AuthorizationException) e;
+              result.set_aze_isSet(true);
+              msg = result;
+            }
+            else
             {
               msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
               msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
@@ -452,7 +485,7 @@ public class DistributedRPCInvocations {
         return false;
       }
 
-      public void start(I iface, fetchRequest_args args, org.apache.thrift.async.AsyncMethodCallback<DRPCRequest> resultHandler) throws TException {
+      public void start(I iface, fetchRequest_args args, AsyncMethodCallback<DRPCRequest> resultHandler) throws TException {
         iface.fetchRequest(args.functionName,resultHandler);
       }
     }
@@ -483,6 +516,12 @@ public class DistributedRPCInvocations {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
             failRequest_result result = new failRequest_result();
+            if (e instanceof AuthorizationException) {
+              result.aze = (AuthorizationException) e;
+              result.set_aze_isSet(true);
+              msg = result;
+            }
+            else
             {
               msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
               msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
@@ -502,7 +541,7 @@ public class DistributedRPCInvocations {
         return false;
       }
 
-      public void start(I iface, failRequest_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+      public void start(I iface, failRequest_args args, AsyncMethodCallback<Void> resultHandler) throws TException {
         iface.failRequest(args.id,resultHandler);
       }
     }
@@ -811,11 +850,11 @@ public class DistributedRPCInvocations {
       return _Fields.findByThriftId(fieldId);
     }
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
       schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
       schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
     }
 
@@ -843,7 +882,7 @@ public class DistributedRPCInvocations {
       return sb.toString();
     }
 
-    public void validate() throws org.apache.thrift.TException {
+    public void validate() throws TException {
       // check for required fields
       // check for sub-struct validity
     }
@@ -851,7 +890,7 @@ public class DistributedRPCInvocations {
     private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
       try {
         write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-      } catch (org.apache.thrift.TException te) {
+      } catch (TException te) {
         throw new java.io.IOException(te);
       }
     }
@@ -859,7 +898,7 @@ public class DistributedRPCInvocations {
     private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
       try {
         read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-      } catch (org.apache.thrift.TException te) {
+      } catch (TException te) {
         throw new java.io.IOException(te);
       }
     }
@@ -872,7 +911,7 @@ public class DistributedRPCInvocations {
 
     private static class result_argsStandardScheme extends StandardScheme<result_args> {
 
-      public void read(org.apache.thrift.protocol.TProtocol iprot, result_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, result_args struct) throws TException {
         org.apache.thrift.protocol.TField schemeField;
         iprot.readStructBegin();
         while (true)
@@ -907,7 +946,7 @@ public class DistributedRPCInvocations {
         struct.validate();
       }
 
-      public void write(org.apache.thrift.protocol.TProtocol oprot, result_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, result_args struct) throws TException {
         struct.validate();
 
         oprot.writeStructBegin(STRUCT_DESC);
@@ -936,7 +975,7 @@ public class DistributedRPCInvocations {
     private static class result_argsTupleScheme extends TupleScheme<result_args> {
 
       @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, result_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, result_args struct) throws TException {
         TTupleProtocol oprot = (TTupleProtocol) prot;
         BitSet optionals = new BitSet();
         if (struct.is_set_id()) {
@@ -955,7 +994,7 @@ public class DistributedRPCInvocations {
       }
 
       @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, result_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, result_args struct) throws TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
@@ -974,6 +1013,7 @@ public class DistributedRPCInvocations {
   public static class result_result implements org.apache.thrift.TBase<result_result, result_result._Fields>, java.io.Serializable, Cloneable, Comparable<result_result>   {
     private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("result_result");
 
+    private static final org.apache.thrift.protocol.TField AZE_FIELD_DESC = new org.apache.thrift.protocol.TField("aze", org.apache.thrift.protocol.TType.STRUCT, (short)1);
 
     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
     static {
@@ -981,10 +1021,11 @@ public class DistributedRPCInvocations {
       schemes.put(TupleScheme.class, new result_resultTupleSchemeFactory());
     }
 
+    private AuthorizationException aze; // required
 
     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
     public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-;
+      AZE((short)1, "aze");
 
       private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -999,6 +1040,8 @@ public class DistributedRPCInvocations {
        */
       public static _Fields findByThriftId(int fieldId) {
         switch(fieldId) {
+          case 1: // AZE
+            return AZE;
           default:
             return null;
         }
@@ -1037,9 +1080,13 @@ public class DistributedRPCInvocations {
         return _fieldName;
       }
     }
+
+    // isset id assignments
     public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      tmpMap.put(_Fields.AZE, new org.apache.thrift.meta_data.FieldMetaData("aze", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
       org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(result_result.class, metaDataMap);
     }
@@ -1047,10 +1094,20 @@ public class DistributedRPCInvocations {
     public result_result() {
     }
 
+    public result_result(
+      AuthorizationException aze)
+    {
+      this();
+      this.aze = aze;
+    }
+
     /**
      * Performs a deep copy on <i>other</i>.
      */
     public result_result(result_result other) {
+      if (other.is_set_aze()) {
+        this.aze = new AuthorizationException(other.aze);
+      }
     }
 
     public result_result deepCopy() {
@@ -1059,15 +1116,50 @@ public class DistributedRPCInvocations {
 
     @Override
     public void clear() {
+      this.aze = null;
+    }
+
+    public AuthorizationException get_aze() {
+      return this.aze;
+    }
+
+    public void set_aze(AuthorizationException aze) {
+      this.aze = aze;
+    }
+
+    public void unset_aze() {
+      this.aze = null;
+    }
+
+    /** Returns true if field aze is set (has been assigned a value) and false otherwise */
+    public boolean is_set_aze() {
+      return this.aze != null;
+    }
+
+    public void set_aze_isSet(boolean value) {
+      if (!value) {
+        this.aze = null;
+      }
     }
 
     public void setFieldValue(_Fields field, Object value) {
       switch (field) {
+      case AZE:
+        if (value == null) {
+          unset_aze();
+        } else {
+          set_aze((AuthorizationException)value);
+        }
+        break;
+
       }
     }
 
     public Object getFieldValue(_Fields field) {
       switch (field) {
+      case AZE:
+        return get_aze();
+
       }
       throw new IllegalStateException();
     }
@@ -1079,6 +1171,8 @@ public class DistributedRPCInvocations {
       }
 
       switch (field) {
+      case AZE:
+        return is_set_aze();
       }
       throw new IllegalStateException();
     }
@@ -1096,6 +1190,15 @@ public class DistributedRPCInvocations {
       if (that == null)
         return false;
 
+      boolean this_present_aze = true && this.is_set_aze();
+      boolean that_present_aze = true && that.is_set_aze();
+      if (this_present_aze || that_present_aze) {
+        if (!(this_present_aze && that_present_aze))
+          return false;
+        if (!this.aze.equals(that.aze))
+          return false;
+      }
+
       return true;
     }
 
@@ -1103,6 +1206,11 @@ public class DistributedRPCInvocations {
     public int hashCode() {
       List<Object> list = new ArrayList<Object>();
 
+      boolean present_aze = true && (is_set_aze());
+      list.add(present_aze);
+      if (present_aze)
+        list.add(aze);
+
       return list.hashCode();
     }
 
@@ -1114,6 +1222,16 @@ public class DistributedRPCInvocations {
 
       int lastComparison = 0;
 
+      lastComparison = Boolean.valueOf(is_set_aze()).compareTo(other.is_set_aze());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (is_set_aze()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.aze, other.aze);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
       return 0;
     }
 
@@ -1121,11 +1239,11 @@ public class DistributedRPCInvocations {
       return _Fields.findByThriftId(fieldId);
     }
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
       schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
       schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
       }
 
@@ -1134,11 +1252,18 @@ public class DistributedRPCInvocations {
       StringBuilder sb = new StringBuilder("result_result(");
       boolean first = true;
 
+      sb.append("aze:");
+      if (this.aze == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.aze);
+      }
+      first = false;
       sb.append(")");
       return sb.toString();
     }
 
-    public void validate() throws org.apache.thrift.TException {
+    public void validate() throws TException {
       // check for required fields
       // check for sub-struct validity
     }
@@ -1146,7 +1271,7 @@ public class DistributedRPCInvocations {
     private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
       try {
         write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-      } catch (org.apache.thrift.TException te) {
+      } catch (TException te) {
         throw new java.io.IOException(te);
       }
     }
@@ -1154,7 +1279,7 @@ public class DistributedRPCInvocations {
     private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
       try {
         read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-      } catch (org.apache.thrift.TException te) {
+      } catch (TException te) {
         throw new java.io.IOException(te);
       }
     }
@@ -1167,7 +1292,7 @@ public class DistributedRPCInvocations {
 
     private static class result_resultStandardScheme extends StandardScheme<result_result> {
 
-      public void read(org.apache.thrift.protocol.TProtocol iprot, result_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, result_result struct) throws TException {
         org.apache.thrift.protocol.TField schemeField;
         iprot.readStructBegin();
         while (true)
@@ -1177,6 +1302,15 @@ public class DistributedRPCInvocations {
             break;
           }
           switch (schemeField.id) {
+            case 1: // AZE
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+                struct.aze = new AuthorizationException();
+                struct.aze.read(iprot);
+                struct.set_aze_isSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
             default:
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
           }
@@ -1186,10 +1320,15 @@ public class DistributedRPCInvocations {
         struct.validate();
       }
 
-      public void write(org.apache.thrift.protocol.TProtocol oprot, result_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, result_result struct) throws TException {
         struct.validate();
 
         oprot.writeStructBegin(STRUCT_DESC);
+        if (struct.aze != null) {
+          oprot.writeFieldBegin(AZE_FIELD_DESC);
+          struct.aze.write(oprot);
+          oprot.writeFieldEnd();
+        }
         oprot.writeFieldStop();
         oprot.writeStructEnd();
       }
@@ -1205,13 +1344,27 @@ public class DistributedRPCInvocations {
     private static class result_resultTupleScheme extends TupleScheme<result_result> {
 
       @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, result_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, result_result struct) throws TException {
         TTupleProtocol oprot = (TTupleProtocol) prot;
+        BitSet optionals = new BitSet();
+        if (struct.is_set_aze()) {
+          optionals.set(0);
+        }
+        oprot.writeBitSet(optionals, 1);
+        if (struct.is_set_aze()) {
+          struct.aze.write(oprot);
+        }
       }
 
       @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, result_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, result_result struct) throws TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
+        BitSet incoming = iprot.readBitSet(1);
+        if (incoming.get(0)) {
+          struct.aze = new AuthorizationException();
+          struct.aze.read(iprot);
+          struct.set_aze_isSet(true);
+        }
       }
     }
 
@@ -1446,11 +1599,11 @@ public class DistributedRPCInvocations {
       return _Fields.findByThriftId(fieldId);
     }
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
       schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
       schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
     }
 
@@ -1470,7 +1623,7 @@ public class DistributedRPCInvocations {
       return sb.toString();
     }
 
-    public void validate() throws org.apache.thrift.TException {
+    public void validate() throws TException {
       // check for required fields
       // check for sub-struct validity
     }
@@ -1478,7 +1631,7 @@ public class DistributedRPCInvocations {
     private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
       try {
         write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-      } catch (org.apache.thrift.TException te) {
+      } catch (TException te) {
         throw new java.io.IOException(te);
       }
     }
@@ -1486,7 +1639,7 @@ public class DistributedRPCInvocations {
     private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
       try {
         read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-      } catch (org.apache.thrift.TException te) {
+      } catch (TException te) {
         throw new java.io.IOException(te);
       }
     }
@@ -1499,7 +1652,7 @@ public class DistributedRPCInvocations {
 
     private static class fetchRequest_argsStandardScheme extends StandardScheme<fetchRequest_args> {
 
-      public void read(org.apache.thrift.protocol.TProtocol iprot, fetchRequest_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, fetchRequest_args struct) throws TException {
         org.apache.thrift.protocol.TField schemeField;
         iprot.readStructBegin();
         while (true)
@@ -1526,7 +1679,7 @@ public class DistributedRPCInvocations {
         struct.validate();
       }
 
-      public void write(org.apache.thrift.protocol.TProtocol oprot, fetchRequest_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, fetchRequest_args struct) throws TException {
         struct.validate();
 
         oprot.writeStructBegin(STRUCT_DESC);
@@ -1550,7 +1703,7 @@ public class DistributedRPCInvocations {
     private static class fetchRequest_argsTupleScheme extends TupleScheme<fetchRequest_args> {
 
       @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, fetchRequest_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, fetchRequest_args struct) throws TException {
         TTupleProtocol oprot = (TTupleProtocol) prot;
         BitSet optionals = new BitSet();
         if (struct.is_set_functionName()) {
@@ -1563,7 +1716,7 @@ public class DistributedRPCInvocations {
       }
 
       @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, fetchRequest_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, fetchRequest_args struct) throws TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
         BitSet incoming = iprot.readBitSet(1);
         if (incoming.get(0)) {
@@ -1579,6 +1732,7 @@ public class DistributedRPCInvocations {
     private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("fetchRequest_result");
 
     private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0);
+    private static final org.apache.thrift.protocol.TField AZE_FIELD_DESC = new org.apache.thrift.protocol.TField("aze", org.apache.thrift.protocol.TType.STRUCT, (short)1);
 
     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
     static {
@@ -1587,10 +1741,12 @@ public class DistributedRPCInvocations {
     }
 
     private DRPCRequest success; // required
+    private AuthorizationException aze; // required
 
     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
     public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-      SUCCESS((short)0, "success");
+      SUCCESS((short)0, "success"),
+      AZE((short)1, "aze");
 
       private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -1607,6 +1763,8 @@ public class DistributedRPCInvocations {
         switch(fieldId) {
           case 0: // SUCCESS
             return SUCCESS;
+          case 1: // AZE
+            return AZE;
           default:
             return null;
         }
@@ -1652,6 +1810,8 @@ public class DistributedRPCInvocations {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
       tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
           new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, DRPCRequest.class)));
+      tmpMap.put(_Fields.AZE, new org.apache.thrift.meta_data.FieldMetaData("aze", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
       org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(fetchRequest_result.class, metaDataMap);
     }
@@ -1660,10 +1820,12 @@ public class DistributedRPCInvocations {
     }
 
     public fetchRequest_result(
-      DRPCRequest success)
+      DRPCRequest success,
+      AuthorizationException aze)
     {
       this();
       this.success = success;
+      this.aze = aze;
     }
 
     /**
@@ -1673,6 +1835,9 @@ public class DistributedRPCInvocations {
       if (other.is_set_success()) {
         this.success = new DRPCRequest(other.success);
       }
+      if (other.is_set_aze()) {
+        this.aze = new AuthorizationException(other.aze);
+      }
     }
 
     public fetchRequest_result deepCopy() {
@@ -1682,6 +1847,7 @@ public class DistributedRPCInvocations {
     @Override
     public void clear() {
       this.success = null;
+      this.aze = null;
     }
 
     public DRPCRequest get_success() {
@@ -1707,6 +1873,29 @@ public class DistributedRPCInvocations {
       }
     }
 
+    public AuthorizationException get_aze() {
+      return this.aze;
+    }
+
+    public void set_aze(AuthorizationException aze) {
+      this.aze = aze;
+    }
+
+    public void unset_aze() {
+      this.aze = null;
+    }
+
+    /** Returns true if field aze is set (has been assigned a value) and false otherwise */
+    public boolean is_set_aze() {
+      return this.aze != null;
+    }
+
+    public void set_aze_isSet(boolean value) {
+      if (!value) {
+        this.aze = null;
+      }
+    }
+
     public void setFieldValue(_Fields field, Object value) {
       switch (field) {
       case SUCCESS:
@@ -1717,6 +1906,14 @@ public class DistributedRPCInvocations {
         }
         break;
 
+      case AZE:
+        if (value == null) {
+          unset_aze();
+        } else {
+          set_aze((AuthorizationException)value);
+        }
+        break;
+
       }
     }
 
@@ -1725,6 +1922,9 @@ public class DistributedRPCInvocations {
       case SUCCESS:
         return get_success();
 
+      case AZE:
+        return get_aze();
+
       }
       throw new IllegalStateException();
     }
@@ -1738,6 +1938,8 @@ public class DistributedRPCInvocations {
       switch (field) {
       case SUCCESS:
         return is_set_success();
+      case AZE:
+        return is_set_aze();
       }
       throw new IllegalStateException();
     }
@@ -1764,6 +1966,15 @@ public class DistributedRPCInvocations {
           return false;
       }
 
+      boolean this_present_aze = true && this.is_set_aze();
+      boolean that_present_aze = true && that.is_set_aze();
+      if (this_present_aze || that_present_aze) {
+        if (!(this_present_aze && that_present_aze))
+          return false;
+        if (!this.aze.equals(that.aze))
+          return false;
+      }
+
       return true;
     }
 
@@ -1776,6 +1987,11 @@ public class DistributedRPCInvocations {
       if (present_success)
         list.add(success);
 
+      boolean present_aze = true && (is_set_aze());
+      list.add(present_aze);
+      if (present_aze)
+        list.add(aze);
+
       return list.hashCode();
     }
 
@@ -1797,6 +2013,16 @@ public class DistributedRPCInvocations {
           return lastComparison;
         }
       }
+      lastComparison = Boolean.valueOf(is_set_aze()).compareTo(other.is_set_aze());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (is_set_aze()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.aze, other.aze);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
       return 0;
     }
 
@@ -1804,11 +2030,11 @@ public class DistributedRPCInvocations {
       return _Fields.findByThriftId(fieldId);
     }
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
       schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
       schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
       }
 
@@ -1824,11 +2050,19 @@ public class DistributedRPCInvocations {
         sb.append(this.success);
       }
       first = false;
+      if (!first) sb.append(", ");
+      sb.append("aze:");
+      if (this.aze == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.aze);
+      }
+      first = false;
       sb.append(")");
       return sb.toString();
     }
 
-    public void validate() throws org.apache.thrift.TException {
+    public void validate() throws TException {
       // check for required fields
       // check for sub-struct validity
       if (success != null) {
@@ -1839,7 +2073,7 @@ public class DistributedRPCInvocations {
     private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
       try {
         write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-      } catch (org.apache.thrift.TException te) {
+      } catch (TException te) {
         throw new java.io.IOException(te);
       }
     }
@@ -1847,7 +2081,7 @@ public class DistributedRPCInvocations {
     private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
       try {
         read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-      } catch (org.apache.thrift.TException te) {
+      } catch (TException te) {
         throw new java.io.IOException(te);
       }
     }
@@ -1860,7 +2094,7 @@ public class DistributedRPCInvocations {
 
     private static class fetchRequest_resultStandardScheme extends StandardScheme<fetchRequest_result> {
 
-      public void read(org.apache.thrift.protocol.TProtocol iprot, fetchRequest_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, fetchRequest_result struct) throws TException {
         org.apache.thrift.protocol.TField schemeField;
         iprot.readStructBegin();
         while (true)
@@ -1879,6 +2113,15 @@ public class DistributedRPCInvocations {
                 org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
               }
               break;
+            case 1: // AZE
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+                struct.aze = new AuthorizationException();
+                struct.aze.read(iprot);
+                struct.set_aze_isSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
             default:
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
           }
@@ -1888,7 +2131,7 @@ public class DistributedRPCInvocations {
         struct.validate();
       }
 
-      public void write(org.apache.thrift.protocol.TProtocol oprot, fetchRequest_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, fetchRequest_result struct) throws TException {
         struct.validate();
 
         oprot.writeStructBegin(STRUCT_DESC);
@@ -1897,6 +2140,11 @@ public class DistributedRPCInvocations {
           struct.success.write(oprot);
           oprot.writeFieldEnd();
         }
+        if (struct.aze != null) {
+          oprot.writeFieldBegin(AZE_FIELD_DESC);
+          struct.aze.write(oprot);
+          oprot.writeFieldEnd();
+        }
         oprot.writeFieldStop();
         oprot.writeStructEnd();
       }
@@ -1912,27 +2160,38 @@ public class DistributedRPCInvocations {
     private static class fetchRequest_resultTupleScheme extends TupleScheme<fetchRequest_result> {
 
       @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, fetchRequest_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, fetchRequest_result struct) throws TException {
         TTupleProtocol oprot = (TTupleProtocol) prot;
         BitSet optionals = new BitSet();
         if (struct.is_set_success()) {
           optionals.set(0);
         }
-        oprot.writeBitSet(optionals, 1);
+        if (struct.is_set_aze()) {
+          optionals.set(1);
+        }
+        oprot.writeBitSet(optionals, 2);
         if (struct.is_set_success()) {
           struct.success.write(oprot);
         }
+        if (struct.is_set_aze()) {
+          struct.aze.write(oprot);
+        }
       }
 
       @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, fetchRequest_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, fetchRequest_result struct) throws TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
-        BitSet incoming = iprot.readBitSet(1);
+        BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           struct.success = new DRPCRequest();
           struct.success.read(iprot);
           struct.set_success_isSet(true);
         }
+        if (incoming.get(1)) {
+          struct.aze = new AuthorizationException();
+          struct.aze.read(iprot);
+          struct.set_aze_isSet(true);
+        }
       }
     }
 
@@ -2167,11 +2426,11 @@ public class DistributedRPCInvocations {
       return _Fields.findByThriftId(fieldId);
     }
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
       schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
       schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
     }
 
@@ -2191,7 +2450,7 @@ public class DistributedRPCInvocations {
       return sb.toString();
     }
 
-    public void validate() throws org.apache.thrift.TException {
+    public void validate() throws TException {
       // check for required fields
       // check for sub-struct validity
     }
@@ -2199,7 +2458,7 @@ public class DistributedRPCInvocations {
     private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
       try {
         write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-      } catch (org.apache.thrift.TException te) {
+      } catch (TException te) {
         throw new java.io.IOException(te);
       }
     }
@@ -2207,7 +2466,7 @@ public class DistributedRPCInvocations {
     private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
       try {
         read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-      } catch (org.apache.thrift.TException te) {
+      } catch (TException te) {
         throw new java.io.IOException(te);
       }
     }
@@ -2220,7 +2479,7 @@ public class DistributedRPCInvocations {
 
     private static class failRequest_argsStandardScheme extends StandardScheme<failRequest_args> {
 
-      public void read(org.apache.thrift.protocol.TProtocol iprot, failRequest_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, failRequest_args struct) throws TException {
         org.apache.thrift.protocol.TField schemeField;
         iprot.readStructBegin();
         while (true)
@@ -2247,7 +2506,7 @@ public class DistributedRPCInvocations {
         struct.validate();
       }
 
-      public void write(org.apache.thrift.protocol.TProtocol oprot, failRequest_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, failRequest_args struct) throws TException {
         struct.validate();
 
         oprot.writeStructBegin(STRUCT_DESC);
@@ -2271,7 +2530,7 @@ public class DistributedRPCInvocations {
     private static class failRequest_argsTupleScheme extends TupleScheme<failRequest_args> {
 
       @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, failRequest_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, failRequest_args struct) throws TException {
         TTupleProtocol oprot = (TTupleProtocol) prot;
         BitSet optionals = new BitSet();
         if (struct.is_set_id()) {
@@ -2284,7 +2543,7 @@ public class DistributedRPCInvocations {
       }
 
       @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, failRequest_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, failRequest_args struct) throws TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
         BitSet incoming = iprot.readBitSet(1);
         if (incoming.get(0)) {
@@ -2299,6 +2558,7 @@ public class DistributedRPCInvocations {
   public static class failRequest_result implements org.apache.thrift.TBase<failRequest_result, failRequest_result._Fields>, java.io.Serializable, Cloneable, Comparable<failRequest_result>   {
     private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("failRequest_result");
 
+    private static final org.apache.thrift.protocol.TField AZE_FIELD_DESC = new org.apache.thrift.protocol.TField("aze", org.apache.thrift.protocol.TType.STRUCT, (short)1);
 
     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
     static {
@@ -2306,10 +2566,11 @@ public class DistributedRPCInvocations {
       schemes.put(TupleScheme.class, new failRequest_resultTupleSchemeFactory());
     }
 
+    private AuthorizationException aze; // required
 
     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
     public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-;
+      AZE((short)1, "aze");
 
       private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -2324,6 +2585,8 @@ public class DistributedRPCInvocations {
        */
       public static _Fields findByThriftId(int fieldId) {
         switch(fieldId) {
+          case 1: // AZE
+            return AZE;
           default:
             return null;
         }
@@ -2362,9 +2625,13 @@ public class DistributedRPCInvocations {
         return _fieldName;
       }
     }
+
+    // isset id assignments
     public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      tmpMap.put(_Fields.AZE, new org.apache.thrift.meta_data.FieldMetaData("aze", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
       org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(failRequest_result.class, metaDataMap);
     }
@@ -2372,10 +2639,20 @@ public class DistributedRPCInvocations {
     public failRequest_result() {
     }
 
+    public failRequest_result(
+      AuthorizationException aze)
+    {
+      this();
+      this.aze = aze;
+    }
+
     /**
      * Performs a deep copy on <i>other</i>.
      */
     public failRequest_result(failRequest_result other) {
+      if (other.is_set_aze()) {
+        this.aze = new AuthorizationException(other.aze);
+      }
     }
 
     public failRequest_result deepCopy() {
@@ -2384,15 +2661,50 @@ public class DistributedRPCInvocations {
 
     @Override
     public void clear() {
+      this.aze = null;
+    }
+
+    public AuthorizationException get_aze() {
+      return this.aze;
+    }
+
+    public void set_aze(AuthorizationException aze) {
+      this.aze = aze;
+    }
+
+    public void unset_aze() {
+      this.aze = null;
+    }
+
+    /** Returns true if field aze is set (has been assigned a value) and false otherwise */
+    public boolean is_set_aze() {
+      return this.aze != null;
+    }
+
+    public void set_aze_isSet(boolean value) {
+      if (!value) {
+        this.aze = null;
+      }
     }
 
     public void setFieldValue(_Fields field, Object value) {
       switch (field) {
+      case AZE:
+        if (value == null) {
+          unset_aze();
+        } else {
+          set_aze((AuthorizationException)value);
+        }
+        break;
+
       }
     }
 
     public Object getFieldValue(_Fields field) {
       switch (field) {
+      case AZE:
+        return get_aze();
+
       }
       throw new IllegalStateException();
     }
@@ -2404,6 +2716,8 @@ public class DistributedRPCInvocations {
       }
 
       switch (field) {
+      case AZE:
+        return is_set_aze();
       }
       throw new IllegalStateException();
     }
@@ -2421,6 +2735,15 @@ public class DistributedRPCInvocations {
       if (that == null)
         return false;
 
+      boolean this_present_aze = true && this.is_set_aze();
+      boolean that_present_aze = true && that.is_set_aze();
+      if (this_present_aze || that_present_aze) {
+        if (!(this_present_aze && that_present_aze))
+          return false;
+        if (!this.aze.equals(that.aze))
+          return false;
+      }
+
       return true;
     }
 
@@ -2428,6 +2751,11 @@ public class DistributedRPCInvocations {
     public int hashCode() {
       List<Object> list = new ArrayList<Object>();
 
+      boolean present_aze = true && (is_set_aze());
+      list.add(present_aze);
+      if (present_aze)
+        list.add(aze);
+
       return list.hashCode();
     }
 
@@ -2439,6 +2767,16 @@ public class DistributedRPCInvocations {
 
       int lastComparison = 0;
 
+      lastComparison = Boolean.valueOf(is_set_aze()).compareTo(other.is_set_aze());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (is_set_aze()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.aze, other.aze);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
       return 0;
     }
 
@@ -2446,11 +2784,11 @@ public class DistributedRPCInvocations {
       return _Fields.findByThriftId(fieldId);
     }
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
       schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
       schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
       }
 
@@ -2459,11 +2797,18 @@ public class DistributedRPCInvocations {
       StringBuilder sb = new StringBuilder("failRequest_result(");
       boolean first = true;
 
+      sb.append("aze:");
+      if (this.aze == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.aze);
+      }
+      first = false;
       sb.append(")");
       return sb.toString();
     }
 
-    public void validate() throws org.apache.thrift.TException {
+    public void validate() throws TException {
       // check for required fields
       // check for sub-struct validity
     }
@@ -2471,7 +2816,7 @@ public class DistributedRPCInvocations {
     private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
       try {
         write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-      } catch (org.apache.thrift.TException te) {
+      } catch (TException te) {
         throw new java.io.IOException(te);
       }
     }
@@ -2479,7 +2824,7 @@ public class DistributedRPCInvocations {
     private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
       try {
         read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-      } catch (org.apache.thrift.TException te) {
+      } catch (TException te) {
         throw new java.io.IOException(te);
       }
     }
@@ -2492,7 +2837,7 @@ public class DistributedRPCInvocations {
 
     private static class failRequest_resultStandardScheme extends StandardScheme<failRequest_result> {
 
-      public void read(org.apache.thrift.protocol.TProtocol iprot, failRequest_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, failRequest_result struct) throws TException {
         org.apache.thrift.protocol.TField schemeField;
         iprot.readStructBegin();
         while (true)
@@ -2502,6 +2847,15 @@ public class DistributedRPCInvocations {
             break;
           }
           switch (schemeField.id) {
+            case 1: // AZE
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+                struct.aze = new AuthorizationException();
+                struct.aze.read(iprot);
+                struct.set_aze_isSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
             default:
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
           }
@@ -2511,10 +2865,15 @@ public class DistributedRPCInvocations {
         struct.validate();
       }
 
-      public void write(org.apache.thrift.protocol.TProtocol oprot, failRequest_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, failRequest_result struct) throws TException {
         struct.validate();
 
         oprot.writeStructBegin(STRUCT_DESC);
+        if (struct.aze != null) {
+          oprot.writeFieldBegin(AZE_FIELD_DESC);
+          struct.aze.write(oprot);
+          oprot.writeFieldEnd();
+        }
         oprot.writeFieldStop();
         oprot.writeStructEnd();
       }
@@ -2530,13 +2889,27 @@ public class DistributedRPCInvocations {
     private static class failRequest_resultTupleScheme extends TupleScheme<failRequest_result> {
 
       @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, failRequest_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, failRequest_result struct) throws TException {
         TTupleProtocol oprot = (TTupleProtocol) prot;
+        BitSet optionals = new BitSet();
+        if (struct.is_set_aze()) {
+          optionals.set(0);
+        }
+        oprot.writeBitSet(optionals, 1);
+        if (struct.is_set_aze()) {
+          struct.aze.write(oprot);
+        }
       }
 
       @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, failRequest_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, failRequest_result struct) throws TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
+        BitSet incoming = iprot.readBitSet(1);
+        if (incoming.get(0)) {
+          struct.aze = new AuthorizationException();
+          struct.aze.read(iprot);
+          struct.set_aze_isSet(true);
+        }
       }
     }
 

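A note on the pattern in the DistributedRPCInvocations hunks above: the service
methods (result, fetchRequest, failRequest) now declare AuthorizationException,
so each generated *_result struct gains an `aze` field (Thrift field id 1), the
synchronous Processor handlers catch the exception into that field, and the
AsyncProcessor onError paths marshal it back as a normal REPLY rather than a
TApplicationException(INTERNAL_ERROR). The practical effect is that a caller can
tell an authorization failure apart from a transport or server error. A minimal
caller-side sketch, assuming the default invocations port 3773 and a plain
framed/binary transport (the endpoint wiring is illustrative, not taken from
this commit):

import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TFramedTransport;
import org.apache.thrift.transport.TSocket;

import backtype.storm.generated.AuthorizationException;
import backtype.storm.generated.DRPCRequest;
import backtype.storm.generated.DistributedRPCInvocations;

public class DrpcInvocationsSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical endpoint; a real worker takes host/port from the cluster config.
    TFramedTransport transport = new TFramedTransport(new TSocket("localhost", 3773));
    transport.open();
    DistributedRPCInvocations.Client client =
        new DistributedRPCInvocations.Client(new TBinaryProtocol(transport));
    try {
      // fetchRequest_result now carries an `aze` field, so a rejected call
      // surfaces here as a typed AuthorizationException instead of a
      // generic TApplicationException(INTERNAL_ERROR).
      DRPCRequest req = client.fetchRequest("my-function");
      client.result(req.get_request_id(), "computed-value");
    } catch (AuthorizationException aze) {
      System.err.println("DRPC invocation rejected: " + aze.get_msg());
    } finally {
      transport.close();
    }
  }
}

The ErrorInfo.java diff that follows is mechanical by comparison: renaming
error_time_secs to errorTimeSecs changes only the generated accessors and the
field's display name, not the wire format, since the binary and compact
protocols send just the field id (2) and type.
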
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/ErrorInfo.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/ErrorInfo.java b/jstorm-core/src/main/java/backtype/storm/generated/ErrorInfo.java
index f52e526..f99797b 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/ErrorInfo.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/ErrorInfo.java
@@ -34,12 +34,12 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class ErrorInfo implements org.apache.thrift.TBase<ErrorInfo, ErrorInfo._Fields>, java.io.Serializable, Cloneable, Comparable<ErrorInfo> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ErrorInfo");
 
   private static final org.apache.thrift.protocol.TField ERROR_FIELD_DESC = new org.apache.thrift.protocol.TField("error", org.apache.thrift.protocol.TType.STRING, (short)1);
-  private static final org.apache.thrift.protocol.TField ERROR_TIME_SECS_FIELD_DESC = new org.apache.thrift.protocol.TField("error_time_secs", org.apache.thrift.protocol.TType.I32, (short)2);
+  private static final org.apache.thrift.protocol.TField ERROR_TIME_SECS_FIELD_DESC = new org.apache.thrift.protocol.TField("errorTimeSecs", org.apache.thrift.protocol.TType.I32, (short)2);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -48,12 +48,12 @@ public class ErrorInfo implements org.apache.thrift.TBase<ErrorInfo, ErrorInfo._
   }
 
   private String error; // required
-  private int error_time_secs; // required
+  private int errorTimeSecs; // required
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
     ERROR((short)1, "error"),
-    ERROR_TIME_SECS((short)2, "error_time_secs");
+    ERROR_TIME_SECS((short)2, "errorTimeSecs");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -112,14 +112,14 @@ public class ErrorInfo implements org.apache.thrift.TBase<ErrorInfo, ErrorInfo._
   }
 
   // isset id assignments
-  private static final int __ERROR_TIME_SECS_ISSET_ID = 0;
+  private static final int __ERRORTIMESECS_ISSET_ID = 0;
   private byte __isset_bitfield = 0;
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
     tmpMap.put(_Fields.ERROR, new org.apache.thrift.meta_data.FieldMetaData("error", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.ERROR_TIME_SECS, new org.apache.thrift.meta_data.FieldMetaData("error_time_secs", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+    tmpMap.put(_Fields.ERROR_TIME_SECS, new org.apache.thrift.meta_data.FieldMetaData("errorTimeSecs", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ErrorInfo.class, metaDataMap);
@@ -130,12 +130,12 @@ public class ErrorInfo implements org.apache.thrift.TBase<ErrorInfo, ErrorInfo._
 
   public ErrorInfo(
     String error,
-    int error_time_secs)
+    int errorTimeSecs)
   {
     this();
     this.error = error;
-    this.error_time_secs = error_time_secs;
-    set_error_time_secs_isSet(true);
+    this.errorTimeSecs = errorTimeSecs;
+    set_errorTimeSecs_isSet(true);
   }
 
   /**
@@ -146,7 +146,7 @@ public class ErrorInfo implements org.apache.thrift.TBase<ErrorInfo, ErrorInfo._
     if (other.is_set_error()) {
       this.error = other.error;
     }
-    this.error_time_secs = other.error_time_secs;
+    this.errorTimeSecs = other.errorTimeSecs;
   }
 
   public ErrorInfo deepCopy() {
@@ -156,8 +156,8 @@ public class ErrorInfo implements org.apache.thrift.TBase<ErrorInfo, ErrorInfo._
   @Override
   public void clear() {
     this.error = null;
-    set_error_time_secs_isSet(false);
-    this.error_time_secs = 0;
+    set_errorTimeSecs_isSet(false);
+    this.errorTimeSecs = 0;
   }
 
   public String get_error() {
@@ -183,26 +183,26 @@ public class ErrorInfo implements org.apache.thrift.TBase<ErrorInfo, ErrorInfo._
     }
   }
 
-  public int get_error_time_secs() {
-    return this.error_time_secs;
+  public int get_errorTimeSecs() {
+    return this.errorTimeSecs;
   }
 
-  public void set_error_time_secs(int error_time_secs) {
-    this.error_time_secs = error_time_secs;
-    set_error_time_secs_isSet(true);
+  public void set_errorTimeSecs(int errorTimeSecs) {
+    this.errorTimeSecs = errorTimeSecs;
+    set_errorTimeSecs_isSet(true);
   }
 
-  public void unset_error_time_secs() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ERROR_TIME_SECS_ISSET_ID);
+  public void unset_errorTimeSecs() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ERRORTIMESECS_ISSET_ID);
   }
 
-  /** Returns true if field error_time_secs is set (has been assigned a value) and false otherwise */
-  public boolean is_set_error_time_secs() {
-    return EncodingUtils.testBit(__isset_bitfield, __ERROR_TIME_SECS_ISSET_ID);
+  /** Returns true if field errorTimeSecs is set (has been assigned a value) and false otherwise */
+  public boolean is_set_errorTimeSecs() {
+    return EncodingUtils.testBit(__isset_bitfield, __ERRORTIMESECS_ISSET_ID);
   }
 
-  public void set_error_time_secs_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ERROR_TIME_SECS_ISSET_ID, value);
+  public void set_errorTimeSecs_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ERRORTIMESECS_ISSET_ID, value);
   }
 
   public void setFieldValue(_Fields field, Object value) {
@@ -217,9 +217,9 @@ public class ErrorInfo implements org.apache.thrift.TBase<ErrorInfo, ErrorInfo._
 
     case ERROR_TIME_SECS:
       if (value == null) {
-        unset_error_time_secs();
+        unset_errorTimeSecs();
       } else {
-        set_error_time_secs((Integer)value);
+        set_errorTimeSecs((Integer)value);
       }
       break;
 
@@ -232,7 +232,7 @@ public class ErrorInfo implements org.apache.thrift.TBase<ErrorInfo, ErrorInfo._
       return get_error();
 
     case ERROR_TIME_SECS:
-      return Integer.valueOf(get_error_time_secs());
+      return Integer.valueOf(get_errorTimeSecs());
 
     }
     throw new IllegalStateException();
@@ -248,7 +248,7 @@ public class ErrorInfo implements org.apache.thrift.TBase<ErrorInfo, ErrorInfo._
     case ERROR:
       return is_set_error();
     case ERROR_TIME_SECS:
-      return is_set_error_time_secs();
+      return is_set_errorTimeSecs();
     }
     throw new IllegalStateException();
   }
@@ -275,12 +275,12 @@ public class ErrorInfo implements org.apache.thrift.TBase<ErrorInfo, ErrorInfo._
         return false;
     }
 
-    boolean this_present_error_time_secs = true;
-    boolean that_present_error_time_secs = true;
-    if (this_present_error_time_secs || that_present_error_time_secs) {
-      if (!(this_present_error_time_secs && that_present_error_time_secs))
+    boolean this_present_errorTimeSecs = true;
+    boolean that_present_errorTimeSecs = true;
+    if (this_present_errorTimeSecs || that_present_errorTimeSecs) {
+      if (!(this_present_errorTimeSecs && that_present_errorTimeSecs))
         return false;
-      if (this.error_time_secs != that.error_time_secs)
+      if (this.errorTimeSecs != that.errorTimeSecs)
         return false;
     }
 
@@ -296,10 +296,10 @@ public class ErrorInfo implements org.apache.thrift.TBase<ErrorInfo, ErrorInfo._
     if (present_error)
       list.add(error);
 
-    boolean present_error_time_secs = true;
-    list.add(present_error_time_secs);
-    if (present_error_time_secs)
-      list.add(error_time_secs);
+    boolean present_errorTimeSecs = true;
+    list.add(present_errorTimeSecs);
+    if (present_errorTimeSecs)
+      list.add(errorTimeSecs);
 
     return list.hashCode();
   }
@@ -322,12 +322,12 @@ public class ErrorInfo implements org.apache.thrift.TBase<ErrorInfo, ErrorInfo._
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(is_set_error_time_secs()).compareTo(other.is_set_error_time_secs());
+    lastComparison = Boolean.valueOf(is_set_errorTimeSecs()).compareTo(other.is_set_errorTimeSecs());
     if (lastComparison != 0) {
       return lastComparison;
     }
-    if (is_set_error_time_secs()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.error_time_secs, other.error_time_secs);
+    if (is_set_errorTimeSecs()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.errorTimeSecs, other.errorTimeSecs);
       if (lastComparison != 0) {
         return lastComparison;
       }
@@ -339,11 +339,11 @@ public class ErrorInfo implements org.apache.thrift.TBase<ErrorInfo, ErrorInfo._
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -360,21 +360,21 @@ public class ErrorInfo implements org.apache.thrift.TBase<ErrorInfo, ErrorInfo._
     }
     first = false;
     if (!first) sb.append(", ");
-    sb.append("error_time_secs:");
-    sb.append(this.error_time_secs);
+    sb.append("errorTimeSecs:");
+    sb.append(this.errorTimeSecs);
     first = false;
     sb.append(")");
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_error()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'error' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'error' is unset! Struct:" + toString());
     }
 
-    if (!is_set_error_time_secs()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'error_time_secs' is unset! Struct:" + toString());
+    if (!is_set_errorTimeSecs()) {
+      throw new TProtocolException("Required field 'errorTimeSecs' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -383,7 +383,7 @@ public class ErrorInfo implements org.apache.thrift.TBase<ErrorInfo, ErrorInfo._
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -393,7 +393,7 @@ public class ErrorInfo implements org.apache.thrift.TBase<ErrorInfo, ErrorInfo._
       // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
       __isset_bitfield = 0;
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -406,7 +406,7 @@ public class ErrorInfo implements org.apache.thrift.TBase<ErrorInfo, ErrorInfo._
 
   private static class ErrorInfoStandardScheme extends StandardScheme<ErrorInfo> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, ErrorInfo struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, ErrorInfo struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -426,8 +426,8 @@ public class ErrorInfo implements org.apache.thrift.TBase<ErrorInfo, ErrorInfo._
             break;
           case 2: // ERROR_TIME_SECS
             if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
-              struct.error_time_secs = iprot.readI32();
-              struct.set_error_time_secs_isSet(true);
+              struct.errorTimeSecs = iprot.readI32();
+              struct.set_errorTimeSecs_isSet(true);
             } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
@@ -441,7 +441,7 @@ public class ErrorInfo implements org.apache.thrift.TBase<ErrorInfo, ErrorInfo._
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, ErrorInfo struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, ErrorInfo struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -451,7 +451,7 @@ public class ErrorInfo implements org.apache.thrift.TBase<ErrorInfo, ErrorInfo._
         oprot.writeFieldEnd();
       }
       oprot.writeFieldBegin(ERROR_TIME_SECS_FIELD_DESC);
-      oprot.writeI32(struct.error_time_secs);
+      oprot.writeI32(struct.errorTimeSecs);
       oprot.writeFieldEnd();
       oprot.writeFieldStop();
       oprot.writeStructEnd();
@@ -468,19 +468,19 @@ public class ErrorInfo implements org.apache.thrift.TBase<ErrorInfo, ErrorInfo._
   private static class ErrorInfoTupleScheme extends TupleScheme<ErrorInfo> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, ErrorInfo struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, ErrorInfo struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       oprot.writeString(struct.error);
-      oprot.writeI32(struct.error_time_secs);
+      oprot.writeI32(struct.errorTimeSecs);
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, ErrorInfo struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, ErrorInfo struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       struct.error = iprot.readString();
       struct.set_error_isSet(true);
-      struct.error_time_secs = iprot.readI32();
-      struct.set_error_time_secs_isSet(true);
+      struct.errorTimeSecs = iprot.readI32();
+      struct.set_errorTimeSecs_isSet(true);
     }
   }
 

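The hunks above rename the generated accessors for field 2 of ErrorInfo from snake_case (error_time_secs) to camelCase (errorTimeSecs). The Thrift field id and wire encoding are unchanged, so the rename is wire-compatible but source-incompatible for Java callers. A minimal sketch of the caller-side impact (info is a hypothetical instance; the two-argument constructor is assumed from the standard codegen for two required fields):

    // Hypothetical call site, before and after this commit.
    ErrorInfo info = new ErrorInfo("worker died", 1448928000);
    // before: int t = info.get_error_time_secs();
    int t = info.get_errorTimeSecs();   // renamed accessor, same field id 2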
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/GlobalStreamId.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/GlobalStreamId.java b/jstorm-core/src/main/java/backtype/storm/generated/GlobalStreamId.java
index 490a81d..65f5bfd 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/GlobalStreamId.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/GlobalStreamId.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class GlobalStreamId implements org.apache.thrift.TBase<GlobalStreamId, GlobalStreamId._Fields>, java.io.Serializable, Cloneable, Comparable<GlobalStreamId> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GlobalStreamId");
 
@@ -337,11 +337,11 @@ public class GlobalStreamId implements org.apache.thrift.TBase<GlobalStreamId, G
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -369,14 +369,14 @@ public class GlobalStreamId implements org.apache.thrift.TBase<GlobalStreamId, G
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_componentId()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'componentId' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'componentId' is unset! Struct:" + toString());
     }
 
     if (!is_set_streamId()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'streamId' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'streamId' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -385,7 +385,7 @@ public class GlobalStreamId implements org.apache.thrift.TBase<GlobalStreamId, G
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -393,7 +393,7 @@ public class GlobalStreamId implements org.apache.thrift.TBase<GlobalStreamId, G
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -406,7 +406,7 @@ public class GlobalStreamId implements org.apache.thrift.TBase<GlobalStreamId, G
 
   private static class GlobalStreamIdStandardScheme extends StandardScheme<GlobalStreamId> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, GlobalStreamId struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, GlobalStreamId struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -441,7 +441,7 @@ public class GlobalStreamId implements org.apache.thrift.TBase<GlobalStreamId, G
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, GlobalStreamId struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, GlobalStreamId struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -470,14 +470,14 @@ public class GlobalStreamId implements org.apache.thrift.TBase<GlobalStreamId, G
   private static class GlobalStreamIdTupleScheme extends TupleScheme<GlobalStreamId> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, GlobalStreamId struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, GlobalStreamId struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       oprot.writeString(struct.componentId);
       oprot.writeString(struct.streamId);
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, GlobalStreamId struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, GlobalStreamId struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       struct.componentId = iprot.readString();
       struct.set_componentId_isSet(true);

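As in ErrorInfo, the exception types here are shortened to their simple names. That compiles only if the generated file imports them; the hunks do not show the import section, but both classes are part of the standard libthrift API:

    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TProtocolException;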
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/Grouping.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/Grouping.java b/jstorm-core/src/main/java/backtype/storm/generated/Grouping.java
index bc60a06..1ef2fe3 100755
--- a/jstorm-core/src/main/java/backtype/storm/generated/Grouping.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/Grouping.java
@@ -284,7 +284,7 @@ public class Grouping extends org.apache.thrift.TUnion<Grouping, Grouping._Field
   }
 
   @Override
-  protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TField field) throws org.apache.thrift.TException {
+  protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TField field) throws TException {
     _Fields setField = _Fields.findByThriftId(field.id);
     if (setField != null) {
       switch (setField) {
@@ -396,7 +396,7 @@ public class Grouping extends org.apache.thrift.TUnion<Grouping, Grouping._Field
   }
 
   @Override
-  protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     switch (setField_) {
       case FIELDS:
         List<String> fields = (List<String>)value_;
@@ -447,7 +447,7 @@ public class Grouping extends org.apache.thrift.TUnion<Grouping, Grouping._Field
   }
 
   @Override
-  protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, short fieldID) throws org.apache.thrift.TException {
+  protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, short fieldID) throws TException {
     _Fields setField = _Fields.findByThriftId(fieldID);
     if (setField != null) {
       switch (setField) {
@@ -513,7 +513,7 @@ public class Grouping extends org.apache.thrift.TUnion<Grouping, Grouping._Field
   }
 
   @Override
-  protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     switch (setField_) {
       case FIELDS:
         List<String> fields = (List<String>)value_;
@@ -826,7 +826,7 @@ public class Grouping extends org.apache.thrift.TUnion<Grouping, Grouping._Field
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -835,7 +835,7 @@ public class Grouping extends org.apache.thrift.TUnion<Grouping, Grouping._Field
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }

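Grouping is a generated Thrift union, so the scheme methods above serialize whichever single field is currently set (setField_/value_). A hedged sketch of typical construction through the per-field static factories that the TUnion codegen produces (method names assumed from the FIELDS case above and the standard Storm IDL):

    // Exactly one member of the union is set; standardSchemeWriteValue
    // then switches on it, e.g. the FIELDS case writes a List<String>.
    Grouping byWord = Grouping.fields(Arrays.asList("word"));
    Grouping random = Grouping.shuffle(new NullStruct());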

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/window/Metric.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/window/Metric.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/window/Metric.java
deleted file mode 100755
index 63a725a..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/window/Metric.java
+++ /dev/null
@@ -1,231 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric.window;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.alibaba.jstorm.callback.Callback;
-import com.alibaba.jstorm.common.metric.operator.Sampling;
-import com.alibaba.jstorm.common.metric.operator.convert.Convertor;
-import com.alibaba.jstorm.common.metric.operator.merger.Merger;
-import com.alibaba.jstorm.common.metric.operator.updater.Updater;
-import com.alibaba.jstorm.utils.IntervalCheck;
-
-public class Metric<T, V> implements Sampling<Map<Integer, T>> {
-    private static final long serialVersionUID = -1362345159511508074L;
-    private static final Logger LOG = LoggerFactory.getLogger(Metric.class);
-
-    protected static boolean enable;
-
-    public static void setEnable(boolean e) {
-        enable = e;
-    }
-
-    protected List<RollingWindow<V>> rollingWindows;
-    protected AllWindow<V> allWindow;
-
-    protected int[] windowSeconds = { StatBuckets.MINUTE_WINDOW,
-            StatBuckets.HOUR_WINDOW, StatBuckets.DAY_WINDOW };
-    protected int bucketSize = StatBuckets.NUM_STAT_BUCKETS;
-    protected V defaultValue;
-    protected Updater<V> updater;
-    protected Merger<V> merger;
-    protected Convertor<V, T> convertor;
-    protected Callback callback;
-
-    protected int interval; // unit is second
-    protected IntervalCheck intervalCheck;
-    protected V unflushed;
-
-    public Metric() {
-    }
-
-    public int getInterval() {
-        if (windowSeconds == null || windowSeconds.length == 0) {
-            return StatBuckets.NUM_STAT_BUCKETS;
-        }
-
-        int intervals[] = new int[windowSeconds.length];
-        int smallest = Integer.MAX_VALUE;
-        for (int i = 0; i < windowSeconds.length; i++) {
-            int interval = windowSeconds[i] / bucketSize;
-            intervals[i] = interval;
-            if (interval < smallest) {
-                smallest = interval;
-            }
-        }
-
-        for (int goodInterval = smallest; goodInterval > 1; goodInterval--) {
-            boolean good = true;
-            for (int interval : intervals) {
-                if (interval % goodInterval != 0) {
-                    good = false;
-                    break;
-                }
-            }
-
-            if (good == true) {
-                return goodInterval;
-            }
-        }
-
-        return 1;
-    }
-
-    public void init() {
-        if (defaultValue == null || updater == null || merger == null
-                || convertor == null) {
-            throw new IllegalArgumentException("Invalid arguments");
-        }
-
-        rollingWindows = new ArrayList<RollingWindow<V>>();
-        if (windowSeconds != null) {
-            rollingWindows.clear();
-            for (int windowSize : windowSeconds) {
-                RollingWindow<V> rollingWindow =
-                        new RollingWindow<V>(defaultValue, windowSize
-                                / bucketSize, windowSize, updater, merger);
-
-                rollingWindows.add(rollingWindow);
-            }
-
-        }
-        allWindow = new AllWindow<V>(defaultValue, updater, merger);
-
-        this.interval = getInterval();
-        this.intervalCheck = new IntervalCheck();
-        this.intervalCheck.setInterval(interval);
-    }
-
-    /**
-     * In order to improve performance, buffer updates locally and flush
-     * them to the windows in batches.
-     */
-    @Override
-    public void update(Number obj) {
-        if (enable == false) {
-            return;
-        }
-
-        if (intervalCheck.check()) {
-            flush();
-        }
-        synchronized (this) {
-            unflushed = updater.update(obj, unflushed);
-        }
-    }
-
-    public synchronized void flush() {
-        if (unflushed == null) {
-            return;
-        }
-        for (RollingWindow<V> rollingWindow : rollingWindows) {
-            rollingWindow.updateBatch(unflushed);
-        }
-        allWindow.updateBatch(unflushed);
-        unflushed = null;
-    }
-
-    @Override
-    public Map<Integer, T> getSnapshot() {
-        // TODO Auto-generated method stub
-        flush();
-
-        Map<Integer, T> ret = new TreeMap<Integer, T>();
-        for (RollingWindow<V> rollingWindow : rollingWindows) {
-            V value = rollingWindow.getSnapshot();
-
-            ret.put(rollingWindow.getWindowSecond(), convertor.convert(value));
-        }
-
-        ret.put(StatBuckets.ALL_TIME_WINDOW,
-                convertor.convert(allWindow.getSnapshot()));
-
-        if (callback != null) {
-            callback.execute(this);
-        }
-        return ret;
-    }
-
-    public T getAllTimeValue() {
-        return convertor.convert(allWindow.getSnapshot());
-    }
-
-    public int[] getWindowSeconds() {
-        return windowSeconds;
-    }
-
-    public void setWindowSeconds(int[] windowSeconds) {
-        this.windowSeconds = windowSeconds;
-    }
-
-    public int getBucketSize() {
-        return bucketSize;
-    }
-
-    public void setBucketSize(int bucketSize) {
-        this.bucketSize = bucketSize;
-    }
-
-    public V getDefaultValue() {
-        return defaultValue;
-    }
-
-    public void setDefaultValue(V defaultValue) {
-        this.defaultValue = defaultValue;
-    }
-
-    public Updater<V> getUpdater() {
-        return updater;
-    }
-
-    public void setUpdater(Updater<V> updater) {
-        this.updater = updater;
-    }
-
-    public Merger<V> getMerger() {
-        return merger;
-    }
-
-    public void setMerger(Merger<V> merger) {
-        this.merger = merger;
-    }
-
-    public Convertor<V, T> getConvertor() {
-        return convertor;
-    }
-
-    public void setConvertor(Convertor<V, T> convertor) {
-        this.convertor = convertor;
-    }
-
-    public Callback getCallback() {
-        return callback;
-    }
-
-    public void setCallback(Callback callback) {
-        this.callback = callback;
-    }
-
-}

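The deleted Metric class multiplexed several rolling windows behind one flush timer: getInterval() returns the largest divisor common to every per-window bucket interval, falling back to 1. A self-contained sketch of that selection logic, using the default windows:

    // Largest common divisor of the per-window bucket intervals
    // (windowSecond / bucketSize), as in the deleted getInterval().
    static int commonFlushInterval(int[] windowSeconds, int bucketSize) {
        int smallest = Integer.MAX_VALUE;
        int[] intervals = new int[windowSeconds.length];
        for (int i = 0; i < windowSeconds.length; i++) {
            intervals[i] = windowSeconds[i] / bucketSize;
            smallest = Math.min(smallest, intervals[i]);
        }
        for (int candidate = smallest; candidate > 1; candidate--) {
            boolean dividesAll = true;
            for (int interval : intervals) {
                if (interval % candidate != 0) { dividesAll = false; break; }
            }
            if (dividesAll) return candidate;
        }
        return 1;
    }
    // commonFlushInterval(new int[]{600, 10800, 86400}, 20) == 30:
    // the bucket intervals are {30, 540, 4320}, and 30 divides all of them.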
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/window/RollingWindow.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/window/RollingWindow.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/window/RollingWindow.java
deleted file mode 100755
index 54047a6..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/window/RollingWindow.java
+++ /dev/null
@@ -1,194 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric.window;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.TreeMap;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.alibaba.jstorm.common.metric.operator.Sampling;
-import com.alibaba.jstorm.common.metric.operator.StartTime;
-import com.alibaba.jstorm.common.metric.operator.merger.Merger;
-import com.alibaba.jstorm.common.metric.operator.updater.Updater;
-import com.alibaba.jstorm.utils.IntervalCheck;
-import com.alibaba.jstorm.utils.TimeUtils;
-
-public class RollingWindow<V> implements Sampling<V>, StartTime {
-    private static final long serialVersionUID = 3794478417380003279L;
-    private static final Logger LOG = LoggerFactory
-            .getLogger(RollingWindow.class);
-
-    protected long startTime;
-    protected Integer currBucketTime;
-    protected int interval; // unit is second
-    protected int windowSecond;
-    protected IntervalCheck intervalCheck;
-
-    protected TreeMap<Integer, V> buckets;
-    protected Integer bucketNum;
-    protected V unflushed;
-    protected V defaultValue;
-
-    protected Updater<V> updater;
-    protected Merger<V> merger;
-
-    RollingWindow(V defaultValue, int interval, int windowSecond,
-            Updater<V> updater, Merger<V> merger) {
-        this.startTime = System.currentTimeMillis();
-        this.interval = interval;
-        this.intervalCheck = new IntervalCheck();
-        this.intervalCheck.setInterval(interval);
-        this.currBucketTime = getCurrBucketTime();
-
-        this.bucketNum = windowSecond / interval;
-        this.windowSecond = (bucketNum) * interval;
-
-        this.buckets = new TreeMap<Integer, V>();
-
-        this.updater = updater;
-        this.merger = merger;
-
-        this.defaultValue = defaultValue;
-
-    }
-
-    
-    @Override
-    public void update(Number obj) {
-        // TODO Auto-generated method stub
-
-        if (intervalCheck.check()) {
-            rolling();
-        }
-        synchronized (this) {
-            unflushed = updater.update(obj, unflushed);
-
-        }
-
-    }
-    
-    /**
-     * In order to improve performance 
-     * Flush one batch to rollingWindow
-     * 
-     */
-    public void updateBatch(V batch) {
-
-        if (intervalCheck.check()) {
-            rolling();
-        }
-        synchronized (this) {
-            unflushed = updater.updateBatch(batch, unflushed);
-        }
-            
-    }
-
-    @Override
-    public V getSnapshot() {
-        // TODO Auto-generated method stub
-        if (intervalCheck.check()) {
-            rolling();
-        }
-
-        cleanExpiredBuckets();
-        // @@@ Testing
-        //LOG.info("Raw Data:" + buckets + ",unflushed:" + unflushed);
-
-        Collection<V> values = buckets.values();
-
-        V ret = merger.merge(values, unflushed, this);
-        if (ret == null) {
-
-            // @@@ testing
-            //LOG.warn("!!!!Exist null data !!!!!");
-            return defaultValue;
-        }
-        return ret;
-    }
-
-    /*
-     * Move the "current bucket time" index and clean the expired buckets
-     */
-    protected void rolling() {
-        synchronized (this) {
-            if (unflushed != null) {
-                buckets.put(currBucketTime, unflushed);
-                unflushed = null;
-            }
-        	
-        	currBucketTime = getCurrBucketTime();
-        	
-        	return ;
-        }
-    }
-
-    protected void cleanExpiredBuckets() {
-        int nowSec = TimeUtils.current_time_secs();
-        int startRemove = nowSec - (interval - 1) - windowSecond;
-
-        List<Integer> removeList = new ArrayList<Integer>();
-
-        for (Integer keyTime : buckets.keySet()) {
-            if (keyTime < startRemove) {
-                removeList.add(keyTime);
-            } else if (keyTime >= startRemove) {
-                break;
-            }
-        }
-
-        for (Integer removeKey : removeList) {
-            buckets.remove(removeKey);
-            // @@@ Testing
-            //LOG.info("Remove key:" + removeKey + ", diff:" + (nowSec - removeKey));
-
-        }
-
-        if (buckets.isEmpty() == false) {
-            Integer first = buckets.firstKey();
-            startTime = first.longValue() * 1000;
-        }
-    }
-
-    public int getWindowSecond() {
-        return windowSecond;
-    }
-
-    public long getStartTime() {
-        return startTime;
-    }
-
-    public int getInterval() {
-        return interval;
-    }
-
-    public Integer getBucketNum() {
-        return bucketNum;
-    }
-
-    public V getDefaultValue() {
-        return defaultValue;
-    }
-
-    private Integer getCurrBucketTime() {
-        return (TimeUtils.current_time_secs() / interval) * interval;
-    }
-}

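The deleted RollingWindow keys its TreeMap buckets by epoch seconds floored to the flush interval, so every update inside one interval lands in the same bucket, and cleanExpiredBuckets() can drop stale keys in one ordered pass. The alignment, worked through:

    int interval = 30;                           // seconds per bucket
    int now = 1448928017;                        // example epoch seconds
    int bucketKey = (now / interval) * interval; // 1448928000: floored to a
                                                 // multiple of the interval
    int windowSecond = 600;
    int startRemove = now - (interval - 1) - windowSecond;
    // bucket keys below startRemove have fallen out of the window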
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/window/StatBuckets.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/window/StatBuckets.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/window/StatBuckets.java
deleted file mode 100755
index 3e9b021..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/window/StatBuckets.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric.window;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-import java.util.TreeSet;
-
-public class StatBuckets {
-
-    public static final Integer NUM_STAT_BUCKETS = 20;
-
-    public static final Integer MINUTE_WINDOW = 600;
-    public static final Integer HOUR_WINDOW = 10800;
-    public static final Integer DAY_WINDOW = 86400;
-    public static final Integer ALL_TIME_WINDOW = 0;
-    public static Set<Integer> TIME_WINDOWS = new TreeSet<Integer>();
-    static {
-        TIME_WINDOWS.add(ALL_TIME_WINDOW);
-        TIME_WINDOWS.add(MINUTE_WINDOW);
-        TIME_WINDOWS.add(HOUR_WINDOW);
-        TIME_WINDOWS.add(DAY_WINDOW);
-    }
-
-    public static final String MINUTE_WINDOW_STR = "0d0h10m0s";
-    public static final String HOUR_WINDOW_STR = "0d3h0m0s";
-    public static final String DAY_WINDOW_STR = "1d0h0m0s";
-    public static final String ALL_WINDOW_STR = "All-time";
-
-    public static Integer[] STAT_BUCKETS = { MINUTE_WINDOW / NUM_STAT_BUCKETS,
-            HOUR_WINDOW / NUM_STAT_BUCKETS, DAY_WINDOW / NUM_STAT_BUCKETS };
-
-    private static final String[][] PRETTYSECDIVIDERS = {
-            new String[] { "s", "60" }, new String[] { "m", "60" },
-            new String[] { "h", "24" }, new String[] { "d", null } };
-
-    /**
-     * Convert a time-window key into its display string.
-     * 
-     * @param key
-     * @return
-     */
-    public static String parseTimeKey(Integer key) {
-        if (key == 0) {
-            return ALL_WINDOW_STR;
-        } else {
-            return String.valueOf(key);
-        }
-    }
-
-    /**
-     * 
-     * Default is the latest result
-     * 
-     * @param showKey
-     * @return
-     */
-    public static Integer getTimeKey(String showKey) {
-        Integer window = null;
-        if (showKey == null) {
-            window = (MINUTE_WINDOW);
-        } else if (showKey.equals(MINUTE_WINDOW_STR)) {
-            window = (MINUTE_WINDOW);
-        } else if (showKey.equals(HOUR_WINDOW_STR)) {
-            window = (HOUR_WINDOW);
-        } else if (showKey.equals(DAY_WINDOW_STR)) {
-            window = (DAY_WINDOW);
-        } else if (showKey.equals(ALL_WINDOW_STR)) {
-            window = ALL_TIME_WINDOW;
-        } else {
-            window = MINUTE_WINDOW;
-        }
-
-        return window;
-    }
-
-    /**
-     * Default is the latest result
-     * 
-     * @param showStr
-     * @return
-     */
-    public static String getShowTimeStr(Integer time) {
-        if (time == null) {
-            return MINUTE_WINDOW_STR;
-        } else if (time.equals(MINUTE_WINDOW)) {
-            return MINUTE_WINDOW_STR;
-        } else if (time.equals(HOUR_WINDOW)) {
-            return HOUR_WINDOW_STR;
-        } else if (time.equals(DAY_WINDOW)) {
-            return DAY_WINDOW_STR;
-        } else if (time.equals(ALL_TIME_WINDOW)) {
-            return ALL_WINDOW_STR;
-        } else {
-            return MINUTE_WINDOW_STR;
-        }
-
-    }
-
-    /**
-     * seconds to string like 1d20h30m40s
-     * 
-     * @param secs
-     * @return
-     */
-    public static String prettyUptimeStr(int secs) {
-        int diversize = PRETTYSECDIVIDERS.length;
-
-        List<String> tmp = new ArrayList<String>();
-        int div = secs;
-        for (int i = 0; i < diversize; i++) {
-            if (PRETTYSECDIVIDERS[i][1] != null) {
-                Integer d = Integer.parseInt(PRETTYSECDIVIDERS[i][1]);
-                tmp.add(div % d + PRETTYSECDIVIDERS[i][0]);
-                div = div / d;
-            } else {
-                tmp.add(div + PRETTYSECDIVIDERS[i][0]);
-            }
-        }
-
-        String rtn = "";
-        int tmpSzie = tmp.size();
-        for (int j = tmpSzie - 1; j > -1; j--) {
-            rtn += tmp.get(j);
-        }
-        return rtn;
-    }
-
-    /**
-     * @param args
-     */
-    public static void main(String[] args) {
-        // TODO Auto-generated method stub
-
-    }
-
-}

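The deleted StatBuckets.prettyUptimeStr() renders a second count as a compact d/h/m/s string by repeatedly dividing through the PRETTYSECDIVIDERS radixes and reversing the collected segments. A worked example:

    // prettyUptimeStr(90061) -> "1d1h1m1s"
    //   90061 % 60 = 1 -> "1s", carry 90061 / 60 = 1501
    //   1501  % 60 = 1 -> "1m", carry 1501 / 60  = 25
    //   25    % 24 = 1 -> "1h", carry 25 / 24    = 1
    //   final carry 1  -> "1d"; segments reversed: "1d1h1m1s"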
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/stats/StaticsType.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/stats/StaticsType.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/stats/StaticsType.java
deleted file mode 100755
index 2dbab6f..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/stats/StaticsType.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.stats;
-
-public enum StaticsType {
-    emitted, send_tps, recv_tps, acked, failed, transferred, process_latencies;
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/container/CgroupCenter.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/container/CgroupCenter.java b/jstorm-core/src/main/java/com/alibaba/jstorm/container/CgroupCenter.java
index d9148db..15b1cfa 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/container/CgroupCenter.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/container/CgroupCenter.java
@@ -100,9 +100,8 @@ public class CgroupCenter implements CgroupOperation {
                 SubSystemType type = SubSystemType.getSubSystem(split[0]);
                 if (type == null)
                     continue;
-                subSystems.add(new SubSystem(type, Integer.valueOf(split[1]),
-                        Integer.valueOf(split[2]), Integer.valueOf(split[3])
-                                .intValue() == 1 ? true : false));
+                subSystems.add(new SubSystem(type, Integer.valueOf(split[1]), Integer.valueOf(split[2]), Integer.valueOf(split[3]).intValue() == 1 ? true
+                        : false));
             }
             return subSystems;
         } catch (Exception e) {
@@ -168,8 +167,7 @@ public class CgroupCenter implements CgroupOperation {
         if (!CgroupUtils.dirExists(hierarchy.getDir()))
             new File(hierarchy.getDir()).mkdirs();
         String subSystems = CgroupUtils.reAnalyse(subsystems);
-        SystemOperation.mount(subSystems, hierarchy.getDir(), "cgroup",
-                subSystems);
+        SystemOperation.mount(subSystems, hierarchy.getDir(), "cgroup", subSystems);
 
     }
 
@@ -217,8 +215,7 @@ public class CgroupCenter implements CgroupOperation {
     }
 
     public static void main(String args[]) {
-        System.out.println(CgroupCenter.getInstance().getHierarchies().get(0)
-                .getRootCgroups().getChildren().size());
+        System.out.println(CgroupCenter.getInstance().getHierarchies().get(0).getRootCgroups().getChildren().size());
     }
 
 }

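The reflowed subsystem parsing above keeps the original `== 1 ? true : false` ternary from the /proc/cgroups line format (name, hierarchy id, num cgroups, enabled); the comparison alone already yields the boolean. An equivalent form, shown only as an illustration, not as part of the commit:

    // split = one whitespace-separated line of /proc/cgroups
    boolean enabled = Integer.parseInt(split[3]) == 1;
    subSystems.add(new SubSystem(type, Integer.parseInt(split[1]),
            Integer.parseInt(split[2]), enabled));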
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/container/CgroupUtils.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/container/CgroupUtils.java b/jstorm-core/src/main/java/com/alibaba/jstorm/container/CgroupUtils.java
index 4de2d5a..0cc45cc 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/container/CgroupUtils.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/container/CgroupUtils.java
@@ -82,8 +82,7 @@ public class CgroupUtils {
         return CgroupUtils.fileExists(Constants.CGROUP_STATUS_FILE);
     }
 
-    public static List<String> readFileByLine(String fileDir)
-            throws IOException {
+    public static List<String> readFileByLine(String fileDir) throws IOException {
         List<String> result = new ArrayList<String>();
         FileReader fileReader = null;
         BufferedReader reader = null;
@@ -101,8 +100,7 @@ public class CgroupUtils {
         return result;
     }
 
-    public static void writeFileByLine(String fileDir, List<String> strings)
-            throws IOException {
+    public static void writeFileByLine(String fileDir, List<String> strings) throws IOException {
         FileWriter writer = null;
         BufferedWriter bw = null;
         try {
@@ -123,8 +121,7 @@ public class CgroupUtils {
         }
     }
 
-    public static void writeFileByLine(String fileDir, String string)
-            throws IOException {
+    public static void writeFileByLine(String fileDir, String string) throws IOException {
         FileWriter writer = null;
         BufferedWriter bw = null;
         try {

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/container/SubSystem.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/container/SubSystem.java b/jstorm-core/src/main/java/com/alibaba/jstorm/container/SubSystem.java
index 1655e49..20d4ec0 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/container/SubSystem.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/container/SubSystem.java
@@ -27,8 +27,7 @@ public class SubSystem {
 
     private boolean enable;
 
-    public SubSystem(SubSystemType type, int hierarchyID, int cgroupNum,
-            boolean enable) {
+    public SubSystem(SubSystemType type, int hierarchyID, int cgroupNum, boolean enable) {
         this.type = type;
         this.hierarchyID = hierarchyID;
         this.cgroupsNum = cgroupNum;

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/CgroupCommon.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/CgroupCommon.java b/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/CgroupCommon.java
index 0a772f6..224d05d 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/CgroupCommon.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/CgroupCommon.java
@@ -59,9 +59,7 @@ public class CgroupCommon implements CgroupCommonOperation {
         this.parent = parent;
         this.dir = parent.getDir() + "/" + name;
         this.init();
-        cores =
-                CgroupCoreFactory.getInstance(this.hierarchy.getSubSystems(),
-                        this.dir);
+        cores = CgroupCoreFactory.getInstance(this.hierarchy.getSubSystems(), this.dir);
         this.isRoot = false;
     }
 
@@ -74,23 +72,19 @@ public class CgroupCommon implements CgroupCommonOperation {
         this.parent = null;
         this.dir = dir;
         this.init();
-        cores =
-                CgroupCoreFactory.getInstance(this.hierarchy.getSubSystems(),
-                        this.dir);
+        cores = CgroupCoreFactory.getInstance(this.hierarchy.getSubSystems(), this.dir);
         this.isRoot = true;
     }
 
     @Override
     public void addTask(int taskId) throws IOException {
         // TODO Auto-generated method stub
-        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, TASKS),
-                String.valueOf(taskId));
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, TASKS), String.valueOf(taskId));
     }
 
     @Override
     public Set<Integer> getTasks() throws IOException {
-        List<String> stringTasks =
-                CgroupUtils.readFileByLine(Constants.getDir(this.dir, TASKS));
+        List<String> stringTasks = CgroupUtils.readFileByLine(Constants.getDir(this.dir, TASKS));
         Set<Integer> tasks = new HashSet<Integer>();
         for (String task : stringTasks) {
             tasks.add(Integer.valueOf(task));
@@ -101,16 +95,13 @@ public class CgroupCommon implements CgroupCommonOperation {
     @Override
     public void addProcs(int pid) throws IOException {
         // TODO Auto-generated method stub
-        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, CGROUP_PROCS),
-                String.valueOf(pid));
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, CGROUP_PROCS), String.valueOf(pid));
     }
 
     @Override
     public Set<Integer> getPids() throws IOException {
         // TODO Auto-generated method stub
-        List<String> stringPids =
-                CgroupUtils.readFileByLine(Constants.getDir(this.dir,
-                        CGROUP_PROCS));
+        List<String> stringPids = CgroupUtils.readFileByLine(Constants.getDir(this.dir, CGROUP_PROCS));
         Set<Integer> pids = new HashSet<Integer>();
         for (String task : stringPids) {
             pids.add(Integer.valueOf(task));
@@ -121,16 +112,12 @@ public class CgroupCommon implements CgroupCommonOperation {
     @Override
     public void setNotifyOnRelease(boolean flag) throws IOException {
         // TODO Auto-generated method stub
-        CgroupUtils
-                .writeFileByLine(Constants.getDir(this.dir, NOTIFY_ON_RELEASE),
-                        flag ? "1" : "0");
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, NOTIFY_ON_RELEASE), flag ? "1" : "0");
     }
 
     @Override
     public boolean getNotifyOnRelease() throws IOException {
-        return CgroupUtils
-                .readFileByLine(Constants.getDir(this.dir, NOTIFY_ON_RELEASE))
-                .get(0).equals("1") ? true : false;
+        return CgroupUtils.readFileByLine(Constants.getDir(this.dir, NOTIFY_ON_RELEASE)).get(0).equals("1") ? true : false;
     }
 
     @Override
@@ -138,16 +125,14 @@ public class CgroupCommon implements CgroupCommonOperation {
         // TODO Auto-generated method stub
         if (!this.isRoot)
             return;
-        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, RELEASE_AGENT),
-                command);
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, RELEASE_AGENT), command);
     }
 
     @Override
     public String getReleaseAgent() throws IOException {
         if (!this.isRoot)
             return null;
-        return CgroupUtils.readFileByLine(
-                Constants.getDir(this.dir, RELEASE_AGENT)).get(0);
+        return CgroupUtils.readFileByLine(Constants.getDir(this.dir, RELEASE_AGENT)).get(0);
     }
 
     @Override
@@ -155,21 +140,16 @@ public class CgroupCommon implements CgroupCommonOperation {
         // TODO Auto-generated method stub
         if (!this.cores.keySet().contains(SubSystemType.cpuset))
             return;
-        CgroupUtils.writeFileByLine(Constants.getDir(this.dir,
-                CGROUP_CLONE_CHILDREN), flag ? "1" : "0");
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, CGROUP_CLONE_CHILDREN), flag ? "1" : "0");
     }
 
     @Override
     public boolean getCgroupCloneChildren() throws IOException {
-        return CgroupUtils
-                .readFileByLine(
-                        Constants.getDir(this.dir, CGROUP_CLONE_CHILDREN))
-                .get(0).equals("1") ? true : false;
+        return CgroupUtils.readFileByLine(Constants.getDir(this.dir, CGROUP_CLONE_CHILDREN)).get(0).equals("1") ? true : false;
     }
 
     @Override
-    public void setEventControl(String eventFd, String controlFd,
-            String... args) throws IOException {
+    public void setEventControl(String eventFd, String controlFd, String... args) throws IOException {
         // TODO Auto-generated method stub
         StringBuilder sb = new StringBuilder();
         sb.append(eventFd);
@@ -179,10 +159,7 @@ public class CgroupCommon implements CgroupCommonOperation {
             sb.append(' ');
             sb.append(arg);
         }
-        CgroupUtils
-                .writeFileByLine(
-                        Constants.getDir(this.dir, CGROUP_EVENT_CONTROL),
-                        sb.toString());
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, CGROUP_EVENT_CONTROL), sb.toString());
     }
 
     public Hierarchy getHierarchy() {
@@ -240,8 +217,7 @@ public class CgroupCommon implements CgroupCommonOperation {
             return;
         for (File child : files) {
             if (child.isDirectory()) {
-                this.children.add(new CgroupCommon(child.getName(),
-                        this.hierarchy, this));
+                this.children.add(new CgroupCommon(child.getName(), this.hierarchy, this));
             }
         }
     }

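setEventControl() above concatenates its arguments into the single space-separated line the kernel expects in cgroup.event_control. A hedged usage sketch (cgroup is a hypothetical CgroupCommon instance; the fd numbers are illustrative):

    // Writes "5 6 12345" to <dir>/cgroup.event_control, i.e. the
    // "<event_fd> <control_fd> <args...>" registration format.
    cgroup.setEventControl("5", "6", "12345");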
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/CgroupCommonOperation.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/CgroupCommonOperation.java b/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/CgroupCommonOperation.java
index 3f9090f..a76b09c 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/CgroupCommonOperation.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/CgroupCommonOperation.java
@@ -42,7 +42,6 @@ public interface CgroupCommonOperation {
 
     public boolean getCgroupCloneChildren() throws IOException;
 
-    public void setEventControl(String eventFd, String controlFd,
-            String... args) throws IOException;
+    public void setEventControl(String eventFd, String controlFd, String... args) throws IOException;
 
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/CgroupCoreFactory.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/CgroupCoreFactory.java b/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/CgroupCoreFactory.java
index 279366a..2b3f3a8 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/CgroupCoreFactory.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/CgroupCoreFactory.java
@@ -35,10 +35,8 @@ import com.alibaba.jstorm.container.cgroup.core.NetPrioCore;
 
 public class CgroupCoreFactory {
 
-    public static Map<SubSystemType, CgroupCore> getInstance(
-            Set<SubSystemType> types, String dir) {
-        Map<SubSystemType, CgroupCore> result =
-                new HashMap<SubSystemType, CgroupCore>();
+    public static Map<SubSystemType, CgroupCore> getInstance(Set<SubSystemType> types, String dir) {
+        Map<SubSystemType, CgroupCore> result = new HashMap<SubSystemType, CgroupCore>();
         for (SubSystemType type : types) {
             switch (type) {
             case blkio:

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/BlkioCore.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/BlkioCore.java b/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/BlkioCore.java
index 5d487ec..9958114 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/BlkioCore.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/BlkioCore.java
@@ -33,25 +33,18 @@ public class BlkioCore implements CgroupCore {
     public static final String BLKIO_WEIGHT_DEVICE = "/blkio.weight_device";
     public static final String BLKIO_RESET_STATS = "/blkio.reset_stats";
 
-    public static final String BLKIO_THROTTLE_READ_BPS_DEVICE =
-            "/blkio.throttle.read_bps_device";
-    public static final String BLKIO_THROTTLE_WRITE_BPS_DEVICE =
-            "/blkio.throttle.write_bps_device";
-    public static final String BLKIO_THROTTLE_READ_IOPS_DEVICE =
-            "/blkio.throttle.read_iops_device";
-    public static final String BLKIO_THROTTLE_WRITE_IOPS_DEVICE =
-            "/blkio.throttle.write_iops_device";
-
-    public static final String BLKIO_THROTTLE_IO_SERVICED =
-            "/blkio.throttle.io_serviced";
-    public static final String BLKIO_THROTTLE_IO_SERVICE_BYTES =
-            "/blkio.throttle.io_service_bytes";
+    public static final String BLKIO_THROTTLE_READ_BPS_DEVICE = "/blkio.throttle.read_bps_device";
+    public static final String BLKIO_THROTTLE_WRITE_BPS_DEVICE = "/blkio.throttle.write_bps_device";
+    public static final String BLKIO_THROTTLE_READ_IOPS_DEVICE = "/blkio.throttle.read_iops_device";
+    public static final String BLKIO_THROTTLE_WRITE_IOPS_DEVICE = "/blkio.throttle.write_iops_device";
+
+    public static final String BLKIO_THROTTLE_IO_SERVICED = "/blkio.throttle.io_serviced";
+    public static final String BLKIO_THROTTLE_IO_SERVICE_BYTES = "/blkio.throttle.io_service_bytes";
 
     public static final String BLKIO_TIME = "/blkio.time";
     public static final String BLKIO_SECTORS = "/blkio.sectors";
     public static final String BLKIO_IO_SERVICED = "/blkio.io_serviced";
-    public static final String BLKIO_IO_SERVICE_BYTES =
-            "/blkio.io_service_bytes";
+    public static final String BLKIO_IO_SERVICE_BYTES = "/blkio.io_service_bytes";
     public static final String BLKIO_IO_SERVICE_TIME = "/blkio.io_service_time";
     public static final String BLKIO_IO_WAIT_TIME = "/blkio.io_wait_time";
     public static final String BLKIO_IO_MERGED = "/blkio.io_merged";
@@ -71,28 +64,19 @@ public class BlkioCore implements CgroupCore {
 
     /* weight: 100-1000 */
     public void setBlkioWeight(int weight) throws IOException {
-        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, BLKIO_WEIGHT),
-                String.valueOf(weight));
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, BLKIO_WEIGHT), String.valueOf(weight));
     }
 
     public int getBlkioWeight() throws IOException {
-        return Integer.valueOf(
-                CgroupUtils.readFileByLine(
-                        Constants.getDir(this.dir, BLKIO_WEIGHT)).get(0))
-                .intValue();
+        return Integer.valueOf(CgroupUtils.readFileByLine(Constants.getDir(this.dir, BLKIO_WEIGHT)).get(0)).intValue();
     }
 
-    public void setBlkioWeightDevice(Device device, int weight)
-            throws IOException {
-        CgroupUtils.writeFileByLine(
-                Constants.getDir(this.dir, BLKIO_WEIGHT_DEVICE),
-                makeContext(device, weight));
+    public void setBlkioWeightDevice(Device device, int weight) throws IOException {
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, BLKIO_WEIGHT_DEVICE), makeContext(device, weight));
     }
 
     public Map<Device, Integer> getBlkioWeightDevice() throws IOException {
-        List<String> strings =
-                CgroupUtils.readFileByLine(Constants.getDir(this.dir,
-                        BLKIO_WEIGHT_DEVICE));
+        List<String> strings = CgroupUtils.readFileByLine(Constants.getDir(this.dir, BLKIO_WEIGHT_DEVICE));
         Map<Device, Integer> result = new HashMap<Device, Integer>();
         for (String string : strings) {
             String[] strArgs = string.split(" ");
@@ -104,15 +88,11 @@ public class BlkioCore implements CgroupCore {
     }
 
     public void setReadBps(Device device, long bps) throws IOException {
-        CgroupUtils.writeFileByLine(
-                Constants.getDir(this.dir, BLKIO_THROTTLE_READ_BPS_DEVICE),
-                makeContext(device, bps));
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, BLKIO_THROTTLE_READ_BPS_DEVICE), makeContext(device, bps));
     }
 
     public Map<Device, Long> getReadBps() throws IOException {
-        List<String> strings =
-                CgroupUtils.readFileByLine(Constants.getDir(this.dir,
-                        BLKIO_THROTTLE_READ_BPS_DEVICE));
+        List<String> strings = CgroupUtils.readFileByLine(Constants.getDir(this.dir, BLKIO_THROTTLE_READ_BPS_DEVICE));
         Map<Device, Long> result = new HashMap<Device, Long>();
         for (String string : strings) {
             String[] strArgs = string.split(" ");
@@ -124,15 +104,11 @@ public class BlkioCore implements CgroupCore {
     }
 
     public void setWriteBps(Device device, long bps) throws IOException {
-        CgroupUtils.writeFileByLine(
-                Constants.getDir(this.dir, BLKIO_THROTTLE_WRITE_BPS_DEVICE),
-                makeContext(device, bps));
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, BLKIO_THROTTLE_WRITE_BPS_DEVICE), makeContext(device, bps));
     }
 
     public Map<Device, Long> getWriteBps() throws IOException {
-        List<String> strings =
-                CgroupUtils.readFileByLine(Constants.getDir(this.dir,
-                        BLKIO_THROTTLE_WRITE_BPS_DEVICE));
+        List<String> strings = CgroupUtils.readFileByLine(Constants.getDir(this.dir, BLKIO_THROTTLE_WRITE_BPS_DEVICE));
         Map<Device, Long> result = new HashMap<Device, Long>();
         for (String string : strings) {
             String[] strArgs = string.split(" ");
@@ -144,15 +120,11 @@ public class BlkioCore implements CgroupCore {
     }
 
     public void setReadIOps(Device device, long iops) throws IOException {
-        CgroupUtils.writeFileByLine(
-                Constants.getDir(this.dir, BLKIO_THROTTLE_READ_IOPS_DEVICE),
-                makeContext(device, iops));
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, BLKIO_THROTTLE_READ_IOPS_DEVICE), makeContext(device, iops));
     }
 
     public Map<Device, Long> getReadIOps() throws IOException {
-        List<String> strings =
-                CgroupUtils.readFileByLine(Constants.getDir(this.dir,
-                        BLKIO_THROTTLE_READ_IOPS_DEVICE));
+        List<String> strings = CgroupUtils.readFileByLine(Constants.getDir(this.dir, BLKIO_THROTTLE_READ_IOPS_DEVICE));
         Map<Device, Long> result = new HashMap<Device, Long>();
         for (String string : strings) {
             String[] strArgs = string.split(" ");
@@ -164,15 +136,11 @@ public class BlkioCore implements CgroupCore {
     }
 
     public void setWriteIOps(Device device, long iops) throws IOException {
-        CgroupUtils.writeFileByLine(
-                Constants.getDir(this.dir, BLKIO_THROTTLE_WRITE_IOPS_DEVICE),
-                makeContext(device, iops));
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, BLKIO_THROTTLE_WRITE_IOPS_DEVICE), makeContext(device, iops));
     }
 
     public Map<Device, Long> getWriteIOps() throws IOException {
-        List<String> strings =
-                CgroupUtils.readFileByLine(Constants.getDir(this.dir,
-                        BLKIO_THROTTLE_WRITE_IOPS_DEVICE));
+        List<String> strings = CgroupUtils.readFileByLine(Constants.getDir(this.dir, BLKIO_THROTTLE_WRITE_IOPS_DEVICE));
         Map<Device, Long> result = new HashMap<Device, Long>();
         for (String string : strings) {
             String[] strArgs = string.split(" ");
@@ -183,23 +151,17 @@ public class BlkioCore implements CgroupCore {
         return result;
     }
 
-    public Map<Device, Map<RecordType, Long>> getThrottleIOServiced()
-            throws IOException {
-        return this.analyseRecord(CgroupUtils.readFileByLine(Constants.getDir(
-                this.dir, BLKIO_THROTTLE_IO_SERVICED)));
+    public Map<Device, Map<RecordType, Long>> getThrottleIOServiced() throws IOException {
+        return this.analyseRecord(CgroupUtils.readFileByLine(Constants.getDir(this.dir, BLKIO_THROTTLE_IO_SERVICED)));
     }
 
-    public Map<Device, Map<RecordType, Long>> getThrottleIOServiceByte()
-            throws IOException {
-        return this.analyseRecord(CgroupUtils.readFileByLine(Constants.getDir(
-                this.dir, BLKIO_THROTTLE_IO_SERVICE_BYTES)));
+    public Map<Device, Map<RecordType, Long>> getThrottleIOServiceByte() throws IOException {
+        return this.analyseRecord(CgroupUtils.readFileByLine(Constants.getDir(this.dir, BLKIO_THROTTLE_IO_SERVICE_BYTES)));
     }
 
     public Map<Device, Long> getBlkioTime() throws IOException {
         Map<Device, Long> result = new HashMap<Device, Long>();
-        List<String> strs =
-                CgroupUtils.readFileByLine(Constants.getDir(this.dir,
-                        BLKIO_TIME));
+        List<String> strs = CgroupUtils.readFileByLine(Constants.getDir(this.dir, BLKIO_TIME));
         for (String str : strs) {
             String[] strArgs = str.split(" ");
             result.put(new Device(strArgs[0]), Long.parseLong(strArgs[1]));
@@ -209,9 +171,7 @@ public class BlkioCore implements CgroupCore {
 
     public Map<Device, Long> getBlkioSectors() throws IOException {
         Map<Device, Long> result = new HashMap<Device, Long>();
-        List<String> strs =
-                CgroupUtils.readFileByLine(Constants.getDir(this.dir,
-                        BLKIO_SECTORS));
+        List<String> strs = CgroupUtils.readFileByLine(Constants.getDir(this.dir, BLKIO_SECTORS));
         for (String str : strs) {
             String[] strArgs = str.split(" ");
             result.put(new Device(strArgs[0]), Long.parseLong(strArgs[1]));
@@ -219,43 +179,32 @@ public class BlkioCore implements CgroupCore {
         return result;
     }
 
-    public Map<Device, Map<RecordType, Long>> getIOServiced()
-            throws IOException {
-        return this.analyseRecord(CgroupUtils.readFileByLine(Constants.getDir(
-                this.dir, BLKIO_IO_SERVICED)));
+    public Map<Device, Map<RecordType, Long>> getIOServiced() throws IOException {
+        return this.analyseRecord(CgroupUtils.readFileByLine(Constants.getDir(this.dir, BLKIO_IO_SERVICED)));
     }
 
-    public Map<Device, Map<RecordType, Long>> getIOServiceBytes()
-            throws IOException {
-        return this.analyseRecord(CgroupUtils.readFileByLine(Constants.getDir(
-                this.dir, BLKIO_IO_SERVICE_BYTES)));
+    public Map<Device, Map<RecordType, Long>> getIOServiceBytes() throws IOException {
+        return this.analyseRecord(CgroupUtils.readFileByLine(Constants.getDir(this.dir, BLKIO_IO_SERVICE_BYTES)));
     }
 
-    public Map<Device, Map<RecordType, Long>> getIOServiceTime()
-            throws IOException {
-        return this.analyseRecord(CgroupUtils.readFileByLine(Constants.getDir(
-                this.dir, BLKIO_IO_SERVICE_TIME)));
+    public Map<Device, Map<RecordType, Long>> getIOServiceTime() throws IOException {
+        return this.analyseRecord(CgroupUtils.readFileByLine(Constants.getDir(this.dir, BLKIO_IO_SERVICE_TIME)));
     }
 
-    public Map<Device, Map<RecordType, Long>> getIOWaitTime()
-            throws IOException {
-        return this.analyseRecord(CgroupUtils.readFileByLine(Constants.getDir(
-                this.dir, BLKIO_IO_WAIT_TIME)));
+    public Map<Device, Map<RecordType, Long>> getIOWaitTime() throws IOException {
+        return this.analyseRecord(CgroupUtils.readFileByLine(Constants.getDir(this.dir, BLKIO_IO_WAIT_TIME)));
     }
 
     public Map<Device, Map<RecordType, Long>> getIOMerged() throws IOException {
-        return this.analyseRecord(CgroupUtils.readFileByLine(Constants.getDir(
-                this.dir, BLKIO_IO_MERGED)));
+        return this.analyseRecord(CgroupUtils.readFileByLine(Constants.getDir(this.dir, BLKIO_IO_MERGED)));
     }
 
     public Map<Device, Map<RecordType, Long>> getIOQueued() throws IOException {
-        return this.analyseRecord(CgroupUtils.readFileByLine(Constants.getDir(
-                this.dir, BLKIO_IO_QUEUED)));
+        return this.analyseRecord(CgroupUtils.readFileByLine(Constants.getDir(this.dir, BLKIO_IO_QUEUED)));
     }
 
     public void resetStats() throws IOException {
-        CgroupUtils.writeFileByLine(
-                Constants.getDir(this.dir, BLKIO_RESET_STATS), "1");
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, BLKIO_RESET_STATS), "1");
     }
 
     private String makeContext(Device device, Object data) {
@@ -265,8 +214,7 @@ public class BlkioCore implements CgroupCore {
     }
 
     private Map<Device, Map<RecordType, Long>> analyseRecord(List<String> strs) {
-        Map<Device, Map<RecordType, Long>> result =
-                new HashMap<Device, Map<RecordType, Long>>();
+        Map<Device, Map<RecordType, Long>> result = new HashMap<Device, Map<RecordType, Long>>();
         for (String str : strs) {
             String[] strArgs = str.split(" ");
             if (strArgs.length != 3)

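The setters above each write one "major:minor value" line into the matching blkio throttle file, and the getters parse the same format back into a map. A minimal usage sketch; the BlkioCore(String dir) and Device("major:minor") constructors, the wrapper method, and the cgroup path are assumptions inferred from the calls visible in this diff:

    // Hedged sketch: throttle one block device, then read the caps back.
    void blkioSketch() throws IOException {
        BlkioCore blkio = new BlkioCore("/cgroup/blkio/jstorm");  // path illustrative
        Device sda = new Device("8:0");                           // major:minor, as parsed above
        blkio.setReadBps(sda, 10L * 1024 * 1024);                 // blkio.throttle.read_bps_device
        blkio.setWriteIOps(sda, 1000L);                           // blkio.throttle.write_iops_device
        Map<Device, Long> caps = blkio.getReadBps();              // {8:0 -> 10485760}
    }
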
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/CpuCore.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/CpuCore.java b/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/CpuCore.java
index 609898e..6a723a0 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/CpuCore.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/CpuCore.java
@@ -46,62 +46,47 @@ public class CpuCore implements CgroupCore {
     }
 
     public void setCpuShares(int weight) throws IOException {
-        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, CPU_SHARES),
-                String.valueOf(weight));
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, CPU_SHARES), String.valueOf(weight));
     }
 
     public int getCpuShares() throws IOException {
-        return Integer.parseInt(CgroupUtils.readFileByLine(
-                Constants.getDir(this.dir, CPU_SHARES)).get(0));
+        return Integer.parseInt(CgroupUtils.readFileByLine(Constants.getDir(this.dir, CPU_SHARES)).get(0));
     }
 
     public void setCpuRtRuntimeUs(long us) throws IOException {
-        CgroupUtils.writeFileByLine(
-                Constants.getDir(this.dir, CPU_RT_RUNTIME_US),
-                String.valueOf(us));
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, CPU_RT_RUNTIME_US), String.valueOf(us));
     }
 
     public long getCpuRtRuntimeUs() throws IOException {
-        return Long.parseLong(CgroupUtils.readFileByLine(
-                Constants.getDir(this.dir, CPU_RT_RUNTIME_US)).get(0));
+        return Long.parseLong(CgroupUtils.readFileByLine(Constants.getDir(this.dir, CPU_RT_RUNTIME_US)).get(0));
     }
 
     public void setCpuRtPeriodUs(long us) throws IOException {
-        CgroupUtils.writeFileByLine(
-                Constants.getDir(this.dir, CPU_RT_PERIOD_US),
-                String.valueOf(us));
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, CPU_RT_PERIOD_US), String.valueOf(us));
     }
 
     public Long getCpuRtPeriodUs() throws IOException {
-        return Long.parseLong(CgroupUtils.readFileByLine(
-                Constants.getDir(this.dir, CPU_RT_PERIOD_US)).get(0));
+        return Long.parseLong(CgroupUtils.readFileByLine(Constants.getDir(this.dir, CPU_RT_PERIOD_US)).get(0));
     }
 
     public void setCpuCfsPeriodUs(long us) throws IOException {
-        CgroupUtils.writeFileByLine(
-                Constants.getDir(this.dir, CPU_CFS_PERIOD_US),
-                String.valueOf(us));
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, CPU_CFS_PERIOD_US), String.valueOf(us));
     }
 
     public Long getCpuCfsPeriodUs(long us) throws IOException {
-        return Long.parseLong(CgroupUtils.readFileByLine(
-                Constants.getDir(this.dir, CPU_CFS_PERIOD_US)).get(0));
+        return Long.parseLong(CgroupUtils.readFileByLine(Constants.getDir(this.dir, CPU_CFS_PERIOD_US)).get(0));
     }
 
     public void setCpuCfsQuotaUs(long us) throws IOException {
-        CgroupUtils.writeFileByLine(
-                Constants.getDir(this.dir, CPU_CFS_QUOTA_US),
-                String.valueOf(us));
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, CPU_CFS_QUOTA_US), String.valueOf(us));
     }
 
     public Long getCpuCfsQuotaUs(long us) throws IOException {
-        return Long.parseLong(CgroupUtils.readFileByLine(
-                Constants.getDir(this.dir, CPU_CFS_QUOTA_US)).get(0));
+        return Long.parseLong(CgroupUtils.readFileByLine(Constants.getDir(this.dir, CPU_CFS_QUOTA_US)).get(0));
     }
 
     public Stat getCpuStat() throws IOException {
-        return new Stat(CgroupUtils.readFileByLine(Constants.getDir(this.dir,
-                CPU_STAT)));
+        return new Stat(CgroupUtils.readFileByLine(Constants.getDir(this.dir, CPU_STAT)));
     }
 
     public static class Stat {

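cpu.cfs_quota_us together with cpu.cfs_period_us gives a hard CPU cap, while cpu.shares is only a relative weight under contention. A sketch of capping a group at half a core (the CpuCore(String dir) constructor and wrapper method are assumptions; the setter signatures are as shown above):

    // Hedged sketch: 50000 us of quota per 100000 us period ~= 0.5 core.
    void cpuSketch() throws IOException {
        CpuCore cpu = new CpuCore("/cgroup/cpu/jstorm");
        cpu.setCpuShares(512);           // half the kernel default weight of 1024
        cpu.setCpuCfsPeriodUs(100000L);
        cpu.setCpuCfsQuotaUs(50000L);    // -1 would remove the cap entirely
    }
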
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/CpuacctCore.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/CpuacctCore.java b/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/CpuacctCore.java
index c54421b..8bec196 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/CpuacctCore.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/CpuacctCore.java
@@ -45,14 +45,11 @@ public class CpuacctCore implements CgroupCore {
     }
 
     public Long getCpuUsage() throws IOException {
-        return Long.parseLong(CgroupUtils.readFileByLine(
-                Constants.getDir(this.dir, CPUACCT_USAGE)).get(0));
+        return Long.parseLong(CgroupUtils.readFileByLine(Constants.getDir(this.dir, CPUACCT_USAGE)).get(0));
     }
 
     public Map<StatType, Long> getCpuStat() throws IOException {
-        List<String> strs =
-                CgroupUtils.readFileByLine(Constants.getDir(this.dir,
-                        CPUACCT_STAT));
+        List<String> strs = CgroupUtils.readFileByLine(Constants.getDir(this.dir, CPUACCT_STAT));
         Map<StatType, Long> result = new HashMap<StatType, Long>();
         result.put(StatType.user, Long.parseLong(strs.get(0).split(" ")[1]));
         result.put(StatType.system, Long.parseLong(strs.get(1).split(" ")[1]));
@@ -60,10 +57,7 @@ public class CpuacctCore implements CgroupCore {
     }
 
     public Long[] getPerCpuUsage() throws IOException {
-        String str =
-                CgroupUtils.readFileByLine(
-                        Constants.getDir(this.dir, CPUACCT_USAGE_PERCPU))
-                        .get(0);
+        String str = CgroupUtils.readFileByLine(Constants.getDir(this.dir, CPUACCT_USAGE_PERCPU)).get(0);
         String[] strArgs = str.split(" ");
         Long[] result = new Long[strArgs.length];
         for (int i = 0; i < result.length; i++) {

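The cpuacct reads above are cumulative counters: cpuacct.usage is nanoseconds of CPU time, cpuacct.usage_percpu holds one counter per logical CPU, and cpuacct.stat splits user from system time. A read-only sketch (constructor and wrapper method assumed):

    // Hedged sketch: sample the accounting counters for this group.
    void cpuacctSketch() throws IOException {
        CpuacctCore cpuacct = new CpuacctCore("/cgroup/cpuacct/jstorm");
        long totalNs = cpuacct.getCpuUsage();               // cumulative nanoseconds
        Long[] perCpu = cpuacct.getPerCpuUsage();           // one entry per logical CPU
        Map<StatType, Long> stat = cpuacct.getCpuStat();    // user vs. system ticks
    }
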
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/CpusetCore.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/CpusetCore.java b/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/CpusetCore.java
index d693b6c..02bcace 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/CpusetCore.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/CpusetCore.java
@@ -32,18 +32,12 @@ public class CpusetCore implements CgroupCore {
     public static final String CPUSET_CPU_EXCLUSIVE = "/cpuset.cpu_exclusive";
     public static final String CPUSET_MEM_EXCLUSIVE = "/cpuset.mem_exclusive";
     public static final String CPUSET_MEM_HARDWALL = "/cpuset.mem_hardwall";
-    public static final String CPUSET_MEMORY_PRESSURE =
-            "/cpuset.memory_pressure";
-    public static final String CPUSET_MEMORY_PRESSURE_ENABLED =
-            "/cpuset.memory_pressure_enabled";
-    public static final String CPUSET_MEMORY_SPREAD_PAGE =
-            "/cpuset.memory_spread_page";
-    public static final String CPUSET_MEMORY_SPREAD_SLAB =
-            "/cpuset.memory_spread_slab";
-    public static final String CPUSET_SCHED_LOAD_BALANCE =
-            "/cpuset.sched_load_balance";
-    public static final String CPUSET_SCHED_RELAX_DOMAIN_LEVEL =
-            "/cpuset.sched_relax_domain_level";
+    public static final String CPUSET_MEMORY_PRESSURE = "/cpuset.memory_pressure";
+    public static final String CPUSET_MEMORY_PRESSURE_ENABLED = "/cpuset.memory_pressure_enabled";
+    public static final String CPUSET_MEMORY_SPREAD_PAGE = "/cpuset.memory_spread_page";
+    public static final String CPUSET_MEMORY_SPREAD_SLAB = "/cpuset.memory_spread_slab";
+    public static final String CPUSET_SCHED_LOAD_BALANCE = "/cpuset.sched_load_balance";
+    public static final String CPUSET_SCHED_RELAX_DOMAIN_LEVEL = "/cpuset.sched_relax_domain_level";
 
     private final String dir;
 
@@ -64,14 +58,11 @@ public class CpusetCore implements CgroupCore {
             sb.append(',');
         }
         sb.deleteCharAt(sb.length() - 1);
-        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, CPUSET_CPUS),
-                sb.toString());
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, CPUSET_CPUS), sb.toString());
     }
 
     public int[] getCpus() throws IOException {
-        String output =
-                CgroupUtils.readFileByLine(
-                        Constants.getDir(this.dir, CPUSET_CPUS)).get(0);
+        String output = CgroupUtils.readFileByLine(Constants.getDir(this.dir, CPUSET_CPUS)).get(0);
         return parseNums(output);
     }
 
@@ -82,147 +73,97 @@ public class CpusetCore implements CgroupCore {
             sb.append(',');
         }
         sb.deleteCharAt(sb.length() - 1);
-        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, CPUSET_MEMS),
-                sb.toString());
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, CPUSET_MEMS), sb.toString());
     }
 
     public int[] getMems() throws IOException {
-        String output =
-                CgroupUtils.readFileByLine(
-                        Constants.getDir(this.dir, CPUSET_MEMS)).get(0);
+        String output = CgroupUtils.readFileByLine(Constants.getDir(this.dir, CPUSET_MEMS)).get(0);
         return parseNums(output);
     }
 
     public void setMemMigrate(boolean flag) throws IOException {
-        CgroupUtils.writeFileByLine(
-                Constants.getDir(this.dir, CPUSET_MEMORY_MIGRATE),
-                String.valueOf(flag ? 1 : 0));
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, CPUSET_MEMORY_MIGRATE), String.valueOf(flag ? 1 : 0));
     }
 
     public boolean isMemMigrate() throws IOException {
-        int output =
-                Integer.parseInt(CgroupUtils.readFileByLine(
-                        Constants.getDir(this.dir, CPUSET_MEMORY_MIGRATE)).get(
-                        0));
+        int output = Integer.parseInt(CgroupUtils.readFileByLine(Constants.getDir(this.dir, CPUSET_MEMORY_MIGRATE)).get(0));
         return output > 0;
     }
 
     public void setCpuExclusive(boolean flag) throws IOException {
-        CgroupUtils.writeFileByLine(
-                Constants.getDir(this.dir, CPUSET_CPU_EXCLUSIVE),
-                String.valueOf(flag ? 1 : 0));
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, CPUSET_CPU_EXCLUSIVE), String.valueOf(flag ? 1 : 0));
     }
 
     public boolean isCpuExclusive() throws IOException {
-        int output =
-                Integer.parseInt(CgroupUtils.readFileByLine(
-                        Constants.getDir(this.dir, CPUSET_CPU_EXCLUSIVE))
-                        .get(0));
+        int output = Integer.parseInt(CgroupUtils.readFileByLine(Constants.getDir(this.dir, CPUSET_CPU_EXCLUSIVE)).get(0));
         return output > 0;
     }
 
     public void setMemExclusive(boolean flag) throws IOException {
-        CgroupUtils.writeFileByLine(
-                Constants.getDir(this.dir, CPUSET_MEM_EXCLUSIVE),
-                String.valueOf(flag ? 1 : 0));
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, CPUSET_MEM_EXCLUSIVE), String.valueOf(flag ? 1 : 0));
     }
 
     public boolean isMemExclusive() throws IOException {
-        int output =
-                Integer.parseInt(CgroupUtils.readFileByLine(
-                        Constants.getDir(this.dir, CPUSET_MEM_EXCLUSIVE))
-                        .get(0));
+        int output = Integer.parseInt(CgroupUtils.readFileByLine(Constants.getDir(this.dir, CPUSET_MEM_EXCLUSIVE)).get(0));
         return output > 0;
     }
 
     public void setMemHardwall(boolean flag) throws IOException {
-        CgroupUtils.writeFileByLine(
-                Constants.getDir(this.dir, CPUSET_MEM_HARDWALL),
-                String.valueOf(flag ? 1 : 0));
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, CPUSET_MEM_HARDWALL), String.valueOf(flag ? 1 : 0));
     }
 
     public boolean isMemHardwall() throws IOException {
-        int output =
-                Integer.parseInt(CgroupUtils.readFileByLine(
-                        Constants.getDir(this.dir, CPUSET_MEM_HARDWALL)).get(0));
+        int output = Integer.parseInt(CgroupUtils.readFileByLine(Constants.getDir(this.dir, CPUSET_MEM_HARDWALL)).get(0));
         return output > 0;
     }
 
     public int getMemPressure() throws IOException {
-        String output =
-                CgroupUtils.readFileByLine(
-                        Constants.getDir(this.dir, CPUSET_MEMORY_PRESSURE))
-                        .get(0);
+        String output = CgroupUtils.readFileByLine(Constants.getDir(this.dir, CPUSET_MEMORY_PRESSURE)).get(0);
         return Integer.parseInt(output);
     }
 
     public void setMemPressureEnabled(boolean flag) throws IOException {
-        CgroupUtils.writeFileByLine(
-                Constants.getDir(this.dir, CPUSET_MEMORY_PRESSURE_ENABLED),
-                String.valueOf(flag ? 1 : 0));
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, CPUSET_MEMORY_PRESSURE_ENABLED), String.valueOf(flag ? 1 : 0));
     }
 
     public boolean isMemPressureEnabled() throws IOException {
-        int output =
-                Integer.parseInt(CgroupUtils.readFileByLine(
-                        Constants.getDir(this.dir,
-                                CPUSET_MEMORY_PRESSURE_ENABLED)).get(0));
+        int output = Integer.parseInt(CgroupUtils.readFileByLine(Constants.getDir(this.dir, CPUSET_MEMORY_PRESSURE_ENABLED)).get(0));
         return output > 0;
     }
 
     public void setMemSpreadPage(boolean flag) throws IOException {
-        CgroupUtils.writeFileByLine(
-                Constants.getDir(this.dir, CPUSET_MEMORY_SPREAD_PAGE),
-                String.valueOf(flag ? 1 : 0));
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, CPUSET_MEMORY_SPREAD_PAGE), String.valueOf(flag ? 1 : 0));
     }
 
     public boolean isMemSpreadPage() throws IOException {
-        int output =
-                Integer.parseInt(CgroupUtils.readFileByLine(
-                        Constants.getDir(this.dir, CPUSET_MEMORY_SPREAD_PAGE))
-                        .get(0));
+        int output = Integer.parseInt(CgroupUtils.readFileByLine(Constants.getDir(this.dir, CPUSET_MEMORY_SPREAD_PAGE)).get(0));
         return output > 0;
     }
 
     public void setMemSpreadSlab(boolean flag) throws IOException {
-        CgroupUtils.writeFileByLine(
-                Constants.getDir(this.dir, CPUSET_MEMORY_SPREAD_SLAB),
-                String.valueOf(flag ? 1 : 0));
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, CPUSET_MEMORY_SPREAD_SLAB), String.valueOf(flag ? 1 : 0));
     }
 
     public boolean isMemSpreadSlab() throws IOException {
-        int output =
-                Integer.parseInt(CgroupUtils.readFileByLine(
-                        Constants.getDir(this.dir, CPUSET_MEMORY_SPREAD_SLAB))
-                        .get(0));
+        int output = Integer.parseInt(CgroupUtils.readFileByLine(Constants.getDir(this.dir, CPUSET_MEMORY_SPREAD_SLAB)).get(0));
         return output > 0;
     }
 
     public void setSchedLoadBlance(boolean flag) throws IOException {
-        CgroupUtils.writeFileByLine(
-                Constants.getDir(this.dir, CPUSET_SCHED_LOAD_BALANCE),
-                String.valueOf(flag ? 1 : 0));
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, CPUSET_SCHED_LOAD_BALANCE), String.valueOf(flag ? 1 : 0));
     }
 
     public boolean isSchedLoadBlance() throws IOException {
-        int output =
-                Integer.parseInt(CgroupUtils.readFileByLine(
-                        Constants.getDir(this.dir, CPUSET_SCHED_LOAD_BALANCE))
-                        .get(0));
+        int output = Integer.parseInt(CgroupUtils.readFileByLine(Constants.getDir(this.dir, CPUSET_SCHED_LOAD_BALANCE)).get(0));
         return output > 0;
     }
 
     public void setSchedRelaxDomainLevel(int value) throws IOException {
-        CgroupUtils.writeFileByLine(
-                Constants.getDir(this.dir, CPUSET_SCHED_RELAX_DOMAIN_LEVEL),
-                String.valueOf(value));
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, CPUSET_SCHED_RELAX_DOMAIN_LEVEL), String.valueOf(value));
     }
 
     public int getSchedRelaxDomainLevel() throws IOException {
-        String output =
-                CgroupUtils.readFileByLine(
-                        Constants.getDir(this.dir,
-                                CPUSET_SCHED_RELAX_DOMAIN_LEVEL)).get(0);
+        String output = CgroupUtils.readFileByLine(Constants.getDir(this.dir, CPUSET_SCHED_RELAX_DOMAIN_LEVEL)).get(0);
         return Integer.parseInt(output);
     }
 

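setCpus and setMems serialize an int array into the comma-separated list cpuset expects, so pinning a group takes two calls; the kernel requires both cpuset.cpus and cpuset.mems to be set before tasks can be attached. A sketch (the constructor and the int[] parameter types are assumptions based on the StringBuilder loops above):

    // Hedged sketch: pin the group to CPUs 0-1 on NUMA node 0.
    void cpusetSketch() throws IOException {
        CpusetCore cpuset = new CpusetCore("/cgroup/cpuset/jstorm");
        cpuset.setCpus(new int[] {0, 1});  // written as "0,1"
        cpuset.setMems(new int[] {0});     // required before attaching tasks
        int[] pinned = cpuset.getCpus();   // parsed back via parseNums
    }
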
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/DevicesCore.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/DevicesCore.java b/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/DevicesCore.java
index 1832668..491fc8f 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/DevicesCore.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/DevicesCore.java
@@ -110,8 +110,7 @@ public class DevicesCore implements CgroupCore {
             final int prime = 31;
             int result = 1;
             result = prime * result + accesses;
-            result =
-                    prime * result + ((device == null) ? 0 : device.hashCode());
+            result = prime * result + ((device == null) ? 0 : device.hashCode());
             result = prime * result + type;
             return result;
         }
@@ -161,27 +160,21 @@ public class DevicesCore implements CgroupCore {
         }
     }
 
-    private void setPermission(String prop, char type, Device device,
-            int accesses) throws IOException {
+    private void setPermission(String prop, char type, Device device, int accesses) throws IOException {
         Record record = new Record(type, device, accesses);
-        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, prop),
-                record.toString());
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, prop), record.toString());
     }
 
-    public void setAllow(char type, Device device, int accesses)
-            throws IOException {
+    public void setAllow(char type, Device device, int accesses) throws IOException {
         setPermission(DEVICES_ALLOW, type, device, accesses);
     }
 
-    public void setDeny(char type, Device device, int accesses)
-            throws IOException {
+    public void setDeny(char type, Device device, int accesses) throws IOException {
         setPermission(DEVICES_DENY, type, device, accesses);
     }
 
     public Record[] getList() throws IOException {
-        List<String> output =
-                CgroupUtils.readFileByLine(Constants.getDir(this.dir,
-                        DEVICES_LIST));
+        List<String> output = CgroupUtils.readFileByLine(Constants.getDir(this.dir, DEVICES_LIST));
         return Record.parseRecordList(output);
     }
 

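setAllow and setDeny both funnel through setPermission, writing a single "type major:minor accesses" record to devices.allow or devices.deny. A sketch; the DevicesCore(String dir) constructor is assumed, and the integer encoding of the accesses bitmask is internal to the Record class and only guessed at here (the kernel's own format uses "rwm" flags):

    // Hedged sketch: deny write access to a block device, then list rules.
    void devicesSketch() throws IOException {
        DevicesCore devices = new DevicesCore("/cgroup/devices/jstorm");
        Device sda = new Device("8:0");
        devices.setDeny('b', sda, 2);        // 2: assumed "write" bit, unverified
        Record[] rules = devices.getList();  // parsed from devices.list
    }
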
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/FreezerCore.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/FreezerCore.java b/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/FreezerCore.java
index c601c3e..e0ad3da 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/FreezerCore.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/FreezerCore.java
@@ -40,13 +40,11 @@ public class FreezerCore implements CgroupCore {
     }
 
     public void setState(State state) throws IOException {
-        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, FREEZER_STATE),
-                state.name().toUpperCase());
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, FREEZER_STATE), state.name().toUpperCase());
     }
 
     public State getState() throws IOException {
-        return State.getStateValue(CgroupUtils.readFileByLine(
-                Constants.getDir(this.dir, FREEZER_STATE)).get(0));
+        return State.getStateValue(CgroupUtils.readFileByLine(Constants.getDir(this.dir, FREEZER_STATE)).get(0));
     }
 
     public enum State {

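The freezer core is the simplest of the set: a single state file that suspends or resumes every task in the group, which setState writes in upper case as the kernel expects. A sketch (constructor assumed; the FROZEN/THAWED constants are assumed to mirror the kernel's freezer.state values):

    // Hedged sketch: pause the whole group, then resume it.
    void freezerSketch() throws IOException {
        FreezerCore freezer = new FreezerCore("/cgroup/freezer/jstorm");
        freezer.setState(FreezerCore.State.FROZEN);  // tasks stop being scheduled
        // ... inspect or snapshot the frozen tasks here ...
        freezer.setState(FreezerCore.State.THAWED);
    }
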
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/MemoryCore.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/MemoryCore.java b/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/MemoryCore.java
index a2db78c..1b37bd3 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/MemoryCore.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/MemoryCore.java
@@ -27,15 +27,11 @@ public class MemoryCore implements CgroupCore {
 
     public static final String MEMORY_STAT = "/memory.stat";
     public static final String MEMORY_USAGE_IN_BYTES = "/memory.usage_in_bytes";
-    public static final String MEMORY_MEMSW_USAGE_IN_BYTES =
-            "/memory.memsw.usage_in_bytes";
-    public static final String MEMORY_MAX_USAGE_IN_BYTES =
-            "/memory.max_usage_in_bytes";
-    public static final String MEMORY_MEMSW_MAX_USAGE_IN_BYTES =
-            "/memory.memsw.max_usage_in_bytes";
+    public static final String MEMORY_MEMSW_USAGE_IN_BYTES = "/memory.memsw.usage_in_bytes";
+    public static final String MEMORY_MAX_USAGE_IN_BYTES = "/memory.max_usage_in_bytes";
+    public static final String MEMORY_MEMSW_MAX_USAGE_IN_BYTES = "/memory.memsw.max_usage_in_bytes";
     public static final String MEMORY_LIMIT_IN_BYTES = "/memory.limit_in_bytes";
-    public static final String MEMORY_MEMSW_LIMIT_IN_BYTES =
-            "/memory.memsw.limit_in_bytes";
+    public static final String MEMORY_MEMSW_LIMIT_IN_BYTES = "/memory.memsw.limit_in_bytes";
     public static final String MEMORY_FAILCNT = "/memory.failcnt";
     public static final String MEMORY_MEMSW_FAILCNT = "/memory.memsw.failcnt";
     public static final String MEMORY_FORCE_EMPTY = "/memory.force_empty";
@@ -115,113 +111,78 @@ public class MemoryCore implements CgroupCore {
     }
 
     public Stat getStat() throws IOException {
-        String output =
-                CgroupUtils.readFileByLine(
-                        Constants.getDir(this.dir, MEMORY_STAT)).get(0);
+        String output = CgroupUtils.readFileByLine(Constants.getDir(this.dir, MEMORY_STAT)).get(0);
         Stat stat = new Stat(output);
         return stat;
     }
 
     public long getPhysicalUsage() throws IOException {
-        return Long.parseLong(CgroupUtils.readFileByLine(
-                Constants.getDir(this.dir, MEMORY_USAGE_IN_BYTES)).get(0));
+        return Long.parseLong(CgroupUtils.readFileByLine(Constants.getDir(this.dir, MEMORY_USAGE_IN_BYTES)).get(0));
     }
 
     public long getWithSwapUsage() throws IOException {
-        return Long
-                .parseLong(CgroupUtils
-                        .readFileByLine(
-                                Constants.getDir(this.dir,
-                                        MEMORY_MEMSW_USAGE_IN_BYTES)).get(0));
+        return Long.parseLong(CgroupUtils.readFileByLine(Constants.getDir(this.dir, MEMORY_MEMSW_USAGE_IN_BYTES)).get(0));
     }
 
     public long getMaxPhysicalUsage() throws IOException {
-        return Long.parseLong(CgroupUtils.readFileByLine(
-                Constants.getDir(this.dir, MEMORY_MAX_USAGE_IN_BYTES)).get(0));
+        return Long.parseLong(CgroupUtils.readFileByLine(Constants.getDir(this.dir, MEMORY_MAX_USAGE_IN_BYTES)).get(0));
     }
 
     public long getMaxWithSwapUsage() throws IOException {
-        return Long.parseLong(CgroupUtils.readFileByLine(
-                Constants.getDir(this.dir, MEMORY_MEMSW_MAX_USAGE_IN_BYTES))
-                .get(0));
+        return Long.parseLong(CgroupUtils.readFileByLine(Constants.getDir(this.dir, MEMORY_MEMSW_MAX_USAGE_IN_BYTES)).get(0));
     }
 
     public void setPhysicalUsageLimit(long value) throws IOException {
-        CgroupUtils.writeFileByLine(
-                Constants.getDir(this.dir, MEMORY_LIMIT_IN_BYTES),
-                String.valueOf(value));
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, MEMORY_LIMIT_IN_BYTES), String.valueOf(value));
     }
 
     public long getPhysicalUsageLimit() throws IOException {
-        return Long.parseLong(CgroupUtils.readFileByLine(
-                Constants.getDir(this.dir, MEMORY_LIMIT_IN_BYTES)).get(0));
+        return Long.parseLong(CgroupUtils.readFileByLine(Constants.getDir(this.dir, MEMORY_LIMIT_IN_BYTES)).get(0));
     }
 
     public void setWithSwapUsageLimit(long value) throws IOException {
-        CgroupUtils.writeFileByLine(
-                Constants.getDir(this.dir, MEMORY_MEMSW_LIMIT_IN_BYTES),
-                String.valueOf(value));
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, MEMORY_MEMSW_LIMIT_IN_BYTES), String.valueOf(value));
     }
 
     public long getWithSwapUsageLimit() throws IOException {
-        return Long
-                .parseLong(CgroupUtils
-                        .readFileByLine(
-                                Constants.getDir(this.dir,
-                                        MEMORY_MEMSW_LIMIT_IN_BYTES)).get(0));
+        return Long.parseLong(CgroupUtils.readFileByLine(Constants.getDir(this.dir, MEMORY_MEMSW_LIMIT_IN_BYTES)).get(0));
     }
 
     public int getPhysicalFailCount() throws IOException {
-        return Integer.parseInt(CgroupUtils.readFileByLine(
-                Constants.getDir(this.dir, MEMORY_FAILCNT)).get(0));
+        return Integer.parseInt(CgroupUtils.readFileByLine(Constants.getDir(this.dir, MEMORY_FAILCNT)).get(0));
     }
 
     public int getWithSwapFailCount() throws IOException {
-        return Integer.parseInt(CgroupUtils.readFileByLine(
-                Constants.getDir(this.dir, MEMORY_MEMSW_FAILCNT)).get(0));
+        return Integer.parseInt(CgroupUtils.readFileByLine(Constants.getDir(this.dir, MEMORY_MEMSW_FAILCNT)).get(0));
     }
 
     public void clearForceEmpty() throws IOException {
-        CgroupUtils.writeFileByLine(
-                Constants.getDir(this.dir, MEMORY_FORCE_EMPTY),
-                String.valueOf(0));
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, MEMORY_FORCE_EMPTY), String.valueOf(0));
     }
 
     public void setSwappiness(int value) throws IOException {
-        CgroupUtils.writeFileByLine(
-                Constants.getDir(this.dir, MEMORY_SWAPPINESS),
-                String.valueOf(value));
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, MEMORY_SWAPPINESS), String.valueOf(value));
     }
 
     public int getSwappiness() throws IOException {
-        return Integer.parseInt(CgroupUtils.readFileByLine(
-                Constants.getDir(this.dir, MEMORY_SWAPPINESS)).get(0));
+        return Integer.parseInt(CgroupUtils.readFileByLine(Constants.getDir(this.dir, MEMORY_SWAPPINESS)).get(0));
     }
 
     public void setUseHierarchy(boolean flag) throws IOException {
-        CgroupUtils.writeFileByLine(
-                Constants.getDir(this.dir, MEMORY_USE_HIERARCHY),
-                String.valueOf(flag ? 1 : 0));
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, MEMORY_USE_HIERARCHY), String.valueOf(flag ? 1 : 0));
     }
 
     public boolean isUseHierarchy() throws IOException {
-        int output =
-                Integer.parseInt(CgroupUtils.readFileByLine(
-                        Constants.getDir(this.dir, MEMORY_USE_HIERARCHY))
-                        .get(0));
+        int output = Integer.parseInt(CgroupUtils.readFileByLine(Constants.getDir(this.dir, MEMORY_USE_HIERARCHY)).get(0));
         return output > 0;
     }
 
     public void setOomControl(boolean flag) throws IOException {
-        CgroupUtils.writeFileByLine(
-                Constants.getDir(this.dir, MEMORY_OOM_CONTROL),
-                String.valueOf(flag ? 1 : 0));
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, MEMORY_OOM_CONTROL), String.valueOf(flag ? 1 : 0));
     }
 
     public boolean isOomControl() throws IOException {
-        String output =
-                CgroupUtils.readFileByLine(
-                        Constants.getDir(this.dir, MEMORY_OOM_CONTROL)).get(0);
+        String output = CgroupUtils.readFileByLine(Constants.getDir(this.dir, MEMORY_OOM_CONTROL)).get(0);
         output = output.split("\n")[0].split("[\\s]")[1];
         int value = Integer.parseInt(output);
         return value > 0;

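The usage and limit accessors above come in two flavors, physical-only and physical-plus-swap (the memsw.* files), each with its own failcnt counter of allocations that hit the limit. A sketch of setting a hard limit and checking for pressure (constructor and wrapper method assumed):

    // Hedged sketch: 512 MB hard limit; failcnt > 0 means it was hit.
    void memorySketch() throws IOException {
        MemoryCore memory = new MemoryCore("/cgroup/memory/jstorm");
        memory.setPhysicalUsageLimit(512L * 1024 * 1024);  // memory.limit_in_bytes
        long used = memory.getPhysicalUsage();             // memory.usage_in_bytes
        int denied = memory.getPhysicalFailCount();        // memory.failcnt
    }
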
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/NetClsCore.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/NetClsCore.java b/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/NetClsCore.java
index dd80c0a..e7c376d 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/NetClsCore.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/NetClsCore.java
@@ -58,14 +58,11 @@ public class NetClsCore implements CgroupCore {
         StringBuilder sb = new StringBuilder("0x");
         sb.append(toHex(major));
         sb.append(toHex(minor));
-        CgroupUtils.writeFileByLine(
-                Constants.getDir(this.dir, NET_CLS_CLASSID), sb.toString());
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, NET_CLS_CLASSID), sb.toString());
     }
 
     public Device getClassId() throws IOException {
-        String output =
-                CgroupUtils.readFileByLine(
-                        Constants.getDir(this.dir, NET_CLS_CLASSID)).get(0);
+        String output = CgroupUtils.readFileByLine(Constants.getDir(this.dir, NET_CLS_CLASSID)).get(0);
         output = Integer.toHexString(Integer.parseInt(output));
         int major = Integer.parseInt(output.substring(0, output.length() - 4));
         int minor = Integer.parseInt(output.substring(output.length() - 4));

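net_cls.classid packs the tc handle into one 32-bit value, 0xAAAABBBB, with the major handle in the upper 16 bits and the minor in the lower 16, which is what the toHex concatenation above builds. A sketch (the setClassId(int, int) signature is inferred from the body above, and toHex is assumed to zero-pad to four hex digits):

    // Hedged sketch: tag this group's traffic with tc class 0x0010:0x0001.
    void netClsSketch() throws IOException {
        NetClsCore netCls = new NetClsCore("/cgroup/net_cls/jstorm");
        netCls.setClassId(0x10, 0x1);         // written as "0x00100001" (assumed padding)
        Device handle = netCls.getClassId();  // major/minor parsed back
    }
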
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/NetPrioCore.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/NetPrioCore.java b/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/NetPrioCore.java
index fd7e899..6b9b344 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/NetPrioCore.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/container/cgroup/core/NetPrioCore.java
@@ -44,8 +44,7 @@ public class NetPrioCore implements CgroupCore {
     }
 
     public int getPrioId() throws IOException {
-        return Integer.parseInt(CgroupUtils.readFileByLine(
-                Constants.getDir(this.dir, NET_PRIO_PRIOIDX)).get(0));
+        return Integer.parseInt(CgroupUtils.readFileByLine(Constants.getDir(this.dir, NET_PRIO_PRIOIDX)).get(0));
     }
 
     public void setIfPrioMap(String iface, int priority) throws IOException {
@@ -53,15 +52,12 @@ public class NetPrioCore implements CgroupCore {
         sb.append(iface);
         sb.append(' ');
         sb.append(priority);
-        CgroupUtils.writeFileByLine(
-                Constants.getDir(this.dir, NET_PRIO_IFPRIOMAP), sb.toString());
+        CgroupUtils.writeFileByLine(Constants.getDir(this.dir, NET_PRIO_IFPRIOMAP), sb.toString());
     }
 
     public Map<String, Integer> getIfPrioMap() throws IOException {
         Map<String, Integer> result = new HashMap<String, Integer>();
-        List<String> strs =
-                CgroupUtils.readFileByLine(Constants.getDir(this.dir,
-                        NET_PRIO_IFPRIOMAP));
+        List<String> strs = CgroupUtils.readFileByLine(Constants.getDir(this.dir, NET_PRIO_IFPRIOMAP));
         for (String str : strs) {
             String[] strArgs = str.split(" ");
             result.put(strArgs[0], Integer.valueOf(strArgs[1]));

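net_prio.ifpriomap is written one "interface priority" pair at a time, and the reader above folds the whole file back into a map. A sketch (constructor and wrapper method assumed):

    // Hedged sketch: raise egress priority on eth0 for this group.
    void netPrioSketch() throws IOException {
        NetPrioCore netPrio = new NetPrioCore("/cgroup/net_prio/jstorm");
        netPrio.setIfPrioMap("eth0", 5);                     // written as "eth0 5"
        Map<String, Integer> prios = netPrio.getIfPrioMap();
    }
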
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/DefaultInimbus.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/DefaultInimbus.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/DefaultInimbus.java
index 6c2bd21..f788996 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/DefaultInimbus.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/DefaultInimbus.java
@@ -37,9 +37,8 @@ public class DefaultInimbus implements INimbus {
     }
 
     @Override
-    public Collection<WorkerSlot> allSlotsAvailableForScheduling(
-            Collection<SupervisorDetails> existingSupervisors,
-            Topologies topologies, Set<String> topologiesMissingAssignments) {
+    public Collection<WorkerSlot> allSlotsAvailableForScheduling(Collection<SupervisorDetails> existingSupervisors, Topologies topologies,
+            Set<String> topologiesMissingAssignments) {
         // TODO Auto-generated method stub
         Collection<WorkerSlot> result = new HashSet<WorkerSlot>();
         for (SupervisorDetails detail : existingSupervisors) {
@@ -50,15 +49,13 @@ public class DefaultInimbus implements INimbus {
     }
 
     @Override
-    public void assignSlots(Topologies topologies,
-            Map<String, Collection<WorkerSlot>> newSlotsByTopologyId) {
+    public void assignSlots(Topologies topologies, Map<String, Collection<WorkerSlot>> newSlotsByTopologyId) {
         // TODO Auto-generated method stub
 
     }
 
     @Override
-    public String getHostName(
-            Map<String, SupervisorDetails> existingSupervisors, String nodeId) {
+    public String getHostName(Map<String, SupervisorDetails> existingSupervisors, String nodeId) {
         // TODO Auto-generated method stub
         return null;
     }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/NimbusCache.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/NimbusCache.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/NimbusCache.java
index 3858595..97dd079 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/NimbusCache.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/NimbusCache.java
@@ -17,104 +17,87 @@
  */
 package com.alibaba.jstorm.daemon.nimbus;
 
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.TreeMap;
-
-import org.apache.commons.lang.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import backtype.storm.utils.Utils;
-
 import com.alibaba.jstorm.cache.JStormCache;
 import com.alibaba.jstorm.cache.RocksDBCache;
 import com.alibaba.jstorm.cache.TimeoutMemCache;
-import com.alibaba.jstorm.callback.RunnableCallback;
 import com.alibaba.jstorm.client.ConfigExtension;
-import com.alibaba.jstorm.cluster.Cluster;
-import com.alibaba.jstorm.cluster.Common;
-import com.alibaba.jstorm.cluster.StormBase;
 import com.alibaba.jstorm.cluster.StormClusterState;
 import com.alibaba.jstorm.cluster.StormConfig;
-import com.alibaba.jstorm.task.TaskInfo;
-import com.alibaba.jstorm.task.error.TaskError;
 import com.alibaba.jstorm.utils.OSInfo;
+import org.apache.commons.lang.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Map;
 
-public class NimbusCache{
-    private static final long serialVersionUID = 1685576554130463610L;
-    
+public class NimbusCache {
     private static final Logger LOG = LoggerFactory.getLogger(NimbusCache.class);
-    
-    
+
     public static final String TIMEOUT_MEM_CACHE_CLASS = TimeoutMemCache.class.getName();
     public static final String ROCKS_DB_CACHE_CLASS = RocksDBCache.class.getName();
-    
+
     protected JStormCache memCache;
     protected JStormCache dbCache;
     protected StormClusterState zkCluster;
-    
+
     public String getNimbusCacheClass(Map conf) {
         boolean isLinux = OSInfo.isLinux();
         boolean isMac = OSInfo.isMac();
         boolean isLocal = StormConfig.local_mode(conf);
-        
+
         if (isLocal == true) {
             return TIMEOUT_MEM_CACHE_CLASS;
         }
-        
+
         if (isLinux == false && isMac == false) {
             return TIMEOUT_MEM_CACHE_CLASS;
         }
-        
+
         String nimbusCacheClass = ConfigExtension.getNimbusCacheClass(conf);
         if (StringUtils.isBlank(nimbusCacheClass) == false) {
             return nimbusCacheClass;
         }
-        
+
         return ROCKS_DB_CACHE_CLASS;
-        
+
     }
-    
+
     public NimbusCache(Map conf, StormClusterState zkCluster) {
         super();
-        
+
         String dbCacheClass = getNimbusCacheClass(conf);
         LOG.info("NimbusCache db Cache will use {}", dbCacheClass);
-        
+
         try {
-            dbCache = (JStormCache)Utils.newInstance(dbCacheClass);
-            
+            dbCache = (JStormCache) Utils.newInstance(dbCacheClass);
+
             String dbDir = StormConfig.masterDbDir(conf);
             conf.put(RocksDBCache.ROCKSDB_ROOT_DIR, dbDir);
-            
+
             conf.put(RocksDBCache.ROCKSDB_RESET, ConfigExtension.getNimbusCacheReset(conf));
-            
+
             dbCache.init(conf);
-            
+
             if (dbCache instanceof TimeoutMemCache) {
                 memCache = dbCache;
-            }else {
+            } else {
                 memCache = new TimeoutMemCache();
                 memCache.init(conf);
             }
-        }catch(java.lang.UnsupportedClassVersionError e) {
-        	
-        	if (e.getMessage().indexOf("Unsupported major.minor version") >= 0) {
-        		LOG.error("!!!Please update jdk version to 7 or higher!!!");
-        		
-        	}
-        	LOG.error("Failed to create NimbusCache!", e);
+        } catch (UnsupportedClassVersionError e) {
+
+            if (e.getMessage().indexOf("Unsupported major.minor version") >= 0) {
+                LOG.error("!!!Please update jdk version to 7 or higher!!!");
+
+            }
+            LOG.error("Failed to create NimbusCache!", e);
             throw new RuntimeException(e);
         } catch (Exception e) {
             LOG.error("Failed to create NimbusCache!", e);
             throw new RuntimeException(e);
         }
-        
+
         this.zkCluster = zkCluster;
     }
 
@@ -128,19 +111,15 @@ public class NimbusCache{
 
     public void cleanup() {
         dbCache.cleanup();
-        
+
     }
-    
-    
+
     /**
      * 
-     * In the old design, 
-     * DBCache will cache all taskInfo/taskErrors, this will be useful for huge topology
+     * In the old design, DBCache cached all taskInfo/taskErrors, which was useful for huge topologies.
      * 
-     * But the latest zk design, taskInfo is only one znode, taskErros has few znode
-     * So remove them from DBCache
-     * Skip timely refresh taskInfo/taskErrors
+     * But in the latest zk design, taskInfo is a single znode and taskErrors has only a few znodes, so remove them from DBCache and skip the periodic refresh of taskInfo/taskErrors.
      * 
      */
-    
+
 }

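The selection order in getNimbusCacheClass above is: local mode always uses the in-memory cache, hosts that are neither Linux nor Mac fall back to it as well (presumably because a native RocksDB build is only expected on those platforms), an explicit class in the config wins next, and RocksDB is the default. A standalone restatement (the helper name chooseCacheClass is hypothetical; the utility calls are the ones used in this diff):

    // Hypothetical helper mirroring getNimbusCacheClass above.
    static String chooseCacheClass(Map conf) {
        if (StormConfig.local_mode(conf)) {
            return TimeoutMemCache.class.getName();   // local mode: in-memory only
        }
        if (!OSInfo.isLinux() && !OSInfo.isMac()) {
            return TimeoutMemCache.class.getName();   // no native RocksDB expected
        }
        String configured = ConfigExtension.getNimbusCacheClass(conf);
        if (!StringUtils.isBlank(configured)) {
            return configured;                        // explicit override wins
        }
        return RocksDBCache.class.getName();
    }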

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskBatchReceiver.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskBatchReceiver.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskBatchReceiver.java
index db2e990..7ef7144 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskBatchReceiver.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskBatchReceiver.java
@@ -25,6 +25,7 @@ import org.slf4j.LoggerFactory;
 import com.alibaba.jstorm.callback.AsyncLoopThread;
 import com.alibaba.jstorm.task.TaskReceiver.DeserializeRunnable;
 import com.alibaba.jstorm.utils.JStormUtils;
+import com.alibaba.jstorm.utils.TimeUtils;
 
 import backtype.storm.task.TopologyContext;
 import backtype.storm.tuple.BatchTuple;
@@ -32,27 +33,20 @@ import backtype.storm.tuple.Tuple;
 import backtype.storm.utils.DisruptorQueue;
 
 public class TaskBatchReceiver extends TaskReceiver {
-    private static Logger LOG = LoggerFactory
-            .getLogger(TaskBatchReceiver.class);
+    private static Logger LOG = LoggerFactory.getLogger(TaskBatchReceiver.class);
 
-    public TaskBatchReceiver(Task task, int taskId, Map stormConf,
-            TopologyContext topologyContext,
-            Map<Integer, DisruptorQueue> innerTaskTransfer,
+    public TaskBatchReceiver(Task task, int taskId, Map stormConf, TopologyContext topologyContext, Map<Integer, DisruptorQueue> innerTaskTransfer,
             TaskStatus taskStatus, String taskName) {
-        super(task, taskId, stormConf, topologyContext, innerTaskTransfer,
-                taskStatus, taskName);
+        super(task, taskId, stormConf, topologyContext, innerTaskTransfer, taskStatus, taskName);
     }
 
     @Override
     protected void setDeserializeThread() {
-        this.deserializeThread =
-                new AsyncLoopThread(new DeserializeBatchRunnable(
-                        deserializeQueue, innerTaskTransfer.get(taskId)));
+        this.deserializeThread = new AsyncLoopThread(new DeserializeBatchRunnable(deserializeQueue, innerTaskTransfer.get(taskId)));
     }
 
     public class DeserializeBatchRunnable extends DeserializeRunnable {
-        public DeserializeBatchRunnable(DisruptorQueue deserializeQueue,
-                DisruptorQueue exeQueue) {
+        public DeserializeBatchRunnable(DisruptorQueue deserializeQueue, DisruptorQueue exeQueue) {
             super(deserializeQueue, exeQueue);
         }
 
@@ -83,14 +77,11 @@ public class TaskBatchReceiver extends TaskReceiver {
                 return tuple;
             } catch (Throwable e) {
                 if (taskStatus.isShutdown() == false) {
-                    LOG.error(
-                            idStr + " recv thread error "
-                                    + JStormUtils.toPrintableString(ser_msg)
-                                    + "\n", e);
+                    LOG.error(idStr + " recv thread error " + JStormUtils.toPrintableString(ser_msg) + "\n", e);
                 }
             } finally {
                 long end = System.nanoTime();
-                deserializeTimer.update((end - start)/1000000.0d);
+                deserializeTimer.update((end - start) / TimeUtils.NS_PER_US);
             }
 
             return null;

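One note on the timer line above: System.nanoTime deltas are nanoseconds, so the divisor determines the unit the deserializeTimer histogram records. The value of TimeUtils.NS_PER_US is not visible in this diff, so whether the unit changed from the old hard-coded 1000000.0d is left open; the sketch below only lays out the conversions involved:

    // Hedged sketch of nanosecond-delta conversions; the unit actually
    // recorded depends on TimeUtils.NS_PER_US, not shown in this diff.
    long start = System.nanoTime();
    // ... work being timed ...
    long elapsedNs = System.nanoTime() - start;
    double elapsedUs = elapsedNs / 1000.0d;     // nanoseconds -> microseconds
    double elapsedMs = elapsedNs / 1000000.0d;  // nanoseconds -> milliseconds (old divisor)
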
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskBatchTransfer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskBatchTransfer.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskBatchTransfer.java
index e10fe96..07d7cbb 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskBatchTransfer.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskBatchTransfer.java
@@ -20,6 +20,7 @@ package com.alibaba.jstorm.task;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.concurrent.TimeUnit;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -27,13 +28,15 @@ import org.slf4j.LoggerFactory;
 import com.alibaba.jstorm.callback.AsyncLoopThread;
 import com.alibaba.jstorm.client.ConfigExtension;
 import com.alibaba.jstorm.daemon.worker.WorkerData;
+import com.alibaba.jstorm.daemon.worker.timer.TaskBatchCheckTrigger;
+import com.alibaba.jstorm.daemon.worker.timer.TaskBatchFlushTrigger;
+import com.alibaba.jstorm.utils.EventSampler;
+import com.alibaba.jstorm.utils.Pair;
 
-import backtype.storm.messaging.IConnection;
-import backtype.storm.messaging.TaskMessage;
 import backtype.storm.serialization.KryoTupleSerializer;
 import backtype.storm.tuple.BatchTuple;
+import backtype.storm.tuple.ITupleExt;
 import backtype.storm.tuple.TupleExt;
-import backtype.storm.utils.DisruptorQueue;
 
 /**
  * Batch Tuples, then send out
@@ -43,62 +46,84 @@ import backtype.storm.utils.DisruptorQueue;
  */
 public class TaskBatchTransfer extends TaskTransfer {
 
-    private static Logger LOG = LoggerFactory
-            .getLogger(TaskBatchTransfer.class);
-
+    private static Logger LOG = LoggerFactory.getLogger(TaskBatchTransfer.class);
+    protected static final double BATCH_SIZE_THRESHOLD = 2.0;
+    protected static final int BATCH_FLUSH_INTERVAL_MS = 5;
+    protected static final int BATCH_CHECK_INTERVAL_S = 3600;
+    protected static final int BATCH_EVENT_SAMPLER_INTERVAL_S = 4 * 240;
+    
     private Map<Integer, BatchTuple> batchMap;
+    private final int maxBatchSize;
     private int batchSize;
     private Object lock = new Object();
+    private EventSampler eventSampler = null;
 
-    public TaskBatchTransfer(Task task, String taskName,
-            KryoTupleSerializer serializer, TaskStatus taskStatus,
-            WorkerData workerData) {
+    public TaskBatchTransfer(Task task, String taskName, KryoTupleSerializer serializer, TaskStatus taskStatus, WorkerData workerData) {
         super(task, taskName, serializer, taskStatus, workerData);
 
         batchMap = new HashMap<Integer, BatchTuple>();
-        batchSize =
-                ConfigExtension.getTaskMsgBatchSize(workerData.getStormConf());
+        maxBatchSize = ConfigExtension.getTaskMsgBatchSize(workerData.getStormConf());
+        
+        
+        TaskBatchFlushTrigger batchFlushTrigger = new TaskBatchFlushTrigger(BATCH_FLUSH_INTERVAL_MS, taskName, this);
+        batchFlushTrigger.register(TimeUnit.MILLISECONDS);
+        
+        TaskBatchCheckTrigger batchCheckTrigger = new TaskBatchCheckTrigger(BATCH_CHECK_INTERVAL_S, taskName, this);
+        batchCheckTrigger.register();
+        
+        startCheck();
     }
+    
+    public void setBatchSize(int batchSize) {
+        this.batchSize = batchSize;
+        LOG.info(taskName + " set batch size to " + batchSize);
+    }
 
     @Override
     protected AsyncLoopThread setupSerializeThread() {
         return new AsyncLoopThread(new TransferBatchRunnable());
     }
+    
+    public void startCheck() {
+        eventSampler = new EventSampler(BATCH_EVENT_SAMPLER_INTERVAL_S);
+        setBatchSize(maxBatchSize);
+        LOG.info("Start checking batch size for task " + taskName);
+    }
+
+    public void stopCheck() {
+        eventSampler = null;
+        LOG.info("Stop checking batch size for task " + taskName);
+    }
 
-    @Override
-    public void transfer(TupleExt tuple) {
-        int targetTaskid = tuple.getTargetTaskId();
-        synchronized (lock) {
-            BatchTuple batch = getBatchTuple(targetTaskid);
+    @Override
+    public void push(int taskId, TupleExt tuple) {
+        synchronized (lock) {
+            BatchTuple batch = getBatchTuple(taskId);
 
-            batch.addToBatch(tuple);
-            if (batch.isBatchFull()) {
-                pushToQueue(targetTaskid, batch);
-            }
-        }
-    }
+            batch.addToBatch(tuple);
+            if (batch.isBatchFull()) {
+                serializeQueue.publish(batch);
+                batchMap.put(taskId, new BatchTuple(taskId, batchSize));
+            }
+        }
+
+    }
 
     public void flush() {
+        Map<Integer, BatchTuple> oldBatchMap = null;
         synchronized (lock) {
-            for (Entry<Integer, BatchTuple> entry : batchMap.entrySet()) {
-                int taskId = entry.getKey();
-                BatchTuple batch = entry.getValue();
-                if (batch != null && batch.currBatchSize() > 0) {
-                    pushToQueue(taskId, batch);
-                }
+            oldBatchMap = batchMap;
+            batchMap = new HashMap<Integer, BatchTuple>();
+        }
+        
+        for (Entry<Integer, BatchTuple> entry : oldBatchMap.entrySet()) {
+            BatchTuple batch = entry.getValue();
+            if (batch != null && batch.currBatchSize() > 0) {
+                serializeQueue.publish(batch);
             }
         }
     }
 
-    private void pushToQueue(int targetTaskid, BatchTuple batch) {
-        DisruptorQueue exeQueue = innerTaskTransfer.get(targetTaskid);
-        if (exeQueue != null) {
-            exeQueue.publish(batch);
-        } else {
-            serializeQueue.publish(batch);
-        }
-        resetBatchTuple(targetTaskid);
-    }
 
     private BatchTuple getBatchTuple(int targetTaskId) {
         BatchTuple ret = batchMap.get(targetTaskId);
@@ -109,33 +134,27 @@ public class TaskBatchTransfer extends TaskTransfer {
         return ret;
     }
 
-    private void resetBatchTuple(int targetTaskId) {
-        batchMap.put(targetTaskId, null);
-    }
 
     protected class TransferBatchRunnable extends TransferRunnable {
-        @Override
-        public void onEvent(Object event, long sequence, boolean endOfBatch)
-                throws Exception {
-
-            if (event == null) {
-                return;
-            }
-
-            long start = System.currentTimeMillis();
-            try {
-                BatchTuple tuple = (BatchTuple) event;
-                int taskid = tuple.getTargetTaskId();
-                byte[] tupleMessage = serializer.serializeBatch(tuple);
-                TaskMessage taskMessage = new TaskMessage(taskid, tupleMessage);
-                IConnection conn = getConnection(taskid);
-                if (conn != null)
-                    conn.send(taskMessage);
-            } finally {
-                long end = System.currentTimeMillis();
-                timer.update(end - start);
-            }
 
+        public byte[] serialize(ITupleExt tuple) {
+            BatchTuple batchTuple = (BatchTuple) tuple;
+            if (eventSampler != null) {
+                Pair<Integer, Double> result = eventSampler.avgCheck(batchTuple.currBatchSize());
+                if (result != null) {
+                    Double avgBatchSize = result.getSecond();
+                    LOG.info(taskName + " average batch size is " + avgBatchSize);
+                    if (avgBatchSize < BATCH_SIZE_THRESHOLD) {
+                        LOG.info("Average batch size is small, resetting batch size to 1");
+                        // with batch size 1 the transfer path sends tuples
+                        // immediately, without waiting for the flush interval
+                        setBatchSize(1);
+                    }
+                    stopCheck();
+                }
+            }
+            return serializer.serializeBatch(batchTuple);
         }
     }
 }
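
Note on the TaskBatchTransfer rework above: push() now buffers tuples per target task and publishes a batch to the serialize queue only when it fills up, while flush() swaps out the whole map under the lock and publishes the leftovers outside it. A minimal, self-contained sketch of that pattern, with plain collections standing in for BatchTuple and the Disruptor serialize queue (all names here are illustrative, not the JStorm API):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.Queue;
    import java.util.concurrent.ConcurrentLinkedQueue;

    public class BatchBufferSketch {
        private final int batchSize = 4;
        private final Object lock = new Object();
        private final Queue<List<String>> serializeQueue = new ConcurrentLinkedQueue<List<String>>();
        private Map<Integer, List<String>> batchMap = new HashMap<Integer, List<String>>();

        // push(): buffer per target task, publish only when the batch fills up
        public void push(int taskId, String tuple) {
            synchronized (lock) {
                List<String> batch = batchMap.get(taskId);
                if (batch == null) {
                    batch = new ArrayList<String>(batchSize);
                    batchMap.put(taskId, batch);
                }
                batch.add(tuple);
                if (batch.size() >= batchSize) {
                    serializeQueue.offer(batch);
                    batchMap.put(taskId, new ArrayList<String>(batchSize));
                }
            }
        }

        // flush(): swap the map while holding the lock, publish leftovers outside it,
        // so slow queue publishes never block concurrent push() callers
        public void flush() {
            Map<Integer, List<String>> oldBatchMap;
            synchronized (lock) {
                oldBatchMap = batchMap;
                batchMap = new HashMap<Integer, List<String>>();
            }
            for (List<String> batch : oldBatchMap.values()) {
                if (batch != null && !batch.isEmpty()) {
                    serializeQueue.offer(batch);
                }
            }
        }

        public static void main(String[] args) {
            BatchBufferSketch b = new BatchBufferSketch();
            for (int i = 0; i < 6; i++) {
                b.push(7, "tuple-" + i); // one full batch published, two tuples left over
            }
            b.flush();
            System.out.println(b.serializeQueue); // [[tuple-0..3], [tuple-4, tuple-5]]
        }
    }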

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskInfo.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskInfo.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskInfo.java
index 0eb1c4b..ddfe63d 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskInfo.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskInfo.java
@@ -54,11 +54,8 @@ public class TaskInfo implements Serializable {
 
     @Override
     public boolean equals(Object assignment) {
-        if (assignment instanceof TaskInfo
-                && ((TaskInfo) assignment).getComponentId().equals(
-                        getComponentId())
-                && ((TaskInfo) assignment).getComponentType().equals(
-                        componentType)) {
+        if (assignment instanceof TaskInfo && ((TaskInfo) assignment).getComponentId().equals(getComponentId())
+                && ((TaskInfo) assignment).getComponentType().equals(componentType)) {
             return true;
         }
         return false;
@@ -66,13 +63,11 @@ public class TaskInfo implements Serializable {
 
     @Override
     public int hashCode() {
-        return this.getComponentId().hashCode()
-                + this.getComponentType().hashCode();
+        return this.getComponentId().hashCode() + this.getComponentType().hashCode();
     }
 
     @Override
     public String toString() {
-        return ToStringBuilder.reflectionToString(this,
-                ToStringStyle.SHORT_PREFIX_STYLE);
+        return ToStringBuilder.reflectionToString(this, ToStringStyle.SHORT_PREFIX_STYLE);
     }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskReceiver.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskReceiver.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskReceiver.java
index ad32ceb..230ba16 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskReceiver.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskReceiver.java
@@ -17,32 +17,32 @@
  */
 package com.alibaba.jstorm.task;
 
-import java.util.Map;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import backtype.storm.Config;
 import backtype.storm.serialization.KryoTupleDeserializer;
 import backtype.storm.task.TopologyContext;
 import backtype.storm.tuple.Tuple;
 import backtype.storm.utils.DisruptorQueue;
-import backtype.storm.utils.Utils;
 import backtype.storm.utils.WorkerClassLoader;
 
 import com.alibaba.jstorm.callback.AsyncLoopThread;
 import com.alibaba.jstorm.callback.RunnableCallback;
 import com.alibaba.jstorm.client.ConfigExtension;
-import com.alibaba.jstorm.common.metric.Histogram;
+import com.alibaba.jstorm.common.metric.AsmGauge;
+import com.alibaba.jstorm.common.metric.AsmHistogram;
 import com.alibaba.jstorm.common.metric.QueueGauge;
-import com.alibaba.jstorm.metric.JStormHealthCheck;
-import com.alibaba.jstorm.metric.JStormMetrics;
-import com.alibaba.jstorm.metric.MetricDef;
+import com.alibaba.jstorm.metric.*;
 import com.alibaba.jstorm.utils.JStormUtils;
+import com.alibaba.jstorm.utils.TimeUtils;
+import com.esotericsoftware.kryo.KryoException;
 import com.lmax.disruptor.EventHandler;
 import com.lmax.disruptor.WaitStrategy;
 import com.lmax.disruptor.dsl.ProducerType;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Map;
+
 public class TaskReceiver {
     private static Logger LOG = LoggerFactory.getLogger(TaskReceiver.class);
 
@@ -58,13 +58,11 @@ public class TaskReceiver {
     protected DisruptorQueue deserializeQueue;
     protected KryoTupleDeserializer deserializer;
     protected AsyncLoopThread deserializeThread;
-    protected Histogram deserializeTimer;
+    protected AsmHistogram deserializeTimer;
 
     protected TaskStatus taskStatus;
 
-    public TaskReceiver(Task task, int taskId, Map stormConf,
-            TopologyContext topologyContext,
-            Map<Integer, DisruptorQueue> innerTaskTransfer,
+    public TaskReceiver(Task task, int taskId, Map stormConf, TopologyContext topologyContext, Map<Integer, DisruptorQueue> innerTaskTransfer,
             TaskStatus taskStatus, String taskName) {
         this.task = task;
         this.taskId = taskId;
@@ -77,34 +75,24 @@ public class TaskReceiver {
 
         this.isDebugRecv = ConfigExtension.isTopologyDebugRecvTuple(stormConf);
 
-        int queueSize =
-                JStormUtils
-                        .parseInt(
-                                stormConf
-                                        .get(Config.TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE),
-                                256);
-
-        WaitStrategy waitStrategy =
-                (WaitStrategy) JStormUtils
-                        .createDisruptorWaitStrategy(stormConf);
-        this.deserializeQueue =
-                DisruptorQueue.mkInstance("TaskDeserialize",
-                        ProducerType.MULTI, queueSize, waitStrategy);
+        int queueSize = JStormUtils.parseInt(stormConf.get(Config.TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE), 256);
+
+        WaitStrategy waitStrategy = (WaitStrategy) JStormUtils.createDisruptorWaitStrategy(stormConf);
+        this.deserializeQueue = DisruptorQueue.mkInstance("TaskDeserialize", ProducerType.MULTI, queueSize, waitStrategy);
         setDeserializeThread();
-        this.deserializer =
-                new KryoTupleDeserializer(stormConf, topologyContext);
+        this.deserializer = new KryoTupleDeserializer(stormConf, topologyContext);
+
+        String topologyId = topologyContext.getTopologyId();
+        String component = topologyContext.getThisComponentId();
 
         deserializeTimer =
-                JStormMetrics.registerTaskHistogram(taskId,
-                        MetricDef.DESERIALIZE_TIME);
-
-        QueueGauge deserializeQueueGauge =
-                new QueueGauge(idStr + MetricDef.DESERIALIZE_QUEUE,
-                        deserializeQueue);
-        JStormMetrics.registerTaskGauge(deserializeQueueGauge, taskId,
-                MetricDef.DESERIALIZE_QUEUE);
-        JStormHealthCheck.registerTaskHealthCheck(taskId,
-                MetricDef.DESERIALIZE_QUEUE, deserializeQueueGauge);
+                (AsmHistogram) JStormMetrics.registerTaskMetric(
+                        MetricUtils.taskMetricName(topologyId, component, taskId, MetricDef.DESERIALIZE_TIME, MetricType.HISTOGRAM), new AsmHistogram());
+
+        QueueGauge deserializeQueueGauge = new QueueGauge(deserializeQueue, idStr, MetricDef.DESERIALIZE_QUEUE);
+        JStormMetrics.registerTaskMetric(MetricUtils.taskMetricName(topologyId, component, taskId, MetricDef.DESERIALIZE_QUEUE, MetricType.GAUGE),
+                new AsmGauge(deserializeQueueGauge));
+        JStormHealthCheck.registerTaskHealthCheck(taskId, MetricDef.DESERIALIZE_QUEUE, deserializeQueueGauge);
     }
 
     public AsyncLoopThread getDeserializeThread() {
@@ -112,9 +100,7 @@ public class TaskReceiver {
     }
 
     protected void setDeserializeThread() {
-        this.deserializeThread =
-                new AsyncLoopThread(new DeserializeRunnable(deserializeQueue,
-                        innerTaskTransfer.get(taskId)));
+        this.deserializeThread = new AsyncLoopThread(new DeserializeRunnable(deserializeQueue, innerTaskTransfer.get(taskId)));
     }
 
     public DisruptorQueue getDeserializeQueue() {
@@ -126,8 +112,7 @@ public class TaskReceiver {
         DisruptorQueue deserializeQueue;
         DisruptorQueue exeQueue;
 
-        DeserializeRunnable(DisruptorQueue deserializeQueue,
-                DisruptorQueue exeQueue) {
+        DeserializeRunnable(DisruptorQueue deserializeQueue, DisruptorQueue exeQueue) {
             this.deserializeQueue = deserializeQueue;
             this.exeQueue = exeQueue;
         }
@@ -162,24 +147,22 @@ public class TaskReceiver {
                 }
 
                 return tuple;
+            } catch (KryoException e) {
+                throw new RuntimeException(e);
             } catch (Throwable e) {
                 if (taskStatus.isShutdown() == false) {
-                    LOG.error(
-                            idStr + " recv thread error "
-                                    + JStormUtils.toPrintableString(ser_msg)
-                                    + "\n", e);
+                    LOG.error(idStr + " recv thread error " + JStormUtils.toPrintableString(ser_msg) + "\n", e);
                 }
             } finally {
                 long end = System.nanoTime();
-                deserializeTimer.update((end - start)/1000000.0d);
+                deserializeTimer.update((end - start) / TimeUtils.NS_PER_US);
             }
 
             return null;
         }
 
         @Override
-        public void onEvent(Object event, long sequence, boolean endOfBatch)
-                throws Exception {
+        public void onEvent(Object event, long sequence, boolean endOfBatch) throws Exception {
             Object tuple = deserialize((byte[]) event);
 
             if (tuple != null) {
@@ -189,7 +172,7 @@ public class TaskReceiver {
 
         @Override
         public void preRun() {
-            WorkerClassLoader.switchThreadContext();  
+            WorkerClassLoader.switchThreadContext();
         }
 
         @Override
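
Note the units change in the timer update above: the old code divided the nanosecond delta by 1000000.0d (milliseconds), while the new code divides by TimeUtils.NS_PER_US, which by its name should be 1000, so the histogram is now fed microseconds. A toy illustration of the same measurement, assuming NS_PER_US = 1000:

    public class ElapsedMicrosDemo {
        // assumed to match TimeUtils.NS_PER_US (nanoseconds per microsecond)
        private static final double NS_PER_US = 1000.0d;

        public static void main(String[] args) throws InterruptedException {
            long start = System.nanoTime();
            Thread.sleep(5); // stand-in for the deserialize work being timed
            long end = System.nanoTime();
            // same shape as deserializeTimer.update((end - start) / TimeUtils.NS_PER_US)
            System.out.println("elapsed: " + (end - start) / NS_PER_US + " us");
        }
    }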

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskShutdownDameon.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskShutdownDameon.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskShutdownDameon.java
index c49e9fc..d685c04 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskShutdownDameon.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskShutdownDameon.java
@@ -17,36 +17,31 @@
  */
 package com.alibaba.jstorm.task;
 
-import java.util.List;
-import java.util.Map;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import backtype.storm.spout.ISpout;
 import backtype.storm.task.IBolt;
-import backtype.storm.topology.IConfig;
+import backtype.storm.topology.IDynamicComponent;
 import backtype.storm.utils.WorkerClassLoader;
-
 import com.alibaba.jstorm.callback.AsyncLoopThread;
 import com.alibaba.jstorm.cluster.StormClusterState;
 import com.alibaba.jstorm.daemon.worker.ShutdownableDameon;
-import com.alibaba.jstorm.metric.JStormMetrics;
-import com.alibaba.jstorm.task.heartbeat.TaskHeartbeatRunable;
 import com.alibaba.jstorm.utils.JStormUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.Map;
 
 /**
  * shutdown one task
  * 
  * @author yannian/Longda
- * 
  */
 public class TaskShutdownDameon implements ShutdownableDameon {
-    private static Logger LOG = LoggerFactory
-            .getLogger(TaskShutdownDameon.class);
+    private static Logger LOG = LoggerFactory.getLogger(TaskShutdownDameon.class);
 
     public static final byte QUIT_MSG = (byte) 0xff;
 
+    private Task task;
     private TaskStatus taskStatus;
     private String topology_id;
     private Integer task_id;
@@ -55,16 +50,15 @@ public class TaskShutdownDameon implements ShutdownableDameon {
     private Object task_obj;
     private boolean isClosed = false;
 
-    public TaskShutdownDameon(TaskStatus taskStatus, String topology_id,
-            Integer task_id, List<AsyncLoopThread> all_threads,
-            StormClusterState zkCluster, Object task_obj) {
+    public TaskShutdownDameon(TaskStatus taskStatus, String topology_id, Integer task_id, List<AsyncLoopThread> all_threads, StormClusterState zkCluster,
+            Object task_obj, Task task) {
         this.taskStatus = taskStatus;
         this.topology_id = topology_id;
         this.task_id = task_id;
         this.all_threads = all_threads;
         this.zkCluster = zkCluster;
         this.task_obj = task_obj;
-
+        this.task = task;
     }
 
     @Override
@@ -104,18 +98,9 @@ public class TaskShutdownDameon implements ShutdownableDameon {
         closeComponent(task_obj);
 
         try {
-        	JStormMetrics.unregisterTask(task_id);
-            TaskHeartbeatRunable.unregisterTaskStats(task_id);
-            zkCluster.remove_task_heartbeat(topology_id, task_id);
+            zkCluster.disconnect();
         } catch (Exception e) {
-            // TODO Auto-generated catch block
-            LOG.info("Failed to cleanup");
-        } finally {
-            try {
-                zkCluster.disconnect();
-            } catch (Exception e) {
-                LOG.info("Failed to disconnect", e);
-            }
+            LOG.error("Failed to disconnect zk for task-" + task_id, e);
         }
 
         LOG.info("Successfully shutdown task " + topology_id + ":" + task_id);
@@ -170,19 +155,22 @@ public class TaskShutdownDameon implements ShutdownableDameon {
         }
     }
 
-    public void updateConf(Map conf) {
-        if (task_obj instanceof IConfig) {
-            ((IConfig) task_obj).updateConf(conf);
+    public void update(Map conf) {
+        if (task_obj instanceof IDynamicComponent) {
+            ((IDynamicComponent) task_obj).update(conf);
         }
     }
 
     @Override
     public void run() {
-        // TODO Auto-generated method stub
         shutdown();
     }
 
     public int getTaskId() {
         return this.task_id;
     }
+
+    public Task getTask() {
+        return this.task;
+    }
 }
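
The rename from IConfig.updateConf to IDynamicComponent.update above generalizes runtime reconfiguration: the daemon pushes the new conf map only to task objects that opt in by implementing the interface. A small sketch of that dispatch, with an inline component for illustration (only the interface name is taken from the diff):

    import java.util.HashMap;
    import java.util.Map;

    public class DynamicUpdateSketch {
        // mirrors backtype.storm.topology.IDynamicComponent as used in the diff
        interface IDynamicComponent {
            void update(Map conf);
        }

        // mirrors TaskShutdownDameon.update(): push conf only to components that opt in
        static void update(Object taskObj, Map conf) {
            if (taskObj instanceof IDynamicComponent) {
                ((IDynamicComponent) taskObj).update(conf);
            }
        }

        public static void main(String[] args) {
            Object bolt = new IDynamicComponent() {
                public void update(Map conf) {
                    System.out.println("bolt received new conf: " + conf);
                }
            };
            Map conf = new HashMap();
            conf.put("topology.backpressure.enable", true);
            update(bolt, conf);          // dispatched to the component
            update(new Object(), conf);  // silently ignored: not a dynamic component
        }
    }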

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskTransfer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskTransfer.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskTransfer.java
index efe6dee..4da4330 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskTransfer.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskTransfer.java
@@ -24,40 +24,44 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import backtype.storm.Config;
-import backtype.storm.messaging.IConnection;
-import backtype.storm.messaging.TaskMessage;
-import backtype.storm.scheduler.WorkerSlot;
-import backtype.storm.serialization.KryoTupleSerializer;
-import backtype.storm.tuple.TupleExt;
-import backtype.storm.utils.DisruptorQueue;
-import backtype.storm.utils.Utils;
-import backtype.storm.utils.WorkerClassLoader;
-
 import com.alibaba.jstorm.callback.AsyncLoopRunnable;
 import com.alibaba.jstorm.callback.AsyncLoopThread;
 import com.alibaba.jstorm.callback.RunnableCallback;
-import com.alibaba.jstorm.common.metric.MetricRegistry;
+import com.alibaba.jstorm.common.metric.AsmGauge;
+import com.alibaba.jstorm.common.metric.AsmHistogram;
 import com.alibaba.jstorm.common.metric.QueueGauge;
-import com.alibaba.jstorm.common.metric.Timer;
 import com.alibaba.jstorm.daemon.worker.WorkerData;
 import com.alibaba.jstorm.metric.JStormHealthCheck;
 import com.alibaba.jstorm.metric.JStormMetrics;
 import com.alibaba.jstorm.metric.MetricDef;
+import com.alibaba.jstorm.metric.MetricType;
+import com.alibaba.jstorm.metric.MetricUtils;
+import com.alibaba.jstorm.task.backpressure.BackpressureController;
 import com.alibaba.jstorm.utils.JStormUtils;
+import com.alibaba.jstorm.utils.TimeUtils;
 import com.lmax.disruptor.EventHandler;
 import com.lmax.disruptor.WaitStrategy;
 import com.lmax.disruptor.dsl.ProducerType;
 
+import backtype.storm.Config;
+import backtype.storm.messaging.IConnection;
+import backtype.storm.messaging.TaskMessage;
+import backtype.storm.scheduler.WorkerSlot;
+import backtype.storm.serialization.KryoTupleSerializer;
+import backtype.storm.tuple.ITupleExt;
+import backtype.storm.tuple.TupleExt;
+import backtype.storm.utils.DisruptorQueue;
+import backtype.storm.utils.Utils;
+import backtype.storm.utils.WorkerClassLoader;
+
 /**
  * Sending entrance
- * 
+ * <p/>
  * Task sending all tuples through this Object
- * 
+ * <p/>
  * Serialize the Tuple and put the serialized data to the sending queue
  * 
  * @author yannian
- * 
  */
 public class TaskTransfer {
 
@@ -71,15 +75,18 @@ public class TaskTransfer {
     protected final AsyncLoopThread serializeThread;
     protected volatile TaskStatus taskStatus;
     protected String taskName;
-    protected Timer timer;
+    protected AsmHistogram serializeTimer;
     protected Task task;
-    
+    protected String topologyId;
+    protected String componentId;
+    protected int taskId;
+
     protected ConcurrentHashMap<WorkerSlot, IConnection> nodeportSocket;
     protected ConcurrentHashMap<Integer, WorkerSlot> taskNodeport;
 
-    public TaskTransfer(Task task, String taskName,
-            KryoTupleSerializer serializer, TaskStatus taskStatus,
-            WorkerData workerData) {
+    protected BackpressureController backpressureController;
+
+    public TaskTransfer(Task task, String taskName, KryoTupleSerializer serializer, TaskStatus taskStatus, WorkerData workerData) {
         this.task = task;
         this.taskName = taskName;
         this.serializer = serializer;
@@ -87,49 +94,53 @@ public class TaskTransfer {
         this.storm_conf = workerData.getStormConf();
         this.transferQueue = workerData.getTransferQueue();
         this.innerTaskTransfer = workerData.getInnerTaskTransfer();
-        
+
         this.nodeportSocket = workerData.getNodeportSocket();
         this.taskNodeport = workerData.getTaskNodeport();
 
-        int queue_size =
-                Utils.getInt(storm_conf
-                        .get(Config.TOPOLOGY_EXECUTOR_SEND_BUFFER_SIZE));
-        WaitStrategy waitStrategy =
-                (WaitStrategy) JStormUtils.createDisruptorWaitStrategy(storm_conf);
-        this.serializeQueue =
-                DisruptorQueue.mkInstance(taskName, ProducerType.MULTI,
-                        queue_size, waitStrategy);
+        this.topologyId = workerData.getTopologyId();
+        this.componentId = this.task.getComponentId();
+        this.taskId = this.task.getTaskId();
+
+        int queue_size = Utils.getInt(storm_conf.get(Config.TOPOLOGY_EXECUTOR_SEND_BUFFER_SIZE));
+        WaitStrategy waitStrategy = (WaitStrategy) JStormUtils.createDisruptorWaitStrategy(storm_conf);
+        this.serializeQueue = DisruptorQueue.mkInstance(taskName, ProducerType.MULTI, queue_size, waitStrategy);
         this.serializeQueue.consumerStarted();
 
         String taskId = taskName.substring(taskName.indexOf(":") + 1);
-        String metricName =
-                MetricRegistry.name(MetricDef.SERIALIZE_QUEUE, taskName);
-        QueueGauge serializeQueueGauge =
-                new QueueGauge(metricName, serializeQueue);
-        JStormMetrics.registerTaskGauge(serializeQueueGauge,
-                Integer.valueOf(taskId), MetricDef.SERIALIZE_QUEUE);
-        JStormHealthCheck.registerTaskHealthCheck(Integer.valueOf(taskId),
-                MetricDef.SERIALIZE_QUEUE, serializeQueueGauge);
-        timer =
-                JStormMetrics.registerTaskTimer(Integer.valueOf(taskId),
-                        MetricDef.SERIALIZE_TIME);
+        QueueGauge serializeQueueGauge = new QueueGauge(serializeQueue, taskName, MetricDef.SERIALIZE_QUEUE);
+        JStormMetrics.registerTaskMetric(MetricUtils.taskMetricName(topologyId, componentId, this.taskId, MetricDef.SERIALIZE_QUEUE, MetricType.GAUGE),
+                new AsmGauge(serializeQueueGauge));
+        JStormHealthCheck.registerTaskHealthCheck(Integer.valueOf(taskId), MetricDef.SERIALIZE_QUEUE, serializeQueueGauge);
+        serializeTimer =
+                (AsmHistogram) JStormMetrics.registerTaskMetric(
+                        MetricUtils.taskMetricName(topologyId, componentId, this.taskId, MetricDef.SERIALIZE_TIME, MetricType.HISTOGRAM), new AsmHistogram());
 
         serializeThread = setupSerializeThread();
+
+        backpressureController = new BackpressureController(storm_conf, task.getTaskId(), serializeQueue, queue_size);
         LOG.info("Successfully start TaskTransfer thread");
 
     }
 
     public void transfer(TupleExt tuple) {
 
-        int taskid = tuple.getTargetTaskId();
+        int taskId = tuple.getTargetTaskId();
 
-        DisruptorQueue exeQueue = innerTaskTransfer.get(taskid);
+        DisruptorQueue exeQueue = innerTaskTransfer.get(taskId);
         if (exeQueue != null) {
             exeQueue.publish(tuple);
         } else {
-            serializeQueue.publish(tuple);
+            push(taskId, tuple);
         }
 
+        if (backpressureController.isBackpressureMode()) {
+            backpressureController.flowControl();
+        }
+    }
+
+    public void push(int taskId, TupleExt tuple) {
+        serializeQueue.publish(tuple);
     }
 
     protected AsyncLoopThread setupSerializeThread() {
@@ -140,6 +151,10 @@ public class TaskTransfer {
         return serializeThread;
     }
 
+    public BackpressureController getBackpressureController() {
+        return backpressureController;
+    }
+
     protected class TransferRunnable extends RunnableCallback implements EventHandler {
 
         private AtomicBoolean shutdown = AsyncLoopRunnable.getShutdown();
@@ -148,6 +163,7 @@ public class TaskTransfer {
         public String getThreadName() {
             return taskName + "-" + TransferRunnable.class.getSimpleName();
         }
+
 
         @Override
         public void preRun() {
@@ -156,61 +172,80 @@ public class TaskTransfer {
 
         @Override
         public void run() {
-
             while (shutdown.get() == false) {
                 serializeQueue.consumeBatchWhenAvailable(this);
-
             }
-
         }
 
         @Override
         public void postRun() {
             WorkerClassLoader.restoreThreadContext();
         }
+
+        public byte[] serialize(ITupleExt tuple) {
+            return serializer.serialize((TupleExt) tuple);
+        }
 
         @Override
-        public void onEvent(Object event, long sequence, boolean endOfBatch)
-                throws Exception {
+        public void onEvent(Object event, long sequence, boolean endOfBatch) throws Exception {
 
             if (event == null) {
                 return;
             }
 
-            long start = System.currentTimeMillis();
+            long start = System.nanoTime();
 
             try {
-                TupleExt tuple = (TupleExt) event;
+                ITupleExt tuple = (ITupleExt) event;
                 int taskid = tuple.getTargetTaskId();
-                byte[] tupleMessage = serializer.serialize(tuple);
-                TaskMessage taskMessage = new TaskMessage(taskid, tupleMessage);
                 IConnection conn = getConnection(taskid);
                 if (conn != null) {
+                    byte[] tupleMessage = serialize(tuple);
+                    TaskMessage taskMessage = new TaskMessage(taskid, tupleMessage);
                     conn.send(taskMessage);
                 }
             } finally {
-                long end = System.currentTimeMillis();
-                timer.update(end - start);
+                long end = System.nanoTime();
+                serializeTimer.update((end - start) / TimeUtils.NS_PER_US);
             }
 
         }
-        
+
         protected IConnection getConnection(int taskId) {
             IConnection conn = null;
             WorkerSlot nodePort = taskNodeport.get(taskId);
             if (nodePort == null) {
-                String errormsg = "can`t not found IConnection to " + taskId;
-                LOG.warn("Intra transfer warn", new Exception(errormsg));
+                String errormsg = "IConnection to " + taskId + " can't be found";
+                LOG.warn("Internal transfer warning, dropping tuple", new Exception(errormsg));
             } else {
                 conn = nodeportSocket.get(nodePort);
                 if (conn == null) {
-                    String errormsg = "can`t not found nodePort " + nodePort;
-                    LOG.warn("Intra transfer warn", new Exception(errormsg));
+                    String errormsg = "NodePort for " + nodePort + " can't be found";
+                    LOG.warn("Internal transfer warning, dropping tuple", new Exception(errormsg));
                 }
             }
             return conn;
         }
 
+        protected void pullTuples(Object event) {
+            TupleExt tuple = (TupleExt) event;
+            int taskid = tuple.getTargetTaskId();
+            IConnection conn = getConnection(taskid);
+            if (conn != null) {
+                while (conn.available() == false) {
+                    try {
+                        Thread.sleep(1);
+                    } catch (InterruptedException e) {
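+                        // interrupted while polling; keep looping until the connection is available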
+
+                    }
+                }
+                byte[] tupleMessage = serializer.serialize(tuple);
+                TaskMessage taskMessage = new TaskMessage(taskid, tupleMessage);
+                conn.send(taskMessage);
+            }
+        }
+
     }
 
 }
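
The reworked transfer() above now has three concerns: tuples for a task in the same worker go straight to that task's execute queue, tuples for remote tasks go through push() onto the serialize queue, and, when the controller has entered backpressure mode, the calling thread is throttled. A compact model of that routing decision, with BlockingQueues standing in for the Disruptor queues and a sleep standing in for flowControl() (illustrative only):

    import java.util.Map;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.LinkedBlockingQueue;

    public class TransferRoutingSketch {
        // innerTaskTransfer: taskId -> in-process queue for tasks in the same worker
        private final Map<Integer, BlockingQueue<Object>> innerTaskTransfer = new ConcurrentHashMap<Integer, BlockingQueue<Object>>();
        private final BlockingQueue<Object> serializeQueue = new LinkedBlockingQueue<Object>();
        private volatile boolean backpressureMode = false;
        private volatile long sleepTimeMs = 0;

        public void transfer(int targetTaskId, Object tuple) throws InterruptedException {
            BlockingQueue<Object> exeQueue = innerTaskTransfer.get(targetTaskId);
            if (exeQueue != null) {
                exeQueue.put(tuple);       // same worker: bypass serialization entirely
            } else {
                serializeQueue.put(tuple); // remote task: serialize thread picks it up
            }
            if (backpressureMode) {
                Thread.sleep(sleepTimeMs); // crude stand-in for flowControl() throttling
            }
        }

        public static void main(String[] args) throws InterruptedException {
            TransferRoutingSketch t = new TransferRoutingSketch();
            t.innerTaskTransfer.put(3, new LinkedBlockingQueue<Object>());
            t.transfer(3, "local tuple");  // lands in task 3's execute queue
            t.transfer(9, "remote tuple"); // lands in the serialize queue
            System.out.println("serializeQueue: " + t.serializeQueue);
        }
    }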

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/TkHbCacheTime.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/TkHbCacheTime.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/TkHbCacheTime.java
index 596fa35..f703f25 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/TkHbCacheTime.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/TkHbCacheTime.java
@@ -17,12 +17,12 @@
  */
 package com.alibaba.jstorm.task;
 
-import com.alibaba.jstorm.task.heartbeat.TaskHeartbeat;
+import backtype.storm.generated.TaskHeartbeat;
+
 import com.alibaba.jstorm.utils.TimeUtils;
 
 /**
- * TkHbCacheTime is describle taskheartcache (Map<topologyId, Map<taskid,
- * Map<tkHbCacheTime, time>>>)
+ * TkHbCacheTime describes the task heartbeat cache (Map<topologyId, Map<taskid, Map<tkHbCacheTime, time>>>)
  */
 
 public class TkHbCacheTime {
@@ -54,12 +54,13 @@ public class TkHbCacheTime {
         this.taskAssignedTime = taskAssignedTime;
     }
 
-    public void update(TaskHeartbeat zkTaskHeartbeat) {
-        int nowSecs = TimeUtils.current_time_secs();
-        this.nimbusTime = nowSecs;
-        this.taskReportedTime = zkTaskHeartbeat.getTimeSecs();
-        this.taskAssignedTime =
-                zkTaskHeartbeat.getTimeSecs() - zkTaskHeartbeat.getUptimeSecs();
+    public void update(TaskHeartbeat taskHeartbeat) {
+        if (taskHeartbeat != null) {
+            int nowSecs = TimeUtils.current_time_secs();
+            this.nimbusTime = nowSecs;
+            this.taskReportedTime = taskHeartbeat.get_time();
+            this.taskAssignedTime = taskHeartbeat.get_time() - taskHeartbeat.get_uptime();
+        }
     }
 
 }
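
The updated update() above derives the assigned time from the heartbeat itself, reported time minus uptime, with the null guard turning a missing heartbeat into a no-op. In concrete numbers (epoch seconds, values invented for illustration):

    public class HeartbeatTimeDemo {
        public static void main(String[] args) {
            int reportedTime = 1448928000; // heartbeat's get_time(): now, in epoch seconds
            int uptime = 120;              // get_uptime(): task has run for two minutes
            int assignedTime = reportedTime - uptime;
            System.out.println("task assigned at " + assignedTime); // 1448927880
        }
    }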

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/acker/Acker.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/acker/Acker.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/acker/Acker.java
index 2be1592..4deb8ed 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/acker/Acker.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/acker/Acker.java
@@ -17,25 +17,21 @@
  */
 package com.alibaba.jstorm.task.acker;
 
-import java.util.List;
-import java.util.Map;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import backtype.storm.Config;
 import backtype.storm.task.IBolt;
 import backtype.storm.task.OutputCollector;
 import backtype.storm.task.TopologyContext;
 import backtype.storm.tuple.Tuple;
-
 import com.alibaba.jstorm.utils.JStormUtils;
 import com.alibaba.jstorm.utils.RotatingMap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.Map;
 
 /**
- * 
  * @author yannian/Longda
- * 
  */
 public class Acker implements IBolt {
 
@@ -57,27 +53,19 @@ public class Acker implements IBolt {
     private long rotateTime;
 
     @Override
-    public void prepare(Map stormConf, TopologyContext context,
-            OutputCollector collector) {
+    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
         this.collector = collector;
         // pending = new TimeCacheMap<Object, AckObject>(timeoutSec,
         // TIMEOUT_BUCKET_NUM);
         this.pending = new RotatingMap<Object, AckObject>(TIMEOUT_BUCKET_NUM);
-        this.rotateTime =
-                1000L
-                        * JStormUtils.parseInt(stormConf
-                                .get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS), 30)
-                        / (TIMEOUT_BUCKET_NUM - 1);
+        this.rotateTime = 1000L * JStormUtils.parseInt(stormConf.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS), 30) / (TIMEOUT_BUCKET_NUM - 1);
     }
 
     @Override
     public void execute(Tuple input) {
         Object id = input.getValue(0);
-
         AckObject curr = pending.get(id);
-
         String stream_id = input.getSourceStreamId();
-
         if (Acker.ACKER_INIT_STREAM_ID.equals(stream_id)) {
             if (curr == null) {
                 curr = new AckObject();
@@ -95,17 +83,13 @@ public class Acker implements IBolt {
         } else if (Acker.ACKER_ACK_STREAM_ID.equals(stream_id)) {
             if (curr != null) {
                 curr.update_ack(input.getValue(1));
-
             } else {
                 // two case
                 // one is timeout
                 // the other is bolt's ack first come
                 curr = new AckObject();
-
-                curr.val = Long.valueOf(input.getLong(1));
-
+                curr.val = input.getLong(1);
                 pending.put(id, curr);
-
             }
         } else if (Acker.ACKER_FAIL_STREAM_ID.equals(stream_id)) {
             if (curr == null) {
@@ -113,31 +97,23 @@ public class Acker implements IBolt {
                 // already timeout, should go fail
                 return;
             }
-
             curr.failed = true;
-
         } else {
             LOG.info("Unknow source stream");
             return;
         }
 
         Integer task = curr.spout_task;
-
         if (task != null) {
-
             if (curr.val == 0) {
                 pending.remove(id);
                 List values = JStormUtils.mk_list(id);
-
                 collector.emitDirect(task, Acker.ACKER_ACK_STREAM_ID, values);
-
             } else {
-
                 if (curr.failed) {
                     pending.remove(id);
                     List values = JStormUtils.mk_list(id);
-                    collector.emitDirect(task, Acker.ACKER_FAIL_STREAM_ID,
-                            values);
+                    collector.emitDirect(task, Acker.ACKER_FAIL_STREAM_ID, values);
                 }
             }
         } else {
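
For reference, the AckObject bookkeeping above is the classic XOR scheme: every edge id is XORed into val once when a tuple is anchored and once when it is acked, so val returns to zero exactly when the whole tuple tree has been acked, which is what the curr.val == 0 test checks. A self-contained demonstration with plain longs:

    import java.util.Random;

    public class XorAckDemo {
        public static void main(String[] args) {
            Random rand = new Random();
            long val = 0L;
            long[] edgeIds = new long[5];
            for (int i = 0; i < edgeIds.length; i++) {
                edgeIds[i] = rand.nextLong();
                val ^= edgeIds[i];            // anchor: XOR the edge id in
            }
            System.out.println("pending? " + (val != 0));       // true (almost surely)
            for (long id : edgeIds) {
                val ^= id;                    // ack: XOR the same id out, in any order
            }
            System.out.println("tree complete? " + (val == 0)); // always true
        }
    }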

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/backpressure/Backpressure.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/backpressure/Backpressure.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/backpressure/Backpressure.java
new file mode 100644
index 0000000..528fc6b
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/backpressure/Backpressure.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.task.backpressure;
+
+import java.util.Map;
+
+import com.alibaba.jstorm.client.ConfigExtension;
+import com.alibaba.jstorm.utils.JStormUtils;
+
+public abstract class Backpressure {
+    private static final String BACKPRESSURE_DELAY_TIME = "topology.backpressure.delay.time";
+
+    protected volatile boolean isBackpressureEnable;
+
+    protected volatile double highWaterMark;
+    protected volatile double lowWaterMark;
+
+    protected volatile double triggerBpRatio;
+
+    protected volatile long sleepTime;
+
+    public Backpressure(Map stormConf) {
+        this.isBackpressureEnable = ConfigExtension.isBackpressureEnable(stormConf);
+        this.highWaterMark = ConfigExtension.getBackpressureWaterMarkHigh(stormConf);
+        this.lowWaterMark = ConfigExtension.getBackpressureWaterMarkLow(stormConf);
+        this.triggerBpRatio = ConfigExtension.getBackpressureCoordinatorRatio(stormConf);
+    }
+
+    protected void updateConfig(Map stormConf) {
+        if (stormConf == null) {
+            return;
+        }
+
+        if (stormConf.containsKey(ConfigExtension.TOPOLOGY_BACKPRESSURE_ENABLE)) {
+            this.isBackpressureEnable = ConfigExtension.isBackpressureEnable(stormConf);
+        }
+
+        if (stormConf.containsKey(ConfigExtension.TOPOLOGY_BACKPRESSURE_WATER_MARK_HIGH)) {
+            this.highWaterMark = ConfigExtension.getBackpressureWaterMarkHigh(stormConf);
+        }
+
+        if (stormConf.containsKey(ConfigExtension.TOPOLOGY_BACKPRESSURE_WATER_MARK_LOW)) {
+            this.lowWaterMark = ConfigExtension.getBackpressureWaterMarkLow(stormConf);
+        }
+
+        if (stormConf.containsKey(ConfigExtension.TOPOLOGY_BACKPRESSURE_COORDINATOR_RATIO)) {
+            this.triggerBpRatio = ConfigExtension.getBackpressureCoordinatorRatio(stormConf);
+        }
+
+        if (stormConf.containsKey(BACKPRESSURE_DELAY_TIME)) {
+            long time = JStormUtils.parseLong(stormConf.get(BACKPRESSURE_DELAY_TIME), 0L);
+            if (time != 0L) {
+                this.sleepTime = time;
+            }
+        }
+    }
+
+    public boolean isBackpressureConfigChange(Map stormConf) {
+        if (stormConf == null) {
+            return false;
+        }
+
+        return stormConf.containsKey(ConfigExtension.TOPOLOGY_BACKPRESSURE_ENABLE)
+                || stormConf.containsKey(ConfigExtension.TOPOLOGY_BACKPRESSURE_WATER_MARK_HIGH)
+                || stormConf.containsKey(ConfigExtension.TOPOLOGY_BACKPRESSURE_WATER_MARK_LOW)
+                || stormConf.containsKey(ConfigExtension.TOPOLOGY_BACKPRESSURE_COORDINATOR_RATIO)
+                || stormConf.containsKey(BACKPRESSURE_DELAY_TIME);
+    }
+}
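
updateConfig() above deliberately overwrites a knob only when the incoming map carries that key, so a partial reconfiguration pushed at runtime leaves the remaining settings untouched. The same guard pattern in isolation (keys and defaults invented for illustration):

    import java.util.HashMap;
    import java.util.Map;

    public class PartialConfigUpdateDemo {
        private volatile double highWaterMark = 0.8; // illustrative default
        private volatile double lowWaterMark = 0.05; // illustrative default

        void updateConfig(Map<String, Object> conf) {
            if (conf == null) {
                return;
            }
            // only keys present in the map override the current values
            if (conf.containsKey("high.water.mark")) {
                highWaterMark = ((Number) conf.get("high.water.mark")).doubleValue();
            }
            if (conf.containsKey("low.water.mark")) {
                lowWaterMark = ((Number) conf.get("low.water.mark")).doubleValue();
            }
        }

        public static void main(String[] args) {
            PartialConfigUpdateDemo d = new PartialConfigUpdateDemo();
            Map<String, Object> conf = new HashMap<String, Object>();
            conf.put("high.water.mark", 0.9);
            d.updateConfig(conf); // lowWaterMark keeps its default
            System.out.println(d.highWaterMark + " / " + d.lowWaterMark); // 0.9 / 0.05
        }
    }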

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/backpressure/BackpressureController.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/backpressure/BackpressureController.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/backpressure/BackpressureController.java
new file mode 100644
index 0000000..82dd938
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/backpressure/BackpressureController.java
@@ -0,0 +1,182 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.task.backpressure;
+
+import java.util.List;
+import java.util.Map;
+
+import com.alibaba.jstorm.utils.JStormUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.alibaba.jstorm.cluster.Common;
+import com.alibaba.jstorm.task.TaskTransfer;
+
+import backtype.storm.spout.SpoutOutputCollector;
+import backtype.storm.tuple.Values;
+import backtype.storm.utils.DisruptorQueue;
+
+import com.alibaba.jstorm.task.master.TopoMasterCtrlEvent;
+import com.alibaba.jstorm.task.master.TopoMasterCtrlEvent.EventType;
+
+/**
+ * Flow Control
+ * 
+ * @author Basti Liu
+ */
+public class BackpressureController extends Backpressure {
+    private static Logger LOG = LoggerFactory.getLogger(BackpressureController.class);
+
+    private int taskId;
+    private DisruptorQueue queueControlled;
+    private int totalQueueSize;
+    private int queueSizeReduced;
+
+    private boolean isBackpressureMode = false;
+
+    private SpoutOutputCollector outputCollector;
+
+    private long maxBound, minBound;
+
+    public BackpressureController(Map conf, int taskId, DisruptorQueue queue, int queueSize) {
+        super(conf);
+        this.queueControlled = queue;
+        this.totalQueueSize = queueSize;
+        this.queueSizeReduced = queueSize;
+        this.taskId = taskId;
+        this.maxBound = 0L;
+        this.minBound = 0L;
+    }
+
+    public void setOutputCollector(SpoutOutputCollector outputCollector) {
+        this.outputCollector = outputCollector;
+    }
+
+    public void control(TopoMasterCtrlEvent ctrlEvent) {
+        if (isBackpressureEnable == false) {
+            return;
+        }
+
+        EventType eventType = ctrlEvent.getEventType();
+        LOG.debug("Received control event, " + eventType.toString());
+        if (eventType.equals(EventType.startBackpressure)) {
+            List<Object> value = ctrlEvent.getEventValue();
+            int flowCtrlTime = value.get(0) != null ? (Integer) value.get(0) : 0;
+            start(flowCtrlTime);
+        } else if (eventType.equals(EventType.stopBackpressure)) {
+            stop();
+        } else if (eventType.equals(EventType.updateBackpressureConfig)) {
+            List<Object> value = ctrlEvent.getEventValue();
+            if (value != null) {
+                Map stormConf = (Map) value.get(0);
+                updateConfig(stormConf);
+
+                if (isBackpressureEnable == false) {
+                    LOG.info("Disable backpressure in controller.");
+                    resetBackpressureInfo();
+                } else {
+                    LOG.info("Enable backpressure in controller");
+                }
+            }
+        }
+    }
+
+    public void flowControl() {
+        if (isBackpressureEnable == false) {
+            return;
+        }
+
+        try {
+            Thread.sleep(sleepTime);
+            while (isQueueCapacityAvailable() == false) {
+                Thread.sleep(1);
+            }
+        } catch (InterruptedException e) {
+            LOG.error("Sleep was interrupted!");
+        }
+    }
+
+    private void resetBackpressureInfo() {
+        sleepTime = 0L;
+        maxBound = 0L;
+        minBound = 0L;
+        queueSizeReduced = totalQueueSize;
+        isBackpressureMode = false;
+    }
+
+    private void start(int flowCtrlTime) {
+        if (flowCtrlTime > 0) {
+            if (maxBound < flowCtrlTime) {
+                sleepTime = flowCtrlTime;
+            } else if (maxBound == flowCtrlTime) {
+                if (sleepTime >= maxBound) {
+                    sleepTime++;
+                } else {
+                    sleepTime = JStormUtils.halfValueOfSum(flowCtrlTime, sleepTime, true);
+                } 
+            } else {
+                if (maxBound <= sleepTime) {
+                    sleepTime++;
+                } else {
+                    if (sleepTime >= flowCtrlTime) {
+                        sleepTime = JStormUtils.halfValueOfSum(maxBound, sleepTime, true);
+                    } else {
+                        sleepTime = JStormUtils.halfValueOfSum(flowCtrlTime, sleepTime, true);
+                    }
+                }
+            }
+        } else {
+            sleepTime++;
+        }
+        if (sleepTime > maxBound) {
+            maxBound = sleepTime;
+        }
+
+        int size = totalQueueSize / 100;
+        queueSizeReduced = size > 10 ? size : 10;
+        isBackpressureMode = true;
+
+        LOG.info("Start backpressure at spout-{}, sleepTime={}, queueSizeReduced={}, flowCtrlTime={}", taskId, sleepTime, queueSizeReduced, flowCtrlTime);
+    }
+
+    private void stop() {
+        if (sleepTime == minBound) {
+            minBound = 0;
+        }
+        sleepTime = JStormUtils.halfValueOfSum(minBound, sleepTime, false);
+
+        if (sleepTime == 0) {
+            resetBackpressureInfo();
+
+            TopoMasterCtrlEvent stopBp = new TopoMasterCtrlEvent(EventType.stopBackpressure, null);
+            outputCollector.emit(Common.TOPOLOGY_MASTER_CONTROL_STREAM_ID, new Values(stopBp));
+        } else {
+            minBound = sleepTime;
+        }
+
+        LOG.info("Stop backpressure at spout-{}, sleepTime={}, queueSizeReduced={}", taskId, sleepTime, queueSizeReduced);
+    }
+
+    public boolean isBackpressureMode() {
+        return isBackpressureMode && isBackpressureEnable;
+    }
+
+    public boolean isQueueCapacityAvailable() {
+        return (queueControlled.population() < queueSizeReduced);
+    }
+}
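
The start()/stop() pair above searches for a workable delay much like bisection: start events push sleepTime up toward (and past) the recorded maxBound, and each stop event halves it back toward minBound until it reaches zero, at which point the controller resets and tells the topology master that backpressure is over. A toy model of the stop-side decay, assuming JStormUtils.halfValueOfSum(a, b, ceil) is the (optionally rounded-up) midpoint of a and b:

    public class SleepTimeDecayDemo {
        // assumed behaviour of JStormUtils.halfValueOfSum: midpoint of a and b,
        // rounded up when ceil is true, rounded down otherwise
        static long halfValueOfSum(long a, long b, boolean ceil) {
            long sum = a + b;
            return ceil ? (sum + 1) / 2 : sum / 2;
        }

        public static void main(String[] args) {
            long minBound = 0L;
            long sleepTime = 40L; // pretend flow control settled at a 40 ms delay
            while (sleepTime > 0) {
                // each stopBackpressure event halves the delay toward minBound
                sleepTime = halfValueOfSum(minBound, sleepTime, false);
                System.out.println("sleepTime -> " + sleepTime);
            }
            System.out.println("sleepTime hit 0: reset state, emit stopBackpressure upstream");
        }
    }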

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/backpressure/BackpressureCoordinator.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/backpressure/BackpressureCoordinator.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/backpressure/BackpressureCoordinator.java
new file mode 100644
index 0000000..d270078
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/backpressure/BackpressureCoordinator.java
@@ -0,0 +1,415 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.task.backpressure;
+
+import backtype.storm.generated.*;
+import backtype.storm.task.OutputCollector;
+import backtype.storm.task.TopologyContext;
+import backtype.storm.tuple.Tuple;
+import backtype.storm.tuple.Values;
+
+import com.alibaba.jstorm.client.ConfigExtension;
+import com.alibaba.jstorm.cluster.*;
+import com.alibaba.jstorm.task.acker.Acker;
+import com.alibaba.jstorm.task.master.TopoMasterCtrlEvent;
+import com.alibaba.jstorm.task.master.TopoMasterCtrlEvent.EventType;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.*;
+import java.util.Map.Entry;
+
+/**
+ * The coordinator handles requests from both triggers and controllers.
+ * - Event from a trigger:
+ *   find the related controllers (source spouts) and decide whether the request needs to be sent out.
+ * - Event from a controller:
+ *   on a backpressure stop event, send a stop request to all target triggers.
+ * 
+ * @author Basti Liu
+ */
+public class BackpressureCoordinator extends Backpressure {
+    private static final Logger LOG = LoggerFactory.getLogger(BackpressureCoordinator.class);
+
+    private static final int adjustedTime = 5;
+
+    private TopologyContext context;
+    private StormTopology topology;
+    private OutputCollector output;
+
+    private int topologyMasterId;
+    private Map<Integer, String> taskIdToComponentId;
+    private Map<String, SpoutSpec> spouts;
+    private Map<String, Bolt> bolts;
+
+    // Map<source componentId, Map<ComponentId, backpressure info>>
+    private Map<String, SourceBackpressureInfo> SourceTobackpressureInfo;
+
+    private Integer period;
+
+    private StormClusterState zkCluster;
+    private static final String BACKPRESSURE_TAG = "Backpressure has been ";
+
+
+    public BackpressureCoordinator(OutputCollector output, TopologyContext topologyContext, Integer taskId) {
+        super(topologyContext.getStormConf());
+        this.context = topologyContext;
+        this.topology = topologyContext.getRawTopology();
+        this.spouts = new HashMap<String, SpoutSpec>(); 
+        if (this.topology.get_spouts() != null) {
+            this.spouts.putAll(this.topology.get_spouts());
+        }
+        this.bolts = new HashMap<String, Bolt>(); 
+        if (this.topology.get_bolts() != null) {
+            this.bolts.putAll(this.topology.get_bolts());
+        }
+        this.taskIdToComponentId = topologyContext.getTaskToComponent();
+        this.topologyMasterId = taskId;
+
+        this.output = output;
+
+        int checkInterval = ConfigExtension.getBackpressureCheckIntervl(context.getStormConf());
+        int sampleNum = ConfigExtension.getBackpressureTriggerSampleNumber(context.getStormConf());
+        this.period = checkInterval * sampleNum;
+
+        this.zkCluster = topologyContext.getZkCluster();
+        try {
+            this.SourceTobackpressureInfo = zkCluster.get_backpressure_info(context.getTopologyId());
+            if (this.SourceTobackpressureInfo == null) {
+                this.SourceTobackpressureInfo = new HashMap<String, SourceBackpressureInfo>();
+            } else {
+                LOG.info("Successfully retrieved existing SourceTobackpressureInfo from zk: " + SourceTobackpressureInfo);
+            }
+        } catch (Exception e) {
+            LOG.warn("Failed to get SourceTobackpressureInfo from zk", e);
+            this.SourceTobackpressureInfo = new HashMap<String, SourceBackpressureInfo>();
+        }
+    }
+
+    private Set<String> getInputSpoutsForBolt(StormTopology topology, String boltComponentId, Set<String> componentsTraversed) {
+        Set<String> ret = new TreeSet<String>();
+
+        if (componentsTraversed == null) {
+            componentsTraversed = new HashSet<String>();
+        }
+
+        Bolt bolt = bolts.get(boltComponentId);
+        if (bolt == null) {
+            return ret;
+        }
+
+        ComponentCommon common = bolt.get_common();
+        Set<GlobalStreamId> inputstreams = common.get_inputs().keySet();
+        Set<String> inputComponents = new TreeSet<String>();
+        for (GlobalStreamId streamId : inputstreams) {
+            inputComponents.add(streamId.get_componentId());
+        }
+
+        Set<String> spoutComponentIds = new HashSet<String>(spouts.keySet());
+        Set<String> boltComponentIds = new HashSet<String>(bolts.keySet());
+        for (String inputComponent : inputComponents) {
+            // Skip the components which has been traversed before, to avoid dead loop when there are loop bolts in topology
+            if (componentsTraversed.contains(inputComponent)) {
+                continue;
+            } else {
+                componentsTraversed.add(inputComponent);
+            }
+
+            if (spoutComponentIds.contains(inputComponent)) {
+                ret.add(inputComponent);
+            } else if (boltComponentIds.contains(inputComponent)) {
+                Set<String> inputs = getInputSpoutsForBolt(topology, inputComponent, componentsTraversed);
+                ret.addAll(inputs);
+            }
+        }
+
+        return ret;
+    }
+
+    public void process(Tuple input) {
+        if (isBackpressureEnable == false) {
+            return;
+        }
+
+        int sourceTask = input.getSourceTask();
+        String componentId = taskIdToComponentId.get(sourceTask);
+        if (componentId == null) {
+            LOG.warn("Received tuple from unknown task-" + sourceTask);
+            return;
+        }
+
+        if (spouts.keySet().contains(componentId)) {
+            if (SourceTobackpressureInfo.get(componentId) != null) {
+                handleEventFromSpout(sourceTask, input);
+            }
+        } else if (bolts.keySet().contains(componentId)) {
+            handleEventFromBolt(sourceTask, input);
+        }
+    }
+
+    public void updateBackpressureConfig(Map conf) {
+        updateConfig(conf);
+
+        if (isBackpressureEnable == false) {
+            LOG.info("Disable backpressure in coordinator.");
+            SourceTobackpressureInfo.clear();
+        } else {
+            LOG.info("Enable backpressure in coordinator.");
+        }
+
+        TopoMasterCtrlEvent updateBpConfig = new TopoMasterCtrlEvent(EventType.updateBackpressureConfig, new ArrayList<Object>());
+        updateBpConfig.addEventValue(conf);
+        Values values = new Values(updateBpConfig);
+        Set<Integer> targetTasks = new TreeSet<Integer>(taskIdToComponentId.keySet());
+        targetTasks.remove(topologyMasterId);
+        targetTasks.removeAll(context.getComponentTasks(Acker.ACKER_COMPONENT_ID));
+        sendBackpressureMessage(targetTasks, values, EventType.updateBackpressureConfig);
+
+        reportBackpressureStatus();
+    }
+
+    private boolean checkSpoutsUnderBackpressure(Set<String> spouts) {
+        boolean ret = false;
+
+        if (spouts != null) {
+            for (String spout : spouts) {
+                SourceBackpressureInfo backpressureInfo = SourceTobackpressureInfo.get(spout);
+                if (backpressureInfo != null && backpressureInfo.getTasks().size() > 0) {
+                    ret = true;
+                    break;
+                }
+            }
+        }
+
+        return ret;
+    }
+
+    private TargetBackpressureInfo getBackpressureInfoBySourceSpout(String sourceSpout, String targetComponentId, boolean created) {
+        TargetBackpressureInfo ret = null;
+
+        SourceBackpressureInfo info = SourceTobackpressureInfo.get(sourceSpout);
+        if (info == null) {
+            if (created) {
+                info = new SourceBackpressureInfo();
+                SourceTobackpressureInfo.put(sourceSpout, info);
+            }
+        } else {
+            ret = info.getTargetTasks().get(targetComponentId);
+        }
+    
+        if (ret == null && created) {
+            ret = new TargetBackpressureInfo();
+            info.getTargetTasks().put(targetComponentId, ret);
+        }
+        return ret;
+    }
+
+    private boolean checkIntervalExpired(long time) {
+        boolean ret = false;
+        if (time != 0) {
+            if (System.currentTimeMillis() - time > period) {
+                ret = true;
+            }
+        }
+        return ret;
+    }
+
+    private void sendBackpressureMessage(Set<Integer> targetTasks, Values value, EventType backpressureType) {
+        for (Integer taskId : targetTasks) {
+            output.emitDirect(taskId, Common.TOPOLOGY_MASTER_CONTROL_STREAM_ID, value);
+            LOG.debug("Send " + backpressureType.toString() + " request to taskId-" + taskId);
+        }
+    }
+
+    private void handleEventFromSpout(int sourceTask, Tuple input) {
+        TopoMasterCtrlEvent ctrlEvent = (TopoMasterCtrlEvent) input.getValueByField("ctrlEvent");
+        EventType type = ctrlEvent.getEventType();
+
+        boolean update = false;
+        if (type.equals(EventType.stopBackpressure)) {
+            String spoutComponentId = taskIdToComponentId.get(sourceTask);
+            SourceBackpressureInfo info = SourceTobackpressureInfo.remove(spoutComponentId);
+            if (info != null) {
+                info.getTasks().remove(sourceTask);
+                if (info.getTasks().size() == 0) {  
+                    for (Entry<String, TargetBackpressureInfo> entry : info.getTargetTasks().entrySet()) {
+                        String componentId = entry.getKey();
+
+                        // Make sure if all source spouts for this bolt are NOT under backpressure mode.
+                        Set<String> sourceSpouts = getInputSpoutsForBolt(topology, componentId, null);
+                        if (checkSpoutsUnderBackpressure(sourceSpouts) == false) {
+                            Set<Integer> tasks = new TreeSet<Integer>();
+                            tasks.addAll(context.getComponentTasks(componentId));
+                            sendBackpressureMessage(tasks, new Values(ctrlEvent), type);
+                        }
+                    }
+                }
+                update = true;
+            } else {
+                LOG.error("Received event from non-recorded spout-" + sourceTask);
+            }
+
+        } else {
+            LOG.warn("Received unexpected event, " + type.toString());
+        }
+
+        // If task set under backpressure has been changed, report the latest status
+        if (update) {
+            reportBackpressureStatus();
+        }
+    }
+
+    private void handleEventFromBolt(int sourceTask, Tuple input) {
+        String componentId = taskIdToComponentId.get(sourceTask);
+        Set<String> inputSpouts = getInputSpoutsForBolt(topology, componentId, null);
+
+        TopoMasterCtrlEvent ctrlEvent = (TopoMasterCtrlEvent) input.getValueByField("ctrlEvent");
+        EventType type = ctrlEvent.getEventType();
+        Set<Integer> notifyList = new TreeSet<Integer>();
+        Values values = null;
+        TargetBackpressureInfo info = null;
+        boolean update = false;
+        if (type.equals(EventType.startBackpressure)) {
+            int flowCtrlTime = (Integer) ctrlEvent.getEventValue().get(0);
+            for (String spout : inputSpouts) {
+                info = getBackpressureInfoBySourceSpout(spout, componentId, true);
+                SourceBackpressureInfo sourceInfo = SourceTobackpressureInfo.get(spout);
+                update = info.getTasks().add(sourceTask);
+                boolean add = false;
+                if (System.currentTimeMillis() - sourceInfo.getLastestTimeStamp() > period) { 
+                    add = true;
+                } else {
+                    EventType lastestBpEvent = sourceInfo.getLastestBackpressureEvent();
+                    if (lastestBpEvent != null && lastestBpEvent.equals(EventType.startBackpressure) == false) {
+                        add = true;
+                    }
+
+                    int maxFlowCtrlTime = sourceInfo.getMaxFlowCtrlTime();
+                    if ((flowCtrlTime - maxFlowCtrlTime > adjustedTime || maxFlowCtrlTime == -1) &&
+                            flowCtrlTime >= 0) {
+                        add = true;
+                    }
+                }
+                info.setFlowCtrlTime(flowCtrlTime);
+                info.setBackpressureStatus(type);
+
+                if (add) {
+                    info.setTimeStamp(System.currentTimeMillis());
+                    // Only when the number of bolt tasks sending request is more than a configured number, coordinator will 
+                    // send out backpressure request to controller. It is used to avoid the problem that very few tasks might
+                    // cause the over control.
+                    double taskBpRatio = (double) info.getTasks().size() / context.getComponentTasks(componentId).size();
+                    if (taskBpRatio >= triggerBpRatio) {
+                        Set<Integer> spoutTasks = new TreeSet<Integer>(context.getComponentTasks(spout));
+                        SourceTobackpressureInfo.get(spout).getTasks().addAll(spoutTasks);
+                        notifyList.addAll(spoutTasks);
+                    } else {
+                        update = false;
+                    }
+                } else {
+                    update = false;
+                }
+            }
+
+            List<Object> value = new ArrayList<Object>();
+            value.add(info.getFlowCtrlTime());
+            TopoMasterCtrlEvent startBp = new TopoMasterCtrlEvent(EventType.startBackpressure, value);
+            values = new Values(startBp);
+        } else if (type.equals(EventType.stopBackpressure)) {
+            for (String spout : inputSpouts) {
+                info = getBackpressureInfoBySourceSpout(spout, componentId, false);
+                SourceBackpressureInfo sourceInfo = SourceTobackpressureInfo.get(spout);
+                if (info != null) {
+                    Set<Integer> tasks = info.getTasks();
+                    if (tasks != null) {
+                        if (tasks.remove(sourceTask)) {
+                            update = true;
+                        }
+                    }
+                }
+
+                if (info != null && sourceInfo != null && checkIntervalExpired(sourceInfo.getLastestTimeStamp())) {
+                    info.setTimeStamp(System.currentTimeMillis());
+                    Set<Integer> spoutTasks = new TreeSet<Integer>(context.getComponentTasks(spout));
+                    if (spoutTasks != null) {
+                        notifyList.addAll(spoutTasks);
+                    }
+                    info.setBackpressureStatus(type);
+                }
+            }
+
+            // Notify the bolt once none of its source spouts remain under backpressure.
+            if (!checkSpoutsUnderBackpressure(inputSpouts)) {
+                notifyList.add(sourceTask);
+            }
+
+            TopoMasterCtrlEvent stopBp = new TopoMasterCtrlEvent(EventType.stopBackpressure, null);
+            values = new Values(stopBp);
+        } else {
+            LOG.warn("Received unknown event " + type.toString());
+        }
+
+        sendBackpressureMessage(notifyList, values, type);
+
+        // If task set under backpressure has been changed, report the latest status
+        if (update) {
+            LOG.info("inputspouts=" + inputSpouts + " for " + componentId + "-" + sourceTask + ", eventType=" + type.toString());
+            reportBackpressureStatus();
+        }
+    }
+
+    private Set<Integer> getTasksUnderBackpressure() {
+        Set<Integer> ret = new HashSet<Integer>();
+
+        for (Entry<String, SourceBackpressureInfo> entry : SourceTobackpressureInfo.entrySet()) {
+            SourceBackpressureInfo sourceInfo = entry.getValue();
+            if (sourceInfo.getTasks().size() > 0) {
+                ret.addAll(sourceInfo.getTasks());
+
+                for (Entry<String, TargetBackpressureInfo> targetEntry: sourceInfo.getTargetTasks().entrySet()) {
+                    ret.addAll(targetEntry.getValue().getTasks());
+                }
+
+            }
+        }
+
+        return ret;
+    }
+
+    private void reportBackpressureStatus() {
+        try {
+            StringBuilder stringBuilder = new StringBuilder();
+            Set<Integer> underTasks = getTasksUnderBackpressure();
+            stringBuilder.append(BACKPRESSURE_TAG);
+            if (underTasks.isEmpty()) {
+                stringBuilder.append("closed ");
+            } else {
+                stringBuilder.append("opened: ");
+                stringBuilder.append(underTasks);
+            }
+            zkCluster.report_task_error(context.getTopologyId(), context.getThisTaskId(), stringBuilder.toString(), BACKPRESSURE_TAG);
+            zkCluster.set_backpressure_info(context.getTopologyId(), SourceTobackpressureInfo);
+            LOG.info(stringBuilder.toString());
+        } catch (Exception e) {
+            LOG.error("can't update backpressure state ", e);
+        }
+    }
+}
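
For reference, the escalation rule in handleEventFromBolt() boils down to the
following minimal sketch (standalone Java; the class and method names are
hypothetical, not JStorm's): a start-backpressure request from a single bolt
task is only escalated to the source spouts once a configured fraction of that
component's tasks have asked for it.

    import java.util.HashSet;
    import java.util.Set;

    public class RatioTriggerSketch {
        // Fraction of a component's tasks that must request backpressure
        // before the coordinator escalates to the source spouts.
        private final double triggerBpRatio;
        private final Set<Integer> requestingTasks = new HashSet<Integer>();

        public RatioTriggerSketch(double triggerBpRatio) {
            this.triggerBpRatio = triggerBpRatio;
        }

        // Record one task's start request; return true once the
        // component-wide ratio crosses the threshold.
        public boolean onStartRequest(int taskId, int totalComponentTasks) {
            requestingTasks.add(taskId);
            double ratio = (double) requestingTasks.size() / totalComponentTasks;
            return ratio >= triggerBpRatio;
        }
    }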

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/backpressure/BackpressureTrigger.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/backpressure/BackpressureTrigger.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/backpressure/BackpressureTrigger.java
new file mode 100644
index 0000000..0f2df95
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/backpressure/BackpressureTrigger.java
@@ -0,0 +1,216 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.task.backpressure;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import backtype.storm.task.OutputCollector;
+import backtype.storm.tuple.Tuple;
+import backtype.storm.tuple.Values;
+import backtype.storm.utils.DisruptorQueue;
+
+import com.alibaba.jstorm.client.ConfigExtension;
+import com.alibaba.jstorm.cluster.Common;
+import com.alibaba.jstorm.cluster.StormClusterState;
+import com.alibaba.jstorm.task.Task;
+import com.alibaba.jstorm.task.execute.BoltExecutors;
+import com.alibaba.jstorm.task.master.TopoMasterCtrlEvent;
+import com.alibaba.jstorm.task.master.TopoMasterCtrlEvent.EventType;
+import com.alibaba.jstorm.utils.IntervalCheck;
+
+/**
+ * Responsible for deciding whether backpressure shall be triggered. Under heavy load
+ * (when the monitored queue reaches the high water mark), it starts backpressure;
+ * once the load falls back below the low water mark, it stops backpressure.
+ *
+ * @author Basti Liu
+ */
+public class BackpressureTrigger extends Backpressure {
+    private static final Logger LOG = LoggerFactory.getLogger(BackpressureTrigger.class);
+
+    private Task task;
+    private int taskId;
+
+    // Queue which is going to be monitored
+    private DisruptorQueue exeQueue;
+    private DisruptorQueue recvQueue;
+
+    private BoltExecutors boltExecutor;
+
+    private volatile boolean isUnderBackpressure = false;
+
+    private IntervalCheck intervalCheck;
+
+    OutputCollector output;
+
+    private List<EventType> samplingSet;
+    private double triggerSampleRate;
+
+    public BackpressureTrigger(Task task, BoltExecutors boltExecutor, Map stormConf, OutputCollector output) {
+        super(stormConf);
+
+        this.task = task;
+        this.taskId = task.getTaskId();
+
+        int sampleNum = ConfigExtension.getBackpressureTriggerSampleNumber(stormConf);
+        int sampleInterval = sampleNum * (ConfigExtension.getBackpressureCheckIntervl(stormConf));
+        this.intervalCheck = new IntervalCheck();
+        this.intervalCheck.setIntervalMs(sampleInterval);
+        this.intervalCheck.start();
+
+        this.samplingSet = new ArrayList<EventType>();
+        this.triggerSampleRate = ConfigExtension.getBackpressureTriggerSampleRate(stormConf);
+
+        this.output = output;
+
+        this.boltExecutor = boltExecutor;
+
+        try {
+            StormClusterState zkCluster = task.getZkCluster();
+            Map<String, SourceBackpressureInfo> backpressureInfo = zkCluster.get_backpressure_info(task.getTopologyId());
+            if (backpressureInfo != null) {
+                for (Entry<String, SourceBackpressureInfo> entry : backpressureInfo.entrySet()) {
+                    SourceBackpressureInfo info = entry.getValue();
+                    Map<String, TargetBackpressureInfo> targetInfoMap = info.getTargetTasks();
+                    if (targetInfoMap != null) {
+                        TargetBackpressureInfo targetInfo = targetInfoMap.get(task.getComponentId());
+                        if (targetInfo != null && targetInfo.getTasks().contains(taskId)) {
+                            isBackpressureEnable = true;
+                            LOG.info("Retrieved backpressure info for task-" + taskId);
+                        }
+                    }
+                }
+            }
+        } catch (Exception e) {
+            LOG.info("Failed to get backpressure info from zk", e);
+        }
+        LOG.info("Finished BackpressureTrigger init, highWaterMark=" + highWaterMark + ", lowWaterMark=" + lowWaterMark + ", sendInterval="
+                + intervalCheck.getInterval());
+    }
+
+    public void checkAndTrigger() {
+        if (!isBackpressureEnable) {
+            return;
+        }
+
+        if (exeQueue == null || recvQueue == null) {
+            exeQueue = task.getExecuteQueue();
+            recvQueue = task.getDeserializeQueue();
+
+            if (exeQueue == null) {
+                LOG.info("Init of executor-task-" + taskId + " has not finished yet!");
+                return;
+            }
+            if (recvQueue == null) {
+                LOG.info("Init of receiver-task-" + taskId + " has not finished yet!");
+                return;
+            }
+        }
+
+        LOG.debug("Backpressure Check: exeQueue load=" + (exeQueue.pctFull() * 100) + ", recvQueue load=" + (recvQueue.pctFull() * 100));
+        if (exeQueue.pctFull() > highWaterMark) {
+            samplingSet.add(EventType.startBackpressure);
+        } else if (exeQueue.pctFull() <= lowWaterMark) {
+            samplingSet.add(EventType.stopBackpressure);
+        } else {
+            samplingSet.add(EventType.defaultType);
+        }
+
+        if (intervalCheck.check()) {
+            int startCount = 0, stopCount = 0;
+
+            for (EventType eventType : samplingSet) {
+                if (eventType.equals(EventType.startBackpressure)) {
+                    startCount++;
+                } else if (eventType.equals(EventType.stopBackpressure)) {
+                    stopCount++;
+                }
+            }
+
+            if (startCount > stopCount) {
+                if (sampleRateCheck(startCount)) {
+                    startBackpressure();
+                    isUnderBackpressure = true;
+                }
+            } else {
+                if (sampleRateCheck(stopCount) && isUnderBackpressure) {
+                    stopBackpressure();
+                }
+            }
+
+            samplingSet.clear();
+        }
+    }
+
+    private boolean sampleRateCheck(double count) {
+        double sampleRate = count / samplingSet.size();
+        return sampleRate > triggerSampleRate;
+    }
+
+    public void handle(Tuple input) {
+        try {
+            TopoMasterCtrlEvent event = (TopoMasterCtrlEvent) input.getValueByField("ctrlEvent");
+            EventType type = event.getEventType();
+            if (type.equals(EventType.stopBackpressure)) {
+                isUnderBackpressure = false;
+                LOG.info("Received stop backpressure event for task-" + task.getTaskId());
+            } else if (type.equals(EventType.updateBackpressureConfig)) {
+                Map stormConf = (Map) event.getEventValue().get(0);
+                updateConfig(stormConf);
+
+                if (!isBackpressureEnable) {
+                    LOG.info("Disable backpressure in trigger.");
+                    isUnderBackpressure = false;
+                    samplingSet.clear();
+                } else {
+                    LOG.info("Enable backpressure in trigger.");
+                }
+            } else {
+                LOG.info("Received unexpected event, " + type.toString());
+            }
+        } catch (Exception e) {
+            LOG.error("Failed to handle event", e);
+        }
+    }
+
+    private void startBackpressure() {
+        List<Object> value = new ArrayList<Object>();
+        int flowCtrlTime = (int) (boltExecutor.getExecuteTime() / 1000);
+        value.add(flowCtrlTime);
+        TopoMasterCtrlEvent startBp = new TopoMasterCtrlEvent(EventType.startBackpressure, value);
+        output.emit(Common.TOPOLOGY_MASTER_CONTROL_STREAM_ID, new Values(startBp));
+        LOG.debug("Send start backpressure request for task-{}, flowCtrlTime={}", taskId, flowCtrlTime);
+    }
+
+    private void stopBackpressure() {
+        TopoMasterCtrlEvent stopBp = new TopoMasterCtrlEvent(EventType.stopBackpressure, null);
+        output.emit(Common.TOPOLOGY_MASTER_CONTROL_STREAM_ID, new Values(stopBp));
+        LOG.debug("Send stop backpressure request for task-{}", taskId);
+    }
+}
\ No newline at end of file
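
The sampling scheme in checkAndTrigger() above is effectively a majority vote
with a minimum sample rate, evaluated once per interval. A minimal standalone
sketch (hypothetical names, not JStorm's API) of that decision:

    import java.util.ArrayList;
    import java.util.List;

    public class WatermarkSamplerSketch {
        enum Sample { START, STOP, NONE }

        private final double highWaterMark;
        private final double lowWaterMark;
        private final double triggerSampleRate;
        private final List<Sample> samples = new ArrayList<Sample>();

        public WatermarkSamplerSketch(double high, double low, double rate) {
            this.highWaterMark = high;
            this.lowWaterMark = low;
            this.triggerSampleRate = rate;
        }

        // Classify one queue-load observation against the two watermarks.
        public void sample(double queuePctFull) {
            if (queuePctFull > highWaterMark) {
                samples.add(Sample.START);
            } else if (queuePctFull <= lowWaterMark) {
                samples.add(Sample.STOP);
            } else {
                samples.add(Sample.NONE);
            }
        }

        // At the end of each interval, the majority event wins, but only
        // if it occurred in more than triggerSampleRate of the samples.
        public Sample decide() {
            int starts = 0, stops = 0;
            for (Sample s : samples) {
                if (s == Sample.START) starts++;
                else if (s == Sample.STOP) stops++;
            }
            Sample winner = Sample.NONE;
            if (starts > stops && starts > triggerSampleRate * samples.size()) {
                winner = Sample.START;
            } else if (stops >= starts && stops > triggerSampleRate * samples.size()) {
                winner = Sample.STOP;
            }
            samples.clear();
            return winner;
        }
    }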

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/backpressure/SourceBackpressureInfo.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/backpressure/SourceBackpressureInfo.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/backpressure/SourceBackpressureInfo.java
new file mode 100644
index 0000000..05f7d11
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/backpressure/SourceBackpressureInfo.java
@@ -0,0 +1,97 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.task.backpressure;
+
+import java.io.Serializable;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.Map.Entry;
+
+import org.apache.commons.lang.builder.ToStringBuilder;
+import org.apache.commons.lang.builder.ToStringStyle;
+import com.alibaba.jstorm.task.master.TopoMasterCtrlEvent.EventType;
+
+public class SourceBackpressureInfo implements Serializable {
+    private static final long serialVersionUID = -8213491092461721871L;
+
+    // source tasks under backpressure
+    private Set<Integer> tasks;
+
+    // target components that have sent backpressure requests to this source spout
+    // Map<componentId, backpressure info for that target component>
+    private Map<String, TargetBackpressureInfo> targetTasks;
+
+    public SourceBackpressureInfo() {
+        this.tasks = new TreeSet<Integer>();
+        this.targetTasks = new HashMap<String, TargetBackpressureInfo>();
+    }
+
+    public Set<Integer> getTasks() {
+        return tasks;
+    }
+
+    public Map<String, TargetBackpressureInfo> getTargetTasks() {
+        return targetTasks;
+    }
+
+    public long getLastestTimeStamp() {
+        long ret = 0;
+
+        for (Entry<String, TargetBackpressureInfo> entry : targetTasks.entrySet()) {
+            TargetBackpressureInfo info = entry.getValue();
+            if (info.getTimeStamp() > ret) {
+                ret = info.getTimeStamp();
+            }
+        }
+        return ret;
+    }
+
+    public EventType getLastestBackpressureEvent() {
+        EventType ret = null;
+        long timeStamp = 0;
+
+        for (Entry<String, TargetBackpressureInfo> entry : targetTasks.entrySet()) {
+            TargetBackpressureInfo info = entry.getValue();
+            if (info.getTimeStamp() > timeStamp) {
+                timeStamp = info.getTimeStamp();
+                ret = info.getBackpressureStatus();
+            }
+        }
+
+        return ret;
+    }
+
+    public int getMaxFlowCtrlTime() {
+        int ret = 0;
+
+        for (Entry<String, TargetBackpressureInfo> entry : targetTasks.entrySet()) {
+            TargetBackpressureInfo info = entry.getValue();
+            if (info.getFlowCtrlTime() > ret) {
+                ret = info.getFlowCtrlTime();
+            }
+        }
+        return ret;
+    }
+
+    @Override
+    public String toString() {
+        return ToStringBuilder.reflectionToString(this, ToStringStyle.SHORT_PREFIX_STYLE);
+    }
+}
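
The aggregate getters above all follow the same fold over targetTasks: the
source-level view is just the maximum timestamp, latest event, or maximum
flow-control time across all target components. A toy illustration of the
pattern (hypothetical Target type, not the real TargetBackpressureInfo):

    import java.util.HashMap;
    import java.util.Map;

    public class AggregationSketch {
        static class Target {
            final long timeStamp;
            final int flowCtrlTime;
            Target(long timeStamp, int flowCtrlTime) {
                this.timeStamp = timeStamp;
                this.flowCtrlTime = flowCtrlTime;
            }
        }

        public static void main(String[] args) {
            Map<String, Target> targets = new HashMap<String, Target>();
            targets.put("boltA", new Target(1000L, 5));
            targets.put("boltB", new Target(2500L, 3));

            long latest = 0;
            int maxCtrl = 0;
            for (Target t : targets.values()) {
                if (t.timeStamp > latest) latest = t.timeStamp;
                if (t.flowCtrlTime > maxCtrl) maxCtrl = t.flowCtrlTime;
            }
            // Prints: latest=2500, maxFlowCtrlTime=5
            System.out.println("latest=" + latest + ", maxFlowCtrlTime=" + maxCtrl);
        }
    }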

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/backpressure/TargetBackpressureInfo.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/backpressure/TargetBackpressureInfo.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/backpressure/TargetBackpressureInfo.java
new file mode 100644
index 0000000..2f6332b
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/backpressure/TargetBackpressureInfo.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.task.backpressure;
+
+import java.io.Serializable;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.commons.lang.builder.ToStringBuilder;
+import org.apache.commons.lang.builder.ToStringStyle;
+
+import com.alibaba.jstorm.task.master.TopoMasterCtrlEvent.EventType;
+
+public class TargetBackpressureInfo implements Serializable {
+    private static final long serialVersionUID = -1829897435773792484L;
+    
+    private Set<Integer> tasks;
+    
+    private EventType backpressureStatus;
+    private int flowCtrlTime;
+    private long timeStamp;
+
+    public TargetBackpressureInfo() {
+        this.tasks = new TreeSet<Integer>();
+        this.backpressureStatus = EventType.defaultType;
+        this.flowCtrlTime = -1;
+        this.timeStamp = 0l;
+    }
+
+    public TargetBackpressureInfo(EventType backpressureStatus, int flowCtrlTime, long time) {
+        this.tasks = new TreeSet<Integer>();
+        this.backpressureStatus = backpressureStatus;
+        this.flowCtrlTime = flowCtrlTime;
+        this.timeStamp = time;
+    }
+
+    public Set<Integer> getTasks() {
+        return tasks;
+    }
+
+    public void setBackpressureStatus(EventType status) {
+        this.backpressureStatus = status;
+    }
+
+    public EventType getBackpressureStatus() {
+        return this.backpressureStatus;
+    }
+
+    public void setTimeStamp(long time) {
+        this.timeStamp = time;
+    }
+
+    public long getTimeStamp() {
+        return this.timeStamp;
+    }
+
+    public int getFlowCtrlTime() {
+        return this.flowCtrlTime;
+    }
+
+    public void setFlowCtrlTime(int time) {
+        this.flowCtrlTime = time;
+    }
+
+    @Override
+    public String toString() {
+        return ToStringBuilder.reflectionToString(this, ToStringStyle.SHORT_PREFIX_STYLE);
+    }
+}


[46/51] [partial] storm git commit: Update JStorm to latest release 2.1.0

Posted by pt...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/ClusterSummary.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/ClusterSummary.java b/jstorm-core/src/main/java/backtype/storm/generated/ClusterSummary.java
index 1735b8a..273aaf3 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/ClusterSummary.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/ClusterSummary.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class ClusterSummary implements org.apache.thrift.TBase<ClusterSummary, ClusterSummary._Fields>, java.io.Serializable, Cloneable, Comparable<ClusterSummary> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ClusterSummary");
 
@@ -450,11 +450,11 @@ public class ClusterSummary implements org.apache.thrift.TBase<ClusterSummary, C
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -490,18 +490,18 @@ public class ClusterSummary implements org.apache.thrift.TBase<ClusterSummary, C
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_nimbus()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'nimbus' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'nimbus' is unset! Struct:" + toString());
     }
 
     if (!is_set_supervisors()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'supervisors' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'supervisors' is unset! Struct:" + toString());
     }
 
     if (!is_set_topologies()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'topologies' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'topologies' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -513,7 +513,7 @@ public class ClusterSummary implements org.apache.thrift.TBase<ClusterSummary, C
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -521,7 +521,7 @@ public class ClusterSummary implements org.apache.thrift.TBase<ClusterSummary, C
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -534,7 +534,7 @@ public class ClusterSummary implements org.apache.thrift.TBase<ClusterSummary, C
 
   private static class ClusterSummaryStandardScheme extends StandardScheme<ClusterSummary> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, ClusterSummary struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, ClusterSummary struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -600,7 +600,7 @@ public class ClusterSummary implements org.apache.thrift.TBase<ClusterSummary, C
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, ClusterSummary struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, ClusterSummary struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -648,7 +648,7 @@ public class ClusterSummary implements org.apache.thrift.TBase<ClusterSummary, C
   private static class ClusterSummaryTupleScheme extends TupleScheme<ClusterSummary> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, ClusterSummary struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, ClusterSummary struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       struct.nimbus.write(oprot);
       {
@@ -668,7 +668,7 @@ public class ClusterSummary implements org.apache.thrift.TBase<ClusterSummary, C
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, ClusterSummary struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, ClusterSummary struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       struct.nimbus = new NimbusSummary();
       struct.nimbus.read(iprot);

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/ComponentCommon.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/ComponentCommon.java b/jstorm-core/src/main/java/backtype/storm/generated/ComponentCommon.java
index 0a98a62..715dfa3 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/ComponentCommon.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/ComponentCommon.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class ComponentCommon implements org.apache.thrift.TBase<ComponentCommon, ComponentCommon._Fields>, java.io.Serializable, Cloneable, Comparable<ComponentCommon> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ComponentCommon");
 
@@ -531,11 +531,11 @@ public class ComponentCommon implements org.apache.thrift.TBase<ComponentCommon,
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -579,14 +579,14 @@ public class ComponentCommon implements org.apache.thrift.TBase<ComponentCommon,
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_inputs()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'inputs' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'inputs' is unset! Struct:" + toString());
     }
 
     if (!is_set_streams()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'streams' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'streams' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -595,7 +595,7 @@ public class ComponentCommon implements org.apache.thrift.TBase<ComponentCommon,
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -605,7 +605,7 @@ public class ComponentCommon implements org.apache.thrift.TBase<ComponentCommon,
       // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
       __isset_bitfield = 0;
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -618,7 +618,7 @@ public class ComponentCommon implements org.apache.thrift.TBase<ComponentCommon,
 
   private static class ComponentCommonStandardScheme extends StandardScheme<ComponentCommon> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, ComponentCommon struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, ComponentCommon struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -696,7 +696,7 @@ public class ComponentCommon implements org.apache.thrift.TBase<ComponentCommon,
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, ComponentCommon struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, ComponentCommon struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -753,7 +753,7 @@ public class ComponentCommon implements org.apache.thrift.TBase<ComponentCommon,
   private static class ComponentCommonTupleScheme extends TupleScheme<ComponentCommon> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, ComponentCommon struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, ComponentCommon struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       {
         oprot.writeI32(struct.inputs.size());
@@ -788,7 +788,7 @@ public class ComponentCommon implements org.apache.thrift.TBase<ComponentCommon,
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, ComponentCommon struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, ComponentCommon struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       {
         org.apache.thrift.protocol.TMap _map36 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/ComponentObject.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/ComponentObject.java b/jstorm-core/src/main/java/backtype/storm/generated/ComponentObject.java
index ab32225..1ff671e 100755
--- a/jstorm-core/src/main/java/backtype/storm/generated/ComponentObject.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/ComponentObject.java
@@ -181,7 +181,7 @@ public class ComponentObject extends org.apache.thrift.TUnion<ComponentObject, C
   }
 
   @Override
-  protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TField field) throws org.apache.thrift.TException {
+  protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TField field) throws TException {
     _Fields setField = _Fields.findByThriftId(field.id);
     if (setField != null) {
       switch (setField) {
@@ -224,7 +224,7 @@ public class ComponentObject extends org.apache.thrift.TUnion<ComponentObject, C
   }
 
   @Override
-  protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     switch (setField_) {
       case SERIALIZED_JAVA:
         ByteBuffer serialized_java = (ByteBuffer)value_;
@@ -244,7 +244,7 @@ public class ComponentObject extends org.apache.thrift.TUnion<ComponentObject, C
   }
 
   @Override
-  protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, short fieldID) throws org.apache.thrift.TException {
+  protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, short fieldID) throws TException {
     _Fields setField = _Fields.findByThriftId(fieldID);
     if (setField != null) {
       switch (setField) {
@@ -271,7 +271,7 @@ public class ComponentObject extends org.apache.thrift.TUnion<ComponentObject, C
   }
 
   @Override
-  protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     switch (setField_) {
       case SERIALIZED_JAVA:
         ByteBuffer serialized_java = (ByteBuffer)value_;
@@ -427,7 +427,7 @@ public class ComponentObject extends org.apache.thrift.TUnion<ComponentObject, C
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -436,7 +436,7 @@ public class ComponentObject extends org.apache.thrift.TUnion<ComponentObject, C
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/ComponentSummary.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/ComponentSummary.java b/jstorm-core/src/main/java/backtype/storm/generated/ComponentSummary.java
index 8161f72..4bffd3f 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/ComponentSummary.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/ComponentSummary.java
@@ -34,14 +34,14 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class ComponentSummary implements org.apache.thrift.TBase<ComponentSummary, ComponentSummary._Fields>, java.io.Serializable, Cloneable, Comparable<ComponentSummary> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ComponentSummary");
 
   private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1);
   private static final org.apache.thrift.protocol.TField PARALLEL_FIELD_DESC = new org.apache.thrift.protocol.TField("parallel", org.apache.thrift.protocol.TType.I32, (short)2);
   private static final org.apache.thrift.protocol.TField TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("type", org.apache.thrift.protocol.TType.STRING, (short)3);
-  private static final org.apache.thrift.protocol.TField TASK_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("task_ids", org.apache.thrift.protocol.TType.LIST, (short)4);
+  private static final org.apache.thrift.protocol.TField TASK_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("taskIds", org.apache.thrift.protocol.TType.LIST, (short)4);
   private static final org.apache.thrift.protocol.TField ERRORS_FIELD_DESC = new org.apache.thrift.protocol.TField("errors", org.apache.thrift.protocol.TType.LIST, (short)5);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
@@ -53,7 +53,7 @@ public class ComponentSummary implements org.apache.thrift.TBase<ComponentSummar
   private String name; // required
   private int parallel; // required
   private String type; // required
-  private List<Integer> task_ids; // required
+  private List<Integer> taskIds; // required
   private List<ErrorInfo> errors; // optional
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
@@ -61,7 +61,7 @@ public class ComponentSummary implements org.apache.thrift.TBase<ComponentSummar
     NAME((short)1, "name"),
     PARALLEL((short)2, "parallel"),
     TYPE((short)3, "type"),
-    TASK_IDS((short)4, "task_ids"),
+    TASK_IDS((short)4, "taskIds"),
     ERRORS((short)5, "errors");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -139,7 +139,7 @@ public class ComponentSummary implements org.apache.thrift.TBase<ComponentSummar
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
     tmpMap.put(_Fields.TYPE, new org.apache.thrift.meta_data.FieldMetaData("type", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.TASK_IDS, new org.apache.thrift.meta_data.FieldMetaData("task_ids", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+    tmpMap.put(_Fields.TASK_IDS, new org.apache.thrift.meta_data.FieldMetaData("taskIds", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))));
     tmpMap.put(_Fields.ERRORS, new org.apache.thrift.meta_data.FieldMetaData("errors", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
@@ -156,14 +156,14 @@ public class ComponentSummary implements org.apache.thrift.TBase<ComponentSummar
     String name,
     int parallel,
     String type,
-    List<Integer> task_ids)
+    List<Integer> taskIds)
   {
     this();
     this.name = name;
     this.parallel = parallel;
     set_parallel_isSet(true);
     this.type = type;
-    this.task_ids = task_ids;
+    this.taskIds = taskIds;
   }
 
   /**
@@ -178,9 +178,9 @@ public class ComponentSummary implements org.apache.thrift.TBase<ComponentSummar
     if (other.is_set_type()) {
       this.type = other.type;
     }
-    if (other.is_set_task_ids()) {
-      List<Integer> __this__task_ids = new ArrayList<Integer>(other.task_ids);
-      this.task_ids = __this__task_ids;
+    if (other.is_set_taskIds()) {
+      List<Integer> __this__taskIds = new ArrayList<Integer>(other.taskIds);
+      this.taskIds = __this__taskIds;
     }
     if (other.is_set_errors()) {
       List<ErrorInfo> __this__errors = new ArrayList<ErrorInfo>(other.errors.size());
@@ -201,7 +201,7 @@ public class ComponentSummary implements org.apache.thrift.TBase<ComponentSummar
     set_parallel_isSet(false);
     this.parallel = 0;
     this.type = null;
-    this.task_ids = null;
+    this.taskIds = null;
     this.errors = null;
   }
 
@@ -273,41 +273,41 @@ public class ComponentSummary implements org.apache.thrift.TBase<ComponentSummar
     }
   }
 
-  public int get_task_ids_size() {
-    return (this.task_ids == null) ? 0 : this.task_ids.size();
+  public int get_taskIds_size() {
+    return (this.taskIds == null) ? 0 : this.taskIds.size();
   }
 
-  public java.util.Iterator<Integer> get_task_ids_iterator() {
-    return (this.task_ids == null) ? null : this.task_ids.iterator();
+  public java.util.Iterator<Integer> get_taskIds_iterator() {
+    return (this.taskIds == null) ? null : this.taskIds.iterator();
   }
 
-  public void add_to_task_ids(int elem) {
-    if (this.task_ids == null) {
-      this.task_ids = new ArrayList<Integer>();
+  public void add_to_taskIds(int elem) {
+    if (this.taskIds == null) {
+      this.taskIds = new ArrayList<Integer>();
     }
-    this.task_ids.add(elem);
+    this.taskIds.add(elem);
   }
 
-  public List<Integer> get_task_ids() {
-    return this.task_ids;
+  public List<Integer> get_taskIds() {
+    return this.taskIds;
   }
 
-  public void set_task_ids(List<Integer> task_ids) {
-    this.task_ids = task_ids;
+  public void set_taskIds(List<Integer> taskIds) {
+    this.taskIds = taskIds;
   }
 
-  public void unset_task_ids() {
-    this.task_ids = null;
+  public void unset_taskIds() {
+    this.taskIds = null;
   }
 
-  /** Returns true if field task_ids is set (has been assigned a value) and false otherwise */
-  public boolean is_set_task_ids() {
-    return this.task_ids != null;
+  /** Returns true if field taskIds is set (has been assigned a value) and false otherwise */
+  public boolean is_set_taskIds() {
+    return this.taskIds != null;
   }
 
-  public void set_task_ids_isSet(boolean value) {
+  public void set_taskIds_isSet(boolean value) {
     if (!value) {
-      this.task_ids = null;
+      this.taskIds = null;
     }
   }
 
@@ -377,9 +377,9 @@ public class ComponentSummary implements org.apache.thrift.TBase<ComponentSummar
 
     case TASK_IDS:
       if (value == null) {
-        unset_task_ids();
+        unset_taskIds();
       } else {
-        set_task_ids((List<Integer>)value);
+        set_taskIds((List<Integer>)value);
       }
       break;
 
@@ -406,7 +406,7 @@ public class ComponentSummary implements org.apache.thrift.TBase<ComponentSummar
       return get_type();
 
     case TASK_IDS:
-      return get_task_ids();
+      return get_taskIds();
 
     case ERRORS:
       return get_errors();
@@ -429,7 +429,7 @@ public class ComponentSummary implements org.apache.thrift.TBase<ComponentSummar
     case TYPE:
       return is_set_type();
     case TASK_IDS:
-      return is_set_task_ids();
+      return is_set_taskIds();
     case ERRORS:
       return is_set_errors();
     }
@@ -476,12 +476,12 @@ public class ComponentSummary implements org.apache.thrift.TBase<ComponentSummar
         return false;
     }
 
-    boolean this_present_task_ids = true && this.is_set_task_ids();
-    boolean that_present_task_ids = true && that.is_set_task_ids();
-    if (this_present_task_ids || that_present_task_ids) {
-      if (!(this_present_task_ids && that_present_task_ids))
+    boolean this_present_taskIds = true && this.is_set_taskIds();
+    boolean that_present_taskIds = true && that.is_set_taskIds();
+    if (this_present_taskIds || that_present_taskIds) {
+      if (!(this_present_taskIds && that_present_taskIds))
         return false;
-      if (!this.task_ids.equals(that.task_ids))
+      if (!this.taskIds.equals(that.taskIds))
         return false;
     }
 
@@ -516,10 +516,10 @@ public class ComponentSummary implements org.apache.thrift.TBase<ComponentSummar
     if (present_type)
       list.add(type);
 
-    boolean present_task_ids = true && (is_set_task_ids());
-    list.add(present_task_ids);
-    if (present_task_ids)
-      list.add(task_ids);
+    boolean present_taskIds = true && (is_set_taskIds());
+    list.add(present_taskIds);
+    if (present_taskIds)
+      list.add(taskIds);
 
     boolean present_errors = true && (is_set_errors());
     list.add(present_errors);
@@ -567,12 +567,12 @@ public class ComponentSummary implements org.apache.thrift.TBase<ComponentSummar
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(is_set_task_ids()).compareTo(other.is_set_task_ids());
+    lastComparison = Boolean.valueOf(is_set_taskIds()).compareTo(other.is_set_taskIds());
     if (lastComparison != 0) {
       return lastComparison;
     }
-    if (is_set_task_ids()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.task_ids, other.task_ids);
+    if (is_set_taskIds()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.taskIds, other.taskIds);
       if (lastComparison != 0) {
         return lastComparison;
       }
@@ -594,11 +594,11 @@ public class ComponentSummary implements org.apache.thrift.TBase<ComponentSummar
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -627,11 +627,11 @@ public class ComponentSummary implements org.apache.thrift.TBase<ComponentSummar
     }
     first = false;
     if (!first) sb.append(", ");
-    sb.append("task_ids:");
-    if (this.task_ids == null) {
+    sb.append("taskIds:");
+    if (this.taskIds == null) {
       sb.append("null");
     } else {
-      sb.append(this.task_ids);
+      sb.append(this.taskIds);
     }
     first = false;
     if (is_set_errors()) {
@@ -648,22 +648,22 @@ public class ComponentSummary implements org.apache.thrift.TBase<ComponentSummar
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_name()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'name' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'name' is unset! Struct:" + toString());
     }
 
     if (!is_set_parallel()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'parallel' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'parallel' is unset! Struct:" + toString());
     }
 
     if (!is_set_type()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'type' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'type' is unset! Struct:" + toString());
     }
 
-    if (!is_set_task_ids()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'task_ids' is unset! Struct:" + toString());
+    if (!is_set_taskIds()) {
+      throw new TProtocolException("Required field 'taskIds' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -672,7 +672,7 @@ public class ComponentSummary implements org.apache.thrift.TBase<ComponentSummar
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -682,7 +682,7 @@ public class ComponentSummary implements org.apache.thrift.TBase<ComponentSummar
       // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
       __isset_bitfield = 0;
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -695,7 +695,7 @@ public class ComponentSummary implements org.apache.thrift.TBase<ComponentSummar
 
   private static class ComponentSummaryStandardScheme extends StandardScheme<ComponentSummary> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, ComponentSummary struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, ComponentSummary struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -732,17 +732,17 @@ public class ComponentSummary implements org.apache.thrift.TBase<ComponentSummar
           case 4: // TASK_IDS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list214 = iprot.readListBegin();
-                struct.task_ids = new ArrayList<Integer>(_list214.size);
-                int _elem215;
-                for (int _i216 = 0; _i216 < _list214.size; ++_i216)
+                org.apache.thrift.protocol.TList _list162 = iprot.readListBegin();
+                struct.taskIds = new ArrayList<Integer>(_list162.size);
+                int _elem163;
+                for (int _i164 = 0; _i164 < _list162.size; ++_i164)
                 {
-                  _elem215 = iprot.readI32();
-                  struct.task_ids.add(_elem215);
+                  _elem163 = iprot.readI32();
+                  struct.taskIds.add(_elem163);
                 }
                 iprot.readListEnd();
               }
-              struct.set_task_ids_isSet(true);
+              struct.set_taskIds_isSet(true);
             } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
@@ -750,14 +750,14 @@ public class ComponentSummary implements org.apache.thrift.TBase<ComponentSummar
           case 5: // ERRORS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list217 = iprot.readListBegin();
-                struct.errors = new ArrayList<ErrorInfo>(_list217.size);
-                ErrorInfo _elem218;
-                for (int _i219 = 0; _i219 < _list217.size; ++_i219)
+                org.apache.thrift.protocol.TList _list165 = iprot.readListBegin();
+                struct.errors = new ArrayList<ErrorInfo>(_list165.size);
+                ErrorInfo _elem166;
+                for (int _i167 = 0; _i167 < _list165.size; ++_i167)
                 {
-                  _elem218 = new ErrorInfo();
-                  _elem218.read(iprot);
-                  struct.errors.add(_elem218);
+                  _elem166 = new ErrorInfo();
+                  _elem166.read(iprot);
+                  struct.errors.add(_elem166);
                 }
                 iprot.readListEnd();
               }
@@ -775,7 +775,7 @@ public class ComponentSummary implements org.apache.thrift.TBase<ComponentSummar
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, ComponentSummary struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, ComponentSummary struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -792,13 +792,13 @@ public class ComponentSummary implements org.apache.thrift.TBase<ComponentSummar
         oprot.writeString(struct.type);
         oprot.writeFieldEnd();
       }
-      if (struct.task_ids != null) {
+      if (struct.taskIds != null) {
         oprot.writeFieldBegin(TASK_IDS_FIELD_DESC);
         {
-          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, struct.task_ids.size()));
-          for (int _iter220 : struct.task_ids)
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, struct.taskIds.size()));
+          for (int _iter168 : struct.taskIds)
           {
-            oprot.writeI32(_iter220);
+            oprot.writeI32(_iter168);
           }
           oprot.writeListEnd();
         }
@@ -809,9 +809,9 @@ public class ComponentSummary implements org.apache.thrift.TBase<ComponentSummar
           oprot.writeFieldBegin(ERRORS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.errors.size()));
-            for (ErrorInfo _iter221 : struct.errors)
+            for (ErrorInfo _iter169 : struct.errors)
             {
-              _iter221.write(oprot);
+              _iter169.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -833,16 +833,16 @@ public class ComponentSummary implements org.apache.thrift.TBase<ComponentSummar
   private static class ComponentSummaryTupleScheme extends TupleScheme<ComponentSummary> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, ComponentSummary struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, ComponentSummary struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       oprot.writeString(struct.name);
       oprot.writeI32(struct.parallel);
       oprot.writeString(struct.type);
       {
-        oprot.writeI32(struct.task_ids.size());
-        for (int _iter222 : struct.task_ids)
+        oprot.writeI32(struct.taskIds.size());
+        for (int _iter170 : struct.taskIds)
         {
-          oprot.writeI32(_iter222);
+          oprot.writeI32(_iter170);
         }
       }
       BitSet optionals = new BitSet();
@@ -853,16 +853,16 @@ public class ComponentSummary implements org.apache.thrift.TBase<ComponentSummar
       if (struct.is_set_errors()) {
         {
           oprot.writeI32(struct.errors.size());
-          for (ErrorInfo _iter223 : struct.errors)
+          for (ErrorInfo _iter171 : struct.errors)
           {
-            _iter223.write(oprot);
+            _iter171.write(oprot);
           }
         }
       }
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, ComponentSummary struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, ComponentSummary struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       struct.name = iprot.readString();
       struct.set_name_isSet(true);
@@ -871,27 +871,27 @@ public class ComponentSummary implements org.apache.thrift.TBase<ComponentSummar
       struct.type = iprot.readString();
       struct.set_type_isSet(true);
       {
-        org.apache.thrift.protocol.TList _list224 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, iprot.readI32());
-        struct.task_ids = new ArrayList<Integer>(_list224.size);
-        int _elem225;
-        for (int _i226 = 0; _i226 < _list224.size; ++_i226)
+        org.apache.thrift.protocol.TList _list172 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, iprot.readI32());
+        struct.taskIds = new ArrayList<Integer>(_list172.size);
+        int _elem173;
+        for (int _i174 = 0; _i174 < _list172.size; ++_i174)
         {
-          _elem225 = iprot.readI32();
-          struct.task_ids.add(_elem225);
+          _elem173 = iprot.readI32();
+          struct.taskIds.add(_elem173);
         }
       }
-      struct.set_task_ids_isSet(true);
+      struct.set_taskIds_isSet(true);
       BitSet incoming = iprot.readBitSet(1);
       if (incoming.get(0)) {
         {
-          org.apache.thrift.protocol.TList _list227 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.errors = new ArrayList<ErrorInfo>(_list227.size);
-          ErrorInfo _elem228;
-          for (int _i229 = 0; _i229 < _list227.size; ++_i229)
+          org.apache.thrift.protocol.TList _list175 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.errors = new ArrayList<ErrorInfo>(_list175.size);
+          ErrorInfo _elem176;
+          for (int _i177 = 0; _i177 < _list175.size; ++_i177)
           {
-            _elem228 = new ErrorInfo();
-            _elem228.read(iprot);
-            struct.errors.add(_elem228);
+            _elem176 = new ErrorInfo();
+            _elem176.read(iprot);
+            struct.errors.add(_elem176);
           }
         }
         struct.set_errors_isSet(true);
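
The tuple scheme halves above show the compact RPC encoding Thrift generates: required fields (name, parallel, type, taskIds) are written positionally with no per-field headers, optional fields such as errors are guarded by a BitSet of presence flags, and containers are written as a bare length followed by raw elements. A minimal sketch of the same write pattern for a hypothetical struct with one required string and one optional int list (the Example names are illustrative, not generated code):

    import java.util.BitSet;
    import java.util.List;
    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TTupleProtocol;

    class ExampleTupleWriter {
        static void write(TTupleProtocol oprot, String id, List<Integer> scores) throws TException {
            oprot.writeString(id);                // required field: positional, no header
            BitSet optionals = new BitSet();
            if (scores != null) {
                optionals.set(0);                 // bit 0 flags the optional list
            }
            oprot.writeBitSet(optionals, 1);      // one presence bit in total
            if (scores != null) {
                oprot.writeI32(scores.size());    // bare length, then raw elements
                for (int s : scores) {
                    oprot.writeI32(s);
                }
            }
        }
    }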

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/Credentials.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/Credentials.java b/jstorm-core/src/main/java/backtype/storm/generated/Credentials.java
index e2ca92d..2105c7f 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/Credentials.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/Credentials.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class Credentials implements org.apache.thrift.TBase<Credentials, Credentials._Fields>, java.io.Serializable, Cloneable, Comparable<Credentials> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Credentials");
 
@@ -278,11 +278,11 @@ public class Credentials implements org.apache.thrift.TBase<Credentials, Credent
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -302,10 +302,10 @@ public class Credentials implements org.apache.thrift.TBase<Credentials, Credent
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_creds()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'creds' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'creds' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -314,7 +314,7 @@ public class Credentials implements org.apache.thrift.TBase<Credentials, Credent
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -322,7 +322,7 @@ public class Credentials implements org.apache.thrift.TBase<Credentials, Credent
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -335,7 +335,7 @@ public class Credentials implements org.apache.thrift.TBase<Credentials, Credent
 
   private static class CredentialsStandardScheme extends StandardScheme<Credentials> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, Credentials struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, Credentials struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -348,15 +348,15 @@ public class Credentials implements org.apache.thrift.TBase<Credentials, Credent
           case 1: // CREDS
             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
               {
-                org.apache.thrift.protocol.TMap _map254 = iprot.readMapBegin();
-                struct.creds = new HashMap<String,String>(2*_map254.size);
-                String _key255;
-                String _val256;
-                for (int _i257 = 0; _i257 < _map254.size; ++_i257)
+                org.apache.thrift.protocol.TMap _map202 = iprot.readMapBegin();
+                struct.creds = new HashMap<String,String>(2*_map202.size);
+                String _key203;
+                String _val204;
+                for (int _i205 = 0; _i205 < _map202.size; ++_i205)
                 {
-                  _key255 = iprot.readString();
-                  _val256 = iprot.readString();
-                  struct.creds.put(_key255, _val256);
+                  _key203 = iprot.readString();
+                  _val204 = iprot.readString();
+                  struct.creds.put(_key203, _val204);
                 }
                 iprot.readMapEnd();
               }
@@ -374,7 +374,7 @@ public class Credentials implements org.apache.thrift.TBase<Credentials, Credent
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, Credentials struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, Credentials struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -382,10 +382,10 @@ public class Credentials implements org.apache.thrift.TBase<Credentials, Credent
         oprot.writeFieldBegin(CREDS_FIELD_DESC);
         {
           oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.creds.size()));
-          for (Map.Entry<String, String> _iter258 : struct.creds.entrySet())
+          for (Map.Entry<String, String> _iter206 : struct.creds.entrySet())
           {
-            oprot.writeString(_iter258.getKey());
-            oprot.writeString(_iter258.getValue());
+            oprot.writeString(_iter206.getKey());
+            oprot.writeString(_iter206.getValue());
           }
           oprot.writeMapEnd();
         }
@@ -406,31 +406,31 @@ public class Credentials implements org.apache.thrift.TBase<Credentials, Credent
   private static class CredentialsTupleScheme extends TupleScheme<Credentials> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, Credentials struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, Credentials struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       {
         oprot.writeI32(struct.creds.size());
-        for (Map.Entry<String, String> _iter259 : struct.creds.entrySet())
+        for (Map.Entry<String, String> _iter207 : struct.creds.entrySet())
         {
-          oprot.writeString(_iter259.getKey());
-          oprot.writeString(_iter259.getValue());
+          oprot.writeString(_iter207.getKey());
+          oprot.writeString(_iter207.getValue());
         }
       }
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, Credentials struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, Credentials struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       {
-        org.apache.thrift.protocol.TMap _map260 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-        struct.creds = new HashMap<String,String>(2*_map260.size);
-        String _key261;
-        String _val262;
-        for (int _i263 = 0; _i263 < _map260.size; ++_i263)
+        org.apache.thrift.protocol.TMap _map208 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+        struct.creds = new HashMap<String,String>(2*_map208.size);
+        String _key209;
+        String _val210;
+        for (int _i211 = 0; _i211 < _map208.size; ++_i211)
         {
-          _key261 = iprot.readString();
-          _val262 = iprot.readString();
-          struct.creds.put(_key261, _val262);
+          _key209 = iprot.readString();
+          _val210 = iprot.readString();
+          struct.creds.put(_key209, _val210);
         }
       }
       struct.set_creds_isSet(true);
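
The Credentials tuple scheme applies the same convention to maps: the writer emits a bare size followed by alternating key/value strings, and the reader rebuilds the TMap header itself from the statically known element types plus the size read off the wire. A sketch of the reading side under those assumptions (the helper name is hypothetical):

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TMap;
    import org.apache.thrift.protocol.TTupleProtocol;
    import org.apache.thrift.protocol.TType;

    class StringMapTupleReader {
        static Map<String, String> read(TTupleProtocol iprot) throws TException {
            // Only the size travels on the wire; key/value types come from the IDL.
            TMap header = new TMap(TType.STRING, TType.STRING, iprot.readI32());
            Map<String, String> result = new HashMap<String, String>(2 * header.size);
            for (int i = 0; i < header.size; ++i) {
                String key = iprot.readString();
                String val = iprot.readString();
                result.put(key, val);
            }
            return result;
        }
    }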

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/DRPCExecutionException.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/DRPCExecutionException.java b/jstorm-core/src/main/java/backtype/storm/generated/DRPCExecutionException.java
index 3d8502f..b6bc34f 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/DRPCExecutionException.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/DRPCExecutionException.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class DRPCExecutionException extends TException implements org.apache.thrift.TBase<DRPCExecutionException, DRPCExecutionException._Fields>, java.io.Serializable, Cloneable, Comparable<DRPCExecutionException> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DRPCExecutionException");
 
@@ -264,11 +264,11 @@ public class DRPCExecutionException extends TException implements org.apache.thr
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -288,10 +288,10 @@ public class DRPCExecutionException extends TException implements org.apache.thr
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_msg()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'msg' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'msg' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -300,7 +300,7 @@ public class DRPCExecutionException extends TException implements org.apache.thr
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -308,7 +308,7 @@ public class DRPCExecutionException extends TException implements org.apache.thr
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -321,7 +321,7 @@ public class DRPCExecutionException extends TException implements org.apache.thr
 
   private static class DRPCExecutionExceptionStandardScheme extends StandardScheme<DRPCExecutionException> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, DRPCExecutionException struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, DRPCExecutionException struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -348,7 +348,7 @@ public class DRPCExecutionException extends TException implements org.apache.thr
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, DRPCExecutionException struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, DRPCExecutionException struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -372,13 +372,13 @@ public class DRPCExecutionException extends TException implements org.apache.thr
   private static class DRPCExecutionExceptionTupleScheme extends TupleScheme<DRPCExecutionException> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, DRPCExecutionException struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, DRPCExecutionException struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       oprot.writeString(struct.msg);
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, DRPCExecutionException struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, DRPCExecutionException struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       struct.msg = iprot.readString();
       struct.set_msg_isSet(true);
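
Every struct in this commit carries the writeObject/readObject pair seen above, so plain java.io serialization of a generated type delegates to Thrift's compact protocol. The same round trip can be done explicitly with TSerializer/TDeserializer; a sketch using Credentials purely as an example type:

    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.TException;
    import org.apache.thrift.TSerializer;
    import org.apache.thrift.protocol.TCompactProtocol;
    import backtype.storm.generated.Credentials;

    class CompactRoundTrip {
        static Credentials copy(Credentials original) throws TException {
            // write() invokes validate(), so the required 'creds' field must be set.
            byte[] bytes = new TSerializer(new TCompactProtocol.Factory()).serialize(original);
            Credentials restored = new Credentials();
            new TDeserializer(new TCompactProtocol.Factory()).deserialize(restored, bytes);
            return restored;
        }
    }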

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/DRPCRequest.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/DRPCRequest.java b/jstorm-core/src/main/java/backtype/storm/generated/DRPCRequest.java
index 00448f5..dd9c307 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/DRPCRequest.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/DRPCRequest.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class DRPCRequest implements org.apache.thrift.TBase<DRPCRequest, DRPCRequest._Fields>, java.io.Serializable, Cloneable, Comparable<DRPCRequest> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DRPCRequest");
 
@@ -337,11 +337,11 @@ public class DRPCRequest implements org.apache.thrift.TBase<DRPCRequest, DRPCReq
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -369,14 +369,14 @@ public class DRPCRequest implements org.apache.thrift.TBase<DRPCRequest, DRPCReq
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_func_args()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'func_args' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'func_args' is unset! Struct:" + toString());
     }
 
     if (!is_set_request_id()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'request_id' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'request_id' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -385,7 +385,7 @@ public class DRPCRequest implements org.apache.thrift.TBase<DRPCRequest, DRPCReq
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -393,7 +393,7 @@ public class DRPCRequest implements org.apache.thrift.TBase<DRPCRequest, DRPCReq
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -406,7 +406,7 @@ public class DRPCRequest implements org.apache.thrift.TBase<DRPCRequest, DRPCReq
 
   private static class DRPCRequestStandardScheme extends StandardScheme<DRPCRequest> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, DRPCRequest struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, DRPCRequest struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -441,7 +441,7 @@ public class DRPCRequest implements org.apache.thrift.TBase<DRPCRequest, DRPCReq
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, DRPCRequest struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, DRPCRequest struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -470,14 +470,14 @@ public class DRPCRequest implements org.apache.thrift.TBase<DRPCRequest, DRPCReq
   private static class DRPCRequestTupleScheme extends TupleScheme<DRPCRequest> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, DRPCRequest struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, DRPCRequest struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       oprot.writeString(struct.func_args);
       oprot.writeString(struct.request_id);
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, DRPCRequest struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, DRPCRequest struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       struct.func_args = iprot.readString();
       struct.set_func_args_isSet(true);
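
Because validate() runs at the top of every standard-scheme write, a DRPCRequest with either required field unset fails fast at serialization time instead of producing a half-written struct. A usage sketch (TSerializer defaults to the binary protocol):

    import org.apache.thrift.TException;
    import org.apache.thrift.TSerializer;
    import backtype.storm.generated.DRPCRequest;

    class ValidateSketch {
        static void demo() {
            DRPCRequest req = new DRPCRequest();
            req.set_func_args("some-args");
            // request_id is still unset, so serialization throws
            // TProtocolException("Required field 'request_id' is unset! ...").
            try {
                new TSerializer().serialize(req);
            } catch (TException expected) {
                // fail-fast behavior enforced by validate()
            }
        }
    }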

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/DistributedRPC.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/DistributedRPC.java b/jstorm-core/src/main/java/backtype/storm/generated/DistributedRPC.java
index ff3c112..2b1c1cb 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/DistributedRPC.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/DistributedRPC.java
@@ -34,18 +34,18 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class DistributedRPC {
 
   public interface Iface {
 
-    public String execute(String functionName, String funcArgs) throws DRPCExecutionException, org.apache.thrift.TException;
+    public String execute(String functionName, String funcArgs) throws DRPCExecutionException, AuthorizationException, TException;
 
   }
 
   public interface AsyncIface {
 
-    public void execute(String functionName, String funcArgs, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void execute(String functionName, String funcArgs, AsyncMethodCallback resultHandler) throws TException;
 
   }
 
@@ -69,13 +69,13 @@ public class DistributedRPC {
       super(iprot, oprot);
     }
 
-    public String execute(String functionName, String funcArgs) throws DRPCExecutionException, org.apache.thrift.TException
+    public String execute(String functionName, String funcArgs) throws DRPCExecutionException, AuthorizationException, TException
     {
       send_execute(functionName, funcArgs);
       return recv_execute();
     }
 
-    public void send_execute(String functionName, String funcArgs) throws org.apache.thrift.TException
+    public void send_execute(String functionName, String funcArgs) throws TException
     {
       execute_args args = new execute_args();
       args.set_functionName(functionName);
@@ -83,7 +83,7 @@ public class DistributedRPC {
       sendBase("execute", args);
     }
 
-    public String recv_execute() throws DRPCExecutionException, org.apache.thrift.TException
+    public String recv_execute() throws DRPCExecutionException, AuthorizationException, TException
     {
       execute_result result = new execute_result();
       receiveBase(result, "execute");
@@ -93,6 +93,9 @@ public class DistributedRPC {
       if (result.e != null) {
         throw result.e;
       }
+      if (result.aze != null) {
+        throw result.aze;
+      }
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "execute failed: unknown result");
     }
 
@@ -114,7 +117,7 @@ public class DistributedRPC {
       super(protocolFactory, clientManager, transport);
     }
 
-    public void execute(String functionName, String funcArgs, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+    public void execute(String functionName, String funcArgs, AsyncMethodCallback resultHandler) throws TException {
       checkReady();
       execute_call method_call = new execute_call(functionName, funcArgs, resultHandler, this, ___protocolFactory, ___transport);
       this.___currentMethod = method_call;
@@ -124,13 +127,13 @@ public class DistributedRPC {
     public static class execute_call extends org.apache.thrift.async.TAsyncMethodCall {
       private String functionName;
       private String funcArgs;
-      public execute_call(String functionName, String funcArgs, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+      public execute_call(String functionName, String funcArgs, AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws TException {
         super(client, protocolFactory, transport, resultHandler, false);
         this.functionName = functionName;
         this.funcArgs = funcArgs;
       }
 
-      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws TException {
         prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("execute", org.apache.thrift.protocol.TMessageType.CALL, 0));
         execute_args args = new execute_args();
         args.set_functionName(functionName);
@@ -139,8 +142,8 @@ public class DistributedRPC {
         prot.writeMessageEnd();
       }
 
-      public String getResult() throws DRPCExecutionException, org.apache.thrift.TException {
-        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+      public String getResult() throws DRPCExecutionException, AuthorizationException, TException {
+        if (getState() != State.RESPONSE_READ) {
           throw new IllegalStateException("Method call not finished!");
         }
         org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
@@ -179,12 +182,14 @@ public class DistributedRPC {
         return false;
       }
 
-      public execute_result getResult(I iface, execute_args args) throws org.apache.thrift.TException {
+      public execute_result getResult(I iface, execute_args args) throws TException {
         execute_result result = new execute_result();
         try {
           result.success = iface.execute(args.functionName, args.funcArgs);
         } catch (DRPCExecutionException e) {
           result.e = e;
+        } catch (AuthorizationException aze) {
+          result.aze = aze;
         }
         return result;
       }
@@ -239,6 +244,11 @@ public class DistributedRPC {
                         result.set_e_isSet(true);
                         msg = result;
             }
+            else             if (e instanceof AuthorizationException) {
+                        result.aze = (AuthorizationException) e;
+                        result.set_aze_isSet(true);
+                        msg = result;
+            }
              else 
             {
               msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
@@ -259,7 +269,7 @@ public class DistributedRPC {
         return false;
       }
 
-      public void start(I iface, execute_args args, org.apache.thrift.async.AsyncMethodCallback<String> resultHandler) throws TException {
+      public void start(I iface, execute_args args, AsyncMethodCallback<String> resultHandler) throws TException {
         iface.execute(args.functionName, args.funcArgs,resultHandler);
       }
     }
@@ -568,11 +578,11 @@ public class DistributedRPC {
       return _Fields.findByThriftId(fieldId);
     }
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
       schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
       schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
     }
 
@@ -600,7 +610,7 @@ public class DistributedRPC {
       return sb.toString();
     }
 
-    public void validate() throws org.apache.thrift.TException {
+    public void validate() throws TException {
       // check for required fields
       // check for sub-struct validity
     }
@@ -608,7 +618,7 @@ public class DistributedRPC {
     private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
       try {
         write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-      } catch (org.apache.thrift.TException te) {
+      } catch (TException te) {
         throw new java.io.IOException(te);
       }
     }
@@ -616,7 +626,7 @@ public class DistributedRPC {
     private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
       try {
         read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-      } catch (org.apache.thrift.TException te) {
+      } catch (TException te) {
         throw new java.io.IOException(te);
       }
     }
@@ -629,7 +639,7 @@ public class DistributedRPC {
 
     private static class execute_argsStandardScheme extends StandardScheme<execute_args> {
 
-      public void read(org.apache.thrift.protocol.TProtocol iprot, execute_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, execute_args struct) throws TException {
         org.apache.thrift.protocol.TField schemeField;
         iprot.readStructBegin();
         while (true)
@@ -664,7 +674,7 @@ public class DistributedRPC {
         struct.validate();
       }
 
-      public void write(org.apache.thrift.protocol.TProtocol oprot, execute_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, execute_args struct) throws TException {
         struct.validate();
 
         oprot.writeStructBegin(STRUCT_DESC);
@@ -693,7 +703,7 @@ public class DistributedRPC {
     private static class execute_argsTupleScheme extends TupleScheme<execute_args> {
 
       @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, execute_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, execute_args struct) throws TException {
         TTupleProtocol oprot = (TTupleProtocol) prot;
         BitSet optionals = new BitSet();
         if (struct.is_set_functionName()) {
@@ -712,7 +722,7 @@ public class DistributedRPC {
       }
 
       @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, execute_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, execute_args struct) throws TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
@@ -733,6 +743,7 @@ public class DistributedRPC {
 
     private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0);
     private static final org.apache.thrift.protocol.TField E_FIELD_DESC = new org.apache.thrift.protocol.TField("e", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+    private static final org.apache.thrift.protocol.TField AZE_FIELD_DESC = new org.apache.thrift.protocol.TField("aze", org.apache.thrift.protocol.TType.STRUCT, (short)2);
 
     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
     static {
@@ -742,11 +753,13 @@ public class DistributedRPC {
 
     private String success; // required
     private DRPCExecutionException e; // required
+    private AuthorizationException aze; // required
 
     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
     public enum _Fields implements org.apache.thrift.TFieldIdEnum {
       SUCCESS((short)0, "success"),
-      E((short)1, "e");
+      E((short)1, "e"),
+      AZE((short)2, "aze");
 
       private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -765,6 +778,8 @@ public class DistributedRPC {
             return SUCCESS;
           case 1: // E
             return E;
+          case 2: // AZE
+            return AZE;
           default:
             return null;
         }
@@ -812,6 +827,8 @@ public class DistributedRPC {
           new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
       tmpMap.put(_Fields.E, new org.apache.thrift.meta_data.FieldMetaData("e", org.apache.thrift.TFieldRequirementType.DEFAULT, 
           new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+      tmpMap.put(_Fields.AZE, new org.apache.thrift.meta_data.FieldMetaData("aze", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
       org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(execute_result.class, metaDataMap);
     }
@@ -821,11 +838,13 @@ public class DistributedRPC {
 
     public execute_result(
       String success,
-      DRPCExecutionException e)
+      DRPCExecutionException e,
+      AuthorizationException aze)
     {
       this();
       this.success = success;
       this.e = e;
+      this.aze = aze;
     }
 
     /**
@@ -838,6 +857,9 @@ public class DistributedRPC {
       if (other.is_set_e()) {
         this.e = new DRPCExecutionException(other.e);
       }
+      if (other.is_set_aze()) {
+        this.aze = new AuthorizationException(other.aze);
+      }
     }
 
     public execute_result deepCopy() {
@@ -848,6 +870,7 @@ public class DistributedRPC {
     public void clear() {
       this.success = null;
       this.e = null;
+      this.aze = null;
     }
 
     public String get_success() {
@@ -896,6 +919,29 @@ public class DistributedRPC {
       }
     }
 
+    public AuthorizationException get_aze() {
+      return this.aze;
+    }
+
+    public void set_aze(AuthorizationException aze) {
+      this.aze = aze;
+    }
+
+    public void unset_aze() {
+      this.aze = null;
+    }
+
+    /** Returns true if field aze is set (has been assigned a value) and false otherwise */
+    public boolean is_set_aze() {
+      return this.aze != null;
+    }
+
+    public void set_aze_isSet(boolean value) {
+      if (!value) {
+        this.aze = null;
+      }
+    }
+
     public void setFieldValue(_Fields field, Object value) {
       switch (field) {
       case SUCCESS:
@@ -914,6 +960,14 @@ public class DistributedRPC {
         }
         break;
 
+      case AZE:
+        if (value == null) {
+          unset_aze();
+        } else {
+          set_aze((AuthorizationException)value);
+        }
+        break;
+
       }
     }
 
@@ -925,6 +979,9 @@ public class DistributedRPC {
       case E:
         return get_e();
 
+      case AZE:
+        return get_aze();
+
       }
       throw new IllegalStateException();
     }
@@ -940,6 +997,8 @@ public class DistributedRPC {
         return is_set_success();
       case E:
         return is_set_e();
+      case AZE:
+        return is_set_aze();
       }
       throw new IllegalStateException();
     }
@@ -975,6 +1034,15 @@ public class DistributedRPC {
           return false;
       }
 
+      boolean this_present_aze = true && this.is_set_aze();
+      boolean that_present_aze = true && that.is_set_aze();
+      if (this_present_aze || that_present_aze) {
+        if (!(this_present_aze && that_present_aze))
+          return false;
+        if (!this.aze.equals(that.aze))
+          return false;
+      }
+
       return true;
     }
 
@@ -992,6 +1060,11 @@ public class DistributedRPC {
       if (present_e)
         list.add(e);
 
+      boolean present_aze = true && (is_set_aze());
+      list.add(present_aze);
+      if (present_aze)
+        list.add(aze);
+
       return list.hashCode();
     }
 
@@ -1023,6 +1096,16 @@ public class DistributedRPC {
           return lastComparison;
         }
       }
+      lastComparison = Boolean.valueOf(is_set_aze()).compareTo(other.is_set_aze());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (is_set_aze()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.aze, other.aze);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
       return 0;
     }
 
@@ -1030,11 +1113,11 @@ public class DistributedRPC {
       return _Fields.findByThriftId(fieldId);
     }
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
       schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
       schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
       }
 
@@ -1058,11 +1141,19 @@ public class DistributedRPC {
         sb.append(this.e);
       }
       first = false;
+      if (!first) sb.append(", ");
+      sb.append("aze:");
+      if (this.aze == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.aze);
+      }
+      first = false;
       sb.append(")");
       return sb.toString();
     }
 
-    public void validate() throws org.apache.thrift.TException {
+    public void validate() throws TException {
       // check for required fields
       // check for sub-struct validity
     }
@@ -1070,7 +1161,7 @@ public class DistributedRPC {
     private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
       try {
         write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-      } catch (org.apache.thrift.TException te) {
+      } catch (TException te) {
         throw new java.io.IOException(te);
       }
     }
@@ -1078,7 +1169,7 @@ public class DistributedRPC {
     private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
       try {
         read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-      } catch (org.apache.thrift.TException te) {
+      } catch (TException te) {
         throw new java.io.IOException(te);
       }
     }
@@ -1091,7 +1182,7 @@ public class DistributedRPC {
 
     private static class execute_resultStandardScheme extends StandardScheme<execute_result> {
 
-      public void read(org.apache.thrift.protocol.TProtocol iprot, execute_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, execute_result struct) throws TException {
         org.apache.thrift.protocol.TField schemeField;
         iprot.readStructBegin();
         while (true)
@@ -1118,6 +1209,15 @@ public class DistributedRPC {
                 org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
               }
               break;
+            case 2: // AZE
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+                struct.aze = new AuthorizationException();
+                struct.aze.read(iprot);
+                struct.set_aze_isSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
             default:
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
           }
@@ -1127,7 +1227,7 @@ public class DistributedRPC {
         struct.validate();
       }
 
-      public void write(org.apache.thrift.protocol.TProtocol oprot, execute_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, execute_result struct) throws TException {
         struct.validate();
 
         oprot.writeStructBegin(STRUCT_DESC);
@@ -1141,6 +1241,11 @@ public class DistributedRPC {
           struct.e.write(oprot);
           oprot.writeFieldEnd();
         }
+        if (struct.aze != null) {
+          oprot.writeFieldBegin(AZE_FIELD_DESC);
+          struct.aze.write(oprot);
+          oprot.writeFieldEnd();
+        }
         oprot.writeFieldStop();
         oprot.writeStructEnd();
       }
@@ -1156,7 +1261,7 @@ public class DistributedRPC {
     private static class execute_resultTupleScheme extends TupleScheme<execute_result> {
 
       @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, execute_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, execute_result struct) throws TException {
         TTupleProtocol oprot = (TTupleProtocol) prot;
         BitSet optionals = new BitSet();
         if (struct.is_set_success()) {
@@ -1165,19 +1270,25 @@ public class DistributedRPC {
         if (struct.is_set_e()) {
           optionals.set(1);
         }
-        oprot.writeBitSet(optionals, 2);
+        if (struct.is_set_aze()) {
+          optionals.set(2);
+        }
+        oprot.writeBitSet(optionals, 3);
         if (struct.is_set_success()) {
           oprot.writeString(struct.success);
         }
         if (struct.is_set_e()) {
           struct.e.write(oprot);
         }
+        if (struct.is_set_aze()) {
+          struct.aze.write(oprot);
+        }
       }
 
       @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, execute_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, execute_result struct) throws TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
-        BitSet incoming = iprot.readBitSet(2);
+        BitSet incoming = iprot.readBitSet(3);
         if (incoming.get(0)) {
           struct.success = iprot.readString();
           struct.set_success_isSet(true);
@@ -1187,6 +1298,11 @@ public class DistributedRPC {
           struct.e.read(iprot);
           struct.set_e_isSet(true);
         }
+        if (incoming.get(2)) {
+          struct.aze = new AuthorizationException();
+          struct.aze.read(iprot);
+          struct.set_aze_isSet(true);
+        }
       }
     }
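
The net effect of the DistributedRPC changes is a third outcome for execute(): besides a success string and a DRPCExecutionException, the result struct now carries an optional aze field that recv_execute() rethrows on the client. Callers therefore need an extra catch arm; a sketch against the updated Iface, with transport setup elided:

    import org.apache.thrift.TException;
    import backtype.storm.generated.AuthorizationException;
    import backtype.storm.generated.DRPCExecutionException;
    import backtype.storm.generated.DistributedRPC;

    class DrpcCallSketch {
        static String call(DistributedRPC.Iface client, String func, String args) throws TException {
            try {
                return client.execute(func, args);
            } catch (DRPCExecutionException e) {
                // the topology reported a failure for this request
                return null;
            } catch (AuthorizationException aze) {
                // new in this release: caller is not permitted to invoke this function
                return null;
            }
        }
    }

Note that the tuple scheme now writes a 3-bit presence BitSet where it previously wrote 2 bits, so client and server stubs must be regenerated together for this change.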
 


http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/InvalidTopologyException.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/InvalidTopologyException.java b/jstorm-core/src/main/java/backtype/storm/generated/InvalidTopologyException.java
index 3d5424a..4f8ed3d 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/InvalidTopologyException.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/InvalidTopologyException.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class InvalidTopologyException extends TException implements org.apache.thrift.TBase<InvalidTopologyException, InvalidTopologyException._Fields>, java.io.Serializable, Cloneable, Comparable<InvalidTopologyException> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("InvalidTopologyException");
 
@@ -264,11 +264,11 @@ public class InvalidTopologyException extends TException implements org.apache.t
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -288,10 +288,10 @@ public class InvalidTopologyException extends TException implements org.apache.t
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_msg()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'msg' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'msg' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -300,7 +300,7 @@ public class InvalidTopologyException extends TException implements org.apache.t
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -308,7 +308,7 @@ public class InvalidTopologyException extends TException implements org.apache.t
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -321,7 +321,7 @@ public class InvalidTopologyException extends TException implements org.apache.t
 
   private static class InvalidTopologyExceptionStandardScheme extends StandardScheme<InvalidTopologyException> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, InvalidTopologyException struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, InvalidTopologyException struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -348,7 +348,7 @@ public class InvalidTopologyException extends TException implements org.apache.t
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, InvalidTopologyException struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, InvalidTopologyException struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -372,13 +372,13 @@ public class InvalidTopologyException extends TException implements org.apache.t
   private static class InvalidTopologyExceptionTupleScheme extends TupleScheme<InvalidTopologyException> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, InvalidTopologyException struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, InvalidTopologyException struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       oprot.writeString(struct.msg);
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, InvalidTopologyException struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, InvalidTopologyException struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       struct.msg = iprot.readString();
       struct.set_msg_isSet(true);

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/JavaObject.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/JavaObject.java b/jstorm-core/src/main/java/backtype/storm/generated/JavaObject.java
index 5998993..8739a1a 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/JavaObject.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/JavaObject.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class JavaObject implements org.apache.thrift.TBase<JavaObject, JavaObject._Fields>, java.io.Serializable, Cloneable, Comparable<JavaObject> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("JavaObject");
 
@@ -357,11 +357,11 @@ public class JavaObject implements org.apache.thrift.TBase<JavaObject, JavaObjec
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -389,14 +389,14 @@ public class JavaObject implements org.apache.thrift.TBase<JavaObject, JavaObjec
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_full_class_name()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'full_class_name' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'full_class_name' is unset! Struct:" + toString());
     }
 
     if (!is_set_args_list()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'args_list' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'args_list' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -405,7 +405,7 @@ public class JavaObject implements org.apache.thrift.TBase<JavaObject, JavaObjec
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -413,7 +413,7 @@ public class JavaObject implements org.apache.thrift.TBase<JavaObject, JavaObjec
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -426,7 +426,7 @@ public class JavaObject implements org.apache.thrift.TBase<JavaObject, JavaObjec
 
   private static class JavaObjectStandardScheme extends StandardScheme<JavaObject> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, JavaObject struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, JavaObject struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -472,7 +472,7 @@ public class JavaObject implements org.apache.thrift.TBase<JavaObject, JavaObjec
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, JavaObject struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, JavaObject struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -508,7 +508,7 @@ public class JavaObject implements org.apache.thrift.TBase<JavaObject, JavaObjec
   private static class JavaObjectTupleScheme extends TupleScheme<JavaObject> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, JavaObject struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, JavaObject struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       oprot.writeString(struct.full_class_name);
       {
@@ -521,7 +521,7 @@ public class JavaObject implements org.apache.thrift.TBase<JavaObject, JavaObjec
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, JavaObject struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, JavaObject struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       struct.full_class_name = iprot.readString();
       struct.set_full_class_name_isSet(true);

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/JavaObjectArg.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/JavaObjectArg.java b/jstorm-core/src/main/java/backtype/storm/generated/JavaObjectArg.java
index 4469306..c883d6e 100755
--- a/jstorm-core/src/main/java/backtype/storm/generated/JavaObjectArg.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/JavaObjectArg.java
@@ -232,7 +232,7 @@ public class JavaObjectArg extends org.apache.thrift.TUnion<JavaObjectArg, JavaO
   }
 
   @Override
-  protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TField field) throws org.apache.thrift.TException {
+  protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TField field) throws TException {
     _Fields setField = _Fields.findByThriftId(field.id);
     if (setField != null) {
       switch (setField) {
@@ -300,7 +300,7 @@ public class JavaObjectArg extends org.apache.thrift.TUnion<JavaObjectArg, JavaO
   }
 
   @Override
-  protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     switch (setField_) {
       case INT_ARG:
         Integer int_arg = (Integer)value_;
@@ -332,7 +332,7 @@ public class JavaObjectArg extends org.apache.thrift.TUnion<JavaObjectArg, JavaO
   }
 
   @Override
-  protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, short fieldID) throws org.apache.thrift.TException {
+  protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, short fieldID) throws TException {
     _Fields setField = _Fields.findByThriftId(fieldID);
     if (setField != null) {
       switch (setField) {
@@ -369,7 +369,7 @@ public class JavaObjectArg extends org.apache.thrift.TUnion<JavaObjectArg, JavaO
   }
 
   @Override
-  protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     switch (setField_) {
       case INT_ARG:
         Integer int_arg = (Integer)value_;
@@ -596,7 +596,7 @@ public class JavaObjectArg extends org.apache.thrift.TUnion<JavaObjectArg, JavaO
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -605,7 +605,7 @@ public class JavaObjectArg extends org.apache.thrift.TUnion<JavaObjectArg, JavaO
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
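
JavaObjectArg is a Thrift union, so exactly one of the branches dispatched in the switches above is populated at a time, tracked by setField_. A minimal usage sketch, assuming the setFieldValue/getSetField methods inherited from org.apache.thrift.TUnion in libthrift 0.9.x:

    import backtype.storm.generated.JavaObjectArg;

    public class UnionSketch {
        public static void main(String[] args) {
            JavaObjectArg arg = new JavaObjectArg();
            // Select the INT_ARG branch; a TUnion carries exactly one field at a time.
            arg.setFieldValue(JavaObjectArg._Fields.INT_ARG, 5);
            System.out.println(arg.getSetField()); // -> INT_ARG
        }
    }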

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/KillOptions.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/KillOptions.java b/jstorm-core/src/main/java/backtype/storm/generated/KillOptions.java
index 7abb762..c80a222 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/KillOptions.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/KillOptions.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class KillOptions implements org.apache.thrift.TBase<KillOptions, KillOptions._Fields>, java.io.Serializable, Cloneable, Comparable<KillOptions> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("KillOptions");
 
@@ -259,11 +259,11 @@ public class KillOptions implements org.apache.thrift.TBase<KillOptions, KillOpt
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -281,7 +281,7 @@ public class KillOptions implements org.apache.thrift.TBase<KillOptions, KillOpt
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     // check for sub-struct validity
   }
@@ -289,7 +289,7 @@ public class KillOptions implements org.apache.thrift.TBase<KillOptions, KillOpt
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -299,7 +299,7 @@ public class KillOptions implements org.apache.thrift.TBase<KillOptions, KillOpt
       // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
       __isset_bitfield = 0;
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -312,7 +312,7 @@ public class KillOptions implements org.apache.thrift.TBase<KillOptions, KillOpt
 
   private static class KillOptionsStandardScheme extends StandardScheme<KillOptions> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, KillOptions struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, KillOptions struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -339,7 +339,7 @@ public class KillOptions implements org.apache.thrift.TBase<KillOptions, KillOpt
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, KillOptions struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, KillOptions struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -363,7 +363,7 @@ public class KillOptions implements org.apache.thrift.TBase<KillOptions, KillOpt
   private static class KillOptionsTupleScheme extends TupleScheme<KillOptions> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, KillOptions struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, KillOptions struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       BitSet optionals = new BitSet();
       if (struct.is_set_wait_secs()) {
@@ -376,7 +376,7 @@ public class KillOptions implements org.apache.thrift.TBase<KillOptions, KillOpt
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, KillOptions struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, KillOptions struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       BitSet incoming = iprot.readBitSet(1);
       if (incoming.get(0)) {
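
The writeObject/readObject hooks in these structs route plain Java serialization through Thrift's compact protocol. The same round trip can be driven directly with the Thrift 0.9.x helpers; a minimal sketch, assuming set_wait_secs/get_wait_secs accessors that follow the generated snake_case pattern visible in the hunks:

    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.TException;
    import org.apache.thrift.TSerializer;
    import org.apache.thrift.protocol.TCompactProtocol;
    import backtype.storm.generated.KillOptions;

    public class KillOptionsRoundTrip {
        public static void main(String[] args) throws TException {
            KillOptions opts = new KillOptions();
            opts.set_wait_secs(10); // optional field; drives the BitSet in the tuple scheme

            byte[] bytes = new TSerializer(new TCompactProtocol.Factory()).serialize(opts);
            KillOptions copy = new KillOptions();
            new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, bytes);
            System.out.println(copy.get_wait_secs()); // -> 10
        }
    }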

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/LocalStateData.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/LocalStateData.java b/jstorm-core/src/main/java/backtype/storm/generated/LocalStateData.java
index 2fd49b4..dca9e60 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/LocalStateData.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/LocalStateData.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class LocalStateData implements org.apache.thrift.TBase<LocalStateData, LocalStateData._Fields>, java.io.Serializable, Cloneable, Comparable<LocalStateData> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("LocalStateData");
 
@@ -289,11 +289,11 @@ public class LocalStateData implements org.apache.thrift.TBase<LocalStateData, L
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -313,10 +313,10 @@ public class LocalStateData implements org.apache.thrift.TBase<LocalStateData, L
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_serialized_parts()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'serialized_parts' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'serialized_parts' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -325,7 +325,7 @@ public class LocalStateData implements org.apache.thrift.TBase<LocalStateData, L
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -333,7 +333,7 @@ public class LocalStateData implements org.apache.thrift.TBase<LocalStateData, L
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -346,7 +346,7 @@ public class LocalStateData implements org.apache.thrift.TBase<LocalStateData, L
 
   private static class LocalStateDataStandardScheme extends StandardScheme<LocalStateData> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, LocalStateData struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, LocalStateData struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -359,16 +359,16 @@ public class LocalStateData implements org.apache.thrift.TBase<LocalStateData, L
           case 1: // SERIALIZED_PARTS
             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
               {
-                org.apache.thrift.protocol.TMap _map264 = iprot.readMapBegin();
-                struct.serialized_parts = new HashMap<String,ThriftSerializedObject>(2*_map264.size);
-                String _key265;
-                ThriftSerializedObject _val266;
-                for (int _i267 = 0; _i267 < _map264.size; ++_i267)
+                org.apache.thrift.protocol.TMap _map212 = iprot.readMapBegin();
+                struct.serialized_parts = new HashMap<String,ThriftSerializedObject>(2*_map212.size);
+                String _key213;
+                ThriftSerializedObject _val214;
+                for (int _i215 = 0; _i215 < _map212.size; ++_i215)
                 {
-                  _key265 = iprot.readString();
-                  _val266 = new ThriftSerializedObject();
-                  _val266.read(iprot);
-                  struct.serialized_parts.put(_key265, _val266);
+                  _key213 = iprot.readString();
+                  _val214 = new ThriftSerializedObject();
+                  _val214.read(iprot);
+                  struct.serialized_parts.put(_key213, _val214);
                 }
                 iprot.readMapEnd();
               }
@@ -386,7 +386,7 @@ public class LocalStateData implements org.apache.thrift.TBase<LocalStateData, L
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, LocalStateData struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, LocalStateData struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -394,10 +394,10 @@ public class LocalStateData implements org.apache.thrift.TBase<LocalStateData, L
         oprot.writeFieldBegin(SERIALIZED_PARTS_FIELD_DESC);
         {
           oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.serialized_parts.size()));
-          for (Map.Entry<String, ThriftSerializedObject> _iter268 : struct.serialized_parts.entrySet())
+          for (Map.Entry<String, ThriftSerializedObject> _iter216 : struct.serialized_parts.entrySet())
           {
-            oprot.writeString(_iter268.getKey());
-            _iter268.getValue().write(oprot);
+            oprot.writeString(_iter216.getKey());
+            _iter216.getValue().write(oprot);
           }
           oprot.writeMapEnd();
         }
@@ -418,32 +418,32 @@ public class LocalStateData implements org.apache.thrift.TBase<LocalStateData, L
   private static class LocalStateDataTupleScheme extends TupleScheme<LocalStateData> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, LocalStateData struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, LocalStateData struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       {
         oprot.writeI32(struct.serialized_parts.size());
-        for (Map.Entry<String, ThriftSerializedObject> _iter269 : struct.serialized_parts.entrySet())
+        for (Map.Entry<String, ThriftSerializedObject> _iter217 : struct.serialized_parts.entrySet())
         {
-          oprot.writeString(_iter269.getKey());
-          _iter269.getValue().write(oprot);
+          oprot.writeString(_iter217.getKey());
+          _iter217.getValue().write(oprot);
         }
       }
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, LocalStateData struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, LocalStateData struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       {
-        org.apache.thrift.protocol.TMap _map270 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.serialized_parts = new HashMap<String,ThriftSerializedObject>(2*_map270.size);
-        String _key271;
-        ThriftSerializedObject _val272;
-        for (int _i273 = 0; _i273 < _map270.size; ++_i273)
+        org.apache.thrift.protocol.TMap _map218 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.serialized_parts = new HashMap<String,ThriftSerializedObject>(2*_map218.size);
+        String _key219;
+        ThriftSerializedObject _val220;
+        for (int _i221 = 0; _i221 < _map218.size; ++_i221)
         {
-          _key271 = iprot.readString();
-          _val272 = new ThriftSerializedObject();
-          _val272.read(iprot);
-          struct.serialized_parts.put(_key271, _val272);
+          _key219 = iprot.readString();
+          _val220 = new ThriftSerializedObject();
+          _val220.read(iprot);
+          struct.serialized_parts.put(_key219, _val220);
         }
       }
       struct.set_serialized_parts_isSet(true);
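
Most of the LocalStateData hunks are the Thrift compiler renumbering its temporaries (_map264 becomes _map212, and so on): the counter is shared across the whole generation run, so schema changes elsewhere in the IDL shift every later temp name without changing behavior. The functional contract here is the required serialized_parts field; a minimal sketch of the validate() check shown above, assuming the generated no-arg constructor:

    import org.apache.thrift.TException;
    import backtype.storm.generated.LocalStateData;

    public class ValidateSketch {
        public static void main(String[] args) {
            try {
                new LocalStateData().validate(); // serialized_parts is unset
            } catch (TException e) {
                // TProtocolException: Required field 'serialized_parts' is unset! ...
                System.out.println(e.getMessage());
            }
        }
    }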

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/MetricInfo.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/MetricInfo.java b/jstorm-core/src/main/java/backtype/storm/generated/MetricInfo.java
index 2703777..c332217 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/MetricInfo.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/MetricInfo.java
@@ -34,13 +34,11 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class MetricInfo implements org.apache.thrift.TBase<MetricInfo, MetricInfo._Fields>, java.io.Serializable, Cloneable, Comparable<MetricInfo> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("MetricInfo");
 
-  private static final org.apache.thrift.protocol.TField BASE_METRIC_FIELD_DESC = new org.apache.thrift.protocol.TField("baseMetric", org.apache.thrift.protocol.TType.MAP, (short)1);
-  private static final org.apache.thrift.protocol.TField INPUT_METRIC_FIELD_DESC = new org.apache.thrift.protocol.TField("inputMetric", org.apache.thrift.protocol.TType.MAP, (short)2);
-  private static final org.apache.thrift.protocol.TField OUTPUT_METRIC_FIELD_DESC = new org.apache.thrift.protocol.TField("outputMetric", org.apache.thrift.protocol.TType.MAP, (short)3);
+  private static final org.apache.thrift.protocol.TField METRICS_FIELD_DESC = new org.apache.thrift.protocol.TField("metrics", org.apache.thrift.protocol.TType.MAP, (short)1);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -48,15 +46,11 @@ public class MetricInfo implements org.apache.thrift.TBase<MetricInfo, MetricInf
     schemes.put(TupleScheme.class, new MetricInfoTupleSchemeFactory());
   }
 
-  private Map<String,MetricWindow> baseMetric; // required
-  private Map<String,Map<String,MetricWindow>> inputMetric; // optional
-  private Map<String,Map<String,MetricWindow>> outputMetric; // optional
+  private Map<String,Map<Integer,MetricSnapshot>> metrics; // optional
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    BASE_METRIC((short)1, "baseMetric"),
-    INPUT_METRIC((short)2, "inputMetric"),
-    OUTPUT_METRIC((short)3, "outputMetric");
+    METRICS((short)1, "metrics");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -71,12 +65,8 @@ public class MetricInfo implements org.apache.thrift.TBase<MetricInfo, MetricInf
      */
     public static _Fields findByThriftId(int fieldId) {
       switch(fieldId) {
-        case 1: // BASE_METRIC
-          return BASE_METRIC;
-        case 2: // INPUT_METRIC
-          return INPUT_METRIC;
-        case 3: // OUTPUT_METRIC
-          return OUTPUT_METRIC;
+        case 1: // METRICS
+          return METRICS;
         default:
           return null;
       }
@@ -117,26 +107,16 @@ public class MetricInfo implements org.apache.thrift.TBase<MetricInfo, MetricInf
   }
 
   // isset id assignments
-  private static final _Fields optionals[] = {_Fields.INPUT_METRIC,_Fields.OUTPUT_METRIC};
+  private static final _Fields optionals[] = {_Fields.METRICS};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.BASE_METRIC, new org.apache.thrift.meta_data.FieldMetaData("baseMetric", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
-            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
-            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, MetricWindow.class))));
-    tmpMap.put(_Fields.INPUT_METRIC, new org.apache.thrift.meta_data.FieldMetaData("inputMetric", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
-            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
-            new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
-                new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
-                new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, MetricWindow.class)))));
-    tmpMap.put(_Fields.OUTPUT_METRIC, new org.apache.thrift.meta_data.FieldMetaData("outputMetric", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+    tmpMap.put(_Fields.METRICS, new org.apache.thrift.meta_data.FieldMetaData("metrics", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
             new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
-                new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
-                new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, MetricWindow.class)))));
+                new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32), 
+                new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, MetricSnapshot.class)))));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(MetricInfo.class, metaDataMap);
   }
@@ -144,83 +124,35 @@ public class MetricInfo implements org.apache.thrift.TBase<MetricInfo, MetricInf
   public MetricInfo() {
   }
 
-  public MetricInfo(
-    Map<String,MetricWindow> baseMetric)
-  {
-    this();
-    this.baseMetric = baseMetric;
-  }
-
   /**
    * Performs a deep copy on <i>other</i>.
    */
   public MetricInfo(MetricInfo other) {
-    if (other.is_set_baseMetric()) {
-      Map<String,MetricWindow> __this__baseMetric = new HashMap<String,MetricWindow>(other.baseMetric.size());
-      for (Map.Entry<String, MetricWindow> other_element : other.baseMetric.entrySet()) {
-
-        String other_element_key = other_element.getKey();
-        MetricWindow other_element_value = other_element.getValue();
-
-        String __this__baseMetric_copy_key = other_element_key;
-
-        MetricWindow __this__baseMetric_copy_value = new MetricWindow(other_element_value);
-
-        __this__baseMetric.put(__this__baseMetric_copy_key, __this__baseMetric_copy_value);
-      }
-      this.baseMetric = __this__baseMetric;
-    }
-    if (other.is_set_inputMetric()) {
-      Map<String,Map<String,MetricWindow>> __this__inputMetric = new HashMap<String,Map<String,MetricWindow>>(other.inputMetric.size());
-      for (Map.Entry<String, Map<String,MetricWindow>> other_element : other.inputMetric.entrySet()) {
-
-        String other_element_key = other_element.getKey();
-        Map<String,MetricWindow> other_element_value = other_element.getValue();
-
-        String __this__inputMetric_copy_key = other_element_key;
-
-        Map<String,MetricWindow> __this__inputMetric_copy_value = new HashMap<String,MetricWindow>(other_element_value.size());
-        for (Map.Entry<String, MetricWindow> other_element_value_element : other_element_value.entrySet()) {
-
-          String other_element_value_element_key = other_element_value_element.getKey();
-          MetricWindow other_element_value_element_value = other_element_value_element.getValue();
-
-          String __this__inputMetric_copy_value_copy_key = other_element_value_element_key;
-
-          MetricWindow __this__inputMetric_copy_value_copy_value = new MetricWindow(other_element_value_element_value);
-
-          __this__inputMetric_copy_value.put(__this__inputMetric_copy_value_copy_key, __this__inputMetric_copy_value_copy_value);
-        }
-
-        __this__inputMetric.put(__this__inputMetric_copy_key, __this__inputMetric_copy_value);
-      }
-      this.inputMetric = __this__inputMetric;
-    }
-    if (other.is_set_outputMetric()) {
-      Map<String,Map<String,MetricWindow>> __this__outputMetric = new HashMap<String,Map<String,MetricWindow>>(other.outputMetric.size());
-      for (Map.Entry<String, Map<String,MetricWindow>> other_element : other.outputMetric.entrySet()) {
+    if (other.is_set_metrics()) {
+      Map<String,Map<Integer,MetricSnapshot>> __this__metrics = new HashMap<String,Map<Integer,MetricSnapshot>>(other.metrics.size());
+      for (Map.Entry<String, Map<Integer,MetricSnapshot>> other_element : other.metrics.entrySet()) {
 
         String other_element_key = other_element.getKey();
-        Map<String,MetricWindow> other_element_value = other_element.getValue();
+        Map<Integer,MetricSnapshot> other_element_value = other_element.getValue();
 
-        String __this__outputMetric_copy_key = other_element_key;
+        String __this__metrics_copy_key = other_element_key;
 
-        Map<String,MetricWindow> __this__outputMetric_copy_value = new HashMap<String,MetricWindow>(other_element_value.size());
-        for (Map.Entry<String, MetricWindow> other_element_value_element : other_element_value.entrySet()) {
+        Map<Integer,MetricSnapshot> __this__metrics_copy_value = new HashMap<Integer,MetricSnapshot>(other_element_value.size());
+        for (Map.Entry<Integer, MetricSnapshot> other_element_value_element : other_element_value.entrySet()) {
 
-          String other_element_value_element_key = other_element_value_element.getKey();
-          MetricWindow other_element_value_element_value = other_element_value_element.getValue();
+          Integer other_element_value_element_key = other_element_value_element.getKey();
+          MetricSnapshot other_element_value_element_value = other_element_value_element.getValue();
 
-          String __this__outputMetric_copy_value_copy_key = other_element_value_element_key;
+          Integer __this__metrics_copy_value_copy_key = other_element_value_element_key;
 
-          MetricWindow __this__outputMetric_copy_value_copy_value = new MetricWindow(other_element_value_element_value);
+          MetricSnapshot __this__metrics_copy_value_copy_value = new MetricSnapshot(other_element_value_element_value);
 
-          __this__outputMetric_copy_value.put(__this__outputMetric_copy_value_copy_key, __this__outputMetric_copy_value_copy_value);
+          __this__metrics_copy_value.put(__this__metrics_copy_value_copy_key, __this__metrics_copy_value_copy_value);
         }
 
-        __this__outputMetric.put(__this__outputMetric_copy_key, __this__outputMetric_copy_value);
+        __this__metrics.put(__this__metrics_copy_key, __this__metrics_copy_value);
       }
-      this.outputMetric = __this__outputMetric;
+      this.metrics = __this__metrics;
     }
   }
 
@@ -230,136 +162,50 @@ public class MetricInfo implements org.apache.thrift.TBase<MetricInfo, MetricInf
 
   @Override
   public void clear() {
-    this.baseMetric = null;
-    this.inputMetric = null;
-    this.outputMetric = null;
-  }
-
-  public int get_baseMetric_size() {
-    return (this.baseMetric == null) ? 0 : this.baseMetric.size();
-  }
-
-  public void put_to_baseMetric(String key, MetricWindow val) {
-    if (this.baseMetric == null) {
-      this.baseMetric = new HashMap<String,MetricWindow>();
-    }
-    this.baseMetric.put(key, val);
-  }
-
-  public Map<String,MetricWindow> get_baseMetric() {
-    return this.baseMetric;
+    this.metrics = null;
   }
 
-  public void set_baseMetric(Map<String,MetricWindow> baseMetric) {
-    this.baseMetric = baseMetric;
+  public int get_metrics_size() {
+    return (this.metrics == null) ? 0 : this.metrics.size();
   }
 
-  public void unset_baseMetric() {
-    this.baseMetric = null;
-  }
-
-  /** Returns true if field baseMetric is set (has been assigned a value) and false otherwise */
-  public boolean is_set_baseMetric() {
-    return this.baseMetric != null;
-  }
-
-  public void set_baseMetric_isSet(boolean value) {
-    if (!value) {
-      this.baseMetric = null;
+  public void put_to_metrics(String key, Map<Integer,MetricSnapshot> val) {
+    if (this.metrics == null) {
+      this.metrics = new HashMap<String,Map<Integer,MetricSnapshot>>();
     }
+    this.metrics.put(key, val);
   }
 
-  public int get_inputMetric_size() {
-    return (this.inputMetric == null) ? 0 : this.inputMetric.size();
+  public Map<String,Map<Integer,MetricSnapshot>> get_metrics() {
+    return this.metrics;
   }
 
-  public void put_to_inputMetric(String key, Map<String,MetricWindow> val) {
-    if (this.inputMetric == null) {
-      this.inputMetric = new HashMap<String,Map<String,MetricWindow>>();
-    }
-    this.inputMetric.put(key, val);
+  public void set_metrics(Map<String,Map<Integer,MetricSnapshot>> metrics) {
+    this.metrics = metrics;
   }
 
-  public Map<String,Map<String,MetricWindow>> get_inputMetric() {
-    return this.inputMetric;
+  public void unset_metrics() {
+    this.metrics = null;
   }
 
-  public void set_inputMetric(Map<String,Map<String,MetricWindow>> inputMetric) {
-    this.inputMetric = inputMetric;
+  /** Returns true if field metrics is set (has been assigned a value) and false otherwise */
+  public boolean is_set_metrics() {
+    return this.metrics != null;
   }
 
-  public void unset_inputMetric() {
-    this.inputMetric = null;
-  }
-
-  /** Returns true if field inputMetric is set (has been assigned a value) and false otherwise */
-  public boolean is_set_inputMetric() {
-    return this.inputMetric != null;
-  }
-
-  public void set_inputMetric_isSet(boolean value) {
+  public void set_metrics_isSet(boolean value) {
     if (!value) {
-      this.inputMetric = null;
-    }
-  }
-
-  public int get_outputMetric_size() {
-    return (this.outputMetric == null) ? 0 : this.outputMetric.size();
-  }
-
-  public void put_to_outputMetric(String key, Map<String,MetricWindow> val) {
-    if (this.outputMetric == null) {
-      this.outputMetric = new HashMap<String,Map<String,MetricWindow>>();
-    }
-    this.outputMetric.put(key, val);
-  }
-
-  public Map<String,Map<String,MetricWindow>> get_outputMetric() {
-    return this.outputMetric;
-  }
-
-  public void set_outputMetric(Map<String,Map<String,MetricWindow>> outputMetric) {
-    this.outputMetric = outputMetric;
-  }
-
-  public void unset_outputMetric() {
-    this.outputMetric = null;
-  }
-
-  /** Returns true if field outputMetric is set (has been assigned a value) and false otherwise */
-  public boolean is_set_outputMetric() {
-    return this.outputMetric != null;
-  }
-
-  public void set_outputMetric_isSet(boolean value) {
-    if (!value) {
-      this.outputMetric = null;
+      this.metrics = null;
     }
   }
 
   public void setFieldValue(_Fields field, Object value) {
     switch (field) {
-    case BASE_METRIC:
+    case METRICS:
       if (value == null) {
-        unset_baseMetric();
+        unset_metrics();
       } else {
-        set_baseMetric((Map<String,MetricWindow>)value);
-      }
-      break;
-
-    case INPUT_METRIC:
-      if (value == null) {
-        unset_inputMetric();
-      } else {
-        set_inputMetric((Map<String,Map<String,MetricWindow>>)value);
-      }
-      break;
-
-    case OUTPUT_METRIC:
-      if (value == null) {
-        unset_outputMetric();
-      } else {
-        set_outputMetric((Map<String,Map<String,MetricWindow>>)value);
+        set_metrics((Map<String,Map<Integer,MetricSnapshot>>)value);
       }
       break;
 
@@ -368,14 +214,8 @@ public class MetricInfo implements org.apache.thrift.TBase<MetricInfo, MetricInf
 
   public Object getFieldValue(_Fields field) {
     switch (field) {
-    case BASE_METRIC:
-      return get_baseMetric();
-
-    case INPUT_METRIC:
-      return get_inputMetric();
-
-    case OUTPUT_METRIC:
-      return get_outputMetric();
+    case METRICS:
+      return get_metrics();
 
     }
     throw new IllegalStateException();
@@ -388,12 +228,8 @@ public class MetricInfo implements org.apache.thrift.TBase<MetricInfo, MetricInf
     }
 
     switch (field) {
-    case BASE_METRIC:
-      return is_set_baseMetric();
-    case INPUT_METRIC:
-      return is_set_inputMetric();
-    case OUTPUT_METRIC:
-      return is_set_outputMetric();
+    case METRICS:
+      return is_set_metrics();
     }
     throw new IllegalStateException();
   }
@@ -411,30 +247,12 @@ public class MetricInfo implements org.apache.thrift.TBase<MetricInfo, MetricInf
     if (that == null)
       return false;
 
-    boolean this_present_baseMetric = true && this.is_set_baseMetric();
-    boolean that_present_baseMetric = true && that.is_set_baseMetric();
-    if (this_present_baseMetric || that_present_baseMetric) {
-      if (!(this_present_baseMetric && that_present_baseMetric))
+    boolean this_present_metrics = true && this.is_set_metrics();
+    boolean that_present_metrics = true && that.is_set_metrics();
+    if (this_present_metrics || that_present_metrics) {
+      if (!(this_present_metrics && that_present_metrics))
         return false;
-      if (!this.baseMetric.equals(that.baseMetric))
-        return false;
-    }
-
-    boolean this_present_inputMetric = true && this.is_set_inputMetric();
-    boolean that_present_inputMetric = true && that.is_set_inputMetric();
-    if (this_present_inputMetric || that_present_inputMetric) {
-      if (!(this_present_inputMetric && that_present_inputMetric))
-        return false;
-      if (!this.inputMetric.equals(that.inputMetric))
-        return false;
-    }
-
-    boolean this_present_outputMetric = true && this.is_set_outputMetric();
-    boolean that_present_outputMetric = true && that.is_set_outputMetric();
-    if (this_present_outputMetric || that_present_outputMetric) {
-      if (!(this_present_outputMetric && that_present_outputMetric))
-        return false;
-      if (!this.outputMetric.equals(that.outputMetric))
+      if (!this.metrics.equals(that.metrics))
         return false;
     }
 
@@ -445,20 +263,10 @@ public class MetricInfo implements org.apache.thrift.TBase<MetricInfo, MetricInf
   public int hashCode() {
     List<Object> list = new ArrayList<Object>();
 
-    boolean present_baseMetric = true && (is_set_baseMetric());
-    list.add(present_baseMetric);
-    if (present_baseMetric)
-      list.add(baseMetric);
-
-    boolean present_inputMetric = true && (is_set_inputMetric());
-    list.add(present_inputMetric);
-    if (present_inputMetric)
-      list.add(inputMetric);
-
-    boolean present_outputMetric = true && (is_set_outputMetric());
-    list.add(present_outputMetric);
-    if (present_outputMetric)
-      list.add(outputMetric);
+    boolean present_metrics = true && (is_set_metrics());
+    list.add(present_metrics);
+    if (present_metrics)
+      list.add(metrics);
 
     return list.hashCode();
   }
@@ -471,32 +279,12 @@ public class MetricInfo implements org.apache.thrift.TBase<MetricInfo, MetricInf
 
     int lastComparison = 0;
 
-    lastComparison = Boolean.valueOf(is_set_baseMetric()).compareTo(other.is_set_baseMetric());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_baseMetric()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.baseMetric, other.baseMetric);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_inputMetric()).compareTo(other.is_set_inputMetric());
+    lastComparison = Boolean.valueOf(is_set_metrics()).compareTo(other.is_set_metrics());
     if (lastComparison != 0) {
       return lastComparison;
     }
-    if (is_set_inputMetric()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.inputMetric, other.inputMetric);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_outputMetric()).compareTo(other.is_set_outputMetric());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_outputMetric()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.outputMetric, other.outputMetric);
+    if (is_set_metrics()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.metrics, other.metrics);
       if (lastComparison != 0) {
         return lastComparison;
       }
@@ -508,11 +296,11 @@ public class MetricInfo implements org.apache.thrift.TBase<MetricInfo, MetricInf
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -521,30 +309,12 @@ public class MetricInfo implements org.apache.thrift.TBase<MetricInfo, MetricInf
     StringBuilder sb = new StringBuilder("MetricInfo(");
     boolean first = true;
 
-    sb.append("baseMetric:");
-    if (this.baseMetric == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.baseMetric);
-    }
-    first = false;
-    if (is_set_inputMetric()) {
-      if (!first) sb.append(", ");
-      sb.append("inputMetric:");
-      if (this.inputMetric == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.inputMetric);
-      }
-      first = false;
-    }
-    if (is_set_outputMetric()) {
-      if (!first) sb.append(", ");
-      sb.append("outputMetric:");
-      if (this.outputMetric == null) {
+    if (is_set_metrics()) {
+      sb.append("metrics:");
+      if (this.metrics == null) {
         sb.append("null");
       } else {
-        sb.append(this.outputMetric);
+        sb.append(this.metrics);
       }
       first = false;
     }
@@ -552,19 +322,15 @@ public class MetricInfo implements org.apache.thrift.TBase<MetricInfo, MetricInf
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
-    if (!is_set_baseMetric()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'baseMetric' is unset! Struct:" + toString());
-    }
-
     // check for sub-struct validity
   }
 
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -572,7 +338,7 @@ public class MetricInfo implements org.apache.thrift.TBase<MetricInfo, MetricInf
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -585,7 +351,7 @@ public class MetricInfo implements org.apache.thrift.TBase<MetricInfo, MetricInf
 
   private static class MetricInfoStandardScheme extends StandardScheme<MetricInfo> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, MetricInfo struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, MetricInfo struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -595,89 +361,35 @@ public class MetricInfo implements org.apache.thrift.TBase<MetricInfo, MetricInf
           break;
         }
         switch (schemeField.id) {
-          case 1: // BASE_METRIC
-            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
-              {
-                org.apache.thrift.protocol.TMap _map116 = iprot.readMapBegin();
-                struct.baseMetric = new HashMap<String,MetricWindow>(2*_map116.size);
-                String _key117;
-                MetricWindow _val118;
-                for (int _i119 = 0; _i119 < _map116.size; ++_i119)
-                {
-                  _key117 = iprot.readString();
-                  _val118 = new MetricWindow();
-                  _val118.read(iprot);
-                  struct.baseMetric.put(_key117, _val118);
-                }
-                iprot.readMapEnd();
-              }
-              struct.set_baseMetric_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 2: // INPUT_METRIC
-            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
-              {
-                org.apache.thrift.protocol.TMap _map120 = iprot.readMapBegin();
-                struct.inputMetric = new HashMap<String,Map<String,MetricWindow>>(2*_map120.size);
-                String _key121;
-                Map<String,MetricWindow> _val122;
-                for (int _i123 = 0; _i123 < _map120.size; ++_i123)
-                {
-                  _key121 = iprot.readString();
-                  {
-                    org.apache.thrift.protocol.TMap _map124 = iprot.readMapBegin();
-                    _val122 = new HashMap<String,MetricWindow>(2*_map124.size);
-                    String _key125;
-                    MetricWindow _val126;
-                    for (int _i127 = 0; _i127 < _map124.size; ++_i127)
-                    {
-                      _key125 = iprot.readString();
-                      _val126 = new MetricWindow();
-                      _val126.read(iprot);
-                      _val122.put(_key125, _val126);
-                    }
-                    iprot.readMapEnd();
-                  }
-                  struct.inputMetric.put(_key121, _val122);
-                }
-                iprot.readMapEnd();
-              }
-              struct.set_inputMetric_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 3: // OUTPUT_METRIC
+          case 1: // METRICS
             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
               {
-                org.apache.thrift.protocol.TMap _map128 = iprot.readMapBegin();
-                struct.outputMetric = new HashMap<String,Map<String,MetricWindow>>(2*_map128.size);
-                String _key129;
-                Map<String,MetricWindow> _val130;
-                for (int _i131 = 0; _i131 < _map128.size; ++_i131)
+                org.apache.thrift.protocol.TMap _map124 = iprot.readMapBegin();
+                struct.metrics = new HashMap<String,Map<Integer,MetricSnapshot>>(2*_map124.size);
+                String _key125;
+                Map<Integer,MetricSnapshot> _val126;
+                for (int _i127 = 0; _i127 < _map124.size; ++_i127)
                 {
-                  _key129 = iprot.readString();
+                  _key125 = iprot.readString();
                   {
-                    org.apache.thrift.protocol.TMap _map132 = iprot.readMapBegin();
-                    _val130 = new HashMap<String,MetricWindow>(2*_map132.size);
-                    String _key133;
-                    MetricWindow _val134;
-                    for (int _i135 = 0; _i135 < _map132.size; ++_i135)
+                    org.apache.thrift.protocol.TMap _map128 = iprot.readMapBegin();
+                    _val126 = new HashMap<Integer,MetricSnapshot>(2*_map128.size);
+                    int _key129;
+                    MetricSnapshot _val130;
+                    for (int _i131 = 0; _i131 < _map128.size; ++_i131)
                     {
-                      _key133 = iprot.readString();
-                      _val134 = new MetricWindow();
-                      _val134.read(iprot);
-                      _val130.put(_key133, _val134);
+                      _key129 = iprot.readI32();
+                      _val130 = new MetricSnapshot();
+                      _val130.read(iprot);
+                      _val126.put(_key129, _val130);
                     }
                     iprot.readMapEnd();
                   }
-                  struct.outputMetric.put(_key129, _val130);
+                  struct.metrics.put(_key125, _val126);
                 }
                 iprot.readMapEnd();
               }
-              struct.set_outputMetric_isSet(true);
+              struct.set_metrics_isSet(true);
             } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
@@ -691,60 +403,24 @@ public class MetricInfo implements org.apache.thrift.TBase<MetricInfo, MetricInf
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, MetricInfo struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, MetricInfo struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
-      if (struct.baseMetric != null) {
-        oprot.writeFieldBegin(BASE_METRIC_FIELD_DESC);
-        {
-          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.baseMetric.size()));
-          for (Map.Entry<String, MetricWindow> _iter136 : struct.baseMetric.entrySet())
+      if (struct.metrics != null) {
+        if (struct.is_set_metrics()) {
+          oprot.writeFieldBegin(METRICS_FIELD_DESC);
           {
-            oprot.writeString(_iter136.getKey());
-            _iter136.getValue().write(oprot);
-          }
-          oprot.writeMapEnd();
-        }
-        oprot.writeFieldEnd();
-      }
-      if (struct.inputMetric != null) {
-        if (struct.is_set_inputMetric()) {
-          oprot.writeFieldBegin(INPUT_METRIC_FIELD_DESC);
-          {
-            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, struct.inputMetric.size()));
-            for (Map.Entry<String, Map<String,MetricWindow>> _iter137 : struct.inputMetric.entrySet())
-            {
-              oprot.writeString(_iter137.getKey());
-              {
-                oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, _iter137.getValue().size()));
-                for (Map.Entry<String, MetricWindow> _iter138 : _iter137.getValue().entrySet())
-                {
-                  oprot.writeString(_iter138.getKey());
-                  _iter138.getValue().write(oprot);
-                }
-                oprot.writeMapEnd();
-              }
-            }
-            oprot.writeMapEnd();
-          }
-          oprot.writeFieldEnd();
-        }
-      }
-      if (struct.outputMetric != null) {
-        if (struct.is_set_outputMetric()) {
-          oprot.writeFieldBegin(OUTPUT_METRIC_FIELD_DESC);
-          {
-            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, struct.outputMetric.size()));
-            for (Map.Entry<String, Map<String,MetricWindow>> _iter139 : struct.outputMetric.entrySet())
+            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, struct.metrics.size()));
+            for (Map.Entry<String, Map<Integer,MetricSnapshot>> _iter132 : struct.metrics.entrySet())
             {
-              oprot.writeString(_iter139.getKey());
+              oprot.writeString(_iter132.getKey());
               {
-                oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, _iter139.getValue().size()));
-                for (Map.Entry<String, MetricWindow> _iter140 : _iter139.getValue().entrySet())
+                oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I32, org.apache.thrift.protocol.TType.STRUCT, _iter132.getValue().size()));
+                for (Map.Entry<Integer, MetricSnapshot> _iter133 : _iter132.getValue().entrySet())
                 {
-                  oprot.writeString(_iter140.getKey());
-                  _iter140.getValue().write(oprot);
+                  oprot.writeI32(_iter133.getKey());
+                  _iter133.getValue().write(oprot);
                 }
                 oprot.writeMapEnd();
               }
@@ -769,53 +445,25 @@ public class MetricInfo implements org.apache.thrift.TBase<MetricInfo, MetricInf
   private static class MetricInfoTupleScheme extends TupleScheme<MetricInfo> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, MetricInfo struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, MetricInfo struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
-      {
-        oprot.writeI32(struct.baseMetric.size());
-        for (Map.Entry<String, MetricWindow> _iter141 : struct.baseMetric.entrySet())
-        {
-          oprot.writeString(_iter141.getKey());
-          _iter141.getValue().write(oprot);
-        }
-      }
       BitSet optionals = new BitSet();
-      if (struct.is_set_inputMetric()) {
+      if (struct.is_set_metrics()) {
         optionals.set(0);
       }
-      if (struct.is_set_outputMetric()) {
-        optionals.set(1);
-      }
-      oprot.writeBitSet(optionals, 2);
-      if (struct.is_set_inputMetric()) {
+      oprot.writeBitSet(optionals, 1);
+      if (struct.is_set_metrics()) {
         {
-          oprot.writeI32(struct.inputMetric.size());
-          for (Map.Entry<String, Map<String,MetricWindow>> _iter142 : struct.inputMetric.entrySet())
+          oprot.writeI32(struct.metrics.size());
+          for (Map.Entry<String, Map<Integer,MetricSnapshot>> _iter134 : struct.metrics.entrySet())
           {
-            oprot.writeString(_iter142.getKey());
+            oprot.writeString(_iter134.getKey());
             {
-              oprot.writeI32(_iter142.getValue().size());
-              for (Map.Entry<String, MetricWindow> _iter143 : _iter142.getValue().entrySet())
+              oprot.writeI32(_iter134.getValue().size());
+              for (Map.Entry<Integer, MetricSnapshot> _iter135 : _iter134.getValue().entrySet())
               {
-                oprot.writeString(_iter143.getKey());
-                _iter143.getValue().write(oprot);
-              }
-            }
-          }
-        }
-      }
-      if (struct.is_set_outputMetric()) {
-        {
-          oprot.writeI32(struct.outputMetric.size());
-          for (Map.Entry<String, Map<String,MetricWindow>> _iter144 : struct.outputMetric.entrySet())
-          {
-            oprot.writeString(_iter144.getKey());
-            {
-              oprot.writeI32(_iter144.getValue().size());
-              for (Map.Entry<String, MetricWindow> _iter145 : _iter144.getValue().entrySet())
-              {
-                oprot.writeString(_iter145.getKey());
-                _iter145.getValue().write(oprot);
+                oprot.writeI32(_iter135.getKey());
+                _iter135.getValue().write(oprot);
               }
             }
           }
@@ -824,76 +472,35 @@ public class MetricInfo implements org.apache.thrift.TBase<MetricInfo, MetricInf
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, MetricInfo struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, MetricInfo struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
-      {
-        org.apache.thrift.protocol.TMap _map146 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.baseMetric = new HashMap<String,MetricWindow>(2*_map146.size);
-        String _key147;
-        MetricWindow _val148;
-        for (int _i149 = 0; _i149 < _map146.size; ++_i149)
-        {
-          _key147 = iprot.readString();
-          _val148 = new MetricWindow();
-          _val148.read(iprot);
-          struct.baseMetric.put(_key147, _val148);
-        }
-      }
-      struct.set_baseMetric_isSet(true);
-      BitSet incoming = iprot.readBitSet(2);
+      BitSet incoming = iprot.readBitSet(1);
       if (incoming.get(0)) {
         {
-          org.apache.thrift.protocol.TMap _map150 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32());
-          struct.inputMetric = new HashMap<String,Map<String,MetricWindow>>(2*_map150.size);
-          String _key151;
-          Map<String,MetricWindow> _val152;
-          for (int _i153 = 0; _i153 < _map150.size; ++_i153)
-          {
-            _key151 = iprot.readString();
-            {
-              org.apache.thrift.protocol.TMap _map154 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-              _val152 = new HashMap<String,MetricWindow>(2*_map154.size);
-              String _key155;
-              MetricWindow _val156;
-              for (int _i157 = 0; _i157 < _map154.size; ++_i157)
-              {
-                _key155 = iprot.readString();
-                _val156 = new MetricWindow();
-                _val156.read(iprot);
-                _val152.put(_key155, _val156);
-              }
-            }
-            struct.inputMetric.put(_key151, _val152);
-          }
-        }
-        struct.set_inputMetric_isSet(true);
-      }
-      if (incoming.get(1)) {
-        {
-          org.apache.thrift.protocol.TMap _map158 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32());
-          struct.outputMetric = new HashMap<String,Map<String,MetricWindow>>(2*_map158.size);
-          String _key159;
-          Map<String,MetricWindow> _val160;
-          for (int _i161 = 0; _i161 < _map158.size; ++_i161)
+          org.apache.thrift.protocol.TMap _map136 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32());
+          struct.metrics = new HashMap<String,Map<Integer,MetricSnapshot>>(2*_map136.size);
+          String _key137;
+          Map<Integer,MetricSnapshot> _val138;
+          for (int _i139 = 0; _i139 < _map136.size; ++_i139)
           {
-            _key159 = iprot.readString();
+            _key137 = iprot.readString();
             {
-              org.apache.thrift.protocol.TMap _map162 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-              _val160 = new HashMap<String,MetricWindow>(2*_map162.size);
-              String _key163;
-              MetricWindow _val164;
-              for (int _i165 = 0; _i165 < _map162.size; ++_i165)
+              org.apache.thrift.protocol.TMap _map140 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I32, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+              _val138 = new HashMap<Integer,MetricSnapshot>(2*_map140.size);
+              int _key141;
+              MetricSnapshot _val142;
+              for (int _i143 = 0; _i143 < _map140.size; ++_i143)
               {
-                _key163 = iprot.readString();
-                _val164 = new MetricWindow();
-                _val164.read(iprot);
-                _val160.put(_key163, _val164);
+                _key141 = iprot.readI32();
+                _val142 = new MetricSnapshot();
+                _val142.read(iprot);
+                _val138.put(_key141, _val142);
               }
             }
-            struct.outputMetric.put(_key159, _val160);
+            struct.metrics.put(_key137, _val138);
           }
         }
-        struct.set_outputMetric_isSet(true);
+        struct.set_metrics_isSet(true);
       }
     }
   }

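For readers skimming the generated TupleScheme code above: it rebuilds the nested metrics field by reading the outer map's size, then, per string key, the inner map's size and its i32/struct entries. Below is a hand-written paraphrase of that shape in plain java.io, with hypothetical Snapshot/SnapshotFactory stand-ins for MetricSnapshot; it shows only the loop structure and the 2 * size initial-capacity convention, not the actual Thrift API.

    import java.io.DataInput;
    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    public final class NestedMapDecode {
        public interface Snapshot { void read(DataInput in) throws IOException; }
        public interface SnapshotFactory { Snapshot create(); }

        public static Map<String, Map<Integer, Snapshot>> decode(DataInput in, SnapshotFactory factory)
                throws IOException {
            int outerSize = in.readInt();                       // size of the outer map
            Map<String, Map<Integer, Snapshot>> metrics =
                    new HashMap<String, Map<Integer, Snapshot>>(2 * outerSize);
            for (int i = 0; i < outerSize; i++) {
                String key = in.readUTF();                      // string key, e.g. a metric name
                int innerSize = in.readInt();                   // size of the inner map
                Map<Integer, Snapshot> inner = new HashMap<Integer, Snapshot>(2 * innerSize);
                for (int j = 0; j < innerSize; j++) {
                    int taskId = in.readInt();                  // i32 key
                    Snapshot snap = factory.create();
                    snap.read(in);                              // struct value reads itself
                    inner.put(taskId, snap);
                }
                metrics.put(key, inner);
            }
            return metrics;
        }
    }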

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/CRC32OutputStream.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/CRC32OutputStream.java b/jstorm-core/src/main/java/backtype/storm/utils/CRC32OutputStream.java
index 7d5ce73..483f0df 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/CRC32OutputStream.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/CRC32OutputStream.java
@@ -23,11 +23,11 @@ import java.util.zip.CRC32;
 
 public class CRC32OutputStream extends OutputStream {
     private CRC32 hasher;
-    
+
     public CRC32OutputStream() {
         hasher = new CRC32();
     }
-    
+
     public long getValue() {
         return hasher.getValue();
     }
@@ -40,5 +40,5 @@ public class CRC32OutputStream extends OutputStream {
     @Override
     public void write(byte[] bytes, int start, int end) throws IOException {
         hasher.update(bytes, start, end);
-    }    
+    }
 }

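CRC32OutputStream is a write-only sink that folds every byte into a running CRC32. A minimal usage sketch, assuming nothing beyond the constructor, write(), and getValue() visible above:

    import backtype.storm.utils.CRC32OutputStream;

    public class Crc32Demo {
        public static void main(String[] args) throws Exception {
            CRC32OutputStream out = new CRC32OutputStream();
            out.write("hello jstorm".getBytes("UTF-8"));
            // getValue() exposes the running CRC32 of every byte written so far.
            System.out.println("crc32 = " + out.getValue());
            out.close();
        }
    }
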
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/ClojureTimerTask.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/ClojureTimerTask.java b/jstorm-core/src/main/java/backtype/storm/utils/ClojureTimerTask.java
index ca9b010..677cf60 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/ClojureTimerTask.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/ClojureTimerTask.java
@@ -22,14 +22,14 @@ import java.util.TimerTask;
 
 public class ClojureTimerTask extends TimerTask {
     IFn _afn;
-    
+
     public ClojureTimerTask(IFn afn) {
         super();
         _afn = afn;
     }
-    
+
     @Override
     public void run() {
         _afn.run();
-    }    
+    }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/Container.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/Container.java b/jstorm-core/src/main/java/backtype/storm/utils/Container.java
index d4edcdf..0927e7c 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/Container.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/Container.java
@@ -20,5 +20,5 @@ package backtype.storm.utils;
 import java.io.Serializable;
 
 public class Container implements Serializable {
-  public Object object;
+    public Object object;
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/DRPCClient.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/DRPCClient.java b/jstorm-core/src/main/java/backtype/storm/utils/DRPCClient.java
index b2a2a7d..03ede66 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/DRPCClient.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/DRPCClient.java
@@ -46,15 +46,15 @@ public class DRPCClient extends ThriftClient implements DistributedRPC.Iface {
         this.port = port;
         this.client = new DistributedRPC.Client(_protocol);
     }
-        
+
     public String getHost() {
         return host;
     }
-    
+
     public int getPort() {
         return port;
     }
-    
+
     public String execute(String func, String args) throws TException, DRPCExecutionException, AuthorizationException {
         return client.execute(func, args);
     }

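A minimal end-to-end sketch for DRPCClient. The (conf, host, port) constructor arguments, Utils.readStormConfig(), and close() come from the surrounding ThriftClient machinery rather than this hunk, so treat them as assumptions; 3772 is the conventional DRPC port.

    import backtype.storm.utils.DRPCClient;
    import backtype.storm.utils.Utils;
    import java.util.Map;

    public class DrpcDemo {
        public static void main(String[] args) throws Exception {
            Map conf = Utils.readStormConfig();
            // Host and port are placeholders for a real DRPC server.
            DRPCClient client = new DRPCClient(conf, "drpc-host", 3772);
            try {
                // Blocks until the DRPC topology returns a result for this request.
                System.out.println(client.execute("reach", "http://example.com"));
            } finally {
                client.close();
            }
        }
    }
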
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/DisruptorQueue.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/DisruptorQueue.java b/jstorm-core/src/main/java/backtype/storm/utils/DisruptorQueue.java
index 94768e6..330a5c6 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/DisruptorQueue.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/DisruptorQueue.java
@@ -32,13 +32,13 @@ public abstract class DisruptorQueue implements IStatefulObject {
     public static void setUseSleep(boolean useSleep) {
         DisruptorQueueImpl.setUseSleep(useSleep);
     }
-    
+
     private static boolean CAPACITY_LIMITED = false;
-    
+
     public static void setLimited(boolean limited) {
         CAPACITY_LIMITED = limited;
     }
-    
+
     public static DisruptorQueue mkInstance(String queueName, ProducerType producerType, int bufferSize, WaitStrategy wait) {
         if (CAPACITY_LIMITED == true) {
             return new DisruptorQueueImpl(queueName, producerType, bufferSize, wait);
@@ -46,35 +46,35 @@ public abstract class DisruptorQueue implements IStatefulObject {
             return new DisruptorWrapBlockingQueue(queueName, producerType, bufferSize, wait);
         }
     }
-    
+
     public abstract String getName();
-    
+
     public abstract void haltWithInterrupt();
-    
+
     public abstract Object poll();
-    
+
     public abstract Object take();
-    
+
     public abstract void consumeBatch(EventHandler<Object> handler);
-    
+
     public abstract void consumeBatchWhenAvailable(EventHandler<Object> handler);
-    
+
     public abstract void publish(Object obj);
-    
+
     public abstract void publish(Object obj, boolean block) throws InsufficientCapacityException;
-    
+
     public abstract void consumerStarted();
-    
+
     public abstract void clear();
-    
+
     public abstract long population();
-    
+
     public abstract long capacity();
-    
+
     public abstract long writePos();
-    
+
     public abstract long readPos();
-    
+
     public abstract float pctFull();
-    
+
 }

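The factory above picks the implementation: with setLimited(true), mkInstance returns the ring-buffer-backed DisruptorQueueImpl, otherwise the LinkedBlockingDeque-backed DisruptorWrapBlockingQueue. A minimal produce/consume sketch against the abstract API shown above:

    import backtype.storm.utils.DisruptorQueue;
    import com.lmax.disruptor.BlockingWaitStrategy;
    import com.lmax.disruptor.EventHandler;
    import com.lmax.disruptor.dsl.ProducerType;

    public class QueueDemo {
        public static void main(String[] args) {
            DisruptorQueue q = DisruptorQueue.mkInstance("demo", ProducerType.MULTI,
                    1024, new BlockingWaitStrategy());
            q.consumerStarted();   // required before consuming on the ring-buffer impl
            q.publish("hello");
            q.consumeBatch(new EventHandler<Object>() {
                @Override
                public void onEvent(Object event, long sequence, boolean endOfBatch)
                        throws Exception {
                    System.out.println("got: " + event);
                }
            });
        }
    }
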
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/DisruptorQueueImpl.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/DisruptorQueueImpl.java b/jstorm-core/src/main/java/backtype/storm/utils/DisruptorQueueImpl.java
index 58d8313..2941cc9 100644
--- a/jstorm-core/src/main/java/backtype/storm/utils/DisruptorQueueImpl.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/DisruptorQueueImpl.java
@@ -45,30 +45,30 @@ import com.lmax.disruptor.dsl.ProducerType;
 public class DisruptorQueueImpl extends DisruptorQueue {
     private static final Logger LOG = LoggerFactory.getLogger(DisruptorQueueImpl.class);
     static boolean useSleep = true;
-    
+
     public static void setUseSleep(boolean useSleep) {
         AbstractSequencerExt.setWaitSleep(useSleep);
     }
-    
+
     private static final Object FLUSH_CACHE = new Object();
     private static final Object INTERRUPT = new Object();
     private static final String PREFIX = "disruptor-";
-    
+
     private final String _queueName;
     private final RingBuffer<MutableObject> _buffer;
     private final Sequence _consumer;
     private final SequenceBarrier _barrier;
-    
+
     // TODO: consider having a threadlocal cache of this variable to speed up
     // reads?
     volatile boolean consumerStartedFlag = false;
-    
+
     private final HashMap<String, Object> state = new HashMap<String, Object>(4);
     private final ConcurrentLinkedQueue<Object> _cache = new ConcurrentLinkedQueue<Object>();
     private final ReentrantReadWriteLock cacheLock = new ReentrantReadWriteLock();
     private final Lock readLock = cacheLock.readLock();
     private final Lock writeLock = cacheLock.writeLock();
-    
+
     public DisruptorQueueImpl(String queueName, ProducerType producerType, int bufferSize, WaitStrategy wait) {
         this._queueName = PREFIX + queueName;
         _buffer = RingBuffer.create(producerType, new ObjectEventFactory(), bufferSize, wait);
@@ -89,19 +89,19 @@ public class DisruptorQueueImpl extends DisruptorQueue {
             }
         }
     }
-    
+
     public String getName() {
         return _queueName;
     }
-    
+
     public void consumeBatch(EventHandler<Object> handler) {
         consumeBatchToCursor(_barrier.getCursor(), handler);
     }
-    
+
     public void haltWithInterrupt() {
         publish(INTERRUPT);
     }
-    
+
     public Object poll() {
         // @@@
         // should use _cache.isEmpty, but it is slow
@@ -109,7 +109,7 @@ public class DisruptorQueueImpl extends DisruptorQueue {
         if (consumerStartedFlag == false) {
             return _cache.poll();
         }
-        
+
         final long nextSequence = _consumer.get() + 1;
         if (nextSequence <= _barrier.getCursor()) {
             MutableObject mo = _buffer.get(nextSequence);
@@ -120,7 +120,7 @@ public class DisruptorQueueImpl extends DisruptorQueue {
         }
         return null;
     }
-    
+
     public Object take() {
         // @@@
         // should use _cache.isEmpty, but it is slow
@@ -128,7 +128,7 @@ public class DisruptorQueueImpl extends DisruptorQueue {
         if (consumerStartedFlag == false) {
             return _cache.poll();
         }
-        
+
         final long nextSequence = _consumer.get() + 1;
         // final long availableSequence;
         try {
@@ -141,7 +141,7 @@ public class DisruptorQueueImpl extends DisruptorQueue {
             // throw new RuntimeException(e);
             return null;
         } catch (TimeoutException e) {
-            //LOG.error(e.getCause(), e);
+            // LOG.error(e.getCause(), e);
             return null;
         }
         MutableObject mo = _buffer.get(nextSequence);
@@ -150,7 +150,7 @@ public class DisruptorQueueImpl extends DisruptorQueue {
         mo.setObject(null);
         return ret;
     }
-    
+
     public void consumeBatchWhenAvailable(EventHandler<Object> handler) {
         try {
             final long nextSequence = _consumer.get() + 1;
@@ -165,11 +165,11 @@ public class DisruptorQueueImpl extends DisruptorQueue {
             LOG.error("InterruptedException " + e.getCause());
             return;
         } catch (TimeoutException e) {
-            //LOG.error(e.getCause(), e);
+            // LOG.error(e.getCause(), e);
             return;
         }
     }
-    
+
     public void consumeBatchToCursor(long cursor, EventHandler<Object> handler) {
         for (long curr = _consumer.get() + 1; curr <= cursor; curr++) {
             try {
@@ -202,7 +202,7 @@ public class DisruptorQueueImpl extends DisruptorQueue {
         // TODO: only set this if the consumer cursor has changed?
         _consumer.set(cursor);
     }
-    
+
     /*
      * Caches until consumerStarted is called, upon which the cache is flushed to the consumer
      */
@@ -213,15 +213,15 @@ public class DisruptorQueueImpl extends DisruptorQueue {
             throw new RuntimeException("This code should be unreachable!");
         }
     }
-    
+
     public void tryPublish(Object obj) throws InsufficientCapacityException {
         publish(obj, false);
     }
-    
+
     public void publish(Object obj, boolean block) throws InsufficientCapacityException {
-        
+
         boolean publishNow = consumerStartedFlag;
-        
+
         if (!publishNow) {
             readLock.lock();
             try {
@@ -233,12 +233,12 @@ public class DisruptorQueueImpl extends DisruptorQueue {
                 readLock.unlock();
             }
         }
-        
+
         if (publishNow) {
             publishDirect(obj, block);
         }
     }
-    
+
     protected void publishDirect(Object obj, boolean block) throws InsufficientCapacityException {
         final long id;
         if (block) {
@@ -250,41 +250,41 @@ public class DisruptorQueueImpl extends DisruptorQueue {
         m.setObject(obj);
         _buffer.publish(id);
     }
-    
+
     public void consumerStarted() {
-        
+
         writeLock.lock();
         consumerStartedFlag = true;
-        
+
         writeLock.unlock();
     }
-    
+
     public void clear() {
         while (population() != 0L) {
             poll();
         }
     }
-    
+
     public long population() {
         return (writePos() - readPos());
     }
-    
+
     public long capacity() {
         return _buffer.getBufferSize();
     }
-    
+
     public long writePos() {
         return _buffer.getCursor();
     }
-    
+
     public long readPos() {
         return _consumer.get();
     }
-    
+
     public float pctFull() {
         return (1.0F * population() / capacity());
     }
-    
+
     @Override
     public Object getState() {
         // get readPos then writePos so it's never an under-estimate
@@ -296,7 +296,7 @@ public class DisruptorQueueImpl extends DisruptorQueue {
         state.put("read_pos", rp);
         return state;
     }
-    
+
     public static class ObjectEventFactory implements EventFactory<MutableObject> {
         @Override
         public MutableObject newInstance() {

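One behavior worth calling out in DisruptorQueueImpl: until consumerStarted() flips consumerStartedFlag, publishes land in the ConcurrentLinkedQueue cache and poll()/take() serve from it, so producers can safely run before the consumer attaches. A sketch of that observable behavior; the publish-to-cache branch is partly elided from the hunk above, so this is inferred from the visible poll() path:

    import backtype.storm.utils.DisruptorQueueImpl;
    import com.lmax.disruptor.BlockingWaitStrategy;
    import com.lmax.disruptor.dsl.ProducerType;

    public class PreStartCacheDemo {
        public static void main(String[] args) {
            DisruptorQueueImpl q = new DisruptorQueueImpl(
                    "demo", ProducerType.SINGLE, 8, new BlockingWaitStrategy());
            q.publish("early");            // consumer not started: goes to the cache
            System.out.println(q.poll());  // "early", served straight from the cache
            q.consumerStarted();           // from here on, publishes hit the ring buffer
        }
    }
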
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/DisruptorWrapBlockingQueue.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/DisruptorWrapBlockingQueue.java b/jstorm-core/src/main/java/backtype/storm/utils/DisruptorWrapBlockingQueue.java
index af5618b..5831a97 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/DisruptorWrapBlockingQueue.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/DisruptorWrapBlockingQueue.java
@@ -36,33 +36,33 @@ import com.lmax.disruptor.dsl.ProducerType;
  */
 public class DisruptorWrapBlockingQueue extends DisruptorQueue {
     private static final Logger LOG = LoggerFactory.getLogger(DisruptorWrapBlockingQueue.class);
-    
+
     private static final long QUEUE_CAPACITY = 512;
     private LinkedBlockingDeque<Object> queue;
-    
+
     private String queueName;
-    
+
     public DisruptorWrapBlockingQueue(String queueName, ProducerType producerType, int bufferSize, WaitStrategy wait) {
         this.queueName = queueName;
         queue = new LinkedBlockingDeque<Object>();
     }
-    
+
     public String getName() {
         return queueName;
     }
-    
+
     // poll method
     public void consumeBatch(EventHandler<Object> handler) {
         consumeBatchToCursor(0, handler);
     }
-    
+
     public void haltWithInterrupt() {
     }
-    
+
     public Object poll() {
         return queue.poll();
     }
-    
+
     public Object take() {
         try {
             return queue.take();
@@ -70,7 +70,7 @@ public class DisruptorWrapBlockingQueue extends DisruptorQueue {
             return null;
         }
     }
-    
+
     public void drainQueue(Object object, EventHandler<Object> handler) {
         while (object != null) {
             try {
@@ -84,7 +84,7 @@ public class DisruptorWrapBlockingQueue extends DisruptorQueue {
             }
         }
     }
-    
+
     public void consumeBatchWhenAvailable(EventHandler<Object> handler) {
         Object object = queue.poll();
         if (object == null) {
@@ -96,16 +96,16 @@ public class DisruptorWrapBlockingQueue extends DisruptorQueue {
                 throw new RuntimeException(e);
             }
         }
-        
+
         drainQueue(object, handler);
-        
+
     }
-    
+
     public void consumeBatchToCursor(long cursor, EventHandler<Object> handler) {
         Object object = queue.poll();
         drainQueue(object, handler);
     }
-    
+
     /*
      * Caches until consumerStarted is called, upon which the cache is flushed to the consumer
      */
@@ -118,17 +118,17 @@ public class DisruptorWrapBlockingQueue extends DisruptorQueue {
             }
             isSuccess = queue.offer(obj);
         }
-        
+
     }
-    
+
     public void tryPublish(Object obj) throws InsufficientCapacityException {
         boolean isSuccess = queue.offer(obj);
         if (isSuccess == false) {
             throw InsufficientCapacityException.INSTANCE;
         }
-        
+
     }
-    
+
     public void publish(Object obj, boolean block) throws InsufficientCapacityException {
         if (block == true) {
             publish(obj);
@@ -136,21 +136,21 @@ public class DisruptorWrapBlockingQueue extends DisruptorQueue {
             tryPublish(obj);
         }
     }
-    
+
     public void consumerStarted() {
     }
-    
+
     private void flushCache() {
     }
-    
+
     public void clear() {
         queue.clear();
     }
-    
+
     public long population() {
         return queue.size();
     }
-    
+
     public long capacity() {
         long used = queue.size();
         if (used < QUEUE_CAPACITY) {
@@ -159,15 +159,15 @@ public class DisruptorWrapBlockingQueue extends DisruptorQueue {
             return used;
         }
     }
-    
+
     public long writePos() {
         return 0;
     }
-    
+
     public long readPos() {
         return queue.size();
     }
-    
+
     public float pctFull() {
         long used = queue.size();
         if (used < QUEUE_CAPACITY) {
@@ -176,7 +176,7 @@ public class DisruptorWrapBlockingQueue extends DisruptorQueue {
             return 1.0f;
         }
     }
-    
+
     @Override
     public Object getState() {
         Map state = new HashMap<String, Object>();
@@ -189,12 +189,12 @@ public class DisruptorWrapBlockingQueue extends DisruptorQueue {
         state.put("read_pos", rp);
         return state;
     }
-    
+
     public static class ObjectEventFactory implements EventFactory<MutableObject> {
         @Override
         public MutableObject newInstance() {
             return new MutableObject();
         }
     }
-    
+
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/ExtendedThreadPoolExecutor.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/ExtendedThreadPoolExecutor.java b/jstorm-core/src/main/java/backtype/storm/utils/ExtendedThreadPoolExecutor.java
index 4614366..e68898e 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/ExtendedThreadPoolExecutor.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/ExtendedThreadPoolExecutor.java
@@ -27,41 +27,44 @@ import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
-public class ExtendedThreadPoolExecutor extends ThreadPoolExecutor{
+public class ExtendedThreadPoolExecutor extends ThreadPoolExecutor {
 
-  public ExtendedThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue) {
-    super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue);
-  }
-
-  public ExtendedThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory) {
-    super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory);
-  }
+    public ExtendedThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue) {
+        super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue);
+    }
 
-  public ExtendedThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue, RejectedExecutionHandler handler) {
-    super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, handler);
-  }
+    public ExtendedThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue,
+            ThreadFactory threadFactory) {
+        super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory);
+    }
 
-  public ExtendedThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory, RejectedExecutionHandler handler) {
-    super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory, handler);
-  }
+    public ExtendedThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue,
+            RejectedExecutionHandler handler) {
+        super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, handler);
+    }
 
-  @Override
-  protected void afterExecute(Runnable r, Throwable t) {
-    super.afterExecute(r, t);
-    if (t == null && r instanceof Future<?>) {
-      try {
-        Object result = ((Future<?>) r).get();
-      } catch (CancellationException ce) {
-        t = ce;
-      } catch (ExecutionException ee) {
-        t = ee.getCause();
-      } catch (InterruptedException ie) {
-        // If future got interrupted exception, we want to interrupt parent thread itself.
-        Thread.currentThread().interrupt();
-      }
+    public ExtendedThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue,
+            ThreadFactory threadFactory, RejectedExecutionHandler handler) {
+        super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory, handler);
     }
-    if (t != null) {
-      Utils.handleUncaughtException(t);
+
+    @Override
+    protected void afterExecute(Runnable r, Throwable t) {
+        super.afterExecute(r, t);
+        if (t == null && r instanceof Future<?>) {
+            try {
+                Object result = ((Future<?>) r).get();
+            } catch (CancellationException ce) {
+                t = ce;
+            } catch (ExecutionException ee) {
+                t = ee.getCause();
+            } catch (InterruptedException ie) {
+                // If future got interrupted exception, we want to interrupt parent thread itself.
+                Thread.currentThread().interrupt();
+            }
+        }
+        if (t != null) {
+            Utils.handleUncaughtException(t);
+        }
     }
-  }
 }

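The point of the afterExecute() override above is that exceptions thrown inside submitted tasks normally die silently inside the returned Future; this executor unwraps them and reports them. A minimal sketch:

    import backtype.storm.utils.ExtendedThreadPoolExecutor;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.TimeUnit;

    public class PoolDemo {
        public static void main(String[] args) {
            ExtendedThreadPoolExecutor pool = new ExtendedThreadPoolExecutor(
                    1, 1, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>());
            // When this task throws, afterExecute() unwraps the cause from the
            // Future and hands it to Utils.handleUncaughtException instead of
            // letting the failure vanish inside the executor.
            pool.submit(new Runnable() {
                @Override
                public void run() {
                    throw new RuntimeException("boom");
                }
            });
            pool.shutdown();
        }
    }
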
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/IndifferentAccessMap.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/IndifferentAccessMap.java b/jstorm-core/src/main/java/backtype/storm/utils/IndifferentAccessMap.java
index c0190cc..2bc6e7d 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/IndifferentAccessMap.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/IndifferentAccessMap.java
@@ -17,7 +17,6 @@
  */
 package backtype.storm.utils;
 
-
 import clojure.lang.ILookup;
 import clojure.lang.ISeq;
 import clojure.lang.AFn;
@@ -65,16 +64,17 @@ public class IndifferentAccessMap extends AFn implements ILookup, IPersistentMap
 
     @Override
     public Object valAt(Object o) {
-        if(o instanceof Keyword) {
+        if (o instanceof Keyword) {
             return valAt(((Keyword) o).getName());
         }
         return getMap().valAt(o);
     }
-    
+
     @Override
     public Object valAt(Object o, Object def) {
         Object ret = valAt(o);
-        if(ret==null) ret = def;
+        if (ret == null)
+            ret = def;
         return ret;
     }
 
@@ -92,30 +92,35 @@ public class IndifferentAccessMap extends AFn implements ILookup, IPersistentMap
     /* IPersistentMap */
     /* Naive implementation, but it might be good enough */
     public IPersistentMap assoc(Object k, Object v) {
-        if(k instanceof Keyword) return assoc(((Keyword) k).getName(), v);
-        
+        if (k instanceof Keyword)
+            return assoc(((Keyword) k).getName(), v);
+
         return new IndifferentAccessMap(getMap().assoc(k, v));
     }
 
     public IPersistentMap assocEx(Object k, Object v) {
-        if(k instanceof Keyword) return assocEx(((Keyword) k).getName(), v);
+        if (k instanceof Keyword)
+            return assocEx(((Keyword) k).getName(), v);
 
         return new IndifferentAccessMap(getMap().assocEx(k, v));
     }
 
     public IPersistentMap without(Object k) {
-        if(k instanceof Keyword) return without(((Keyword) k).getName());
+        if (k instanceof Keyword)
+            return without(((Keyword) k).getName());
 
         return new IndifferentAccessMap(getMap().without(k));
     }
 
     public boolean containsKey(Object k) {
-        if(k instanceof Keyword) return containsKey(((Keyword) k).getName());
+        if (k instanceof Keyword)
+            return containsKey(((Keyword) k).getName());
         return getMap().containsKey(k);
     }
 
     public IMapEntry entryAt(Object k) {
-        if(k instanceof Keyword) return entryAt(((Keyword) k).getName());
+        if (k instanceof Keyword)
+            return entryAt(((Keyword) k).getName());
 
         return getMap().entryAt(k);
     }
@@ -160,17 +165,20 @@ public class IndifferentAccessMap extends AFn implements ILookup, IPersistentMap
     public Collection values() {
         return ((Map) getMap()).values();
     }
-    
+
     /* Not implemented */
     public void clear() {
         throw new UnsupportedOperationException();
     }
+
     public Object put(Object k, Object v) {
         throw new UnsupportedOperationException();
     }
+
     public void putAll(Map m) {
         throw new UnsupportedOperationException();
     }
+
     public Object remove(Object k) {
         throw new UnsupportedOperationException();
     }

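IndifferentAccessMap makes Clojure keyword keys and their string names interchangeable by converting the keyword to its name before every lookup. A sketch, assuming the IPersistentMap-taking constructor that assoc()/without() use internally:

    import backtype.storm.utils.IndifferentAccessMap;
    import clojure.lang.Keyword;
    import clojure.lang.PersistentArrayMap;

    public class IndifferentDemo {
        public static void main(String[] args) {
            IndifferentAccessMap m = new IndifferentAccessMap(
                    PersistentArrayMap.create(
                            java.util.Collections.singletonMap("host", "nimbus1")));
            // A Keyword key is translated to its string name before the lookup,
            // so :host and "host" resolve to the same entry.
            System.out.println(m.valAt(Keyword.intern("host")));
            System.out.println(m.valAt("host"));
        }
    }
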
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/InprocMessaging.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/InprocMessaging.java b/jstorm-core/src/main/java/backtype/storm/utils/InprocMessaging.java
index b20c775..03c8e4b 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/InprocMessaging.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/InprocMessaging.java
@@ -25,32 +25,32 @@ public class InprocMessaging {
     private static Map<Integer, LinkedBlockingQueue<Object>> _queues = new HashMap<Integer, LinkedBlockingQueue<Object>>();
     private static final Object _lock = new Object();
     private static int port = 1;
-    
+
     public static int acquireNewPort() {
         int ret;
-        synchronized(_lock) {
+        synchronized (_lock) {
             ret = port;
             port++;
         }
         return ret;
     }
-    
+
     public static void sendMessage(int port, Object msg) {
         getQueue(port).add(msg);
     }
-    
+
     public static Object takeMessage(int port) throws InterruptedException {
         return getQueue(port).take();
     }
 
     public static Object pollMessage(int port) {
-        return  getQueue(port).poll();
-    }    
-    
+        return getQueue(port).poll();
+    }
+
     private static LinkedBlockingQueue<Object> getQueue(int port) {
-        synchronized(_lock) {
-            if(!_queues.containsKey(port)) {
-              _queues.put(port, new LinkedBlockingQueue<Object>());   
+        synchronized (_lock) {
+            if (!_queues.containsKey(port)) {
+                _queues.put(port, new LinkedBlockingQueue<Object>());
             }
             return _queues.get(port);
         }

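InprocMessaging is a process-local mailbox: acquireNewPort() hands out a fresh integer channel id, and each id maps to its own LinkedBlockingQueue. A minimal sketch using only the methods above:

    import backtype.storm.utils.InprocMessaging;

    public class InprocDemo {
        public static void main(String[] args) throws InterruptedException {
            int port = InprocMessaging.acquireNewPort();    // process-unique channel id
            InprocMessaging.sendMessage(port, "ping");
            Object msg = InprocMessaging.takeMessage(port); // blocks until available
            System.out.println(msg);                        // "ping"
        }
    }
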
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/KeyedRoundRobinQueue.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/KeyedRoundRobinQueue.java b/jstorm-core/src/main/java/backtype/storm/utils/KeyedRoundRobinQueue.java
index 3cb455d..661c045 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/KeyedRoundRobinQueue.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/KeyedRoundRobinQueue.java
@@ -33,9 +33,9 @@ public class KeyedRoundRobinQueue<V> {
     private int _currIndex = 0;
 
     public void add(Object key, V val) {
-        synchronized(_lock) {
+        synchronized (_lock) {
             Queue<V> queue = _queues.get(key);
-            if(queue==null) {
+            if (queue == null) {
                 queue = new LinkedList<V>();
                 _queues.put(key, queue);
                 _keyOrder.add(key);
@@ -47,14 +47,14 @@ public class KeyedRoundRobinQueue<V> {
 
     public V take() throws InterruptedException {
         _size.acquire();
-        synchronized(_lock) {
+        synchronized (_lock) {
             Object key = _keyOrder.get(_currIndex);
             Queue<V> queue = _queues.get(key);
             V ret = queue.remove();
-            if(queue.isEmpty()) {
+            if (queue.isEmpty()) {
                 _keyOrder.remove(_currIndex);
                 _queues.remove(key);
-                if(_keyOrder.size()==0) {
+                if (_keyOrder.size() == 0) {
                     _currIndex = 0;
                 } else {
                     _currIndex = _currIndex % _keyOrder.size();

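KeyedRoundRobinQueue fans values out per key and serves keys in rotation, so a chatty key cannot starve the others. A sketch; the exact interleaving assumes the index-advance branch elided from the hunk above moves to the next key after each take():

    import backtype.storm.utils.KeyedRoundRobinQueue;

    public class RoundRobinDemo {
        public static void main(String[] args) throws InterruptedException {
            KeyedRoundRobinQueue<String> q = new KeyedRoundRobinQueue<String>();
            q.add("key-a", "a1");
            q.add("key-a", "a2");
            q.add("key-b", "b1");
            // take() rotates across keys rather than draining one key first.
            System.out.println(q.take());   // a1
            System.out.println(q.take());   // b1
            System.out.println(q.take());   // a2
        }
    }
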
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/ListDelegate.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/ListDelegate.java b/jstorm-core/src/main/java/backtype/storm/utils/ListDelegate.java
index 1e091f0..25e6878 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/ListDelegate.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/ListDelegate.java
@@ -25,11 +25,11 @@ import java.util.ListIterator;
 
 public class ListDelegate implements List<Object> {
     private List<Object> _delegate;
-    
+
     public ListDelegate() {
-    	_delegate = new ArrayList<Object>();
+        _delegate = new ArrayList<Object>();
     }
-    
+
     public void setDelegate(List<Object> delegate) {
         _delegate = delegate;
     }
@@ -37,7 +37,7 @@ public class ListDelegate implements List<Object> {
     public List<Object> getDelegate() {
         return _delegate;
     }
-    
+
     @Override
     public int size() {
         return _delegate.size();
@@ -152,5 +152,5 @@ public class ListDelegate implements List<Object> {
     public List<Object> subList(int i, int i1) {
         return _delegate.subList(i, i1);
     }
-    
+
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/LocalState.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/LocalState.java b/jstorm-core/src/main/java/backtype/storm/utils/LocalState.java
index 0d8292f..843efb4 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/LocalState.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/LocalState.java
@@ -25,12 +25,9 @@ import java.util.HashMap;
 import java.io.IOException;
 
 /**
- * A simple, durable, atomic K/V database. *Very inefficient*, should only be
- * used for occasional reads/writes. Every read/write hits disk.
+ * A simple, durable, atomic K/V database. *Very inefficient*, should only be used for occasional reads/writes. Every read/write hits disk.
  * 
- * @@@ 
- * Right now, This class hasn't upgrade to storm's LocalState
- * It is need define every type in thrift, it is too complicated to do
+ * @@@ Right now, this class hasn't been upgraded to storm's LocalState; that would require defining every type in thrift, which is too complicated to do
  */
 public class LocalState {
     private VersionedStore _vs;
@@ -46,8 +43,7 @@ public class LocalState {
             if (latestPath == null)
                 return new HashMap<Object, Object>();
             try {
-                return (Map<Object, Object>) Utils.javaDeserialize(FileUtils
-                        .readFileToByteArray(new File(latestPath)));
+                return (Map<Object, Object>) Utils.javaDeserialize(FileUtils.readFileToByteArray(new File(latestPath)));
             } catch (IOException e) {
                 attempts++;
                 if (attempts >= 10) {
@@ -65,8 +61,7 @@ public class LocalState {
         put(key, val, true);
     }
 
-    public synchronized void put(Object key, Object val, boolean cleanup)
-            throws IOException {
+    public synchronized void put(Object key, Object val, boolean cleanup) throws IOException {
         Map<Object, Object> curr = snapshot();
         curr.put(key, val);
         persist(curr, cleanup);
@@ -76,8 +71,7 @@ public class LocalState {
         remove(key, true);
     }
 
-    public synchronized void remove(Object key, boolean cleanup)
-            throws IOException {
+    public synchronized void remove(Object key, boolean cleanup) throws IOException {
         Map<Object, Object> curr = snapshot();
         curr.remove(key);
         persist(curr, cleanup);
@@ -87,8 +81,7 @@ public class LocalState {
         _vs.cleanup(keepVersions);
     }
 
-    private void persist(Map<Object, Object> val, boolean cleanup)
-            throws IOException {
+    private void persist(Map<Object, Object> val, boolean cleanup) throws IOException {
         byte[] toWrite = Utils.serialize(val);
         String newPath = _vs.createVersion();
         FileUtils.writeByteArrayToFile(new File(newPath), toWrite);

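A minimal LocalState sketch. The String-rooted constructor is assumed (only the VersionedStore field is visible here); everything else is the snapshot()/put()/remove() API shown above, and per the class comment every call really does hit disk:

    import backtype.storm.utils.LocalState;

    public class LocalStateDemo {
        public static void main(String[] args) throws Exception {
            LocalState state = new LocalState("/tmp/local-state-demo");
            state.put("worker-id", "w-1");                     // persisted immediately
            System.out.println(state.snapshot().get("worker-id"));
            state.remove("worker-id");
        }
    }
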
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/Monitor.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/Monitor.java b/jstorm-core/src/main/java/backtype/storm/utils/Monitor.java
index eb57e99..b725084 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/Monitor.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/Monitor.java
@@ -17,16 +17,17 @@
  */
 package backtype.storm.utils;
 
-import backtype.storm.generated.*;
+import backtype.storm.generated.ClusterSummary;
+import backtype.storm.generated.Nimbus;
+import backtype.storm.generated.TopologySummary;
 
 import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
 
 /**
  * Deprecated in JStorm
+ * 
  * @author zhongyan.feng
- *
+ * 
  */
 @Deprecated
 public class Monitor {
@@ -106,17 +107,17 @@ public class Monitor {
     /**
      * @@@ Don't be compatible with Storm
      * 
-     * Here skip the logic
+     *     Here skip the logic
      * @param client
      * @param topology
      * @return
      * @throws Exception
      */
-    private HashSet<String> getComponents(Nimbus.Client client, String topology) throws Exception{
+    private HashSet<String> getComponents(Nimbus.Client client, String topology) throws Exception {
         HashSet<String> components = new HashSet<String>();
         ClusterSummary clusterSummary = client.getClusterInfo();
         TopologySummary topologySummary = null;
-        for (TopologySummary ts: clusterSummary.get_topologies()) {
+        for (TopologySummary ts : clusterSummary.get_topologies()) {
             if (topology.equals(ts.get_name())) {
                 topologySummary = ts;
                 break;
@@ -126,12 +127,12 @@ public class Monitor {
             throw new IllegalArgumentException("topology: " + topology + " not found");
         } else {
             String id = topologySummary.get_id();
-//            GetInfoOptions getInfoOpts = new GetInfoOptions();
-//            getInfoOpts.set_num_err_choice(NumErrorsChoice.NONE);
-//            TopologyInfo info = client.getTopologyInfoWithOpts(id, getInfoOpts);
-//            for (ExecutorSummary es: info.get_executors()) {
-//                components.add(es.get_component_id());
-//            }
+            // GetInfoOptions getInfoOpts = new GetInfoOptions();
+            // getInfoOpts.set_num_err_choice(NumErrorsChoice.NONE);
+            // TopologyInfo info = client.getTopologyInfoWithOpts(id, getInfoOpts);
+            // for (ExecutorSummary es: info.get_executors()) {
+            // components.add(es.get_component_id());
+            // }
         }
         return components;
     }
@@ -161,7 +162,7 @@ public class Monitor {
             throw new IllegalArgumentException("stream name must be something");
         }
 
-        if ( !WATCH_TRANSFERRED.equals(_watch) && !WATCH_EMITTED.equals(_watch)) {
+        if (!WATCH_TRANSFERRED.equals(_watch) && !WATCH_EMITTED.equals(_watch)) {
             throw new IllegalArgumentException("watch item must either be transferred or emitted");
         }
         System.out.println("topology\tcomponent\tparallelism\tstream\ttime-diff ms\t" + _watch + "\tthroughput (Kt/s)");
@@ -189,7 +190,7 @@ public class Monitor {
         boolean streamFound = false;
         ClusterSummary clusterSummary = client.getClusterInfo();
         TopologySummary topologySummary = null;
-        for (TopologySummary ts: clusterSummary.get_topologies()) {
+        for (TopologySummary ts : clusterSummary.get_topologies()) {
             if (_topology.equals(ts.get_name())) {
                 topologySummary = ts;
                 break;
@@ -198,30 +199,30 @@ public class Monitor {
         if (topologySummary == null) {
             throw new IllegalArgumentException("topology: " + _topology + " not found");
         } else {
-//            String id = topologySummary.get_id();
-//            GetInfoOptions getInfoOpts = new GetInfoOptions();
-//            getInfoOpts.set_num_err_choice(NumErrorsChoice.NONE);
-//            TopologyInfo info = client.getTopologyInfoWithOpts(id, getInfoOpts);
-//            for (ExecutorSummary es: info.get_executors()) {
-//                if (_component.equals(es.get_component_id())) {
-//                    componentParallelism ++;
-//                    ExecutorStats stats = es.get_stats();
-//                    if (stats != null) {
-//                        Map<String,Map<String,Long>> statted =
-//                                WATCH_EMITTED.equals(_watch) ? stats.get_emitted() : stats.get_transferred();
-//                        if ( statted != null) {
-//                            Map<String, Long> e2 = statted.get(":all-time");
-//                            if (e2 != null) {
-//                                Long stream = e2.get(_stream);
-//                                if (stream != null){
-//                                    streamFound = true;
-//                                    totalStatted += stream;
-//                                }
-//                            }
-//                        }
-//                    }
-//                }
-//            }
+            // String id = topologySummary.get_id();
+            // GetInfoOptions getInfoOpts = new GetInfoOptions();
+            // getInfoOpts.set_num_err_choice(NumErrorsChoice.NONE);
+            // TopologyInfo info = client.getTopologyInfoWithOpts(id, getInfoOpts);
+            // for (ExecutorSummary es: info.get_executors()) {
+            // if (_component.equals(es.get_component_id())) {
+            // componentParallelism ++;
+            // ExecutorStats stats = es.get_stats();
+            // if (stats != null) {
+            // Map<String,Map<String,Long>> statted =
+            // WATCH_EMITTED.equals(_watch) ? stats.get_emitted() : stats.get_transferred();
+            // if ( statted != null) {
+            // Map<String, Long> e2 = statted.get(":all-time");
+            // if (e2 != null) {
+            // Long stream = e2.get(_stream);
+            // if (stream != null){
+            // streamFound = true;
+            // totalStatted += stream;
+            // }
+            // }
+            // }
+            // }
+            // }
+            // }
         }
 
         if (componentParallelism <= 0) {
@@ -242,8 +243,9 @@ public class Monitor {
         long stattedDelta = totalStatted - state.getLastStatted();
         state.setLastTime(now);
         state.setLastStatted(totalStatted);
-        double throughput = (stattedDelta == 0 || timeDelta == 0) ? 0.0 : ((double)stattedDelta/(double)timeDelta);
-        System.out.println(_topology+"\t"+_component+"\t"+componentParallelism+"\t"+_stream+"\t"+timeDelta+"\t"+stattedDelta+"\t"+throughput);
+        double throughput = (stattedDelta == 0 || timeDelta == 0) ? 0.0 : ((double) stattedDelta / (double) timeDelta);
+        System.out.println(_topology + "\t" + _component + "\t" + componentParallelism + "\t" + _stream + "\t" + timeDelta + "\t" + stattedDelta + "\t"
+                + throughput);
     }
 
     public void set_interval(int _interval) {

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/MutableInt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/MutableInt.java b/jstorm-core/src/main/java/backtype/storm/utils/MutableInt.java
index 326ade0..aca3a24 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/MutableInt.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/MutableInt.java
@@ -23,21 +23,21 @@ public class MutableInt {
     public MutableInt(int val) {
         this.val = val;
     }
-    
+
     public void set(int val) {
         this.val = val;
     }
-    
+
     public int get() {
         return val;
     }
-    
+
     public int increment() {
         return increment(1);
     }
-    
+
     public int increment(int amt) {
-        val+=amt;
+        val += amt;
         return val;
     }
 }

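MutableInt (and its sibling MutableLong below) is a plain mutable box, typically used so an anonymous inner class, which may only capture final references, can still bump a counter. A minimal sketch of that pattern:

    import backtype.storm.utils.MutableInt;

    public class CounterDemo {
        public static void main(String[] args) {
            final MutableInt acked = new MutableInt(0);
            Runnable onAck = new Runnable() {
                @Override
                public void run() {
                    acked.increment();   // mutate through the final reference
                }
            };
            onAck.run();
            System.out.println(acked.get());   // 1
        }
    }
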
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/MutableLong.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/MutableLong.java b/jstorm-core/src/main/java/backtype/storm/utils/MutableLong.java
index a744c1c..2f4034e 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/MutableLong.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/MutableLong.java
@@ -23,21 +23,21 @@ public class MutableLong {
     public MutableLong(long val) {
         this.val = val;
     }
-    
+
     public void set(long val) {
         this.val = val;
     }
-    
+
     public long get() {
         return val;
     }
-    
+
     public long increment() {
         return increment(1);
     }
-    
+
     public long increment(long amt) {
-        val+=amt;
+        val += amt;
         return val;
     }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/MutableObject.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/MutableObject.java b/jstorm-core/src/main/java/backtype/storm/utils/MutableObject.java
index d5cb7db..d0f928c 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/MutableObject.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/MutableObject.java
@@ -19,19 +19,19 @@ package backtype.storm.utils;
 
 public class MutableObject {
     Object o = null;
-    
+
     public MutableObject() {
-        
+
     }
 
     public MutableObject(Object o) {
         this.o = o;
     }
-    
+
     public void setObject(Object o) {
         this.o = o;
     }
-    
+
     public Object getObject() {
         return o;
     }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/NimbusClient.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/NimbusClient.java b/jstorm-core/src/main/java/backtype/storm/utils/NimbusClient.java
index 5829b67..ac76439 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/NimbusClient.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/NimbusClient.java
@@ -31,24 +31,24 @@ import backtype.storm.security.auth.ThriftConnectionType;
 
 public class NimbusClient extends ThriftClient {
     private static final Logger LOG = LoggerFactory.getLogger(NimbusClient.class);
-    
+
     private Nimbus.Client _client;
     private static String clientVersion = Utils.getVersion();
-    
+
     @SuppressWarnings("unchecked")
     public static NimbusClient getConfiguredClient(Map conf) {
         return getConfiguredClient(conf, null);
     }
-    
+
     @SuppressWarnings("unchecked")
     public static NimbusClient getConfiguredClient(Map conf, Integer timeout) {
         return getConfiguredClientAs(conf, timeout, null);
     }
-    
+
     public static NimbusClient getConfiguredClientAs(Map conf, String asUser) {
         return getConfiguredClientAs(conf, null, asUser);
     }
-    
+
     public static void checkVersion(NimbusClient client) {
         String serverVersion;
         try {
@@ -56,24 +56,24 @@ public class NimbusClient extends ThriftClient {
         } catch (TException e) {
             // TODO Auto-generated catch block
             LOG.warn("Failed to get nimbus version ");
-            return ;
+            return;
         }
         if (!clientVersion.equals(serverVersion)) {
             LOG.warn("Your client version:  " + clientVersion + " but nimbus version: " + serverVersion);
         }
     }
-    
+
     public static NimbusClient getConfiguredClientAs(Map conf, Integer timeout, String asUser) {
         try {
-            if(conf.containsKey(Config.STORM_DO_AS_USER)) {
-                if(asUser != null && !asUser.isEmpty()) {
-                    LOG.warn("You have specified a doAsUser as param {} and a doAsParam as config, config will take precedence."
-                            , asUser, conf.get(Config.STORM_DO_AS_USER));
+            if (conf.containsKey(Config.STORM_DO_AS_USER)) {
+                if (asUser != null && !asUser.isEmpty()) {
+                    LOG.warn("You have specified a doAsUser as param {} and a doAsParam as config, config will take precedence.", asUser,
+                            conf.get(Config.STORM_DO_AS_USER));
                 }
                 asUser = (String) conf.get(Config.STORM_DO_AS_USER);
             }
-            
-            NimbusClient client = new NimbusClient(conf, null, null, timeout, asUser);  
+
+            NimbusClient client = new NimbusClient(conf, null, null, timeout, asUser);
             checkVersion(client);
             return client;
         } catch (Exception ex) {
@@ -84,24 +84,24 @@ public class NimbusClient extends ThriftClient {
     public NimbusClient(Map conf, String host, int port) throws TTransportException {
         this(conf, host, port, null);
     }
-    
+
     public NimbusClient(Map conf, String host, int port, Integer timeout) throws TTransportException {
         super(conf, ThriftConnectionType.NIMBUS, host, port, timeout, null);
         _client = new Nimbus.Client(_protocol);
     }
-    
+
     public NimbusClient(Map conf, String host, Integer port, Integer timeout, String asUser) throws TTransportException {
         super(conf, ThriftConnectionType.NIMBUS, host, port, timeout, asUser);
         _client = new Nimbus.Client(_protocol);
     }
-    
+
     public NimbusClient(Map conf, String host) throws TTransportException {
         super(conf, ThriftConnectionType.NIMBUS, host, null, null, null);
         _client = new Nimbus.Client(_protocol);
     }
-    
+
     public Nimbus.Client getClient() {
         return _client;
     }
-    
+
 }

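A minimal NimbusClient sketch built from the factory methods above. Utils.readStormConfig() and close() are assumed from Utils and the parent ThriftClient; getVersion() is the same call checkVersion() makes:

    import backtype.storm.utils.NimbusClient;
    import backtype.storm.utils.Utils;
    import java.util.Map;

    public class NimbusDemo {
        public static void main(String[] args) throws Exception {
            Map conf = Utils.readStormConfig();
            NimbusClient nimbus = NimbusClient.getConfiguredClient(conf);
            try {
                System.out.println("nimbus version: " + nimbus.getClient().getVersion());
            } finally {
                nimbus.close();   // close() is assumed from the parent ThriftClient
            }
        }
    }
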
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/RegisteredGlobalState.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/RegisteredGlobalState.java b/jstorm-core/src/main/java/backtype/storm/utils/RegisteredGlobalState.java
index 48053fc..fbaf03b 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/RegisteredGlobalState.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/RegisteredGlobalState.java
@@ -21,44 +21,42 @@ import java.util.HashMap;
 import java.util.UUID;
 
 /**
- * This class is used as part of testing Storm. It is used to keep track of "global metrics"
- * in an atomic way. For example, it is used for doing fine-grained detection of when a 
- * local Storm cluster is idle by tracking the number of transferred tuples vs the number of processed
- * tuples.
+ * This class is used as part of testing Storm. It is used to keep track of "global metrics" in an atomic way. For example, it is used for doing fine-grained
+ * detection of when a local Storm cluster is idle by tracking the number of transferred tuples vs the number of processed tuples.
  */
 public class RegisteredGlobalState {
     private static HashMap<String, Object> _states = new HashMap<String, Object>();
     private static final Object _lock = new Object();
-    
+
     public static Object globalLock() {
         return _lock;
     }
-    
+
     public static String registerState(Object init) {
-        synchronized(_lock) {
+        synchronized (_lock) {
             String id = UUID.randomUUID().toString();
             _states.put(id, init);
             return id;
         }
     }
-    
+
     public static void setState(String id, Object init) {
-        synchronized(_lock) {
+        synchronized (_lock) {
             _states.put(id, init);
         }
     }
-    
+
     public static Object getState(String id) {
-        synchronized(_lock) {
+        synchronized (_lock) {
             Object ret = _states.get(id);
-            //System.out.println("State: " + ret.toString());
+            // System.out.println("State: " + ret.toString());
             return ret;
-        }        
+        }
     }
-    
+
     public static void clearState(String id) {
-        synchronized(_lock) {
+        synchronized (_lock) {
             _states.remove(id);
-        }        
+        }
     }
 }

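Registered state is just a UUID-keyed slot in a static map guarded by one lock, which is enough for tests to share counters across threads. A minimal sketch:

    import backtype.storm.utils.RegisteredGlobalState;
    import java.util.concurrent.atomic.AtomicLong;

    public class GlobalStateDemo {
        public static void main(String[] args) {
            String id = RegisteredGlobalState.registerState(new AtomicLong(0));
            AtomicLong counter = (AtomicLong) RegisteredGlobalState.getState(id);
            counter.incrementAndGet();        // visible to anyone holding the id
            RegisteredGlobalState.clearState(id);
        }
    }
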
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/RotatingMap.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/RotatingMap.java b/jstorm-core/src/main/java/backtype/storm/utils/RotatingMap.java
index 2ed0e33..db62e5c 100644
--- a/jstorm-core/src/main/java/backtype/storm/utils/RotatingMap.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/RotatingMap.java
@@ -24,18 +24,17 @@ import java.util.Map;
 import java.util.Map.Entry;
 
 /**
- * Expires keys that have not been updated in the configured number of seconds.
- * The algorithm used will take between expirationSecs and
- * expirationSecs * (1 + 1 / (numBuckets-1)) to actually expire the message.
- *
+ * Expires keys that have not been updated in the configured number of seconds. The algorithm used will take between expirationSecs and expirationSecs * (1 + 1
+ * / (numBuckets-1)) to actually expire the message.
+ * 
  * get, put, remove, containsKey, and size take O(numBuckets) time to run.
- *
- * The advantage of this design is that the expiration thread only locks the object
- * for O(1) time, meaning the object is essentially always available for gets/puts.
+ * 
+ * The advantage of this design is that the expiration thread only locks the object for O(1) time, meaning the object is essentially always available for
+ * gets/puts.
  */
 @Deprecated
 public class RotatingMap<K, V> {
-    //this default ensures things expire at most 50% past the expiration time
+    // this default ensures things expire at most 50% past the expiration time
     private static final int DEFAULT_NUM_BUCKETS = 3;
 
     public static interface ExpiredCallback<K, V> {
@@ -45,13 +44,13 @@ public class RotatingMap<K, V> {
     private LinkedList<HashMap<K, V>> _buckets;
 
     private ExpiredCallback _callback;
-    
+
     public RotatingMap(int numBuckets, ExpiredCallback<K, V> callback) {
-        if(numBuckets<2) {
+        if (numBuckets < 2) {
             throw new IllegalArgumentException("numBuckets must be >= 2");
         }
         _buckets = new LinkedList<HashMap<K, V>>();
-        for(int i=0; i<numBuckets; i++) {
+        for (int i = 0; i < numBuckets; i++) {
             _buckets.add(new HashMap<K, V>());
         }
 
@@ -64,13 +63,13 @@ public class RotatingMap<K, V> {
 
     public RotatingMap(int numBuckets) {
         this(numBuckets, null);
-    }   
-    
+    }
+
     public Map<K, V> rotate() {
         Map<K, V> dead = _buckets.removeLast();
         _buckets.addFirst(new HashMap<K, V>());
-        if(_callback!=null) {
-            for(Entry<K, V> entry: dead.entrySet()) {
+        if (_callback != null) {
+            for (Entry<K, V> entry : dead.entrySet()) {
                 _callback.expire(entry.getKey(), entry.getValue());
             }
         }
@@ -78,8 +77,8 @@ public class RotatingMap<K, V> {
     }
 
     public boolean containsKey(K key) {
-        for(HashMap<K, V> bucket: _buckets) {
-            if(bucket.containsKey(key)) {
+        for (HashMap<K, V> bucket : _buckets) {
+            if (bucket.containsKey(key)) {
                 return true;
             }
         }
@@ -87,8 +86,8 @@ public class RotatingMap<K, V> {
     }
 
     public V get(K key) {
-        for(HashMap<K, V> bucket: _buckets) {
-            if(bucket.containsKey(key)) {
+        for (HashMap<K, V> bucket : _buckets) {
+            if (bucket.containsKey(key)) {
                 return bucket.get(key);
             }
         }
@@ -99,16 +98,15 @@ public class RotatingMap<K, V> {
         Iterator<HashMap<K, V>> it = _buckets.iterator();
         HashMap<K, V> bucket = it.next();
         bucket.put(key, value);
-        while(it.hasNext()) {
+        while (it.hasNext()) {
             bucket = it.next();
             bucket.remove(key);
         }
     }
-    
-    
+
     public Object remove(K key) {
-        for(HashMap<K, V> bucket: _buckets) {
-            if(bucket.containsKey(key)) {
+        for (HashMap<K, V> bucket : _buckets) {
+            if (bucket.containsKey(key)) {
                 return bucket.remove(key);
             }
         }
@@ -117,9 +115,9 @@ public class RotatingMap<K, V> {
 
     public int size() {
         int size = 0;
-        for(HashMap<K, V> bucket: _buckets) {
-            size+=bucket.size();
+        for (HashMap<K, V> bucket : _buckets) {
+            size += bucket.size();
         }
         return size;
-    }    
+    }
 }

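With numBuckets = 3, a key written once and never touched again survives exactly three rotate() calls before the callback fires, matching the expirationSecs to expirationSecs * (1 + 1/(numBuckets-1)) window described above. A minimal sketch; the expire(K, V) callback signature is assumed, since the interface body is elided from the hunk:

    import backtype.storm.utils.RotatingMap;

    public class RotatingMapDemo {
        public static void main(String[] args) {
            RotatingMap<String, String> pending = new RotatingMap<String, String>(3,
                    new RotatingMap.ExpiredCallback<String, String>() {
                        @Override
                        public void expire(String key, String val) {
                            System.out.println("expired: " + key);
                        }
                    });
            pending.put("msg-1", "payload");
            pending.rotate();   // in real use, a timer thread calls this periodically
            pending.rotate();
            pending.rotate();   // "msg-1" falls out of the last bucket: expired: msg-1
        }
    }
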
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/ServiceRegistry.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/ServiceRegistry.java b/jstorm-core/src/main/java/backtype/storm/utils/ServiceRegistry.java
index 724bc3e..92dc2f7 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/ServiceRegistry.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/ServiceRegistry.java
@@ -24,24 +24,24 @@ import java.util.UUID;
 public class ServiceRegistry {
     private static HashMap<String, Object> _services = new HashMap<String, Object>();
     private static final Object _lock = new Object();
-    
+
     public static String registerService(Object service) {
-        synchronized(_lock) {
+        synchronized (_lock) {
             String id = UUID.randomUUID().toString();
             _services.put(id, service);
             return id;
         }
     }
-    
+
     public static Object getService(String id) {
-        synchronized(_lock) {
+        synchronized (_lock) {
             return _services.get(id);
-        }        
+        }
     }
-    
+
     public static void unregisterService(String id) {
-        synchronized(_lock) {
+        synchronized (_lock) {
             _services.remove(id);
-        }        
+        }
     }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/ShellProcess.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/ShellProcess.java b/jstorm-core/src/main/java/backtype/storm/utils/ShellProcess.java
index 78f47d6..69af852 100644
--- a/jstorm-core/src/main/java/backtype/storm/utils/ShellProcess.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/ShellProcess.java
@@ -37,10 +37,10 @@ import org.slf4j.LoggerFactory;
 public class ShellProcess implements Serializable {
     public static Logger LOG = LoggerFactory.getLogger(ShellProcess.class);
     public static Logger ShellLogger;
-    private Process      _subprocess;
-    private InputStream  processErrorStream;
-    private String[]     command;
-    public ISerializer   serializer;
+    private Process _subprocess;
+    private InputStream processErrorStream;
+    private String[] command;
+    public ISerializer serializer;
     public Number pid;
     public String componentName;
 
@@ -63,9 +63,7 @@ public class ShellProcess implements Serializable {
             serializer.initialize(_subprocess.getOutputStream(), _subprocess.getInputStream());
             this.pid = serializer.connect(conf, context);
         } catch (IOException e) {
-            throw new RuntimeException(
-                    "Error when launching multilang subprocess\n"
-                            + getErrorsString(), e);
+            throw new RuntimeException("Error when launching multilang subprocess\n" + getErrorsString(), e);
         } catch (NoOutputException e) {
             throw new RuntimeException(e + getErrorsString() + "\n");
         }
@@ -73,18 +71,18 @@ public class ShellProcess implements Serializable {
     }
 
     private ISerializer getSerializer(Map conf) {
-        //get factory class name
-        String serializer_className = (String)conf.get(Config.TOPOLOGY_MULTILANG_SERIALIZER);
+        // get factory class name
+        String serializer_className = (String) conf.get(Config.TOPOLOGY_MULTILANG_SERIALIZER);
         LOG.info("Storm multilang serializer: " + serializer_className);
 
         ISerializer serializer = null;
         try {
-            //create a factory class
+            // create a factory class
             Class klass = Class.forName(serializer_className);
-            //obtain a serializer object
+            // obtain a serializer object
             Object obj = klass.newInstance();
-            serializer = (ISerializer)obj;
-        } catch(Exception e) {
+            serializer = (ISerializer) obj;
+        } catch (Exception e) {
             throw new RuntimeException("Failed to construct multilang serializer from serializer " + serializer_className, e);
         }
         return serializer;
@@ -152,7 +150,7 @@ public class ShellProcess implements Serializable {
     }
 
     /**
-     *
+     * 
      * @return pid, if the process has been launched, null otherwise.
      */
     public Number getPid() {
@@ -160,7 +158,7 @@ public class ShellProcess implements Serializable {
     }
 
     /**
-     *
+     * 
      * @return the name of component.
      */
     public String getComponentName() {
@@ -168,13 +166,13 @@ public class ShellProcess implements Serializable {
     }
 
     /**
-     *
+     * 
      * @return exit code of the process if process is terminated, -1 if process is not started or terminated.
      */
     public int getExitCode() {
         try {
             return this._subprocess != null ? this._subprocess.exitValue() : -1;
-        } catch(IllegalThreadStateException e) {
+        } catch (IllegalThreadStateException e) {
             return -1;
         }
     }
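
getSerializer() above loads the multilang serializer reflectively, so whatever class TOPOLOGY_MULTILANG_SERIALIZER names must have a no-arg constructor and implement ISerializer. A hedged configuration sketch (the JSON serializer class name is the usual Storm default, not something this diff establishes):

    Map conf = new HashMap();
    conf.put(Config.TOPOLOGY_MULTILANG_SERIALIZER, "backtype.storm.multilang.JsonSerializer");
    // getSerializer(conf) will Class.forName() this name, call newInstance(),
    // and cast the result to ISerializer, throwing a RuntimeException on failure.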

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/ShellUtils.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/ShellUtils.java b/jstorm-core/src/main/java/backtype/storm/utils/ShellUtils.java
index 1065ff9..261cbb7 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/ShellUtils.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/ShellUtils.java
@@ -31,19 +31,12 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-
-
 abstract public class ShellUtils {
     public static Logger LOG = LoggerFactory.getLogger(ShellUtils.class);
 
     // OSType detection
     public enum OSType {
-        OS_TYPE_LINUX,
-        OS_TYPE_WIN,
-        OS_TYPE_SOLARIS,
-        OS_TYPE_MAC,
-        OS_TYPE_FREEBSD,
-        OS_TYPE_OTHER
+        OS_TYPE_LINUX, OS_TYPE_WIN, OS_TYPE_SOLARIS, OS_TYPE_MAC, OS_TYPE_FREEBSD, OS_TYPE_OTHER
     }
 
     public static final OSType osType = getOSType();
@@ -69,29 +62,27 @@ abstract public class ShellUtils {
     // Helper static vars for each platform
     public static final boolean WINDOWS = (osType == OSType.OS_TYPE_WIN);
     public static final boolean SOLARIS = (osType == OSType.OS_TYPE_SOLARIS);
-    public static final boolean MAC     = (osType == OSType.OS_TYPE_MAC);
+    public static final boolean MAC = (osType == OSType.OS_TYPE_MAC);
     public static final boolean FREEBSD = (osType == OSType.OS_TYPE_FREEBSD);
-    public static final boolean LINUX   = (osType == OSType.OS_TYPE_LINUX);
-    public static final boolean OTHER   = (osType == OSType.OS_TYPE_OTHER);
-
+    public static final boolean LINUX = (osType == OSType.OS_TYPE_LINUX);
+    public static final boolean OTHER = (osType == OSType.OS_TYPE_OTHER);
 
     /** Token separator regex used to parse Shell tool outputs */
-    public static final String TOKEN_SEPARATOR_REGEX
-        = WINDOWS ? "[|\n\r]" : "[ \t\n\r\f]";
+    public static final String TOKEN_SEPARATOR_REGEX = WINDOWS ? "[|\n\r]" : "[ \t\n\r\f]";
 
-    private long    interval;   // refresh interval in msec
-    private long    lastTime;   // last time the command was performed
+    private long interval; // refresh interval in msec
+    private long lastTime; // last time the command was performed
     final private boolean redirectErrorStream; // merge stdout and stderr
     private Map<String, String> environment; // env for the command execution
     private File dir;
     private Process process; // sub process used to execute the command
     private int exitCode;
-    /**Time after which the executing script would be timedout*/
+    /** Time after which the executing script would be timedout */
     protected long timeOutInterval = 0L;
-    /** If or not script timed out*/
+    /** If or not script timed out */
     private AtomicBoolean timedOut;
 
-    /**If or not script finished executing*/
+    /** If or not script finished executing */
     private volatile AtomicBoolean completed;
 
     public ShellUtils() {
@@ -103,23 +94,26 @@ abstract public class ShellUtils {
     }
 
     /**
-     * @param interval the minimum duration to wait before re-executing the
-     *        command.
+     * @param interval the minimum duration to wait before re-executing the command.
      */
     public ShellUtils(long interval, boolean redirectErrorStream) {
         this.interval = interval;
-        this.lastTime = (interval<0) ? 0 : -interval;
+        this.lastTime = (interval < 0) ? 0 : -interval;
         this.redirectErrorStream = redirectErrorStream;
     }
 
-    /** set the environment for the command
+    /**
+     * set the environment for the command
+     * 
      * @param env Mapping of environment variables
      */
     protected void setEnvironment(Map<String, String> env) {
         this.environment = env;
     }
 
-    /** set the working directory
+    /**
+     * set the working directory
+     * 
      * @param dir The directory where the command would be executed
      */
     protected void setWorkingDirectory(File dir) {
@@ -128,23 +122,18 @@ abstract public class ShellUtils {
 
     /** a Unix command to get the current user's groups list */
     public static String[] getGroupsCommand() {
-        return (WINDOWS)? new String[]{"cmd", "/c", "groups"}
-        : new String[]{"bash", "-c", "groups"};
+        return (WINDOWS) ? new String[] { "cmd", "/c", "groups" } : new String[] { "bash", "-c", "groups" };
     }
 
     /**
-     * a Unix command to get a given user's groups list.
-     * If the OS is not WINDOWS, the command will get the user's primary group
-     * first and finally get the groups list which includes the primary group.
-     * i.e. the user's primary group will be included twice.
+     * a Unix command to get a given user's groups list. If the OS is not WINDOWS, the command will get the user's primary group first and finally get the
+     * groups list which includes the primary group. i.e. the user's primary group will be included twice.
      */
     public static String[] getGroupsForUserCommand(final String user) {
-        //'groups username' command return is non-consistent across different unixes
-        return new String [] {"bash", "-c", "id -gn " + user
-                         + "&& id -Gn " + user};
+        // 'groups username' command return is non-consistent across different unixes
+        return new String[] { "bash", "-c", "id -gn " + user + "&& id -Gn " + user };
     }
 
-
     /** check to see if a command needs to be executed and execute if needed */
     protected void run() throws IOException {
         if (lastTime + interval > System.currentTimeMillis())
@@ -174,51 +163,48 @@ abstract public class ShellUtils {
         if (timeOutInterval > 0) {
             timeOutTimer = new Timer("Shell command timeout");
             timeoutTimerTask = new ShellTimeoutTimerTask(this);
-            //One time scheduling.
+            // One time scheduling.
             timeOutTimer.schedule(timeoutTimerTask, timeOutInterval);
         }
-        final BufferedReader errReader =
-            new BufferedReader(new InputStreamReader(process
-                                                     .getErrorStream()));
-        BufferedReader inReader =
-            new BufferedReader(new InputStreamReader(process
-                                                     .getInputStream()));
+        final BufferedReader errReader = new BufferedReader(new InputStreamReader(process.getErrorStream()));
+        BufferedReader inReader = new BufferedReader(new InputStreamReader(process.getInputStream()));
         final StringBuffer errMsg = new StringBuffer();
 
         // read error and input streams as this would free up the buffers
         // free the error stream buffer
         Thread errThread = new Thread() {
-                @Override
-                public void run() {
-                    try {
-                        String line = errReader.readLine();
-                        while((line != null) && !isInterrupted()) {
-                            errMsg.append(line);
-                            errMsg.append(System.getProperty("line.separator"));
-                            line = errReader.readLine();
-                        }
-                    } catch(IOException ioe) {
-                        LOG.warn("Error reading the error stream", ioe);
+            @Override
+            public void run() {
+                try {
+                    String line = errReader.readLine();
+                    while ((line != null) && !isInterrupted()) {
+                        errMsg.append(line);
+                        errMsg.append(System.getProperty("line.separator"));
+                        line = errReader.readLine();
                     }
+                } catch (IOException ioe) {
+                    LOG.warn("Error reading the error stream", ioe);
                 }
-            };
+            }
+        };
         try {
             errThread.start();
-        } catch (IllegalStateException ise) { }
+        } catch (IllegalStateException ise) {
+        }
         try {
             parseExecResult(inReader); // parse the output
             // clear the input stream buffer
             String line = inReader.readLine();
-            while(line != null) {
+            while (line != null) {
                 line = inReader.readLine();
             }
             // wait for the process to finish and check the exit code
-            exitCode  = process.waitFor();
+            exitCode = process.waitFor();
             // make sure that the error thread exits
             joinThread(errThread);
             completed.set(true);
-            //the timeout thread handling
-            //taken care in finally block
+            // the timeout thread handling
+            // taken care in finally block
             if (exitCode != 0) {
                 throw new ExitCodeException(exitCode, errMsg.toString());
             }
@@ -233,10 +219,10 @@ abstract public class ShellUtils {
                 // JDK 7 tries to automatically drain the input streams for us
                 // when the process exits, but since close is not synchronized,
                 // it creates a race if we close the stream first and the same
-                // fd is recycled.  the stream draining thread will attempt to
-                // drain that fd!!  it may block, OOM, or cause bizarre behavior
+                // fd is recycled. the stream draining thread will attempt to
+                // drain that fd!! it may block, OOM, or cause bizarre behavior
                 // see: https://bugs.openjdk.java.net/browse/JDK-8024521
-                //      issue is fixed in build 7u60
+                // issue is fixed in build 7u60
                 InputStream stdout = process.getInputStream();
                 synchronized (stdout) {
                     inReader.close();
@@ -278,10 +264,11 @@ abstract public class ShellUtils {
     protected abstract String[] getExecString();
 
     /** Parse the execution result */
-    protected abstract void parseExecResult(BufferedReader lines)
-        throws IOException;
+    protected abstract void parseExecResult(BufferedReader lines) throws IOException;
 
-    /** get the current sub-process executing the given command
+    /**
+     * get the current sub-process executing the given command
+     * 
      * @return process executing the command
      */
     public Process getProcess() {
@@ -306,18 +293,15 @@ abstract public class ShellUtils {
 
     /**
      * A simple shell command executor.
-     *
-     * <code>ShellCommandExecutor</code>should be used in cases where the output
-     * of the command needs no explicit parsing and where the command, working
-     * directory and the environment remains unchanged. The output of the command
-     * is stored as-is and is expected to be small.
+     * 
+     * <code>ShellCommandExecutor</code>should be used in cases where the output of the command needs no explicit parsing and where the command, working
+     * directory and the environment remains unchanged. The output of the command is stored as-is and is expected to be small.
      */
     public static class ShellCommandExecutor extends ShellUtils {
 
         private String[] command;
         private StringBuffer output;
 
-
         public ShellCommandExecutor(String[] execString) {
             this(execString, null);
         }
@@ -326,27 +310,22 @@ abstract public class ShellUtils {
             this(execString, dir, null);
         }
 
-        public ShellCommandExecutor(String[] execString, File dir,
-                                    Map<String, String> env) {
-            this(execString, dir, env , 0L);
+        public ShellCommandExecutor(String[] execString, File dir, Map<String, String> env) {
+            this(execString, dir, env, 0L);
         }
 
         /**
          * Create a new instance of the ShellCommandExecutor to execute a command.
-         *
+         * 
          * @param execString The command to execute with arguments
-         * @param dir If not-null, specifies the directory which should be set
-         *            as the current working directory for the command.
-         *            If null, the current working directory is not modified.
-         * @param env If not-null, environment of the command will include the
-         *            key-value pairs specified in the map. If null, the current
-         *            environment is not modified.
-         * @param timeout Specifies the time in milliseconds, after which the
-         *                command will be killed and the status marked as timedout.
-         *                If 0, the command will not be timed out.
+         * @param dir If not-null, specifies the directory which should be set as the current working directory for the command. If null, the current working
+         *            directory is not modified.
+         * @param env If not-null, environment of the command will include the key-value pairs specified in the map. If null, the current environment is not
+         *            modified.
+         * @param timeout Specifies the time in milliseconds, after which the command will be killed and the status marked as timedout. If 0, the command will
+         *            not be timed out.
          */
-        public ShellCommandExecutor(String[] execString, File dir,
-                                    Map<String, String> env, long timeout) {
+        public ShellCommandExecutor(String[] execString, File dir, Map<String, String> env, long timeout) {
             command = execString.clone();
             if (dir != null) {
                 setWorkingDirectory(dir);
@@ -357,7 +336,6 @@ abstract public class ShellUtils {
             timeOutInterval = timeout;
         }
 
-
         /** Execute the shell command. */
         public void execute() throws IOException {
             this.run();
@@ -373,21 +351,19 @@ abstract public class ShellUtils {
             output = new StringBuffer();
             char[] buf = new char[512];
             int nRead;
-            while ( (nRead = lines.read(buf, 0, buf.length)) > 0 ) {
+            while ((nRead = lines.read(buf, 0, buf.length)) > 0) {
                 output.append(buf, 0, nRead);
             }
         }
 
-        /** Get the output of the shell command.*/
+        /** Get the output of the shell command. */
         public String getOutput() {
             return (output == null) ? "" : output.toString();
         }
 
         /**
-         * Returns the commands of this instance.
-         * Arguments with spaces in are presented with quotes round; other
-         * arguments are presented raw
-         *
+         * Returns the commands of this instance. Arguments with spaces in are presented with quotes round; other arguments are presented raw
+         * 
          * @return a string representation of the object.
          */
         @Override
@@ -407,9 +383,8 @@ abstract public class ShellUtils {
     }
 
     /**
-     * To check if the passed script to shell command executor timed out or
-     * not.
-     *
+     * To check if the passed script to shell command executor timed out or not.
+     * 
      * @return if the script timed out.
      */
     public boolean isTimedOut() {
@@ -418,52 +393,45 @@ abstract public class ShellUtils {
 
     /**
      * Set if the command has timed out.
-     *
+     * 
      */
     private void setTimedOut() {
         this.timedOut.set(true);
     }
 
-
     /**
-     * Static method to execute a shell command.
-     * Covers most of the simple cases without requiring the user to implement
-     * the <code>Shell</code> interface.
+     * Static method to execute a shell command. Covers most of the simple cases without requiring the user to implement the <code>Shell</code> interface.
+     * 
      * @param cmd shell command to execute.
      * @return the output of the executed command.
      */
-    public static String execCommand(String ... cmd) throws IOException {
+    public static String execCommand(String... cmd) throws IOException {
         return execCommand(null, cmd, 0L);
     }
 
     /**
-     * Static method to execute a shell command.
-     * Covers most of the simple cases without requiring the user to implement
-     * the <code>Shell</code> interface.
+     * Static method to execute a shell command. Covers most of the simple cases without requiring the user to implement the <code>Shell</code> interface.
+     * 
      * @param env the map of environment key=value
      * @param cmd shell command to execute.
      * @param timeout time in milliseconds after which script should be marked timeout
      * @return the output of the executed command.
      */
 
-    public static String execCommand(Map<String, String> env, String[] cmd,
-                                     long timeout) throws IOException {
-        ShellCommandExecutor exec = new ShellCommandExecutor(cmd, null, env,
-                                                             timeout);
+    public static String execCommand(Map<String, String> env, String[] cmd, long timeout) throws IOException {
+        ShellCommandExecutor exec = new ShellCommandExecutor(cmd, null, env, timeout);
         exec.execute();
         return exec.getOutput();
     }
 
     /**
-     * Static method to execute a shell command.
-     * Covers most of the simple cases without requiring the user to implement
-     * the <code>Shell</code> interface.
+     * Static method to execute a shell command. Covers most of the simple cases without requiring the user to implement the <code>Shell</code> interface.
+     * 
      * @param env the map of environment key=value
      * @param cmd shell command to execute.
      * @return the output of the executed command.
      */
-    public static String execCommand(Map<String,String> env, String ... cmd)
-        throws IOException {
+    public static String execCommand(Map<String, String> env, String... cmd) throws IOException {
         return execCommand(env, cmd, 0L);
     }
 
@@ -484,9 +452,9 @@ abstract public class ShellUtils {
             try {
                 p.exitValue();
             } catch (Exception e) {
-                //Process has not terminated.
-                //So check if it has completed
-                //if not just destroy it.
+                // Process has not terminated.
+                // So check if it has completed
+                // if not just destroy it.
                 if (p != null && !shell.completed.get()) {
                     shell.setTimedOut();
                     p.destroy();
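
The static execCommand() overloads above wrap ShellCommandExecutor for one-shot use: they run the command, buffer stdout, and surface a non-zero exit as an ExitCodeException. A small sketch using only the signatures shown in this diff (command names and the timeout value are illustrative):

    // simple form: no environment overrides, no timeout
    String user = ShellUtils.execCommand("whoami");

    // with an environment map and a 5-second timeout
    Map<String, String> env = new HashMap<String, String>();
    env.put("LC_ALL", "C");
    String listing = ShellUtils.execCommand(env, new String[] { "ls", "-l" }, 5000L);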

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/StormBoundedExponentialBackoffRetry.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/StormBoundedExponentialBackoffRetry.java b/jstorm-core/src/main/java/backtype/storm/utils/StormBoundedExponentialBackoffRetry.java
index 4aa5556..dd57832 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/StormBoundedExponentialBackoffRetry.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/StormBoundedExponentialBackoffRetry.java
@@ -31,12 +31,9 @@ public class StormBoundedExponentialBackoffRetry extends BoundedExponentialBacko
     private final int linearBaseSleepMs;
 
     /**
-     * The class provides generic exponential-linear backoff retry strategy for
-     * storm. It calculates threshold for exponentially increasing sleeptime
-     * for retries. Beyond this threshold, the sleeptime increase is linear.
-     * Also adds jitter for exponential/linear retry.
-     * It guarantees currSleepTimeMs >= prevSleepTimeMs and 
-     * baseSleepTimeMs <= currSleepTimeMs <= maxSleepTimeMs
+     * The class provides generic exponential-linear backoff retry strategy for storm. It calculates threshold for exponentially increasing sleeptime for
+     * retries. Beyond this threshold, the sleeptime increase is linear. Also adds jitter for exponential/linear retry. It guarantees currSleepTimeMs >=
+     * prevSleepTimeMs and baseSleepTimeMs <= currSleepTimeMs <= maxSleepTimeMs
      */
 
     public StormBoundedExponentialBackoffRetry(int baseSleepTimeMs, int maxSleepTimeMs, int maxRetries) {
@@ -44,17 +41,15 @@ public class StormBoundedExponentialBackoffRetry extends BoundedExponentialBacko
         expRetriesThreshold = 1;
         while ((1 << (expRetriesThreshold + 1)) < ((maxSleepTimeMs - baseSleepTimeMs) / 2))
             expRetriesThreshold++;
-        LOG.info("The baseSleepTimeMs [" + baseSleepTimeMs + "] the maxSleepTimeMs [" + maxSleepTimeMs + "] " +
-                "the maxRetries [" + maxRetries + "]");
+        LOG.info("The baseSleepTimeMs [" + baseSleepTimeMs + "] the maxSleepTimeMs [" + maxSleepTimeMs + "] " + "the maxRetries [" + maxRetries + "]");
         if (baseSleepTimeMs > maxSleepTimeMs) {
-            LOG.warn("Misconfiguration: the baseSleepTimeMs [" + baseSleepTimeMs + "] can't be greater than " +
-                    "the maxSleepTimeMs [" + maxSleepTimeMs + "].");
+            LOG.warn("Misconfiguration: the baseSleepTimeMs [" + baseSleepTimeMs + "] can't be greater than " + "the maxSleepTimeMs [" + maxSleepTimeMs + "].");
         }
-        if( maxRetries > 0 && maxRetries > expRetriesThreshold ) {
+        if (maxRetries > 0 && maxRetries > expRetriesThreshold) {
             this.stepSize = Math.max(1, (maxSleepTimeMs - (1 << expRetriesThreshold)) / (maxRetries - expRetriesThreshold));
         } else {
             this.stepSize = 1;
-	}
+        }
         this.linearBaseSleepMs = super.getBaseSleepTimeMs() + (1 << expRetriesThreshold);
     }
 
@@ -67,8 +62,7 @@ public class StormBoundedExponentialBackoffRetry extends BoundedExponentialBacko
             return sleepTimeMs;
         } else {
             int stepJitter = random.nextInt(stepSize);
-            return Math.min(super.getMaxSleepTimeMs(), (linearBaseSleepMs +
-                    (stepSize * (retryCount - expRetriesThreshold)) + stepJitter));
+            return Math.min(super.getMaxSleepTimeMs(), (linearBaseSleepMs + (stepSize * (retryCount - expRetriesThreshold)) + stepJitter));
         }
     }
 }
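
The retry strategy above sleeps exponentially longer until retryCount passes expRetriesThreshold, then grows linearly in stepSize increments with a little jitter, capped at maxSleepTimeMs. An illustrative restatement of the linear branch in the last hunk above (a standalone sketch, not the class's API, since the real method comes from Curator's retry hierarchy):

    // mirrors: Math.min(maxSleepTimeMs, linearBaseSleepMs + stepSize * (retryCount - threshold) + jitter)
    static int linearSleepMs(int retryCount, int expRetriesThreshold, int linearBaseSleepMs,
                             int stepSize, int maxSleepTimeMs, java.util.Random random) {
        int stepJitter = random.nextInt(stepSize); // jitter in [0, stepSize)
        return Math.min(maxSleepTimeMs,
                linearBaseSleepMs + stepSize * (retryCount - expRetriesThreshold) + stepJitter);
    }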

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/TestUtils.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/TestUtils.java b/jstorm-core/src/main/java/backtype/storm/utils/TestUtils.java
index 276559c..f905ae4 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/TestUtils.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/TestUtils.java
@@ -25,9 +25,7 @@ import java.util.Map;
 
 public class TestUtils extends Utils {
 
-    public static void testSetupBuilder(CuratorFrameworkFactory.Builder
-            builder, String zkStr, Map conf, ZookeeperAuthInfo auth)
-    {
+    public static void testSetupBuilder(CuratorFrameworkFactory.Builder builder, String zkStr, Map conf, ZookeeperAuthInfo auth) {
         setupBuilder(builder, zkStr, conf, auth);
     }
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/ThreadResourceManager.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/ThreadResourceManager.java b/jstorm-core/src/main/java/backtype/storm/utils/ThreadResourceManager.java
index e3ab03f..c43ff06 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/ThreadResourceManager.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/ThreadResourceManager.java
@@ -23,14 +23,14 @@ public class ThreadResourceManager<T> {
     public static interface ResourceFactory<X> {
         X makeResource();
     }
-    
+
     ResourceFactory<T> _factory;
     ConcurrentLinkedQueue<T> _resources = new ConcurrentLinkedQueue<T>();
-    
+
     public ThreadResourceManager(ResourceFactory<T> factory) {
         _factory = factory;
     }
-    
+
     public T acquire() {
         T ret = _resources.poll();
         if (ret == null) {
@@ -38,7 +38,7 @@ public class ThreadResourceManager<T> {
         }
         return ret;
     }
-    
+
     public void release(T resource) {
         _resources.add(resource);
     }
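
ThreadResourceManager is a tiny lock-free pool: acquire() pops a queued instance or builds one through the factory when the queue is empty, and release() returns it for reuse, so the pool grows to the peak number of concurrent borrowers. A usage sketch:

    ThreadResourceManager<StringBuilder> pool = new ThreadResourceManager<StringBuilder>(
            new ThreadResourceManager.ResourceFactory<StringBuilder>() {
                public StringBuilder makeResource() {
                    return new StringBuilder();
                }
            });
    StringBuilder sb = pool.acquire();  // reused if available, freshly built otherwise
    try {
        sb.setLength(0);                // callers must reset any leftover state themselves
        sb.append("work");
    } finally {
        pool.release(sb);               // hand it back for the next acquire()
    }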

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/ThriftTopologyUtils.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/ThriftTopologyUtils.java b/jstorm-core/src/main/java/backtype/storm/utils/ThriftTopologyUtils.java
index 47a48c7..c872721 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/ThriftTopologyUtils.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/ThriftTopologyUtils.java
@@ -17,17 +17,13 @@
  */
 package backtype.storm.utils;
 
+import backtype.storm.generated.*;
+
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
-import backtype.storm.generated.Bolt;
-import backtype.storm.generated.ComponentCommon;
-import backtype.storm.generated.SpoutSpec;
-import backtype.storm.generated.StateSpoutSpec;
-import backtype.storm.generated.StormTopology;
-
 public class ThriftTopologyUtils {
     public static Set<String> getComponentIds(StormTopology topology) {
         Set<String> ret = new HashSet<String>();
@@ -37,7 +33,7 @@ public class ThriftTopologyUtils {
         }
         return ret;
     }
-    
+
     public static Map<String, Object> getComponents(StormTopology topology) {
         Map<String, Object> ret = new HashMap<String, Object>();
         for (StormTopology._Fields f : StormTopology.metaDataMap.keySet()) {
@@ -46,7 +42,7 @@ public class ThriftTopologyUtils {
         }
         return ret;
     }
-    
+
     public static ComponentCommon getComponentCommon(StormTopology topology, String componentId) {
         for (StormTopology._Fields f : StormTopology.metaDataMap.keySet()) {
             Map<String, Object> componentMap = (Map<String, Object>) topology.getFieldValue(f);


http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/SpoutSpec.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/SpoutSpec.java b/jstorm-core/src/main/java/backtype/storm/generated/SpoutSpec.java
index 722fc54..065d187 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/SpoutSpec.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/SpoutSpec.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class SpoutSpec implements org.apache.thrift.TBase<SpoutSpec, SpoutSpec._Fields>, java.io.Serializable, Cloneable, Comparable<SpoutSpec> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SpoutSpec");
 
@@ -337,11 +337,11 @@ public class SpoutSpec implements org.apache.thrift.TBase<SpoutSpec, SpoutSpec._
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -369,14 +369,14 @@ public class SpoutSpec implements org.apache.thrift.TBase<SpoutSpec, SpoutSpec._
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_spout_object()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'spout_object' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'spout_object' is unset! Struct:" + toString());
     }
 
     if (!is_set_common()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'common' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'common' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -388,7 +388,7 @@ public class SpoutSpec implements org.apache.thrift.TBase<SpoutSpec, SpoutSpec._
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -396,7 +396,7 @@ public class SpoutSpec implements org.apache.thrift.TBase<SpoutSpec, SpoutSpec._
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -409,7 +409,7 @@ public class SpoutSpec implements org.apache.thrift.TBase<SpoutSpec, SpoutSpec._
 
   private static class SpoutSpecStandardScheme extends StandardScheme<SpoutSpec> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, SpoutSpec struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, SpoutSpec struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -446,7 +446,7 @@ public class SpoutSpec implements org.apache.thrift.TBase<SpoutSpec, SpoutSpec._
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, SpoutSpec struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, SpoutSpec struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -475,14 +475,14 @@ public class SpoutSpec implements org.apache.thrift.TBase<SpoutSpec, SpoutSpec._
   private static class SpoutSpecTupleScheme extends TupleScheme<SpoutSpec> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, SpoutSpec struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, SpoutSpec struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       struct.spout_object.write(oprot);
       struct.common.write(oprot);
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, SpoutSpec struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, SpoutSpec struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       struct.spout_object = new ComponentObject();
       struct.spout_object.read(iprot);

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/StateSpoutSpec.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/StateSpoutSpec.java b/jstorm-core/src/main/java/backtype/storm/generated/StateSpoutSpec.java
index 66cc735..ee1bc54 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/StateSpoutSpec.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/StateSpoutSpec.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class StateSpoutSpec implements org.apache.thrift.TBase<StateSpoutSpec, StateSpoutSpec._Fields>, java.io.Serializable, Cloneable, Comparable<StateSpoutSpec> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("StateSpoutSpec");
 
@@ -337,11 +337,11 @@ public class StateSpoutSpec implements org.apache.thrift.TBase<StateSpoutSpec, S
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -369,14 +369,14 @@ public class StateSpoutSpec implements org.apache.thrift.TBase<StateSpoutSpec, S
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_state_spout_object()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'state_spout_object' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'state_spout_object' is unset! Struct:" + toString());
     }
 
     if (!is_set_common()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'common' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'common' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -388,7 +388,7 @@ public class StateSpoutSpec implements org.apache.thrift.TBase<StateSpoutSpec, S
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -396,7 +396,7 @@ public class StateSpoutSpec implements org.apache.thrift.TBase<StateSpoutSpec, S
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -409,7 +409,7 @@ public class StateSpoutSpec implements org.apache.thrift.TBase<StateSpoutSpec, S
 
   private static class StateSpoutSpecStandardScheme extends StandardScheme<StateSpoutSpec> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, StateSpoutSpec struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, StateSpoutSpec struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -446,7 +446,7 @@ public class StateSpoutSpec implements org.apache.thrift.TBase<StateSpoutSpec, S
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, StateSpoutSpec struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, StateSpoutSpec struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -475,14 +475,14 @@ public class StateSpoutSpec implements org.apache.thrift.TBase<StateSpoutSpec, S
   private static class StateSpoutSpecTupleScheme extends TupleScheme<StateSpoutSpec> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, StateSpoutSpec struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, StateSpoutSpec struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       struct.state_spout_object.write(oprot);
       struct.common.write(oprot);
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, StateSpoutSpec struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, StateSpoutSpec struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       struct.state_spout_object = new ComponentObject();
       struct.state_spout_object.read(iprot);

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/StormTopology.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/StormTopology.java b/jstorm-core/src/main/java/backtype/storm/generated/StormTopology.java
index 205f73d..4358ab1 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/StormTopology.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/StormTopology.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class StormTopology implements org.apache.thrift.TBase<StormTopology, StormTopology._Fields>, java.io.Serializable, Cloneable, Comparable<StormTopology> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("StormTopology");
 
@@ -485,11 +485,11 @@ public class StormTopology implements org.apache.thrift.TBase<StormTopology, Sto
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -525,18 +525,18 @@ public class StormTopology implements org.apache.thrift.TBase<StormTopology, Sto
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_spouts()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'spouts' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'spouts' is unset! Struct:" + toString());
     }
 
     if (!is_set_bolts()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'bolts' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'bolts' is unset! Struct:" + toString());
     }
 
     if (!is_set_state_spouts()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'state_spouts' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'state_spouts' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -545,7 +545,7 @@ public class StormTopology implements org.apache.thrift.TBase<StormTopology, Sto
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -553,7 +553,7 @@ public class StormTopology implements org.apache.thrift.TBase<StormTopology, Sto
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -566,7 +566,7 @@ public class StormTopology implements org.apache.thrift.TBase<StormTopology, Sto
 
   private static class StormTopologyStandardScheme extends StandardScheme<StormTopology> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, StormTopology struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, StormTopology struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -648,7 +648,7 @@ public class StormTopology implements org.apache.thrift.TBase<StormTopology, Sto
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, StormTopology struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, StormTopology struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -706,7 +706,7 @@ public class StormTopology implements org.apache.thrift.TBase<StormTopology, Sto
   private static class StormTopologyTupleScheme extends TupleScheme<StormTopology> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, StormTopology struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, StormTopology struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       {
         oprot.writeI32(struct.spouts.size());
@@ -735,7 +735,7 @@ public class StormTopology implements org.apache.thrift.TBase<StormTopology, Sto
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, StormTopology struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, StormTopology struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       {
         org.apache.thrift.protocol.TMap _map62 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/StreamInfo.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/StreamInfo.java b/jstorm-core/src/main/java/backtype/storm/generated/StreamInfo.java
index abed2ea..b586ac2 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/StreamInfo.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/StreamInfo.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class StreamInfo implements org.apache.thrift.TBase<StreamInfo, StreamInfo._Fields>, java.io.Serializable, Cloneable, Comparable<StreamInfo> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("StreamInfo");
 
@@ -356,11 +356,11 @@ public class StreamInfo implements org.apache.thrift.TBase<StreamInfo, StreamInf
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -384,14 +384,14 @@ public class StreamInfo implements org.apache.thrift.TBase<StreamInfo, StreamInf
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_output_fields()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'output_fields' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'output_fields' is unset! Struct:" + toString());
     }
 
     if (!is_set_direct()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'direct' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'direct' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -400,7 +400,7 @@ public class StreamInfo implements org.apache.thrift.TBase<StreamInfo, StreamInf
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -410,7 +410,7 @@ public class StreamInfo implements org.apache.thrift.TBase<StreamInfo, StreamInf
       // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
       __isset_bitfield = 0;
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -423,7 +423,7 @@ public class StreamInfo implements org.apache.thrift.TBase<StreamInfo, StreamInf
 
   private static class StreamInfoStandardScheme extends StandardScheme<StreamInfo> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, StreamInfo struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, StreamInfo struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -468,7 +468,7 @@ public class StreamInfo implements org.apache.thrift.TBase<StreamInfo, StreamInf
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, StreamInfo struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, StreamInfo struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -502,7 +502,7 @@ public class StreamInfo implements org.apache.thrift.TBase<StreamInfo, StreamInf
   private static class StreamInfoTupleScheme extends TupleScheme<StreamInfo> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, StreamInfo struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, StreamInfo struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       {
         oprot.writeI32(struct.output_fields.size());
@@ -515,7 +515,7 @@ public class StreamInfo implements org.apache.thrift.TBase<StreamInfo, StreamInf
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, StreamInfo struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, StreamInfo struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       {
         org.apache.thrift.protocol.TList _list21 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/SubmitOptions.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/SubmitOptions.java b/jstorm-core/src/main/java/backtype/storm/generated/SubmitOptions.java
index b4343ab..4a265cb 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/SubmitOptions.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/SubmitOptions.java
@@ -34,11 +34,12 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class SubmitOptions implements org.apache.thrift.TBase<SubmitOptions, SubmitOptions._Fields>, java.io.Serializable, Cloneable, Comparable<SubmitOptions> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SubmitOptions");
 
   private static final org.apache.thrift.protocol.TField INITIAL_STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("initial_status", org.apache.thrift.protocol.TType.I32, (short)1);
+  private static final org.apache.thrift.protocol.TField CREDS_FIELD_DESC = new org.apache.thrift.protocol.TField("creds", org.apache.thrift.protocol.TType.STRUCT, (short)2);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -47,6 +48,7 @@ public class SubmitOptions implements org.apache.thrift.TBase<SubmitOptions, Sub
   }
 
   private TopologyInitialStatus initial_status; // required
+  private Credentials creds; // optional
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -54,7 +56,8 @@ public class SubmitOptions implements org.apache.thrift.TBase<SubmitOptions, Sub
      * 
      * @see TopologyInitialStatus
      */
-    INITIAL_STATUS((short)1, "initial_status");
+    INITIAL_STATUS((short)1, "initial_status"),
+    CREDS((short)2, "creds");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -71,6 +74,8 @@ public class SubmitOptions implements org.apache.thrift.TBase<SubmitOptions, Sub
       switch(fieldId) {
         case 1: // INITIAL_STATUS
           return INITIAL_STATUS;
+        case 2: // CREDS
+          return CREDS;
         default:
           return null;
       }
@@ -111,11 +116,14 @@ public class SubmitOptions implements org.apache.thrift.TBase<SubmitOptions, Sub
   }
 
   // isset id assignments
+  private static final _Fields optionals[] = {_Fields.CREDS};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
     tmpMap.put(_Fields.INITIAL_STATUS, new org.apache.thrift.meta_data.FieldMetaData("initial_status", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TopologyInitialStatus.class)));
+    tmpMap.put(_Fields.CREDS, new org.apache.thrift.meta_data.FieldMetaData("creds", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Credentials.class)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SubmitOptions.class, metaDataMap);
   }
@@ -137,6 +145,9 @@ public class SubmitOptions implements org.apache.thrift.TBase<SubmitOptions, Sub
     if (other.is_set_initial_status()) {
       this.initial_status = other.initial_status;
     }
+    if (other.is_set_creds()) {
+      this.creds = new Credentials(other.creds);
+    }
   }
 
   public SubmitOptions deepCopy() {
@@ -146,6 +157,7 @@ public class SubmitOptions implements org.apache.thrift.TBase<SubmitOptions, Sub
   @Override
   public void clear() {
     this.initial_status = null;
+    this.creds = null;
   }
 
   /**
@@ -179,6 +191,29 @@ public class SubmitOptions implements org.apache.thrift.TBase<SubmitOptions, Sub
     }
   }
 
+  public Credentials get_creds() {
+    return this.creds;
+  }
+
+  public void set_creds(Credentials creds) {
+    this.creds = creds;
+  }
+
+  public void unset_creds() {
+    this.creds = null;
+  }
+
+  /** Returns true if field creds is set (has been assigned a value) and false otherwise */
+  public boolean is_set_creds() {
+    return this.creds != null;
+  }
+
+  public void set_creds_isSet(boolean value) {
+    if (!value) {
+      this.creds = null;
+    }
+  }
+
   public void setFieldValue(_Fields field, Object value) {
     switch (field) {
     case INITIAL_STATUS:
@@ -189,6 +224,14 @@ public class SubmitOptions implements org.apache.thrift.TBase<SubmitOptions, Sub
       }
       break;
 
+    case CREDS:
+      if (value == null) {
+        unset_creds();
+      } else {
+        set_creds((Credentials)value);
+      }
+      break;
+
     }
   }
 
@@ -197,6 +240,9 @@ public class SubmitOptions implements org.apache.thrift.TBase<SubmitOptions, Sub
     case INITIAL_STATUS:
       return get_initial_status();
 
+    case CREDS:
+      return get_creds();
+
     }
     throw new IllegalStateException();
   }
@@ -210,6 +256,8 @@ public class SubmitOptions implements org.apache.thrift.TBase<SubmitOptions, Sub
     switch (field) {
     case INITIAL_STATUS:
       return is_set_initial_status();
+    case CREDS:
+      return is_set_creds();
     }
     throw new IllegalStateException();
   }
@@ -236,6 +284,15 @@ public class SubmitOptions implements org.apache.thrift.TBase<SubmitOptions, Sub
         return false;
     }
 
+    boolean this_present_creds = true && this.is_set_creds();
+    boolean that_present_creds = true && that.is_set_creds();
+    if (this_present_creds || that_present_creds) {
+      if (!(this_present_creds && that_present_creds))
+        return false;
+      if (!this.creds.equals(that.creds))
+        return false;
+    }
+
     return true;
   }
 
@@ -248,6 +305,11 @@ public class SubmitOptions implements org.apache.thrift.TBase<SubmitOptions, Sub
     if (present_initial_status)
       list.add(initial_status.getValue());
 
+    boolean present_creds = true && (is_set_creds());
+    list.add(present_creds);
+    if (present_creds)
+      list.add(creds);
+
     return list.hashCode();
   }
 
@@ -269,6 +331,16 @@ public class SubmitOptions implements org.apache.thrift.TBase<SubmitOptions, Sub
         return lastComparison;
       }
     }
+    lastComparison = Boolean.valueOf(is_set_creds()).compareTo(other.is_set_creds());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_creds()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.creds, other.creds);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
     return 0;
   }
 
@@ -276,11 +348,11 @@ public class SubmitOptions implements org.apache.thrift.TBase<SubmitOptions, Sub
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -296,23 +368,36 @@ public class SubmitOptions implements org.apache.thrift.TBase<SubmitOptions, Sub
       sb.append(this.initial_status);
     }
     first = false;
+    if (is_set_creds()) {
+      if (!first) sb.append(", ");
+      sb.append("creds:");
+      if (this.creds == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.creds);
+      }
+      first = false;
+    }
     sb.append(")");
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_initial_status()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'initial_status' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'initial_status' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
+    if (creds != null) {
+      creds.validate();
+    }
   }
 
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -320,7 +405,7 @@ public class SubmitOptions implements org.apache.thrift.TBase<SubmitOptions, Sub
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -333,7 +418,7 @@ public class SubmitOptions implements org.apache.thrift.TBase<SubmitOptions, Sub
 
   private static class SubmitOptionsStandardScheme extends StandardScheme<SubmitOptions> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, SubmitOptions struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, SubmitOptions struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -351,6 +436,15 @@ public class SubmitOptions implements org.apache.thrift.TBase<SubmitOptions, Sub
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
+          case 2: // CREDS
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.creds = new Credentials();
+              struct.creds.read(iprot);
+              struct.set_creds_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
           default:
             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
         }
@@ -360,7 +454,7 @@ public class SubmitOptions implements org.apache.thrift.TBase<SubmitOptions, Sub
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, SubmitOptions struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, SubmitOptions struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -369,6 +463,13 @@ public class SubmitOptions implements org.apache.thrift.TBase<SubmitOptions, Sub
         oprot.writeI32(struct.initial_status.getValue());
         oprot.writeFieldEnd();
       }
+      if (struct.creds != null) {
+        if (struct.is_set_creds()) {
+          oprot.writeFieldBegin(CREDS_FIELD_DESC);
+          struct.creds.write(oprot);
+          oprot.writeFieldEnd();
+        }
+      }
       oprot.writeFieldStop();
       oprot.writeStructEnd();
     }
@@ -384,16 +485,30 @@ public class SubmitOptions implements org.apache.thrift.TBase<SubmitOptions, Sub
   private static class SubmitOptionsTupleScheme extends TupleScheme<SubmitOptions> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, SubmitOptions struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, SubmitOptions struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       oprot.writeI32(struct.initial_status.getValue());
+      BitSet optionals = new BitSet();
+      if (struct.is_set_creds()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.is_set_creds()) {
+        struct.creds.write(oprot);
+      }
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, SubmitOptions struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, SubmitOptions struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       struct.initial_status = backtype.storm.generated.TopologyInitialStatus.findByValue(iprot.readI32());
       struct.set_initial_status_isSet(true);
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.creds = new Credentials();
+        struct.creds.read(iprot);
+        struct.set_creds_isSet(true);
+      }
     }
   }
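
The diff above gives SubmitOptions an optional Credentials field (id 2) next to the required initial_status (id 1): new accessors, equals/hashCode/compareTo clauses, and serialization in both the standard and tuple schemes, where the tuple scheme tracks the optional field with a one-bit BitSet. A minimal client-side sketch of the new surface, assuming the generated Credentials(Map) constructor and with a purely hypothetical secret entry:

    import java.util.HashMap;
    import java.util.Map;

    import backtype.storm.generated.Credentials;
    import backtype.storm.generated.SubmitOptions;
    import backtype.storm.generated.TopologyInitialStatus;

    public class SubmitOptionsExample {
        public static void main(String[] args) {
            // initial_status is the struct's only REQUIRED field.
            SubmitOptions options = new SubmitOptions(TopologyInitialStatus.ACTIVE);

            // creds is OPTIONAL: while it stays unset, is_set_creds() is false,
            // the standard scheme writes no field at all, and the tuple scheme
            // clears bit 0 of the optionals BitSet.
            Map<String, String> secrets = new HashMap<String, String>();
            secrets.put("ticket", "example-token"); // hypothetical credential entry
            options.set_creds(new Credentials(secrets));
        }
    }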
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/SupervisorSummary.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/SupervisorSummary.java b/jstorm-core/src/main/java/backtype/storm/generated/SupervisorSummary.java
index 903dab0..3be87b3 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/SupervisorSummary.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/SupervisorSummary.java
@@ -34,15 +34,15 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class SupervisorSummary implements org.apache.thrift.TBase<SupervisorSummary, SupervisorSummary._Fields>, java.io.Serializable, Cloneable, Comparable<SupervisorSummary> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SupervisorSummary");
 
   private static final org.apache.thrift.protocol.TField HOST_FIELD_DESC = new org.apache.thrift.protocol.TField("host", org.apache.thrift.protocol.TType.STRING, (short)1);
-  private static final org.apache.thrift.protocol.TField SUPERVISOR_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("supervisor_id", org.apache.thrift.protocol.TType.STRING, (short)2);
-  private static final org.apache.thrift.protocol.TField UPTIME_SECS_FIELD_DESC = new org.apache.thrift.protocol.TField("uptime_secs", org.apache.thrift.protocol.TType.I32, (short)3);
-  private static final org.apache.thrift.protocol.TField NUM_WORKERS_FIELD_DESC = new org.apache.thrift.protocol.TField("num_workers", org.apache.thrift.protocol.TType.I32, (short)4);
-  private static final org.apache.thrift.protocol.TField NUM_USED_WORKERS_FIELD_DESC = new org.apache.thrift.protocol.TField("num_used_workers", org.apache.thrift.protocol.TType.I32, (short)5);
+  private static final org.apache.thrift.protocol.TField SUPERVISOR_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("supervisorId", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField UPTIME_SECS_FIELD_DESC = new org.apache.thrift.protocol.TField("uptimeSecs", org.apache.thrift.protocol.TType.I32, (short)3);
+  private static final org.apache.thrift.protocol.TField NUM_WORKERS_FIELD_DESC = new org.apache.thrift.protocol.TField("numWorkers", org.apache.thrift.protocol.TType.I32, (short)4);
+  private static final org.apache.thrift.protocol.TField NUM_USED_WORKERS_FIELD_DESC = new org.apache.thrift.protocol.TField("numUsedWorkers", org.apache.thrift.protocol.TType.I32, (short)5);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -51,18 +51,18 @@ public class SupervisorSummary implements org.apache.thrift.TBase<SupervisorSumm
   }
 
   private String host; // required
-  private String supervisor_id; // required
-  private int uptime_secs; // required
-  private int num_workers; // required
-  private int num_used_workers; // required
+  private String supervisorId; // required
+  private int uptimeSecs; // required
+  private int numWorkers; // required
+  private int numUsedWorkers; // required
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
     HOST((short)1, "host"),
-    SUPERVISOR_ID((short)2, "supervisor_id"),
-    UPTIME_SECS((short)3, "uptime_secs"),
-    NUM_WORKERS((short)4, "num_workers"),
-    NUM_USED_WORKERS((short)5, "num_used_workers");
+    SUPERVISOR_ID((short)2, "supervisorId"),
+    UPTIME_SECS((short)3, "uptimeSecs"),
+    NUM_WORKERS((short)4, "numWorkers"),
+    NUM_USED_WORKERS((short)5, "numUsedWorkers");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -127,22 +127,22 @@ public class SupervisorSummary implements org.apache.thrift.TBase<SupervisorSumm
   }
 
   // isset id assignments
-  private static final int __UPTIME_SECS_ISSET_ID = 0;
-  private static final int __NUM_WORKERS_ISSET_ID = 1;
-  private static final int __NUM_USED_WORKERS_ISSET_ID = 2;
+  private static final int __UPTIMESECS_ISSET_ID = 0;
+  private static final int __NUMWORKERS_ISSET_ID = 1;
+  private static final int __NUMUSEDWORKERS_ISSET_ID = 2;
   private byte __isset_bitfield = 0;
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
     tmpMap.put(_Fields.HOST, new org.apache.thrift.meta_data.FieldMetaData("host", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.SUPERVISOR_ID, new org.apache.thrift.meta_data.FieldMetaData("supervisor_id", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+    tmpMap.put(_Fields.SUPERVISOR_ID, new org.apache.thrift.meta_data.FieldMetaData("supervisorId", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.UPTIME_SECS, new org.apache.thrift.meta_data.FieldMetaData("uptime_secs", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+    tmpMap.put(_Fields.UPTIME_SECS, new org.apache.thrift.meta_data.FieldMetaData("uptimeSecs", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
-    tmpMap.put(_Fields.NUM_WORKERS, new org.apache.thrift.meta_data.FieldMetaData("num_workers", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+    tmpMap.put(_Fields.NUM_WORKERS, new org.apache.thrift.meta_data.FieldMetaData("numWorkers", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
-    tmpMap.put(_Fields.NUM_USED_WORKERS, new org.apache.thrift.meta_data.FieldMetaData("num_used_workers", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+    tmpMap.put(_Fields.NUM_USED_WORKERS, new org.apache.thrift.meta_data.FieldMetaData("numUsedWorkers", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SupervisorSummary.class, metaDataMap);
@@ -153,20 +153,20 @@ public class SupervisorSummary implements org.apache.thrift.TBase<SupervisorSumm
 
   public SupervisorSummary(
     String host,
-    String supervisor_id,
-    int uptime_secs,
-    int num_workers,
-    int num_used_workers)
+    String supervisorId,
+    int uptimeSecs,
+    int numWorkers,
+    int numUsedWorkers)
   {
     this();
     this.host = host;
-    this.supervisor_id = supervisor_id;
-    this.uptime_secs = uptime_secs;
-    set_uptime_secs_isSet(true);
-    this.num_workers = num_workers;
-    set_num_workers_isSet(true);
-    this.num_used_workers = num_used_workers;
-    set_num_used_workers_isSet(true);
+    this.supervisorId = supervisorId;
+    this.uptimeSecs = uptimeSecs;
+    set_uptimeSecs_isSet(true);
+    this.numWorkers = numWorkers;
+    set_numWorkers_isSet(true);
+    this.numUsedWorkers = numUsedWorkers;
+    set_numUsedWorkers_isSet(true);
   }
 
   /**
@@ -177,12 +177,12 @@ public class SupervisorSummary implements org.apache.thrift.TBase<SupervisorSumm
     if (other.is_set_host()) {
       this.host = other.host;
     }
-    if (other.is_set_supervisor_id()) {
-      this.supervisor_id = other.supervisor_id;
+    if (other.is_set_supervisorId()) {
+      this.supervisorId = other.supervisorId;
     }
-    this.uptime_secs = other.uptime_secs;
-    this.num_workers = other.num_workers;
-    this.num_used_workers = other.num_used_workers;
+    this.uptimeSecs = other.uptimeSecs;
+    this.numWorkers = other.numWorkers;
+    this.numUsedWorkers = other.numUsedWorkers;
   }
 
   public SupervisorSummary deepCopy() {
@@ -192,13 +192,13 @@ public class SupervisorSummary implements org.apache.thrift.TBase<SupervisorSumm
   @Override
   public void clear() {
     this.host = null;
-    this.supervisor_id = null;
-    set_uptime_secs_isSet(false);
-    this.uptime_secs = 0;
-    set_num_workers_isSet(false);
-    this.num_workers = 0;
-    set_num_used_workers_isSet(false);
-    this.num_used_workers = 0;
+    this.supervisorId = null;
+    set_uptimeSecs_isSet(false);
+    this.uptimeSecs = 0;
+    set_numWorkers_isSet(false);
+    this.numWorkers = 0;
+    set_numUsedWorkers_isSet(false);
+    this.numUsedWorkers = 0;
   }
 
   public String get_host() {
@@ -224,93 +224,93 @@ public class SupervisorSummary implements org.apache.thrift.TBase<SupervisorSumm
     }
   }
 
-  public String get_supervisor_id() {
-    return this.supervisor_id;
+  public String get_supervisorId() {
+    return this.supervisorId;
   }
 
-  public void set_supervisor_id(String supervisor_id) {
-    this.supervisor_id = supervisor_id;
+  public void set_supervisorId(String supervisorId) {
+    this.supervisorId = supervisorId;
   }
 
-  public void unset_supervisor_id() {
-    this.supervisor_id = null;
+  public void unset_supervisorId() {
+    this.supervisorId = null;
   }
 
-  /** Returns true if field supervisor_id is set (has been assigned a value) and false otherwise */
-  public boolean is_set_supervisor_id() {
-    return this.supervisor_id != null;
+  /** Returns true if field supervisorId is set (has been assigned a value) and false otherwise */
+  public boolean is_set_supervisorId() {
+    return this.supervisorId != null;
   }
 
-  public void set_supervisor_id_isSet(boolean value) {
+  public void set_supervisorId_isSet(boolean value) {
     if (!value) {
-      this.supervisor_id = null;
+      this.supervisorId = null;
     }
   }
 
-  public int get_uptime_secs() {
-    return this.uptime_secs;
+  public int get_uptimeSecs() {
+    return this.uptimeSecs;
   }
 
-  public void set_uptime_secs(int uptime_secs) {
-    this.uptime_secs = uptime_secs;
-    set_uptime_secs_isSet(true);
+  public void set_uptimeSecs(int uptimeSecs) {
+    this.uptimeSecs = uptimeSecs;
+    set_uptimeSecs_isSet(true);
   }
 
-  public void unset_uptime_secs() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __UPTIME_SECS_ISSET_ID);
+  public void unset_uptimeSecs() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __UPTIMESECS_ISSET_ID);
   }
 
-  /** Returns true if field uptime_secs is set (has been assigned a value) and false otherwise */
-  public boolean is_set_uptime_secs() {
-    return EncodingUtils.testBit(__isset_bitfield, __UPTIME_SECS_ISSET_ID);
+  /** Returns true if field uptimeSecs is set (has been assigned a value) and false otherwise */
+  public boolean is_set_uptimeSecs() {
+    return EncodingUtils.testBit(__isset_bitfield, __UPTIMESECS_ISSET_ID);
   }
 
-  public void set_uptime_secs_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __UPTIME_SECS_ISSET_ID, value);
+  public void set_uptimeSecs_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __UPTIMESECS_ISSET_ID, value);
   }
 
-  public int get_num_workers() {
-    return this.num_workers;
+  public int get_numWorkers() {
+    return this.numWorkers;
   }
 
-  public void set_num_workers(int num_workers) {
-    this.num_workers = num_workers;
-    set_num_workers_isSet(true);
+  public void set_numWorkers(int numWorkers) {
+    this.numWorkers = numWorkers;
+    set_numWorkers_isSet(true);
   }
 
-  public void unset_num_workers() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUM_WORKERS_ISSET_ID);
+  public void unset_numWorkers() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUMWORKERS_ISSET_ID);
   }
 
-  /** Returns true if field num_workers is set (has been assigned a value) and false otherwise */
-  public boolean is_set_num_workers() {
-    return EncodingUtils.testBit(__isset_bitfield, __NUM_WORKERS_ISSET_ID);
+  /** Returns true if field numWorkers is set (has been assigned a value) and false otherwise */
+  public boolean is_set_numWorkers() {
+    return EncodingUtils.testBit(__isset_bitfield, __NUMWORKERS_ISSET_ID);
   }
 
-  public void set_num_workers_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUM_WORKERS_ISSET_ID, value);
+  public void set_numWorkers_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUMWORKERS_ISSET_ID, value);
   }
 
-  public int get_num_used_workers() {
-    return this.num_used_workers;
+  public int get_numUsedWorkers() {
+    return this.numUsedWorkers;
   }
 
-  public void set_num_used_workers(int num_used_workers) {
-    this.num_used_workers = num_used_workers;
-    set_num_used_workers_isSet(true);
+  public void set_numUsedWorkers(int numUsedWorkers) {
+    this.numUsedWorkers = numUsedWorkers;
+    set_numUsedWorkers_isSet(true);
   }
 
-  public void unset_num_used_workers() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUM_USED_WORKERS_ISSET_ID);
+  public void unset_numUsedWorkers() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUMUSEDWORKERS_ISSET_ID);
   }
 
-  /** Returns true if field num_used_workers is set (has been assigned a value) and false otherwise */
-  public boolean is_set_num_used_workers() {
-    return EncodingUtils.testBit(__isset_bitfield, __NUM_USED_WORKERS_ISSET_ID);
+  /** Returns true if field numUsedWorkers is set (has been assigned a value) and false otherwise */
+  public boolean is_set_numUsedWorkers() {
+    return EncodingUtils.testBit(__isset_bitfield, __NUMUSEDWORKERS_ISSET_ID);
   }
 
-  public void set_num_used_workers_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUM_USED_WORKERS_ISSET_ID, value);
+  public void set_numUsedWorkers_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUMUSEDWORKERS_ISSET_ID, value);
   }
 
   public void setFieldValue(_Fields field, Object value) {
@@ -325,33 +325,33 @@ public class SupervisorSummary implements org.apache.thrift.TBase<SupervisorSumm
 
     case SUPERVISOR_ID:
       if (value == null) {
-        unset_supervisor_id();
+        unset_supervisorId();
       } else {
-        set_supervisor_id((String)value);
+        set_supervisorId((String)value);
       }
       break;
 
     case UPTIME_SECS:
       if (value == null) {
-        unset_uptime_secs();
+        unset_uptimeSecs();
       } else {
-        set_uptime_secs((Integer)value);
+        set_uptimeSecs((Integer)value);
       }
       break;
 
     case NUM_WORKERS:
       if (value == null) {
-        unset_num_workers();
+        unset_numWorkers();
       } else {
-        set_num_workers((Integer)value);
+        set_numWorkers((Integer)value);
       }
       break;
 
     case NUM_USED_WORKERS:
       if (value == null) {
-        unset_num_used_workers();
+        unset_numUsedWorkers();
       } else {
-        set_num_used_workers((Integer)value);
+        set_numUsedWorkers((Integer)value);
       }
       break;
 
@@ -364,16 +364,16 @@ public class SupervisorSummary implements org.apache.thrift.TBase<SupervisorSumm
       return get_host();
 
     case SUPERVISOR_ID:
-      return get_supervisor_id();
+      return get_supervisorId();
 
     case UPTIME_SECS:
-      return Integer.valueOf(get_uptime_secs());
+      return Integer.valueOf(get_uptimeSecs());
 
     case NUM_WORKERS:
-      return Integer.valueOf(get_num_workers());
+      return Integer.valueOf(get_numWorkers());
 
     case NUM_USED_WORKERS:
-      return Integer.valueOf(get_num_used_workers());
+      return Integer.valueOf(get_numUsedWorkers());
 
     }
     throw new IllegalStateException();
@@ -389,13 +389,13 @@ public class SupervisorSummary implements org.apache.thrift.TBase<SupervisorSumm
     case HOST:
       return is_set_host();
     case SUPERVISOR_ID:
-      return is_set_supervisor_id();
+      return is_set_supervisorId();
     case UPTIME_SECS:
-      return is_set_uptime_secs();
+      return is_set_uptimeSecs();
     case NUM_WORKERS:
-      return is_set_num_workers();
+      return is_set_numWorkers();
     case NUM_USED_WORKERS:
-      return is_set_num_used_workers();
+      return is_set_numUsedWorkers();
     }
     throw new IllegalStateException();
   }
@@ -422,39 +422,39 @@ public class SupervisorSummary implements org.apache.thrift.TBase<SupervisorSumm
         return false;
     }
 
-    boolean this_present_supervisor_id = true && this.is_set_supervisor_id();
-    boolean that_present_supervisor_id = true && that.is_set_supervisor_id();
-    if (this_present_supervisor_id || that_present_supervisor_id) {
-      if (!(this_present_supervisor_id && that_present_supervisor_id))
+    boolean this_present_supervisorId = true && this.is_set_supervisorId();
+    boolean that_present_supervisorId = true && that.is_set_supervisorId();
+    if (this_present_supervisorId || that_present_supervisorId) {
+      if (!(this_present_supervisorId && that_present_supervisorId))
         return false;
-      if (!this.supervisor_id.equals(that.supervisor_id))
+      if (!this.supervisorId.equals(that.supervisorId))
         return false;
     }
 
-    boolean this_present_uptime_secs = true;
-    boolean that_present_uptime_secs = true;
-    if (this_present_uptime_secs || that_present_uptime_secs) {
-      if (!(this_present_uptime_secs && that_present_uptime_secs))
+    boolean this_present_uptimeSecs = true;
+    boolean that_present_uptimeSecs = true;
+    if (this_present_uptimeSecs || that_present_uptimeSecs) {
+      if (!(this_present_uptimeSecs && that_present_uptimeSecs))
         return false;
-      if (this.uptime_secs != that.uptime_secs)
+      if (this.uptimeSecs != that.uptimeSecs)
         return false;
     }
 
-    boolean this_present_num_workers = true;
-    boolean that_present_num_workers = true;
-    if (this_present_num_workers || that_present_num_workers) {
-      if (!(this_present_num_workers && that_present_num_workers))
+    boolean this_present_numWorkers = true;
+    boolean that_present_numWorkers = true;
+    if (this_present_numWorkers || that_present_numWorkers) {
+      if (!(this_present_numWorkers && that_present_numWorkers))
         return false;
-      if (this.num_workers != that.num_workers)
+      if (this.numWorkers != that.numWorkers)
         return false;
     }
 
-    boolean this_present_num_used_workers = true;
-    boolean that_present_num_used_workers = true;
-    if (this_present_num_used_workers || that_present_num_used_workers) {
-      if (!(this_present_num_used_workers && that_present_num_used_workers))
+    boolean this_present_numUsedWorkers = true;
+    boolean that_present_numUsedWorkers = true;
+    if (this_present_numUsedWorkers || that_present_numUsedWorkers) {
+      if (!(this_present_numUsedWorkers && that_present_numUsedWorkers))
         return false;
-      if (this.num_used_workers != that.num_used_workers)
+      if (this.numUsedWorkers != that.numUsedWorkers)
         return false;
     }
 
@@ -470,25 +470,25 @@ public class SupervisorSummary implements org.apache.thrift.TBase<SupervisorSumm
     if (present_host)
       list.add(host);
 
-    boolean present_supervisor_id = true && (is_set_supervisor_id());
-    list.add(present_supervisor_id);
-    if (present_supervisor_id)
-      list.add(supervisor_id);
+    boolean present_supervisorId = true && (is_set_supervisorId());
+    list.add(present_supervisorId);
+    if (present_supervisorId)
+      list.add(supervisorId);
 
-    boolean present_uptime_secs = true;
-    list.add(present_uptime_secs);
-    if (present_uptime_secs)
-      list.add(uptime_secs);
+    boolean present_uptimeSecs = true;
+    list.add(present_uptimeSecs);
+    if (present_uptimeSecs)
+      list.add(uptimeSecs);
 
-    boolean present_num_workers = true;
-    list.add(present_num_workers);
-    if (present_num_workers)
-      list.add(num_workers);
+    boolean present_numWorkers = true;
+    list.add(present_numWorkers);
+    if (present_numWorkers)
+      list.add(numWorkers);
 
-    boolean present_num_used_workers = true;
-    list.add(present_num_used_workers);
-    if (present_num_used_workers)
-      list.add(num_used_workers);
+    boolean present_numUsedWorkers = true;
+    list.add(present_numUsedWorkers);
+    if (present_numUsedWorkers)
+      list.add(numUsedWorkers);
 
     return list.hashCode();
   }
@@ -511,42 +511,42 @@ public class SupervisorSummary implements org.apache.thrift.TBase<SupervisorSumm
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(is_set_supervisor_id()).compareTo(other.is_set_supervisor_id());
+    lastComparison = Boolean.valueOf(is_set_supervisorId()).compareTo(other.is_set_supervisorId());
     if (lastComparison != 0) {
       return lastComparison;
     }
-    if (is_set_supervisor_id()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.supervisor_id, other.supervisor_id);
+    if (is_set_supervisorId()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.supervisorId, other.supervisorId);
       if (lastComparison != 0) {
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(is_set_uptime_secs()).compareTo(other.is_set_uptime_secs());
+    lastComparison = Boolean.valueOf(is_set_uptimeSecs()).compareTo(other.is_set_uptimeSecs());
     if (lastComparison != 0) {
       return lastComparison;
     }
-    if (is_set_uptime_secs()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.uptime_secs, other.uptime_secs);
+    if (is_set_uptimeSecs()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.uptimeSecs, other.uptimeSecs);
       if (lastComparison != 0) {
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(is_set_num_workers()).compareTo(other.is_set_num_workers());
+    lastComparison = Boolean.valueOf(is_set_numWorkers()).compareTo(other.is_set_numWorkers());
     if (lastComparison != 0) {
       return lastComparison;
     }
-    if (is_set_num_workers()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.num_workers, other.num_workers);
+    if (is_set_numWorkers()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numWorkers, other.numWorkers);
       if (lastComparison != 0) {
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(is_set_num_used_workers()).compareTo(other.is_set_num_used_workers());
+    lastComparison = Boolean.valueOf(is_set_numUsedWorkers()).compareTo(other.is_set_numUsedWorkers());
     if (lastComparison != 0) {
       return lastComparison;
     }
-    if (is_set_num_used_workers()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.num_used_workers, other.num_used_workers);
+    if (is_set_numUsedWorkers()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numUsedWorkers, other.numUsedWorkers);
       if (lastComparison != 0) {
         return lastComparison;
       }
@@ -558,11 +558,11 @@ public class SupervisorSummary implements org.apache.thrift.TBase<SupervisorSumm
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -579,49 +579,49 @@ public class SupervisorSummary implements org.apache.thrift.TBase<SupervisorSumm
     }
     first = false;
     if (!first) sb.append(", ");
-    sb.append("supervisor_id:");
-    if (this.supervisor_id == null) {
+    sb.append("supervisorId:");
+    if (this.supervisorId == null) {
       sb.append("null");
     } else {
-      sb.append(this.supervisor_id);
+      sb.append(this.supervisorId);
     }
     first = false;
     if (!first) sb.append(", ");
-    sb.append("uptime_secs:");
-    sb.append(this.uptime_secs);
+    sb.append("uptimeSecs:");
+    sb.append(this.uptimeSecs);
     first = false;
     if (!first) sb.append(", ");
-    sb.append("num_workers:");
-    sb.append(this.num_workers);
+    sb.append("numWorkers:");
+    sb.append(this.numWorkers);
     first = false;
     if (!first) sb.append(", ");
-    sb.append("num_used_workers:");
-    sb.append(this.num_used_workers);
+    sb.append("numUsedWorkers:");
+    sb.append(this.numUsedWorkers);
     first = false;
     sb.append(")");
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_host()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'host' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'host' is unset! Struct:" + toString());
     }
 
-    if (!is_set_supervisor_id()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'supervisor_id' is unset! Struct:" + toString());
+    if (!is_set_supervisorId()) {
+      throw new TProtocolException("Required field 'supervisorId' is unset! Struct:" + toString());
     }
 
-    if (!is_set_uptime_secs()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'uptime_secs' is unset! Struct:" + toString());
+    if (!is_set_uptimeSecs()) {
+      throw new TProtocolException("Required field 'uptimeSecs' is unset! Struct:" + toString());
     }
 
-    if (!is_set_num_workers()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'num_workers' is unset! Struct:" + toString());
+    if (!is_set_numWorkers()) {
+      throw new TProtocolException("Required field 'numWorkers' is unset! Struct:" + toString());
     }
 
-    if (!is_set_num_used_workers()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'num_used_workers' is unset! Struct:" + toString());
+    if (!is_set_numUsedWorkers()) {
+      throw new TProtocolException("Required field 'numUsedWorkers' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -630,7 +630,7 @@ public class SupervisorSummary implements org.apache.thrift.TBase<SupervisorSumm
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -640,7 +640,7 @@ public class SupervisorSummary implements org.apache.thrift.TBase<SupervisorSumm
       // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
       __isset_bitfield = 0;
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -653,7 +653,7 @@ public class SupervisorSummary implements org.apache.thrift.TBase<SupervisorSumm
 
   private static class SupervisorSummaryStandardScheme extends StandardScheme<SupervisorSummary> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, SupervisorSummary struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, SupervisorSummary struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -673,32 +673,32 @@ public class SupervisorSummary implements org.apache.thrift.TBase<SupervisorSumm
             break;
           case 2: // SUPERVISOR_ID
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.supervisor_id = iprot.readString();
-              struct.set_supervisor_id_isSet(true);
+              struct.supervisorId = iprot.readString();
+              struct.set_supervisorId_isSet(true);
             } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
           case 3: // UPTIME_SECS
             if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
-              struct.uptime_secs = iprot.readI32();
-              struct.set_uptime_secs_isSet(true);
+              struct.uptimeSecs = iprot.readI32();
+              struct.set_uptimeSecs_isSet(true);
             } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
           case 4: // NUM_WORKERS
             if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
-              struct.num_workers = iprot.readI32();
-              struct.set_num_workers_isSet(true);
+              struct.numWorkers = iprot.readI32();
+              struct.set_numWorkers_isSet(true);
             } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
           case 5: // NUM_USED_WORKERS
             if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
-              struct.num_used_workers = iprot.readI32();
-              struct.set_num_used_workers_isSet(true);
+              struct.numUsedWorkers = iprot.readI32();
+              struct.set_numUsedWorkers_isSet(true);
             } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
@@ -712,7 +712,7 @@ public class SupervisorSummary implements org.apache.thrift.TBase<SupervisorSumm
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, SupervisorSummary struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, SupervisorSummary struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -721,19 +721,19 @@ public class SupervisorSummary implements org.apache.thrift.TBase<SupervisorSumm
         oprot.writeString(struct.host);
         oprot.writeFieldEnd();
       }
-      if (struct.supervisor_id != null) {
+      if (struct.supervisorId != null) {
         oprot.writeFieldBegin(SUPERVISOR_ID_FIELD_DESC);
-        oprot.writeString(struct.supervisor_id);
+        oprot.writeString(struct.supervisorId);
         oprot.writeFieldEnd();
       }
       oprot.writeFieldBegin(UPTIME_SECS_FIELD_DESC);
-      oprot.writeI32(struct.uptime_secs);
+      oprot.writeI32(struct.uptimeSecs);
       oprot.writeFieldEnd();
       oprot.writeFieldBegin(NUM_WORKERS_FIELD_DESC);
-      oprot.writeI32(struct.num_workers);
+      oprot.writeI32(struct.numWorkers);
       oprot.writeFieldEnd();
       oprot.writeFieldBegin(NUM_USED_WORKERS_FIELD_DESC);
-      oprot.writeI32(struct.num_used_workers);
+      oprot.writeI32(struct.numUsedWorkers);
       oprot.writeFieldEnd();
       oprot.writeFieldStop();
       oprot.writeStructEnd();
@@ -750,28 +750,28 @@ public class SupervisorSummary implements org.apache.thrift.TBase<SupervisorSumm
   private static class SupervisorSummaryTupleScheme extends TupleScheme<SupervisorSummary> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, SupervisorSummary struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, SupervisorSummary struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       oprot.writeString(struct.host);
-      oprot.writeString(struct.supervisor_id);
-      oprot.writeI32(struct.uptime_secs);
-      oprot.writeI32(struct.num_workers);
-      oprot.writeI32(struct.num_used_workers);
+      oprot.writeString(struct.supervisorId);
+      oprot.writeI32(struct.uptimeSecs);
+      oprot.writeI32(struct.numWorkers);
+      oprot.writeI32(struct.numUsedWorkers);
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, SupervisorSummary struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, SupervisorSummary struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       struct.host = iprot.readString();
       struct.set_host_isSet(true);
-      struct.supervisor_id = iprot.readString();
-      struct.set_supervisor_id_isSet(true);
-      struct.uptime_secs = iprot.readI32();
-      struct.set_uptime_secs_isSet(true);
-      struct.num_workers = iprot.readI32();
-      struct.set_num_workers_isSet(true);
-      struct.num_used_workers = iprot.readI32();
-      struct.set_num_used_workers_isSet(true);
+      struct.supervisorId = iprot.readString();
+      struct.set_supervisorId_isSet(true);
+      struct.uptimeSecs = iprot.readI32();
+      struct.set_uptimeSecs_isSet(true);
+      struct.numWorkers = iprot.readI32();
+      struct.set_numWorkers_isSet(true);
+      struct.numUsedWorkers = iprot.readI32();
+      struct.set_numUsedWorkers_isSet(true);
     }
   }
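
The SupervisorSummary change is a pure rename: the five field ids and their Thrift types are untouched, so the binary encoding does not change, but every name moves from snake_case to camelCase in the TField descriptors, the metadata map, and the accessors. Call sites need a mechanical update along these lines (all values below are placeholders):

    import backtype.storm.generated.SupervisorSummary;

    public class SupervisorSummaryExample {
        public static void main(String[] args) {
            // Same five required fields in the same order; only the names changed.
            SupervisorSummary summary = new SupervisorSummary(
                    "host-1.example.com",  // host           (id 1)
                    "supervisor-uuid",     // supervisorId   (id 2, was supervisor_id)
                    3600,                  // uptimeSecs     (id 3, was uptime_secs)
                    4,                     // numWorkers     (id 4, was num_workers)
                    2);                    // numUsedWorkers (id 5, was num_used_workers)

            // Renamed accessor; the old get_supervisor_id() no longer compiles.
            String id = summary.get_supervisorId();
        }
    }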
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/SupervisorWorkers.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/SupervisorWorkers.java b/jstorm-core/src/main/java/backtype/storm/generated/SupervisorWorkers.java
index 93ec504..1735b75 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/SupervisorWorkers.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/SupervisorWorkers.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class SupervisorWorkers implements org.apache.thrift.TBase<SupervisorWorkers, SupervisorWorkers._Fields>, java.io.Serializable, Cloneable, Comparable<SupervisorWorkers> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SupervisorWorkers");
 
@@ -455,11 +455,11 @@ public class SupervisorWorkers implements org.apache.thrift.TBase<SupervisorWork
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -495,18 +495,18 @@ public class SupervisorWorkers implements org.apache.thrift.TBase<SupervisorWork
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_supervisor()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'supervisor' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'supervisor' is unset! Struct:" + toString());
     }
 
     if (!is_set_workers()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'workers' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'workers' is unset! Struct:" + toString());
     }
 
     if (!is_set_workerMetric()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'workerMetric' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'workerMetric' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -518,7 +518,7 @@ public class SupervisorWorkers implements org.apache.thrift.TBase<SupervisorWork
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -526,7 +526,7 @@ public class SupervisorWorkers implements org.apache.thrift.TBase<SupervisorWork
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -539,7 +539,7 @@ public class SupervisorWorkers implements org.apache.thrift.TBase<SupervisorWork
 
   private static class SupervisorWorkersStandardScheme extends StandardScheme<SupervisorWorkers> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, SupervisorWorkers struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, SupervisorWorkers struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -561,14 +561,14 @@ public class SupervisorWorkers implements org.apache.thrift.TBase<SupervisorWork
           case 2: // WORKERS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list196 = iprot.readListBegin();
-                struct.workers = new ArrayList<WorkerSummary>(_list196.size);
-                WorkerSummary _elem197;
-                for (int _i198 = 0; _i198 < _list196.size; ++_i198)
+                org.apache.thrift.protocol.TList _list144 = iprot.readListBegin();
+                struct.workers = new ArrayList<WorkerSummary>(_list144.size);
+                WorkerSummary _elem145;
+                for (int _i146 = 0; _i146 < _list144.size; ++_i146)
                 {
-                  _elem197 = new WorkerSummary();
-                  _elem197.read(iprot);
-                  struct.workers.add(_elem197);
+                  _elem145 = new WorkerSummary();
+                  _elem145.read(iprot);
+                  struct.workers.add(_elem145);
                 }
                 iprot.readListEnd();
               }
@@ -580,16 +580,16 @@ public class SupervisorWorkers implements org.apache.thrift.TBase<SupervisorWork
           case 3: // WORKER_METRIC
             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
               {
-                org.apache.thrift.protocol.TMap _map199 = iprot.readMapBegin();
-                struct.workerMetric = new HashMap<String,MetricInfo>(2*_map199.size);
-                String _key200;
-                MetricInfo _val201;
-                for (int _i202 = 0; _i202 < _map199.size; ++_i202)
+                org.apache.thrift.protocol.TMap _map147 = iprot.readMapBegin();
+                struct.workerMetric = new HashMap<String,MetricInfo>(2*_map147.size);
+                String _key148;
+                MetricInfo _val149;
+                for (int _i150 = 0; _i150 < _map147.size; ++_i150)
                 {
-                  _key200 = iprot.readString();
-                  _val201 = new MetricInfo();
-                  _val201.read(iprot);
-                  struct.workerMetric.put(_key200, _val201);
+                  _key148 = iprot.readString();
+                  _val149 = new MetricInfo();
+                  _val149.read(iprot);
+                  struct.workerMetric.put(_key148, _val149);
                 }
                 iprot.readMapEnd();
               }
@@ -607,7 +607,7 @@ public class SupervisorWorkers implements org.apache.thrift.TBase<SupervisorWork
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, SupervisorWorkers struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, SupervisorWorkers struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -620,9 +620,9 @@ public class SupervisorWorkers implements org.apache.thrift.TBase<SupervisorWork
         oprot.writeFieldBegin(WORKERS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.workers.size()));
-          for (WorkerSummary _iter203 : struct.workers)
+          for (WorkerSummary _iter151 : struct.workers)
           {
-            _iter203.write(oprot);
+            _iter151.write(oprot);
           }
           oprot.writeListEnd();
         }
@@ -632,10 +632,10 @@ public class SupervisorWorkers implements org.apache.thrift.TBase<SupervisorWork
         oprot.writeFieldBegin(WORKER_METRIC_FIELD_DESC);
         {
           oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.workerMetric.size()));
-          for (Map.Entry<String, MetricInfo> _iter204 : struct.workerMetric.entrySet())
+          for (Map.Entry<String, MetricInfo> _iter152 : struct.workerMetric.entrySet())
           {
-            oprot.writeString(_iter204.getKey());
-            _iter204.getValue().write(oprot);
+            oprot.writeString(_iter152.getKey());
+            _iter152.getValue().write(oprot);
           }
           oprot.writeMapEnd();
         }
@@ -656,55 +656,55 @@ public class SupervisorWorkers implements org.apache.thrift.TBase<SupervisorWork
   private static class SupervisorWorkersTupleScheme extends TupleScheme<SupervisorWorkers> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, SupervisorWorkers struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, SupervisorWorkers struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       struct.supervisor.write(oprot);
       {
         oprot.writeI32(struct.workers.size());
-        for (WorkerSummary _iter205 : struct.workers)
+        for (WorkerSummary _iter153 : struct.workers)
         {
-          _iter205.write(oprot);
+          _iter153.write(oprot);
         }
       }
       {
         oprot.writeI32(struct.workerMetric.size());
-        for (Map.Entry<String, MetricInfo> _iter206 : struct.workerMetric.entrySet())
+        for (Map.Entry<String, MetricInfo> _iter154 : struct.workerMetric.entrySet())
         {
-          oprot.writeString(_iter206.getKey());
-          _iter206.getValue().write(oprot);
+          oprot.writeString(_iter154.getKey());
+          _iter154.getValue().write(oprot);
         }
       }
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, SupervisorWorkers struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, SupervisorWorkers struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       struct.supervisor = new SupervisorSummary();
       struct.supervisor.read(iprot);
       struct.set_supervisor_isSet(true);
       {
-        org.apache.thrift.protocol.TList _list207 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.workers = new ArrayList<WorkerSummary>(_list207.size);
-        WorkerSummary _elem208;
-        for (int _i209 = 0; _i209 < _list207.size; ++_i209)
+        org.apache.thrift.protocol.TList _list155 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.workers = new ArrayList<WorkerSummary>(_list155.size);
+        WorkerSummary _elem156;
+        for (int _i157 = 0; _i157 < _list155.size; ++_i157)
         {
-          _elem208 = new WorkerSummary();
-          _elem208.read(iprot);
-          struct.workers.add(_elem208);
+          _elem156 = new WorkerSummary();
+          _elem156.read(iprot);
+          struct.workers.add(_elem156);
         }
       }
       struct.set_workers_isSet(true);
       {
-        org.apache.thrift.protocol.TMap _map210 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.workerMetric = new HashMap<String,MetricInfo>(2*_map210.size);
-        String _key211;
-        MetricInfo _val212;
-        for (int _i213 = 0; _i213 < _map210.size; ++_i213)
+        org.apache.thrift.protocol.TMap _map158 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.workerMetric = new HashMap<String,MetricInfo>(2*_map158.size);
+        String _key159;
+        MetricInfo _val160;
+        for (int _i161 = 0; _i161 < _map158.size; ++_i161)
         {
-          _key211 = iprot.readString();
-          _val212 = new MetricInfo();
-          _val212.read(iprot);
-          struct.workerMetric.put(_key211, _val212);
+          _key159 = iprot.readString();
+          _val160 = new MetricInfo();
+          _val160.read(iprot);
+          struct.workerMetric.put(_key159, _val160);
         }
       }
       struct.set_workerMetric_isSet(true);
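
The SupervisorWorkers hunks are lighter still: the Thrift compiler renumbered its temporary iterator variables (_list196 becomes _list144 and so on) and the fully qualified org.apache.thrift.TException shrank to the imported TException, none of which affects the encoded bytes. A quick round-trip sketch that illustrates the point, using the simpler SupervisorSummary struct with placeholder values, assuming libthrift's TSerializer/TDeserializer:

    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.TException;
    import org.apache.thrift.TSerializer;
    import org.apache.thrift.protocol.TBinaryProtocol;

    import backtype.storm.generated.SupervisorSummary;

    public class RoundTripCheck {
        public static void main(String[] args) throws TException {
            SupervisorSummary original =
                    new SupervisorSummary("host-1", "supervisor-uuid", 3600, 4, 2);

            // Encode with the binary protocol, then decode into a fresh struct.
            byte[] bytes =
                    new TSerializer(new TBinaryProtocol.Factory()).serialize(original);
            SupervisorSummary copy = new SupervisorSummary();
            new TDeserializer(new TBinaryProtocol.Factory()).deserialize(copy, bytes);

            // Field ids drive the encoding, so the copy equals the original.
            assert original.equals(copy);
        }
    }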

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/TaskComponent.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/TaskComponent.java b/jstorm-core/src/main/java/backtype/storm/generated/TaskComponent.java
index 53603fe..ac6cfc6 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/TaskComponent.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/TaskComponent.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class TaskComponent implements org.apache.thrift.TBase<TaskComponent, TaskComponent._Fields>, java.io.Serializable, Cloneable, Comparable<TaskComponent> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TaskComponent");
 
@@ -339,11 +339,11 @@ public class TaskComponent implements org.apache.thrift.TBase<TaskComponent, Tas
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -367,14 +367,14 @@ public class TaskComponent implements org.apache.thrift.TBase<TaskComponent, Tas
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_taskId()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'taskId' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'taskId' is unset! Struct:" + toString());
     }
 
     if (!is_set_component()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'component' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'component' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -383,7 +383,7 @@ public class TaskComponent implements org.apache.thrift.TBase<TaskComponent, Tas
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -393,7 +393,7 @@ public class TaskComponent implements org.apache.thrift.TBase<TaskComponent, Tas
       // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
       __isset_bitfield = 0;
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -406,7 +406,7 @@ public class TaskComponent implements org.apache.thrift.TBase<TaskComponent, Tas
 
   private static class TaskComponentStandardScheme extends StandardScheme<TaskComponent> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, TaskComponent struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, TaskComponent struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -441,7 +441,7 @@ public class TaskComponent implements org.apache.thrift.TBase<TaskComponent, Tas
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, TaskComponent struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, TaskComponent struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -468,14 +468,14 @@ public class TaskComponent implements org.apache.thrift.TBase<TaskComponent, Tas
   private static class TaskComponentTupleScheme extends TupleScheme<TaskComponent> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, TaskComponent struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, TaskComponent struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       oprot.writeI32(struct.taskId);
       oprot.writeString(struct.component);
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, TaskComponent struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, TaskComponent struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       struct.taskId = iprot.readI32();
       struct.set_taskId_isSet(true);
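
The TupleScheme above encodes both required fields with no field headers or stop markers, relying purely on IDL declaration order. A minimal round-trip sketch, assuming the generated all-required-fields constructor and the getter naming visible elsewhere in this diff:

import org.apache.thrift.protocol.TTupleProtocol;
import org.apache.thrift.transport.TMemoryBuffer;
import backtype.storm.generated.TaskComponent;

public class TupleSchemeRoundTrip {
    public static void main(String[] args) throws Exception {
        TMemoryBuffer buf = new TMemoryBuffer(64);         // in-memory transport
        TTupleProtocol proto = new TTupleProtocol(buf);    // selects the TupleScheme

        TaskComponent out = new TaskComponent(1, "spout"); // assumed generated ctor
        out.write(proto);   // writes raw i32 + string, no field headers

        TaskComponent in = new TaskComponent();
        in.read(proto);     // reads fields back in declaration order
        System.out.println(in.get_component());            // prints "spout"
    }
}
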


http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/TopologyMetricsRunnable.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/TopologyMetricsRunnable.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/TopologyMetricsRunnable.java
index b8eeb20..7af67c2 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/TopologyMetricsRunnable.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/TopologyMetricsRunnable.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
+ * <p/>
  * http://www.apache.org/licenses/LICENSE-2.0
- *
+ * <p/>
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -17,711 +17,895 @@
  */
 package com.alibaba.jstorm.daemon.nimbus;
 
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import java.util.concurrent.BlockingDeque;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.LinkedBlockingDeque;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.alibaba.jstorm.cache.JStormCache;
+import backtype.storm.generated.MetricInfo;
+import backtype.storm.generated.MetricSnapshot;
+import backtype.storm.generated.TopologyMetric;
+import backtype.storm.utils.Utils;
+import com.alibaba.jstorm.callback.AsyncLoopThread;
 import com.alibaba.jstorm.callback.RunnableCallback;
 import com.alibaba.jstorm.client.ConfigExtension;
 import com.alibaba.jstorm.cluster.Cluster;
-import com.alibaba.jstorm.cluster.Common;
 import com.alibaba.jstorm.cluster.StormClusterState;
-import com.alibaba.jstorm.cluster.StormConfig;
-import com.alibaba.jstorm.common.metric.Histogram;
-import com.alibaba.jstorm.metric.AlimonitorClient;
-import com.alibaba.jstorm.metric.MetricDef;
-import com.alibaba.jstorm.metric.MetricSendClient;
-import com.alibaba.jstorm.metric.MetricThrift;
-import com.alibaba.jstorm.metric.SimpleJStormMetric;
+import com.alibaba.jstorm.common.metric.AsmGauge;
+import com.alibaba.jstorm.common.metric.MetricMeta;
+import com.alibaba.jstorm.daemon.nimbus.metric.uploader.DefaultMetricUploader;
+import com.alibaba.jstorm.daemon.nimbus.metric.uploader.MetricUploader;
+import com.alibaba.jstorm.metric.*;
 import com.alibaba.jstorm.schedule.Assignment;
 import com.alibaba.jstorm.schedule.default_assign.ResourceWorkerSlot;
-import com.alibaba.jstorm.utils.TimeCacheMap;
+import com.alibaba.jstorm.utils.JStormUtils;
+import com.alibaba.jstorm.utils.TimeUtils;
 import com.codahale.metrics.Gauge;
+import com.google.common.collect.Sets;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang.builder.ToStringBuilder;
+import org.apache.commons.lang.builder.ToStringStyle;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import backtype.storm.generated.MetricInfo;
-import backtype.storm.generated.MetricWindow;
-import backtype.storm.generated.TopologyMetric;
-import backtype.storm.generated.WorkerUploadMetrics;
+import java.io.Serializable;
+import java.lang.management.ManagementFactory;
+import java.lang.management.MemoryMXBean;
+import java.lang.management.MemoryUsage;
+import java.util.*;
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicIntegerArray;
 
-public class TopologyMetricsRunnable extends RunnableCallback {
+/**
+ * Topology metrics thread which resides in nimbus.
+ * This class is responsible for generating metric IDs and uploading metrics to the underlying storage system.
+ *
+ * @author Cody (weiyue.wy@alibaba-inc.com)
+ * @since 2.0.5
+ */
+public class TopologyMetricsRunnable extends Thread {
     private static final Logger LOG = LoggerFactory.getLogger(TopologyMetricsRunnable.class);
-    private static final String DEAD_SUPERVISOR_HEAD = "DeadSupervisor-";
-    
-    public static interface Event {
-        
-    }
-    
-    public static class Update implements Event {
-        public WorkerUploadMetrics workerMetrics;
-    }
-    
-    public static class Remove implements Event {
-        public String topologyId;
-    }
-    
-    public static class Upload implements Event {
-        public long timeStamp;
-    }
-    
-    public static final String CACHE_NAMESPACE_METRIC = "cache_namespace_metric";
-    public static final String CACHE_NAMESPACE_NETTY = "cache_namespace_netty";
-    protected NimbusCache nimbusCache;
-    protected JStormCache dbCache;
-    
+
+    protected JStormMetricCache metricCache;
+
     /**
-     * cache all worker metrics will waste a little memory
-     * 
+     * map<topologyId, map<worker, metricInfo>>, local memory cache, keeps only one snapshot of metrics.
      */
-    protected Map<String, Set<String>> topologyWorkers;
-    protected TimeCacheMap<String, Long> removing;
-    
-    protected BlockingDeque<TopologyMetricsRunnable.Event> queue;
+    protected final ConcurrentMap<String, TopologyMetricContext> topologyMetricContexts =
+            new ConcurrentHashMap<>();
+
+    protected final BlockingDeque<TopologyMetricsRunnable.Event> queue = new LinkedBlockingDeque<>();
+
+    private static final String PENDING_UPLOAD_METRIC_DATA = "__pending.upload.metrics__";
+    private static final String PENDING_UPLOAD_METRIC_DATA_INFO = "__pending.upload.metrics.info__";
+
+    // the slot is empty
+    private static final int UNSET = 0;
+    // the slot is ready for uploading
+    private static final int SET = 1;
+    // the slot is being uploaded
+    private static final int UPLOADING = 2;
+    // the slot will be set ready for uploading
+    private static final int PRE_SET = 3;
+
+    protected final AtomicIntegerArray metricStat;
+
     protected StormClusterState stormClusterState;
-    
-    protected MetricSendClient metricSendClient;
-    protected TopologyMetric emptyTopologyMetric = mkTopologyMetric();
-    protected TreeMap<String, MetricInfo>   emptyNettyMetric = new TreeMap<String, MetricInfo>();
+
+    protected MetricUploader metricUploader;
+
     protected AtomicBoolean isShutdown;
-    protected boolean localMode;
-    protected TopologyNettyMgr topologyNettyMgr;
-    
-    protected Histogram updateHistogram;
-    protected AtomicBoolean isUploading = new AtomicBoolean(false);
-    protected Histogram uploadHistogram;
-    
-    public TopologyMetricsRunnable(NimbusData nimbusData) {
-        
-        this.nimbusCache = nimbusData.getNimbusCache();
-        this.dbCache = nimbusCache.getDbCache();
-        this.topologyWorkers = new ConcurrentHashMap<String, Set<String>>();
-        this.removing = new TimeCacheMap<String, Long>(600);
-        this.queue = new LinkedBlockingDeque<TopologyMetricsRunnable.Event>();
+    protected String clusterName;
+    protected int maxPendingUploadMetrics;
+
+    private final boolean localMode;
+    private final NimbusData nimbusData;
+    private MetricQueryClient metricQueryClient;
+
+    private ScheduledExecutorService clusterMetricsUpdateExecutor;
+
+    /**
+     * refreshes alive topologies every min or on startup.
+     */
+    protected AsyncLoopThread refreshTopologiesThread;
+
+    /**
+     * the thread for metric sending; polls for pending upload slots.
+     */
+    private final Thread uploadThread = new MetricsUploadThread();
+
+    /**
+     * async flush metric meta
+     */
+    private final Thread flushMetricMetaThread = new FlushMetricMetaThread();
+
+    /**
+     * use default UUID generator
+     */
+    private final MetricIDGenerator metricIDGenerator = new DefaultMetricIDGenerator();
+
+    public TopologyMetricsRunnable(final NimbusData nimbusData) {
+        setName(getClass().getSimpleName());
+
+        this.nimbusData = nimbusData;
+
+        this.localMode = nimbusData.isLocalMode();
+        if (localMode) {
+            this.metricStat = new AtomicIntegerArray(1);
+            return;
+        }
+
+        LOG.info("create topology metrics runnable.");
+        this.metricCache = nimbusData.getMetricCache();
         this.stormClusterState = nimbusData.getStormClusterState();
         this.isShutdown = nimbusData.getIsShutdown();
-        this.topologyNettyMgr = nimbusData.getTopologyNettyMgr();
-        
-        if (ConfigExtension.isAlimonitorMetricsPost(nimbusData.getConf())) {
-            metricSendClient = new AlimonitorClient(AlimonitorClient.DEFAUT_ADDR, AlimonitorClient.DEFAULT_PORT, true);
-        } else {
-            metricSendClient = new MetricSendClient();
-        }
-        localMode = StormConfig.local_mode(nimbusData.getConf());
-        
-        updateHistogram = SimpleJStormMetric.registerHistorgram("TopologyMetricsRunnable_Update");
-        uploadHistogram = SimpleJStormMetric.registerHistorgram("TopologyMetricsRunnable_Upload");
-        
-        SimpleJStormMetric.registerWorkerGauge(new Gauge<Double>() {
-            
+
+        clusterName = ConfigExtension.getClusterName(nimbusData.getConf());
+        if (clusterName == null) {
+            throw new RuntimeException("cluster.name property must be set in storm.yaml!");
+        }
+
+        this.maxPendingUploadMetrics = ConfigExtension.getMaxPendingMetricNum(nimbusData.getConf());
+        this.metricStat = new AtomicIntegerArray(this.maxPendingUploadMetrics);
+
+        int cnt = 0;
+        for (int i = 0; i < maxPendingUploadMetrics; i++) {
+            TopologyMetricDataInfo obj = getMetricDataInfoFromCache(i);
+            if (obj != null) {
+                this.metricStat.set(i, SET);
+                cnt++;
+            }
+        }
+        LOG.info("pending upload metrics: {}", cnt);
+
+        // init alive topologies from zk
+        this.refreshTopologies();
+        this.refreshTopologiesThread = new AsyncLoopThread(new RefreshTopologiesThread());
+
+        this.clusterMetricsUpdateExecutor = Executors.newSingleThreadScheduledExecutor();
+        this.clusterMetricsUpdateExecutor.scheduleAtFixedRate(new Runnable() {
             @Override
-            public Double getValue() {
-                // TODO Auto-generated method stub
-                return (double) queue.size();
+            public void run() {
+                int secOffset = TimeUtils.secOffset();
+                int offset = 55;
+                if (secOffset < offset) {
+                    JStormUtils.sleepMs((offset - secOffset) * 1000);
+                } else if (secOffset == offset) {
+                    // do nothing
+                } else {
+                    JStormUtils.sleepMs((60 - secOffset + offset) * 1000);
+                }
+
+                LOG.info("cluster metrics force upload.");
+                mergeAndUploadClusterMetrics();
             }
-        }, "TopologyMetricsRunnable_Queue");
+        }, 5, 60, TimeUnit.SECONDS);
+
+        // track nimbus JVM heap
+        JStormMetrics.registerWorkerGauge(JStormMetrics.NIMBUS_METRIC_KEY, MetricDef.MEMORY_USED,
+                new AsmGauge(new Gauge<Double>() {
+                    @Override
+                    public Double getValue() {
+                        MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean();
+                        MemoryUsage memoryUsage = memoryMXBean.getHeapMemoryUsage();
+                        return (double) memoryUsage.getUsed();
+                    }
+                }));
     }
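
The scheduled task above first aligns itself to second 55 of the wall-clock minute, so every cluster-metrics merge fires just before the minute boundary. A standalone sketch of the same alignment arithmetic, assuming TimeUtils.secOffset() returns the current second within the minute (0..59):

// sketch: align the calling thread to second 55 of the minute
static void alignToSecond55() throws InterruptedException {
    int sec = (int) ((System.currentTimeMillis() / 1000) % 60); // stand-in for TimeUtils.secOffset()
    int target = 55;
    long sleepMs;
    if (sec < target) {
        sleepMs = (target - sec) * 1000L;        // wait forward to :55
    } else if (sec == target) {
        sleepMs = 0L;                            // already aligned
    } else {
        sleepMs = (60 - sec + target) * 1000L;   // wrap to the next minute's :55
    }
    Thread.sleep(sleepMs);
}
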
-    
-    public void pushEvent(TopologyMetricsRunnable.Event cmd) {
-        queue.offer(cmd);
+
+    /**
+     * init metric uploader
+     */
+    public void init() {
+        String metricUploadClass = ConfigExtension.getMetricUploaderClass(nimbusData.getConf());
+        if (StringUtils.isBlank(metricUploadClass)) {
+            metricUploadClass = DefaultMetricUploader.class.getName();
+        }
+        // init metric uploader
+        LOG.info("metric uploader class:{}", metricUploadClass);
+        Object instance = Utils.newInstance(metricUploadClass);
+        if (!(instance instanceof MetricUploader)) {
+            throw new RuntimeException(metricUploadClass + " is not a MetricUploader class");
+        }
+        this.metricUploader = (MetricUploader) instance;
+        try {
+            metricUploader.init(nimbusData);
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+        LOG.info("Successfully init {}", metricUploadClass);
+
+        // init metric query client
+        String metricQueryClientClass = ConfigExtension.getMetricQueryClientClass(nimbusData.getConf());
+        if (!StringUtils.isBlank(metricQueryClientClass)) {
+            LOG.info("metric query client class:{}", metricQueryClientClass);
+            this.metricQueryClient = (MetricQueryClient) Utils.newInstance(metricQueryClientClass);
+        } else {
+            LOG.warn("use default metric query client class.");
+            this.metricQueryClient = new DefaultMetricQueryClient();
+        }
+        try {
+            metricQueryClient.init(nimbusData.getConf());
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+
+        this.uploadThread.start();
+        this.flushMetricMetaThread.start();
+
+        LOG.info("init topology metric runnable done.");
+    }
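
init() resolves the uploader implementation by class name, falls back to DefaultMetricUploader, and rejects anything that does not implement MetricUploader. The reflective plugin idiom, condensed into a hedged sketch using only names that appear in this diff:

// sketch of the plugin-loading idiom used above
static MetricUploader loadUploader(String className, NimbusData nimbusData) throws Exception {
    if (org.apache.commons.lang.StringUtils.isBlank(className)) {
        className = DefaultMetricUploader.class.getName();            // fall back to the default
    }
    Object instance = backtype.storm.utils.Utils.newInstance(className); // reflective construction
    if (!(instance instanceof MetricUploader)) {
        throw new RuntimeException(className + " is not a MetricUploader");
    }
    MetricUploader uploader = (MetricUploader) instance;
    uploader.init(nimbusData);                                        // plugin-specific setup
    return uploader;
}

The same shape is repeated below for MetricQueryClient, with DefaultMetricQueryClient as the fallback.
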
+
+    public void shutdown() {
+        LOG.info("Begin to shutdown");
+        metricUploader.cleanup();
+
+        LOG.info("Successfully shutdown");
     }
-    
-    public TopologyMetric mkTopologyMetric() {
-        TopologyMetric emptyTopologyMetric = new TopologyMetric();
-        
-        MetricInfo topologyMetricInfo = MetricThrift.mkMetricInfo();
-        emptyTopologyMetric.set_topologyMetric(topologyMetricInfo);
-        
-        emptyTopologyMetric.set_componentMetric(new HashMap<String, MetricInfo>());
-        emptyTopologyMetric.set_workerMetric(new HashMap<String, MetricInfo>());
-        emptyTopologyMetric.set_taskMetric(new HashMap<Integer, MetricInfo>());
-        return emptyTopologyMetric;
-    }
-    
+
     @Override
     public void run() {
-        try {
-            TopologyMetricsRunnable.Event event = queue.take();
-            
-            if (event instanceof Remove) {
-                
-                handleRemoveEvent((Remove) event);
-                return;
-            } else if (event instanceof Update) {
-                handleUpdateEvent((Update) event);
-                return;
-            } else if (event instanceof Upload) {
-                handleUploadEvent((Upload) event);
-                return;
-            } else {
-                LOG.error("Unknow event type");
+        while (!isShutdown.get()) {
+            if (localMode) {
                 return;
             }
-            
-        } catch (Exception e) {
-            if (isShutdown.get() == false) {
-                LOG.error(e.getMessage(), e);
+
+            try {
+                // wait for metricUploader to be ready; for external plugins (e.g. a database) initialization can take a few seconds
+                if (this.metricUploader != null) {
+                    Event event = queue.poll();
+                    if (event == null) {
+                        continue;
+                    }
+
+                    if (event instanceof Remove) {
+                        handleRemoveEvent((Remove) event);
+                    } else if (event instanceof Update) {
+                        handleUpdateEvent((Update) event);
+                    } else if (event instanceof Refresh) {
+                        handleRefreshEvent((Refresh) event);
+                    } else if (event instanceof KillTopologyEvent) {
+                        handleKillTopologyEvent((KillTopologyEvent) event);
+                    } else if (event instanceof StartTopologyEvent) {
+                        handleStartTopologyEvent((StartTopologyEvent) event);
+                    } else if (event instanceof TaskDeadEvent) {
+                        handleTaskDeadEvent((TaskDeadEvent) event);
+                    } else if (event instanceof TaskStartEvent) {
+                        handleTaskStartEvent((TaskStartEvent) event);
+                    } else {
+                        LOG.error("Unknown event type:{}", event.getClass());
+                    }
+                }
+            } catch (Exception e) {
+                if (!isShutdown.get()) {
+                    LOG.error(e.getMessage(), e);
+                }
             }
         }
     }
-    
-    public void handleRemoveEvent(Remove event) {
-        String topologyId = event.topologyId;
-        TopologyMetric topologyMetric = (TopologyMetric) dbCache.get(getTopologyKey(topologyId));
-        if (topologyMetric == null) {
-            LOG.warn("No TopologyMetric of  " + topologyId);
-            return;
+
+
+    public boolean isTopologyAlive(String topologyId) {
+        return topologyMetricContexts.containsKey(topologyId);
+    }
+
+    private int getAndPresetFirstEmptyIndex() {
+        for (int i = 0; i < maxPendingUploadMetrics; i++) {
+            if (metricStat.get(i) == UNSET) {
+                if (metricStat.compareAndSet(i, UNSET, PRE_SET)) {
+                    return i;
+                }
+            }
         }
-        
-        removing.put(topologyId, System.currentTimeMillis());
-        dbCache.remove(getTopologyKey(topologyId));
-        dbCache.remove(getNettyTopologyKey(topologyId));
-        topologyNettyMgr.rmTopology(topologyId);
-        LOG.info("Successfully remove TopologyMetric of " + topologyId);
-        return;
-        
-    }
-    
-    public void cleanDeadSupervisorWorker(TopologyMetric metric) {
-        List<String> removeList = new ArrayList<String>();
-        
-        Map<String, MetricInfo> workerMetric = metric.get_workerMetric();
-        if (workerMetric == null) {
-            return;
+        return -1;
+    }
+
+    private int getFirstPendingUploadIndex() {
+        for (int i = 0; i < maxPendingUploadMetrics; i++) {
+            if (metricStat.get(i) == SET) {
+                return i;
+            }
         }
-        for (String hostPort : workerMetric.keySet()) {
-            if (hostPort.startsWith(DEAD_SUPERVISOR_HEAD)) {
-                removeList.add(hostPort);
+        return -1;
+    }
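
getAndPresetFirstEmptyIndex claims a slot lock-free: the compareAndSet lets at most one thread win the UNSET -> PRE_SET transition for any index, so concurrent update events never share a slot. The claim in isolation, as a hedged sketch:

import java.util.concurrent.atomic.AtomicIntegerArray;

// sketch: CAS-based slot claim; state values as defined above
static int claimSlot(AtomicIntegerArray slots) {
    final int UNSET = 0, PRE_SET = 3;
    for (int i = 0; i < slots.length(); i++) {
        // read-then-CAS: the CAS alone decides the winner under contention
        if (slots.get(i) == UNSET && slots.compareAndSet(i, UNSET, PRE_SET)) {
            return i;
        }
    }
    return -1;   // no free slot; the caller drops the metrics
}
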
+
+    public void markUploaded(int idx) {
+        this.metricCache.remove(PENDING_UPLOAD_METRIC_DATA + idx);
+        this.metricCache.remove(PENDING_UPLOAD_METRIC_DATA_INFO + idx);
+        this.metricStat.set(idx, UNSET);
+    }
+
+    public void markUploading(int idx) {
+        this.metricStat.set(idx, UPLOADING);
+    }
+
+    public void markSet(int idx) {
+        this.metricStat.set(idx, SET);
+    }
+
+    public TopologyMetric getMetricDataFromCache(int idx) {
+        return (TopologyMetric) metricCache.get(PENDING_UPLOAD_METRIC_DATA + idx);
+    }
+
+    public TopologyMetricDataInfo getMetricDataInfoFromCache(int idx) {
+        return (TopologyMetricDataInfo) metricCache.get(PENDING_UPLOAD_METRIC_DATA_INFO + idx);
+    }
+
+    public void pushEvent(Event cmd) {
+        queue.offer(cmd);
+    }
+
+    public Map<String, Long> registerMetrics(String topologyId, Set<String> metricNames) {
+        TimeTicker ticker = new TimeTicker(TimeUnit.MILLISECONDS, true);
+
+        ConcurrentMap<String, Long> memMeta = topologyMetricContexts.get(topologyId).getMemMeta();
+        Map<String, Long> ret = new HashMap<>();
+        for (String metricName : metricNames) {
+            Long id = memMeta.get(metricName);
+            if (id != null && MetricUtils.isValidId(id)) {
+                ret.put(metricName, id);
+            } else {
+                id = metricIDGenerator.genMetricId(metricName);
+                Long old = memMeta.putIfAbsent(metricName, id);
+                if (old == null) {
+                    ret.put(metricName, id);
+                } else {
+                    ret.put(metricName, old);
+                }
             }
         }
-        
-        for (String removed : removeList) {
-            workerMetric.remove(removed);
+
+        long cost = ticker.stop();
+        LOG.info("register metrics, topology:{}, size:{}, cost:{}", topologyId, metricNames.size(), cost);
+
+        return ret;
+    }
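
registerMetrics settles races on a metric name with putIfAbsent: the first writer's ID sticks and every later caller adopts it, so a name maps to exactly one ID. The idiom in isolation (the candidate ID stands in for metricIDGenerator.genMetricId()):

import java.util.concurrent.ConcurrentMap;

// sketch: first-writer-wins ID registration
static long register(ConcurrentMap<String, Long> meta, String name, long candidateId) {
    Long prev = meta.putIfAbsent(name, candidateId);
    return (prev == null) ? candidateId : prev;   // losers adopt the winner's ID
}
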
+
+    public void handleRemoveEvent(Remove event) {
+        String topologyId = event.topologyId;
+        if (topologyId != null) {
+            removeTopology(topologyId);
         }
+        LOG.info("remove topology:{}.", topologyId);
+
     }
-    
-    public void cleanTopology() {
-        Map<String, Long> removingMap = removing.buildMap();
-        
-        Map<String, Assignment> assignMap = null;
+
+    private void removeTopology(String topologyId) {
+        metricCache.removeTopology(topologyId);
+        metricCache.removeSampleRate(topologyId);
+
+        topologyMetricContexts.remove(topologyId);
+    }
+
+
+    public void refreshTopologies() {
+        if (!topologyMetricContexts.containsKey(JStormMetrics.NIMBUS_METRIC_KEY)) {
+            LOG.info("adding __nimbus__ to metric context.");
+            Set<ResourceWorkerSlot> workerSlot = Sets.newHashSet(new ResourceWorkerSlot());
+            TopologyMetricContext metricContext = new TopologyMetricContext(workerSlot);
+            topologyMetricContexts.putIfAbsent(JStormMetrics.NIMBUS_METRIC_KEY, metricContext);
+            syncMetaFromCache(JStormMetrics.NIMBUS_METRIC_KEY, topologyMetricContexts.get(JStormMetrics.NIMBUS_METRIC_KEY));
+        }
+        if (!topologyMetricContexts.containsKey(JStormMetrics.CLUSTER_METRIC_KEY)) {
+            LOG.info("adding __cluster__ to metric context.");
+            Set<ResourceWorkerSlot> workerSlot = Sets.newHashSet(new ResourceWorkerSlot());
+            Map conf = new HashMap();
+            // there's no need to consider the sample rate when merging cluster metrics
+            conf.put(ConfigExtension.TOPOLOGY_METRIC_SAMPLE_RATE, 1.0);
+            TopologyMetricContext metricContext = new TopologyMetricContext(
+                    JStormMetrics.CLUSTER_METRIC_KEY, workerSlot, conf);
+            topologyMetricContexts.putIfAbsent(JStormMetrics.CLUSTER_METRIC_KEY, metricContext);
+            syncMetaFromCache(JStormMetrics.CLUSTER_METRIC_KEY, topologyMetricContexts.get(JStormMetrics.CLUSTER_METRIC_KEY));
+        }
+
+        Map<String, Assignment> assignMap;
         try {
             assignMap = Cluster.get_all_assignment(stormClusterState, null);
+            for (String topologyId : assignMap.keySet()) {
+                if (!topologyMetricContexts.containsKey(topologyId)) {
+                    Assignment assignment = assignMap.get(topologyId);
+                    TopologyMetricContext metricContext =
+                            new TopologyMetricContext(assignment.getWorkers());
+                    metricContext.setTaskNum(NimbusUtils.getTopologyTaskNum(assignment));
+                    syncMetaFromCache(topologyId, metricContext);
+
+                    LOG.info("adding {} to metric context.", topologyId);
+                    topologyMetricContexts.put(topologyId, metricContext);
+                }
+            }
         } catch (Exception e1) {
-            // TODO Auto-generated catch block
-            LOG.info("Failed to get Assignments");
+            LOG.warn("failed to get assignments");
+            return;
         }
-        
-        for (String topologyId : topologyWorkers.keySet()) {
-            if (assignMap.containsKey(topologyId) == false) {
-                removingMap.put(topologyId, System.currentTimeMillis());
+
+        List<String> removing = new ArrayList<>();
+        for (String topologyId : topologyMetricContexts.keySet()) {
+            if (!JStormMetrics.NIMBUS_METRIC_KEY.equals(topologyId)
+                    && !JStormMetrics.CLUSTER_METRIC_KEY.equals(topologyId)
+                    && !assignMap.containsKey(topologyId)) {
+                removing.add(topologyId);
             }
         }
-        
-        for (String topologyId : removingMap.keySet()) {
-            dbCache.remove(getTopologyKey(topologyId));
-            
-            Set<String> workers = topologyWorkers.get(topologyId);
-            if (workers != null) {
-                for (String workerSlot : workers) {
-                    dbCache.remove(getWorkerKey(topologyId, workerSlot));
+
+        for (String topologyId : removing) {
+            LOG.info("removing topology:{}", topologyId);
+            removeTopology(topologyId);
+        }
+    }
+
+    /**
+     * sync topology metric meta from external storage like TDDL/OTS.
+     * the nimbus leader skips syncing; only followers do this
+     */
+    public void syncTopologyMeta() {
+        String nimbus = JStormMetrics.NIMBUS_METRIC_KEY;
+        if (topologyMetricContexts.containsKey(nimbus)) {
+            syncMetaFromRemote(nimbus, topologyMetricContexts.get(nimbus));
+        }
+        String cluster = JStormMetrics.CLUSTER_METRIC_KEY;
+        if (topologyMetricContexts.containsKey(cluster)) {
+            syncMetaFromRemote(cluster, topologyMetricContexts.get(cluster));
+        }
+
+        Map<String, Assignment> assignMap;
+        try {
+            assignMap = Cluster.get_all_assignment(stormClusterState, null);
+            for (String topologyId : assignMap.keySet()) {
+                if (topologyMetricContexts.containsKey(topologyId)) {
+                    Assignment assignment = assignMap.get(topologyId);
+                    TopologyMetricContext metricContext =
+                            new TopologyMetricContext(assignment.getWorkers());
+                    metricContext.setTaskNum(NimbusUtils.getTopologyTaskNum(assignment));
+
+                    syncMetaFromCache(topologyId, metricContext);
+                    syncMetaFromRemote(topologyId, metricContext);
                 }
-                topologyWorkers.remove(topologyId);
-            }
-            
-        }
-        
-        for (Entry<String, Set<String>> entry : topologyWorkers.entrySet()) {
-            String topologyId = entry.getKey();
-            Set<String> metricWorkers = entry.getValue();
-            
-            Set<String> workerSlots = new HashSet<String>();
-            
-            Assignment assignment = assignMap.get(topologyId);
-            if (assignment == null) {
-                LOG.error("Assignment disappear of " + topologyId);
-                continue;
             }
-            
-            for (ResourceWorkerSlot worker : assignment.getWorkers()) {
-                String slot = getWorkerSlotName(worker.getNodeId(), worker.getPort());
-                workerSlots.add(slot);
+        } catch (Exception e1) {
+            LOG.warn("failed to get assignments");
+        }
+    }
+
+    /**
+     * sync metric meta from RocksDB into the memory cache on startup
+     */
+    private void syncMetaFromCache(String topologyId, TopologyMetricContext context) {
+        if (!context.syncMeta()) {
+            Map<String, Long> meta = metricCache.getMeta(topologyId);
+            if (meta != null) {
+                context.getMemMeta().putAll(meta);
             }
-            
-            Set<String> removes = new HashSet<String>();
-            for (String slot : metricWorkers) {
-                if (workerSlots.contains(slot) == false) {
-                    LOG.info("Remove worker metrics of {}:{}", topologyId, slot);
-                    removes.add(slot);
+            context.setSyncMeta(true);
+        }
+    }
+
+    private void syncMetaFromRemote(String topologyId, TopologyMetricContext context) {
+        try {
+            int memSize = context.getMemMeta().size();
+            int zkSize = (Integer) stormClusterState.get_topology_metric(topologyId);
+
+            if (memSize != zkSize) {
+                ConcurrentMap<String, Long> memMeta = context.getMemMeta();
+                for (MetaType metaType : MetaType.values()) {
+                    List<MetricMeta> metaList = metricQueryClient.getMetricMeta(clusterName, topologyId, metaType);
+                    if (metaList != null) {
+                        LOG.info("get remote metric meta, topology:{}, metaType:{}, mem:{}, zk:{}, new size:{}",
+                                topologyId, metaType, memSize, zkSize, metaList.size());
+                        for (MetricMeta meta : metaList) {
+                            memMeta.putIfAbsent(meta.getFQN(), meta.getId());
+                        }
+                    }
                 }
+                metricCache.putMeta(topologyId, memMeta);
             }
-            
-            for (String slot : removes) {
-                metricWorkers.remove(slot);
-                dbCache.remove(getWorkerKey(topologyId, slot));
-            }
+        } catch (Exception ex) {
+            LOG.error("failed to sync remote meta", ex);
         }
     }
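
syncMetaFromRemote gates the expensive remote fetch behind a cheap consistency probe: it compares the in-memory meta size with a count read from ZooKeeper and only pulls the full meta list when they diverge. The pattern reduced to its core, with readZkCount and fetchAllMeta as hypothetical stand-ins for the ZK and MetricQueryClient calls:

// sketch: size-based staleness check before a full remote fetch
static void syncIfStale(java.util.concurrent.ConcurrentMap<String, Long> memMeta,
                        String topologyId) throws Exception {
    int localSize = memMeta.size();
    int zkSize = readZkCount(topologyId);                 // hypothetical: cheap ZK counter
    if (localSize != zkSize) {
        for (MetricMeta meta : fetchAllMeta(topologyId)) {   // hypothetical: remote meta list
            memMeta.putIfAbsent(meta.getFQN(), meta.getId()); // never clobber local IDs
        }
    }
}
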
-    
+
     /**
-     * Upload metric to ZK
-     * 
-     * @param event
+     * send topology tracking events to the JStorm monitor
      */
-    public void handleUploadEvent(Upload event) {
-        if (isUploading.getAndSet(true) == true) {
-            LOG.info("Nimbus is alread uploading");
-            return ;
-        }
-        
-        long start = System.currentTimeMillis();
-        
-        cleanTopology();
-        
-        render();
-        
-        isUploading.set(false);
-        
-        long end = System.currentTimeMillis();
-        uploadHistogram.update(end - start);
-        
-        
-    }
-    
-    public String getWorkerHostname(WorkerUploadMetrics workerMetrics) {
-        
-        String hostname = null;
-        String supervisorId = workerMetrics.get_supervisor_id();
-        try {
-            hostname = Cluster.get_supervisor_hostname(stormClusterState, supervisorId);
-        } catch (Exception e) {
-            // TODO Auto-generated catch block
-            LOG.warn("Failed to get hostname of " + supervisorId);
+    protected void handleKillTopologyEvent(KillTopologyEvent event) {
+        metricUploader.sendEvent(this.clusterName, event);
+        removeTopology(event.topologyId);
+    }
+
+    private void handleStartTopologyEvent(StartTopologyEvent event) {
+        this.metricCache.putSampleRate(event.topologyId, event.sampleRate);
+        metricUploader.sendEvent(this.clusterName, event);
+        if (!topologyMetricContexts.containsKey(event.topologyId)) {
+            TopologyMetricContext metricContext = new TopologyMetricContext();
+            // note that workerNum is not set here.
+            this.topologyMetricContexts.put(event.topologyId, metricContext);
         }
-        if (hostname == null) {
-            hostname = DEAD_SUPERVISOR_HEAD + supervisorId;
+    }
+
+    private void handleTaskDeadEvent(TaskDeadEvent event) {
+        metricUploader.sendEvent(this.clusterName, event);
+
+        // unregister dead workers
+        Set<ResourceWorkerSlot> workers = new HashSet<>();
+        workers.addAll(event.deadTasks.values());
+        for (ResourceWorkerSlot worker : workers) {
+            metricCache.unregisterWorker(event.topologyId, worker.getHostname(), worker.getPort());
         }
-        
-        return hostname;
     }
-    
-    public void avgMetricWindow(MetricWindow metric, int parallel) {
-        if (parallel == 0) {
-            return;
+
+    private void handleTaskStartEvent(final TaskStartEvent event) {
+        Assignment assignment = event.newAssignment;
+        TopologyMetricContext metricContext = topologyMetricContexts.get(event.topologyId);
+        if (metricContext != null) {
+            metricContext.setWorkerSet(assignment.getWorkers());
+        } else {
+            metricContext = new TopologyMetricContext();
+            metricContext.setWorkerSet(assignment.getWorkers());
+            topologyMetricContexts.put(event.topologyId, metricContext);
         }
-        Map<Integer, Double> map = metric.get_metricWindow();
-        Map<Integer, Double> newMap = new HashMap<Integer, Double>();
-        if (map != null) {
-            for (Entry<Integer, Double> entry : map.entrySet()) {
-                newMap.put(entry.getKey(), entry.getValue() / parallel);
+        metricUploader.sendEvent(this.clusterName, event);
+    }
+
+    /**
+     * refresh topology metric contexts; followers also sync metric meta from remote.
+     */
+    public void handleRefreshEvent(Refresh dummy) {
+        TimeTicker ticker = new TimeTicker(TimeUnit.MILLISECONDS, true);
+        try {
+            refreshTopologies();
+            LOG.info("refresh topologies, cost:{}", ticker.stopAndRestart());
+            if (!nimbusData.isLeader()) {
+                syncTopologyMeta();
+                LOG.info("sync topology meta, cost:{}", ticker.stop());
             }
+        } catch (Exception ex) {
+            LOG.error("handleRefreshEvent error:", ex);
         }
-        
-        metric.set_metricWindow(newMap);
-    }
-    
-    public MetricInfo mergeMetricInfo(MetricInfo from, MetricInfo to, Set<String> tags) {
-        if (to == null) {
-            to = MetricThrift.mkMetricInfo();
-        }
-        
-        if (from.get_baseMetric() == null) {
-            LOG.warn("No base Metric ");
-            return to;
-        }
-        
-        for (String tag : tags) {
-            
-            MetricWindow fromMetric = from.get_baseMetric().get(tag);
-            Map<String, MetricWindow> toMetricMap = to.get_baseMetric();
-            if (toMetricMap == null) {
-                toMetricMap = new HashMap<String, MetricWindow>();
-                to.set_baseMetric(toMetricMap);
-            }
-            
-            MetricWindow toMetric = toMetricMap.get(tag);
-            
-            toMetric = MetricThrift.mergeMetricWindow(fromMetric, toMetric);
-            
-            toMetricMap.put(tag, toMetric);
-            
-        }
-        
-        return to;
-    }
-    
-    public Map<String, Map<String, MetricWindow>> mergeTaskStreams(
-            Map<String, Map<String, MetricWindow>> componentStreams,
-            Map<String, Map<String, MetricWindow>> taskStreams,
-            Map<String, Map<String, AtomicInteger>> componentStreamParallel) {
-        
-        if (taskStreams == null || taskStreams.size() == 0) {
-            return componentStreams;
-        }
-        
-        if (componentStreams == null) {
-            componentStreams = new HashMap<String, Map<String, MetricWindow>>();
-        }
-        
-        for (Entry<String, Map<String, MetricWindow>> entry : taskStreams.entrySet()) {
+    }
+
+    private TopologyMetricContext getClusterTopologyMetricContext() {
+        return topologyMetricContexts.get(JStormMetrics.CLUSTER_METRIC_KEY);
+    }
+
+    private void mergeAndUploadClusterMetrics() {
+        TopologyMetricContext context = getClusterTopologyMetricContext();
+        TopologyMetric tpMetric = context.mergeMetrics();
+        if (tpMetric == null) {
+            tpMetric = MetricUtils.mkTopologyMetric();
+            tpMetric.set_topologyMetric(MetricUtils.mkMetricInfo());
+        }
+
+        // reset each snapshot's metric id
+        MetricInfo clusterMetrics = tpMetric.get_topologyMetric();
+        Map<String, Long> metricNames = context.getMemMeta();
+        for (Map.Entry<String, Map<Integer, MetricSnapshot>> entry : clusterMetrics.get_metrics().entrySet()) {
             String metricName = entry.getKey();
-            Map<String, MetricWindow> streamMetricWindows = entry.getValue();
-            
-            if (streamMetricWindows == null) {
-                continue;
-            }
-            
-            Map<String, AtomicInteger> streamCounters = componentStreamParallel.get(metricName);
-            if (streamCounters == null) {
-                streamCounters = new HashMap<String, AtomicInteger>();
-                componentStreamParallel.put(metricName, streamCounters);
+            MetricType metricType = MetricUtils.metricType(metricName);
+            Long metricId = metricNames.get(metricName);
+            for (Map.Entry<Integer, MetricSnapshot> metric : entry.getValue().entrySet()) {
+                MetricSnapshot snapshot = metric.getValue();
+                snapshot.set_metricId(metricId);
+                if (metricType == MetricType.HISTOGRAM || metricType == MetricType.TIMER) {
+                    snapshot.set_points(new ArrayList<Long>(0));
+                }
+//                entry.getValue().put(metric.getKey(), snapshot);
             }
-            
-            Map<String, MetricWindow> componentStreamMetricWindows = componentStreams.get(metricName);
-            if (componentStreamMetricWindows == null) {
-                componentStreamMetricWindows = new HashMap<String, MetricWindow>();
-                componentStreams.put(metricName, componentStreamMetricWindows);
+        }
+
+        // fill metrics that were not collected in this cycle with zero values
+        long ts = System.currentTimeMillis();
+        for (Map.Entry<String, Long> entry : metricNames.entrySet()) {
+            String name = entry.getKey();
+            if (!clusterMetrics.get_metrics().containsKey(name)) {
+                Map<Integer, MetricSnapshot> metric = new HashMap<>();
+                MetricType type = MetricUtils.metricType(name);
+                metric.put(AsmWindow.M1_WINDOW, new MetricSnapshot(entry.getValue(), ts, type.getT()));
+                clusterMetrics.put_to_metrics(name, metric);
             }
-            
-            for (Entry<String, MetricWindow> streamEntry : streamMetricWindows.entrySet()) {
-                String streamName = streamEntry.getKey();
-                MetricWindow taskMetricWindow = streamEntry.getValue();
-                
-                MetricWindow componentMetricWindow = componentStreamMetricWindows.get(streamName);
-                
-                componentMetricWindow = MetricThrift.mergeMetricWindow(taskMetricWindow, componentMetricWindow);
-                
-                componentStreamMetricWindows.put(streamName, componentMetricWindow);
-                
-                AtomicInteger counter = streamCounters.get(streamName);
-                if (counter == null) {
-                    counter = new AtomicInteger(0);
-                    streamCounters.put(streamName, counter);
+        }
+
+        //upload to cache
+        Update event = new Update();
+        event.timestamp = System.currentTimeMillis();
+        event.topologyMetrics = tpMetric;
+        event.topologyId = JStormMetrics.CLUSTER_METRIC_KEY;
+        pushEvent(event);
+
+        LOG.info("send update event for cluster metrics, size : {}", clusterMetrics.get_metrics_size());
+    }
+
+    //update cluster metrics local cache
+    private void updateClusterMetrics(String topologyId, TopologyMetric tpMetric) {
+        if (tpMetric.get_topologyMetric().get_metrics_size() > 0) {
+            TopologyMetricContext context = getClusterTopologyMetricContext();
+            MetricInfo topologyMetrics = tpMetric.get_topologyMetric();
+            // make a new MetricInfo to save the topologyId's metric
+            MetricInfo clusterMetrics = MetricUtils.mkMetricInfo();
+            Set<String> metricNames = new HashSet<>();
+            for (Map.Entry<String, Map<Integer, MetricSnapshot>> entry : topologyMetrics.get_metrics().entrySet()) {
+                String metricName = MetricUtils.topo2clusterName(entry.getKey());
+                MetricType metricType = MetricUtils.metricType(metricName);
+                Map<Integer, MetricSnapshot> winData = new HashMap<>();
+                for (Map.Entry<Integer, MetricSnapshot> entryData : entry.getValue().entrySet()) {
+                    MetricSnapshot snapshot = entryData.getValue().deepCopy();
+                    winData.put(entryData.getKey(), snapshot);
+                    if (metricType == MetricType.HISTOGRAM || metricType == MetricType.TIMER) {
+                        // reset topology metric points
+                        entryData.getValue().set_points(new ArrayList<Long>(0));
+                    }
                 }
-                counter.incrementAndGet();
+                clusterMetrics.put_to_metrics(metricName, winData);
+                metricNames.add(metricName);
             }
+            // save to local cache, waiting for merging
+            context.addToMemCache(topologyId, clusterMetrics);
+            registerMetrics(JStormMetrics.CLUSTER_METRIC_KEY, metricNames);
         }
-        
-        return componentStreams;
     }
-    
-    public void avgStreams(Map<String, Map<String, MetricWindow>> tagStreamsMetrics, Map<String, Map<String, AtomicInteger>> counters, String tag) {
-        if (tagStreamsMetrics == null) {
-            return;
-        }
-        
-        Map<String, MetricWindow> streamMetrics = tagStreamsMetrics.get(tag);
-        if (streamMetrics == null) {
-            return;
+
+    /**
+     * put metric data to metric cache.
+     */
+    public void handleUpdateEvent(Update event) {
+        TopologyMetric topologyMetrics = event.topologyMetrics;
+        final String topologyId = event.topologyId;
+
+        if (this.topologyMetricContexts.containsKey(topologyId)) {
+            if (!JStormMetrics.CLUSTER_METRIC_KEY.equals(topologyId)) {
+                updateClusterMetrics(topologyId, topologyMetrics);
+            }
+
+            // overwrite
+            metricCache.putMetricData(topologyId, topologyMetrics);
+
+            // The process below is kind of a transaction: first we lock an empty slot and mark it as PRE_SET.
+            // At this point the slot is not yet ready for uploading, since the upload thread only looks for SET slots.
+            // After all metric data has been saved, we mark the slot as SET, making it ready for uploading.
+            int idx = getAndPresetFirstEmptyIndex();
+            if (idx >= 0) {
+                TopologyMetricDataInfo summary = new TopologyMetricDataInfo();
+                summary.topologyId = topologyId;
+                summary.timestamp = event.timestamp;
+                if (topologyId.equals(JStormMetrics.NIMBUS_METRIC_KEY) ||
+                        topologyId.equals(JStormMetrics.CLUSTER_METRIC_KEY)) {
+                    summary.type = MetricUploader.METRIC_TYPE_TOPLOGY;
+                } else {
+                    if (topologyMetrics.get_topologyMetric().get_metrics_size() > 0 ||
+                            topologyMetrics.get_componentMetric().get_metrics_size() > 0) {
+                        if (topologyMetrics.get_taskMetric().get_metrics_size() +
+                                topologyMetrics.get_workerMetric().get_metrics_size() +
+                                topologyMetrics.get_nettyMetric().get_metrics_size() +
+                                topologyMetrics.get_streamMetric().get_metrics_size() > 0) {
+                            summary.type = MetricUploader.METRIC_TYPE_ALL;
+                        } else {
+                            summary.type = MetricUploader.METRIC_TYPE_TOPLOGY;
+                        }
+                    } else {
+                        summary.type = MetricUploader.METRIC_TYPE_TASK;
+                    }
+                }
+
+                metricCache.put(PENDING_UPLOAD_METRIC_DATA_INFO + idx, summary);
+                metricCache.put(PENDING_UPLOAD_METRIC_DATA + idx, topologyMetrics);
+                markSet(idx);
+                LOG.info("put metric data to local cache, topology:{}, idx:{}", topologyId, idx);
+            } else {
+                LOG.error("exceeding maxPendingUploadMetrics, skip metrics data for topology:{}", topologyId);
+            }
+        } else {
+            LOG.warn("topology {} has been killed or has not started, skip update.", topologyId);
         }
-        
-        for (Entry<String, MetricWindow> entry : streamMetrics.entrySet()) {
-            String streamName = entry.getKey();
-            MetricWindow metric = entry.getValue();
-            
-            AtomicInteger counter = counters.get(tag).get(streamName);
-            if (counter == null) {
-                continue;
-                
+    }
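
Together with markSet/markUploading/markUploaded defined earlier, handleUpdateEvent and the upload thread drive a four-state slot machine: UNSET -> PRE_SET -> SET -> UPLOADING -> UNSET. A self-contained sketch of one slot's lifecycle (cache writes elided):

import java.util.concurrent.atomic.AtomicIntegerArray;

public class SlotLifecycleSketch {
    static final int UNSET = 0, SET = 1, UPLOADING = 2, PRE_SET = 3;

    public static void main(String[] args) {
        AtomicIntegerArray slots = new AtomicIntegerArray(4);
        int idx = 0;

        // producer (handleUpdateEvent)
        slots.compareAndSet(idx, UNSET, PRE_SET); // claimed; invisible to the uploader
        // ... put summary + metric data into the cache under this index ...
        slots.set(idx, SET);                      // published; visible to the uploader

        // consumer (MetricsUploadThread)
        if (slots.get(idx) == SET) {
            slots.set(idx, UPLOADING);            // claimed for upload
            // ... upload, then remove the cache entries ...
            slots.set(idx, UNSET);                // recycled
        }
    }
}
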
+
+    /**
+     * get topology metrics, note that only topology & component & worker metrics are returned
+     */
+    public TopologyMetric getTopologyMetric(String topologyId) {
+        long start = System.nanoTime();
+        try {
+            TopologyMetric ret = new TopologyMetric();
+            List<MetricInfo> topologyMetrics = metricCache.getMetricData(topologyId, MetaType.TOPOLOGY);
+            List<MetricInfo> componentMetrics = metricCache.getMetricData(topologyId, MetaType.COMPONENT);
+            List<MetricInfo> workerMetrics = metricCache.getMetricData(topologyId, MetaType.WORKER);
+
+            MetricInfo dummy = MetricUtils.mkMetricInfo();
+            if (topologyMetrics.size() > 0) {
+                // get the last min topology metric
+                ret.set_topologyMetric(topologyMetrics.get(topologyMetrics.size() - 1));
+            } else {
+                ret.set_topologyMetric(dummy);
             }
-            
-            avgMetricWindow(metric, counter.get());
-        }
-    }
-    
-    public void mergeTasks(TopologyMetric topologyMetric, String topologyId) {
-        Map<Integer, MetricInfo> taskMetrics = topologyMetric.get_taskMetric();
-        
-        Map<Integer, String> taskToComponent = null;
-		try {
-			taskToComponent = Cluster.get_all_task_component(stormClusterState, topologyId, null);
-		} catch (Exception e) {
-			// TODO Auto-generated catch block
-			LOG.error("Failed to get taskToComponent");
-            return ;
-		}
-        if (taskToComponent == null) {
-            LOG.error("Failed to get taskToComponent");
-            return ;
-        }
-        
-        Map<String, MetricInfo> componentMetrics = topologyMetric.get_componentMetric();
-        if (componentMetrics == null) {
-            componentMetrics = new HashMap<String, MetricInfo>();
-            topologyMetric.set_componentMetric(componentMetrics);
-        }
-        
-        Map<String, AtomicInteger> componentTaskParallel = new HashMap<String, AtomicInteger>();
-        Map<String, Map<String, AtomicInteger>> componentStreamParallel = new HashMap<String, Map<String, AtomicInteger>>();
-        
-        for (Entry<Integer, MetricInfo> entry : taskMetrics.entrySet()) {
-            Integer taskId = entry.getKey();
-            MetricInfo taskMetric = entry.getValue();
-            
-            String component = taskToComponent.get(taskId);
-            if (component == null) {
-                LOG.error("Failed to get component of task " + taskId);
-                continue;
+            if (componentMetrics.size() > 0) {
+                ret.set_componentMetric(componentMetrics.get(0));
+            } else {
+                ret.set_componentMetric(dummy);
             }
-            
-            MetricInfo componentMetric = componentMetrics.get(component);
-            
-            componentMetric = mergeMetricInfo(taskMetric, componentMetric, MetricDef.MERGE_SUM_TAG);
-            componentMetric = mergeMetricInfo(taskMetric, componentMetric, MetricDef.MERGE_AVG_TAG);
-            
-            Map<String, Map<String, MetricWindow>> input = mergeTaskStreams(componentMetric.get_inputMetric(), taskMetric.get_inputMetric(), componentStreamParallel);
-            componentMetric.set_inputMetric(input);
-            
-            Map<String, Map<String, MetricWindow>> output = mergeTaskStreams(componentMetric.get_outputMetric(), taskMetric.get_outputMetric(), componentStreamParallel);
-            componentMetric.set_outputMetric(output);
-            
-            componentMetrics.put(component, componentMetric);
-            
-            AtomicInteger counter = componentTaskParallel.get(component);
-            if (counter == null) {
-                counter = new AtomicInteger(0);
-                componentTaskParallel.put(component, counter);
+            if (workerMetrics.size() > 0) {
+                ret.set_workerMetric(workerMetrics.get(0));
+            } else {
+                ret.set_workerMetric(dummy);
             }
-            
-            counter.incrementAndGet();
-        }
-        
-        for (Entry<String, MetricInfo> entry : componentMetrics.entrySet()) {
-            String componentName = entry.getKey();
-            MetricInfo metricInfo = entry.getValue();
-            
-            AtomicInteger counter = componentTaskParallel.get(componentName);
-            
-            for (String tag : MetricDef.MERGE_AVG_TAG) {
-                MetricWindow metricWindow = metricInfo.get_baseMetric().get(tag);
-                
-                avgMetricWindow(metricWindow, counter.get());
-                
-                avgStreams(metricInfo.get_inputMetric(), componentStreamParallel, tag);
-                avgStreams(metricInfo.get_outputMetric(), componentStreamParallel, tag);
+            ret.set_taskMetric(dummy);
+            ret.set_streamMetric(dummy);
+            ret.set_nettyMetric(dummy);
+
+            return ret;
+        } finally {
+            long end = System.nanoTime();
+            SimpleJStormMetric.updateNimbusHistogram("getTopologyMetric", (end - start) / TimeUtils.NS_PER_US);
+        }
+    }
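
Note that the returned struct is always fully populated: the task, stream and netty slots get an empty MetricInfo dummy, so callers never need null checks. A hedged usage sketch (the topology ID is hypothetical):

TopologyMetric tm = metricsRunnable.getTopologyMetric("SequenceTest-1-1000");
int topologyLevel = tm.get_topologyMetric().get_metrics_size(); // last-minute topology metrics
int taskLevel = tm.get_taskMetric().get_metrics_size();         // 0: empty dummy, never null
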
+
+    public static String getWorkerSlotName(String hostname, Integer port) {
+        return hostname + ":" + port;
+    }
+
+    class RefreshTopologiesThread extends RunnableCallback {
+        @Override
+        public void run() {
+            if (!isShutdown.get()) {
+                pushEvent(new Refresh());
             }
         }
+
+        @Override
+        public Object getResult() {
+            return TimeUtils.SEC_PER_MIN;
+        }
+
+        @Override
+        public String getThreadName() {
+            return "RefreshThread";
+        }
     }
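
RefreshTopologiesThread is a RunnableCallback run by AsyncLoopThread; judging from the "every min" javadoc above, getResult() supplies the sleep interval in seconds between iterations. An illustrative driver loop under that assumption (this is not the actual AsyncLoopThread implementation):

// sketch: how AsyncLoopThread presumably drives the callback
static void drive(RunnableCallback cb, java.util.concurrent.atomic.AtomicBoolean shutdown) {
    while (!shutdown.get()) {
        cb.run();                                       // pushes a Refresh event
        Integer intervalSec = (Integer) cb.getResult(); // TimeUtils.SEC_PER_MIN == 60
        if (intervalSec == null || intervalSec < 0) {
            break;
        }
        JStormUtils.sleepMs(intervalSec * 1000L);
    }
}
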
-    
-    public void mergeComponent(TopologyMetric topologyMetric) {
-        MetricInfo topologyMetricInfo = MetricThrift.mkMetricInfo();
-        topologyMetric.set_topologyMetric(topologyMetricInfo);
-        Map<String, MetricInfo> componentMetrics = topologyMetric.get_componentMetric();
-        if (componentMetrics == null) {
-            return;
+
+    class MetricsUploadThread extends Thread {
+        public MetricsUploadThread() {
+            setName("main-upload-thread");
         }
-        
-        for (MetricInfo componentMetric : componentMetrics.values()) {
-            topologyMetricInfo = mergeMetricInfo(componentMetric, topologyMetricInfo, MetricDef.MERGE_SUM_TAG);
-        }
-        
-        topologyMetric.set_topologyMetric(topologyMetricInfo);
-    }
-    
-    public void mergeTopology(TopologyMetric topologyMetric, WorkerUploadMetrics workerMetrics) {
-        String topologyId = workerMetrics.get_topology_id();
-        
-        Map<Integer, MetricInfo> taskMetrics = topologyMetric.get_taskMetric();
-        if (taskMetrics == null) {
-            taskMetrics = new HashMap<Integer, MetricInfo>();
-            topologyMetric.set_taskMetric(taskMetrics);
-        }
-        taskMetrics.putAll(workerMetrics.get_taskMetric());
-        
-        String hostname = getWorkerHostname(workerMetrics);
-        topologyMetric.put_to_workerMetric(getWorkerSlotName(hostname, workerMetrics.get_port()), workerMetrics.get_workerMetric());
-        
-    }
-    
-    public void mergeNetty(WorkerUploadMetrics workerMetric, String topologyId, Set<String> connections) {
-        
-    	if (topologyNettyMgr.getTopology(topologyId) == false) {
-            return ;
-        }
-        Map<String, MetricInfo> connectionMetrics = workerMetric.get_nettyMetric().get_connections();
-        for (Entry<String, MetricInfo> entry : connectionMetrics.entrySet()) {
-            String connectionName = entry.getKey();
-            MetricInfo metric = entry.getValue();
-            
-            MetricInfo cacheMetric = (MetricInfo)dbCache.get(getNettyConnectionKey(topologyId, connectionName));
-            cacheMetric = MetricThrift.mergeMetricInfo(metric, cacheMetric);
-            
-            connections.add(connectionName);
-            
-            dbCache.put(getNettyConnectionKey(topologyId, connectionName), cacheMetric);
-        }
-    }
-    
-    public void mergeNetty(String topologyId, Set<String> connections) {
-    	if (topologyNettyMgr.getTopology(topologyId) == false) {
-    		LOG.info("Skip merge netty detail metrics");
-            return ;
-        }
-        // @@@
-        // this function will cost much memory when worker number is more than 200
-        Map<String, MetricInfo> metricMap = new TreeMap<String, MetricInfo>();
-        
-        for (String connection : connections) {
-            MetricInfo cacheMetric = (MetricInfo)dbCache.get(getNettyConnectionKey(topologyId, connection));
-            if (cacheMetric == null) {
-                LOG.warn("Failed to get cacheMetric of {}:{}", topologyId, connection );
-                continue;
-            }
-            
-            metricMap.put(connection, cacheMetric);
-            dbCache.remove(getNettyConnectionKey(topologyId, connection));
-        }
-        
-        dbCache.put(getNettyTopologyKey(topologyId), metricMap);
-        // accelerate free memory
-        metricMap.clear();
-    }
-    
-    public void render() {
-        for (Entry<String, Set<String>> entry : topologyWorkers.entrySet()) {
-            String topologyId = entry.getKey();
-            Set<String> workers = entry.getValue();
-            Set<String> connections = new TreeSet<String>();
-            
-            TopologyMetric topologyMetric = new TopologyMetric();
-            
-            boolean isExistWorker = false;
-            for (String workerId : workers) {
-                WorkerUploadMetrics workerMetric = (WorkerUploadMetrics) dbCache.get(getWorkerKey(topologyId, workerId));
-                if (workerMetric == null) {
-                    LOG.warn("Failed to get WorkerUploadMetrics of " + getWorkerKey(topologyId, workerId));
-                    continue;
+
+        @Override
+        public void run() {
+            while (!isShutdown.get()) {
+                try {
+                    if (metricUploader != null && nimbusData.isLeader()) {
+                        final int idx = getFirstPendingUploadIndex();
+                        if (idx >= 0) {
+                            markUploading(idx);
+                            upload(clusterName, idx);
+                        }
+                    }
+                    JStormUtils.sleepMs(5);
+                } catch (Exception ex) {
+                    LOG.error("Error", ex);
                 }
-                isExistWorker = true;
-                mergeTopology(topologyMetric, workerMetric);
-                
-                mergeNetty(workerMetric, topologyId, connections);
             }
-            if (isExistWorker == false) {
-            	LOG.info("No worker metrics of {}", topologyId);
-            	continue;
+        }
+
+        public boolean upload(final String clusterName, final int idx) {
+            final TopologyMetricDataInfo summary = getMetricDataInfoFromCache(idx);
+            if (summary == null) {
+                LOG.warn("metric summary is null from cache idx:{}", idx);
+                markUploaded(idx);
+                return true;
             }
-            
-            mergeTasks(topologyMetric, topologyId);
-            
-            mergeComponent(topologyMetric);
-            
-            
-            dbCache.put(getTopologyKey(topologyId), topologyMetric);
-            
-            mergeNetty(topologyId, connections);
-            
-            LOG.info("Successfully render topologyId of " + topologyId);
-            
-            uploadToAlimonitor(topologyMetric, topologyId);
-            
-            cleanDeadSupervisorWorker(topologyMetric);
-            
-            
-            try {
-                
-                //LOG.info(topologyId + " metrics is :\n" + Utils.toPrettyJsonString(topologyMetric));
-                LOG.info(topologyId + " finish metric");
-                stormClusterState.set_topology_metric(topologyId, topologyMetric);
-                LOG.info("Successfully uploaded toplogy metrics: " + topologyId);
-            } catch (Exception e) {
-                // TODO Auto-generated catch block
-                LOG.info("Failed to upload toplogy metrics: " + topologyId, e);
-                continue;
+
+            final String topologyId = summary.topologyId;
+            if (!isTopologyAlive(topologyId)) {
+                LOG.warn("topology {} is not alive, skip sending metrics.", topologyId);
+                markUploaded(idx);
+                return true;
             }
-            
+
+            return metricUploader.upload(clusterName, topologyId, idx, summary.toMap());
         }
     }
-    
-    public void handleUpdateEvent(Update event) {
-        long start = System.currentTimeMillis();
-        
-        WorkerUploadMetrics workerMetrics = event.workerMetrics;
-        
-        String topologyId = workerMetrics.get_topology_id();
-        if (removing.containsKey(topologyId) == true) {
-            LOG.info("Topology " + topologyId + " has been removed, skip update");
-            return;
+
+    class FlushMetricMetaThread extends Thread {
+
+        public FlushMetricMetaThread() {
+            setName("FlushMetricMetaThread");
         }
-        
-        Set<String> workers = topologyWorkers.get(topologyId);
-        if (workers == null) {
-            workers = new HashSet<String>();
-            topologyWorkers.put(topologyId, workers);
-        }
-        
-        String workerSlot = getWorkerSlotName(workerMetrics.get_supervisor_id(), workerMetrics.get_port());
-        
-        workers.add(workerSlot);
-        dbCache.put(getWorkerKey(topologyId, workerSlot), workerMetrics);
-        
-        long end = System.currentTimeMillis();
-        
-        updateHistogram.update((end - start));
-    }
-    
-    public void uploadToAlimonitor(TopologyMetric topologyMetric, String topologyId) {
-        // @@@ TODO
-    }
-    
-    
-    public TopologyMetric getTopologyMetric(String topologyId) {
-        long start = System.nanoTime();
-        try {
-            TopologyMetric ret = (TopologyMetric) dbCache.get(getTopologyKey(topologyId));
-            if (ret == null) {
-                return emptyTopologyMetric;
-            } else {
-                return ret;
+
+        @Override
+        public void run() {
+            while (!isShutdown.get()) {
+                long start = System.currentTimeMillis();
+                try {
+                    // if metricUploader is not fully initialized, return directly
+                    if (nimbusData.isLeader() && metricUploader != null) {
+                        for (Map.Entry<String, TopologyMetricContext> entry : topologyMetricContexts.entrySet()) {
+                            String topologyId = entry.getKey();
+                            TopologyMetricContext metricContext = entry.getValue();
+
+                            Map<String, Long> cachedMeta = metricCache.getMeta(topologyId);
+                            if (cachedMeta == null) {
+                                cachedMeta = new HashMap<>();
+                            }
+                            Map<String, Long> memMeta = metricContext.getMemMeta();
+                            if (memMeta.size() > cachedMeta.size()) {
+                                cachedMeta.putAll(memMeta);
+                            }
+                            metricCache.putMeta(topologyId, cachedMeta);
+
+                            int curSize = cachedMeta.size();
+                            if (curSize != metricContext.getFlushedMetaNum()) {
+                                metricContext.setFlushedMetaNum(curSize);
+
+                                metricUploader.registerMetrics(clusterName, topologyId, cachedMeta);
+                                LOG.info("flush metric meta, topology:{}, total:{}, cost:{}.",
+                                        topologyId, curSize, System.currentTimeMillis() - start);
+                            }
+                            stormClusterState.set_topology_metric(topologyId, curSize);
+                        }
+                    }
+
+                    JStormUtils.sleepMs(15000);
+                } catch (Exception ex) {
+                    LOG.error("Error", ex);
+                }
             }
-        }finally {
-            long end = System.nanoTime();
-            
-            SimpleJStormMetric.updateHistorgram("getTopologyMetric", (end - start)/1000000.0d);
         }
     }
-    
-    public SortedMap<String, MetricInfo> getNettyMetric(String topologyId) {
-        TreeMap<String, MetricInfo> ret = (TreeMap<String, MetricInfo>)dbCache.get(getNettyTopologyKey(topologyId));
-        if (ret == null) {
-            return emptyNettyMetric;
-        }else {
+
+    public static class TopologyMetricDataInfo implements Serializable {
+        private static final long serialVersionUID = 1303262512351757610L;
+
+        public String topologyId;
+        public String type; // "tp" for tp/comp metrics OR "task" for task/stream/worker/netty metrics
+        public long timestamp;   // metrics report time
+
+        public Map<String, Object> toMap() {
+            Map<String, Object> ret = new HashMap<String, Object>();
+            ret.put(MetricUploader.METRIC_TIME, timestamp);
+            ret.put(MetricUploader.METRIC_TYPE, type);
+
             return ret;
         }
+
+        @Override
+        public String toString() {
+            return ToStringBuilder.reflectionToString(this, ToStringStyle.SHORT_PREFIX_STYLE);
+        }
     }
-    
-    public static String getWorkerSlotName(String hostname, Integer port) {
-        return hostname + ":" + port;
+
+    // ==============================================
+    // =================== events ===================
+    // ==============================================
+    public static class Event {
+        protected Event() {
+        }
+
+        public String clusterName;
+        public String topologyId;
+        public long timestamp;
+
+        @Override
+        public String toString() {
+            return ToStringBuilder.reflectionToString(this, ToStringStyle.SHORT_PREFIX_STYLE);
+        }
+    }
+
+    public static class Update extends Event {
+        public TopologyMetric topologyMetrics;
     }
-    
-    public static String getWorkerKey(String topologyId, String workerSlot) {
-        return CACHE_NAMESPACE_METRIC + "@" + topologyId + "@" + workerSlot;
+
+    public static class Remove extends Event {
     }
-    
-    public static String getTopologyKey(String topologyId) {
-        return CACHE_NAMESPACE_METRIC + "@" + topologyId;
+
+    public static class Refresh extends Event {
     }
-    
-    public static String getNettyConnectionKey(String topologyId, String connection) {
-        return CACHE_NAMESPACE_NETTY + "@" + topologyId + "@" + connection;
+
+
+    public static class KillTopologyEvent extends Event {
     }
-    
-    public static String getNettyTopologyKey(String topologyId) {
-        return CACHE_NAMESPACE_NETTY + "@" + topologyId;
+
+    public static class StartTopologyEvent extends Event {
+        public double sampleRate;
+    }
+
+    public static class TaskDeadEvent extends Event {
+        public Map<Integer, ResourceWorkerSlot> deadTasks;
+    }
+
+    public static class TaskStartEvent extends Event {
+        public Assignment oldAssignment;
+        public Assignment newAssignment;
+        public Map<Integer, String> task2Component;
     }
-    
-    
 }
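
A note on the nested event classes above: they are plain data carriers that a single handler loop consumes from a queue and dispatches on by concrete type. A minimal sketch of that push/dispatch pattern (queue and handler bodies here are illustrative, not the actual TopologyMetricsRunnable internals):

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    class EventLoopSketch {
        static class Event { String topologyId; long timestamp; }
        static class Refresh extends Event { }
        static class Remove extends Event { }

        private final BlockingQueue<Event> queue = new LinkedBlockingQueue<Event>();

        // producers (refresh thread, thrift handlers, ...) only enqueue
        void pushEvent(Event e) {
            queue.offer(e);
        }

        // a single consumer thread drains the queue and dispatches by type
        void loopOnce() throws InterruptedException {
            Event e = queue.take(); // blocks until an event arrives
            if (e instanceof Refresh) {
                // re-scan alive topologies, prune contexts of dead ones
            } else if (e instanceof Remove) {
                // drop cached metric data for e.topologyId
            }
        }
    }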

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/TopologyNettyMgr.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/TopologyNettyMgr.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/TopologyNettyMgr.java
index 7eaccab..6e55049 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/TopologyNettyMgr.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/TopologyNettyMgr.java
@@ -1,105 +1,78 @@
 package com.alibaba.jstorm.daemon.nimbus;
 
-import java.util.Map;
-
+import backtype.storm.Config;
+import backtype.storm.generated.InvalidTopologyException;
+import com.alibaba.jstorm.cluster.Common;
+import com.alibaba.jstorm.cluster.StormConfig;
+import com.alibaba.jstorm.metric.MetricUtils;
 import org.jboss.netty.util.internal.ConcurrentHashMap;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.alibaba.jstorm.client.ConfigExtension;
-import com.alibaba.jstorm.cluster.Common;
-import com.alibaba.jstorm.cluster.StormConfig;
-import com.alibaba.jstorm.utils.JStormUtils;
-
-import backtype.storm.Config;
-import backtype.storm.generated.InvalidTopologyException;
+import java.util.Map;
 
 public class TopologyNettyMgr {
-	private static Logger LOG = LoggerFactory.getLogger(TopologyNettyMgr.class);
-	private boolean defaultEnable = true;
-	private Map nimbusConf;
-	private ConcurrentHashMap<String, Boolean> setting = new ConcurrentHashMap<String, Boolean>();
-	private static final int WORKER_DISABLE_THREADHOLD = 200;
-	
-	public TopologyNettyMgr(Map conf) {
-		nimbusConf = conf;
-		
-		Boolean isEnable = ConfigExtension.isEnableTopologyNettyMetrics(conf);
-		if (isEnable != null) {
-			defaultEnable = isEnable;
-		}
-		
-		LOG.info("Default netty metrics setting is " + defaultEnable);
-	}
-	
-	protected boolean getTopology(Map conf) {
-		Boolean isEnable = ConfigExtension.isEnableTopologyNettyMetrics(conf);
-		if (isEnable != null) {
-			return isEnable;
-		}
-		
-		int workerNum = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_WORKERS), 1);
-		if (workerNum <=  WORKER_DISABLE_THREADHOLD) {
-			isEnable = Boolean.TRUE;
-		}else {
-			isEnable = Boolean.FALSE;
-		}
-		
-		return isEnable;
-	}
-	
-	public boolean getTopology(String topologyId) {
-		try {
-			String topologyName = Common.topologyIdToName(topologyId);
-			
-			Boolean isEnable = setting.get(topologyName);
-			if (isEnable != null) {
-				return isEnable;
-			}
-			
-			Map topologyConf =
-	                StormConfig.read_nimbus_topology_conf(nimbusConf, topologyId);
-			
-		    isEnable = getTopology(topologyConf);
-			setting.put(topologyName, isEnable);
-			LOG.info("{} netty metrics setting is {}", topologyName, isEnable);
-			return isEnable;
-		
-		}catch(Exception e) {
-			LOG.info("Failed to get {} netty metrics setting ", topologyId);
-			return defaultEnable;
-		}
-		
-		
-	}
-	
-	public void setTopology(Map conf) {
-		String topologyName = (String)conf.get(Config.TOPOLOGY_NAME);
-		if (topologyName == null) {
-			LOG.info("No topologyName setting");
-			return ;
-		}
-		
-		boolean isEnable = getTopology(conf);
-		
-		setting.put(topologyName, isEnable);
-		
-		LOG.info("{} netty metrics setting is {}", topologyName, isEnable);
-		return ;
-		
-	}
-	
-	public void rmTopology(String topologyId) {
-		String topologyName;
-		try {
-			topologyName = Common.topologyIdToName(topologyId);
-			setting.remove(topologyName);
-			LOG.info("Remove {} netty metrics setting ", topologyName);
-		} catch (InvalidTopologyException e) {
-			// TODO Auto-generated catch block
-			
-		}
-		
-	}
+    private static Logger LOG = LoggerFactory.getLogger(TopologyNettyMgr.class);
+    private Map nimbusConf;
+    private ConcurrentHashMap<String, Boolean> setting = new ConcurrentHashMap<String, Boolean>();
+
+    public TopologyNettyMgr(Map conf) {
+        nimbusConf = conf;
+
+    }
+
+    protected boolean getTopology(Map conf) {
+        return MetricUtils.isEnableNettyMetrics(conf);
+    }
+
+    public boolean getTopology(String topologyId) {
+        try {
+            String topologyName = Common.topologyIdToName(topologyId);
+
+            Boolean isEnable = setting.get(topologyName);
+            if (isEnable != null) {
+                return isEnable;
+            }
+
+            Map topologyConf = StormConfig.read_nimbus_topology_conf(nimbusConf, topologyId);
+
+            isEnable = getTopology(topologyConf);
+            setting.put(topologyName, isEnable);
+            LOG.info("{} netty metrics setting is {}", topologyName, isEnable);
+            return isEnable;
+
+        } catch (Exception e) {
+            LOG.info("Failed to get {} netty metrics setting ", topologyId);
+            return true;
+        }
+
+    }
+
+    public void setTopology(Map conf) {
+        String topologyName = (String) conf.get(Config.TOPOLOGY_NAME);
+        if (topologyName == null) {
+            LOG.info("No topologyName setting");
+            return;
+        }
+
+        boolean isEnable = getTopology(conf);
+
+        setting.put(topologyName, isEnable);
+
+        LOG.info("{} netty metrics setting is {}", topologyName, isEnable);
+        return;
+
+    }
+
+    public void rmTopology(String topologyId) {
+        String topologyName;
+        try {
+            topologyName = Common.topologyIdToName(topologyId);
+            setting.remove(topologyName);
+            LOG.info("Remove {} netty metrics setting ", topologyName);
+        } catch (InvalidTopologyException ignored) {
+        }
+
+    }
 
 }
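
TopologyNettyMgr memoizes the per-topology netty-metrics flag, so the topology conf is read from nimbus at most once per topology, with "enabled" as the fallback on error. A stripped-down sketch of that cache-aside pattern (ConfigReader is a hypothetical stand-in for StormConfig.read_nimbus_topology_conf plus MetricUtils.isEnableNettyMetrics):

    import java.util.concurrent.ConcurrentHashMap;

    class NettyFlagCacheSketch {
        interface ConfigReader {
            boolean readEnableFlag(String topologyId) throws Exception;
        }

        private final ConcurrentHashMap<String, Boolean> cache = new ConcurrentHashMap<String, Boolean>();
        private final ConfigReader reader;

        NettyFlagCacheSketch(ConfigReader reader) {
            this.reader = reader;
        }

        boolean isEnabled(String topologyId) {
            Boolean cached = cache.get(topologyId);
            if (cached != null) {
                return cached; // fast path: answered from memory
            }
            try {
                boolean enabled = reader.readEnableFlag(topologyId); // slow path: read conf
                cache.put(topologyId, enabled);
                return enabled;
            } catch (Exception e) {
                return true; // fail open, mirroring getTopology(String)'s default
            }
        }
    }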

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/metric/uploader/AlimonitorClient.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/metric/uploader/AlimonitorClient.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/metric/uploader/AlimonitorClient.java
new file mode 100644
index 0000000..78bb1d2
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/metric/uploader/AlimonitorClient.java
@@ -0,0 +1,226 @@
+package com.alibaba.jstorm.daemon.nimbus.metric.uploader;
+
+import backtype.storm.generated.TopologyMetric;
+import org.apache.http.HttpEntity;
+import org.apache.http.NameValuePair;
+import org.apache.http.client.entity.UrlEncodedFormEntity;
+import org.apache.http.client.methods.CloseableHttpResponse;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClientBuilder;
+import org.apache.http.message.BasicNameValuePair;
+import org.apache.http.util.EntityUtils;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.net.URLEncoder;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class AlimonitorClient extends DefaultMetricUploader {
+
+    public static Logger LOG = LoggerFactory.getLogger(AlimonitorClient.class);
+
+    // Send to localhost:15776 by default
+    public static final String DEFAUT_ADDR = "127.0.0.1";
+    public static final String DEFAULT_PORT = "15776";
+    public static final int DEFAUTL_FLAG = 0;
+    public static final String DEFAULT_ERROR_INFO = "";
+
+    private final String COLLECTION_FLAG = "collection_flag";
+    private final String ERROR_INFO = "error_info";
+    private final String MSG = "MSG";
+
+    private String port;
+    private String requestIP;
+    private String monitorName;
+    private int collectionFlag;
+    private String errorInfo;
+
+    private boolean post;
+
+    public AlimonitorClient() {
+    }
+
+    public AlimonitorClient(String requestIP, String port, boolean post) {
+        this.requestIP = requestIP;
+        this.port = port;
+        this.post = post;
+        this.monitorName = null;
+        this.collectionFlag = 0;
+        this.errorInfo = null;
+    }
+
+    public void setIpAddr(String ipAddr) {
+        this.requestIP = ipAddr;
+    }
+
+    public void setPort(String port) {
+        this.port = port;
+    }
+
+    public void setMonitorName(String monitorName) {
+        this.monitorName = monitorName;
+    }
+
+    public void setCollectionFlag(int flag) {
+        this.collectionFlag = flag;
+    }
+
+    public void setErrorInfo(String msg) {
+        this.errorInfo = msg;
+    }
+
+    public void setPostFlag(boolean post) {
+        this.post = post;
+    }
+
+    public String buildURL() {
+        return "http://" + requestIP + ":" + port + "/passive";
+    }
+
+    public String buildRqstAddr() {
+        return "http://" + requestIP + ":" + port + "/passive?name=" + monitorName + "&msg=";
+    }
+
+    public Map buildAliMonitorMsg(int collection_flag, String error_message) {
+        // Json format of the message sent to Alimonitor
+        // {
+        // "collection_flag":int,
+        // "error_info":string,
+        // "MSG": ojbect | array
+        // }
+        Map ret = new HashMap();
+        ret.put(COLLECTION_FLAG, collection_flag);
+        ret.put(ERROR_INFO, error_message);
+        ret.put(MSG, null);
+
+        return ret;
+    }
+
+    private void addMsgData(Map jsonObj, Map<String, Object> map) {
+        jsonObj.put(MSG, map);
+    }
+
+    private boolean sendRequest(int collection_flag, String error_message, Map<String, Object> msg) throws Exception {
+        boolean ret = false;
+
+        if (msg.size() == 0)
+            return ret;
+
+        Map jsonObj = buildAliMonitorMsg(collection_flag, error_message);
+        addMsgData(jsonObj, msg);
+        String jsonMsg = jsonObj.toString();
+        LOG.info(jsonMsg);
+
+        if (post == true) {
+            String url = buildURL();
+            ret = httpPost(url, jsonMsg);
+        } else {
+            String request = buildRqstAddr();
+            StringBuilder postAddr = new StringBuilder();
+            postAddr.append(request);
+            postAddr.append(URLEncoder.encode(jsonMsg));
+
+            ret = httpGet(postAddr);
+        }
+
+        return ret;
+    }
+
+    private boolean httpGet(StringBuilder postAddr) {
+        boolean ret = false;
+
+        CloseableHttpClient httpClient = HttpClientBuilder.create().build();
+        CloseableHttpResponse response = null;
+
+        try {
+            HttpGet request = new HttpGet(postAddr.toString());
+            response = httpClient.execute(request);
+            HttpEntity entity = response.getEntity();
+            if (entity != null) {
+                LOG.info(EntityUtils.toString(entity));
+            }
+            EntityUtils.consume(entity);
+            ret = true;
+        } catch (Exception e) {
+            LOG.error("Exception when sending http request to alimonitor", e);
+        } finally {
+            try {
+                if (response != null)
+                    response.close();
+                httpClient.close();
+            } catch (Exception e) {
+                LOG.error("Exception when closing httpclient", e);
+            }
+        }
+
+        return ret;
+    }
+
+    private boolean httpPost(String url, String msg) {
+        boolean ret = false;
+
+        CloseableHttpClient httpClient = HttpClientBuilder.create().build();
+        CloseableHttpResponse response = null;
+
+        try {
+            HttpPost request = new HttpPost(url);
+            List<NameValuePair> nvps = new ArrayList<NameValuePair>();
+            nvps.add(new BasicNameValuePair("name", monitorName));
+            nvps.add(new BasicNameValuePair("msg", msg));
+            request.setEntity(new UrlEncodedFormEntity(nvps));
+            response = httpClient.execute(request);
+            HttpEntity entity = response.getEntity();
+            if (entity != null) {
+                LOG.info(EntityUtils.toString(entity));
+            }
+            EntityUtils.consume(entity);
+            ret = true;
+        } catch (Exception e) {
+            LOG.error("Exception when sending http request to alimonitor", e);
+        } finally {
+            try {
+                if (response != null)
+                    response.close();
+                httpClient.close();
+            } catch (Exception e) {
+                LOG.error("Exception when closing httpclient", e);
+            }
+        }
+
+        return ret;
+    }
+
+
+    protected Map<String, Object> convertMap(String clusterName, String topologyId, TopologyMetric tpMetric) {
+    	/**
+    	 * @@@ Todo
+    	 */
+    	return null;
+    }
+
+	@Override
+	public boolean upload(String clusterName, String topologyId, TopologyMetric tpMetric, Map<String, Object> metricContext) {
+		// TODO Auto-generated method stub
+		Map<String, Object> metricMap = convertMap(clusterName, topologyId, tpMetric);
+		if (metricMap == null || metricMap.isEmpty() == true) {
+			return false;
+		}
+		
+		try {
+			sendRequest(collectionFlag, errorInfo, metricMap);
+			return true;
+		} catch (Exception e) {
+			// TODO Auto-generated catch block
+			LOG.error("Failed upload metric to Alimonitor", e);
+			return false;
+		}
+	}
+
+}
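
Note that AlimonitorClient ships with convertMap() stubbed to return null, so upload() is effectively a no-op (returning false) until a subclass supplies the MSG payload. A hypothetical override, just to show the shape of the data:

    import backtype.storm.generated.TopologyMetric;

    import java.util.HashMap;
    import java.util.Map;

    class MyAlimonitorClient extends AlimonitorClient {
        @Override
        protected Map<String, Object> convertMap(String clusterName, String topologyId, TopologyMetric tpMetric) {
            Map<String, Object> msg = new HashMap<String, Object>();
            msg.put("cluster", clusterName);
            msg.put("topology", topologyId);
            // any aggregate works here; the metric count is just an example
            msg.put("metricCount", tpMetric.get_topologyMetric().get_metrics_size());
            return msg;
        }
    }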

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/metric/uploader/DefaultMetricUploader.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/metric/uploader/DefaultMetricUploader.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/metric/uploader/DefaultMetricUploader.java
new file mode 100644
index 0000000..58e4e7d
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/metric/uploader/DefaultMetricUploader.java
@@ -0,0 +1,71 @@
+package com.alibaba.jstorm.daemon.nimbus.metric.uploader;
+
+import backtype.storm.generated.TopologyMetric;
+import com.alibaba.jstorm.daemon.nimbus.NimbusData;
+import com.alibaba.jstorm.daemon.nimbus.TopologyMetricsRunnable;
+import com.alibaba.jstorm.daemon.nimbus.TopologyMetricsRunnable.Event;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Map;
+
+public class DefaultMetricUploader implements MetricUploader {
+    private final Logger logger = LoggerFactory.getLogger(getClass());
+    protected NimbusData nimbusData;
+    protected TopologyMetricsRunnable metricsRunnable;
+
+    public DefaultMetricUploader() {
+    }
+
+    @Override
+    public void init(NimbusData nimbusData) throws Exception {
+        this.nimbusData = nimbusData;
+        this.metricsRunnable = nimbusData.getMetricRunnable();
+    }
+
+    @Override
+    public void cleanup() {
+    }
+
+    @Override
+    public boolean registerMetrics(String clusterName, String topologyId,
+                                   Map<String, Long> metrics) {
+        if (metrics.size() > 0) {
+            logger.info("register metrics, topology:{}, total:{}", topologyId, metrics.size());
+        }
+        return true;
+    }
+
+    @Override
+    public boolean upload(String clusterName, String topologyId, TopologyMetric tpMetric, Map<String, Object> metricContext) {
+        if (tpMetric == null) {
+            logger.info("No metric of {}", topologyId);
+            return true;
+        }
+
+        int totalSize = tpMetric.get_topologyMetric().get_metrics_size() +
+                tpMetric.get_componentMetric().get_metrics_size() +
+                tpMetric.get_taskMetric().get_metrics_size() +
+                tpMetric.get_streamMetric().get_metrics_size() +
+                tpMetric.get_workerMetric().get_metrics_size() +
+                tpMetric.get_nettyMetric().get_metrics_size();
+
+        logger.info("send metrics, cluster:{}, topology:{}, metric size:{}, metricContext:{}",
+                clusterName, topologyId, totalSize, metricContext);
+
+        return true;
+    }
+
+    @Override
+    public boolean upload(String clusterName, String topologyId, Object key, Map<String, Object> metricContext) {
+        metricsRunnable.markUploaded((Integer) key);
+        return true;
+    }
+
+
+    @Override
+    public boolean sendEvent(String clusterName, Event event) {
+        logger.info("Successfully sendEvent {} of {}", event, clusterName);
+        return true;
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/metric/uploader/MetricUploader.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/metric/uploader/MetricUploader.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/metric/uploader/MetricUploader.java
new file mode 100644
index 0000000..9b7c745
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/metric/uploader/MetricUploader.java
@@ -0,0 +1,46 @@
+package com.alibaba.jstorm.daemon.nimbus.metric.uploader;
+
+import backtype.storm.generated.TopologyMetric;
+import com.alibaba.jstorm.daemon.nimbus.NimbusData;
+import com.alibaba.jstorm.daemon.nimbus.TopologyMetricsRunnable;
+
+import java.util.Map;
+
+public interface MetricUploader {
+    /**
+     * Set NimbusData to MetricUploader
+     */
+    void init(NimbusData nimbusData) throws Exception;
+
+    void cleanup();
+
+    /**
+     * register metrics to external metric plugin
+     */
+    boolean registerMetrics(String clusterName, String topologyId,
+                            Map<String, Long> metrics) throws Exception;
+
+    String METRIC_TYPE = "metric.type";
+    String METRIC_TYPE_TOPLOGY = "TP";
+    String METRIC_TYPE_TASK = "TASK";
+    String METRIC_TYPE_ALL = "ALL";
+    String METRIC_TIME = "metric.timestamp";
+
+    /**
+     * upload topologyMetric to external metric plugin (such as database plugin)
+     *
+     * @return true means success, false means failure
+     */
+    boolean upload(String clusterName, String topologyId, TopologyMetric tpMetric, Map<String, Object> metricContext);
+
+    /**
+     * upload metrics with given key and metric context. the implementation can retrieve metric data from rocks db
+     * in the handler thread, which is kind of lazy-init, making it more GC-friendly
+     */
+    boolean upload(String clusterName, String topologyId, Object key, Map<String, Object> metricContext);
+
+    /**
+     * Send an event to underlying handler
+     */
+    boolean sendEvent(String clusterName, TopologyMetricsRunnable.Event event);
+}
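
The two upload() overloads above cover an eager path (the full TopologyMetric is passed in) and a lazy path (only a cache key is passed; data is fetched in the handler thread). A minimal plugin sketch against this interface (behavior is illustrative; a real plugin would ship data to an external store):

    import backtype.storm.generated.TopologyMetric;
    import com.alibaba.jstorm.daemon.nimbus.NimbusData;
    import com.alibaba.jstorm.daemon.nimbus.TopologyMetricsRunnable;

    import java.util.Map;

    public class LoggingMetricUploader implements MetricUploader {
        private TopologyMetricsRunnable runnable;

        @Override
        public void init(NimbusData nimbusData) throws Exception {
            this.runnable = nimbusData.getMetricRunnable();
        }

        @Override
        public void cleanup() {
        }

        @Override
        public boolean registerMetrics(String clusterName, String topologyId, Map<String, Long> metrics) {
            return true; // accept every metric-name -> id registration
        }

        @Override
        public boolean upload(String clusterName, String topologyId, TopologyMetric tpMetric, Map<String, Object> context) {
            return true; // eager path: tpMetric is already fully materialized
        }

        @Override
        public boolean upload(String clusterName, String topologyId, Object key, Map<String, Object> context) {
            // lazy path: a real plugin would read the data for this key first;
            // marking it uploaded releases the pending slot in the runnable
            runnable.markUploaded((Integer) key);
            return true;
        }

        @Override
        public boolean sendEvent(String clusterName, TopologyMetricsRunnable.Event event) {
            return true;
        }
    }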


http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/metric/JStormMetricsReporter.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/JStormMetricsReporter.java b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/JStormMetricsReporter.java
index 489bec8..5039c87 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/JStormMetricsReporter.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/JStormMetricsReporter.java
@@ -17,390 +17,300 @@
  */
 package com.alibaba.jstorm.metric;
 
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.thrift.TException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import backtype.storm.Config;
-import backtype.storm.LocalCluster;
 import backtype.storm.generated.MetricInfo;
-import backtype.storm.generated.MetricWindow;
-import backtype.storm.generated.NettyMetric;
+import backtype.storm.generated.TopologyMetric;
 import backtype.storm.generated.WorkerUploadMetrics;
+import backtype.storm.spout.SpoutOutputCollector;
+import backtype.storm.task.OutputCollector;
+import backtype.storm.tuple.Values;
 import backtype.storm.utils.NimbusClient;
-import backtype.storm.utils.Utils;
-
+import com.alibaba.jstorm.callback.AsyncLoopThread;
 import com.alibaba.jstorm.callback.RunnableCallback;
 import com.alibaba.jstorm.client.ConfigExtension;
-import com.alibaba.jstorm.cluster.StormClusterState;
+import com.alibaba.jstorm.cluster.Common;
 import com.alibaba.jstorm.cluster.StormConfig;
-import com.alibaba.jstorm.common.metric.Gauge;
-import com.alibaba.jstorm.common.metric.MetricFilter;
-import com.alibaba.jstorm.common.metric.MetricRegistry;
-import com.alibaba.jstorm.common.metric.window.Metric;
+import com.alibaba.jstorm.common.metric.AsmMetric;
+import com.alibaba.jstorm.daemon.nimbus.NimbusData;
+import com.alibaba.jstorm.daemon.nimbus.TopologyMetricsRunnable.Update;
 import com.alibaba.jstorm.daemon.worker.WorkerData;
-import com.alibaba.jstorm.utils.JStormUtils;
-import com.codahale.metrics.health.HealthCheck;
-import com.codahale.metrics.health.HealthCheckRegistry;
-
-public class JStormMetricsReporter extends RunnableCallback {
-    private static final Logger LOG = LoggerFactory
-            .getLogger(JStormMetricsReporter.class);
-
-    private MetricRegistry workerMetrics = JStormMetrics.workerMetrics;
-    private Map<Integer, MetricRegistry> taskMetrics =
-            JStormMetrics.taskMetrics;
-    private MetricRegistry skipMetrics = JStormMetrics.skipMetrics;
+import com.alibaba.jstorm.utils.JStormServerUtils;
+import com.alibaba.jstorm.utils.TimeUtils;
+import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-    private JStormMetricFilter inputFilter;
+import java.util.*;
 
-    private JStormMetricFilter outputFilter;
+/**
+ * Reports metrics from workers to the nimbus server. An instance of this class lives in Worker/Nimbus/Supervisor:
+ * in a Worker it reports data via netty transport; otherwise it reports via thrift.
+ * <p/>
+ * There are 2 threads:
+ * 1. flush thread: checks every second; when the current time is aligned to 1 min, flushes all metrics to snapshots;
+ * 2. check meta thread: uses thrift to get metric ids from the nimbus server.
+ *
+ * @author Cody (weiyue.wy@alibaba-inc.com)
+ * @since 2.0.5
+ */
+public class JStormMetricsReporter {
+    private static final Logger LOG = LoggerFactory.getLogger(JStormMetricsReporter.class);
 
     private Map conf;
-    private String topologyId;
-    private String supervisorId;
-    private int port;
-    private int frequence;
-
-    private StormClusterState clusterState;
-    private boolean localMode = false;
-    private NimbusClient client;
-
-    public JStormMetricsReporter(WorkerData workerData) {
-        this.conf = workerData.getStormConf();
-        this.topologyId = (String) conf.get(Config.TOPOLOGY_ID);
-        this.supervisorId = workerData.getSupervisorId();
-        this.port = workerData.getPort();
-        this.frequence = ConfigExtension.getWorkerMetricReportFrequency(conf);
-        this.clusterState = workerData.getZkCluster();
-
-        outputFilter = new JStormMetricFilter(MetricDef.OUTPUT_TAG);
-        inputFilter = new JStormMetricFilter(MetricDef.INPUT_TAG);
-        localMode = StormConfig.local_mode(conf);
-        LOG.info("Successfully start ");
-    }
+    protected String clusterName;
+    protected String topologyId;
+    protected String host;
+    protected int port;
 
-    protected boolean getMoreMetric(
-            Map<String, Map<String, MetricWindow>> extraMap,
-            JStormMetricFilter metricFilter, String metricFullName,
-            Map<Integer, Double> metricWindow) {
-        if (metricFilter.matches(metricFullName, null) == false) {
-            return false;
-        }
+    protected boolean localMode = false;
 
-        int pos = metricFullName.indexOf(MetricRegistry.NAME_SEPERATOR);
-        if (pos <= 0 || pos >= metricFullName.length() - 1) {
-            return false;
-        }
+    private AsyncLoopThread checkMetricMetaThread;
+    protected final int checkMetaThreadCycle;
 
-        String metricName = metricFullName.substring(0, pos);
-        String extraName = metricFullName.substring(pos + 1);
+    private AsyncLoopThread flushMetricThread;
+    protected final int flushMetricThreadCycle;
 
-        Map<String, MetricWindow> item = extraMap.get(metricName);
-        if (item == null) {
-            item = new HashMap<String, MetricWindow>();
-            extraMap.put(metricName, item);
-        }
+    private boolean test = false;
 
-        MetricWindow metricWindowThrift = new MetricWindow();
-        metricWindowThrift.set_metricWindow(metricWindow);
+    private boolean isInWorker = false;
 
-        item.put(extraName, metricWindowThrift);
+    private SpoutOutputCollector spoutOutput;
+    private OutputCollector boltOutput;
 
-        return true;
-    }
-    
-    protected void insertNettyMetrics(Map<String, MetricInfo> nettyMetricInfo, 
-                    Map<Integer, Double> snapshot,
-                    String metricFullName) {
-        int pos = metricFullName.indexOf(MetricRegistry.NAME_SEPERATOR);
-        if (pos < 0 || pos >= metricFullName.length() - 1) {
-            return ;
-        }
-        
-        String realHeader = metricFullName.substring(0, pos);
-        String nettyConnection = metricFullName.substring(pos + 1);
-        
-        MetricInfo metricInfo = nettyMetricInfo.get(nettyConnection);
-        if (metricInfo == null) {
-            metricInfo = MetricThrift.mkMetricInfo();
-            
-            nettyMetricInfo.put(nettyConnection, metricInfo);
-        }
-        
-        MetricThrift.insert(metricInfo, realHeader, snapshot);
-    }
-    
-    protected void insertMergeList(Map<String, List<Map<Integer, Double> > > mergeMap, 
-                    List<String> mergeList, 
-                    Map<Integer, Double> snapshot,
-                    String name) {
-        for (String tag : mergeList) {
-            if (name.startsWith(tag) == false) {
-                continue;
-            }
-            List<Map<Integer, Double> > list = mergeMap.get(tag);
-            if (list == null) {
-                list = new ArrayList<Map<Integer,Double>>();
-                mergeMap.put(tag, list);
-            }
-            
-            list.add(snapshot);
-            
-        }
-    }
-    
-    protected void doMergeList(MetricInfo workerMetricInfo, 
-                    Map<String, List<Map<Integer, Double> > > mergeMap) {
-        for (Entry<String, List<Map<Integer, Double> > > entry : mergeMap.entrySet()) {
-            String name = entry.getKey();
-            List<Map<Integer, Double>> list = entry.getValue();
-            
-            Map<Integer, Double> merged = JStormUtils.mergeMapList(list);
-            
-            MetricThrift.insert(workerMetricInfo, name, merged);
-        }
-    }
+    private boolean enableMetrics;
+    private NimbusClient client = null;
 
-    public MetricInfo computWorkerMetrics() {
-        MetricInfo workerMetricInfo = MetricThrift.mkMetricInfo();
-        Map<String, MetricInfo> nettyMetricInfo = new HashMap<String, MetricInfo>();
-
-        Map<String, List<Map<Integer, Double> > > mergeMap = 
-                new HashMap<String, List<Map<Integer,Double> > >();
-        List<String> mergeList = new ArrayList<String>();
-        mergeList.add(MetricDef.NETTY_CLI_SEND_SPEED);
-        
-        Map<String, Metric> workerMetricMap = workerMetrics.getMetrics();
-        for (Entry<String, Metric> entry : workerMetricMap.entrySet()) {
-            String name = entry.getKey();
-            Map<Integer, Double> snapshot = entry.getValue().getSnapshot();
-
-            if (MetricDef.isNettyDetails(name) == false) {
-                MetricThrift.insert(workerMetricInfo, name, snapshot);
-                continue;
-            }
-            
-            insertNettyMetrics(nettyMetricInfo, snapshot, name);
-            
-            insertMergeList(mergeMap, mergeList, snapshot, name);
-            
+    public JStormMetricsReporter(Object role) {
+        LOG.info("starting jstorm metrics reporter");
+        if (role instanceof WorkerData) {
+            WorkerData workerData = (WorkerData) role;
+            this.conf = workerData.getStormConf();
+            this.topologyId = (String) conf.get(Config.TOPOLOGY_ID);
+            this.port = workerData.getPort();
+            this.isInWorker = true;
+        } else if (role instanceof NimbusData) {
+            NimbusData nimbusData = (NimbusData) role;
+            this.conf = nimbusData.getConf();
+            this.topologyId = JStormMetrics.NIMBUS_METRIC_KEY;
         }
-        
-        doMergeList(workerMetricInfo, mergeMap);
-        
-        JStormMetrics.setExposeWorkerMetrics(workerMetricInfo);
-        JStormMetrics.setExposeNettyMetrics(nettyMetricInfo);
-        return workerMetricInfo;
-    }
-
-    public boolean isTaskQueueFull(Metric metric,
-            Map<Integer, Double> snapshot, String name) {
-        if (metric instanceof Gauge) {
-            if (MetricDef.TASK_QUEUE_SET.contains(name)) {
-                for (Entry<Integer, Double> entry : snapshot.entrySet()) {
-                    if (entry.getValue() == MetricDef.FULL_RATIO) {
-                        return true;
-                    }
-                }
-            }
+        this.host = JStormMetrics.getHost();
+        this.enableMetrics = JStormMetrics.isEnabled();
+        if (!enableMetrics) {
+            LOG.warn("***** topology metrics is disabled! *****");
+        } else {
+            LOG.info("topology metrics is enabled.");
         }
 
-        return false;
-    }
-
-    public Map<Integer, MetricInfo> computeTaskMetrics() {
-        Map<Integer, MetricInfo> ret = new HashMap<Integer, MetricInfo>();
+        this.checkMetaThreadCycle = 30;
+        // flush metric snapshots when time is aligned, check every sec.
+        this.flushMetricThreadCycle = 1;
 
-        for (Entry<Integer, MetricRegistry> entry : taskMetrics.entrySet()) {
-            Integer taskId = entry.getKey();
-            MetricRegistry taskMetrics = entry.getValue();
+        LOG.info("check meta thread freq:{}, flush metrics thread freq:{}", checkMetaThreadCycle, flushMetricThreadCycle);
 
-            Map<String, Map<String, MetricWindow>> inputMap =
-                    new HashMap<String, Map<String, MetricWindow>>();
-            Map<String, Map<String, MetricWindow>> outputMap =
-                    new HashMap<String, Map<String, MetricWindow>>();
-
-            MetricInfo taskMetricInfo = MetricThrift.mkMetricInfo();
-            taskMetricInfo.set_inputMetric(inputMap);
-            taskMetricInfo.set_outputMetric(outputMap);
-            ret.put(taskId, taskMetricInfo);
-
-            for (Entry<String, Metric> metricEntry : taskMetrics.getMetrics()
-                    .entrySet()) {
-                String name = metricEntry.getKey();
-                Metric metric = metricEntry.getValue();
-                Map<Integer, Double> snapshot = metric.getSnapshot();
-
-                boolean isInput =
-                        getMoreMetric(inputMap, inputFilter, name, snapshot);
-                boolean isOutput =
-                        getMoreMetric(outputMap, outputFilter, name, snapshot);
-
-                if (isInput == false && isOutput == false) {
-                    MetricThrift.insert(taskMetricInfo, name, snapshot);
-                }
-            }
-
-            MetricThrift.merge(taskMetricInfo, inputMap);
-            MetricThrift.merge(taskMetricInfo, outputMap);
-
-        }
+        this.localMode = StormConfig.local_mode(conf);
+        this.clusterName = ConfigExtension.getClusterName(conf);
+        LOG.info("done.");
+    }
 
-        JStormMetrics.setExposeTaskMetrics(ret);
-        return ret;
+    @VisibleForTesting
+    JStormMetricsReporter() {
+        LOG.info("Successfully started jstorm metrics reporter for test.");
+        this.test = true;
+        this.flushMetricThreadCycle = 1;
+        this.checkMetaThreadCycle = 30;
     }
-    
-    public void healthCheck(Integer taskId, HealthCheckRegistry healthCheck) {
-    	if (taskId == null) {
-    		return ;
-    	}
-    	
-    	final Map<String, HealthCheck.Result> results =
-    			healthCheck.runHealthChecks();
-        for (Entry<String, HealthCheck.Result> resultEntry : results
-                .entrySet()) {
-            HealthCheck.Result result = resultEntry.getValue();
-            if (result.isHealthy() == false) {
-                LOG.warn("{}:{}", taskId, result.getMessage());
-                try {
-                    clusterState.report_task_error(topologyId, taskId,
-                            result.getMessage());
-                } catch (Exception e) {
-                    // TODO Auto-generated catch block
-                    LOG.error(e.getMessage(), e);
-                }
 
-            }
+    public void init() {
+        if (!localMode && enableMetrics) {
+            this.checkMetricMetaThread = new AsyncLoopThread(new CheckMetricMetaThread());
+            this.flushMetricThread = new AsyncLoopThread(new FlushMetricThread());
         }
     }
 
-    public void healthCheck() {
-        Integer firstTask = null;
-
-        Map<Integer, HealthCheckRegistry> taskHealthCheckMap =
-                JStormHealthCheck.getTaskhealthcheckmap();
-
-        for (Entry<Integer, HealthCheckRegistry> entry : taskHealthCheckMap
-                .entrySet()) {
-            Integer taskId = entry.getKey();
-            HealthCheckRegistry taskHealthCheck = entry.getValue();
-
-            healthCheck(taskId, taskHealthCheck);
+    private Map<String, Long> registerMetrics(Set<String> names) {
+        if (test || !enableMetrics) {
+            return new HashMap<>();
+        }
+        try {
+            if (client == null) {
+                client = NimbusClient.getConfiguredClient(conf);
+            }
 
-            if (firstTask != null) {
-                firstTask = taskId;
+            return client.getClient().registerMetrics(topologyId, names);
+        } catch (Exception e) {
+            LOG.error("Failed to gen metric ids", e);
+            if (client != null) {
+                client.close();
+                client = NimbusClient.getConfiguredClient(conf);
             }
         }
 
-        HealthCheckRegistry workerHealthCheck =
-                JStormHealthCheck.getWorkerhealthcheck();
-        healthCheck(firstTask, workerHealthCheck);
-        
-
+        return null;
     }
 
-    @Override
-    public void run() {
+    public void shutdown() {
+        if (!localMode && enableMetrics) {
+            this.checkMetricMetaThread.cleanup();
+            this.flushMetricThread.cleanup();
+        }
+    }
 
+    public void doUpload() {
+        if (test) {
+            return;
+        }
         try {
-            // TODO Auto-generated method stub
-            MetricInfo workerMetricInfo = computWorkerMetrics();
-            
-            Map<Integer, MetricInfo> taskMetricMap = computeTaskMetrics();
+            long start = System.currentTimeMillis();
+            MetricInfo workerMetricInfo = JStormMetrics.computeAllMetrics();
 
             WorkerUploadMetrics upload = new WorkerUploadMetrics();
-            upload.set_topology_id(topologyId);
-            upload.set_supervisor_id(supervisorId);
+            upload.set_topologyId(topologyId);
+            upload.set_supervisorId(host);
             upload.set_port(port);
-            upload.set_workerMetric(workerMetricInfo);
-            upload.set_nettyMetric(
-                            new NettyMetric(
-                                   JStormMetrics.getExposeNettyMetrics(), 
-                                   JStormMetrics.getExposeNettyMetrics().size()));
-            upload.set_taskMetric(taskMetricMap);
-            
-            uploadMetric(upload);
-            
-            healthCheck();
-
-            LOG.info("Successfully upload worker's metrics");
-            LOG.info(Utils.toPrettyJsonString(workerMetricInfo));
-            LOG.info(Utils.toPrettyJsonString(JStormMetrics.getExposeNettyMetrics()));
-            LOG.info(Utils.toPrettyJsonString(taskMetricMap));
+            upload.set_allMetrics(workerMetricInfo);
+
+            if (workerMetricInfo.get_metrics_size() > 0) {
+                uploadMetric(upload);
+                LOG.info("Successfully upload worker metrics, size:{}, cost:{}",
+                        workerMetricInfo.get_metrics_size(), System.currentTimeMillis() - start);
+            } else {
+                LOG.info("No metrics to upload.");
+            }
         } catch (Exception e) {
             LOG.error("Failed to upload worker metrics", e);
         }
-
     }
 
-    public void uploadMetric(WorkerUploadMetrics upload) {
-        if (StormConfig.local_mode(conf)) {
-            try {
-                byte[] temp = Utils.serialize(upload);
 
-                LocalCluster.getInstance().getLocalClusterMap().getNimbus()
-                        .workerUploadMetric(upload);
-            } catch (TException e) {
-                // TODO Auto-generated catch block
-                LOG.error("Failed to upload worker metrics", e);
+    public void uploadMetric(WorkerUploadMetrics metrics) {
+        if (isInWorker) {
+            // in Worker, we upload data via netty transport
+            if (boltOutput != null) {
+                LOG.info("emit metrics through bolt collector.");
+                boltOutput.emit(Common.TOPOLOGY_MASTER_METRICS_STREAM_ID,
+                        new Values(JStormServerUtils.getName(host, port), metrics));
+            } else if (spoutOutput != null) {
+                LOG.info("emit metrics through spout collector.");
+                spoutOutput.emit(Common.TOPOLOGY_MASTER_METRICS_STREAM_ID,
+                        new Values(JStormServerUtils.getName(host, port), metrics));
             }
-        } else {
+        } else {
+            // in supervisor or nimbus, we upload metric data via thrift
+            LOG.info("emit metrics through nimbus client.");
+            Update event = new Update();
+            TopologyMetric tpMetric = MetricUtils.mkTopologyMetric();
+            tpMetric.set_workerMetric(metrics.get_allMetrics());
+
+            event.topologyMetrics = tpMetric;
+            event.topologyId = topologyId;
+
             try {
-            	if (client == null) {
-            		client = NimbusClient.getConfiguredClient(conf);
-            	}
-                client.getClient().workerUploadMetric(upload);
-            } catch (Exception e) {
-                LOG.error("Failed to upload worker metrics", e);
+                if (client == null) {
+                    client = NimbusClient.getConfiguredClient(conf);
+                }
+                client.getClient().uploadTopologyMetrics(topologyId, tpMetric);
+            } catch (Exception ex) {
+                LOG.error("upload metric error:", ex);
                 if (client != null) {
                     client.close();
-                    client = null;
+                    client = NimbusClient.getConfiguredClient(conf);
                 }
-            } finally {
-                
             }
         }
+        //MetricUtils.logMetrics(metrics.get_allMetrics());
     }
 
-    @Override
-    public Object getResult() {
-        return frequence;
-    }
-    
-    @Override
-    public void shutdown() {
-    	if (client != null) {
-            client.close();
-            client = null;
+
+    public void setOutputCollector(Object outputCollector) {
+        if (outputCollector instanceof OutputCollector) {
+            this.boltOutput = (OutputCollector) outputCollector;
+        } else if (outputCollector instanceof SpoutOutputCollector) {
+            this.spoutOutput = (SpoutOutputCollector) outputCollector;
         }
+
     }
 
-    public static class JStormMetricFilter implements MetricFilter {
-        private static final long serialVersionUID = -8886536175626248855L;
-        private String[] tags;
 
-        public JStormMetricFilter(String[] tags) {
-            this.tags = tags;
+    class FlushMetricThread extends RunnableCallback {
+        @Override
+        public void run() {
+            if (TimeUtils.isTimeAligned()) {
+                int cnt = 0;
+                try {
+                    for (AsmMetricRegistry registry : JStormMetrics.allRegistries) {
+                        for (Map.Entry<String, AsmMetric> entry : registry.getMetrics().entrySet()) {
+                            entry.getValue().flush();
+                            cnt++;
+                        }
+                    }
+                    LOG.info("flush metrics, total:{}.", cnt);
+
+                    doUpload();
+                } catch (Exception ex) {
+                    LOG.error("Error", ex);
+                }
+            }
+        }
+
+        @Override
+        public Object getResult() {
+            return flushMetricThreadCycle;
         }
+    }
+
+    class CheckMetricMetaThread extends RunnableCallback {
+        private volatile boolean processing = false;
+        private final long start = TimeUtils.current_time_secs();
+        private final long initialDelay = 30 + new Random().nextInt(15);
 
         @Override
-        public boolean matches(String name, Metric metric) {
-            // TODO Auto-generated method stub
-            for (String tag : tags) {
-                if (name.startsWith(tag)) {
-                    return true;
+        public void run() {
+            if (TimeUtils.current_time_secs() - start < initialDelay) {
+                return;
+            }
+
+            if (processing) {
+                LOG.info("still processing, skip...");
+            } else {
+                processing = true;
+                long start = System.currentTimeMillis();
+                try {
+                    Set<String> names = new HashSet<>();
+                    for (AsmMetricRegistry registry : JStormMetrics.allRegistries) {
+                        Map<String, AsmMetric> metricMap = registry.getMetrics();
+                        for (Map.Entry<String, AsmMetric> metricEntry : metricMap.entrySet()) {
+                            AsmMetric metric = metricEntry.getValue();
+                            if (((metric.getOp() & AsmMetric.MetricOp.REPORT) == AsmMetric.MetricOp.REPORT) &&
+                                    metric.getMetricId() == 0L) {
+                                names.add(metricEntry.getKey());
+                            }
+                        }
+                    }
+
+                    if (names.size() > 0) {
+                        Map<String, Long> nameIdMap = registerMetrics(names);
+                        if (nameIdMap != null) {
+                            for (String name : nameIdMap.keySet()) {
+                                AsmMetric metric = JStormMetrics.find(name);
+                                if (metric != null) {
+                                    long id = nameIdMap.get(name);
+                                    metric.setMetricId(id);
+                                    LOG.info("set metric id, {}:{}", name, id);
+                                }
+                            }
+                        }
+                        LOG.info("register metrics, size:{}, cost:{}", names.size(), System.currentTimeMillis() - start);
+                    }
+                } catch (Exception ex) {
+                    LOG.error("Error", ex);
                 }
+                processing = false;
             }
-            return false;
         }
 
+        @Override
+        public Object getResult() {
+            return checkMetaThreadCycle;
+        }
     }
-
 }
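
FlushMetricThread above is polled every second but only does work when TimeUtils.isTimeAligned() reports minute alignment. A self-contained sketch of that kind of check (the real TimeUtils implementation may differ):

    class AlignSketch {
        // polled once per second; true only in the first second of each minute,
        // so work guarded by it runs at most once per minute
        static boolean isMinuteAligned() {
            return (System.currentTimeMillis() / 1000L) % 60 == 0;
        }
    }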

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/metric/KVSerializable.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/KVSerializable.java b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/KVSerializable.java
new file mode 100644
index 0000000..7adaf46
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/KVSerializable.java
@@ -0,0 +1,17 @@
+package com.alibaba.jstorm.metric;
+
+/**
+ * @author Cody (weiyue.wy@alibaba-inc.com)
+ * @since 2.0.5
+ */
+public interface KVSerializable {
+    String START = "S", END = "E";
+    int LONG_SIZE = 8;
+    int INT_SIZE = 4;
+
+    public byte[] getKey();
+
+    public byte[] getValue();
+
+    public Object fromKV(byte[] key, byte[] value);
+}
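
KVSerializable above is the byte-level contract for rows kept in the local KV store (the MetricUploader javadoc elsewhere in this commit mentions RocksDB). A hypothetical implementation for a (metricId, value) pair:

    import java.nio.ByteBuffer;

    public class CounterRow implements KVSerializable {
        private long metricId; // key
        private long value;    // value

        public CounterRow(long metricId, long value) {
            this.metricId = metricId;
            this.value = value;
        }

        @Override
        public byte[] getKey() {
            return ByteBuffer.allocate(LONG_SIZE).putLong(metricId).array();
        }

        @Override
        public byte[] getValue() {
            return ByteBuffer.allocate(LONG_SIZE).putLong(value).array();
        }

        @Override
        public Object fromKV(byte[] key, byte[] value) {
            this.metricId = ByteBuffer.wrap(key).getLong();
            this.value = ByteBuffer.wrap(value).getLong();
            return this;
        }
    }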

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetaFilter.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetaFilter.java b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetaFilter.java
new file mode 100644
index 0000000..f39f51c
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetaFilter.java
@@ -0,0 +1,11 @@
+package com.alibaba.jstorm.metric;
+
+import com.alibaba.jstorm.common.metric.MetricMeta;
+
+/**
+ * @author Cody (weiyue.wy@alibaba-inc.com)
+ * @since 2.0.5
+ */
+public interface MetaFilter {
+    boolean matches(MetricMeta meta, Object arg);
+}
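
As a sketch, a filter that keeps only metas whose metric name starts with a given prefix might look like the following; it assumes MetricMeta exposes a getMetricName() accessor, which is not shown in this hunk.

    public class PrefixMetaFilter implements MetaFilter {
        @Override
        public boolean matches(MetricMeta meta, Object arg) {
            // arg is the filter argument passed through the query client
            String prefix = (String) arg;
            // getMetricName() is assumed; adjust to the real MetricMeta accessor
            return meta.getMetricName() != null && meta.getMetricName().startsWith(prefix);
        }
    }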

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetaType.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetaType.java b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetaType.java
new file mode 100644
index 0000000..3dfe665
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetaType.java
@@ -0,0 +1,50 @@
+package com.alibaba.jstorm.metric;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * @author Cody (weiyue.wy@alibaba-inc.com)
+ * @since 2.0.5
+ */
+public enum MetaType {
+    TASK(1, "T"), COMPONENT(2, "C"), STREAM(3, "S"), WORKER(4, "W"), TOPOLOGY(5, "P"), NETTY(6, "N"), NIMBUS(7, "M");
+
+    private int t;
+    private String v;
+
+    MetaType(int t, String v) {
+        this.t = t;
+        this.v = v;
+    }
+
+    private static final Map<String, MetaType> valueMap = new HashMap<String, MetaType>();
+    private static final Map<Integer, MetaType> typeMap = new HashMap<Integer, MetaType>();
+
+    static {
+        for (MetaType type : MetaType.values()) {
+            typeMap.put(type.getT(), type);
+            valueMap.put(type.getV(), type);
+        }
+    }
+
+    public String getV() {
+        return this.v;
+    }
+
+    public int getT() {
+        return t;
+    }
+
+    public static MetaType parse(char ch) {
+        return parse(ch + "");
+    }
+
+    public static MetaType parse(String v) {
+        return valueMap.get(v);
+    }
+
+    public static MetaType parse(int t) {
+        return typeMap.get(t);
+    }
+}
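
The two static maps make lookups by either code equivalent; a quick demonstration:

    public class MetaTypeDemo {
        public static void main(String[] args) {
            // both lookups resolve to the same constant
            System.out.println(MetaType.parse('S'));    // STREAM, via string code "S"
            System.out.println(MetaType.parse(3));      // STREAM, via numeric code
            System.out.println(MetaType.STREAM.getV()); // "S"
        }
    }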

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricClient.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricClient.java b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricClient.java
new file mode 100644
index 0000000..1d63f46
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricClient.java
@@ -0,0 +1,92 @@
+package com.alibaba.jstorm.metric;
+
+import backtype.storm.task.TopologyContext;
+import com.alibaba.jstorm.common.metric.*;
+import com.codahale.metrics.Gauge;
+
+/**
+ * metric client for end users to add custom metrics.
+ *
+ * @author Cody (weiyue.wy@alibaba-inc.com)
+ * @since 2.0.5
+ */
+@SuppressWarnings("unused")
+public class MetricClient {
+    private static final String GROUP_UDF = "udf";
+
+    private final String topologyId;
+    private final String componentId;
+    private final int taskId;
+
+    public MetricClient(TopologyContext context) {
+        taskId = context.getThisTaskId();
+        this.topologyId = context.getTopologyId();
+        this.componentId = context.getThisComponentId();
+    }
+
+    public AsmGauge registerGauge(String name, Gauge<Double> gauge) {
+        return registerGauge(name, GROUP_UDF, gauge);
+    }
+
+    public AsmGauge registerGauge(String name, String group, Gauge<Double> gauge) {
+        String userMetricName = getMetricName(name, group, MetricType.GAUGE);
+        AsmGauge asmGauge = new AsmGauge(gauge);
+        JStormMetrics.registerTaskMetric(userMetricName, asmGauge);
+        return asmGauge;
+    }
+
+    public AsmCounter registerCounter(String name) {
+        return registerCounter(name, GROUP_UDF);
+    }
+
+    public AsmCounter registerCounter(String name, String group) {
+        String userMetricName = getMetricName(name, group, MetricType.COUNTER);
+        AsmCounter counter = new AsmCounter();
+        JStormMetrics.registerTaskMetric(userMetricName, counter);
+        return counter;
+    }
+
+    public AsmMeter registerMeter(String name) {
+        return registerMeter(name, GROUP_UDF);
+    }
+
+    public AsmMeter registerMeter(String name, String group) {
+        String userMetricName = getMetricName(name, group, MetricType.METER);
+        return (AsmMeter) JStormMetrics.registerTaskMetric(userMetricName, new AsmMeter());
+    }
+
+    public AsmTimer registerTimer(String name) {
+        return registerTimer(name, GROUP_UDF);
+    }
+
+    public AsmTimer registerTimer(String name, String group) {
+        String userMetricName = getMetricName(name, group, MetricType.TIMER);
+        return (AsmTimer) JStormMetrics.registerTaskMetric(userMetricName, new AsmTimer());
+    }
+
+    public AsmHistogram registerHistogram(String name) {
+        return registerHistogram(name, GROUP_UDF);
+    }
+
+    public AsmHistogram registerHistogram(String name, String group) {
+        String userMetricName = getMetricName(name, group, MetricType.HISTOGRAM);
+        return (AsmHistogram) JStormMetrics.registerTaskMetric(userMetricName, new AsmHistogram());
+    }
+
+    public void unregister(String name, MetricType type) {
+        unregister(name, GROUP_UDF, type);
+    }
+
+    public void unregister(String name, String group, MetricType type) {
+        String userMetricName = getMetricName(name, group, type);
+        JStormMetrics.unregisterTaskMetric(userMetricName);
+    }
+
+    private String getMetricName(String name, MetricType type) {
+        return getMetricName(name, GROUP_UDF, type);
+    }
+
+    private String getMetricName(String name, String group, MetricType type) {
+        return MetricUtils.taskMetricName(topologyId, componentId, taskId, group, name, type);
+    }
+}
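
A usage sketch from a bolt might look like the following. Only the constructor and register* calls come from this class; the update(...) call on AsmCounter is an assumption about the AsmMetric API.

    import java.util.Map;

    import backtype.storm.task.OutputCollector;
    import backtype.storm.task.TopologyContext;
    import backtype.storm.topology.OutputFieldsDeclarer;
    import backtype.storm.topology.base.BaseRichBolt;
    import backtype.storm.tuple.Tuple;
    import com.alibaba.jstorm.common.metric.AsmCounter;
    import com.alibaba.jstorm.metric.MetricClient;

    public class CountingBolt extends BaseRichBolt {
        private transient MetricClient metricClient;
        private transient AsmCounter tupleCounter;

        @Override
        public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
            metricClient = new MetricClient(context);
            // registered under the default "udf" group as a task-level metric
            tupleCounter = metricClient.registerCounter("my.tuple.count");
        }

        @Override
        public void execute(Tuple tuple) {
            tupleCounter.update(1L); // assumed counter API
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
        }
    }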

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricDataConverter.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricDataConverter.java b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricDataConverter.java
new file mode 100644
index 0000000..cdb47f3
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricDataConverter.java
@@ -0,0 +1,87 @@
+package com.alibaba.jstorm.metric;
+
+import backtype.storm.generated.MetricSnapshot;
+import com.alibaba.jstorm.common.metric.*;
+import com.alibaba.jstorm.utils.TimeUtils;
+
+import java.util.Date;
+
+/**
+ * @author Cody (weiyue.wy@alibaba-inc.com)
+ * @since 2.0.5
+ */
+public class MetricDataConverter {
+
+    public static CounterData toCounterData(MetricSnapshot snapshot, int win) {
+        CounterData data = new CounterData();
+        convertBase(snapshot, data, win);
+
+        data.setV(snapshot.get_longValue());
+        return data;
+    }
+
+    public static GaugeData toGaugeData(MetricSnapshot snapshot, int win) {
+        GaugeData data = new GaugeData();
+        convertBase(snapshot, data, win);
+
+        data.setV(snapshot.get_doubleValue());
+        return data;
+    }
+
+    public static MeterData toMeterData(MetricSnapshot snapshot, int win) {
+        MeterData data = new MeterData();
+        convertBase(snapshot, data, win);
+
+        data.setM1(snapshot.get_m1());
+        data.setM5(snapshot.get_m5());
+        data.setM15(snapshot.get_m15());
+        data.setMean(snapshot.get_mean());
+
+        return data;
+    }
+
+    public static HistogramData toHistogramData(MetricSnapshot snapshot, int win) {
+        HistogramData data = new HistogramData();
+        convertBase(snapshot, data, win);
+
+        data.setMin(snapshot.get_min());
+        data.setMax(snapshot.get_max());
+        data.setP50(snapshot.get_p50());
+        data.setP75(snapshot.get_p75());
+        data.setP95(snapshot.get_p95());
+        data.setP98(snapshot.get_p98());
+        data.setP99(snapshot.get_p99());
+        data.setP999(snapshot.get_p999());
+        data.setMean(snapshot.get_mean());
+
+        return data;
+    }
+
+    public static TimerData toTimerData(MetricSnapshot snapshot, int win) {
+        TimerData data = new TimerData();
+        convertBase(snapshot, data, win);
+
+        data.setMin(snapshot.get_min());
+        data.setMax(snapshot.get_max());
+        data.setP50(snapshot.get_p50());
+        data.setP75(snapshot.get_p75());
+        data.setP95(snapshot.get_p95());
+        data.setP98(snapshot.get_p98());
+        data.setP99(snapshot.get_p99());
+        data.setP999(snapshot.get_p999());
+        data.setMean(snapshot.get_mean());
+        data.setM1(snapshot.get_m1());
+        data.setM5(snapshot.get_m5());
+        data.setM15(snapshot.get_m15());
+
+        return data;
+    }
+
+    private static void convertBase(MetricSnapshot snapshot, MetricBaseData data, int win) {
+        long newTs = TimeUtils.alignTimeToWin(snapshot.get_ts(), win);
+        data.setWin(win);
+        data.setMetricId(snapshot.get_metricId());
+        data.setTs(new Date(newTs));
+    }
+
+}
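
A minimal round trip, as a sketch: build a counter snapshot with the thrift setters used elsewhere in this commit, then convert it into a row object. The 60-second window value is illustrative.

    import backtype.storm.generated.MetricSnapshot;
    import com.alibaba.jstorm.common.metric.CounterData;

    public class ConverterDemo {
        public static void main(String[] args) {
            MetricSnapshot snapshot = new MetricSnapshot();
            snapshot.set_metricId(42L);
            snapshot.set_ts(System.currentTimeMillis());
            snapshot.set_metricType(MetricType.COUNTER.getT());
            snapshot.set_longValue(1000L);

            // win=60: ts is aligned down to the 60s window boundary
            CounterData data = MetricDataConverter.toCounterData(snapshot, 60);
        }
    }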

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricDef.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricDef.java b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricDef.java
index 58413bb..c060e51 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricDef.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricDef.java
@@ -17,9 +17,7 @@
  */
 package com.alibaba.jstorm.metric;
 
-import java.util.ArrayList;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Set;
 
 public class MetricDef {
@@ -28,10 +26,8 @@ public class MetricDef {
     public static final String TIME_TYPE = "Time";
 
     public static final String DESERIALIZE_THREAD = "Deserialize";
-    public static final String DESERIALIZE_QUEUE = DESERIALIZE_THREAD
-            + QUEUE_TYPE;
-    public static final String DESERIALIZE_TIME = DESERIALIZE_THREAD
-            + TIME_TYPE;
+    public static final String DESERIALIZE_QUEUE = DESERIALIZE_THREAD + QUEUE_TYPE;
+    public static final String DESERIALIZE_TIME = DESERIALIZE_THREAD + TIME_TYPE;
 
     public static final String SERIALIZE_THREAD = "Serialize";
     public static final String SERIALIZE_QUEUE = SERIALIZE_THREAD + QUEUE_TYPE;
@@ -45,54 +41,46 @@ public class MetricDef {
     public static final String EMPTY_CPU_RATIO = "EmptyCpuRatio";
     public static final String PENDING_MAP = "PendingNum";
     public static final String COLLECTOR_EMIT_TIME = "EmitTime";
-
-   
+    public static final String TUPLE_LIFE_CYCLE = "TupleLifeCycle";
 
     public static final String DISPATCH_THREAD = "VirtualPortDispatch";
     public static final String DISPATCH_QUEUE = DISPATCH_THREAD + QUEUE_TYPE;
     public static final String DISPATCH_TIME = DISPATCH_THREAD + TIME_TYPE;
 
     public static final String BATCH_DRAINER_THREAD = "BatchDrainer";
-    public static final String BATCH_DRAINER_QUEUE = BATCH_DRAINER_THREAD
-            + QUEUE_TYPE;
-    public static final String BATCH_DRAINER_TIME = BATCH_DRAINER_THREAD
-            + TIME_TYPE;
+    public static final String BATCH_DRAINER_QUEUE = BATCH_DRAINER_THREAD + QUEUE_TYPE;
+    public static final String BATCH_DRAINER_TIME = BATCH_DRAINER_THREAD + TIME_TYPE;
 
     public static final String DRAINER_THREAD = "Drainer";
     public static final String DRAINER_QUEUE = DRAINER_THREAD + QUEUE_TYPE;
     public static final String DRAINER_TIME = DRAINER_THREAD + TIME_TYPE;
 
-    
     public static final String NETWORK_MSG_DECODE_TIME = "NetworkMsgDecodeTime";
-    
+
     // all tag start with "Netty" will be specially display in Web UI
     public static final String NETTY = "Netty";
     public static final String NETTY_CLI = NETTY + "Client";
     public static final String NETTY_SRV = NETTY + "Server";
     public static final String NETTY_CLI_SEND_SPEED = NETTY_CLI + "SendSpeed";
     public static final String NETTY_SRV_RECV_SPEED = NETTY_SRV + "RecvSpeed";
-    
+
     public static final String NETTY_CLI_SEND_TIME = NETTY_CLI + "SendTime";
-    public static final String NETTY_CLI_BATCH_SIZE =
-            NETTY_CLI + "SendBatchSize";
-    public static final String NETTY_CLI_SEND_PENDING =
-            NETTY_CLI + "SendPendings";
-    public static final String NETTY_CLI_SYNC_BATCH_QUEUE =
-            NETTY_CLI + "SyncBatchQueue";
-    public static final String NETTY_CLI_SYNC_DISR_QUEUE =
-            NETTY_CLI + "SyncDisrQueue";
+    public static final String NETTY_CLI_BATCH_SIZE = NETTY_CLI + "SendBatchSize";
+    public static final String NETTY_CLI_SEND_PENDING = NETTY_CLI + "SendPendings";
+    public static final String NETTY_CLI_SYNC_BATCH_QUEUE = NETTY_CLI + "SyncBatchQueue";
+    public static final String NETTY_CLI_SYNC_DISR_QUEUE = NETTY_CLI + "SyncDisrQueue";
     public static final String NETTY_CLI_CACHE_SIZE = NETTY_CLI + "CacheSize";
     public static final String NETTY_CLI_CONNECTION = NETTY_CLI + "ConnectionCheck";
-    
+
     // metric name for worker
     public static final String NETTY_SRV_MSG_TRANS_TIME = NETTY_SRV + "TransmitTime";
-    
 
     public static final String ZMQ_SEND_TIME = "ZMQSendTime";
     public static final String ZMQ_SEND_MSG_SIZE = "ZMQSendMSGSize";
 
     public static final String CPU_USED_RATIO = "CpuUsedRatio";
     public static final String MEMORY_USED = "MemoryUsed";
+    public static final String DISK_USAGE = "DiskUsage";
 
     public static final String REMOTE_CLI_ADDR = "RemoteClientAddress";
     public static final String REMOTE_SERV_ADDR = "RemoteServerAddress";
@@ -105,10 +93,10 @@ public class MetricDef {
     public static final String PROCESS_LATENCY = "ProcessLatency";
 
     public static final String[] OUTPUT_TAG = { EMMITTED_NUM, SEND_TPS };
-    public static final String[] INPUT_TAG = { RECV_TPS, ACKED_NUM, FAILED_NUM,
-            PROCESS_LATENCY };
+    public static final String[] INPUT_TAG = { RECV_TPS, ACKED_NUM, FAILED_NUM, PROCESS_LATENCY };
 
     public static final Set<String> MERGE_SUM_TAG = new HashSet<String>();
+
     static {
         MERGE_SUM_TAG.add(MetricDef.EMMITTED_NUM);
         MERGE_SUM_TAG.add(MetricDef.SEND_TPS);
@@ -119,6 +107,7 @@ public class MetricDef {
     }
 
     public static final Set<String> MERGE_AVG_TAG = new HashSet<String>();
+
     static {
         MERGE_AVG_TAG.add(PROCESS_LATENCY);
     }
@@ -128,6 +117,7 @@ public class MetricDef {
     public static final String QUEUE_IS_FULL = "queue is full";
 
     public static final Set<String> TASK_QUEUE_SET = new HashSet<String>();
+
     static {
         TASK_QUEUE_SET.add(DESERIALIZE_QUEUE);
         TASK_QUEUE_SET.add(SERIALIZE_QUEUE);
@@ -136,27 +126,28 @@ public class MetricDef {
     }
 
     public static final Set<String> WORKER_QUEUE_SET = new HashSet<String>();
+
     static {
         WORKER_QUEUE_SET.add(DISPATCH_QUEUE);
         WORKER_QUEUE_SET.add(BATCH_DRAINER_QUEUE);
         WORKER_QUEUE_SET.add(DRAINER_QUEUE);
     }
-    
-    
+
     public static final int NETTY_METRICS_PACKAGE_SIZE = 200;
+
     public static boolean isNettyDetails(String metricName) {
-        
+
         Set<String> specialNettySet = new HashSet<String>();
         specialNettySet.add(MetricDef.NETTY_CLI_SEND_SPEED);
         specialNettySet.add(MetricDef.NETTY_SRV_RECV_SPEED);
-        
+
         if (specialNettySet.contains(metricName)) {
             return false;
         }
         if (metricName.startsWith(MetricDef.NETTY)) {
             return true;
         }
-        
+
         return false;
     }
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricIDGenerator.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricIDGenerator.java b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricIDGenerator.java
new file mode 100644
index 0000000..3df7bab
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricIDGenerator.java
@@ -0,0 +1,9 @@
+package com.alibaba.jstorm.metric;
+
+/**
+ * @author Cody (weiyue.wy@alibaba-inc.com)
+ * @since 2.0.5
+ */
+public interface MetricIDGenerator {
+    long genMetricId(String metricName);
+}
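
A toy implementation sketch: derive a stable id from a hash of the metric name. Note from the meta-register loop earlier in this commit that id 0 means "not yet assigned", so a generator must never return 0; a production implementation would more likely draw ids from shared storage.

    import java.util.UUID;

    public class HashMetricIDGenerator implements MetricIDGenerator {
        @Override
        public long genMetricId(String metricName) {
            long id = UUID.nameUUIDFromBytes(metricName.getBytes()).getMostSignificantBits();
            // 0 is reserved to mean "unassigned" by the metric registration code
            return id == 0 ? 1 : id;
        }
    }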

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricJstack.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricJstack.java b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricJstack.java
index 2989196..16e6357 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricJstack.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricJstack.java
@@ -55,8 +55,7 @@ public class MetricJstack implements Gauge<String> {
             writer.append("\n");
         }
 
-        long[] deadLockMonitorTids =
-                threadMXBean.findMonitorDeadlockedThreads();
+        long[] deadLockMonitorTids = threadMXBean.findMonitorDeadlockedThreads();
         if (deadLockMonitorTids != null) {
             writer.append(threadIds.length + " deadlocked monitor threads:");
             for (long tid : deadLockMonitorTids) {
@@ -66,60 +65,38 @@ public class MetricJstack implements Gauge<String> {
         }
 
         for (long tid : threadIds) {
-            ThreadInfo info =
-                    threadMXBean.getThreadInfo(tid, Integer.MAX_VALUE);
+            ThreadInfo info = threadMXBean.getThreadInfo(tid, Integer.MAX_VALUE);
             if (info == null) {
                 writer.append("  Inactive").append("\n");
                 continue;
             }
-            writer.append(
-                    "Thread "
-                            + getTaskName(info.getThreadId(),
-                                    info.getThreadName()) + ":").append("\n");
+            writer.append("Thread " + getTaskName(info.getThreadId(), info.getThreadName()) + ":").append("\n");
             Thread.State state = info.getThreadState();
             writer.append("  State: " + state).append("\n");
-            writer.append("  Blocked count: " + info.getBlockedCount()).append(
-                    "\n");
-            writer.append("  Waited count: " + info.getWaitedCount()).append(
-                    "\n");
-            writer.append(" Cpu time:")
-                    .append(threadMXBean.getThreadCpuTime(tid) / 1000000)
-                    .append("ms").append("\n");
-            writer.append(" User time:")
-                    .append(threadMXBean.getThreadUserTime(tid) / 1000000)
-                    .append("ms").append("\n");
+            writer.append("  Blocked count: " + info.getBlockedCount()).append("\n");
+            writer.append("  Waited count: " + info.getWaitedCount()).append("\n");
+            writer.append(" Cpu time:").append(threadMXBean.getThreadCpuTime(tid) / 1000000).append("ms").append("\n");
+            writer.append(" User time:").append(threadMXBean.getThreadUserTime(tid) / 1000000).append("ms").append("\n");
             if (contention) {
-                writer.append("  Blocked time: " + info.getBlockedTime())
-                        .append("\n");
-                writer.append("  Waited time: " + info.getWaitedTime()).append(
-                        "\n");
+                writer.append("  Blocked time: " + info.getBlockedTime()).append("\n");
+                writer.append("  Waited time: " + info.getWaitedTime()).append("\n");
             }
             if (state == Thread.State.WAITING) {
-                writer.append("  Waiting on " + info.getLockName())
-                        .append("\n");
+                writer.append("  Waiting on " + info.getLockName()).append("\n");
             } else if (state == Thread.State.BLOCKED) {
-                writer.append("  Blocked on " + info.getLockName())
-                        .append("\n");
-                writer.append(
-                        "  Blocked by "
-                                + getTaskName(info.getLockOwnerId(),
-                                        info.getLockOwnerName())).append("\n");
+                writer.append("  Blocked on " + info.getLockName()).append("\n");
+                writer.append("  Blocked by " + getTaskName(info.getLockOwnerId(), info.getLockOwnerName())).append("\n");
             }
 
         }
         for (long tid : threadIds) {
-            ThreadInfo info =
-                    threadMXBean.getThreadInfo(tid, Integer.MAX_VALUE);
+            ThreadInfo info = threadMXBean.getThreadInfo(tid, Integer.MAX_VALUE);
             if (info == null) {
                 writer.append("  Inactive").append("\n");
                 continue;
             }
 
-            writer.append(
-                    "Thread "
-                            + getTaskName(info.getThreadId(),
-                                    info.getThreadName()) + ": Stack").append(
-                    "\n");
+            writer.append("Thread " + getTaskName(info.getThreadId(), info.getThreadName()) + ": Stack").append("\n");
             for (StackTraceElement frame : info.getStackTrace()) {
                 writer.append("    " + frame.toString()).append("\n");
             }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricQueryClient.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricQueryClient.java b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricQueryClient.java
new file mode 100644
index 0000000..c8a8f56
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricQueryClient.java
@@ -0,0 +1,148 @@
+package com.alibaba.jstorm.metric;
+
+import com.alibaba.jstorm.common.metric.MetricMeta;
+import com.alibaba.jstorm.common.metric.TaskTrack;
+import com.alibaba.jstorm.common.metric.TopologyHistory;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * metric query client for getting metric meta & data
+ *
+ * @author Cody (weiyue.wy@alibaba-inc.com)
+ * @since 2.0.5
+ */
+public interface MetricQueryClient {
+
+    /**
+     * init metric query client
+     */
+    void init(Map conf);
+
+    /**
+     * get metric meta with optional meta filter
+     *
+     * @param clusterName cluster name
+     * @param topologyId  topology id
+     * @param type        meta type
+     * @param filter      meta filter; if the filter matches, the corresponding meta is returned.
+     * @param arg         filter argument
+     * @return meta list
+     */
+    List<MetricMeta> getMetricMeta(String clusterName, String topologyId, MetaType type, MetaFilter filter, Object arg);
+
+    /**
+     * get metric meta by topology id and meta type
+     *
+     * @param clusterName cluster name
+     * @param topologyId  topology id
+     * @param type        meta type
+     * @return all metric meta
+     */
+    List<MetricMeta> getMetricMeta(String clusterName, String topologyId, MetaType type);
+
+    /**
+     * get worker metric meta by topology id
+     *
+     * @param clusterName cluster name
+     * @param topologyId  topology id
+     * @return all worker metric meta
+     */
+    List<MetricMeta> getWorkerMeta(String clusterName, String topologyId);
+
+    /**
+     * get netty metric meta by topology id
+     *
+     * @param clusterName cluster name
+     * @param topologyId  topology id
+     * @return all netty metric meta
+     */
+    List<MetricMeta> getNettyMeta(String clusterName, String topologyId);
+
+    /**
+     * get task metric meta
+     *
+     * @param clusterName cluster name
+     * @param topologyId  topology id
+     * @param taskId      task id
+     * @return task metric meta
+     */
+    List<MetricMeta> getTaskMeta(String clusterName, String topologyId, int taskId);
+
+    /**
+     * get component metric meta
+     *
+     * @param clusterName cluster name
+     * @param topologyId  topology id
+     * @param componentId component id
+     * @return component metric meta
+     */
+    List<MetricMeta> getComponentMeta(String clusterName, String topologyId, String componentId);
+
+    /**
+     * get metric meta by id
+     *
+     * @param clusterName cluster name
+     * @param topologyId  topology id
+     * @param metaType    meta type
+     * @param metricId    metric id
+     * @return metric meta
+     */
+    MetricMeta getMetricMeta(String clusterName, String topologyId, MetaType metaType, long metricId);
+
+    /**
+     * get metric data
+     *
+     * @param metricId   metric id
+     * @param metricType metric type
+     * @param win        metric window
+     * @param start      start time
+     * @param end        end time
+     * @return metric data objects, depending on metric type, could be CounterData, GaugeData, ... etc.
+     */
+    List<Object> getMetricData(long metricId, MetricType metricType, int win, long start, long end);
+
+    /**
+     * get all task track by topology id
+     *
+     * @param clusterName cluster name
+     * @param topologyId  topology id
+     * @return task track
+     */
+    List<TaskTrack> getTaskTrack(String clusterName, String topologyId);
+
+    /**
+     * get task track by task id
+     *
+     * @param clusterName cluster name
+     * @param topologyId  topology id
+     * @param taskId      task id
+     * @return task track
+     */
+    List<TaskTrack> getTaskTrack(String clusterName, String topologyId, int taskId);
+
+    /**
+     * get topology history
+     *
+     * @param clusterName  cluster name
+     * @param topologyName topology name
+     * @param size         size
+     * @return topology history list
+     */
+    List<TopologyHistory> getTopologyHistory(String clusterName, String topologyName, int size);
+
+    /**
+     * delete metrics meta. note that clusterName, topologyId, metaType & id must be set.
+     *
+     * @param meta metric meta
+     */
+    void deleteMeta(MetricMeta meta);
+
+    /**
+     * delete metrics meta list. note that clusterName, topologyId, metaType & id must be set.
+     *
+     * @param metaList metric meta list
+     */
+    void deleteMeta(List<MetricMeta> metaList);
+}
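
A caller-side sketch, assuming some concrete implementation; SimpleMetricQueryClient is hypothetical, as are the MetricMeta accessors, since this hunk only defines the interface.

    import java.util.List;
    import java.util.Map;

    import com.alibaba.jstorm.common.metric.MetricMeta;

    public class QueryDemo {
        public static void dump(Map conf, long start, long end) {
            MetricQueryClient client = new SimpleMetricQueryClient(); // hypothetical impl
            client.init(conf);

            // all metric meta of task 1 in one topology
            List<MetricMeta> metas = client.getTaskMeta("my-cluster", "myTopology-1-1448888888", 1);
            for (MetricMeta meta : metas) {
                // one-minute-window counter data between start and end
                List<Object> rows = client.getMetricData(meta.getId(), // getId() assumed
                        MetricType.COUNTER, 60, start, end);
                System.out.println(rows.size() + " rows");
            }
        }
    }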

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricSendClient.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricSendClient.java b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricSendClient.java
deleted file mode 100755
index e1313f6..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricSendClient.java
+++ /dev/null
@@ -1,18 +0,0 @@
-package com.alibaba.jstorm.metric;
-
-import java.util.List;
-import java.util.Map;
-
-public class MetricSendClient {
-
-    public MetricSendClient() {
-    }
-
-    public boolean send(Map<String, Object> msg) {
-        return true;
-    }
-
-    public boolean send(List<Map<String, Object>> msgList) {
-        return true;
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricThrift.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricThrift.java b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricThrift.java
deleted file mode 100755
index 5286a1c..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricThrift.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.metric;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import backtype.storm.generated.MetricInfo;
-import backtype.storm.generated.MetricWindow;
-
-import com.alibaba.jstorm.utils.JStormUtils;
-
-public class MetricThrift {
-    private static final Logger LOG = LoggerFactory
-            .getLogger(MetricThrift.class);
-
-    public static MetricInfo mkMetricInfo() {
-        MetricInfo metricInfo =
-                new MetricInfo(new HashMap<String, MetricWindow>());
-
-        return metricInfo;
-    }
-
-    public static void insert(MetricInfo metricInfo, String key,
-            Map<Integer, Double> windowSet) {
-        MetricWindow metricWindow = new MetricWindow();
-        metricWindow.set_metricWindow(windowSet);
-
-        metricInfo.put_to_baseMetric(key, metricWindow);
-
-    }
-
-    public static MetricWindow merge(Map<String, MetricWindow> details) {
-        Map<Integer, Double> merge = new HashMap<Integer, Double>();
-
-        for (Entry<String, MetricWindow> entry : details.entrySet()) {
-            MetricWindow metricWindow = entry.getValue();
-            Map<Integer, Double> metric = metricWindow.get_metricWindow();
-
-            for (Entry<Integer, Double> metricEntry : metric.entrySet()) {
-                Integer key = metricEntry.getKey();
-                try {
-                    Double value =
-                            ((Number) JStormUtils.add(metricEntry.getValue(),
-                                    merge.get(key))).doubleValue();
-
-                    merge.put(key, value);
-                } catch (Exception e) {
-                    LOG.error("Invalid type of " + entry.getKey() + ":" + key,
-                            e);
-                    continue;
-                }
-            }
-        }
-
-        MetricWindow ret = new MetricWindow();
-
-        ret.set_metricWindow(merge);
-        return ret;
-    }
-
-    public static void merge(MetricInfo metricInfo,
-            Map<String, Map<String, MetricWindow>> extraMap) {
-        for (Entry<String, Map<String, MetricWindow>> entry : extraMap
-                .entrySet()) {
-            String metricName = entry.getKey();
-
-            metricInfo.put_to_baseMetric(metricName, merge(entry.getValue()));
-
-        }
-    }
-
-    public static MetricWindow mergeMetricWindow(MetricWindow fromMetric,
-            MetricWindow toMetric) {
-        if (toMetric == null) {
-            toMetric = new MetricWindow(new HashMap<Integer, Double>());
-        }
-
-        if (fromMetric == null) {
-            return toMetric;
-        }
-
-        List<Map<Integer, Double>> list = new ArrayList<Map<Integer, Double>>();
-        list.add(fromMetric.get_metricWindow());
-        list.add(toMetric.get_metricWindow());
-        Map<Integer, Double> merged = JStormUtils.mergeMapList(list);
-
-        toMetric.set_metricWindow(merged);
-
-        return toMetric;
-    }
-    
-    public static MetricInfo mergeMetricInfo(MetricInfo from, MetricInfo to) {
-        if (to == null) {
-            to = mkMetricInfo();
-        }
-        
-        if (from == null) {
-            return to;
-        }
-        
-        to.get_baseMetric().putAll(from.get_baseMetric());
-
-        return to;
-        
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricType.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricType.java b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricType.java
new file mode 100644
index 0000000..0203f9e
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricType.java
@@ -0,0 +1,50 @@
+package com.alibaba.jstorm.metric;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * @author Cody (weiyue.wy@alibaba-inc.com)
+ * @since 2.0.5
+ */
+public enum MetricType {
+    COUNTER("C", 1), GAUGE("G", 2), METER("M", 3), HISTOGRAM("H", 4), TIMER("T", 5);
+
+    private String v;
+    private int t;
+
+    MetricType(String v, int t) {
+        this.v = v;
+        this.t = t;
+    }
+
+    public int getT() {
+        return this.t;
+    }
+
+    public String getV() {
+        return this.v;
+    }
+
+    private static final Map<String, MetricType> valueMap = new HashMap<String, MetricType>();
+    private static final Map<Integer, MetricType> typeMap = new HashMap<Integer, MetricType>();
+
+    static {
+        for (MetricType type : MetricType.values()) {
+            typeMap.put(type.getT(), type);
+            valueMap.put(type.getV(), type);
+        }
+    }
+
+    public static MetricType parse(char ch) {
+        return parse(ch + "");
+    }
+
+    public static MetricType parse(String s) {
+        return valueMap.get(s);
+    }
+
+    public static MetricType parse(int t) {
+        return typeMap.get(t);
+    }
+}
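
Together with MetaType above, this enum forms the two-character prefix of every metric name (see metaType()/metricType() in MetricUtils later in this commit). A small decoder sketch:

    public class PrefixDemo {
        public static void main(String[] args) {
            String name = "WC@SequenceTest@10.1.1.0@6800@sys@Counter";
            MetaType metaType = MetaType.parse(name.charAt(0));       // 'W' -> WORKER
            MetricType metricType = MetricType.parse(name.charAt(1)); // 'C' -> COUNTER
            System.out.println(metaType + "/" + metricType);          // WORKER/COUNTER
        }
    }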

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricUtils.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricUtils.java b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricUtils.java
new file mode 100644
index 0000000..e76effd
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/MetricUtils.java
@@ -0,0 +1,600 @@
+package com.alibaba.jstorm.metric;
+
+import backtype.storm.generated.MetricInfo;
+import backtype.storm.generated.MetricSnapshot;
+import backtype.storm.generated.TopologyMetric;
+import com.alibaba.jstorm.client.ConfigExtension;
+import com.alibaba.jstorm.common.metric.AsmMetric;
+import com.alibaba.jstorm.common.metric.snapshot.*;
+import com.alibaba.jstorm.utils.JStormUtils;
+import com.alibaba.jstorm.utils.TimeUtils;
+import com.codahale.metrics.*;
+import com.codahale.metrics.Timer;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.*;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * @author Cody (weiyue.wy@alibaba-inc.com)
+ * @since 2.0.5
+ */
+public class MetricUtils {
+    private static final Logger LOG = LoggerFactory.getLogger(MetricUtils.class);
+
+    public static final char AT = '@';
+    public static final String DELIM = AT + "";
+    public static final String EMPTY = "";
+    public static final String DEFAULT_GROUP = "sys";
+
+    public static final int MAX_POINTS_PER_WORKER = 200;
+    public static final int NETTY_METRIC_PAGE_SIZE = 200;
+
+    public static boolean isValidId(long metricId) {
+        return metricId != 0;
+    }
+
+    public static MetricInfo mkMetricInfo() {
+        MetricInfo ret = new MetricInfo();
+        ret.set_metrics(new HashMap<String, Map<Integer, MetricSnapshot>>());
+
+        return ret;
+    }
+
+    public static TopologyMetric mkTopologyMetric() {
+        TopologyMetric emptyTopologyMetric = new TopologyMetric();
+
+        emptyTopologyMetric.set_topologyMetric(new MetricInfo());
+        emptyTopologyMetric.set_componentMetric(new MetricInfo());
+        emptyTopologyMetric.set_workerMetric(new MetricInfo());
+        emptyTopologyMetric.set_taskMetric(new MetricInfo());
+        emptyTopologyMetric.set_streamMetric(new MetricInfo());
+        emptyTopologyMetric.set_nettyMetric(new MetricInfo());
+
+        return emptyTopologyMetric;
+    }
+
+    public static boolean isEnableNettyMetrics(Map stormConf) {
+        int maxWorkerNumForNetty = ConfigExtension.getTopologyMaxWorkerNumForNettyMetrics(stormConf);
+        int workerNum = JStormUtils.parseInt(stormConf.get("topology.workers"), 1);
+        return workerNum < maxWorkerNumForNetty;
+    }
+
+    /**
+     * a metric name is composed of: type@topologyId@componentId@taskId@streamId@group@name for non-worker metrics,
+     * or type@topologyId@host@port@group@name for worker metrics
+     */
+    public static String metricName(String type, String topologyId, String componentId, int taskId, String streamId, String group, String name) {
+        return concat(type, topologyId, componentId, taskId, streamId, group, name);
+    }
+
+    public static String streamMetricName(String topologyId, String componentId, int taskId, String streamId, String name, MetricType type) {
+        return concat(MetaType.STREAM.getV() + type.getV(), topologyId, componentId, taskId, streamId, DEFAULT_GROUP, name);
+    }
+
+    public static String workerMetricName(String topologyId, String host, int port, String name, MetricType type) {
+        return concat(MetaType.WORKER.getV() + type.getV(), topologyId, host, port, DEFAULT_GROUP, name);
+    }
+
+    public static String workerMetricName(String name, MetricType type) {
+        return concat(MetaType.WORKER.getV() + type.getV(), EMPTY, EMPTY, 0, DEFAULT_GROUP, name);
+    }
+
+    public static String nettyMetricName(String name, MetricType type) {
+        return concat(MetaType.NETTY.getV() + type.getV(), EMPTY, EMPTY, 0, JStormMetrics.NETTY_GROUP, name);
+    }
+
+    public static String workerMetricPrefix(String topologyId, String host, int port) {
+        return concat(MetaType.WORKER.getV(), topologyId, host, port);
+    }
+
+    public static String taskMetricName(String topologyId, String componentId, int taskId, String name, MetricType type) {
+        return concat(MetaType.TASK.getV() + type.getV(), topologyId, componentId, taskId, EMPTY, DEFAULT_GROUP, name);
+    }
+
+    public static String taskMetricName(String topologyId, String componentId, int taskId, String group, String name, MetricType type) {
+        return concat(MetaType.TASK.getV() + type.getV(), topologyId, componentId, taskId, EMPTY, group, name);
+    }
+
+    public static String compMetricName(String topologyId, String componentId, String name, MetricType type) {
+        return concat(MetaType.COMPONENT.getV() + type.getV(), topologyId, componentId, 0, EMPTY, DEFAULT_GROUP, name);
+    }
+
+    public static String removeDelimIfPossible(String name) {
+        if (name.contains(DELIM)) {
+            return name.replace(DELIM, EMPTY);
+        }
+        return name;
+    }
+
+    public static MetaType metaType(String name) {
+        return MetaType.parse(name.charAt(0) + EMPTY);
+    }
+
+    public static MetricType metricType(String name) {
+        return MetricType.parse(name.charAt(1) + EMPTY);
+    }
+
+    /**
+     * make streamId empty, leaving other parts unchanged
+     */
+    public static String stream2taskName(String old) {
+        String[] parts = old.split(DELIM);
+        if (parts.length >= 7) {
+            parts[0] = MetaType.TASK.getV() + parts[0].charAt(1);
+            parts[parts.length - 3] = EMPTY;
+        }
+        return concat(parts);
+    }
+
+    /**
+     * make taskId=0 and streamId empty.
+     */
+    public static String task2compName(String old) {
+        String[] parts = old.split(DELIM);
+        if (parts.length >= 7) {
+            parts[0] = MetaType.COMPONENT.getV() + parts[0].charAt(1);
+            parts[parts.length - 3] = EMPTY;
+            parts[parts.length - 4] = "0";
+        }
+        return concat(parts);
+    }
+
+    /**
+     * make taskId=0 and streamId empty, keeping only the part of metricName after the first '.'
+     */
+    public static String task2MergeCompName(String old) {
+        String[] parts = old.split(DELIM);
+        if (parts.length >= 7) {
+            parts[0] = MetaType.COMPONENT.getV() + parts[0].charAt(1);
+            parts[parts.length - 3] = EMPTY;
+            parts[parts.length - 4] = "0";
+
+            String metricName = parts[parts.length - 1];
+            int dotIndex = metricName.indexOf(".");
+            if (dotIndex != -1) {
+                metricName = metricName.substring(dotIndex + 1);
+                parts[parts.length - 1] = metricName;
+            }
+        }
+        return concat(parts);
+    }
+
+    /**
+     * convert a component metric name to a topology metric name, only for topology metrics
+     */
+    public static String comp2topologyName(String old) {
+        String[] parts = old.split(DELIM);
+        parts[0] = MetaType.TOPOLOGY.getV() + parts[0].charAt(1);
+        // type + topologyId + host + port + group + name
+        return concat(parts[0], parts[1], EMPTY, "0", parts[5], parts[6]);
+    }
+
+    public static String worker2topologyName(String old) {
+        String[] parts = old.split(DELIM);
+        if (parts.length >= 5) {
+            parts[0] = MetaType.TOPOLOGY.getV() + parts[0].charAt(1);
+            parts[2] = EMPTY;   // host
+            parts[3] = "0";     // port
+        }
+        return concat(parts);
+    }
+
+    public static String topo2clusterName(String old) {
+        String[] parts = old.split(DELIM);
+        parts[1] = JStormMetrics.CLUSTER_METRIC_KEY;
+        return concat(parts);
+    }
+
+    public static String concat(Object... args) {
+        StringBuilder sb = new StringBuilder(50);
+        int last = args.length - 1;
+        if (args[last] instanceof String) {
+            args[last] = removeDelimIfPossible((String) args[last]);
+        }
+        for (Object arg : args) {
+            sb.append(arg).append(DELIM);
+        }
+        sb.deleteCharAt(sb.length() - 1);
+        return sb.toString();
+    }
+
+    public static String concat2(Object... args) {
+        StringBuilder sb = new StringBuilder(50);
+        for (Object arg : args) {
+            sb.append(arg).append(DELIM);
+        }
+        sb.deleteCharAt(sb.length() - 1);
+        return sb.toString();
+    }
+
+    public static String concat3(String delim, Object... args) {
+        StringBuilder sb = new StringBuilder(50);
+        for (Object arg : args) {
+            sb.append(arg).append(delim);
+        }
+        sb.deleteCharAt(sb.length() - 1);
+        return sb.toString();
+    }
+
+    public static Histogram metricSnapshot2Histogram(MetricSnapshot snapshot) {
+        Histogram histogram = new Histogram(new ExponentiallyDecayingReservoir());
+        List<Long> points = snapshot.get_points();
+        updateHistogramPoints(histogram, points);
+        return histogram;
+    }
+
+    public static Timer metricSnapshot2Timer(MetricSnapshot snapshot) {
+        Timer timer = new Timer(new ExponentiallyDecayingReservoir());
+        List<Long> points = snapshot.get_points();
+        updateTimerPoints(timer, points);
+        return timer;
+    }
+
+    public static void updateHistogramPoints(Histogram histogram, List<Long> points) {
+        if (points != null) {
+            for (Long pt : points) {
+                histogram.update(pt);
+            }
+        }
+    }
+
+    public static void updateTimerPoints(Timer timer, List<Long> points) {
+        if (points != null) {
+            for (Long pt : points) {
+                timer.update(pt, TimeUnit.MILLISECONDS);
+            }
+        }
+    }
+
+    public static Map<Integer, MetricSnapshot> toThriftCounterSnapshots(Map<Integer, AsmSnapshot> snapshots) {
+        Map<Integer, MetricSnapshot> ret = Maps.newHashMapWithExpectedSize(snapshots.size());
+        for (Map.Entry<Integer, AsmSnapshot> entry : snapshots.entrySet()) {
+            ret.put(entry.getKey(), convert((AsmCounterSnapshot) entry.getValue()));
+        }
+        return ret;
+    }
+
+    public static Map<Integer, MetricSnapshot> toThriftGaugeSnapshots(Map<Integer, AsmSnapshot> snapshots) {
+        Map<Integer, MetricSnapshot> ret = Maps.newHashMapWithExpectedSize(snapshots.size());
+        for (Map.Entry<Integer, AsmSnapshot> entry : snapshots.entrySet()) {
+            ret.put(entry.getKey(), convert((AsmGaugeSnapshot) entry.getValue()));
+        }
+        return ret;
+    }
+
+    public static Map<Integer, MetricSnapshot> toThriftMeterSnapshots(Map<Integer, AsmSnapshot> snapshots) {
+        Map<Integer, MetricSnapshot> ret = Maps.newHashMapWithExpectedSize(snapshots.size());
+        for (Map.Entry<Integer, AsmSnapshot> entry : snapshots.entrySet()) {
+            ret.put(entry.getKey(), convert((AsmMeterSnapshot) entry.getValue()));
+        }
+        return ret;
+    }
+
+    public static Map<Integer, MetricSnapshot> toThriftHistoSnapshots(MetaType metaType, Map<Integer, AsmSnapshot> snapshots) {
+        Map<Integer, MetricSnapshot> ret = Maps.newHashMapWithExpectedSize(snapshots.size());
+        for (Map.Entry<Integer, AsmSnapshot> entry : snapshots.entrySet()) {
+            MetricSnapshot histogramSnapshot = convert(metaType, (AsmHistogramSnapshot) entry.getValue());
+            if (histogramSnapshot != null) {
+                ret.put(entry.getKey(), histogramSnapshot);
+            }
+        }
+        return ret;
+    }
+
+    public static Map<Integer, MetricSnapshot> toThriftTimerSnapshots(MetaType metaType, Map<Integer, AsmSnapshot> snapshots) {
+        Map<Integer, MetricSnapshot> ret = Maps.newHashMapWithExpectedSize(snapshots.size());
+        for (Map.Entry<Integer, AsmSnapshot> entry : snapshots.entrySet()) {
+            MetricSnapshot timerSnapshot = convert(metaType, (AsmTimerSnapshot) entry.getValue());
+            if (timerSnapshot != null) {
+                ret.put(entry.getKey(), timerSnapshot);
+            }
+        }
+        return ret;
+    }
+
+    public static MetricSnapshot convert(AsmCounterSnapshot snapshot) {
+        MetricSnapshot ret = new MetricSnapshot();
+        ret.set_metricId(snapshot.getMetricId());
+        ret.set_ts(TimeUtils.alignTimeToMin(snapshot.getTs()));
+        ret.set_metricType(MetricType.COUNTER.getT());
+        ret.set_longValue(snapshot.getV());
+
+        return ret;
+    }
+
+    public static MetricSnapshot convert(AsmGaugeSnapshot snapshot) {
+        MetricSnapshot ret = new MetricSnapshot();
+        ret.set_metricId(snapshot.getMetricId());
+        ret.set_ts(TimeUtils.alignTimeToMin(snapshot.getTs()));
+        ret.set_metricType(MetricType.GAUGE.getT());
+        ret.set_doubleValue(snapshot.getV());
+
+        return ret;
+    }
+
+    public static MetricSnapshot convert(AsmMeterSnapshot snapshot) {
+        MetricSnapshot ret = new MetricSnapshot();
+        ret.set_metricId(snapshot.getMetricId());
+        ret.set_ts(TimeUtils.alignTimeToMin(snapshot.getTs()));
+        ret.set_metricType(MetricType.METER.getT());
+
+        ret.set_m1(snapshot.getM1());
+        ret.set_m5(snapshot.getM5());
+        ret.set_m15(snapshot.getM15());
+        ret.set_mean(snapshot.getMean());
+
+        return ret;
+    }
+
+    public static MetricSnapshot convert(MetaType metaType, AsmHistogramSnapshot snapshot) {
+        // some histograms are never updated, skip such metrics
+        //if (snapshot.getSnapshot().getValues().length == 0) {
+        //    return null;
+        //}
+
+        MetricSnapshot ret = new MetricSnapshot();
+        ret.set_metricId(snapshot.getMetricId());
+        ret.set_ts(TimeUtils.alignTimeToMin(snapshot.getTs()));
+        ret.set_metricType(MetricType.HISTOGRAM.getT());
+
+        Snapshot ws = snapshot.getSnapshot();
+        ret.set_min(ws.getMin());
+        ret.set_max(ws.getMax());
+        ret.set_p50(ws.getMedian());
+        ret.set_p75(ws.get75thPercentile());
+        ret.set_p95(ws.get95thPercentile());
+        ret.set_p98(ws.get98thPercentile());
+        ret.set_p99(ws.get99thPercentile());
+        ret.set_p999(ws.get999thPercentile());
+        ret.set_mean(ws.getMean());
+        ret.set_stddev(ws.getStdDev());
+
+        // only upload points for component metrics
+        if (metaType == MetaType.COMPONENT || metaType == MetaType.TOPOLOGY) {
+            List<Long> pts = Lists.newArrayListWithCapacity(ws.getValues().length);
+            for (Long pt : ws.getValues()) {
+                pts.add(pt);
+            }
+            ret.set_points(pts);
+        } else {
+            ret.set_points(new ArrayList<Long>(0));
+        }
+
+        return ret;
+    }
+
+    public static MetricSnapshot convert(MetaType metaType, AsmTimerSnapshot snapshot) {
+        // some histograms are never updated, skip such metrics
+        /*
+        if (snapshot.getHistogram().getValues().length == 0) {
+            return null;
+        }
+        */
+
+        MetricSnapshot ret = new MetricSnapshot();
+        ret.set_metricId(snapshot.getMetricId());
+        ret.set_ts(TimeUtils.alignTimeToMin(snapshot.getTs()));
+        ret.set_metricType(MetricType.TIMER.getT());
+
+        Snapshot ws = snapshot.getHistogram();
+        ret.set_min(ws.getMin());
+        ret.set_max(ws.getMax());
+        ret.set_p50(ws.getMedian());
+        ret.set_p75(ws.get75thPercentile());
+        ret.set_p95(ws.get95thPercentile());
+        ret.set_p98(ws.get98thPercentile());
+        ret.set_p99(ws.get99thPercentile());
+        ret.set_p999(ws.get999thPercentile());
+        ret.set_mean(ws.getMean());
+        ret.set_stddev(ws.getStdDev());
+
+        AsmMeterSnapshot ms = snapshot.getMeter();
+        ret.set_m1(ms.getM1());
+        ret.set_m5(ms.getM5());
+        ret.set_m15(ms.getM15());
+
+        // only upload points for component metrics
+        if (metaType == MetaType.COMPONENT || metaType == MetaType.TOPOLOGY) {
+            List<Long> pts = Lists.newArrayListWithCapacity(ws.getValues().length);
+            for (Long pt : ws.getValues()) {
+                pts.add(pt);
+            }
+            ret.set_points(pts);
+        } else {
+            ret.set_points(new ArrayList<Long>(0));
+        }
+
+        return ret;
+    }
+
+    public static String getMetricName(String fullName) {
+        String[] parts = fullName.split(DELIM);
+        return parts[parts.length - 1];
+    }
+
+    public static String str(Object obj) {
+        if (obj instanceof MetricSnapshot) {
+            MetricSnapshot snapshot = (MetricSnapshot) obj;
+            MetricType type = MetricType.parse(snapshot.get_metricType());
+            if (type == MetricType.COUNTER) {
+                return counterStr(snapshot);
+            } else if (type == MetricType.GAUGE) {
+                return gaugeStr(snapshot);
+            } else if (type == MetricType.METER) {
+                return meterStr(snapshot);
+            } else if (type == MetricType.HISTOGRAM) {
+                return histogramStr(snapshot);
+            } else if (type == MetricType.TIMER) {
+                return timerStr(snapshot);
+            }
+        }
+        return obj.toString();
+    }
+
+    public static String counterStr(MetricSnapshot snapshot) {
+        StringBuilder sb = new StringBuilder(32);
+        sb.append("id:").append(snapshot.get_metricId()).append(",v:").append(snapshot.get_longValue());
+
+        return sb.toString();
+    }
+
+    public static String gaugeStr(MetricSnapshot snapshot) {
+        StringBuilder sb = new StringBuilder(32);
+        sb.append("id:").append(snapshot.get_metricId()).append(",v:").append(snapshot.get_doubleValue());
+
+        return sb.toString();
+    }
+
+    public static String meterStr(MetricSnapshot snapshot) {
+        StringBuilder sb = new StringBuilder(50);
+        sb.append("id:").append(snapshot.get_metricId());
+        sb.append(",m1:").append(snapshot.get_m1()).append(",").append("m5:").append(snapshot.get_m5())
+                .append(",").append("m15:").append(snapshot.get_m15());
+        return sb.toString();
+    }
+
+    public static String histogramStr(MetricSnapshot snapshot) {
+        StringBuilder sb = new StringBuilder(128);
+        sb.append("histogram");
+        sb.append("(").append("id:").append(snapshot.get_metricId()).append(",").append("min:").append(snapshot.get_min()).append(",").append("max:")
+                .append(snapshot.get_max()).append(",").append("mean:").append(snapshot.get_mean()).append(",").append("p50:").append(snapshot.get_p50())
+                .append(",").append("p75:").append(snapshot.get_p75()).append(",").append("p95:").append(snapshot.get_p95()).append(",").append("p98:")
+                .append(snapshot.get_p98()).append(",").append("p99:").append(snapshot.get_p99()).append(",").append("pts:").append(snapshot.get_points_size())
+                .append(")");
+        return sb.toString();
+    }
+
+    public static String timerStr(MetricSnapshot snapshot) {
+        StringBuilder sb = new StringBuilder(128);
+        sb.append("timer");
+        sb.append("(").append("id:").append(snapshot.get_metricId()).append(",").append("min:").append(snapshot.get_min()).append(",").append("max:")
+                .append(snapshot.get_max()).append(",").append("mean:").append(snapshot.get_mean()).append(",").append("p50:").append(snapshot.get_p50())
+                .append(",").append("p75:").append(snapshot.get_p75()).append(",").append("p95:").append(snapshot.get_p95()).append(",").append("p98:")
+                .append(snapshot.get_p98()).append(",").append("p99:").append(snapshot.get_p99()).append(",").append("m1:").append(snapshot.get_m1())
+                .append(",").append("m5:").append(snapshot.get_m5()).append(",").append("m15:").append(snapshot.get_m15()).append(",").append("pts:")
+                .append(snapshot.get_points_size()).append(")");
+        return sb.toString();
+    }
+
+    public static void printMetricSnapshot(AsmMetric metric, Map<Integer, AsmSnapshot> snapshots) {
+        StringBuilder sb = new StringBuilder(128);
+        sb.append("metric:").append(metric.getMetricName()).append(", ");
+        for (Map.Entry<Integer, AsmSnapshot> entry : snapshots.entrySet()) {
+            sb.append("win:").append(entry.getKey()).append(", v:")
+                    .append(getSnapshotDefaultValue(entry.getValue())).append("; ");
+        }
+
+        LOG.info(sb.toString());
+    }
+
+    public static double getSnapshotDefaultValue(AsmSnapshot snapshot) {
+        if (snapshot instanceof AsmCounterSnapshot) {
+            return ((AsmCounterSnapshot) snapshot).getV();
+        } else if (snapshot instanceof AsmGaugeSnapshot) {
+            return ((AsmGaugeSnapshot) snapshot).getV();
+        } else if (snapshot instanceof AsmMeterSnapshot) {
+            return ((AsmMeterSnapshot) snapshot).getM1();
+        } else if (snapshot instanceof AsmHistogramSnapshot) {
+            return ((AsmHistogramSnapshot) snapshot).getSnapshot().getMean();
+        } else if (snapshot instanceof AsmTimerSnapshot) {
+            return ((AsmTimerSnapshot) snapshot).getHistogram().getMean();
+        }
+        return 0;
+    }
+
+    public static void printMetricInfo(MetricInfo metricInfo) {
+        iterateMap(metricInfo.get_metrics());
+    }
+
+    public static void printMetricInfo(MetricInfo metricInfo, Set<String> metrics) {
+        iterateMap(metricInfo.get_metrics(), metrics);
+    }
+
+    public static <T> void iterateMap(Map<String, Map<Integer, T>> map) {
+        iterateMap(map, null);
+    }
+
+    public static <T> void iterateMap(Map<String, Map<Integer, T>> map, Set<String> metrics) {
+        for (Map.Entry<String, Map<Integer, T>> entry : map.entrySet()) {
+            String name = entry.getKey();
+            boolean print = false;
+            if (metrics == null) {
+                print = true;
+            } else {
+                for (String metric : metrics) {
+                    if (name.contains(metric)) {
+                        print = true;
+                        break;
+                    }
+                }
+            }
+            if (print) {
+                Map<Integer, T> winData = entry.getValue();
+                for (Map.Entry<Integer, T> win : winData.entrySet()) {
+                    T v = win.getValue();
+                    String str;
+                    if (v instanceof MetricSnapshot) {
+                        str = MetricUtils.str(v);
+                    } else {
+                        str = v.toString();
+                    }
+                    LOG.info("metric:{}, win:{}, data:{}", name, win.getKey(), str);
+                }
+            }
+        }
+    }
+
+    private static <T> void iter(Map<String, T> map, Func func, Object... args) {
+        for (Map.Entry<String, T> entry : map.entrySet()) {
+            func.exec(entry, args);
+        }
+    }
+
+
+    public interface Func {
+        void exec(Object... args);
+    }
+
+
+    /**
+     * print the default value for all metrics, in the format: type|value|name
+     */
+    public static void logMetrics(MetricInfo metricInfo) {
+        Map<String, Map<Integer, MetricSnapshot>> metrics = metricInfo.get_metrics();
+        if (metrics != null) {
+            LOG.info("\nprint metrics:");
+            for (Map.Entry<String, Map<Integer, MetricSnapshot>> entry : metrics.entrySet()) {
+                String name = entry.getKey();
+                MetricSnapshot metricSnapshot = entry.getValue().get(AsmWindow.M1_WINDOW);
+                if (metricSnapshot != null) {
+                    MetricType metricType = MetricType.parse(metricSnapshot.get_metricType());
+                    double v;
+                    if (metricType == MetricType.COUNTER) {
+                        v = metricSnapshot.get_longValue();
+                    } else if (metricType == MetricType.GAUGE) {
+                        v = metricSnapshot.get_doubleValue();
+                    } else if (metricType == MetricType.METER) {
+                        v = metricSnapshot.get_m1();
+                    } else if (metricType == MetricType.HISTOGRAM) {
+                        v = metricSnapshot.get_mean();
+                    } else if (metricType == MetricType.TIMER) {
+                        v = metricSnapshot.get_mean();
+                    } else {
+                        v = 0;
+                    }
+                    LOG.info("{}|{}|{}", metricType, v, name);
+                }
+            }
+            LOG.info("\n");
+        }
+    }
+
+    public static void main(String[] args) {
+        String workerName = "WC@SequenceTest@10.1.1.0@6800@sys@Counter";
+        System.out.println(worker2topologyName(workerName));
+    }
+}
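
For reference, getSnapshotDefaultValue and logMetrics above both collapse a snapshot to one representative number per metric type: counters report their long value, gauges their double value, meters the one-minute rate, and histograms/timers the mean. Below is a minimal self-contained sketch of the same dispatch; the class name and the map-based fields are hypothetical stand-ins, not JStorm API.

import java.util.LinkedHashMap;
import java.util.Map;

public class SnapshotDefaultValueSketch {
    enum MetricType { COUNTER, GAUGE, METER, HISTOGRAM, TIMER }

    // mirrors the per-type default-value selection in the diff above
    static double defaultValue(MetricType type, Map<String, Double> fields) {
        switch (type) {
            case COUNTER:   return fields.get("longValue");   // cumulative count
            case GAUGE:     return fields.get("doubleValue"); // instantaneous value
            case METER:     return fields.get("m1");          // 1-minute rate
            case HISTOGRAM:
            case TIMER:     return fields.get("mean");        // mean of samples
            default:        return 0;
        }
    }

    public static void main(String[] args) {
        Map<String, Double> meter = new LinkedHashMap<String, Double>();
        meter.put("m1", 42.0);
        // prints METER|42.0|emit.tps, the same type|value|name layout logMetrics uses
        System.out.println(MetricType.METER + "|" + defaultValue(MetricType.METER, meter) + "|emit.tps");
    }
}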


[06/51] [partial] storm git commit: Update JStorm to latest release 2.1.0

Posted by pt...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/Selector/ComponentNumSelector.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/Selector/ComponentNumSelector.java b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/Selector/ComponentNumSelector.java
index 8170ae2..496c2fc 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/Selector/ComponentNumSelector.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/Selector/ComponentNumSelector.java
@@ -40,12 +40,8 @@ public class ComponentNumSelector extends AbstractSelector {
             @Override
             public int compare(ResourceWorkerSlot o1, ResourceWorkerSlot o2) {
                 // TODO Auto-generated method stub
-                int o1Num =
-                        context.getComponentNumOnSupervisor(o1.getNodeId(),
-                                name);
-                int o2Num =
-                        context.getComponentNumOnSupervisor(o2.getNodeId(),
-                                name);
+                int o1Num = context.getComponentNumOnSupervisor(o1.getNodeId(), name);
+                int o2Num = context.getComponentNumOnSupervisor(o2.getNodeId(), name);
                 if (o1Num == o2Num)
                     return 0;
                 return o1Num > o2Num ? 1 : -1;

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/Selector/InputComponentNumSelector.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/Selector/InputComponentNumSelector.java b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/Selector/InputComponentNumSelector.java
index 49eb447..f7f4f5b 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/Selector/InputComponentNumSelector.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/Selector/InputComponentNumSelector.java
@@ -40,12 +40,8 @@ public class InputComponentNumSelector extends AbstractSelector {
             @Override
             public int compare(ResourceWorkerSlot o1, ResourceWorkerSlot o2) {
                 // TODO Auto-generated method stub
-                int o1Num =
-                        context.getInputComponentNumOnSupervisor(
-                                o1.getNodeId(), name);
-                int o2Num =
-                        context.getInputComponentNumOnSupervisor(
-                                o2.getNodeId(), name);
+                int o1Num = context.getInputComponentNumOnSupervisor(o1.getNodeId(), name);
+                int o2Num = context.getInputComponentNumOnSupervisor(o2.getNodeId(), name);
                 if (o1Num == o2Num)
                     return 0;
                 return o1Num > o2Num ? -1 : 1;
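
Note that the two selectors above deliberately sort in opposite directions: ComponentNumSelector returns 1 when o1 hosts more instances of the same component (ascending, so a component gets spread across supervisors), while InputComponentNumSelector returns -1 in that case (descending, so a task lands near its upstream components). A small standalone illustration with hypothetical per-worker counts, not JStorm code:

import java.util.Arrays;
import java.util.Collections;

public class SelectorOrderSketch {
    public static void main(String[] args) {
        Integer[] countsPerWorker = {3, 0, 2};

        Integer[] spread = countsPerWorker.clone();
        Arrays.sort(spread); // ascending, like ComponentNumSelector's comparator
        System.out.println("spread order:   " + Arrays.toString(spread));   // [0, 2, 3]

        Integer[] locality = countsPerWorker.clone();
        Arrays.sort(locality, Collections.reverseOrder()); // descending, like InputComponentNumSelector
        System.out.println("locality order: " + Arrays.toString(locality)); // [3, 2, 0]
    }
}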

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/Selector/Selector.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/Selector/Selector.java b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/Selector/Selector.java
index adc8b29..6ef5736 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/Selector/Selector.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/Selector/Selector.java
@@ -22,6 +22,5 @@ import java.util.List;
 import com.alibaba.jstorm.schedule.default_assign.ResourceWorkerSlot;
 
 public interface Selector {
-    public List<ResourceWorkerSlot> select(List<ResourceWorkerSlot> result,
-            String name);
+    public List<ResourceWorkerSlot> select(List<ResourceWorkerSlot> result, String name);
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/Selector/WorkerComparator.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/Selector/WorkerComparator.java b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/Selector/WorkerComparator.java
index f01ee9a..8643d6a 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/Selector/WorkerComparator.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/Selector/WorkerComparator.java
@@ -21,8 +21,7 @@ import java.util.Comparator;
 
 import com.alibaba.jstorm.schedule.default_assign.ResourceWorkerSlot;
 
-public abstract class WorkerComparator implements
-        Comparator<ResourceWorkerSlot> {
+public abstract class WorkerComparator implements Comparator<ResourceWorkerSlot> {
 
     protected String name;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/TaskAssignContext.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/TaskAssignContext.java b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/TaskAssignContext.java
index f81d072..33f762a 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/TaskAssignContext.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/TaskAssignContext.java
@@ -24,33 +24,39 @@ import java.util.Map.Entry;
 import java.util.Set;
 
 public class TaskAssignContext {
-
+    private final Map<Integer, String> taskToComponent;
+    
     private final Map<String, List<ResourceWorkerSlot>> supervisorToWorker;
 
     private final Map<String, Set<String>> relationship;
 
     // Map<worker, Map<component name, assigned task num in this worker>>
-    private final Map<ResourceWorkerSlot, Map<String, Integer>> workerToComponentNum =
-            new HashMap<ResourceWorkerSlot, Map<String, Integer>>();
+    private final Map<ResourceWorkerSlot, Map<String, Integer>> workerToComponentNum = new HashMap<ResourceWorkerSlot, Map<String, Integer>>();
 
     // Map<available worker, assigned task num in this worker>
-    private final Map<ResourceWorkerSlot, Integer> workerToTaskNum =
-            new HashMap<ResourceWorkerSlot, Integer>();
+    private final Map<ResourceWorkerSlot, Integer> workerToTaskNum = new HashMap<ResourceWorkerSlot, Integer>();
 
-    private final Map<String, ResourceWorkerSlot> HostPortToWorkerMap =
-            new HashMap<String, ResourceWorkerSlot>();
+    private final Map<String, ResourceWorkerSlot> HostPortToWorkerMap = new HashMap<String, ResourceWorkerSlot>();
 
-    public TaskAssignContext(
-            Map<String, List<ResourceWorkerSlot>> supervisorToWorker,
-            Map<String, Set<String>> relationship) {
+    public TaskAssignContext(Map<String, List<ResourceWorkerSlot>> supervisorToWorker, Map<String, Set<String>> relationship, Map<Integer, String> taskToComponent) {
+        this.taskToComponent = taskToComponent;
         this.supervisorToWorker = supervisorToWorker;
         this.relationship = relationship;
 
-        for (Entry<String, List<ResourceWorkerSlot>> entry : supervisorToWorker
-                .entrySet()) {
+        for (Entry<String, List<ResourceWorkerSlot>> entry : supervisorToWorker.entrySet()) {
             for (ResourceWorkerSlot worker : entry.getValue()) {
-                workerToTaskNum.put(worker, 0);
+                workerToTaskNum.put(worker, (worker.getTasks() != null ? worker.getTasks().size() : 0));
                 HostPortToWorkerMap.put(worker.getHostPort(), worker);
+   
+                if (worker.getTasks() != null) {
+                    Map<String, Integer> componentToNum = new HashMap<String, Integer>();
+                    for (Integer taskId : worker.getTasks()) {
+                        String componentId = taskToComponent.get(taskId);
+                        int num = componentToNum.get(componentId) == null ? 0 : componentToNum.get(componentId);
+                        componentToNum.put(componentId, ++num);
+                    }
+                    workerToComponentNum.put(worker, componentToNum);
+                }
             }
         }
     }
@@ -115,8 +121,7 @@ public class TaskAssignContext {
         return result;
     }
 
-    public int getInputComponentNumOnWorker(ResourceWorkerSlot worker,
-            String name) {
+    public int getInputComponentNumOnWorker(ResourceWorkerSlot worker, String name) {
         int result = 0;
         for (String component : relationship.get(name))
             result = result + this.getComponentNumOnWorker(worker, component);
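
The substantive part of this change is that the TaskAssignContext constructor now seeds workerToTaskNum and workerToComponentNum from tasks already sitting on each worker, instead of starting every worker at zero. A standalone sketch of that tally, with assumed data (the loop shape mirrors the constructor above; everything else is hypothetical):

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ComponentCountSketch {
    public static void main(String[] args) {
        Map<Integer, String> taskToComponent = new HashMap<Integer, String>();
        taskToComponent.put(1, "spout");
        taskToComponent.put(2, "bolt");
        taskToComponent.put(3, "bolt");

        List<Integer> tasksOnWorker = Arrays.asList(1, 2, 3); // stands in for worker.getTasks()

        // tally how many tasks of each component this worker already hosts
        Map<String, Integer> componentToNum = new HashMap<String, Integer>();
        for (Integer taskId : tasksOnWorker) {
            String componentId = taskToComponent.get(taskId);
            Integer num = componentToNum.get(componentId);
            componentToNum.put(componentId, num == null ? 1 : num + 1);
        }
        System.out.println(componentToNum); // e.g. {bolt=2, spout=1} (map order not guaranteed)
    }
}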

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/TaskScheduler.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/TaskScheduler.java b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/TaskScheduler.java
index 7131463..a359972 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/TaskScheduler.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/TaskScheduler.java
@@ -41,20 +41,15 @@ public class TaskScheduler {
 
     public static Logger LOG = LoggerFactory.getLogger(TaskScheduler.class);
 
-    public static final String ACKER_NAME = "__acker";
-
     private final TaskAssignContext taskContext;
 
-    private List<ResourceWorkerSlot> assignments =
-            new ArrayList<ResourceWorkerSlot>();
+    private List<ResourceWorkerSlot> assignments = new ArrayList<ResourceWorkerSlot>();
 
     private int workerNum;
 
     /**
-     * For balance purpose, default scheduler is trying to assign the same
-     * number of tasks to a worker. e.g. There are 4 tasks and 3 available
-     * workers. Each worker will be assigned one task first. And then one worker
-     * is chosen for the last one.
+     * For balance purposes, the default scheduler tries to assign the same number of tasks to each worker. E.g., with 4 tasks and 3 available workers, each
+     * worker is assigned one task first, and then one worker is chosen for the last one.
      */
     private int avgTaskNum;
     private int leftTaskNum;
@@ -69,48 +64,91 @@ public class TaskScheduler {
 
     private Selector totalTaskNumSelector;
 
-    public TaskScheduler(DefaultTopologyAssignContext context,
-            Set<Integer> tasks, List<ResourceWorkerSlot> workers) {
+    public TaskScheduler(DefaultTopologyAssignContext context, Set<Integer> tasks, List<ResourceWorkerSlot> workers) {
         this.tasks = tasks;
-        LOG.info("Tasks " + tasks + " is going to be assigned in workers "
-                + workers);
+        LOG.info("Tasks " + tasks + " is going to be assigned in workers " + workers);
         this.context = context;
         this.taskContext =
-                new TaskAssignContext(this.buildSupervisorToWorker(workers),
-                        Common.buildSpoutOutoputAndBoltInputMap(context));
+                new TaskAssignContext(this.buildSupervisorToWorker(workers), Common.buildSpoutOutoputAndBoltInputMap(context), context.getTaskToComponent());
         this.componentSelector = new ComponentNumSelector(taskContext);
-        this.inputComponentSelector =
-                new InputComponentNumSelector(taskContext);
+        this.inputComponentSelector = new InputComponentNumSelector(taskContext);
         this.totalTaskNumSelector = new TotalTaskNumSelector(taskContext);
         if (tasks.size() == 0)
             return;
-        setTaskNum(tasks.size(), workerNum);
+        if (context.getAssignType() != TopologyAssignContext.ASSIGN_TYPE_REBALANCE || context.isReassign()) {
+            // Warning: this doesn't consider an HA topology master yet!
+            if (context.getAssignSingleWorkerForTM() && tasks.contains(context.getTopologyMasterTaskId())) {
+                assignForTopologyMaster();
+            }
+        }
+
+        int taskNum = tasks.size();
+        Map<ResourceWorkerSlot, Integer> workerSlotIntegerMap = taskContext.getWorkerToTaskNum();
+        Set<ResourceWorkerSlot> preAssignWorkers = new HashSet<ResourceWorkerSlot>();
+        for (Entry<ResourceWorkerSlot, Integer> worker : workerSlotIntegerMap.entrySet()) {
+            if (worker.getValue() > 0) {
+                taskNum += worker.getValue();
+                preAssignWorkers.add(worker.getKey());
+            }
+        }
+        setTaskNum(taskNum, workerNum);
+
+        // Check the assignment status of pre-assigned workers, e.g. user-defined or old-assignment workers.
+        // Remove the workers that have already been assigned enough tasks.
+        for (ResourceWorkerSlot worker : preAssignWorkers) {
+            Set<ResourceWorkerSlot> doneWorkers = removeWorkerFromSrcPool(taskContext.getWorkerToTaskNum().get(worker), worker);
+            if (doneWorkers != null) {
+                for (ResourceWorkerSlot doneWorker : doneWorkers) {
+                    taskNum -= doneWorker.getTasks().size();
+                    workerNum--;
+                }
+            }
+        }
+        setTaskNum(taskNum, workerNum);
 
         // For Scale-out case, the old assignment should be kept.
-        if (context.getAssignType() == TopologyAssignContext.ASSIGN_TYPE_REBALANCE
-                && context.isReassign() == false) {
-            keepAssignment(context.getOldAssignment().getWorkers());
+        if (context.getAssignType() == TopologyAssignContext.ASSIGN_TYPE_REBALANCE && !context.isReassign()) {
+            keepAssignment(taskNum, context.getOldAssignment().getWorkers());
         }
     }
 
-    private void keepAssignment(Set<ResourceWorkerSlot> keepAssignments) {
+    private void keepAssignment(int taskNum, Set<ResourceWorkerSlot> keepAssignments) {
         Set<Integer> keepTasks = new HashSet<Integer>();
+        ResourceWorkerSlot tmWorker = null;
         for (ResourceWorkerSlot worker : keepAssignments) {
+            if (worker.getTasks().contains(context.getTopologyMasterTaskId()))
+                tmWorker = worker;
             for (Integer taskId : worker.getTasks()) {
                 if (tasks.contains(taskId)) {
-                    ResourceWorkerSlot contextWorker =
-                            taskContext.getWorker(worker);
+                    ResourceWorkerSlot contextWorker = taskContext.getWorker(worker);
                     if (contextWorker != null) {
-                        String componentName =
-                                context.getTaskToComponent().get(taskId);
-                        updateAssignedTasksOfWorker(taskId, contextWorker);
-                        updateComponentsNumOfWorker(componentName,
-                                contextWorker);
-                        keepTasks.add(taskId);
+                        if (tmWorker != null && tmWorker.getTasks().contains(taskId) && context.getAssignSingleWorkerForTM()) {
+                            if (context.getTopologyMasterTaskId() == taskId) {
+                                updateAssignedTasksOfWorker(taskId, contextWorker);
+                                taskContext.getWorkerToTaskNum().remove(contextWorker);
+                                contextWorker.getTasks().clear();
+                                contextWorker.getTasks().add(taskId);
+                                assignments.add(contextWorker);
+                                tasks.remove(taskId);
+                                taskNum--;
+                                workerNum--;
+                                LOG.info("assignForTopologyMaster: " + contextWorker);
+                            }
+                        } else {
+                            String componentName = context.getTaskToComponent().get(taskId);
+                            updateAssignedTasksOfWorker(taskId, contextWorker);
+                            updateComponentsNumOfWorker(componentName, contextWorker);
+                            keepTasks.add(taskId);
+                        }
                     }
                 }
             }
         }
+        if (tmWorker != null) {
+            setTaskNum(taskNum, workerNum);
+            keepAssignments.remove(tmWorker);
+        }
+
 
         // Try to find the workers which have been assigned too many tasks
         // If found, remove the workers from worker resource pool and update
@@ -118,11 +156,9 @@ public class TaskScheduler {
         int doneAssignedTaskNum = 0;
         while (true) {
             boolean found = false;
-            Set<ResourceWorkerSlot> doneAssignedWorkers =
-                    new HashSet<ResourceWorkerSlot>();
+            Set<ResourceWorkerSlot> doneAssignedWorkers = new HashSet<ResourceWorkerSlot>();
             for (ResourceWorkerSlot worker : keepAssignments) {
-                ResourceWorkerSlot contextWorker =
-                        taskContext.getWorker(worker);
+                ResourceWorkerSlot contextWorker = taskContext.getWorker(worker);
                 if (contextWorker != null && isTaskFullForWorker(contextWorker)) {
                     found = true;
                     workerNum--;
@@ -135,7 +171,8 @@ public class TaskScheduler {
             }
 
             if (found) {
-                setTaskNum((tasks.size() - doneAssignedTaskNum), workerNum);
+                taskNum -= doneAssignedTaskNum;
+                setTaskNum(taskNum, workerNum);
                 keepAssignments.removeAll(doneAssignedWorkers);
             } else {
                 break;
@@ -150,45 +187,89 @@ public class TaskScheduler {
         Set<Integer> tasks = worker.getTasks();
 
         if (tasks != null) {
-            if ((leftTaskNum == 0 && tasks.size() >= avgTaskNum)
-                    || (leftTaskNum > 0 && tasks.size() >= (avgTaskNum + 1))) {
+            if ((leftTaskNum <= 0 && tasks.size() >= avgTaskNum) || (leftTaskNum > 0 && tasks.size() >= (avgTaskNum + 1))) {
                 ret = true;
             }
         }
         return ret;
     }
 
+    private Set<ResourceWorkerSlot> getRestAssignedWorkers() {
+        Set<ResourceWorkerSlot> ret = new HashSet<ResourceWorkerSlot>();
+        for (ResourceWorkerSlot worker : taskContext.getWorkerToTaskNum().keySet()) {
+            if (worker.getTasks() != null && worker.getTasks().size() > 0) {
+                ret.add(worker);
+            }
+        }
+        return ret;
+    }
+
     public List<ResourceWorkerSlot> assign() {
-        if (tasks.size() == 0)
+        if (tasks.size() == 0) {
+            assignments.addAll(getRestAssignedWorkers());
             return assignments;
+        }
 
         // Firstly, assign workers to the components which are configured
         // by "task.on.differ.node"
         Set<Integer> assignedTasks = assignForDifferNodeTask();
 
-        // Assign for the tasks except acker
+        // Assign the tasks, except system tasks
         tasks.removeAll(assignedTasks);
-        Set<Integer> ackers = new HashSet<Integer>();
+        Map<Integer, String> systemTasks = new HashMap<Integer, String>();
         for (Integer task : tasks) {
             String name = context.getTaskToComponent().get(task);
-            if (name.equals(TaskScheduler.ACKER_NAME)) {
-                ackers.add(task);
+            if (Common.isSystemComponent(name)) {
+                systemTasks.put(task, name);
                 continue;
             }
             assignForTask(name, task);
         }
-
-        // At last, make the assignment for acker
-        for (Integer task : ackers) {
-            assignForTask(TaskScheduler.ACKER_NAME, task);
+        
+        /*
+         * Finally, make the assignment for system components, e.g. acker, topology master...
+         */
+        for (Entry<Integer, String> entry : systemTasks.entrySet()) {
+            assignForTask(entry.getValue(), entry.getKey());
         }
+
+        assignments.addAll(getRestAssignedWorkers());
         return assignments;
     }
 
+    private void assignForTopologyMaster() {
+        int taskId = context.getTopologyMasterTaskId();
+
+        // Try to find a worker on the supervisor with the most workers,
+        // to avoid balance problems when assigning the remaining workers.
+        ResourceWorkerSlot workerAssigned = null;
+        int workerNumOfSuperv = 0;
+        for (ResourceWorkerSlot workerSlot : taskContext.getWorkerToTaskNum().keySet()){
+            List<ResourceWorkerSlot> workers = taskContext.getSupervisorToWorker().get(workerSlot.getNodeId());
+            if (workers != null && workers.size() > workerNumOfSuperv) {
+                for (ResourceWorkerSlot worker : workers) {
+                    Set<Integer> tasks = worker.getTasks();
+                    if (tasks == null || tasks.size() == 0) {
+                        workerAssigned = worker;
+                        workerNumOfSuperv = workers.size();
+                        break;
+                    }
+                }
+            }
+        }
+
+        if (workerAssigned == null)
+            throw new FailedAssignTopologyException("there are not enough workers for the assignment of the topology master");
+        updateAssignedTasksOfWorker(taskId, workerAssigned);
+        taskContext.getWorkerToTaskNum().remove(workerAssigned);
+        assignments.add(workerAssigned);
+        tasks.remove(taskId);
+        workerNum--;
+        LOG.info("assignForTopologyMaster, assignments=" + assignments);
+    }
+
     private void assignForTask(String name, Integer task) {
-        ResourceWorkerSlot worker =
-                chooseWorker(name, new ArrayList<ResourceWorkerSlot>(
-                        taskContext.getWorkerToTaskNum().keySet()));
+        ResourceWorkerSlot worker = chooseWorker(name, new ArrayList<ResourceWorkerSlot>(taskContext.getWorkerToTaskNum().keySet()));
         pushTaskToWorker(task, name, worker);
     }
 
@@ -201,98 +282,97 @@ public class TaskScheduler {
         }
         for (Integer task : ret) {
             String name = context.getTaskToComponent().get(task);
-            ResourceWorkerSlot worker =
-                    chooseWorker(name, getDifferNodeTaskWokers(name));
+            ResourceWorkerSlot worker = chooseWorker(name, getDifferNodeTaskWokers(name));
+            LOG.info("Due to task.on.differ.node, push task-{} to {}:{}", task, worker.getHostname(), worker.getPort());
             pushTaskToWorker(task, name, worker);
         }
         return ret;
     }
 
-    private Map<String, List<ResourceWorkerSlot>> buildSupervisorToWorker(
-            List<ResourceWorkerSlot> workers) {
-        Map<String, List<ResourceWorkerSlot>> supervisorToWorker =
-                new HashMap<String, List<ResourceWorkerSlot>>();
+    private Map<String, List<ResourceWorkerSlot>> buildSupervisorToWorker(List<ResourceWorkerSlot> workers) {
+        Map<String, List<ResourceWorkerSlot>> supervisorToWorker = new HashMap<String, List<ResourceWorkerSlot>>();
         for (ResourceWorkerSlot worker : workers) {
-            if (worker.getTasks() == null || worker.getTasks().size() == 0) {
-                List<ResourceWorkerSlot> supervisor =
-                        supervisorToWorker.get(worker.getNodeId());
-                if (supervisor == null) {
-                    supervisor = new ArrayList<ResourceWorkerSlot>();
-                    supervisorToWorker.put(worker.getNodeId(), supervisor);
-                }
-                supervisor.add(worker);
-            } else {
-                assignments.add(worker);
+            List<ResourceWorkerSlot> supervisor = supervisorToWorker.get(worker.getNodeId());
+            if (supervisor == null) {
+                supervisor = new ArrayList<ResourceWorkerSlot>();
+                supervisorToWorker.put(worker.getNodeId(), supervisor);
             }
+            supervisor.add(worker);
         }
-        this.workerNum = workers.size() - assignments.size();
+        this.workerNum = workers.size();
         return supervisorToWorker;
     }
 
-    private ResourceWorkerSlot chooseWorker(String name,
-            List<ResourceWorkerSlot> workers) {
-        List<ResourceWorkerSlot> result =
-                componentSelector.select(workers, name);
+    private ResourceWorkerSlot chooseWorker(String name, List<ResourceWorkerSlot> workers) {
+        List<ResourceWorkerSlot> result = componentSelector.select(workers, name);
         result = totalTaskNumSelector.select(result, name);
-        if (name.equals(TaskScheduler.ACKER_NAME))
+        if (Common.isSystemComponent(name))
             return result.iterator().next();
         result = inputComponentSelector.select(result, name);
         return result.iterator().next();
     }
 
-    private void pushTaskToWorker(Integer task, String name,
-            ResourceWorkerSlot worker) {
+    private void pushTaskToWorker(Integer task, String name, ResourceWorkerSlot worker) {
         LOG.debug("Push task-" + task + " to worker-" + worker.getPort());
         int taskNum = updateAssignedTasksOfWorker(task, worker);
 
+        removeWorkerFromSrcPool(taskNum, worker);
+
+        updateComponentsNumOfWorker(name, worker);
+    }
+
+    private int updateAssignedTasksOfWorker(Integer task, ResourceWorkerSlot worker) {
+        int ret = 0;
+        Set<Integer> tasks = worker.getTasks();
+        if (tasks == null) {
+            tasks = new HashSet<Integer>();
+            worker.setTasks(tasks);
+        }
+        tasks.add(task);
+
+        ret = taskContext.getWorkerToTaskNum().get(worker);
+        taskContext.getWorkerToTaskNum().put(worker, ++ret);
+        return ret;
+    }
+
+    /*
+     * Remove the worker from the source worker pool if it has been assigned enough tasks.
+     */
+    private Set<ResourceWorkerSlot> removeWorkerFromSrcPool(int taskNum, ResourceWorkerSlot worker) {
+        Set<ResourceWorkerSlot> ret = new HashSet<ResourceWorkerSlot>();
+
         if (leftTaskNum <= 0) {
-            if (taskNum == avgTaskNum) {
+            if (taskNum >= avgTaskNum) {
                 taskContext.getWorkerToTaskNum().remove(worker);
                 assignments.add(worker);
+                ret.add(worker);
             }
         } else {
-            if (taskNum == (avgTaskNum + 1)) {
+            if (taskNum > avgTaskNum) {
                 taskContext.getWorkerToTaskNum().remove(worker);
-                leftTaskNum--;
+                leftTaskNum = leftTaskNum - (taskNum - avgTaskNum);
                 assignments.add(worker);
+                ret.add(worker);
             }
             if (leftTaskNum <= 0) {
-                List<ResourceWorkerSlot> needDelete =
-                        new ArrayList<ResourceWorkerSlot>();
-                for (Entry<ResourceWorkerSlot, Integer> entry : taskContext
-                        .getWorkerToTaskNum().entrySet()) {
+                List<ResourceWorkerSlot> needDelete = new ArrayList<ResourceWorkerSlot>();
+                for (Entry<ResourceWorkerSlot, Integer> entry : taskContext.getWorkerToTaskNum().entrySet()) {
                     if (entry.getValue() == avgTaskNum)
                         needDelete.add(entry.getKey());
                 }
                 for (ResourceWorkerSlot workerToDelete : needDelete) {
                     taskContext.getWorkerToTaskNum().remove(workerToDelete);
                     assignments.add(workerToDelete);
+                    ret.add(workerToDelete);
                 }
             }
         }
 
-        updateComponentsNumOfWorker(name, worker);
-    }
-
-    private int updateAssignedTasksOfWorker(Integer task,
-            ResourceWorkerSlot worker) {
-        int ret = 0;
-        Set<Integer> tasks = worker.getTasks();
-        if (tasks == null) {
-            tasks = new HashSet<Integer>();
-            worker.setTasks(tasks);
-        }
-        tasks.add(task);
-
-        ret = taskContext.getWorkerToTaskNum().get(worker);
-        taskContext.getWorkerToTaskNum().put(worker, ++ret);
         return ret;
     }
 
-    private void updateComponentsNumOfWorker(String name,
-            ResourceWorkerSlot worker) {
-        Map<String, Integer> components =
-                taskContext.getWorkerToComponentNum().get(worker);
+    private void updateComponentsNumOfWorker(String name, ResourceWorkerSlot worker) {
+        Map<String, Integer> components = taskContext.getWorkerToComponentNum().get(worker);
         if (components == null) {
             components = new HashMap<String, Integer>();
             taskContext.getWorkerToComponentNum().put(worker, components);
@@ -308,11 +388,9 @@ public class TaskScheduler {
         if (taskNum >= 0 && workerNum > 0) {
             this.avgTaskNum = taskNum / workerNum;
             this.leftTaskNum = taskNum % workerNum;
-            LOG.debug("avgTaskNum=" + avgTaskNum + ", leftTaskNum="
-                    + leftTaskNum);
+            LOG.debug("avgTaskNum=" + avgTaskNum + ", leftTaskNum=" + leftTaskNum);
         } else {
-            LOG.debug("Illegal parameters, taskNum=" + taskNum + ", workerNum="
-                    + workerNum);
+            LOG.debug("Illegal parameters, taskNum=" + taskNum + ", workerNum=" + workerNum);
         }
     }
 
@@ -320,15 +398,12 @@ public class TaskScheduler {
         List<ResourceWorkerSlot> workers = new ArrayList<ResourceWorkerSlot>();
         workers.addAll(taskContext.getWorkerToTaskNum().keySet());
 
-        for (Entry<String, List<ResourceWorkerSlot>> entry : taskContext
-                .getSupervisorToWorker().entrySet()) {
+        for (Entry<String, List<ResourceWorkerSlot>> entry : taskContext.getSupervisorToWorker().entrySet()) {
             if (taskContext.getComponentNumOnSupervisor(entry.getKey(), name) != 0)
                 workers.removeAll(entry.getValue());
         }
         if (workers.size() == 0)
-            throw new FailedAssignTopologyException(
-                    "there's no enough supervisor for making component: "
-                            + name + " 's tasks on different node");
+            throw new FailedAssignTopologyException("there are not enough supervisors to place component " + name + "'s tasks on different nodes");
         return workers;
     }
 }
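
It is worth spelling out the balancing arithmetic that setTaskNum and removeWorkerFromSrcPool above rely on: avgTaskNum = taskNum / workerNum, leftTaskNum = taskNum % workerNum, and a worker counts as full at avgTaskNum + 1 while leftover tasks remain, otherwise at avgTaskNum. A worked sketch under assumed numbers (10 tasks, 4 workers), not JStorm code:

import java.util.Arrays;

public class TaskBalanceSketch {
    public static void main(String[] args) {
        int taskNum = 10, workerNum = 4;
        int avgTaskNum = taskNum / workerNum;   // 2
        int leftTaskNum = taskNum % workerNum;  // 2

        int[] perWorker = new int[workerNum];
        for (int w = 0; w < workerNum; w++) {
            // a worker is "full" at avgTaskNum + 1 while leftover tasks remain,
            // otherwise at avgTaskNum, the same rule isTaskFullForWorker applies
            perWorker[w] = avgTaskNum + (leftTaskNum > 0 ? 1 : 0);
            if (leftTaskNum > 0) leftTaskNum--;
        }
        System.out.println(Arrays.toString(perWorker)); // [3, 3, 2, 2]
    }
}

So two workers end up with 3 tasks and the other two with 2, the even-split-plus-remainder distribution the class javadoc describes.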

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/WorkerScheduler.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/WorkerScheduler.java b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/WorkerScheduler.java
index c85d723..08c4730 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/WorkerScheduler.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/WorkerScheduler.java
@@ -28,6 +28,7 @@ import backtype.storm.Config;
 
 import com.alibaba.jstorm.client.ConfigExtension;
 import com.alibaba.jstorm.client.WorkerAssignment;
+import com.alibaba.jstorm.cluster.Common;
 import com.alibaba.jstorm.daemon.supervisor.SupervisorInfo;
 import com.alibaba.jstorm.schedule.TopologyAssignContext;
 import com.alibaba.jstorm.utils.FailedAssignTopologyException;
@@ -35,353 +36,369 @@ import com.alibaba.jstorm.utils.NetWorkUtils;
 
 public class WorkerScheduler {
 
-    public static Logger LOG = LoggerFactory.getLogger(WorkerScheduler.class);
-
-    private static WorkerScheduler instance;
-
-    private WorkerScheduler() {
-
-    }
-
-    public static WorkerScheduler getInstance() {
-        if (instance == null) {
-            instance = new WorkerScheduler();
-        }
-        return instance;
-    }
-
-    public List<ResourceWorkerSlot> getAvailableWorkers(
-            DefaultTopologyAssignContext context, Set<Integer> needAssign,
-            int num) {
-        int workersNum = getWorkersNum(context, num);
-        if (workersNum == 0) {
-            throw new FailedAssignTopologyException("there's no enough worker");
-        }
-        List<ResourceWorkerSlot> assignedWorkers =
-                new ArrayList<ResourceWorkerSlot>();
-        // userdefine assignments
-        getRightWorkers(
-                context,
-                needAssign,
-                assignedWorkers,
-                workersNum,
-                getUserDefineWorkers(context, ConfigExtension
-                        .getUserDefineAssignment(context.getStormConf())));
-        // old assignments
-        if (ConfigExtension.isUseOldAssignment(context.getStormConf())) {
-            getRightWorkers(context, needAssign, assignedWorkers, workersNum,
-                    context.getOldWorkers());
-        } else if (context.getAssignType() == TopologyAssignContext.ASSIGN_TYPE_REBALANCE
-                && context.isReassign() == false) {
-            int cnt = 0;
-            for (ResourceWorkerSlot worker : context.getOldWorkers()) {
-                if (cnt < workersNum) {
-                    ResourceWorkerSlot resFreeWorker = new ResourceWorkerSlot();
-                    resFreeWorker.setPort(worker.getPort());
-                    resFreeWorker.setHostname(worker.getHostname());
-                    resFreeWorker.setNodeId(worker.getNodeId());
-                    assignedWorkers.add(resFreeWorker);
-                    cnt++;
-                } else {
-                    break;
-                }
-            }
-        }
-        int defaultWorkerNum =
-                Math.min(workersNum - assignedWorkers.size(), needAssign.size());
-        LOG.info("Get workers from user define and old assignments: "
-                + assignedWorkers);
-        for (int i = 0; i < defaultWorkerNum; i++) {
-            assignedWorkers.add(new ResourceWorkerSlot());
-        }
-        List<SupervisorInfo> isolationSupervisors =
-                this.getIsolationSupervisors(context);
-        if (isolationSupervisors.size() != 0) {
-            putAllWorkerToSupervisor(assignedWorkers,
-                    getResAvailSupervisors(isolationSupervisors));
-        } else {
-            putAllWorkerToSupervisor(assignedWorkers,
-                    getResAvailSupervisors(context.getCluster()));
-        }
-        this.setAllWorkerMemAndCpu(context.getStormConf(), assignedWorkers);
-        LOG.info("Assigned workers=" + assignedWorkers);
-        return assignedWorkers;
-    }
-
-    private void setAllWorkerMemAndCpu(Map conf,
-            List<ResourceWorkerSlot> assignedWorkers) {
-        long defaultSize = ConfigExtension.getMemSizePerWorker(conf);
-        int defaultCpu = ConfigExtension.getCpuSlotPerWorker(conf);
-        for (ResourceWorkerSlot worker : assignedWorkers) {
-            if (worker.getMemSize() <= 0)
-                worker.setMemSize(defaultSize);
-            if (worker.getCpu() <= 0)
-                worker.setCpu(defaultCpu);
-        }
-    }
-
-    private void putAllWorkerToSupervisor(
-            List<ResourceWorkerSlot> assignedWorkers,
-            List<SupervisorInfo> supervisors) {
-        for (ResourceWorkerSlot worker : assignedWorkers) {
-            if (worker.getHostname() != null) {
-                for (SupervisorInfo supervisor : supervisors) {
-                    if (NetWorkUtils.equals(supervisor.getHostName(),
-                            worker.getHostname())
-                            && supervisor.getAvailableWorkerPorts().size() > 0) {
-                        putWorkerToSupervisor(supervisor, worker);
-                        break;
-                    }
-                }
-            }
-        }
-        supervisors = getResAvailSupervisors(supervisors);
-        Collections.sort(supervisors, new Comparator<SupervisorInfo>() {
-
-            @Override
-            public int compare(SupervisorInfo o1, SupervisorInfo o2) {
-                // TODO Auto-generated method stub
-                return -NumberUtils.compare(o1.getAvailableWorkerPorts().size(), o2
-                        .getAvailableWorkerPorts().size());
-            }
-
-        });
-        putWorkerToSupervisor(assignedWorkers, supervisors);
-    }
-
-    private void putWorkerToSupervisor(SupervisorInfo supervisor,
-            ResourceWorkerSlot worker) {
-        int port = worker.getPort();
-        if (!supervisor.getAvailableWorkerPorts().contains(worker.getPort())) {
-            port = supervisor.getAvailableWorkerPorts().iterator().next();
-        }
-        worker.setPort(port);
-        supervisor.getAvailableWorkerPorts().remove(port);
-        worker.setNodeId(supervisor.getSupervisorId());
-    }
-
-    private void putWorkerToSupervisor(
-            List<ResourceWorkerSlot> assignedWorkers,
-            List<SupervisorInfo> supervisors) {
-        int allUsedPorts = 0;
-        for (SupervisorInfo supervisor : supervisors) {
-            int supervisorUsedPorts = supervisor.getWorkerPorts().size()
-                    - supervisor.getAvailableWorkerPorts().size();
-            allUsedPorts = allUsedPorts + supervisorUsedPorts;
-        }
-        // per supervisor should be allocated ports in theory
-        int theoryAveragePorts =
-                (allUsedPorts + assignedWorkers.size()) / supervisors.size()
-                        + 1;
-        // supervisor which use more than theoryAveragePorts ports will be
-        // pushed overLoadSupervisors
-        List<SupervisorInfo> overLoadSupervisors =
-                new ArrayList<SupervisorInfo>();
-        int key = 0;
-        Iterator<ResourceWorkerSlot> iterator = assignedWorkers.iterator();
-        while (iterator.hasNext()) {
-            if (supervisors.size() == 0)
-                break;
-            if (key >= supervisors.size())
-                key = 0;
-            SupervisorInfo supervisor = supervisors.get(key);
-            int supervisorUsedPorts = supervisor.getWorkerPorts().size()
-                    - supervisor.getAvailableWorkerPorts().size();
-            if (supervisorUsedPorts < theoryAveragePorts) {
-                ResourceWorkerSlot worker = iterator.next();
-                if (worker.getNodeId() != null)
-                    continue;
-                worker.setHostname(supervisor.getHostName());
-                worker.setNodeId(supervisor.getSupervisorId());
-                worker.setPort(
-                        supervisor.getAvailableWorkerPorts().iterator().next());
-                supervisor.getAvailableWorkerPorts().remove(worker.getPort());
-                if (supervisor.getAvailableWorkerPorts().size() == 0)
-                    supervisors.remove(supervisor);
-                key++;
-            } else {
-                overLoadSupervisors.add(supervisor);
-                supervisors.remove(supervisor);
-            }
-        }
-        // rest assignedWorkers will be allocate supervisor by deal
-        Collections.sort(overLoadSupervisors, new Comparator<SupervisorInfo>() {
-
-            @Override
-            public int compare(SupervisorInfo o1, SupervisorInfo o2) {
-                // TODO Auto-generated method stub
-                return -NumberUtils.compare(o1.getAvailableWorkerPorts().size(),
-                        o2.getAvailableWorkerPorts().size());
-            }
-
-        });
-        key = 0;
-        while (iterator.hasNext()) {
-            if (overLoadSupervisors.size() == 0)
-                break;
-            if (key >= overLoadSupervisors.size())
-                key = 0;
-            ResourceWorkerSlot worker = iterator.next();
-            if (worker.getNodeId() != null)
-                continue;
-            SupervisorInfo supervisor = overLoadSupervisors.get(key);
-            worker.setHostname(supervisor.getHostName());
-            worker.setNodeId(supervisor.getSupervisorId());
-            worker.setPort(
-                    supervisor.getAvailableWorkerPorts().iterator().next());
-            supervisor.getAvailableWorkerPorts().remove(worker.getPort());
-            if (supervisor.getAvailableWorkerPorts().size() == 0)
-                overLoadSupervisors.remove(supervisor);
-            key++;
-        }
-    }
-
-    private void getRightWorkers(DefaultTopologyAssignContext context,
-            Set<Integer> needAssign, List<ResourceWorkerSlot> assignedWorkers,
-            int workersNum, Collection<ResourceWorkerSlot> workers) {
-        Set<Integer> assigned = new HashSet<Integer>();
-        List<ResourceWorkerSlot> users = new ArrayList<ResourceWorkerSlot>();
-        if (workers == null)
-            return;
-        for (ResourceWorkerSlot worker : workers) {
-            boolean right = true;
-            Set<Integer> tasks = worker.getTasks();
-            if (tasks == null)
-                continue;
-            for (Integer task : tasks) {
-                if (!needAssign.contains(task) || assigned.contains(task)) {
-                    right = false;
-                    break;
-                }
-            }
-            if (right) {
-                assigned.addAll(tasks);
-                users.add(worker);
-            }
-        }
-        if (users.size() + assignedWorkers.size() > workersNum) {
-            return;
-        }
-
-        if (users.size() + assignedWorkers.size() == workersNum
-                && assigned.size() != needAssign.size()) {
-            return;
-        }
-        assignedWorkers.addAll(users);
-        needAssign.removeAll(assigned);
-    }
-
-    private int getWorkersNum(DefaultTopologyAssignContext context,
-            int workersNum) {
-        Map<String, SupervisorInfo> supervisors = context.getCluster();
-        List<SupervisorInfo> isolationSupervisors =
-                this.getIsolationSupervisors(context);
-        int slotNum = 0;
-
-        if (isolationSupervisors.size() != 0) {
-            for (SupervisorInfo superivsor : isolationSupervisors) {
-                slotNum = slotNum + superivsor.getAvailableWorkerPorts().size();
-            }
-            return Math.min(slotNum, workersNum);
-        }
-        for (Entry<String, SupervisorInfo> entry : supervisors.entrySet()) {
-            slotNum = slotNum + entry.getValue().getAvailableWorkerPorts().size();
-        }
-        return Math.min(slotNum, workersNum);
-    }
-
-    /**
-     * @param context
-     * @param workers
-     * @return
-     */
-    private List<ResourceWorkerSlot> getUserDefineWorkers(
-            DefaultTopologyAssignContext context, List<WorkerAssignment> workers) {
-        List<ResourceWorkerSlot> ret = new ArrayList<ResourceWorkerSlot>();
-        if (workers == null)
-            return ret;
-        Map<String, List<Integer>> componentToTask =
-                (HashMap<String, List<Integer>>) ((HashMap<String, List<Integer>>) context
-                        .getComponentTasks()).clone();
-        if (context.getAssignType() != context.ASSIGN_TYPE_NEW) {
-            checkUserDefineWorkers(context, workers,
-                    context.getTaskToComponent());
-        }
-        for (WorkerAssignment worker : workers) {
-            ResourceWorkerSlot workerSlot =
-                    new ResourceWorkerSlot(worker, componentToTask);
-            if (workerSlot.getTasks().size() != 0) {
-                ret.add(workerSlot);
-            }
-        }
-        return ret;
-    }
-
-    private void checkUserDefineWorkers(DefaultTopologyAssignContext context,
-            List<WorkerAssignment> workers, Map<Integer, String> taskToComponent) {
-        Set<ResourceWorkerSlot> unstoppedWorkers =
-                context.getUnstoppedWorkers();
-        List<WorkerAssignment> re = new ArrayList<WorkerAssignment>();
-        for (WorkerAssignment worker : workers) {
-            for (ResourceWorkerSlot unstopped : unstoppedWorkers) {
-                if (unstopped
-                        .compareToUserDefineWorker(worker, taskToComponent))
-                    re.add(worker);
-            }
-        }
-        workers.removeAll(re);
-
-    }
-
-    private List<SupervisorInfo> getResAvailSupervisors(
-            Map<String, SupervisorInfo> supervisors) {
-        List<SupervisorInfo> availableSupervisors =
-                new ArrayList<SupervisorInfo>();
-        for (Entry<String, SupervisorInfo> entry : supervisors.entrySet()) {
-            SupervisorInfo supervisor = entry.getValue();
-            if (supervisor.getAvailableWorkerPorts().size() > 0)
-                availableSupervisors.add(entry.getValue());
-        }
-        return availableSupervisors;
-    }
-
-    private List<SupervisorInfo> getResAvailSupervisors(
-            List<SupervisorInfo> supervisors) {
-        List<SupervisorInfo> availableSupervisors =
-                new ArrayList<SupervisorInfo>();
-        for (SupervisorInfo supervisor : supervisors) {
-            if (supervisor.getAvailableWorkerPorts().size() > 0)
-                availableSupervisors.add(supervisor);
-        }
-        return availableSupervisors;
-    }
-
-    private List<SupervisorInfo> getIsolationSupervisors(
-            DefaultTopologyAssignContext context) {
-        List<String> isolationHosts =
-                (List<String>) context.getStormConf().get(
-                        Config.ISOLATION_SCHEDULER_MACHINES);
-        LOG.info("Isolation machines: " + isolationHosts);
-        if (isolationHosts == null)
-            return new ArrayList<SupervisorInfo>();
-        List<SupervisorInfo> isolationSupervisors =
-                new ArrayList<SupervisorInfo>();
-        for (Entry<String, SupervisorInfo> entry : context.getCluster()
-                .entrySet()) {
-            if (containTargetHost(isolationHosts, entry.getValue()
-                    .getHostName())) {
-                isolationSupervisors.add(entry.getValue());
-            }
-        }
-        return isolationSupervisors;
-    }
-
-    private boolean containTargetHost(Collection<String> hosts, String target) {
-        for (String host : hosts) {
-            if (NetWorkUtils.equals(host, target) == true) {
-                return true;
-            }
-        }
-        return false;
-    }
+	public static Logger LOG = LoggerFactory.getLogger(WorkerScheduler.class);
+
+	private static WorkerScheduler instance;
+
+	private WorkerScheduler() {
+
+	}
+
+	public static WorkerScheduler getInstance() {
+		if (instance == null) {
+			instance = new WorkerScheduler();
+		}
+		return instance;
+	}
+
+	public List<ResourceWorkerSlot> getAvailableWorkers(
+			DefaultTopologyAssignContext context, Set<Integer> needAssign,
+			int allocWorkerNum) {
+		int workersNum = getAvailableWorkersNum(context);
+		if (workersNum < allocWorkerNum) {
+			throw new FailedAssignTopologyException(
+					"there are not enough workers. allocWorkerNum="
+							+ allocWorkerNum + ", availableWorkerNum="
+							+ workersNum);
+		}
+		workersNum = allocWorkerNum;
+		List<ResourceWorkerSlot> assignedWorkers = new ArrayList<ResourceWorkerSlot>();
+		// user-defined assignments; but don't try to use custom scheduling for
+		// TM bolts for now.
+		getRightWorkers(
+				context,
+				needAssign,
+				assignedWorkers,
+				workersNum,
+				getUserDefineWorkers(context, ConfigExtension
+						.getUserDefineAssignment(context.getStormConf())));
+
+		// old assignments
+		if (ConfigExtension.isUseOldAssignment(context.getStormConf())) {
+			getRightWorkers(context, needAssign, assignedWorkers, workersNum,
+					context.getOldWorkers());
+		} else if (context.getAssignType() == TopologyAssignContext.ASSIGN_TYPE_REBALANCE
+				&& !context.isReassign()) {
+			int cnt = 0;
+			for (ResourceWorkerSlot worker : context.getOldWorkers()) {
+				if (cnt < workersNum) {
+					ResourceWorkerSlot resFreeWorker = new ResourceWorkerSlot();
+					resFreeWorker.setPort(worker.getPort());
+					resFreeWorker.setHostname(worker.getHostname());
+					resFreeWorker.setNodeId(worker.getNodeId());
+					assignedWorkers.add(resFreeWorker);
+					cnt++;
+				} else {
+					break;
+				}
+			}
+		}
+		// count the remaining topology master (TM) tasks
+		int workersForSingleTM = 0;
+		if (context.getAssignSingleWorkerForTM()) {
+			for (Integer taskId : needAssign) {
+				String componentName = context.getTaskToComponent().get(taskId);
+				if (componentName.equals(Common.TOPOLOGY_MASTER_COMPONENT_ID)) {
+					workersForSingleTM++;
+				}
+			}
+		}
+
+		LOG.info("Get workers from user define and old assignments: "
+				+ assignedWorkers);
+
+		int restWorkerNum = workersNum - assignedWorkers.size();
+		if (restWorkerNum < 0)
+			throw new FailedAssignTopologyException(
+					"Too many workers are needed for user-defined or old assignments. workersNum="
+							+ workersNum + ", assignedWorkersNum="
+							+ assignedWorkers.size());
+
+		for (int i = 0; i < restWorkerNum; i++) {
+			assignedWorkers.add(new ResourceWorkerSlot());
+		}
+		List<SupervisorInfo> isolationSupervisors = this
+				.getIsolationSupervisors(context);
+		if (isolationSupervisors.size() != 0) {
+			putAllWorkerToSupervisor(assignedWorkers,
+					getResAvailSupervisors(isolationSupervisors));
+		} else {
+			putAllWorkerToSupervisor(assignedWorkers,
+					getResAvailSupervisors(context.getCluster()));
+		}
+		this.setAllWorkerMemAndCpu(context.getStormConf(), assignedWorkers);
+		LOG.info("Assigned workers=" + assignedWorkers);
+		return assignedWorkers;
+	}
+
+	private void setAllWorkerMemAndCpu(Map conf,
+			List<ResourceWorkerSlot> assignedWorkers) {
+		long defaultSize = ConfigExtension.getMemSizePerWorker(conf);
+		int defaultCpu = ConfigExtension.getCpuSlotPerWorker(conf);
+		for (ResourceWorkerSlot worker : assignedWorkers) {
+			if (worker.getMemSize() <= 0)
+				worker.setMemSize(defaultSize);
+			if (worker.getCpu() <= 0)
+				worker.setCpu(defaultCpu);
+		}
+	}
+
+	private void putAllWorkerToSupervisor(
+			List<ResourceWorkerSlot> assignedWorkers,
+			List<SupervisorInfo> supervisors) {
+		for (ResourceWorkerSlot worker : assignedWorkers) {
+			if (worker.getHostname() != null) {
+				for (SupervisorInfo supervisor : supervisors) {
+					if (NetWorkUtils.equals(supervisor.getHostName(),
+							worker.getHostname())
+							&& supervisor.getAvailableWorkerPorts().size() > 0) {
+						putWorkerToSupervisor(supervisor, worker);
+						break;
+					}
+				}
+			}
+		}
+		supervisors = getResAvailSupervisors(supervisors);
+		Collections.sort(supervisors, new Comparator<SupervisorInfo>() {
+
+			@Override
+			public int compare(SupervisorInfo o1, SupervisorInfo o2) {
+				// supervisors with the most available worker ports first (descending)
+				return -NumberUtils.compare(
+						o1.getAvailableWorkerPorts().size(), o2
+								.getAvailableWorkerPorts().size());
+			}
+
+		});
+		putWorkerToSupervisor(assignedWorkers, supervisors);
+	}
+
+	private void putWorkerToSupervisor(SupervisorInfo supervisor,
+			ResourceWorkerSlot worker) {
+		int port = worker.getPort();
+		if (!supervisor.getAvailableWorkerPorts().contains(worker.getPort())) {
+			port = supervisor.getAvailableWorkerPorts().iterator().next();
+		}
+		worker.setPort(port);
+		supervisor.getAvailableWorkerPorts().remove(port);
+		worker.setNodeId(supervisor.getSupervisorId());
+	}
+
+	private void putWorkerToSupervisor(
+			List<ResourceWorkerSlot> assignedWorkers,
+			List<SupervisorInfo> supervisors) {
+		int allUsedPorts = 0;
+		for (SupervisorInfo supervisor : supervisors) {
+			int supervisorUsedPorts = supervisor.getWorkerPorts().size()
+					- supervisor.getAvailableWorkerPorts().size();
+			allUsedPorts = allUsedPorts + supervisorUsedPorts;
+		}
+		// the number of ports each supervisor should be allocated, in theory
+		int theoryAveragePorts = (allUsedPorts + assignedWorkers.size())
+				/ supervisors.size() + 1;
+		// supervisors which use more than theoryAveragePorts ports will be
+		// pushed to overLoadSupervisors
+		List<SupervisorInfo> overLoadSupervisors = new ArrayList<SupervisorInfo>();
+		int key = 0;
+		Iterator<ResourceWorkerSlot> iterator = assignedWorkers.iterator();
+		while (iterator.hasNext()) {
+			if (supervisors.size() == 0)
+				break;
+			if (key >= supervisors.size())
+				key = 0;
+			SupervisorInfo supervisor = supervisors.get(key);
+			int supervisorUsedPorts = supervisor.getWorkerPorts().size()
+					- supervisor.getAvailableWorkerPorts().size();
+			if (supervisorUsedPorts < theoryAveragePorts) {
+				ResourceWorkerSlot worker = iterator.next();
+				if (worker.getNodeId() != null)
+					continue;
+				worker.setHostname(supervisor.getHostName());
+				worker.setNodeId(supervisor.getSupervisorId());
+				worker.setPort(supervisor.getAvailableWorkerPorts().iterator()
+						.next());
+				supervisor.getAvailableWorkerPorts().remove(worker.getPort());
+				if (supervisor.getAvailableWorkerPorts().size() == 0)
+					supervisors.remove(supervisor);
+				key++;
+			} else {
+				overLoadSupervisors.add(supervisor);
+				supervisors.remove(supervisor);
+			}
+		}
+		// the remaining assignedWorkers will be allocated to the overloaded supervisors in turn
+		Collections.sort(overLoadSupervisors, new Comparator<SupervisorInfo>() {
+
+			@Override
+			public int compare(SupervisorInfo o1, SupervisorInfo o2) {
+				// supervisors with the most available worker ports first (descending)
+				return -NumberUtils.compare(
+						o1.getAvailableWorkerPorts().size(), o2
+								.getAvailableWorkerPorts().size());
+			}
+
+		});
+		key = 0;
+		while (iterator.hasNext()) {
+			if (overLoadSupervisors.size() == 0)
+				break;
+			if (key >= overLoadSupervisors.size())
+				key = 0;
+			ResourceWorkerSlot worker = iterator.next();
+			if (worker.getNodeId() != null)
+				continue;
+			SupervisorInfo supervisor = overLoadSupervisors.get(key);
+			worker.setHostname(supervisor.getHostName());
+			worker.setNodeId(supervisor.getSupervisorId());
+			worker.setPort(supervisor.getAvailableWorkerPorts().iterator()
+					.next());
+			supervisor.getAvailableWorkerPorts().remove(worker.getPort());
+			if (supervisor.getAvailableWorkerPorts().size() == 0)
+				overLoadSupervisors.remove(supervisor);
+			key++;
+		}
+	}
+
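+	/**
+	 * Reuse workers from the old or user-defined assignment whose tasks all
+	 * still need assignment (each task claimed at most once). Kept workers are
+	 * appended to assignedWorkers and their tasks removed from needAssign; if
+	 * reusing them would exceed workersNum, none are reused.
+	 */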
+	private void getRightWorkers(DefaultTopologyAssignContext context,
+			Set<Integer> needAssign, List<ResourceWorkerSlot> assignedWorkers,
+			int workersNum, Collection<ResourceWorkerSlot> workers) {
+		Set<Integer> assigned = new HashSet<Integer>();
+		List<ResourceWorkerSlot> users = new ArrayList<ResourceWorkerSlot>();
+		if (workers == null)
+			return;
+		for (ResourceWorkerSlot worker : workers) {
+			boolean right = true;
+			Set<Integer> tasks = worker.getTasks();
+			if (tasks == null)
+				continue;
+			for (Integer task : tasks) {
+				if (!needAssign.contains(task) || assigned.contains(task)) {
+					right = false;
+					break;
+				}
+			}
+			if (right) {
+				assigned.addAll(tasks);
+				users.add(worker);
+			}
+		}
+		if (users.size() + assignedWorkers.size() > workersNum) {
+			LOG.warn(
+					"Not enough workers for user-defined scheduler / keeping old assignment, userDefineWorkers={}, assignedWorkers={}, workersNum={}",
+					users, assignedWorkers, workersNum);
+			return;
+		}
+
+		assignedWorkers.addAll(users);
+		needAssign.removeAll(assigned);
+	}
+
+	private int getAvailableWorkersNum(DefaultTopologyAssignContext context) {
+		Map<String, SupervisorInfo> supervisors = context.getCluster();
+		List<SupervisorInfo> isolationSupervisors = this
+				.getIsolationSupervisors(context);
+		int slotNum = 0;
+
+		if (isolationSupervisors.size() != 0) {
+			for (SupervisorInfo supervisor : isolationSupervisors) {
+				slotNum = slotNum + supervisor.getAvailableWorkerPorts().size();
+			}
+		} else {
+			for (Entry<String, SupervisorInfo> entry : supervisors.entrySet()) {
+				slotNum = slotNum
+						+ entry.getValue().getAvailableWorkerPorts().size();
+			}
+		}
+		return slotNum;
+	}
+
+	/**
+	 * Convert user-defined worker assignments into ResourceWorkerSlots,
+	 * binding each slot to the tasks of the components it declares.
+	 */
+	private List<ResourceWorkerSlot> getUserDefineWorkers(
+			DefaultTopologyAssignContext context, List<WorkerAssignment> workers) {
+		List<ResourceWorkerSlot> ret = new ArrayList<ResourceWorkerSlot>();
+		if (workers == null)
+			return ret;
+		Map<String, List<Integer>> componentToTask = (HashMap<String, List<Integer>>) ((HashMap<String, List<Integer>>) context
+				.getComponentTasks()).clone();
+		if (context.getAssignType() != DefaultTopologyAssignContext.ASSIGN_TYPE_NEW) {
+			checkUserDefineWorkers(context, workers,
+					context.getTaskToComponent());
+		}
+		for (WorkerAssignment worker : workers) {
+			ResourceWorkerSlot workerSlot = new ResourceWorkerSlot(worker,
+					componentToTask);
+			if (workerSlot.getTasks().size() != 0) {
+				ret.add(workerSlot);
+			}
+		}
+		return ret;
+	}
+
+	private void checkUserDefineWorkers(DefaultTopologyAssignContext context,
+			List<WorkerAssignment> workers, Map<Integer, String> taskToComponent) {
+		Set<ResourceWorkerSlot> unstoppedWorkers = context
+				.getUnstoppedWorkers();
+		List<WorkerAssignment> re = new ArrayList<WorkerAssignment>();
+		for (WorkerAssignment worker : workers) {
+			for (ResourceWorkerSlot unstopped : unstoppedWorkers) {
+				if (unstopped
+						.compareToUserDefineWorker(worker, taskToComponent))
+					re.add(worker);
+			}
+		}
+		workers.removeAll(re);
+
+	}
+
+	private List<SupervisorInfo> getResAvailSupervisors(
+			Map<String, SupervisorInfo> supervisors) {
+		List<SupervisorInfo> availableSupervisors = new ArrayList<SupervisorInfo>();
+		for (Entry<String, SupervisorInfo> entry : supervisors.entrySet()) {
+			SupervisorInfo supervisor = entry.getValue();
+			if (supervisor.getAvailableWorkerPorts().size() > 0)
+				availableSupervisors.add(entry.getValue());
+		}
+		return availableSupervisors;
+	}
+
+	private List<SupervisorInfo> getResAvailSupervisors(
+			List<SupervisorInfo> supervisors) {
+		List<SupervisorInfo> availableSupervisors = new ArrayList<SupervisorInfo>();
+		for (SupervisorInfo supervisor : supervisors) {
+			if (supervisor.getAvailableWorkerPorts().size() > 0)
+				availableSupervisors.add(supervisor);
+		}
+		return availableSupervisors;
+	}
+
+	private List<SupervisorInfo> getIsolationSupervisors(
+			DefaultTopologyAssignContext context) {
+		List<String> isolationHosts = (List<String>) context.getStormConf()
+				.get(Config.ISOLATION_SCHEDULER_MACHINES);
+		LOG.info("Isolation machines: " + isolationHosts);
+		if (isolationHosts == null)
+			return new ArrayList<SupervisorInfo>();
+		List<SupervisorInfo> isolationSupervisors = new ArrayList<SupervisorInfo>();
+		for (Entry<String, SupervisorInfo> entry : context.getCluster()
+				.entrySet()) {
+			if (containTargetHost(isolationHosts, entry.getValue()
+					.getHostName())) {
+				isolationSupervisors.add(entry.getValue());
+			}
+		}
+		return isolationSupervisors;
+	}
+
+	private boolean containTargetHost(Collection<String> hosts, String target) {
+		for (String host : hosts) {
+			if (NetWorkUtils.equals(host, target)) {
+				return true;
+			}
+		}
+		return false;
+	}
 }
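
A note on the balancing logic above: everything hinges on one quantity,
theoryAveragePorts = (allUsedPorts + newWorkers) / supervisorCount + 1,
roughly the average per-supervisor port usage once the new workers are
placed. The sketch below reproduces that first, round-robin fill in
isolation; the Sup class and every name in it are illustrative stand-ins,
not JStorm API.

    import java.util.ArrayDeque;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Queue;

    public class AverageFillSketch {

        /** Simplified stand-in for SupervisorInfo: only the port counts matter. */
        static class Sup {
            final String id;
            int usedPorts;
            int availablePorts;

            Sup(String id, int usedPorts, int availablePorts) {
                this.id = id;
                this.usedPorts = usedPorts;
                this.availablePorts = availablePorts;
            }
        }

        public static void main(String[] args) {
            List<Sup> sups = new ArrayList<Sup>();
            sups.add(new Sup("s1", 3, 5));
            sups.add(new Sup("s2", 0, 4));
            sups.add(new Sup("s3", 1, 2));
            int newWorkers = 5;

            int allUsedPorts = 0;
            for (Sup s : sups) {
                allUsedPorts += s.usedPorts; // 3 + 0 + 1 = 4
            }
            // same formula as the scheduler: average usage after placement, plus one
            int theoryAveragePorts = (allUsedPorts + newWorkers) / sups.size() + 1; // (4+5)/3+1 = 4

            Queue<Sup> ring = new ArrayDeque<Sup>(sups);
            int placed = 0;
            while (placed < newWorkers && !ring.isEmpty()) {
                Sup s = ring.poll();
                if (s.availablePorts == 0 || s.usedPorts >= theoryAveragePorts) {
                    continue; // full or overloaded: leaves the ring, like overLoadSupervisors
                }
                s.usedPorts++;
                s.availablePorts--;
                placed++;
                ring.offer(s); // back into the round-robin ring
            }
            System.out.println("placed " + placed + " of " + newWorkers
                    + " workers before any spill-over round");
        }
    }

Supervisors at or above the average simply drop out of the ring, which is
why the scheduler keeps the separate overLoadSupervisors list for the
spill-over round.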

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/Task.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/Task.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/Task.java
index 6481c5e..b0fdc92 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/Task.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/Task.java
@@ -17,31 +17,20 @@
  */
 package com.alibaba.jstorm.task;
 
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import backtype.storm.Config;
 import backtype.storm.messaging.IContext;
 import backtype.storm.serialization.KryoTupleSerializer;
 import backtype.storm.spout.ISpout;
 import backtype.storm.task.IBolt;
 import backtype.storm.task.TopologyContext;
 import backtype.storm.utils.DisruptorQueue;
-import backtype.storm.utils.Utils;
 import backtype.storm.utils.WorkerClassLoader;
 import clojure.lang.Atom;
-
 import com.alibaba.jstorm.callback.AsyncLoopDefaultKill;
 import com.alibaba.jstorm.callback.AsyncLoopThread;
 import com.alibaba.jstorm.callback.RunnableCallback;
 import com.alibaba.jstorm.client.ConfigExtension;
 import com.alibaba.jstorm.cluster.Common;
 import com.alibaba.jstorm.cluster.StormClusterState;
-import com.alibaba.jstorm.cluster.StormZkClusterState;
 import com.alibaba.jstorm.daemon.worker.WorkerData;
 import com.alibaba.jstorm.schedule.Assignment.AssignmentType;
 import com.alibaba.jstorm.task.comm.TaskSendTargets;
@@ -55,29 +44,27 @@ import com.alibaba.jstorm.task.execute.spout.MultipleThreadSpoutExecutors;
 import com.alibaba.jstorm.task.execute.spout.SingleThreadSpoutExecutors;
 import com.alibaba.jstorm.task.execute.spout.SpoutExecutors;
 import com.alibaba.jstorm.task.group.MkGrouper;
-import com.alibaba.jstorm.task.heartbeat.TaskHeartbeatRunable;
-import com.alibaba.jstorm.task.heartbeat.TaskStats;
 import com.alibaba.jstorm.utils.JStormServerUtils;
 import com.alibaba.jstorm.utils.JStormUtils;
-import com.alibaba.jstorm.utils.NetWorkUtils;
-import com.lmax.disruptor.WaitStrategy;
-import com.lmax.disruptor.dsl.ProducerType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
 
 /**
  * Task instance
  * 
  * @author yannian/Longda
- * 
  */
-public class Task {
-
+public class Task implements Runnable {
     private final static Logger LOG = LoggerFactory.getLogger(Task.class);
 
     private Map<Object, Object> stormConf;
 
     private TopologyContext topologyContext;
     private TopologyContext userContext;
-    private String topologyid;
     private IContext context;
 
     private TaskTransfer taskTransfer;
@@ -86,8 +73,9 @@ public class Task {
     private Map<Integer, DisruptorQueue> deserializeQueues;
     private AsyncLoopDefaultKill workHalt;
 
-    private Integer taskid;
-    private String componentid;
+    private String topologyId;
+    private Integer taskId;
+    private String componentId;
     private volatile TaskStatus taskStatus;
     private Atom openOrPrepareWasCalled;
     // running time counter
@@ -97,63 +85,46 @@ public class Task {
     private Object taskObj;
     private TaskBaseMetric taskStats;
     private WorkerData workerData;
-    private String componentType; // "spout" or "bolt"
 
     private TaskSendTargets taskSendTargets;
+    private TaskReportErrorAndDie reportErrorDie;
 
     private boolean isTaskBatchTuple;
+    private TaskShutdownDameon taskShutdownDameon;
 
     @SuppressWarnings("rawtypes")
     public Task(WorkerData workerData, int taskId) throws Exception {
         openOrPrepareWasCalled = new Atom(Boolean.valueOf(false));
 
         this.workerData = workerData;
-        this.topologyContext =
-                workerData.getContextMaker().makeTopologyContext(
-                        workerData.getSysTopology(), taskId,
-                        openOrPrepareWasCalled);
-        this.userContext =
-                workerData.getContextMaker().makeTopologyContext(
-                        workerData.getRawTopology(), taskId,
-                        openOrPrepareWasCalled);
-        this.taskid = taskId;
-        this.componentid = topologyContext.getThisComponentId();
-        this.stormConf =
-                Common.component_conf(workerData.getStormConf(),
-                        topologyContext, componentid);
+        this.topologyContext = workerData.getContextMaker().makeTopologyContext(workerData.getSysTopology(), taskId, openOrPrepareWasCalled);
+        this.userContext = workerData.getContextMaker().makeTopologyContext(workerData.getRawTopology(), taskId, openOrPrepareWasCalled);
+        this.taskId = taskId;
+        this.componentId = topologyContext.getThisComponentId();
+        this.stormConf = Common.component_conf(workerData.getStormConf(), topologyContext, componentId);
 
         this.taskStatus = new TaskStatus();
-        this.taskStats = new TaskBaseMetric(taskId);
 
         this.innerTaskTransfer = workerData.getInnerTaskTransfer();
         this.deserializeQueues = workerData.getDeserializeQueues();
-        this.topologyid = workerData.getTopologyId();
+        this.topologyId = workerData.getTopologyId();
         this.context = workerData.getContext();
         this.workHalt = workerData.getWorkHalt();
-        this.zkCluster =
-                new StormZkClusterState(workerData.getZkClusterstate());
+        this.zkCluster = workerData.getZkCluster();
+        this.taskStats = new TaskBaseMetric(topologyId, componentId, taskId,
+                ConfigExtension.isEnableMetrics(workerData.getStormConf()));
 
-        LOG.info("Begin to deserialize taskObj " + componentid + ":" + taskid);
+        LOG.info("Begin to deserialize taskObj " + componentId + ":" + this.taskId);
 
         WorkerClassLoader.switchThreadContext();
         // get real task object -- spout/bolt/spoutspec
-        this.taskObj =
-                Common.get_task_object(topologyContext.getRawTopology(),
-                        componentid, WorkerClassLoader.getInstance());
+        this.taskObj = Common.get_task_object(topologyContext.getRawTopology(), componentId, WorkerClassLoader.getInstance());
         WorkerClassLoader.restoreThreadContext();
 
         isTaskBatchTuple = ConfigExtension.isTaskBatchTuple(stormConf);
         LOG.info("Transfer/receive in batch mode :" + isTaskBatchTuple);
 
-        LOG.info("Loading task " + componentid + ":" + taskid);
-    }
-
-    private void setComponentType() {
-        if (taskObj instanceof IBolt) {
-            componentType = "bolt";
-        } else if (taskObj instanceof ISpout) {
-            componentType = "spout";
-        }
+        LOG.info("Loading task " + componentId + ":" + this.taskId);
     }
 
     private TaskSendTargets makeSendTargets() {
@@ -161,44 +132,20 @@ public class Task {
 
         // get current task's output
         // <Stream_id,<component, Grouping>>
-        Map<String, Map<String, MkGrouper>> streamComponentGrouper =
-                Common.outbound_components(topologyContext, workerData);
+        Map<String, Map<String, MkGrouper>> streamComponentGrouper = Common.outbound_components(topologyContext, workerData);
 
-        return new TaskSendTargets(stormConf, component,
-                streamComponentGrouper, topologyContext, taskStats);
+        return new TaskSendTargets(stormConf, component, streamComponentGrouper, topologyContext, taskStats);
     }
 
     private void updateSendTargets() {
         if (taskSendTargets != null) {
-            Map<String, Map<String, MkGrouper>> streamComponentGrouper =
-                    Common.outbound_components(topologyContext, workerData);
+            Map<String, Map<String, MkGrouper>> streamComponentGrouper = Common.outbound_components(topologyContext, workerData);
             taskSendTargets.updateStreamCompGrouper(streamComponentGrouper);
         } else {
             LOG.error("taskSendTargets is null when trying to update it.");
         }
     }
 
-    private TaskTransfer mkTaskSending(WorkerData workerData) {
-
-        // sending tuple's serializer
-        KryoTupleSerializer serializer =
-                new KryoTupleSerializer(workerData.getStormConf(),
-                        topologyContext);
-
-        String taskName = JStormServerUtils.getName(componentid, taskid);
-        // Task sending all tuples through this Object
-        TaskTransfer taskTransfer;
-        if (isTaskBatchTuple)
-            taskTransfer =
-                    new TaskBatchTransfer(this, taskName, serializer,
-                            taskStatus, workerData);
-        else
-            taskTransfer =
-                    new TaskTransfer(this, taskName, serializer, taskStatus,
-                            workerData);
-        return taskTransfer;
-    }
-
     public TaskSendTargets echoToSystemBolt() {
         // send "startup" tuple to system bolt
         List<Object> msg = new ArrayList<Object>();
@@ -206,9 +153,7 @@ public class Task {
 
         // create task receive object
         TaskSendTargets sendTargets = makeSendTargets();
-        UnanchoredSend.send(topologyContext, sendTargets, taskTransfer,
-                Common.SYSTEM_STREAM_ID, msg);
-
+        UnanchoredSend.send(topologyContext, sendTargets, taskTransfer, Common.SYSTEM_STREAM_ID, msg);
         return sendTargets;
     }
 
@@ -217,102 +162,90 @@ public class Task {
         if (isOnePending == true) {
             return true;
         }
-
         return ConfigExtension.isSpoutSingleThread(conf);
     }
 
-    public RunnableCallback mk_executors(TaskSendTargets sendTargets,
-            ITaskReportErr report_error) {
+    public BaseExecutors mkExecutor() {
+        BaseExecutors baseExecutor = null;
 
         if (taskObj instanceof IBolt) {
-            return new BoltExecutors(this, (IBolt) taskObj, taskTransfer,
-                    innerTaskTransfer, stormConf, sendTargets, taskStatus,
-                    topologyContext, userContext, taskStats, report_error);
+            baseExecutor = new BoltExecutors(this);
         } else if (taskObj instanceof ISpout) {
             if (isSingleThread(stormConf) == true) {
-                return new SingleThreadSpoutExecutors(this, (ISpout) taskObj,
-                        taskTransfer, innerTaskTransfer, stormConf,
-                        sendTargets, taskStatus, topologyContext, userContext,
-                        taskStats, report_error);
+                baseExecutor = new SingleThreadSpoutExecutors(this);
             } else {
-                return new MultipleThreadSpoutExecutors(this, (ISpout) taskObj,
-                        taskTransfer, innerTaskTransfer, stormConf,
-                        sendTargets, taskStatus, topologyContext, userContext,
-                        taskStats, report_error);
+                baseExecutor = new MultipleThreadSpoutExecutors(this);
             }
         }
-
-        return null;
+
+        return baseExecutor;
     }
 
     /**
      * create executor to receive tuples and run bolt/spout execute function
-     * 
-     * @param puller
-     * @param sendTargets
-     * @return
      */
-    private RunnableCallback mkExecutor(TaskSendTargets sendTargets) {
+    private RunnableCallback prepareExecutor() {
         // create report error callback,
         // in fact it is storm_cluster.report-task-error
-        ITaskReportErr reportError =
-                new TaskReportError(zkCluster, topologyid, taskid);
+        ITaskReportErr reportError = new TaskReportError(zkCluster, topologyId, taskId);
 
         // report error and halt worker
-        TaskReportErrorAndDie reportErrorDie =
-                new TaskReportErrorAndDie(reportError, workHalt);
+        reportErrorDie = new TaskReportErrorAndDie(reportError, workHalt);
+
+        final BaseExecutors baseExecutor = mkExecutor();
 
-        return mk_executors(sendTargets, reportErrorDie);
+        return baseExecutor;
     }
 
     public TaskReceiver mkTaskReceiver() {
-        String taskName = JStormServerUtils.getName(componentid, taskid);
-        TaskReceiver taskReceiver;
+        String taskName = JStormServerUtils.getName(componentId, taskId);
         if (isTaskBatchTuple)
-            taskReceiver =
-                    new TaskBatchReceiver(this, taskid, stormConf,
-                            topologyContext, innerTaskTransfer, taskStatus,
-                            taskName);
+            taskReceiver = new TaskBatchReceiver(this, taskId, stormConf, topologyContext, innerTaskTransfer, taskStatus, taskName);
         else
-            taskReceiver =
-                    new TaskReceiver(this, taskid, stormConf, topologyContext,
-                            innerTaskTransfer, taskStatus, taskName);
-        deserializeQueues.put(taskid, taskReceiver.getDeserializeQueue());
+            taskReceiver = new TaskReceiver(this, taskId, stormConf, topologyContext, innerTaskTransfer, taskStatus, taskName);
+        deserializeQueues.put(taskId, taskReceiver.getDeserializeQueue());
         return taskReceiver;
     }
 
     public TaskShutdownDameon execute() throws Exception {
-        setComponentType();
 
         taskSendTargets = echoToSystemBolt();
 
         // create thread to get tuple from zeroMQ,
         // and pass the tuple to bolt/spout
         taskTransfer = mkTaskSending(workerData);
-        RunnableCallback baseExecutor = mkExecutor(taskSendTargets);
-        AsyncLoopThread executor_threads =
-                new AsyncLoopThread(baseExecutor, false, Thread.MAX_PRIORITY,
-                        true);
+        RunnableCallback baseExecutor = prepareExecutor();
+        AsyncLoopThread executor_threads = new AsyncLoopThread(baseExecutor, false, Thread.MAX_PRIORITY, true);
         taskReceiver = mkTaskReceiver();
 
         List<AsyncLoopThread> allThreads = new ArrayList<AsyncLoopThread>();
         allThreads.add(executor_threads);
 
-        TaskHeartbeatRunable.registerTaskStats(taskid, new TaskStats(
-                componentType, taskStats));
-        LOG.info("Finished loading task " + componentid + ":" + taskid);
+        LOG.info("Finished loading task " + componentId + ":" + taskId);
 
-        return getShutdown(allThreads, taskReceiver.getDeserializeQueue(),
+        taskShutdownDameon = getShutdown(allThreads, taskReceiver.getDeserializeQueue(),
                 baseExecutor);
+        return taskShutdownDameon;
     }
 
-    public TaskShutdownDameon getShutdown(List<AsyncLoopThread> allThreads,
-            DisruptorQueue deserializeQueue, RunnableCallback baseExecutor) {
+    private TaskTransfer mkTaskSending(WorkerData workerData) {
+        // sending tuple's serializer
+        KryoTupleSerializer serializer = new KryoTupleSerializer(workerData.getStormConf(), topologyContext);
 
+        String taskName = JStormServerUtils.getName(componentId, taskId);
+        // Task sending all tuples through this Object
+        TaskTransfer taskTransfer;
+        if (isTaskBatchTuple)
+            taskTransfer = new TaskBatchTransfer(this, taskName, serializer, taskStatus, workerData);
+        else
+            taskTransfer = new TaskTransfer(this, taskName, serializer, taskStatus, workerData);
+        return taskTransfer;
+    }
+
+    public TaskShutdownDameon getShutdown(List<AsyncLoopThread> allThreads, DisruptorQueue deserializeQueue, RunnableCallback baseExecutor) {
         AsyncLoopThread ackerThread = null;
         if (baseExecutor instanceof SpoutExecutors) {
-            ackerThread =
-                    ((SpoutExecutors) baseExecutor).getAckerRunnableThread();
+            ackerThread = ((SpoutExecutors) baseExecutor).getAckerRunnableThread();
 
             if (ackerThread != null) {
                 allThreads.add(ackerThread);
@@ -324,24 +257,30 @@ public class Task {
         AsyncLoopThread serializeThread = taskTransfer.getSerializeThread();
         allThreads.add(serializeThread);
 
-        TaskShutdownDameon shutdown =
-                new TaskShutdownDameon(taskStatus, topologyid, taskid,
-                        allThreads, zkCluster, taskObj);
+        TaskShutdownDameon shutdown = new TaskShutdownDameon(taskStatus, topologyId, taskId, allThreads, zkCluster, taskObj, this);
 
         return shutdown;
     }
 
-    public static TaskShutdownDameon mk_task(WorkerData workerData, int taskId)
-            throws Exception {
+    public TaskShutdownDameon getTaskShutdownDameon() {
+        return taskShutdownDameon;
+    }
 
-        Task t = new Task(workerData, taskId);
+    public void run() {
+        try {
+            taskShutdownDameon = this.execute();
+        } catch (Throwable e) {
+            LOG.error("Failed to init task", e);
+        }
+    }
 
+    public static TaskShutdownDameon mk_task(WorkerData workerData, int taskId) throws Exception {
+        Task t = new Task(workerData, taskId);
         return t.execute();
     }
 
     /**
-     * Update the data which can be changed dynamically e.g. when scale-out of a
-     * task parallelism
+     * Update the data which can be changed dynamically e.g. when scale-out of a task parallelism
      */
     public void updateTaskData() {
         // Only update the local task list of topologyContext here. Because
@@ -359,12 +298,94 @@ public class Task {
     public long getWorkerAssignmentTs() {
         return workerData.getAssignmentTs();
     }
-    
+
     public AssignmentType getWorkerAssignmentType() {
         return workerData.getAssignmentType();
     }
 
     public void unregisterDeserializeQueue() {
-        deserializeQueues.remove(taskid);
+        deserializeQueues.remove(taskId);
+    }
+
+    public String getComponentId() {
+        return componentId;
+    }
+
+    public Integer getTaskId() {
+        return taskId;
+    }
+
+    public DisruptorQueue getExecuteQueue() {
+        return innerTaskTransfer.get(taskId);
+    }
+
+    public DisruptorQueue getDeserializeQueue() {
+        return deserializeQueues.get(taskId);
     }
+
+    public Map<Object, Object> getStormConf() {
+        return stormConf;
+    }
+
+    public TopologyContext getTopologyContext() {
+        return topologyContext;
+    }
+
+    public TopologyContext getUserContext() {
+        return userContext;
+    }
+
+    public TaskTransfer getTaskTransfer() {
+        return taskTransfer;
+    }
+
+    public TaskReceiver getTaskReceiver() {
+        return taskReceiver;
+    }
+
+    public Map<Integer, DisruptorQueue> getInnerTaskTransfer() {
+        return innerTaskTransfer;
+    }
+
+    public Map<Integer, DisruptorQueue> getDeserializeQueues() {
+        return deserializeQueues;
+    }
+
+    public String getTopologyId() {
+        return topologyId;
+    }
+
+    public TaskStatus getTaskStatus() {
+        return taskStatus;
+    }
+
+    public StormClusterState getZkCluster() {
+        return zkCluster;
+    }
+
+    public Object getTaskObj() {
+        return taskObj;
+    }
+
+    public TaskBaseMetric getTaskStats() {
+        return taskStats;
+    }
+
+    public WorkerData getWorkerData() {
+        return workerData;
+    }
+
+    public TaskSendTargets getTaskSendTargets() {
+        return taskSendTargets;
+    }
+
+    public TaskReportErrorAndDie getReportErrorDie() {
+        return reportErrorDie;
+    }
+
+    public boolean isTaskBatchTuple() {
+        return isTaskBatchTuple;
+    }
 }
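
With Task now implementing Runnable (run() calls execute() and stores the
shutdown daemon), a worker can initialize its tasks in parallel on a thread
pool instead of calling the blocking mk_task() one by one. The sketch below
shows that pattern under assumptions: the pool wiring (executor service,
pool size, timeout) is illustrative and not JStorm's actual worker
bootstrap; only the Task methods shown in the diff above are used.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    import com.alibaba.jstorm.daemon.worker.WorkerData;
    import com.alibaba.jstorm.task.Task;
    import com.alibaba.jstorm.task.TaskShutdownDameon;

    public class TaskBootstrapSketch {

        /** Initialize all tasks of a worker in parallel, then collect their shutdown daemons. */
        public static List<TaskShutdownDameon> startAll(WorkerData workerData, List<Integer> taskIds)
                throws Exception {
            ExecutorService pool = Executors.newFixedThreadPool(4);
            List<Task> tasks = new ArrayList<Task>();
            for (Integer id : taskIds) {
                Task task = new Task(workerData, id); // same constructor as above
                tasks.add(task);
                pool.execute(task); // run() calls execute() and keeps the shutdown daemon
            }
            pool.shutdown();
            pool.awaitTermination(60, TimeUnit.SECONDS);

            List<TaskShutdownDameon> daemons = new ArrayList<TaskShutdownDameon>();
            for (Task task : tasks) {
                daemons.add(task.getTaskShutdownDameon()); // null if initialization failed
            }
            return daemons;
        }
    }

Note that run() swallows Throwable and only logs, so a task that failed to
initialize shows up here as a null daemon; real callers would have to check
for that.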

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskBaseMetric.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskBaseMetric.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskBaseMetric.java
index 4c9eb0b..84c6151 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskBaseMetric.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/TaskBaseMetric.java
@@ -17,116 +17,110 @@
  */
 package com.alibaba.jstorm.task;
 
-import java.io.Serializable;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import com.alibaba.jstorm.cluster.Common;
-import com.alibaba.jstorm.common.metric.MetricRegistry;
-import com.alibaba.jstorm.common.metric.window.Metric;
+import com.alibaba.jstorm.common.metric.AsmMetric;
 import com.alibaba.jstorm.metric.JStormMetrics;
 import com.alibaba.jstorm.metric.MetricDef;
+import com.alibaba.jstorm.metric.MetricType;
+import com.alibaba.jstorm.metric.MetricUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.Serializable;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
 
 public class TaskBaseMetric implements Serializable {
-	private static final Logger LOG = LoggerFactory.getLogger(TaskBaseMetric.class);
+    private static final Logger logger = LoggerFactory.getLogger(TaskBaseMetric.class);
+
     private static final long serialVersionUID = -7157987126460293444L;
-    protected MetricRegistry metrics;
+    private String topologyId;
+    private String componentId;
     private int taskId;
+    private boolean enableMetrics;
 
-    public TaskBaseMetric(int taskId) {
-        metrics = JStormMetrics.registerTask(taskId);
+    /**
+     * Local metric cache keyed by taskId + streamId + name, to avoid re-concatenating full metric names on every update.
+     */
+    private static final ConcurrentMap<String, AsmMetric> metricCache = new ConcurrentHashMap<String, AsmMetric>();
+
+    public TaskBaseMetric(String topologyId, String componentId, int taskId, boolean enableMetrics) {
+        this.topologyId = topologyId;
+        this.componentId = componentId;
         this.taskId = taskId;
+        this.enableMetrics = enableMetrics;
+        logger.info("init task base metric, tp id:{}, comp id:{}, task id:{}", topologyId, componentId, taskId);
     }
 
-    public void update(String name, Number value, int type) {
-        Metric metric = metrics.getMetric(name);
-        if (metric == null) {
-            metric = JStormMetrics.Builder.mkInstance(type);
-            try {
-            	/**
-            	 * Here use one hack method to handle competition register metric
-            	 * if duplicated metric, just skip it.
-            	 * 
-            	 * this will improve performance
-            	 */
-            	JStormMetrics.registerTaskMetric(metric, taskId, name);
-            }catch(Exception e) {
-            	LOG.warn("Duplicated metrics of {}, taskId:{}", name, taskId);
-            	return ;
+    public void update(final String streamId, final String name, final Number value, final MetricType metricType,
+                       boolean mergeTopology) {
+        String key = taskId + streamId + name;
+        AsmMetric existingMetric = metricCache.get(key);
+        if (existingMetric == null) {
+            String fullName = MetricUtils.streamMetricName(topologyId, componentId, taskId, streamId, name, metricType);
+            existingMetric = JStormMetrics.getStreamMetric(fullName);
+            if (existingMetric == null) {
+                existingMetric = AsmMetric.Builder.build(metricType);
+                JStormMetrics.registerStreamMetric(fullName, existingMetric, mergeTopology);
             }
-            
+            metricCache.putIfAbsent(key, existingMetric);
         }
-        metric.update(value);
+
+        existingMetric.update(value);
+    }
+
+    public void update(final String streamId, final String name, final Number value, final MetricType metricType) {
+        update(streamId, name, value, metricType, true);
     }
 
     public void send_tuple(String stream, int num_out_tasks) {
-        if (num_out_tasks <= 0) {
-            return;
+        if (enableMetrics && num_out_tasks > 0) {
+            update(stream, MetricDef.EMMITTED_NUM, num_out_tasks, MetricType.COUNTER);
+            update(stream, MetricDef.SEND_TPS, num_out_tasks, MetricType.METER);
         }
-
-        String emmitedName =
-                MetricRegistry.name(MetricDef.EMMITTED_NUM, stream);
-        update(emmitedName, Double.valueOf(num_out_tasks),
-                JStormMetrics.Builder.COUNTER);
-
-        String sendTpsName = MetricRegistry.name(MetricDef.SEND_TPS, stream);
-        update(sendTpsName, Double.valueOf(num_out_tasks),
-                JStormMetrics.Builder.METER);
     }
 
     public void recv_tuple(String component, String stream) {
-
-        String name =
-                MetricRegistry.name(MetricDef.RECV_TPS, component, stream);
-        update(name, Double.valueOf(1), JStormMetrics.Builder.METER);
-
+        if (enableMetrics) {
+            update(stream, AsmMetric.mkName(component, MetricDef.RECV_TPS), 1, MetricType.METER);
+//            update(stream, MetricDef.RECV_TPS, 1, MetricType.METER);
+        }
     }
 
-    public void bolt_acked_tuple(String component, String stream,
-            Double latency_ms) {
+    public void bolt_acked_tuple(String component, String stream, Long latency, Long lifeCycle) {
+        if (enableMetrics) {
+//            update(stream, AsmMetric.mkName(component, MetricDef.ACKED_NUM), 1, MetricType.COUNTER);
+//            update(stream, AsmMetric.mkName(component, MetricDef.PROCESS_LATENCY), latency_ms, MetricType.HISTOGRAM);
+            update(stream, MetricDef.ACKED_NUM, 1, MetricType.COUNTER);
+            update(stream, MetricDef.PROCESS_LATENCY, latency, MetricType.HISTOGRAM, false);
 
-        if (latency_ms == null) {
-            return;
+            if (lifeCycle > 0) {
+                update(stream, AsmMetric.mkName(component, MetricDef.TUPLE_LIEF_CYCLE), lifeCycle, MetricType.HISTOGRAM, false);
+            }
         }
-
-        String ackNumName =
-                MetricRegistry.name(MetricDef.ACKED_NUM, component, stream);
-        update(ackNumName, Double.valueOf(1), JStormMetrics.Builder.COUNTER);
-
-        String processName =
-                MetricRegistry.name(MetricDef.PROCESS_LATENCY, component,
-                        stream);
-        update(processName, latency_ms,
-                JStormMetrics.Builder.HISTOGRAM);
     }
 
     public void bolt_failed_tuple(String component, String stream) {
-
-        String failNumName =
-                MetricRegistry.name(MetricDef.FAILED_NUM, component, stream);
-        update(failNumName, Double.valueOf(1), JStormMetrics.Builder.COUNTER);
+        if (enableMetrics) {
+            //update(stream, AsmMetric.mkName(component, MetricDef.FAILED_NUM), 1, MetricType.COUNTER);
+            update(stream, MetricDef.FAILED_NUM, 1, MetricType.COUNTER);
+        }
     }
 
-    public void spout_acked_tuple(String stream, long st) {
-
-        String ackNumName =
-                MetricRegistry.name(MetricDef.ACKED_NUM,
-                        Common.ACKER_COMPONENT_ID, stream);
-        update(ackNumName, Double.valueOf(1), JStormMetrics.Builder.COUNTER);
-
-        String processName =
-                MetricRegistry.name(MetricDef.PROCESS_LATENCY,
-                        Common.ACKER_COMPONENT_ID, stream);
-        update(processName, Double.valueOf(st), JStormMetrics.Builder.HISTOGRAM);
+    public void spout_acked_tuple(String stream, long st, Long lifeCycle) {
+        if (enableMetrics) {
+            update(stream, MetricDef.ACKED_NUM, 1, MetricType.COUNTER);
+            update(stream, MetricDef.PROCESS_LATENCY, st, MetricType.HISTOGRAM, true);
 
+            if (lifeCycle > 0) {
+                update(stream, AsmMetric.mkName(Common.ACKER_COMPONENT_ID, MetricDef.TUPLE_LIEF_CYCLE), lifeCycle, MetricType.HISTOGRAM, false);
+            }
+        }
     }
 
     public void spout_failed_tuple(String stream) {
-        String failNumName =
-                MetricRegistry.name(MetricDef.FAILED_NUM,
-                        Common.ACKER_COMPONENT_ID, stream);
-        update(failNumName, Double.valueOf(1), JStormMetrics.Builder.COUNTER);
-
+        if (enableMetrics) {
+            update(stream, MetricDef.FAILED_NUM, 1, MetricType.COUNTER);
+        }
     }
 }
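
The new TaskBaseMetric keeps AsmMetric instances in a static
ConcurrentHashMap keyed by taskId + streamId + name, so the hot update path
normally costs a single map lookup; only a cache miss pays for building the
full metric name and registering it. The sketch below reproduces that
lookup-then-register pattern with stand-in types (Counter and the registry
are not JStorm classes), and uses computeIfAbsent, which closes the small
registration race the real code tolerates.

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import java.util.concurrent.atomic.LongAdder;

    public class MetricCacheSketch {

        /** Stand-in for AsmMetric: just an additive counter. */
        static final class Counter {
            final LongAdder value = new LongAdder();
            void update(long v) { value.add(v); }
        }

        // stand-in global registry: full metric name -> metric instance
        private static final ConcurrentMap<String, Counter> registry =
                new ConcurrentHashMap<String, Counter>();
        // local cache: short key (taskId + streamId + name) -> metric instance
        private static final ConcurrentMap<String, Counter> cache =
                new ConcurrentHashMap<String, Counter>();

        static void update(int taskId, String streamId, String name, long v) {
            String key = taskId + streamId + name;
            Counter metric = cache.get(key);
            if (metric == null) {
                String fullName = "topologyId.componentId." + taskId + "." + streamId + "." + name;
                // the registry wins any race, so every thread sees the same instance
                metric = registry.computeIfAbsent(fullName, k -> new Counter());
                cache.putIfAbsent(key, metric);
            }
            metric.update(v);
        }

        public static void main(String[] args) {
            update(1, "default", "Emitted", 5);
            update(1, "default", "Emitted", 3);
            System.out.println(registry.get("topologyId.componentId.1.default.Emitted").value.sum()); // 8
        }
    }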


[37/51] [partial] storm git commit: Update JStorm to latest release 2.1.0

Posted by pt...@apache.org.
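
In the Thrift schema change that follows, the per-component, per-worker and
per-task maps of TopologyMetric collapse into single MetricInfo structs, and
two new required fields (streamMetric, nettyMetric) are added. Constructing
the new struct looks roughly like the sketch below; it assumes the
MetricInfo instances are built elsewhere and only uses the constructor and
validate() shown in the generated code.

    import backtype.storm.generated.MetricInfo;
    import backtype.storm.generated.TopologyMetric;

    public class TopologyMetricSketch {
        public static TopologyMetric build(MetricInfo topology, MetricInfo component,
                                           MetricInfo worker, MetricInfo task,
                                           MetricInfo stream, MetricInfo netty) throws Exception {
            // all six fields are required in the new schema
            TopologyMetric tm = new TopologyMetric(topology, component, worker, task, stream, netty);
            tm.validate();
            return tm;
        }
    }
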
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/TopologyMetric.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/TopologyMetric.java b/jstorm-core/src/main/java/backtype/storm/generated/TopologyMetric.java
index 6df50cf..c87e82b 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/TopologyMetric.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/TopologyMetric.java
@@ -34,14 +34,16 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class TopologyMetric implements org.apache.thrift.TBase<TopologyMetric, TopologyMetric._Fields>, java.io.Serializable, Cloneable, Comparable<TopologyMetric> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TopologyMetric");
 
   private static final org.apache.thrift.protocol.TField TOPOLOGY_METRIC_FIELD_DESC = new org.apache.thrift.protocol.TField("topologyMetric", org.apache.thrift.protocol.TType.STRUCT, (short)1);
-  private static final org.apache.thrift.protocol.TField COMPONENT_METRIC_FIELD_DESC = new org.apache.thrift.protocol.TField("componentMetric", org.apache.thrift.protocol.TType.MAP, (short)2);
-  private static final org.apache.thrift.protocol.TField WORKER_METRIC_FIELD_DESC = new org.apache.thrift.protocol.TField("workerMetric", org.apache.thrift.protocol.TType.MAP, (short)3);
-  private static final org.apache.thrift.protocol.TField TASK_METRIC_FIELD_DESC = new org.apache.thrift.protocol.TField("taskMetric", org.apache.thrift.protocol.TType.MAP, (short)4);
+  private static final org.apache.thrift.protocol.TField COMPONENT_METRIC_FIELD_DESC = new org.apache.thrift.protocol.TField("componentMetric", org.apache.thrift.protocol.TType.STRUCT, (short)2);
+  private static final org.apache.thrift.protocol.TField WORKER_METRIC_FIELD_DESC = new org.apache.thrift.protocol.TField("workerMetric", org.apache.thrift.protocol.TType.STRUCT, (short)3);
+  private static final org.apache.thrift.protocol.TField TASK_METRIC_FIELD_DESC = new org.apache.thrift.protocol.TField("taskMetric", org.apache.thrift.protocol.TType.STRUCT, (short)4);
+  private static final org.apache.thrift.protocol.TField STREAM_METRIC_FIELD_DESC = new org.apache.thrift.protocol.TField("streamMetric", org.apache.thrift.protocol.TType.STRUCT, (short)5);
+  private static final org.apache.thrift.protocol.TField NETTY_METRIC_FIELD_DESC = new org.apache.thrift.protocol.TField("nettyMetric", org.apache.thrift.protocol.TType.STRUCT, (short)6);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -50,16 +52,20 @@ public class TopologyMetric implements org.apache.thrift.TBase<TopologyMetric, T
   }
 
   private MetricInfo topologyMetric; // required
-  private Map<String,MetricInfo> componentMetric; // required
-  private Map<String,MetricInfo> workerMetric; // required
-  private Map<Integer,MetricInfo> taskMetric; // required
+  private MetricInfo componentMetric; // required
+  private MetricInfo workerMetric; // required
+  private MetricInfo taskMetric; // required
+  private MetricInfo streamMetric; // required
+  private MetricInfo nettyMetric; // required
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
     TOPOLOGY_METRIC((short)1, "topologyMetric"),
     COMPONENT_METRIC((short)2, "componentMetric"),
     WORKER_METRIC((short)3, "workerMetric"),
-    TASK_METRIC((short)4, "taskMetric");
+    TASK_METRIC((short)4, "taskMetric"),
+    STREAM_METRIC((short)5, "streamMetric"),
+    NETTY_METRIC((short)6, "nettyMetric");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -82,6 +88,10 @@ public class TopologyMetric implements org.apache.thrift.TBase<TopologyMetric, T
           return WORKER_METRIC;
         case 4: // TASK_METRIC
           return TASK_METRIC;
+        case 5: // STREAM_METRIC
+          return STREAM_METRIC;
+        case 6: // NETTY_METRIC
+          return NETTY_METRIC;
         default:
           return null;
       }
@@ -128,17 +138,15 @@ public class TopologyMetric implements org.apache.thrift.TBase<TopologyMetric, T
     tmpMap.put(_Fields.TOPOLOGY_METRIC, new org.apache.thrift.meta_data.FieldMetaData("topologyMetric", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, MetricInfo.class)));
     tmpMap.put(_Fields.COMPONENT_METRIC, new org.apache.thrift.meta_data.FieldMetaData("componentMetric", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
-            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
-            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, MetricInfo.class))));
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, MetricInfo.class)));
     tmpMap.put(_Fields.WORKER_METRIC, new org.apache.thrift.meta_data.FieldMetaData("workerMetric", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
-            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
-            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, MetricInfo.class))));
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, MetricInfo.class)));
     tmpMap.put(_Fields.TASK_METRIC, new org.apache.thrift.meta_data.FieldMetaData("taskMetric", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
-            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32), 
-            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, MetricInfo.class))));
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, MetricInfo.class)));
+    tmpMap.put(_Fields.STREAM_METRIC, new org.apache.thrift.meta_data.FieldMetaData("streamMetric", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, MetricInfo.class)));
+    tmpMap.put(_Fields.NETTY_METRIC, new org.apache.thrift.meta_data.FieldMetaData("nettyMetric", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, MetricInfo.class)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TopologyMetric.class, metaDataMap);
   }
@@ -148,15 +156,19 @@ public class TopologyMetric implements org.apache.thrift.TBase<TopologyMetric, T
 
   public TopologyMetric(
     MetricInfo topologyMetric,
-    Map<String,MetricInfo> componentMetric,
-    Map<String,MetricInfo> workerMetric,
-    Map<Integer,MetricInfo> taskMetric)
+    MetricInfo componentMetric,
+    MetricInfo workerMetric,
+    MetricInfo taskMetric,
+    MetricInfo streamMetric,
+    MetricInfo nettyMetric)
   {
     this();
     this.topologyMetric = topologyMetric;
     this.componentMetric = componentMetric;
     this.workerMetric = workerMetric;
     this.taskMetric = taskMetric;
+    this.streamMetric = streamMetric;
+    this.nettyMetric = nettyMetric;
   }
 
   /**
@@ -167,49 +179,19 @@ public class TopologyMetric implements org.apache.thrift.TBase<TopologyMetric, T
       this.topologyMetric = new MetricInfo(other.topologyMetric);
     }
     if (other.is_set_componentMetric()) {
-      Map<String,MetricInfo> __this__componentMetric = new HashMap<String,MetricInfo>(other.componentMetric.size());
-      for (Map.Entry<String, MetricInfo> other_element : other.componentMetric.entrySet()) {
-
-        String other_element_key = other_element.getKey();
-        MetricInfo other_element_value = other_element.getValue();
-
-        String __this__componentMetric_copy_key = other_element_key;
-
-        MetricInfo __this__componentMetric_copy_value = new MetricInfo(other_element_value);
-
-        __this__componentMetric.put(__this__componentMetric_copy_key, __this__componentMetric_copy_value);
-      }
-      this.componentMetric = __this__componentMetric;
+      this.componentMetric = new MetricInfo(other.componentMetric);
     }
     if (other.is_set_workerMetric()) {
-      Map<String,MetricInfo> __this__workerMetric = new HashMap<String,MetricInfo>(other.workerMetric.size());
-      for (Map.Entry<String, MetricInfo> other_element : other.workerMetric.entrySet()) {
-
-        String other_element_key = other_element.getKey();
-        MetricInfo other_element_value = other_element.getValue();
-
-        String __this__workerMetric_copy_key = other_element_key;
-
-        MetricInfo __this__workerMetric_copy_value = new MetricInfo(other_element_value);
-
-        __this__workerMetric.put(__this__workerMetric_copy_key, __this__workerMetric_copy_value);
-      }
-      this.workerMetric = __this__workerMetric;
+      this.workerMetric = new MetricInfo(other.workerMetric);
     }
     if (other.is_set_taskMetric()) {
-      Map<Integer,MetricInfo> __this__taskMetric = new HashMap<Integer,MetricInfo>(other.taskMetric.size());
-      for (Map.Entry<Integer, MetricInfo> other_element : other.taskMetric.entrySet()) {
-
-        Integer other_element_key = other_element.getKey();
-        MetricInfo other_element_value = other_element.getValue();
-
-        Integer __this__taskMetric_copy_key = other_element_key;
-
-        MetricInfo __this__taskMetric_copy_value = new MetricInfo(other_element_value);
-
-        __this__taskMetric.put(__this__taskMetric_copy_key, __this__taskMetric_copy_value);
-      }
-      this.taskMetric = __this__taskMetric;
+      this.taskMetric = new MetricInfo(other.taskMetric);
+    }
+    if (other.is_set_streamMetric()) {
+      this.streamMetric = new MetricInfo(other.streamMetric);
+    }
+    if (other.is_set_nettyMetric()) {
+      this.nettyMetric = new MetricInfo(other.nettyMetric);
     }
   }
 
@@ -223,6 +205,8 @@ public class TopologyMetric implements org.apache.thrift.TBase<TopologyMetric, T
     this.componentMetric = null;
     this.workerMetric = null;
     this.taskMetric = null;
+    this.streamMetric = null;
+    this.nettyMetric = null;
   }
 
   public MetricInfo get_topologyMetric() {
@@ -248,22 +232,11 @@ public class TopologyMetric implements org.apache.thrift.TBase<TopologyMetric, T
     }
   }
 
-  public int get_componentMetric_size() {
-    return (this.componentMetric == null) ? 0 : this.componentMetric.size();
-  }
-
-  public void put_to_componentMetric(String key, MetricInfo val) {
-    if (this.componentMetric == null) {
-      this.componentMetric = new HashMap<String,MetricInfo>();
-    }
-    this.componentMetric.put(key, val);
-  }
-
-  public Map<String,MetricInfo> get_componentMetric() {
+  public MetricInfo get_componentMetric() {
     return this.componentMetric;
   }
 
-  public void set_componentMetric(Map<String,MetricInfo> componentMetric) {
+  public void set_componentMetric(MetricInfo componentMetric) {
     this.componentMetric = componentMetric;
   }
 
@@ -282,22 +255,11 @@ public class TopologyMetric implements org.apache.thrift.TBase<TopologyMetric, T
     }
   }
 
-  public int get_workerMetric_size() {
-    return (this.workerMetric == null) ? 0 : this.workerMetric.size();
-  }
-
-  public void put_to_workerMetric(String key, MetricInfo val) {
-    if (this.workerMetric == null) {
-      this.workerMetric = new HashMap<String,MetricInfo>();
-    }
-    this.workerMetric.put(key, val);
-  }
-
-  public Map<String,MetricInfo> get_workerMetric() {
+  public MetricInfo get_workerMetric() {
     return this.workerMetric;
   }
 
-  public void set_workerMetric(Map<String,MetricInfo> workerMetric) {
+  public void set_workerMetric(MetricInfo workerMetric) {
     this.workerMetric = workerMetric;
   }
 
@@ -316,22 +278,11 @@ public class TopologyMetric implements org.apache.thrift.TBase<TopologyMetric, T
     }
   }
 
-  public int get_taskMetric_size() {
-    return (this.taskMetric == null) ? 0 : this.taskMetric.size();
-  }
-
-  public void put_to_taskMetric(int key, MetricInfo val) {
-    if (this.taskMetric == null) {
-      this.taskMetric = new HashMap<Integer,MetricInfo>();
-    }
-    this.taskMetric.put(key, val);
-  }
-
-  public Map<Integer,MetricInfo> get_taskMetric() {
+  public MetricInfo get_taskMetric() {
     return this.taskMetric;
   }
 
-  public void set_taskMetric(Map<Integer,MetricInfo> taskMetric) {
+  public void set_taskMetric(MetricInfo taskMetric) {
     this.taskMetric = taskMetric;
   }
 
@@ -350,6 +301,52 @@ public class TopologyMetric implements org.apache.thrift.TBase<TopologyMetric, T
     }
   }
 
+  public MetricInfo get_streamMetric() {
+    return this.streamMetric;
+  }
+
+  public void set_streamMetric(MetricInfo streamMetric) {
+    this.streamMetric = streamMetric;
+  }
+
+  public void unset_streamMetric() {
+    this.streamMetric = null;
+  }
+
+  /** Returns true if field streamMetric is set (has been assigned a value) and false otherwise */
+  public boolean is_set_streamMetric() {
+    return this.streamMetric != null;
+  }
+
+  public void set_streamMetric_isSet(boolean value) {
+    if (!value) {
+      this.streamMetric = null;
+    }
+  }
+
+  public MetricInfo get_nettyMetric() {
+    return this.nettyMetric;
+  }
+
+  public void set_nettyMetric(MetricInfo nettyMetric) {
+    this.nettyMetric = nettyMetric;
+  }
+
+  public void unset_nettyMetric() {
+    this.nettyMetric = null;
+  }
+
+  /** Returns true if field nettyMetric is set (has been assigned a value) and false otherwise */
+  public boolean is_set_nettyMetric() {
+    return this.nettyMetric != null;
+  }
+
+  public void set_nettyMetric_isSet(boolean value) {
+    if (!value) {
+      this.nettyMetric = null;
+    }
+  }
+
   public void setFieldValue(_Fields field, Object value) {
     switch (field) {
     case TOPOLOGY_METRIC:
@@ -364,7 +361,7 @@ public class TopologyMetric implements org.apache.thrift.TBase<TopologyMetric, T
       if (value == null) {
         unset_componentMetric();
       } else {
-        set_componentMetric((Map<String,MetricInfo>)value);
+        set_componentMetric((MetricInfo)value);
       }
       break;
 
@@ -372,7 +369,7 @@ public class TopologyMetric implements org.apache.thrift.TBase<TopologyMetric, T
       if (value == null) {
         unset_workerMetric();
       } else {
-        set_workerMetric((Map<String,MetricInfo>)value);
+        set_workerMetric((MetricInfo)value);
       }
       break;
 
@@ -380,7 +377,23 @@ public class TopologyMetric implements org.apache.thrift.TBase<TopologyMetric, T
       if (value == null) {
         unset_taskMetric();
       } else {
-        set_taskMetric((Map<Integer,MetricInfo>)value);
+        set_taskMetric((MetricInfo)value);
+      }
+      break;
+
+    case STREAM_METRIC:
+      if (value == null) {
+        unset_streamMetric();
+      } else {
+        set_streamMetric((MetricInfo)value);
+      }
+      break;
+
+    case NETTY_METRIC:
+      if (value == null) {
+        unset_nettyMetric();
+      } else {
+        set_nettyMetric((MetricInfo)value);
       }
       break;
 
@@ -401,6 +414,12 @@ public class TopologyMetric implements org.apache.thrift.TBase<TopologyMetric, T
     case TASK_METRIC:
       return get_taskMetric();
 
+    case STREAM_METRIC:
+      return get_streamMetric();
+
+    case NETTY_METRIC:
+      return get_nettyMetric();
+
     }
     throw new IllegalStateException();
   }
@@ -420,6 +439,10 @@ public class TopologyMetric implements org.apache.thrift.TBase<TopologyMetric, T
       return is_set_workerMetric();
     case TASK_METRIC:
       return is_set_taskMetric();
+    case STREAM_METRIC:
+      return is_set_streamMetric();
+    case NETTY_METRIC:
+      return is_set_nettyMetric();
     }
     throw new IllegalStateException();
   }
@@ -473,6 +496,24 @@ public class TopologyMetric implements org.apache.thrift.TBase<TopologyMetric, T
         return false;
     }
 
+    boolean this_present_streamMetric = true && this.is_set_streamMetric();
+    boolean that_present_streamMetric = true && that.is_set_streamMetric();
+    if (this_present_streamMetric || that_present_streamMetric) {
+      if (!(this_present_streamMetric && that_present_streamMetric))
+        return false;
+      if (!this.streamMetric.equals(that.streamMetric))
+        return false;
+    }
+
+    boolean this_present_nettyMetric = true && this.is_set_nettyMetric();
+    boolean that_present_nettyMetric = true && that.is_set_nettyMetric();
+    if (this_present_nettyMetric || that_present_nettyMetric) {
+      if (!(this_present_nettyMetric && that_present_nettyMetric))
+        return false;
+      if (!this.nettyMetric.equals(that.nettyMetric))
+        return false;
+    }
+
     return true;
   }
 
@@ -500,6 +541,16 @@ public class TopologyMetric implements org.apache.thrift.TBase<TopologyMetric, T
     if (present_taskMetric)
       list.add(taskMetric);
 
+    boolean present_streamMetric = true && (is_set_streamMetric());
+    list.add(present_streamMetric);
+    if (present_streamMetric)
+      list.add(streamMetric);
+
+    boolean present_nettyMetric = true && (is_set_nettyMetric());
+    list.add(present_nettyMetric);
+    if (present_nettyMetric)
+      list.add(nettyMetric);
+
     return list.hashCode();
   }
 
@@ -551,6 +602,26 @@ public class TopologyMetric implements org.apache.thrift.TBase<TopologyMetric, T
         return lastComparison;
       }
     }
+    lastComparison = Boolean.valueOf(is_set_streamMetric()).compareTo(other.is_set_streamMetric());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_streamMetric()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.streamMetric, other.streamMetric);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_nettyMetric()).compareTo(other.is_set_nettyMetric());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_nettyMetric()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nettyMetric, other.nettyMetric);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
     return 0;
   }
 
@@ -558,11 +629,11 @@ public class TopologyMetric implements org.apache.thrift.TBase<TopologyMetric, T
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -602,38 +673,77 @@ public class TopologyMetric implements org.apache.thrift.TBase<TopologyMetric, T
       sb.append(this.taskMetric);
     }
     first = false;
+    if (!first) sb.append(", ");
+    sb.append("streamMetric:");
+    if (this.streamMetric == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.streamMetric);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("nettyMetric:");
+    if (this.nettyMetric == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.nettyMetric);
+    }
+    first = false;
     sb.append(")");
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_topologyMetric()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'topologyMetric' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'topologyMetric' is unset! Struct:" + toString());
     }
 
     if (!is_set_componentMetric()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'componentMetric' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'componentMetric' is unset! Struct:" + toString());
     }
 
     if (!is_set_workerMetric()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'workerMetric' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'workerMetric' is unset! Struct:" + toString());
     }
 
     if (!is_set_taskMetric()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'taskMetric' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'taskMetric' is unset! Struct:" + toString());
+    }
+
+    if (!is_set_streamMetric()) {
+      throw new TProtocolException("Required field 'streamMetric' is unset! Struct:" + toString());
+    }
+
+    if (!is_set_nettyMetric()) {
+      throw new TProtocolException("Required field 'nettyMetric' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
     if (topologyMetric != null) {
       topologyMetric.validate();
     }
+    if (componentMetric != null) {
+      componentMetric.validate();
+    }
+    if (workerMetric != null) {
+      workerMetric.validate();
+    }
+    if (taskMetric != null) {
+      taskMetric.validate();
+    }
+    if (streamMetric != null) {
+      streamMetric.validate();
+    }
+    if (nettyMetric != null) {
+      nettyMetric.validate();
+    }
   }
 
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -641,7 +751,7 @@ public class TopologyMetric implements org.apache.thrift.TBase<TopologyMetric, T
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -654,7 +764,7 @@ public class TopologyMetric implements org.apache.thrift.TBase<TopologyMetric, T
 
   private static class TopologyMetricStandardScheme extends StandardScheme<TopologyMetric> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, TopologyMetric struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, TopologyMetric struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -674,68 +784,50 @@ public class TopologyMetric implements org.apache.thrift.TBase<TopologyMetric, T
             }
             break;
           case 2: // COMPONENT_METRIC
-            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
-              {
-                org.apache.thrift.protocol.TMap _map166 = iprot.readMapBegin();
-                struct.componentMetric = new HashMap<String,MetricInfo>(2*_map166.size);
-                String _key167;
-                MetricInfo _val168;
-                for (int _i169 = 0; _i169 < _map166.size; ++_i169)
-                {
-                  _key167 = iprot.readString();
-                  _val168 = new MetricInfo();
-                  _val168.read(iprot);
-                  struct.componentMetric.put(_key167, _val168);
-                }
-                iprot.readMapEnd();
-              }
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.componentMetric = new MetricInfo();
+              struct.componentMetric.read(iprot);
               struct.set_componentMetric_isSet(true);
             } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
           case 3: // WORKER_METRIC
-            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
-              {
-                org.apache.thrift.protocol.TMap _map170 = iprot.readMapBegin();
-                struct.workerMetric = new HashMap<String,MetricInfo>(2*_map170.size);
-                String _key171;
-                MetricInfo _val172;
-                for (int _i173 = 0; _i173 < _map170.size; ++_i173)
-                {
-                  _key171 = iprot.readString();
-                  _val172 = new MetricInfo();
-                  _val172.read(iprot);
-                  struct.workerMetric.put(_key171, _val172);
-                }
-                iprot.readMapEnd();
-              }
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.workerMetric = new MetricInfo();
+              struct.workerMetric.read(iprot);
               struct.set_workerMetric_isSet(true);
             } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
           case 4: // TASK_METRIC
-            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
-              {
-                org.apache.thrift.protocol.TMap _map174 = iprot.readMapBegin();
-                struct.taskMetric = new HashMap<Integer,MetricInfo>(2*_map174.size);
-                int _key175;
-                MetricInfo _val176;
-                for (int _i177 = 0; _i177 < _map174.size; ++_i177)
-                {
-                  _key175 = iprot.readI32();
-                  _val176 = new MetricInfo();
-                  _val176.read(iprot);
-                  struct.taskMetric.put(_key175, _val176);
-                }
-                iprot.readMapEnd();
-              }
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.taskMetric = new MetricInfo();
+              struct.taskMetric.read(iprot);
               struct.set_taskMetric_isSet(true);
             } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
+          case 5: // STREAM_METRIC
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.streamMetric = new MetricInfo();
+              struct.streamMetric.read(iprot);
+              struct.set_streamMetric_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // NETTY_METRIC
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.nettyMetric = new MetricInfo();
+              struct.nettyMetric.read(iprot);
+              struct.set_nettyMetric_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
           default:
             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
         }
@@ -745,7 +837,7 @@ public class TopologyMetric implements org.apache.thrift.TBase<TopologyMetric, T
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, TopologyMetric struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, TopologyMetric struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -756,41 +848,27 @@ public class TopologyMetric implements org.apache.thrift.TBase<TopologyMetric, T
       }
       if (struct.componentMetric != null) {
         oprot.writeFieldBegin(COMPONENT_METRIC_FIELD_DESC);
-        {
-          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.componentMetric.size()));
-          for (Map.Entry<String, MetricInfo> _iter178 : struct.componentMetric.entrySet())
-          {
-            oprot.writeString(_iter178.getKey());
-            _iter178.getValue().write(oprot);
-          }
-          oprot.writeMapEnd();
-        }
+        struct.componentMetric.write(oprot);
         oprot.writeFieldEnd();
       }
       if (struct.workerMetric != null) {
         oprot.writeFieldBegin(WORKER_METRIC_FIELD_DESC);
-        {
-          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.workerMetric.size()));
-          for (Map.Entry<String, MetricInfo> _iter179 : struct.workerMetric.entrySet())
-          {
-            oprot.writeString(_iter179.getKey());
-            _iter179.getValue().write(oprot);
-          }
-          oprot.writeMapEnd();
-        }
+        struct.workerMetric.write(oprot);
         oprot.writeFieldEnd();
       }
       if (struct.taskMetric != null) {
         oprot.writeFieldBegin(TASK_METRIC_FIELD_DESC);
-        {
-          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I32, org.apache.thrift.protocol.TType.STRUCT, struct.taskMetric.size()));
-          for (Map.Entry<Integer, MetricInfo> _iter180 : struct.taskMetric.entrySet())
-          {
-            oprot.writeI32(_iter180.getKey());
-            _iter180.getValue().write(oprot);
-          }
-          oprot.writeMapEnd();
-        }
+        struct.taskMetric.write(oprot);
+        oprot.writeFieldEnd();
+      }
+      if (struct.streamMetric != null) {
+        oprot.writeFieldBegin(STREAM_METRIC_FIELD_DESC);
+        struct.streamMetric.write(oprot);
+        oprot.writeFieldEnd();
+      }
+      if (struct.nettyMetric != null) {
+        oprot.writeFieldBegin(NETTY_METRIC_FIELD_DESC);
+        struct.nettyMetric.write(oprot);
         oprot.writeFieldEnd();
       }
       oprot.writeFieldStop();
@@ -808,83 +886,37 @@ public class TopologyMetric implements org.apache.thrift.TBase<TopologyMetric, T
   private static class TopologyMetricTupleScheme extends TupleScheme<TopologyMetric> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, TopologyMetric struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, TopologyMetric struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       struct.topologyMetric.write(oprot);
-      {
-        oprot.writeI32(struct.componentMetric.size());
-        for (Map.Entry<String, MetricInfo> _iter181 : struct.componentMetric.entrySet())
-        {
-          oprot.writeString(_iter181.getKey());
-          _iter181.getValue().write(oprot);
-        }
-      }
-      {
-        oprot.writeI32(struct.workerMetric.size());
-        for (Map.Entry<String, MetricInfo> _iter182 : struct.workerMetric.entrySet())
-        {
-          oprot.writeString(_iter182.getKey());
-          _iter182.getValue().write(oprot);
-        }
-      }
-      {
-        oprot.writeI32(struct.taskMetric.size());
-        for (Map.Entry<Integer, MetricInfo> _iter183 : struct.taskMetric.entrySet())
-        {
-          oprot.writeI32(_iter183.getKey());
-          _iter183.getValue().write(oprot);
-        }
-      }
+      struct.componentMetric.write(oprot);
+      struct.workerMetric.write(oprot);
+      struct.taskMetric.write(oprot);
+      struct.streamMetric.write(oprot);
+      struct.nettyMetric.write(oprot);
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, TopologyMetric struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, TopologyMetric struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       struct.topologyMetric = new MetricInfo();
       struct.topologyMetric.read(iprot);
       struct.set_topologyMetric_isSet(true);
-      {
-        org.apache.thrift.protocol.TMap _map184 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.componentMetric = new HashMap<String,MetricInfo>(2*_map184.size);
-        String _key185;
-        MetricInfo _val186;
-        for (int _i187 = 0; _i187 < _map184.size; ++_i187)
-        {
-          _key185 = iprot.readString();
-          _val186 = new MetricInfo();
-          _val186.read(iprot);
-          struct.componentMetric.put(_key185, _val186);
-        }
-      }
+      struct.componentMetric = new MetricInfo();
+      struct.componentMetric.read(iprot);
       struct.set_componentMetric_isSet(true);
-      {
-        org.apache.thrift.protocol.TMap _map188 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.workerMetric = new HashMap<String,MetricInfo>(2*_map188.size);
-        String _key189;
-        MetricInfo _val190;
-        for (int _i191 = 0; _i191 < _map188.size; ++_i191)
-        {
-          _key189 = iprot.readString();
-          _val190 = new MetricInfo();
-          _val190.read(iprot);
-          struct.workerMetric.put(_key189, _val190);
-        }
-      }
+      struct.workerMetric = new MetricInfo();
+      struct.workerMetric.read(iprot);
       struct.set_workerMetric_isSet(true);
-      {
-        org.apache.thrift.protocol.TMap _map192 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I32, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.taskMetric = new HashMap<Integer,MetricInfo>(2*_map192.size);
-        int _key193;
-        MetricInfo _val194;
-        for (int _i195 = 0; _i195 < _map192.size; ++_i195)
-        {
-          _key193 = iprot.readI32();
-          _val194 = new MetricInfo();
-          _val194.read(iprot);
-          struct.taskMetric.put(_key193, _val194);
-        }
-      }
+      struct.taskMetric = new MetricInfo();
+      struct.taskMetric.read(iprot);
       struct.set_taskMetric_isSet(true);
+      struct.streamMetric = new MetricInfo();
+      struct.streamMetric.read(iprot);
+      struct.set_streamMetric_isSet(true);
+      struct.nettyMetric = new MetricInfo();
+      struct.nettyMetric.read(iprot);
+      struct.set_nettyMetric_isSet(true);
     }
   }
 

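For context on the schema change above: componentMetric, workerMetric and
taskMetric are now single MetricInfo structs (previously maps keyed by
component name, worker and task id), and streamMetric/nettyMetric are new
required fields. A minimal round-trip sketch, not part of the commit,
assuming each MetricInfo has its own required fields populated before
serializing:

    import org.apache.thrift.TSerializer;
    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.protocol.TCompactProtocol;
    import backtype.storm.generated.MetricInfo;
    import backtype.storm.generated.TopologyMetric;

    public class TopologyMetricRoundTrip {
        public static void main(String[] args) throws Exception {
            TopologyMetric tm = new TopologyMetric();
            tm.set_topologyMetric(new MetricInfo());   // placeholder payloads
            tm.set_componentMetric(new MetricInfo());  // was Map<String,MetricInfo>
            tm.set_workerMetric(new MetricInfo());     // was Map<String,MetricInfo>
            tm.set_taskMetric(new MetricInfo());       // was Map<Integer,MetricInfo>
            tm.set_streamMetric(new MetricInfo());     // new required field (id 5)
            tm.set_nettyMetric(new MetricInfo());      // new required field (id 6)

            // Same compact protocol the generated writeObject/readObject use.
            byte[] bytes = new TSerializer(new TCompactProtocol.Factory()).serialize(tm);
            TopologyMetric copy = new TopologyMetric();
            new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, bytes);
            copy.validate();  // all six required fields must be set, as enforced above
        }
    }
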
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/TopologySummary.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/TopologySummary.java b/jstorm-core/src/main/java/backtype/storm/generated/TopologySummary.java
index febb095..7369ed9 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/TopologySummary.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/TopologySummary.java
@@ -34,17 +34,17 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class TopologySummary implements org.apache.thrift.TBase<TopologySummary, TopologySummary._Fields>, java.io.Serializable, Cloneable, Comparable<TopologySummary> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TopologySummary");
 
   private static final org.apache.thrift.protocol.TField ID_FIELD_DESC = new org.apache.thrift.protocol.TField("id", org.apache.thrift.protocol.TType.STRING, (short)1);
   private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)2);
   private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRING, (short)3);
-  private static final org.apache.thrift.protocol.TField UPTIME_SECS_FIELD_DESC = new org.apache.thrift.protocol.TField("uptime_secs", org.apache.thrift.protocol.TType.I32, (short)4);
-  private static final org.apache.thrift.protocol.TField NUM_TASKS_FIELD_DESC = new org.apache.thrift.protocol.TField("num_tasks", org.apache.thrift.protocol.TType.I32, (short)5);
-  private static final org.apache.thrift.protocol.TField NUM_WORKERS_FIELD_DESC = new org.apache.thrift.protocol.TField("num_workers", org.apache.thrift.protocol.TType.I32, (short)6);
-  private static final org.apache.thrift.protocol.TField ERROR_INFO_FIELD_DESC = new org.apache.thrift.protocol.TField("error_info", org.apache.thrift.protocol.TType.STRING, (short)7);
+  private static final org.apache.thrift.protocol.TField UPTIME_SECS_FIELD_DESC = new org.apache.thrift.protocol.TField("uptimeSecs", org.apache.thrift.protocol.TType.I32, (short)4);
+  private static final org.apache.thrift.protocol.TField NUM_TASKS_FIELD_DESC = new org.apache.thrift.protocol.TField("numTasks", org.apache.thrift.protocol.TType.I32, (short)5);
+  private static final org.apache.thrift.protocol.TField NUM_WORKERS_FIELD_DESC = new org.apache.thrift.protocol.TField("numWorkers", org.apache.thrift.protocol.TType.I32, (short)6);
+  private static final org.apache.thrift.protocol.TField ERROR_INFO_FIELD_DESC = new org.apache.thrift.protocol.TField("errorInfo", org.apache.thrift.protocol.TType.STRING, (short)7);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -55,20 +55,20 @@ public class TopologySummary implements org.apache.thrift.TBase<TopologySummary,
   private String id; // required
   private String name; // required
   private String status; // required
-  private int uptime_secs; // required
-  private int num_tasks; // required
-  private int num_workers; // required
-  private String error_info; // optional
+  private int uptimeSecs; // required
+  private int numTasks; // required
+  private int numWorkers; // required
+  private String errorInfo; // optional
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
     ID((short)1, "id"),
     NAME((short)2, "name"),
     STATUS((short)3, "status"),
-    UPTIME_SECS((short)4, "uptime_secs"),
-    NUM_TASKS((short)5, "num_tasks"),
-    NUM_WORKERS((short)6, "num_workers"),
-    ERROR_INFO((short)7, "error_info");
+    UPTIME_SECS((short)4, "uptimeSecs"),
+    NUM_TASKS((short)5, "numTasks"),
+    NUM_WORKERS((short)6, "numWorkers"),
+    ERROR_INFO((short)7, "errorInfo");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -137,9 +137,9 @@ public class TopologySummary implements org.apache.thrift.TBase<TopologySummary,
   }
 
   // isset id assignments
-  private static final int __UPTIME_SECS_ISSET_ID = 0;
-  private static final int __NUM_TASKS_ISSET_ID = 1;
-  private static final int __NUM_WORKERS_ISSET_ID = 2;
+  private static final int __UPTIMESECS_ISSET_ID = 0;
+  private static final int __NUMTASKS_ISSET_ID = 1;
+  private static final int __NUMWORKERS_ISSET_ID = 2;
   private byte __isset_bitfield = 0;
   private static final _Fields optionals[] = {_Fields.ERROR_INFO};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
@@ -151,13 +151,13 @@ public class TopologySummary implements org.apache.thrift.TBase<TopologySummary,
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
     tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.UPTIME_SECS, new org.apache.thrift.meta_data.FieldMetaData("uptime_secs", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+    tmpMap.put(_Fields.UPTIME_SECS, new org.apache.thrift.meta_data.FieldMetaData("uptimeSecs", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
-    tmpMap.put(_Fields.NUM_TASKS, new org.apache.thrift.meta_data.FieldMetaData("num_tasks", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+    tmpMap.put(_Fields.NUM_TASKS, new org.apache.thrift.meta_data.FieldMetaData("numTasks", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
-    tmpMap.put(_Fields.NUM_WORKERS, new org.apache.thrift.meta_data.FieldMetaData("num_workers", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+    tmpMap.put(_Fields.NUM_WORKERS, new org.apache.thrift.meta_data.FieldMetaData("numWorkers", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
-    tmpMap.put(_Fields.ERROR_INFO, new org.apache.thrift.meta_data.FieldMetaData("error_info", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+    tmpMap.put(_Fields.ERROR_INFO, new org.apache.thrift.meta_data.FieldMetaData("errorInfo", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TopologySummary.class, metaDataMap);
@@ -170,20 +170,20 @@ public class TopologySummary implements org.apache.thrift.TBase<TopologySummary,
     String id,
     String name,
     String status,
-    int uptime_secs,
-    int num_tasks,
-    int num_workers)
+    int uptimeSecs,
+    int numTasks,
+    int numWorkers)
   {
     this();
     this.id = id;
     this.name = name;
     this.status = status;
-    this.uptime_secs = uptime_secs;
-    set_uptime_secs_isSet(true);
-    this.num_tasks = num_tasks;
-    set_num_tasks_isSet(true);
-    this.num_workers = num_workers;
-    set_num_workers_isSet(true);
+    this.uptimeSecs = uptimeSecs;
+    set_uptimeSecs_isSet(true);
+    this.numTasks = numTasks;
+    set_numTasks_isSet(true);
+    this.numWorkers = numWorkers;
+    set_numWorkers_isSet(true);
   }
 
   /**
@@ -200,11 +200,11 @@ public class TopologySummary implements org.apache.thrift.TBase<TopologySummary,
     if (other.is_set_status()) {
       this.status = other.status;
     }
-    this.uptime_secs = other.uptime_secs;
-    this.num_tasks = other.num_tasks;
-    this.num_workers = other.num_workers;
-    if (other.is_set_error_info()) {
-      this.error_info = other.error_info;
+    this.uptimeSecs = other.uptimeSecs;
+    this.numTasks = other.numTasks;
+    this.numWorkers = other.numWorkers;
+    if (other.is_set_errorInfo()) {
+      this.errorInfo = other.errorInfo;
     }
   }
 
@@ -217,13 +217,13 @@ public class TopologySummary implements org.apache.thrift.TBase<TopologySummary,
     this.id = null;
     this.name = null;
     this.status = null;
-    set_uptime_secs_isSet(false);
-    this.uptime_secs = 0;
-    set_num_tasks_isSet(false);
-    this.num_tasks = 0;
-    set_num_workers_isSet(false);
-    this.num_workers = 0;
-    this.error_info = null;
+    set_uptimeSecs_isSet(false);
+    this.uptimeSecs = 0;
+    set_numTasks_isSet(false);
+    this.numTasks = 0;
+    set_numWorkers_isSet(false);
+    this.numWorkers = 0;
+    this.errorInfo = null;
   }
 
   public String get_id() {
@@ -295,92 +295,92 @@ public class TopologySummary implements org.apache.thrift.TBase<TopologySummary,
     }
   }
 
-  public int get_uptime_secs() {
-    return this.uptime_secs;
+  public int get_uptimeSecs() {
+    return this.uptimeSecs;
   }
 
-  public void set_uptime_secs(int uptime_secs) {
-    this.uptime_secs = uptime_secs;
-    set_uptime_secs_isSet(true);
+  public void set_uptimeSecs(int uptimeSecs) {
+    this.uptimeSecs = uptimeSecs;
+    set_uptimeSecs_isSet(true);
   }
 
-  public void unset_uptime_secs() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __UPTIME_SECS_ISSET_ID);
+  public void unset_uptimeSecs() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __UPTIMESECS_ISSET_ID);
   }
 
-  /** Returns true if field uptime_secs is set (has been assigned a value) and false otherwise */
-  public boolean is_set_uptime_secs() {
-    return EncodingUtils.testBit(__isset_bitfield, __UPTIME_SECS_ISSET_ID);
+  /** Returns true if field uptimeSecs is set (has been assigned a value) and false otherwise */
+  public boolean is_set_uptimeSecs() {
+    return EncodingUtils.testBit(__isset_bitfield, __UPTIMESECS_ISSET_ID);
   }
 
-  public void set_uptime_secs_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __UPTIME_SECS_ISSET_ID, value);
+  public void set_uptimeSecs_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __UPTIMESECS_ISSET_ID, value);
   }
 
-  public int get_num_tasks() {
-    return this.num_tasks;
+  public int get_numTasks() {
+    return this.numTasks;
   }
 
-  public void set_num_tasks(int num_tasks) {
-    this.num_tasks = num_tasks;
-    set_num_tasks_isSet(true);
+  public void set_numTasks(int numTasks) {
+    this.numTasks = numTasks;
+    set_numTasks_isSet(true);
   }
 
-  public void unset_num_tasks() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUM_TASKS_ISSET_ID);
+  public void unset_numTasks() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUMTASKS_ISSET_ID);
   }
 
-  /** Returns true if field num_tasks is set (has been assigned a value) and false otherwise */
-  public boolean is_set_num_tasks() {
-    return EncodingUtils.testBit(__isset_bitfield, __NUM_TASKS_ISSET_ID);
+  /** Returns true if field numTasks is set (has been assigned a value) and false otherwise */
+  public boolean is_set_numTasks() {
+    return EncodingUtils.testBit(__isset_bitfield, __NUMTASKS_ISSET_ID);
   }
 
-  public void set_num_tasks_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUM_TASKS_ISSET_ID, value);
+  public void set_numTasks_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUMTASKS_ISSET_ID, value);
   }
 
-  public int get_num_workers() {
-    return this.num_workers;
+  public int get_numWorkers() {
+    return this.numWorkers;
   }
 
-  public void set_num_workers(int num_workers) {
-    this.num_workers = num_workers;
-    set_num_workers_isSet(true);
+  public void set_numWorkers(int numWorkers) {
+    this.numWorkers = numWorkers;
+    set_numWorkers_isSet(true);
   }
 
-  public void unset_num_workers() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUM_WORKERS_ISSET_ID);
+  public void unset_numWorkers() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUMWORKERS_ISSET_ID);
   }
 
-  /** Returns true if field num_workers is set (has been assigned a value) and false otherwise */
-  public boolean is_set_num_workers() {
-    return EncodingUtils.testBit(__isset_bitfield, __NUM_WORKERS_ISSET_ID);
+  /** Returns true if field numWorkers is set (has been assigned a value) and false otherwise */
+  public boolean is_set_numWorkers() {
+    return EncodingUtils.testBit(__isset_bitfield, __NUMWORKERS_ISSET_ID);
   }
 
-  public void set_num_workers_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUM_WORKERS_ISSET_ID, value);
+  public void set_numWorkers_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUMWORKERS_ISSET_ID, value);
   }
 
-  public String get_error_info() {
-    return this.error_info;
+  public String get_errorInfo() {
+    return this.errorInfo;
   }
 
-  public void set_error_info(String error_info) {
-    this.error_info = error_info;
+  public void set_errorInfo(String errorInfo) {
+    this.errorInfo = errorInfo;
   }
 
-  public void unset_error_info() {
-    this.error_info = null;
+  public void unset_errorInfo() {
+    this.errorInfo = null;
   }
 
-  /** Returns true if field error_info is set (has been assigned a value) and false otherwise */
-  public boolean is_set_error_info() {
-    return this.error_info != null;
+  /** Returns true if field errorInfo is set (has been assigned a value) and false otherwise */
+  public boolean is_set_errorInfo() {
+    return this.errorInfo != null;
   }
 
-  public void set_error_info_isSet(boolean value) {
+  public void set_errorInfo_isSet(boolean value) {
     if (!value) {
-      this.error_info = null;
+      this.errorInfo = null;
     }
   }
 
@@ -412,33 +412,33 @@ public class TopologySummary implements org.apache.thrift.TBase<TopologySummary,
 
     case UPTIME_SECS:
       if (value == null) {
-        unset_uptime_secs();
+        unset_uptimeSecs();
       } else {
-        set_uptime_secs((Integer)value);
+        set_uptimeSecs((Integer)value);
       }
       break;
 
     case NUM_TASKS:
       if (value == null) {
-        unset_num_tasks();
+        unset_numTasks();
       } else {
-        set_num_tasks((Integer)value);
+        set_numTasks((Integer)value);
       }
       break;
 
     case NUM_WORKERS:
       if (value == null) {
-        unset_num_workers();
+        unset_numWorkers();
       } else {
-        set_num_workers((Integer)value);
+        set_numWorkers((Integer)value);
       }
       break;
 
     case ERROR_INFO:
       if (value == null) {
-        unset_error_info();
+        unset_errorInfo();
       } else {
-        set_error_info((String)value);
+        set_errorInfo((String)value);
       }
       break;
 
@@ -457,16 +457,16 @@ public class TopologySummary implements org.apache.thrift.TBase<TopologySummary,
       return get_status();
 
     case UPTIME_SECS:
-      return Integer.valueOf(get_uptime_secs());
+      return Integer.valueOf(get_uptimeSecs());
 
     case NUM_TASKS:
-      return Integer.valueOf(get_num_tasks());
+      return Integer.valueOf(get_numTasks());
 
     case NUM_WORKERS:
-      return Integer.valueOf(get_num_workers());
+      return Integer.valueOf(get_numWorkers());
 
     case ERROR_INFO:
-      return get_error_info();
+      return get_errorInfo();
 
     }
     throw new IllegalStateException();
@@ -486,13 +486,13 @@ public class TopologySummary implements org.apache.thrift.TBase<TopologySummary,
     case STATUS:
       return is_set_status();
     case UPTIME_SECS:
-      return is_set_uptime_secs();
+      return is_set_uptimeSecs();
     case NUM_TASKS:
-      return is_set_num_tasks();
+      return is_set_numTasks();
     case NUM_WORKERS:
-      return is_set_num_workers();
+      return is_set_numWorkers();
     case ERROR_INFO:
-      return is_set_error_info();
+      return is_set_errorInfo();
     }
     throw new IllegalStateException();
   }
@@ -537,39 +537,39 @@ public class TopologySummary implements org.apache.thrift.TBase<TopologySummary,
         return false;
     }
 
-    boolean this_present_uptime_secs = true;
-    boolean that_present_uptime_secs = true;
-    if (this_present_uptime_secs || that_present_uptime_secs) {
-      if (!(this_present_uptime_secs && that_present_uptime_secs))
+    boolean this_present_uptimeSecs = true;
+    boolean that_present_uptimeSecs = true;
+    if (this_present_uptimeSecs || that_present_uptimeSecs) {
+      if (!(this_present_uptimeSecs && that_present_uptimeSecs))
         return false;
-      if (this.uptime_secs != that.uptime_secs)
+      if (this.uptimeSecs != that.uptimeSecs)
         return false;
     }
 
-    boolean this_present_num_tasks = true;
-    boolean that_present_num_tasks = true;
-    if (this_present_num_tasks || that_present_num_tasks) {
-      if (!(this_present_num_tasks && that_present_num_tasks))
+    boolean this_present_numTasks = true;
+    boolean that_present_numTasks = true;
+    if (this_present_numTasks || that_present_numTasks) {
+      if (!(this_present_numTasks && that_present_numTasks))
         return false;
-      if (this.num_tasks != that.num_tasks)
+      if (this.numTasks != that.numTasks)
         return false;
     }
 
-    boolean this_present_num_workers = true;
-    boolean that_present_num_workers = true;
-    if (this_present_num_workers || that_present_num_workers) {
-      if (!(this_present_num_workers && that_present_num_workers))
+    boolean this_present_numWorkers = true;
+    boolean that_present_numWorkers = true;
+    if (this_present_numWorkers || that_present_numWorkers) {
+      if (!(this_present_numWorkers && that_present_numWorkers))
         return false;
-      if (this.num_workers != that.num_workers)
+      if (this.numWorkers != that.numWorkers)
         return false;
     }
 
-    boolean this_present_error_info = true && this.is_set_error_info();
-    boolean that_present_error_info = true && that.is_set_error_info();
-    if (this_present_error_info || that_present_error_info) {
-      if (!(this_present_error_info && that_present_error_info))
+    boolean this_present_errorInfo = true && this.is_set_errorInfo();
+    boolean that_present_errorInfo = true && that.is_set_errorInfo();
+    if (this_present_errorInfo || that_present_errorInfo) {
+      if (!(this_present_errorInfo && that_present_errorInfo))
         return false;
-      if (!this.error_info.equals(that.error_info))
+      if (!this.errorInfo.equals(that.errorInfo))
         return false;
     }
 
@@ -595,25 +595,25 @@ public class TopologySummary implements org.apache.thrift.TBase<TopologySummary,
     if (present_status)
       list.add(status);
 
-    boolean present_uptime_secs = true;
-    list.add(present_uptime_secs);
-    if (present_uptime_secs)
-      list.add(uptime_secs);
+    boolean present_uptimeSecs = true;
+    list.add(present_uptimeSecs);
+    if (present_uptimeSecs)
+      list.add(uptimeSecs);
 
-    boolean present_num_tasks = true;
-    list.add(present_num_tasks);
-    if (present_num_tasks)
-      list.add(num_tasks);
+    boolean present_numTasks = true;
+    list.add(present_numTasks);
+    if (present_numTasks)
+      list.add(numTasks);
 
-    boolean present_num_workers = true;
-    list.add(present_num_workers);
-    if (present_num_workers)
-      list.add(num_workers);
+    boolean present_numWorkers = true;
+    list.add(present_numWorkers);
+    if (present_numWorkers)
+      list.add(numWorkers);
 
-    boolean present_error_info = true && (is_set_error_info());
-    list.add(present_error_info);
-    if (present_error_info)
-      list.add(error_info);
+    boolean present_errorInfo = true && (is_set_errorInfo());
+    list.add(present_errorInfo);
+    if (present_errorInfo)
+      list.add(errorInfo);
 
     return list.hashCode();
   }
@@ -656,42 +656,42 @@ public class TopologySummary implements org.apache.thrift.TBase<TopologySummary,
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(is_set_uptime_secs()).compareTo(other.is_set_uptime_secs());
+    lastComparison = Boolean.valueOf(is_set_uptimeSecs()).compareTo(other.is_set_uptimeSecs());
     if (lastComparison != 0) {
       return lastComparison;
     }
-    if (is_set_uptime_secs()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.uptime_secs, other.uptime_secs);
+    if (is_set_uptimeSecs()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.uptimeSecs, other.uptimeSecs);
       if (lastComparison != 0) {
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(is_set_num_tasks()).compareTo(other.is_set_num_tasks());
+    lastComparison = Boolean.valueOf(is_set_numTasks()).compareTo(other.is_set_numTasks());
     if (lastComparison != 0) {
       return lastComparison;
     }
-    if (is_set_num_tasks()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.num_tasks, other.num_tasks);
+    if (is_set_numTasks()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numTasks, other.numTasks);
       if (lastComparison != 0) {
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(is_set_num_workers()).compareTo(other.is_set_num_workers());
+    lastComparison = Boolean.valueOf(is_set_numWorkers()).compareTo(other.is_set_numWorkers());
     if (lastComparison != 0) {
       return lastComparison;
     }
-    if (is_set_num_workers()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.num_workers, other.num_workers);
+    if (is_set_numWorkers()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numWorkers, other.numWorkers);
       if (lastComparison != 0) {
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(is_set_error_info()).compareTo(other.is_set_error_info());
+    lastComparison = Boolean.valueOf(is_set_errorInfo()).compareTo(other.is_set_errorInfo());
     if (lastComparison != 0) {
       return lastComparison;
     }
-    if (is_set_error_info()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.error_info, other.error_info);
+    if (is_set_errorInfo()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.errorInfo, other.errorInfo);
       if (lastComparison != 0) {
         return lastComparison;
       }
@@ -703,11 +703,11 @@ public class TopologySummary implements org.apache.thrift.TBase<TopologySummary,
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -740,24 +740,24 @@ public class TopologySummary implements org.apache.thrift.TBase<TopologySummary,
     }
     first = false;
     if (!first) sb.append(", ");
-    sb.append("uptime_secs:");
-    sb.append(this.uptime_secs);
+    sb.append("uptimeSecs:");
+    sb.append(this.uptimeSecs);
     first = false;
     if (!first) sb.append(", ");
-    sb.append("num_tasks:");
-    sb.append(this.num_tasks);
+    sb.append("numTasks:");
+    sb.append(this.numTasks);
     first = false;
     if (!first) sb.append(", ");
-    sb.append("num_workers:");
-    sb.append(this.num_workers);
+    sb.append("numWorkers:");
+    sb.append(this.numWorkers);
     first = false;
-    if (is_set_error_info()) {
+    if (is_set_errorInfo()) {
       if (!first) sb.append(", ");
-      sb.append("error_info:");
-      if (this.error_info == null) {
+      sb.append("errorInfo:");
+      if (this.errorInfo == null) {
         sb.append("null");
       } else {
-        sb.append(this.error_info);
+        sb.append(this.errorInfo);
       }
       first = false;
     }
@@ -765,30 +765,30 @@ public class TopologySummary implements org.apache.thrift.TBase<TopologySummary,
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_id()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'id' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'id' is unset! Struct:" + toString());
     }
 
     if (!is_set_name()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'name' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'name' is unset! Struct:" + toString());
     }
 
     if (!is_set_status()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'status' is unset! Struct:" + toString());
     }
 
-    if (!is_set_uptime_secs()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'uptime_secs' is unset! Struct:" + toString());
+    if (!is_set_uptimeSecs()) {
+      throw new TProtocolException("Required field 'uptimeSecs' is unset! Struct:" + toString());
     }
 
-    if (!is_set_num_tasks()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'num_tasks' is unset! Struct:" + toString());
+    if (!is_set_numTasks()) {
+      throw new TProtocolException("Required field 'numTasks' is unset! Struct:" + toString());
     }
 
-    if (!is_set_num_workers()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'num_workers' is unset! Struct:" + toString());
+    if (!is_set_numWorkers()) {
+      throw new TProtocolException("Required field 'numWorkers' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -797,7 +797,7 @@ public class TopologySummary implements org.apache.thrift.TBase<TopologySummary,
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -807,7 +807,7 @@ public class TopologySummary implements org.apache.thrift.TBase<TopologySummary,
       // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
       __isset_bitfield = 0;
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -820,7 +820,7 @@ public class TopologySummary implements org.apache.thrift.TBase<TopologySummary,
 
   private static class TopologySummaryStandardScheme extends StandardScheme<TopologySummary> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, TopologySummary struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, TopologySummary struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -856,32 +856,32 @@ public class TopologySummary implements org.apache.thrift.TBase<TopologySummary,
             break;
           case 4: // UPTIME_SECS
             if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
-              struct.uptime_secs = iprot.readI32();
-              struct.set_uptime_secs_isSet(true);
+              struct.uptimeSecs = iprot.readI32();
+              struct.set_uptimeSecs_isSet(true);
             } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
           case 5: // NUM_TASKS
             if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
-              struct.num_tasks = iprot.readI32();
-              struct.set_num_tasks_isSet(true);
+              struct.numTasks = iprot.readI32();
+              struct.set_numTasks_isSet(true);
             } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
           case 6: // NUM_WORKERS
             if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
-              struct.num_workers = iprot.readI32();
-              struct.set_num_workers_isSet(true);
+              struct.numWorkers = iprot.readI32();
+              struct.set_numWorkers_isSet(true);
             } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
           case 7: // ERROR_INFO
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.error_info = iprot.readString();
-              struct.set_error_info_isSet(true);
+              struct.errorInfo = iprot.readString();
+              struct.set_errorInfo_isSet(true);
             } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
@@ -895,7 +895,7 @@ public class TopologySummary implements org.apache.thrift.TBase<TopologySummary,
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, TopologySummary struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, TopologySummary struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -915,18 +915,18 @@ public class TopologySummary implements org.apache.thrift.TBase<TopologySummary,
         oprot.writeFieldEnd();
       }
       oprot.writeFieldBegin(UPTIME_SECS_FIELD_DESC);
-      oprot.writeI32(struct.uptime_secs);
+      oprot.writeI32(struct.uptimeSecs);
       oprot.writeFieldEnd();
       oprot.writeFieldBegin(NUM_TASKS_FIELD_DESC);
-      oprot.writeI32(struct.num_tasks);
+      oprot.writeI32(struct.numTasks);
       oprot.writeFieldEnd();
       oprot.writeFieldBegin(NUM_WORKERS_FIELD_DESC);
-      oprot.writeI32(struct.num_workers);
+      oprot.writeI32(struct.numWorkers);
       oprot.writeFieldEnd();
-      if (struct.error_info != null) {
-        if (struct.is_set_error_info()) {
+      if (struct.errorInfo != null) {
+        if (struct.is_set_errorInfo()) {
           oprot.writeFieldBegin(ERROR_INFO_FIELD_DESC);
-          oprot.writeString(struct.error_info);
+          oprot.writeString(struct.errorInfo);
           oprot.writeFieldEnd();
         }
       }
@@ -945,26 +945,26 @@ public class TopologySummary implements org.apache.thrift.TBase<TopologySummary,
   private static class TopologySummaryTupleScheme extends TupleScheme<TopologySummary> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, TopologySummary struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, TopologySummary struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       oprot.writeString(struct.id);
       oprot.writeString(struct.name);
       oprot.writeString(struct.status);
-      oprot.writeI32(struct.uptime_secs);
-      oprot.writeI32(struct.num_tasks);
-      oprot.writeI32(struct.num_workers);
+      oprot.writeI32(struct.uptimeSecs);
+      oprot.writeI32(struct.numTasks);
+      oprot.writeI32(struct.numWorkers);
       BitSet optionals = new BitSet();
-      if (struct.is_set_error_info()) {
+      if (struct.is_set_errorInfo()) {
         optionals.set(0);
       }
       oprot.writeBitSet(optionals, 1);
-      if (struct.is_set_error_info()) {
-        oprot.writeString(struct.error_info);
+      if (struct.is_set_errorInfo()) {
+        oprot.writeString(struct.errorInfo);
       }
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, TopologySummary struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, TopologySummary struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       struct.id = iprot.readString();
       struct.set_id_isSet(true);
@@ -972,16 +972,16 @@ public class TopologySummary implements org.apache.thrift.TBase<TopologySummary,
       struct.set_name_isSet(true);
       struct.status = iprot.readString();
       struct.set_status_isSet(true);
-      struct.uptime_secs = iprot.readI32();
-      struct.set_uptime_secs_isSet(true);
-      struct.num_tasks = iprot.readI32();
-      struct.set_num_tasks_isSet(true);
-      struct.num_workers = iprot.readI32();
-      struct.set_num_workers_isSet(true);
+      struct.uptimeSecs = iprot.readI32();
+      struct.set_uptimeSecs_isSet(true);
+      struct.numTasks = iprot.readI32();
+      struct.set_numTasks_isSet(true);
+      struct.numWorkers = iprot.readI32();
+      struct.set_numWorkers_isSet(true);
       BitSet incoming = iprot.readBitSet(1);
       if (incoming.get(0)) {
-        struct.error_info = iprot.readString();
-        struct.set_error_info_isSet(true);
+        struct.errorInfo = iprot.readString();
+        struct.set_errorInfo_isSet(true);
       }
     }
   }

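Note that the snake_case-to-camelCase rename above changes only the declared
field names and metadata; the binary and compact protocols encode the numeric
field IDs (1-7 here) rather than names, so serialized TopologySummary data
stays wire-compatible across the rename.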

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/merger/Merger.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/merger/Merger.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/merger/Merger.java
new file mode 100644
index 0000000..4e31019
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/merger/Merger.java
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old.operator.merger;
+
+import java.io.Serializable;
+import java.util.Collection;
+
+public interface Merger<V> extends Serializable {
+    V merge(Collection<V> objs, V unflushed, Object... others);
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/merger/SumMerger.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/merger/SumMerger.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/merger/SumMerger.java
new file mode 100644
index 0000000..b4d65e6
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/merger/SumMerger.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old.operator.merger;
+
+import java.util.Collection;
+
+import com.alibaba.jstorm.utils.JStormUtils;
+
+public class SumMerger<T extends Number> implements Merger<T> {
+    private static final long serialVersionUID = -7026523452570138433L;
+
+    @SuppressWarnings("unchecked")
+    @Override
+    public T merge(Collection<T> objs, T unflushed, Object... others) {
+        // Start from the unflushed value and fold in every flushed window value.
+        T ret = unflushed;
+        for (T obj : objs) {
+            ret = (T) JStormUtils.add(ret, obj);
+        }
+
+        return ret;
+    }
+
+}

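A quick usage sketch for the Merger contract defined above (not part of the
commit), assuming JStormUtils.add returns the widened sum for two Long
operands:

    import java.util.Arrays;
    import com.alibaba.jstorm.common.metric.old.operator.merger.SumMerger;

    public class SumMergerDemo {
        public static void main(String[] args) {
            // Merge three flushed window sums with an unflushed partial sum of 5.
            SumMerger<Long> merger = new SumMerger<Long>();
            Long total = merger.merge(Arrays.asList(10L, 20L, 30L), 5L);
            System.out.println(total);  // expected: 65
        }
    }
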
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/merger/TpsMerger.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/merger/TpsMerger.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/merger/TpsMerger.java
new file mode 100644
index 0000000..4d770d6
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/merger/TpsMerger.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old.operator.merger;
+
+import java.util.Collection;
+
+import com.alibaba.jstorm.common.metric.old.operator.StartTime;
+
+public class TpsMerger implements Merger<Double> {
+    private static final long serialVersionUID = -4534840881635955942L;
+    protected final long createTime;
+
+    public TpsMerger() {
+        createTime = System.currentTimeMillis();
+    }
+
+    public long getRunMillis(Object... args) {
+        long startTime = createTime;
+
+        if (args != null && args.length > 0) {
+            if (args[0] instanceof StartTime) {
+                StartTime rollingWindow = (StartTime) args[0];
+
+                startTime = rollingWindow.getStartTime();
+            }
+        }
+
+        return (System.currentTimeMillis() - startTime);
+    }
+
+    @Override
+    public Double merge(Collection<Double> objs, Double unflushed, Object... others) {
+        // Sum the unflushed value and all flushed window counts, then normalize below.
+        double sum = 0.0d;
+        if (unflushed != null) {
+            sum += unflushed;
+        }
+
+        for (Double item : objs) {
+            if (item != null) {
+                sum += item;
+            }
+        }
+
+        Double ret = (sum * 1000) / getRunMillis(others);
+        return ret;
+    }
+
+}

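In effect the merged value is (total count * 1000) / elapsed-ms. For example,
if the flushed windows plus the unflushed value total 500 tuples and
getRunMillis reports 2,000 ms since the window's StartTime, the merged rate
is (500 * 1000) / 2000 = 250 tuples per second.
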
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/updater/AddUpdater.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/updater/AddUpdater.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/updater/AddUpdater.java
new file mode 100644
index 0000000..a6c06f6
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/updater/AddUpdater.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old.operator.updater;
+
+import com.alibaba.jstorm.utils.JStormUtils;
+
+public class AddUpdater<T extends Number> implements Updater<T> {
+    private static final long serialVersionUID = -7955740095421752763L;
+
+    @SuppressWarnings("unchecked")
+    @Override
+    public T update(Number object, T cache, Object... others) {
+        // Add the incoming sample to the cached running total.
+        return (T) JStormUtils.add(cache, object);
+    }
+
+    @Override
+    public T updateBatch(T object, T cache, Object... objects) {
+        // A pre-aggregated batch folds in with the same addition.
+        return (T) JStormUtils.add(cache, object);
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/updater/AvgUpdater.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/updater/AvgUpdater.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/updater/AvgUpdater.java
new file mode 100644
index 0000000..a0b7aa1
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/updater/AvgUpdater.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old.operator.updater;
+
+import com.alibaba.jstorm.common.metric.old.Histogram;
+
+public class AvgUpdater implements Updater<Histogram.HistorgramPair> {
+    private static final long serialVersionUID = 2562836921724586449L;
+
+    @Override
+    public Histogram.HistorgramPair update(Number object, Histogram.HistorgramPair cache, Object... others) {
+        // Record one sample: add its value to the running sum and bump the count by one.
+        if (object == null) {
+            return cache;
+        }
+        if (cache == null) {
+            cache = new Histogram.HistorgramPair();
+        }
+
+        cache.addValue(object.doubleValue());
+        cache.addTimes(1L);
+
+        return cache;
+    }
+
+    @Override
+    public Histogram.HistorgramPair updateBatch(Histogram.HistorgramPair object, Histogram.HistorgramPair cache, Object... objects) {
+        // Fold a pre-aggregated (sum, times) pair from another window into the cache.
+        if (object == null) {
+            return cache;
+        }
+        if (cache == null) {
+            cache = new Histogram.HistorgramPair();
+        }
+
+        cache.addValue(object.getSum());
+        cache.addTimes(object.getTimes());
+
+        return cache;
+    }
+
+}
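
AvgUpdater stores an average as a (sum, times) pair rather than a float, so
buckets can be merged without losing precision; the mean is derived only at
read time. A sketch of that read side (demo class illustrative; getSum() and
getTimes() are the accessors used elsewhere in this diff):

    import com.alibaba.jstorm.common.metric.old.Histogram;
    import com.alibaba.jstorm.common.metric.old.operator.updater.AvgUpdater;

    public class AvgUpdaterDemo {
        public static void main(String[] args) {
            AvgUpdater updater = new AvgUpdater();
            Histogram.HistorgramPair pair = null;
            pair = updater.update(10, pair);
            pair = updater.update(20, pair);
            // Guard against an empty window before dividing.
            double avg = pair.getTimes() == 0 ? 0.0 : pair.getSum() / pair.getTimes();
            System.out.println(avg); // 15.0
        }
    }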

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/updater/DoubleAddUpdater.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/updater/DoubleAddUpdater.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/updater/DoubleAddUpdater.java
new file mode 100644
index 0000000..b25f97b
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/updater/DoubleAddUpdater.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old.operator.updater;
+
+import com.google.common.util.concurrent.AtomicDouble;
+
+public class DoubleAddUpdater implements Updater<AtomicDouble> {
+    private static final long serialVersionUID = -1293565961076552462L;
+
+    @Override
+    public AtomicDouble update(Number object, AtomicDouble cache, Object... others) {
+        // Lazily allocate the accumulator and ignore null samples.
+        if (cache == null) {
+            cache = new AtomicDouble(0.0);
+        }
+        if (object != null) {
+            cache.addAndGet(object.doubleValue());
+        }
+        return cache;
+    }
+
+    @Override
+    public AtomicDouble updateBatch(AtomicDouble object, AtomicDouble cache, Object... objects) {
+        // An AtomicDouble is itself a Number, so batches reuse the single-sample path.
+        return update(object, cache, objects);
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/updater/LongAddUpdater.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/updater/LongAddUpdater.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/updater/LongAddUpdater.java
new file mode 100644
index 0000000..8db6f21
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/updater/LongAddUpdater.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old.operator.updater;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+public class LongAddUpdater implements Updater<AtomicLong> {
+    private static final long serialVersionUID = -2185639264737912405L;
+
+    @Override
+    public AtomicLong update(Number object, AtomicLong cache, Object... others) {
+        // Allocate the counter on first use; null samples are ignored.
+        if (cache == null) {
+            cache = new AtomicLong(0);
+        }
+
+        if (object != null) {
+            cache.addAndGet(object.longValue());
+        }
+        return cache;
+    }
+
+    @Override
+    public AtomicLong updateBatch(AtomicLong object, AtomicLong cache, Object... objects) {
+        // An AtomicLong is itself a Number, so batches reuse the single-sample path.
+        return update(object, cache, objects);
+    }
+
+}
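
DoubleAddUpdater and LongAddUpdater share one pattern: allocate the atomic
accumulator lazily, ignore null samples, and reuse update() for batches,
since AtomicLong and AtomicDouble are themselves Numbers. For example (demo
class illustrative):

    import java.util.concurrent.atomic.AtomicLong;

    import com.alibaba.jstorm.common.metric.old.operator.updater.LongAddUpdater;

    public class LongAddUpdaterDemo {
        public static void main(String[] args) {
            LongAddUpdater updater = new LongAddUpdater();
            AtomicLong counter = null;
            counter = updater.update(5, counter);     // allocates, then adds 5
            counter = updater.update(null, counter);  // null sample: no-op
            counter = updater.updateBatch(new AtomicLong(7), counter);
            System.out.println(counter.get());        // 12
        }
    }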

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/updater/Updater.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/updater/Updater.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/updater/Updater.java
new file mode 100644
index 0000000..42d7b58
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/updater/Updater.java
@@ -0,0 +1,26 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old.operator.updater;
+
+import java.io.Serializable;
+
+public interface Updater<V> extends Serializable {
+    V update(Number object, V cache, Object... others);
+
+    V updateBatch(V object, V cache, Object... objects);
+}
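
The interface splits the hot path from the flush path: update(Number, V)
folds one raw sample into a cache, while updateBatch(V, V) folds one
already-aggregated cache into another when a window flushes. Any aggregate
whose batch merge can be expressed that way fits; for instance a hypothetical
maximum tracker (not part of this commit):

    import com.alibaba.jstorm.common.metric.old.operator.updater.Updater;

    public class MaxUpdater implements Updater<Double> {
        private static final long serialVersionUID = 1L;

        @Override
        public Double update(Number object, Double cache, Object... others) {
            if (object == null) {
                return cache;
            }
            double v = object.doubleValue();
            return cache == null ? v : Math.max(cache, v);
        }

        @Override
        public Double updateBatch(Double object, Double cache, Object... objects) {
            // A max of maxes is still the max, so batches reuse the sample path.
            return update(object, cache, objects);
        }
    }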

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/window/AllWindow.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/window/AllWindow.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/window/AllWindow.java
new file mode 100644
index 0000000..244db74
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/window/AllWindow.java
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old.window;
+
+import com.alibaba.jstorm.common.metric.old.operator.Sampling;
+import com.alibaba.jstorm.common.metric.old.operator.StartTime;
+import com.alibaba.jstorm.common.metric.old.operator.merger.Merger;
+import com.alibaba.jstorm.common.metric.old.operator.updater.Updater;
+
+import java.util.ArrayList;
+
+public class AllWindow<V> implements Sampling<V>, StartTime {
+
+    private static final long serialVersionUID = -8523514907315740812L;
+
+    protected V unflushed;
+    protected V defaultValue;
+
+    protected Updater<V> updater;
+    protected Merger<V> merger;
+    protected long startTime;
+
+    AllWindow(V defaultValue, Updater<V> updater, Merger<V> merger) {
+
+        this.updater = updater;
+        this.merger = merger;
+
+        this.defaultValue = defaultValue;
+        this.startTime = System.currentTimeMillis();
+    }
+
+    @Override
+    public void update(Number obj) {
+        // Accumulate under the lock; the cache is merged out in getSnapshot().
+        synchronized (this) {
+            unflushed = updater.update(obj, unflushed);
+        }
+    }
+
+    public void updateBatch(V batch) {
+        synchronized (this) {
+            unflushed = updater.updateBatch(batch, unflushed);
+        }
+    }
+
+    @Override
+    public V getSnapshot() {
+        // No expiring buckets here: merge just the unflushed cache.
+        V ret = merger.merge(new ArrayList<V>(), unflushed, this);
+        if (ret == null) {
+            return defaultValue;
+        } else {
+            return ret;
+        }
+    }
+
+    @Override
+    public long getStartTime() {
+        return startTime;
+    }
+
+}
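
AllWindow is the degenerate window: nothing ever expires, so getSnapshot()
merges an empty bucket list with the single unflushed cache and falls back
to defaultValue when no sample has arrived. Its constructor is
package-private and is normally invoked from Metric.init(); a same-package
sketch (demo class illustrative; LongSumMerger is assumed to be the
relocated copy of the merger whose pre-move version is deleted further down
in this diff):

    package com.alibaba.jstorm.common.metric.old.window;

    import java.util.concurrent.atomic.AtomicLong;

    import com.alibaba.jstorm.common.metric.old.operator.merger.LongSumMerger;
    import com.alibaba.jstorm.common.metric.old.operator.updater.LongAddUpdater;

    public class AllWindowDemo {
        public static void main(String[] args) {
            AllWindow<AtomicLong> total = new AllWindow<AtomicLong>(
                    new AtomicLong(0), new LongAddUpdater(), new LongSumMerger());
            total.update(5);
            total.update(7);
            System.out.println(total.getSnapshot().get()); // 12
        }
    }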

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/window/Metric.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/window/Metric.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/window/Metric.java
new file mode 100644
index 0000000..e505a1f
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/window/Metric.java
@@ -0,0 +1,224 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old.window;
+
+import com.alibaba.jstorm.callback.Callback;
+import com.alibaba.jstorm.common.metric.old.operator.Sampling;
+import com.alibaba.jstorm.common.metric.old.operator.convert.Convertor;
+import com.alibaba.jstorm.common.metric.old.operator.merger.Merger;
+import com.alibaba.jstorm.common.metric.old.operator.updater.Updater;
+import com.alibaba.jstorm.utils.IntervalCheck;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+public class Metric<T, V> implements Sampling<Map<Integer, T>> {
+    private static final long serialVersionUID = -1362345159511508074L;
+    private static final Logger LOG = LoggerFactory.getLogger(Metric.class);
+
+    protected static boolean enable;
+
+    public static void setEnable(boolean e) {
+        enable = e;
+    }
+
+    protected List<RollingWindow<V>> rollingWindows;
+    protected AllWindow<V> allWindow;
+
+    protected int[] windowSeconds = { StatBuckets.MINUTE_WINDOW, StatBuckets.HOUR_WINDOW, StatBuckets.DAY_WINDOW };
+    protected int bucketSize = StatBuckets.NUM_STAT_BUCKETS;
+    protected V defaultValue;
+    protected Updater<V> updater;
+    protected Merger<V> merger;
+    protected Convertor<V, T> convertor;
+    protected Callback callback;
+
+    protected int interval; // unit is second
+    protected IntervalCheck intervalCheck;
+    protected V unflushed;
+
+    public Metric() {
+    }
+
+    public int getInterval() {
+        if (windowSeconds == null || windowSeconds.length == 0) {
+            return StatBuckets.NUM_STAT_BUCKETS;
+        }
+
+        int[] intervals = new int[windowSeconds.length];
+        int smallest = Integer.MAX_VALUE;
+        for (int i = 0; i < windowSeconds.length; i++) {
+            int interval = windowSeconds[i] / bucketSize;
+            intervals[i] = interval;
+            if (interval < smallest) {
+                smallest = interval;
+            }
+        }
+
+        for (int goodInterval = smallest; goodInterval > 1; goodInterval--) {
+            boolean good = true;
+            for (int interval : intervals) {
+                if (interval % goodInterval != 0) {
+                    good = false;
+                    break;
+                }
+            }
+
+            if (good) {
+                return goodInterval;
+            }
+        }
+
+        return 1;
+    }
+
+    public void init() {
+        if (defaultValue == null || updater == null || merger == null || convertor == null) {
+            throw new IllegalArgumentException("defaultValue, updater, merger and convertor must all be set");
+        }
+
+        rollingWindows = new ArrayList<RollingWindow<V>>();
+        if (windowSeconds != null) {
+            for (int windowSize : windowSeconds) {
+                RollingWindow<V> rollingWindow = new RollingWindow<V>(defaultValue, windowSize / bucketSize, windowSize, updater, merger);
+
+                rollingWindows.add(rollingWindow);
+            }
+
+        }
+        allWindow = new AllWindow<V>(defaultValue, updater, merger);
+
+        this.interval = getInterval();
+        this.intervalCheck = new IntervalCheck();
+        this.intervalCheck.setInterval(interval);
+    }
+
+    /**
+     * Hot path: accumulate into the unflushed cache and only fold it into
+     * the windows when the interval check fires, keeping per-update cost low.
+     */
+    @Override
+    public void update(Number obj) {
+        if (!enable) {
+            return;
+        }
+
+        if (intervalCheck.check()) {
+            flush();
+        }
+        synchronized (this) {
+            unflushed = updater.update(obj, unflushed);
+        }
+    }
+
+    public synchronized void flush() {
+        if (unflushed == null) {
+            return;
+        }
+        for (RollingWindow<V> rollingWindow : rollingWindows) {
+            rollingWindow.updateBatch(unflushed);
+        }
+        allWindow.updateBatch(unflushed);
+        unflushed = null;
+    }
+
+    @Override
+    public Map<Integer, T> getSnapshot() {
+        // Push pending samples into the windows before reading them.
+        flush();
+
+        Map<Integer, T> ret = new TreeMap<Integer, T>();
+        for (RollingWindow<V> rollingWindow : rollingWindows) {
+            V value = rollingWindow.getSnapshot();
+
+            ret.put(rollingWindow.getWindowSecond(), convertor.convert(value));
+        }
+
+        ret.put(StatBuckets.ALL_TIME_WINDOW, convertor.convert(allWindow.getSnapshot()));
+
+        if (callback != null) {
+            callback.execute(this);
+        }
+        return ret;
+    }
+
+    public T getAllTimeValue() {
+        return convertor.convert(allWindow.getSnapshot());
+    }
+
+    public int[] getWindowSeconds() {
+        return windowSeconds;
+    }
+
+    public void setWindowSeconds(int[] windowSeconds) {
+        this.windowSeconds = windowSeconds;
+    }
+
+    public int getBucketSize() {
+        return bucketSize;
+    }
+
+    public void setBucketSize(int bucketSize) {
+        this.bucketSize = bucketSize;
+    }
+
+    public V getDefaultValue() {
+        return defaultValue;
+    }
+
+    public void setDefaultValue(V defaultValue) {
+        this.defaultValue = defaultValue;
+    }
+
+    public Updater<V> getUpdater() {
+        return updater;
+    }
+
+    public void setUpdater(Updater<V> updater) {
+        this.updater = updater;
+    }
+
+    public Merger<V> getMerger() {
+        return merger;
+    }
+
+    public void setMerger(Merger<V> merger) {
+        this.merger = merger;
+    }
+
+    public Convertor<V, T> getConvertor() {
+        return convertor;
+    }
+
+    public void setConvertor(Convertor<V, T> convertor) {
+        this.convertor = convertor;
+    }
+
+    public Callback getCallback() {
+        return callback;
+    }
+
+    public void setCallback(Callback callback) {
+        this.callback = callback;
+    }
+
+}
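
Worked example of getInterval(): with the default windows {600, 10800, 86400}
and 20 buckets per window, the per-window bucket widths are 30 s, 540 s and
4320 s; the method returns the largest value that evenly divides all three,
here 30, so one flush cadence serves every window. Wiring a counter metric
end to end might look like this (demo class illustrative; AtomicLongToLong
and LongSumMerger are assumed to be the relocated counterparts of the
classes deleted further down in this diff):

    import java.util.concurrent.atomic.AtomicLong;

    import com.alibaba.jstorm.common.metric.old.operator.convert.AtomicLongToLong;
    import com.alibaba.jstorm.common.metric.old.operator.merger.LongSumMerger;
    import com.alibaba.jstorm.common.metric.old.operator.updater.LongAddUpdater;
    import com.alibaba.jstorm.common.metric.old.window.Metric;

    public class CounterMetricDemo {
        public static void main(String[] args) {
            Metric.setEnable(true); // updates are dropped while disabled

            Metric<Long, AtomicLong> counter = new Metric<Long, AtomicLong>();
            counter.setDefaultValue(new AtomicLong(0));
            counter.setUpdater(new LongAddUpdater());
            counter.setMerger(new LongSumMerger());
            counter.setConvertor(new AtomicLongToLong());
            counter.init(); // flush interval resolves to 30 s as computed above

            counter.update(1);
            counter.update(1);
            // Keys are window lengths in seconds; 0 is the all-time window.
            System.out.println(counter.getSnapshot()); // {0=2, 600=2, 10800=2, 86400=2}
        }
    }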

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/window/RollingWindow.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/window/RollingWindow.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/window/RollingWindow.java
new file mode 100644
index 0000000..5963951
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/window/RollingWindow.java
@@ -0,0 +1,189 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old.window;
+
+import com.alibaba.jstorm.common.metric.old.operator.Sampling;
+import com.alibaba.jstorm.common.metric.old.operator.StartTime;
+import com.alibaba.jstorm.common.metric.old.operator.merger.Merger;
+import com.alibaba.jstorm.common.metric.old.operator.updater.Updater;
+import com.alibaba.jstorm.utils.IntervalCheck;
+import com.alibaba.jstorm.utils.TimeUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.TreeMap;
+
+public class RollingWindow<V> implements Sampling<V>, StartTime {
+    private static final long serialVersionUID = 3794478417380003279L;
+    private static final Logger LOG = LoggerFactory.getLogger(RollingWindow.class);
+
+    protected long startTime;
+    protected Integer currBucketTime;
+    protected int interval; // unit is second
+    protected int windowSecond;
+    protected IntervalCheck intervalCheck;
+
+    protected TreeMap<Integer, V> buckets;
+    protected Integer bucketNum;
+    protected V unflushed;
+    protected V defaultValue;
+
+    protected Updater<V> updater;
+    protected Merger<V> merger;
+
+    RollingWindow(V defaultValue, int interval, int windowSecond, Updater<V> updater, Merger<V> merger) {
+        this.startTime = System.currentTimeMillis();
+        this.interval = interval;
+        this.intervalCheck = new IntervalCheck();
+        this.intervalCheck.setInterval(interval);
+        this.currBucketTime = getCurrBucketTime();
+
+        this.bucketNum = windowSecond / interval;
+        this.windowSecond = bucketNum * interval;
+
+        this.buckets = new TreeMap<Integer, V>();
+
+        this.updater = updater;
+        this.merger = merger;
+
+        this.defaultValue = defaultValue;
+
+    }
+
+    @Override
+    public void update(Number obj) {
+        // Roll the current bucket forward before accumulating the new sample.
+        if (intervalCheck.check()) {
+            rolling();
+        }
+        synchronized (this) {
+            unflushed = updater.update(obj, unflushed);
+        }
+    }
+
+    /**
+     * Flush one pre-aggregated batch into the rolling window, so callers pay
+     * the lock cost once per flush instead of once per sample.
+     */
+    public void updateBatch(V batch) {
+        if (intervalCheck.check()) {
+            rolling();
+        }
+        synchronized (this) {
+            unflushed = updater.updateBatch(batch, unflushed);
+        }
+    }
+
+    @Override
+    public V getSnapshot() {
+        if (intervalCheck.check()) {
+            rolling();
+        }
+
+        cleanExpiredBuckets();
+
+        Collection<V> values = buckets.values();
+
+        V ret = merger.merge(values, unflushed, this);
+        if (ret == null) {
+            // No buckets and nothing unflushed: report the default.
+            return defaultValue;
+        }
+        return ret;
+    }
+
+    /*
+     * Move the "current bucket time" index and clean the expired buckets
+     */
+    protected void rolling() {
+        synchronized (this) {
+            if (unflushed != null) {
+                buckets.put(currBucketTime, unflushed);
+                unflushed = null;
+            }
+
+            currBucketTime = getCurrBucketTime();
+        }
+    }
+
+    protected void cleanExpiredBuckets() {
+        int nowSec = TimeUtils.current_time_secs();
+        int startRemove = nowSec - (interval - 1) - windowSecond;
+
+        List<Integer> removeList = new ArrayList<Integer>();
+
+        // Keys ascend, so stop at the first bucket still inside the window.
+        for (Integer keyTime : buckets.keySet()) {
+            if (keyTime < startRemove) {
+                removeList.add(keyTime);
+            } else {
+                break;
+            }
+        }
+
+        for (Integer removeKey : removeList) {
+            buckets.remove(removeKey);
+        }
+
+        if (!buckets.isEmpty()) {
+            Integer first = buckets.firstKey();
+            startTime = first.longValue() * 1000;
+        }
+    }
+
+    public int getWindowSecond() {
+        return windowSecond;
+    }
+
+    public long getStartTime() {
+        return startTime;
+    }
+
+    public int getInterval() {
+        return interval;
+    }
+
+    public Integer getBucketNum() {
+        return bucketNum;
+    }
+
+    public V getDefaultValue() {
+        return defaultValue;
+    }
+
+    private Integer getCurrBucketTime() {
+        return (TimeUtils.current_time_secs() / interval) * interval;
+    }
+}
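
RollingWindow keys its buckets by wall-clock seconds quantized to the bucket
interval, so every writer agrees on the current bucket without extra state,
and expiry reduces to a key comparison. The arithmetic, with illustrative
values (demo class is not part of this commit):

    public class BucketMathDemo {
        public static void main(String[] args) {
            int interval = 30;          // seconds per bucket
            int windowSecond = 600;     // 20 buckets of 30 s
            int nowSec = 1700000017;    // any epoch-seconds clock reading

            // Key of the bucket currently being filled (see getCurrBucketTime).
            int bucketKey = (nowSec / interval) * interval;           // 1700000010
            // Buckets with keys below this are expired (see cleanExpiredBuckets).
            int startRemove = nowSec - (interval - 1) - windowSecond; // 1699999388
            System.out.println(bucketKey + " " + startRemove);
        }
    }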

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/window/StatBuckets.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/window/StatBuckets.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/window/StatBuckets.java
new file mode 100644
index 0000000..30f5c64
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/window/StatBuckets.java
@@ -0,0 +1,176 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old.window;
+
+import com.google.common.base.Joiner;
+
+import java.util.*;
+
+public class StatBuckets {
+
+    public static final Integer NUM_STAT_BUCKETS = 20;
+
+    public static final Integer MINUTE_WINDOW = 600;
+    public static final Integer HOUR_WINDOW = 10800;
+    public static final Integer DAY_WINDOW = 86400;
+    public static final Integer ALL_TIME_WINDOW = 0;
+    public static final Set<Integer> TIME_WINDOWS = new TreeSet<Integer>();
+    static {
+        TIME_WINDOWS.add(ALL_TIME_WINDOW);
+        TIME_WINDOWS.add(MINUTE_WINDOW);
+        TIME_WINDOWS.add(HOUR_WINDOW);
+        TIME_WINDOWS.add(DAY_WINDOW);
+    }
+
+    public static final String MINUTE_WINDOW_STR = "0d0h10m0s";
+    public static final String HOUR_WINDOW_STR = "0d3h0m0s";
+    public static final String DAY_WINDOW_STR = "1d0h0m0s";
+    public static final String ALL_WINDOW_STR = "All-time";
+
+    public static Integer[] STAT_BUCKETS = { MINUTE_WINDOW / NUM_STAT_BUCKETS, HOUR_WINDOW / NUM_STAT_BUCKETS, DAY_WINDOW / NUM_STAT_BUCKETS };
+
+    private static final String[][] PRETTYSECDIVIDERS = { new String[] { "s", "60" }, new String[] { "m", "60" }, new String[] { "h", "24" },
+            new String[] { "d", null } };
+
+    /**
+     * Render a time-window key for display; 0 denotes the all-time window.
+     * 
+     * @param key
+     * @return
+     */
+    public static String parseTimeKey(Integer key) {
+        if (key == 0) {
+            return ALL_WINDOW_STR;
+        } else {
+            return String.valueOf(key);
+        }
+    }
+
+    /**
+     * Map a display string back to its time-window key in seconds.
+     * Null or unrecognized input defaults to the minute window.
+     * 
+     * @param showKey
+     * @return
+     */
+    public static Integer getTimeKey(String showKey) {
+        Integer window = null;
+        if (showKey == null) {
+            window = (MINUTE_WINDOW);
+        } else if (showKey.equals(MINUTE_WINDOW_STR)) {
+            window = (MINUTE_WINDOW);
+        } else if (showKey.equals(HOUR_WINDOW_STR)) {
+            window = (HOUR_WINDOW);
+        } else if (showKey.equals(DAY_WINDOW_STR)) {
+            window = (DAY_WINDOW);
+        } else if (showKey.equals(ALL_WINDOW_STR)) {
+            window = ALL_TIME_WINDOW;
+        } else {
+            window = MINUTE_WINDOW;
+        }
+
+        return window;
+    }
+
+    /**
+     * Map a time-window key back to its display string.
+     * Null or unrecognized input defaults to the minute window string.
+     * 
+     * @param time
+     * @return
+     */
+    public static String getShowTimeStr(Integer time) {
+        if (time == null) {
+            return MINUTE_WINDOW_STR;
+        } else if (time.equals(MINUTE_WINDOW)) {
+            return MINUTE_WINDOW_STR;
+        } else if (time.equals(HOUR_WINDOW)) {
+            return HOUR_WINDOW_STR;
+        } else if (time.equals(DAY_WINDOW)) {
+            return DAY_WINDOW_STR;
+        } else if (time.equals(ALL_TIME_WINDOW)) {
+            return ALL_WINDOW_STR;
+        } else {
+            return MINUTE_WINDOW_STR;
+        }
+
+    }
+
+    /**
+     * seconds to string like 1d20h30m40s
+     * 
+     * @param secs
+     * @return
+     */
+    public static String prettyUptimeStr(int secs) {
+        int dividerSize = PRETTYSECDIVIDERS.length;
+
+        List<String> tmp = new ArrayList<String>();
+        int div = secs;
+        for (int i = 0; i < dividerSize; i++) {
+            if (PRETTYSECDIVIDERS[i][1] != null) {
+                int d = Integer.parseInt(PRETTYSECDIVIDERS[i][1]);
+                tmp.add(div % d + PRETTYSECDIVIDERS[i][0]);
+                div = div / d;
+            } else {
+                tmp.add(div + PRETTYSECDIVIDERS[i][0]);
+            }
+        }
+
+        StringBuilder rtn = new StringBuilder();
+        for (int j = tmp.size() - 1; j > -1; j--) {
+            rtn.append(tmp.get(j));
+        }
+        return rtn.toString();
+    }
+
+    /**
+     * seconds to string like '30m 40s' and '1d 20h 30m 40s'
+     *
+     * @param secs
+     * @return
+     */
+    public static String prettyUptime(int secs) {
+        int dividerSize = PRETTYSECDIVIDERS.length;
+
+        LinkedList<String> tmp = new LinkedList<>();
+        int div = secs;
+        for (int i = 0; i < dividerSize; i++) {
+            if (PRETTYSECDIVIDERS[i][1] != null) {
+                int d = Integer.parseInt(PRETTYSECDIVIDERS[i][1]);
+                tmp.addFirst(div % d + PRETTYSECDIVIDERS[i][0]);
+                div = div / d;
+            } else {
+                tmp.addFirst(div + PRETTYSECDIVIDERS[i][0]);
+            }
+            if (div <= 0) break;
+        }
+
+        Joiner joiner = Joiner.on(" ");
+        return joiner.join(tmp);
+    }
+
+}
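
The two pretty-printers differ only in padding: prettyUptimeStr always emits
all four units, while prettyUptime stops once the remaining quotient hits
zero and joins the parts with spaces. For example (demo class illustrative):

    import com.alibaba.jstorm.common.metric.old.window.StatBuckets;

    public class UptimeDemo {
        public static void main(String[] args) {
            System.out.println(StatBuckets.prettyUptimeStr(90061)); // 1d1h1m1s
            System.out.println(StatBuckets.prettyUptime(90061));    // 1d 1h 1m 1s
            System.out.println(StatBuckets.prettyUptimeStr(90));    // 0d0h1m30s
            System.out.println(StatBuckets.prettyUptime(90));       // 1m 30s
        }
    }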

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/Sampling.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/Sampling.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/Sampling.java
deleted file mode 100755
index 3d32cc9..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/Sampling.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric.operator;
-
-import java.io.Serializable;
-
-public interface Sampling<V> extends Serializable {
-
-    /**
-     * Update object into Metric
-     * 
-     * @param obj
-     */
-    void update(Number obj);
-
-    /**
-     * 
-     * Get snapshot of Metric
-     * 
-     * @return
-     */
-    V getSnapshot();
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/StartTime.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/StartTime.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/StartTime.java
deleted file mode 100755
index 0b6173f..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/StartTime.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric.operator;
-
-public interface StartTime {
-    long getStartTime();
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/convert/AtomicLongToLong.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/convert/AtomicLongToLong.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/convert/AtomicLongToLong.java
deleted file mode 100755
index 8f142f1..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/convert/AtomicLongToLong.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric.operator.convert;
-
-import java.util.concurrent.atomic.AtomicLong;
-
-public class AtomicLongToLong implements Convertor<AtomicLong, Long> {
-    private static final long serialVersionUID = -2755066621494409063L;
-
-    @Override
-    public Long convert(AtomicLong obj) {
-        // TODO Auto-generated method stub
-        if (obj == null) {
-            return null;
-        } else {
-            return obj.get();
-        }
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/convert/Convertor.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/convert/Convertor.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/convert/Convertor.java
deleted file mode 100755
index 73cdceb..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/convert/Convertor.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric.operator.convert;
-
-import java.io.Serializable;
-
-public interface Convertor<From, To> extends Serializable {
-
-    To convert(From obj);
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/convert/DefaultConvertor.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/convert/DefaultConvertor.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/convert/DefaultConvertor.java
deleted file mode 100755
index 47065d0..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/convert/DefaultConvertor.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric.operator.convert;
-
-public class DefaultConvertor<T> implements Convertor<T, T> {
-    private static final long serialVersionUID = -647209923903679727L;
-
-    @Override
-    public T convert(T obj) {
-        // TODO Auto-generated method stub
-        return obj;
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/convert/SetToList.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/convert/SetToList.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/convert/SetToList.java
deleted file mode 100755
index 4891222..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/convert/SetToList.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric.operator.convert;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-
-public class SetToList<T> implements Convertor<Set<T>, List<T>> {
-    private static final long serialVersionUID = 4968816655779625255L;
-
-    @Override
-    public List<T> convert(Set<T> set) {
-        // TODO Auto-generated method stub
-        List<T> ret = new ArrayList<T>();
-        if (set != null) {
-            for (T item : set) {
-                ret.add(item);
-            }
-        }
-        return ret;
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/merger/AvgMerger.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/merger/AvgMerger.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/merger/AvgMerger.java
deleted file mode 100755
index 3ad94f2..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/merger/AvgMerger.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric.operator.merger;
-
-import java.util.Collection;
-
-import com.alibaba.jstorm.common.metric.Histogram;
-import com.alibaba.jstorm.utils.Pair;
-
-public class AvgMerger implements Merger<Histogram.HistorgramPair> {
-    private static final long serialVersionUID = -3892281208959055221L;
-
-    @Override
-    public Histogram.HistorgramPair merge(
-            Collection<Histogram.HistorgramPair> objs,
-            Histogram.HistorgramPair unflushed, Object... others) {
-        // TODO Auto-generated method stub
-        double sum = 0.0d;
-        long times = 0l;
-
-        if (unflushed != null) {
-            sum = sum + unflushed.getSum();
-            times = times + unflushed.getTimes();
-        }
-
-        for (Histogram.HistorgramPair item : objs) {
-            if (item == null) {
-                continue;
-            }
-            sum = sum + item.getSum();
-            times = times + item.getTimes();
-        }
-
-        return new Histogram.HistorgramPair(sum, times);
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/merger/AvgMerger.java.bak
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/merger/AvgMerger.java.bak b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/merger/AvgMerger.java.bak
deleted file mode 100755
index 6f82888..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/merger/AvgMerger.java.bak
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric.operator.merger;
-
-import java.util.Collection;
-import java.util.concurrent.atomic.AtomicLong;
-
-import com.alibaba.jstorm.utils.Pair;
-import com.google.common.util.concurrent.AtomicDouble;
-
-public class AvgMerger2 implements Merger<Pair<AtomicDouble, AtomicLong>> {
-    private static final long serialVersionUID = -3892281208959055221L;
-
-    @Override
-    public Pair<AtomicDouble, AtomicLong> merge(
-            Collection<Pair<AtomicDouble, AtomicLong>> objs,
-            Pair<AtomicDouble, AtomicLong> unflushed, Object... others) {
-        // TODO Auto-generated method stub
-        AtomicDouble sum = new AtomicDouble(0.0);
-        AtomicLong times = new AtomicLong(0);
-
-        if (unflushed != null) {
-            sum.addAndGet(unflushed.getFirst().get());
-            times.addAndGet(unflushed.getSecond().get());
-        }
-
-        for (Pair<AtomicDouble, AtomicLong> item : objs) {
-            if (item == null) {
-                continue;
-            }
-            sum.addAndGet(item.getFirst().get());
-            times.addAndGet(item.getSecond().get());
-        }
-
-        return new Pair<AtomicDouble, AtomicLong>(sum, times);
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/merger/LongSumMerger.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/merger/LongSumMerger.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/merger/LongSumMerger.java
deleted file mode 100755
index 30ded34..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/merger/LongSumMerger.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric.operator.merger;
-
-import java.util.Collection;
-import java.util.concurrent.atomic.AtomicLong;
-
-public class LongSumMerger implements Merger<AtomicLong> {
-    private static final long serialVersionUID = -3500779273677666691L;
-
-    @Override
-    public AtomicLong merge(Collection<AtomicLong> objs, AtomicLong unflushed,
-            Object... others) {
-        AtomicLong ret = new AtomicLong(0);
-        if (unflushed != null) {
-            ret.addAndGet(unflushed.get());
-        }
-
-        for (AtomicLong item : objs) {
-            if (item == null) {
-                continue;
-            }
-            ret.addAndGet(item.get());
-        }
-        return ret;
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/merger/Merger.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/merger/Merger.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/merger/Merger.java
deleted file mode 100755
index 0483458..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/merger/Merger.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric.operator.merger;
-
-import java.io.Serializable;
-import java.util.Collection;
-
-public interface Merger<V> extends Serializable {
-    V merge(Collection<V> objs, V unflushed, Object... others);
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/merger/SumMerger.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/merger/SumMerger.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/merger/SumMerger.java
deleted file mode 100755
index ead3c53..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/merger/SumMerger.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric.operator.merger;
-
-import java.util.Collection;
-
-import com.alibaba.jstorm.utils.JStormUtils;
-
-public class SumMerger<T extends Number> implements Merger<T> {
-    private static final long serialVersionUID = -7026523452570138433L;
-
-    @SuppressWarnings("unchecked")
-    @Override
-    public T merge(Collection<T> objs, T unflushed, Object... others) {
-        // TODO Auto-generated method stub
-        T ret = unflushed;
-        for (T obj : objs) {
-            ret = (T) JStormUtils.add(ret, obj);
-        }
-
-        return ret;
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/merger/TpsMerger.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/merger/TpsMerger.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/merger/TpsMerger.java
deleted file mode 100755
index 859f642..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/merger/TpsMerger.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric.operator.merger;
-
-import java.util.Collection;
-
-import com.alibaba.jstorm.common.metric.operator.StartTime;
-
-public class TpsMerger implements Merger<Double> {
-    private static final long serialVersionUID = -4534840881635955942L;
-    protected final long createTime;
-
-    public TpsMerger() {
-        createTime = System.currentTimeMillis();
-    }
-
-    public long getRunMillis(Object... args) {
-        long startTime = createTime;
-
-        if (args != null) {
-            if (args[0] != null && args[0] instanceof StartTime) {
-                StartTime rollingWindow = (StartTime) args[0];
-
-                startTime = rollingWindow.getStartTime();
-            }
-        }
-
-        return (System.currentTimeMillis() - startTime);
-    }
-
-    @Override
-    public Double merge(Collection<Double> objs, Double unflushed,
-            Object... others) {
-        // TODO Auto-generated method stub
-        double sum = 0.0d;
-        if (unflushed != null) {
-            sum += unflushed;
-        }
-
-        for (Double item : objs) {
-            if (item != null) {
-                sum += item;
-            }
-        }
-
-        Double ret = (sum * 1000) / getRunMillis(others);
-        return ret;
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/updater/AddUpdater.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/updater/AddUpdater.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/updater/AddUpdater.java
deleted file mode 100755
index 4fdf813..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/updater/AddUpdater.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric.operator.updater;
-
-import com.alibaba.jstorm.utils.JStormUtils;
-
-public class AddUpdater<T extends Number> implements Updater<T> {
-    private static final long serialVersionUID = -7955740095421752763L;
-
-    @SuppressWarnings("unchecked")
-    @Override
-    public T update(Number object, T cache, Object... others) {
-        // TODO Auto-generated method stub
-        return (T) JStormUtils.add(cache, object);
-    }
-
-    @Override
-    public T updateBatch(T object, T cache, Object... objects) {
-        // TODO Auto-generated method stub
-        return (T) JStormUtils.add(cache, object);
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/updater/AvgUpdater.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/updater/AvgUpdater.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/updater/AvgUpdater.java
deleted file mode 100755
index 30ae46c..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/updater/AvgUpdater.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric.operator.updater;
-
-import com.alibaba.jstorm.common.metric.Histogram;
-
-public class AvgUpdater implements Updater<Histogram.HistorgramPair> {
-    private static final long serialVersionUID = 2562836921724586449L;
-
-    @Override
-    public Histogram.HistorgramPair update(Number object,
-            Histogram.HistorgramPair cache, Object... others) {
-        // TODO Auto-generated method stub
-        if (object == null) {
-            return cache;
-        }
-        if (cache == null) {
-            cache =
-                    new Histogram.HistorgramPair();
-        }
-
-        cache.addValue(object.doubleValue());
-        cache.addTimes(1l);
-
-        return cache;
-    }
-
-    @Override
-    public Histogram.HistorgramPair updateBatch(
-            Histogram.HistorgramPair object,
-            Histogram.HistorgramPair cache, Object... objects) {
-        // TODO Auto-generated method stub
-        if (object == null) {
-            return cache;
-        }
-        if (cache == null) {
-            cache =
-                    new Histogram.HistorgramPair();
-        }
-
-        cache.addValue(object.getSum());
-        cache.addTimes(object.getTimes());
-
-        return cache;
-    }
-
-}
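
The deleted AvgUpdater accumulates a running sum and a sample count inside a Histogram.HistorgramPair (the misspelling is the actual identifier), so the average can be recovered later as sum / times. A minimal sketch of the same accumulation with a local pair, since the JStorm types are removed by this commit:

public class AvgSketch {
    // Local stand-in for Histogram.HistorgramPair: a running sum plus a count.
    static final class SumCount {
        double sum;
        long times;
    }

    // update(): fold one sample into the accumulator, allocating it lazily
    // and ignoring nulls, exactly as the deleted class does.
    static SumCount update(Number sample, SumCount cache) {
        if (sample == null) return cache;
        if (cache == null) cache = new SumCount();
        cache.sum += sample.doubleValue();
        cache.times += 1;
        return cache;
    }

    public static void main(String[] args) {
        SumCount acc = null;
        acc = update(2, acc);
        acc = update(4, acc);
        System.out.println(acc.sum / acc.times); // 3.0
    }
}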

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/updater/AvgUpdater.java.bak
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/updater/AvgUpdater.java.bak b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/updater/AvgUpdater.java.bak
deleted file mode 100755
index 44cc70d..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/updater/AvgUpdater.java.bak
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric.operator.updater;
-
-import java.util.concurrent.atomic.AtomicLong;
-
-import com.alibaba.jstorm.utils.Pair;
-import com.google.common.util.concurrent.AtomicDouble;
-
-public class AvgUpdater2 implements Updater<Pair<AtomicDouble, AtomicLong>> {
-    private static final long serialVersionUID = 2562836921724586449L;
-
-    @Override
-    public Pair<AtomicDouble, AtomicLong> update(Number object,
-            Pair<AtomicDouble, AtomicLong> cache, Object... others) {
-        // TODO Auto-generated method stub
-        if (object == null) {
-            return cache;
-        }
-        if (cache == null) {
-            cache =
-                    new Pair<AtomicDouble, AtomicLong>(new AtomicDouble(0.0),
-                            new AtomicLong(0));
-        }
-
-        AtomicDouble sum = cache.getFirst();
-        AtomicLong times = cache.getSecond();
-
-        sum.addAndGet(object.doubleValue());
-        times.incrementAndGet();
-
-        return cache;
-    }
-
-    @Override
-    public Pair<AtomicDouble, AtomicLong> updateBatch(
-            Pair<AtomicDouble, AtomicLong> object,
-            Pair<AtomicDouble, AtomicLong> cache, Object... objects) {
-        // TODO Auto-generated method stub
-        if (object == null) {
-            return cache;
-        }
-        if (cache == null) {
-            cache =
-                    new Pair<AtomicDouble, AtomicLong>(new AtomicDouble(0.0),
-                            new AtomicLong(0));
-        }
-
-        AtomicDouble sum = cache.getFirst();
-        AtomicLong times = cache.getSecond();
-
-        sum.addAndGet(object.getFirst().get());
-        times.addAndGet(object.getSecond().get());
-
-        return cache;
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/updater/DoubleAddUpdater.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/updater/DoubleAddUpdater.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/updater/DoubleAddUpdater.java
deleted file mode 100755
index e3b640a..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/updater/DoubleAddUpdater.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric.operator.updater;
-
-import com.google.common.util.concurrent.AtomicDouble;
-
-public class DoubleAddUpdater implements Updater<AtomicDouble> {
-    private static final long serialVersionUID = -1293565961076552462L;
-
-    @Override
-    public AtomicDouble update(Number object, AtomicDouble cache,
-            Object... others) {
-        // TODO Auto-generated method stub
-        if (cache == null) {
-            cache = new AtomicDouble(0.0);
-        }
-        if (object != null) {
-            cache.addAndGet(object.doubleValue());
-        }
-        return cache;
-    }
-
-    @Override
-    public AtomicDouble updateBatch(AtomicDouble object, AtomicDouble cache,
-            Object... objects) {
-        // TODO Auto-generated method stub
-        return update(object, cache, objects);
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/updater/LongAddUpdater.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/updater/LongAddUpdater.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/updater/LongAddUpdater.java
deleted file mode 100755
index 4986146..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/updater/LongAddUpdater.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric.operator.updater;
-
-import java.util.concurrent.atomic.AtomicLong;
-
-public class LongAddUpdater implements Updater<AtomicLong> {
-    private static final long serialVersionUID = -2185639264737912405L;
-
-    @Override
-    public AtomicLong update(Number object, AtomicLong cache, Object... others) {
-        // TODO Auto-generated method stub
-        if (cache == null) {
-            cache = new AtomicLong(0);
-        }
-
-        if (object != null) {
-            cache.addAndGet(object.longValue());
-        }
-        return cache;
-    }
-
-    @Override
-    public AtomicLong updateBatch(AtomicLong object, AtomicLong cache,
-            Object... objects) {
-        // TODO Auto-generated method stub
-        return update(object, cache, objects);
-    }
-
-}
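
DoubleAddUpdater and LongAddUpdater share one pattern: allocate the atomic accumulator lazily, ignore null inputs, and let updateBatch delegate to update, which type-checks because AtomicDouble and AtomicLong are Number subtypes. A usage sketch, assuming a pre-2.1 jstorm-core jar on the classpath since this commit removes both classes:

import java.util.concurrent.atomic.AtomicLong;
import com.alibaba.jstorm.common.metric.operator.updater.LongAddUpdater;

public class LongAddSketch {
    public static void main(String[] args) {
        LongAddUpdater updater = new LongAddUpdater();
        AtomicLong acc = null;
        acc = updater.update(3, acc);                       // lazily allocates the accumulator
        acc = updater.update(4L, acc);                      // any Number is coerced via longValue()
        acc = updater.updateBatch(new AtomicLong(10), acc); // batch path delegates to update()
        System.out.println(acc.get());                      // 17
    }
}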

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/updater/Updater.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/updater/Updater.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/updater/Updater.java
deleted file mode 100755
index cb22c4c..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/operator/updater/Updater.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric.operator.updater;
-
-import java.io.Serializable;
-
-public interface Updater<V> extends Serializable {
-    V update(Number object, V cache, Object... others);
-    V updateBatch(V object, V cache, Object... objects );
-}
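
Updater is the single extension point the deleted window machinery folds samples through: update() merges one raw Number into a typed accumulator, while updateBatch() merges a whole accumulator of the same type. A self-contained sketch that re-declares the two-method contract shown above and adds a max-tracking implementation (MaxUpdater is illustrative, not a JStorm class):

import java.io.Serializable;

public class UpdaterSketch {
    // Re-declaration of the interface from the deleted file above.
    interface Updater<V> extends Serializable {
        V update(Number object, V cache, Object... others);
        V updateBatch(V object, V cache, Object... objects);
    }

    // Illustrative implementation: keeps the largest sample seen so far.
    static class MaxUpdater implements Updater<Double> {
        private static final long serialVersionUID = 1L;

        @Override
        public Double update(Number object, Double cache, Object... others) {
            if (object == null) return cache;
            double v = object.doubleValue();
            return (cache == null || v > cache) ? v : cache;
        }

        @Override
        public Double updateBatch(Double object, Double cache, Object... objects) {
            return update(object, cache, objects);
        }
    }

    public static void main(String[] args) {
        Updater<Double> max = new MaxUpdater();
        Double acc = null;
        acc = max.update(3, acc);
        acc = max.update(9.5, acc);
        acc = max.update(7, acc);
        System.out.println(acc); // 9.5
    }
}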

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/snapshot/AsmCounterSnapshot.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/snapshot/AsmCounterSnapshot.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/snapshot/AsmCounterSnapshot.java
new file mode 100644
index 0000000..2f79141
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/snapshot/AsmCounterSnapshot.java
@@ -0,0 +1,20 @@
+package com.alibaba.jstorm.common.metric.snapshot;
+
+/**
+ * @author wange
+ * @since 15/6/5
+ */
+public class AsmCounterSnapshot extends AsmSnapshot {
+    private static final long serialVersionUID = -7574994037947802582L;
+
+    private long v;
+
+    public long getV() {
+        return v;
+    }
+
+    public AsmSnapshot setValue(long value) {
+        this.v = value;
+        return this;
+    }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/snapshot/AsmGaugeSnapshot.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/snapshot/AsmGaugeSnapshot.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/snapshot/AsmGaugeSnapshot.java
new file mode 100644
index 0000000..221b5b1
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/snapshot/AsmGaugeSnapshot.java
@@ -0,0 +1,20 @@
+package com.alibaba.jstorm.common.metric.snapshot;
+
+/**
+ * @author wange
+ * @since 15/6/5
+ */
+public class AsmGaugeSnapshot extends AsmSnapshot {
+    private static final long serialVersionUID = 3216517772824794848L;
+
+    private double v;
+
+    public double getV() {
+        return v;
+    }
+
+    public AsmSnapshot setValue(double value) {
+        this.v = value;
+        return this;
+    }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/snapshot/AsmHistogramSnapshot.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/snapshot/AsmHistogramSnapshot.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/snapshot/AsmHistogramSnapshot.java
new file mode 100644
index 0000000..51ac3f5
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/snapshot/AsmHistogramSnapshot.java
@@ -0,0 +1,22 @@
+package com.alibaba.jstorm.common.metric.snapshot;
+
+import com.codahale.metrics.Snapshot;
+
+/**
+ * @author wange
+ * @since 15/6/5
+ */
+public class AsmHistogramSnapshot extends AsmSnapshot {
+    private static final long serialVersionUID = 7284437562594156565L;
+
+    private Snapshot snapshot;
+
+    public Snapshot getSnapshot() {
+        return snapshot;
+    }
+
+    public AsmSnapshot setSnapshot(Snapshot snapshot) {
+        this.snapshot = snapshot;
+        return this;
+    }
+}
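
AsmHistogramSnapshot wraps a com.codahale.metrics.Snapshot rather than copying its quantiles out, so consumers read percentiles straight from the wrapped object. A consumer-side sketch, assuming jstorm-core and metrics-core 3.x on the classpath:

import com.codahale.metrics.ExponentiallyDecayingReservoir;
import com.codahale.metrics.Histogram;
import com.alibaba.jstorm.common.metric.snapshot.AsmHistogramSnapshot;

public class HistogramSnapshotSketch {
    public static void main(String[] args) {
        Histogram histogram = new Histogram(new ExponentiallyDecayingReservoir());
        for (int i = 1; i <= 100; i++) {
            histogram.update(i);
        }
        // setSnapshot() returns the base type AsmSnapshot, hence the downcast.
        AsmHistogramSnapshot asm = (AsmHistogramSnapshot)
                new AsmHistogramSnapshot().setSnapshot(histogram.getSnapshot());
        // Quantiles stay inside the wrapped codahale Snapshot.
        System.out.println(asm.getSnapshot().getMedian());
        System.out.println(asm.getSnapshot().get99thPercentile());
    }
}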

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/snapshot/AsmMeterSnapshot.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/snapshot/AsmMeterSnapshot.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/snapshot/AsmMeterSnapshot.java
new file mode 100644
index 0000000..e255e6b
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/snapshot/AsmMeterSnapshot.java
@@ -0,0 +1,50 @@
+package com.alibaba.jstorm.common.metric.snapshot;
+
+/**
+ * @author wange
+ * @since 15/6/5
+ */
+public class AsmMeterSnapshot extends AsmSnapshot {
+    private static final long serialVersionUID = -1754325312045025810L;
+
+    private double m1;
+    private double m5;
+    private double m15;
+    private double mean;
+
+    public double getM1() {
+        return m1;
+    }
+
+    public AsmMeterSnapshot setM1(double m1) {
+        this.m1 = m1;
+        return this;
+    }
+
+    public double getM5() {
+        return m5;
+    }
+
+    public AsmMeterSnapshot setM5(double m5) {
+        this.m5 = m5;
+        return this;
+    }
+
+    public double getM15() {
+        return m15;
+    }
+
+    public AsmMeterSnapshot setM15(double m15) {
+        this.m15 = m15;
+        return this;
+    }
+
+    public double getMean() {
+        return mean;
+    }
+
+    public AsmMeterSnapshot setMean(double mean) {
+        this.mean = mean;
+        return this;
+    }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/snapshot/AsmSnapshot.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/snapshot/AsmSnapshot.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/snapshot/AsmSnapshot.java
new file mode 100644
index 0000000..4c71fe9
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/snapshot/AsmSnapshot.java
@@ -0,0 +1,32 @@
+package com.alibaba.jstorm.common.metric.snapshot;
+
+import java.io.Serializable;
+
+/**
+ * @author wange
+ * @since 15/6/5
+ */
+public abstract class AsmSnapshot implements Serializable {
+    private static final long serialVersionUID = 1945719653840917619L;
+
+    private long metricId;
+    private long ts;
+
+    public long getTs() {
+        return ts;
+    }
+
+    public AsmSnapshot setTs(long ts) {
+        this.ts = ts;
+        return this;
+    }
+
+    public long getMetricId() {
+        return metricId;
+    }
+
+    public AsmSnapshot setMetricId(long metricId) {
+        this.metricId = metricId;
+        return this;
+    }
+}
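
All the new snapshot classes use fluent setters, but setTs and setMetricId here, and the value setters on AsmCounterSnapshot, AsmGaugeSnapshot and AsmHistogramSnapshot, all declare the base type AsmSnapshot as their return, so a chain that should end in a subtype reference needs a downcast. A short sketch of that caveat:

import com.alibaba.jstorm.common.metric.snapshot.AsmCounterSnapshot;

public class SnapshotChainSketch {
    public static void main(String[] args) {
        // Every setter in the chain returns AsmSnapshot, so the subtype
        // reference is recovered with a cast at the end.
        AsmCounterSnapshot counter = (AsmCounterSnapshot) new AsmCounterSnapshot()
                .setValue(42L)
                .setTs(System.currentTimeMillis())
                .setMetricId(1001L);
        System.out.println(counter.getV()); // 42
    }
}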

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/snapshot/AsmTimerSnapshot.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/snapshot/AsmTimerSnapshot.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/snapshot/AsmTimerSnapshot.java
new file mode 100644
index 0000000..6fec50c
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/snapshot/AsmTimerSnapshot.java
@@ -0,0 +1,32 @@
+package com.alibaba.jstorm.common.metric.snapshot;
+
+import com.codahale.metrics.Snapshot;
+
+/**
+ * @author wange
+ * @since 15/6/5
+ */
+public class AsmTimerSnapshot extends AsmSnapshot {
+    private static final long serialVersionUID = 7784062881728741781L;
+
+    private Snapshot histogram;
+    private AsmMeterSnapshot meter;
+
+    public Snapshot getHistogram() {
+        return histogram;
+    }
+
+    public AsmTimerSnapshot setHistogram(Snapshot snapshot) {
+        this.histogram = snapshot;
+        return this;
+    }
+
+    public AsmMeterSnapshot getMeter() {
+        return meter;
+    }
+
+    public AsmTimerSnapshot setMeter(AsmMeterSnapshot meter) {
+        this.meter = meter;
+        return this;
+    }
+}
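
AsmTimerSnapshot mirrors a codahale Timer: one duration Snapshot plus the meter rates. A population sketch, assuming jstorm-core and metrics-core 3.x on the classpath; reading m1/m5/m15 as the 1/5/15-minute rates is an assumption here, matching the Timer getters:

import java.util.concurrent.TimeUnit;
import com.codahale.metrics.Timer;
import com.alibaba.jstorm.common.metric.snapshot.AsmMeterSnapshot;
import com.alibaba.jstorm.common.metric.snapshot.AsmTimerSnapshot;

public class TimerSnapshotSketch {
    public static void main(String[] args) {
        Timer timer = new Timer();
        timer.update(12, TimeUnit.MILLISECONDS);
        timer.update(30, TimeUnit.MILLISECONDS);

        // AsmMeterSnapshot setters return the subtype, so this chain needs no cast.
        AsmMeterSnapshot meter = new AsmMeterSnapshot()
                .setM1(timer.getOneMinuteRate())
                .setM5(timer.getFiveMinuteRate())
                .setM15(timer.getFifteenMinuteRate())
                .setMean(timer.getMeanRate());

        AsmTimerSnapshot snapshot = new AsmTimerSnapshot()
                .setHistogram(timer.getSnapshot())
                .setMeter(meter);
        System.out.println(snapshot.getHistogram().getMax());
    }
}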

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/window/AllWindow.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/window/AllWindow.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/window/AllWindow.java
deleted file mode 100755
index 8475e4c..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/window/AllWindow.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric.window;
-
-import java.util.ArrayList;
-
-import com.alibaba.jstorm.common.metric.operator.Sampling;
-import com.alibaba.jstorm.common.metric.operator.StartTime;
-import com.alibaba.jstorm.common.metric.operator.merger.Merger;
-import com.alibaba.jstorm.common.metric.operator.updater.Updater;
-
-public class AllWindow<V> implements Sampling<V>, StartTime {
-
-    private static final long serialVersionUID = -8523514907315740812L;
-
-    protected V unflushed;
-    protected V defaultValue;
-
-    protected Updater<V> updater;
-    protected Merger<V> merger;
-    protected long startTime;
-
-    AllWindow(V defaultValue, Updater<V> updater, Merger<V> merger) {
-
-        this.updater = updater;
-        this.merger = merger;
-
-        this.defaultValue = defaultValue;
-        this.startTime = System.currentTimeMillis();
-    }
-
-    @Override
-    public void update(Number obj) {
-        // TODO Auto-generated method stub
-        synchronized (this) {
-            unflushed = updater.update(obj, unflushed);
-        }
-    }
-    
-    public void updateBatch(V batch) {
-        synchronized (this) {
-            unflushed = updater.updateBatch(batch, unflushed);
-        }
-    }
-
-    @Override
-    public V getSnapshot() {
-        // TODO Auto-generated method stub
-        V ret = merger.merge(new ArrayList<V>(), unflushed, this);
-        if (ret == null) {
-            return defaultValue;
-        } else {
-            return ret;
-        }
-    }
-
-    @Override
-    public long getStartTime() {
-        // TODO Auto-generated method stub
-        return startTime;
-    }
-
-}
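
The deleted AllWindow is an all-time (never-rolling) window: update() folds each sample through the Updater under the instance lock, and getSnapshot() runs the Merger over the single unflushed value, falling back to defaultValue when nothing has arrived. A minimal self-contained sketch of that behaviour, specialized to a long counter so it needs no JStorm types:

public class AllWindowSketch {
    private Long unflushed;                 // accumulated value, null until the first sample
    private final long defaultValue = 0L;
    private final long startTime = System.currentTimeMillis();

    // update(): fold one sample in under the instance lock, as AllWindow does.
    public synchronized void update(Number sample) {
        long v = sample.longValue();
        unflushed = (unflushed == null) ? v : unflushed + v;
    }

    // getSnapshot(): the merge step is trivial here; fall back to the default.
    public synchronized long getSnapshot() {
        return (unflushed == null) ? defaultValue : unflushed;
    }

    public long getStartTime() {
        return startTime;
    }

    public static void main(String[] args) {
        AllWindowSketch w = new AllWindowSketch();
        w.update(2);
        w.update(5);
        System.out.println(w.getSnapshot()); // 7
    }
}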


http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/TopologyTaskHbInfo.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/TopologyTaskHbInfo.java b/jstorm-core/src/main/java/backtype/storm/generated/TopologyTaskHbInfo.java
new file mode 100644
index 0000000..2734f4f
--- /dev/null
+++ b/jstorm-core/src/main/java/backtype/storm/generated/TopologyTaskHbInfo.java
@@ -0,0 +1,663 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.2)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package backtype.storm.generated;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
+public class TopologyTaskHbInfo implements org.apache.thrift.TBase<TopologyTaskHbInfo, TopologyTaskHbInfo._Fields>, java.io.Serializable, Cloneable, Comparable<TopologyTaskHbInfo> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TopologyTaskHbInfo");
+
+  private static final org.apache.thrift.protocol.TField TOPOLOGY_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("topologyId", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField TOPOLOGY_MASTER_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("topologyMasterId", org.apache.thrift.protocol.TType.I32, (short)2);
+  private static final org.apache.thrift.protocol.TField TASK_HBS_FIELD_DESC = new org.apache.thrift.protocol.TField("taskHbs", org.apache.thrift.protocol.TType.MAP, (short)3);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new TopologyTaskHbInfoStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new TopologyTaskHbInfoTupleSchemeFactory());
+  }
+
+  private String topologyId; // required
+  private int topologyMasterId; // required
+  private Map<Integer,TaskHeartbeat> taskHbs; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    TOPOLOGY_ID((short)1, "topologyId"),
+    TOPOLOGY_MASTER_ID((short)2, "topologyMasterId"),
+    TASK_HBS((short)3, "taskHbs");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // TOPOLOGY_ID
+          return TOPOLOGY_ID;
+        case 2: // TOPOLOGY_MASTER_ID
+          return TOPOLOGY_MASTER_ID;
+        case 3: // TASK_HBS
+          return TASK_HBS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __TOPOLOGYMASTERID_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.TASK_HBS};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.TOPOLOGY_ID, new org.apache.thrift.meta_data.FieldMetaData("topologyId", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TOPOLOGY_MASTER_ID, new org.apache.thrift.meta_data.FieldMetaData("topologyMasterId", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    tmpMap.put(_Fields.TASK_HBS, new org.apache.thrift.meta_data.FieldMetaData("taskHbs", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32), 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TaskHeartbeat.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TopologyTaskHbInfo.class, metaDataMap);
+  }
+
+  public TopologyTaskHbInfo() {
+  }
+
+  public TopologyTaskHbInfo(
+    String topologyId,
+    int topologyMasterId)
+  {
+    this();
+    this.topologyId = topologyId;
+    this.topologyMasterId = topologyMasterId;
+    set_topologyMasterId_isSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public TopologyTaskHbInfo(TopologyTaskHbInfo other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.is_set_topologyId()) {
+      this.topologyId = other.topologyId;
+    }
+    this.topologyMasterId = other.topologyMasterId;
+    if (other.is_set_taskHbs()) {
+      Map<Integer,TaskHeartbeat> __this__taskHbs = new HashMap<Integer,TaskHeartbeat>(other.taskHbs.size());
+      for (Map.Entry<Integer, TaskHeartbeat> other_element : other.taskHbs.entrySet()) {
+
+        Integer other_element_key = other_element.getKey();
+        TaskHeartbeat other_element_value = other_element.getValue();
+
+        Integer __this__taskHbs_copy_key = other_element_key;
+
+        TaskHeartbeat __this__taskHbs_copy_value = new TaskHeartbeat(other_element_value);
+
+        __this__taskHbs.put(__this__taskHbs_copy_key, __this__taskHbs_copy_value);
+      }
+      this.taskHbs = __this__taskHbs;
+    }
+  }
+
+  public TopologyTaskHbInfo deepCopy() {
+    return new TopologyTaskHbInfo(this);
+  }
+
+  @Override
+  public void clear() {
+    this.topologyId = null;
+    set_topologyMasterId_isSet(false);
+    this.topologyMasterId = 0;
+    this.taskHbs = null;
+  }
+
+  public String get_topologyId() {
+    return this.topologyId;
+  }
+
+  public void set_topologyId(String topologyId) {
+    this.topologyId = topologyId;
+  }
+
+  public void unset_topologyId() {
+    this.topologyId = null;
+  }
+
+  /** Returns true if field topologyId is set (has been assigned a value) and false otherwise */
+  public boolean is_set_topologyId() {
+    return this.topologyId != null;
+  }
+
+  public void set_topologyId_isSet(boolean value) {
+    if (!value) {
+      this.topologyId = null;
+    }
+  }
+
+  public int get_topologyMasterId() {
+    return this.topologyMasterId;
+  }
+
+  public void set_topologyMasterId(int topologyMasterId) {
+    this.topologyMasterId = topologyMasterId;
+    set_topologyMasterId_isSet(true);
+  }
+
+  public void unset_topologyMasterId() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TOPOLOGYMASTERID_ISSET_ID);
+  }
+
+  /** Returns true if field topologyMasterId is set (has been assigned a value) and false otherwise */
+  public boolean is_set_topologyMasterId() {
+    return EncodingUtils.testBit(__isset_bitfield, __TOPOLOGYMASTERID_ISSET_ID);
+  }
+
+  public void set_topologyMasterId_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TOPOLOGYMASTERID_ISSET_ID, value);
+  }
+
+  public int get_taskHbs_size() {
+    return (this.taskHbs == null) ? 0 : this.taskHbs.size();
+  }
+
+  public void put_to_taskHbs(int key, TaskHeartbeat val) {
+    if (this.taskHbs == null) {
+      this.taskHbs = new HashMap<Integer,TaskHeartbeat>();
+    }
+    this.taskHbs.put(key, val);
+  }
+
+  public Map<Integer,TaskHeartbeat> get_taskHbs() {
+    return this.taskHbs;
+  }
+
+  public void set_taskHbs(Map<Integer,TaskHeartbeat> taskHbs) {
+    this.taskHbs = taskHbs;
+  }
+
+  public void unset_taskHbs() {
+    this.taskHbs = null;
+  }
+
+  /** Returns true if field taskHbs is set (has been assigned a value) and false otherwise */
+  public boolean is_set_taskHbs() {
+    return this.taskHbs != null;
+  }
+
+  public void set_taskHbs_isSet(boolean value) {
+    if (!value) {
+      this.taskHbs = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case TOPOLOGY_ID:
+      if (value == null) {
+        unset_topologyId();
+      } else {
+        set_topologyId((String)value);
+      }
+      break;
+
+    case TOPOLOGY_MASTER_ID:
+      if (value == null) {
+        unset_topologyMasterId();
+      } else {
+        set_topologyMasterId((Integer)value);
+      }
+      break;
+
+    case TASK_HBS:
+      if (value == null) {
+        unset_taskHbs();
+      } else {
+        set_taskHbs((Map<Integer,TaskHeartbeat>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case TOPOLOGY_ID:
+      return get_topologyId();
+
+    case TOPOLOGY_MASTER_ID:
+      return Integer.valueOf(get_topologyMasterId());
+
+    case TASK_HBS:
+      return get_taskHbs();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case TOPOLOGY_ID:
+      return is_set_topologyId();
+    case TOPOLOGY_MASTER_ID:
+      return is_set_topologyMasterId();
+    case TASK_HBS:
+      return is_set_taskHbs();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof TopologyTaskHbInfo)
+      return this.equals((TopologyTaskHbInfo)that);
+    return false;
+  }
+
+  public boolean equals(TopologyTaskHbInfo that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_topologyId = true && this.is_set_topologyId();
+    boolean that_present_topologyId = true && that.is_set_topologyId();
+    if (this_present_topologyId || that_present_topologyId) {
+      if (!(this_present_topologyId && that_present_topologyId))
+        return false;
+      if (!this.topologyId.equals(that.topologyId))
+        return false;
+    }
+
+    boolean this_present_topologyMasterId = true;
+    boolean that_present_topologyMasterId = true;
+    if (this_present_topologyMasterId || that_present_topologyMasterId) {
+      if (!(this_present_topologyMasterId && that_present_topologyMasterId))
+        return false;
+      if (this.topologyMasterId != that.topologyMasterId)
+        return false;
+    }
+
+    boolean this_present_taskHbs = true && this.is_set_taskHbs();
+    boolean that_present_taskHbs = true && that.is_set_taskHbs();
+    if (this_present_taskHbs || that_present_taskHbs) {
+      if (!(this_present_taskHbs && that_present_taskHbs))
+        return false;
+      if (!this.taskHbs.equals(that.taskHbs))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_topologyId = true && (is_set_topologyId());
+    list.add(present_topologyId);
+    if (present_topologyId)
+      list.add(topologyId);
+
+    boolean present_topologyMasterId = true;
+    list.add(present_topologyMasterId);
+    if (present_topologyMasterId)
+      list.add(topologyMasterId);
+
+    boolean present_taskHbs = true && (is_set_taskHbs());
+    list.add(present_taskHbs);
+    if (present_taskHbs)
+      list.add(taskHbs);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(TopologyTaskHbInfo other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(is_set_topologyId()).compareTo(other.is_set_topologyId());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_topologyId()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.topologyId, other.topologyId);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_topologyMasterId()).compareTo(other.is_set_topologyMasterId());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_topologyMasterId()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.topologyMasterId, other.topologyMasterId);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_taskHbs()).compareTo(other.is_set_taskHbs());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_taskHbs()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.taskHbs, other.taskHbs);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("TopologyTaskHbInfo(");
+    boolean first = true;
+
+    sb.append("topologyId:");
+    if (this.topologyId == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.topologyId);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("topologyMasterId:");
+    sb.append(this.topologyMasterId);
+    first = false;
+    if (is_set_taskHbs()) {
+      if (!first) sb.append(", ");
+      sb.append("taskHbs:");
+      if (this.taskHbs == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.taskHbs);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws TException {
+    // check for required fields
+    if (!is_set_topologyId()) {
+      throw new TProtocolException("Required field 'topologyId' is unset! Struct:" + toString());
+    }
+
+    if (!is_set_topologyMasterId()) {
+      throw new TProtocolException("Required field 'topologyMasterId' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class TopologyTaskHbInfoStandardSchemeFactory implements SchemeFactory {
+    public TopologyTaskHbInfoStandardScheme getScheme() {
+      return new TopologyTaskHbInfoStandardScheme();
+    }
+  }
+
+  private static class TopologyTaskHbInfoStandardScheme extends StandardScheme<TopologyTaskHbInfo> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, TopologyTaskHbInfo struct) throws TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // TOPOLOGY_ID
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.topologyId = iprot.readString();
+              struct.set_topologyId_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // TOPOLOGY_MASTER_ID
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.topologyMasterId = iprot.readI32();
+              struct.set_topologyMasterId_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // TASK_HBS
+            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
+              {
+                org.apache.thrift.protocol.TMap _map222 = iprot.readMapBegin();
+                struct.taskHbs = new HashMap<Integer,TaskHeartbeat>(2*_map222.size);
+                int _key223;
+                TaskHeartbeat _val224;
+                for (int _i225 = 0; _i225 < _map222.size; ++_i225)
+                {
+                  _key223 = iprot.readI32();
+                  _val224 = new TaskHeartbeat();
+                  _val224.read(iprot);
+                  struct.taskHbs.put(_key223, _val224);
+                }
+                iprot.readMapEnd();
+              }
+              struct.set_taskHbs_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, TopologyTaskHbInfo struct) throws TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.topologyId != null) {
+        oprot.writeFieldBegin(TOPOLOGY_ID_FIELD_DESC);
+        oprot.writeString(struct.topologyId);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(TOPOLOGY_MASTER_ID_FIELD_DESC);
+      oprot.writeI32(struct.topologyMasterId);
+      oprot.writeFieldEnd();
+      if (struct.taskHbs != null) {
+        if (struct.is_set_taskHbs()) {
+          oprot.writeFieldBegin(TASK_HBS_FIELD_DESC);
+          {
+            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I32, org.apache.thrift.protocol.TType.STRUCT, struct.taskHbs.size()));
+            for (Map.Entry<Integer, TaskHeartbeat> _iter226 : struct.taskHbs.entrySet())
+            {
+              oprot.writeI32(_iter226.getKey());
+              _iter226.getValue().write(oprot);
+            }
+            oprot.writeMapEnd();
+          }
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class TopologyTaskHbInfoTupleSchemeFactory implements SchemeFactory {
+    public TopologyTaskHbInfoTupleScheme getScheme() {
+      return new TopologyTaskHbInfoTupleScheme();
+    }
+  }
+
+  private static class TopologyTaskHbInfoTupleScheme extends TupleScheme<TopologyTaskHbInfo> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, TopologyTaskHbInfo struct) throws TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeString(struct.topologyId);
+      oprot.writeI32(struct.topologyMasterId);
+      BitSet optionals = new BitSet();
+      if (struct.is_set_taskHbs()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.is_set_taskHbs()) {
+        {
+          oprot.writeI32(struct.taskHbs.size());
+          for (Map.Entry<Integer, TaskHeartbeat> _iter227 : struct.taskHbs.entrySet())
+          {
+            oprot.writeI32(_iter227.getKey());
+            _iter227.getValue().write(oprot);
+          }
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, TopologyTaskHbInfo struct) throws TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.topologyId = iprot.readString();
+      struct.set_topologyId_isSet(true);
+      struct.topologyMasterId = iprot.readI32();
+      struct.set_topologyMasterId_isSet(true);
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        {
+          org.apache.thrift.protocol.TMap _map228 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I32, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.taskHbs = new HashMap<Integer,TaskHeartbeat>(2*_map228.size);
+          int _key229;
+          TaskHeartbeat _val230;
+          for (int _i231 = 0; _i231 < _map228.size; ++_i231)
+          {
+            _key229 = iprot.readI32();
+            _val230 = new TaskHeartbeat();
+            _val230.read(iprot);
+            struct.taskHbs.put(_key229, _val230);
+          }
+        }
+        struct.set_taskHbs_isSet(true);
+      }
+    }
+  }
+
+}
+
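
In the generated struct above, topologyId and topologyMasterId are required while the taskHbs map is optional, and validate() enforces exactly that. A usage sketch, assuming libthrift 0.9.2 and the generated TaskHeartbeat class (not shown in this hunk) on the classpath; TaskHeartbeat is built with the no-arg constructor Thrift always generates:

import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TCompactProtocol;
import backtype.storm.generated.TaskHeartbeat;
import backtype.storm.generated.TopologyTaskHbInfo;

public class TaskHbSketch {
    public static void main(String[] args) throws Exception {
        TopologyTaskHbInfo hb = new TopologyTaskHbInfo("demo-topology-1", 3);
        hb.validate();                              // passes: both required fields are set

        hb.put_to_taskHbs(7, new TaskHeartbeat());  // optional map is created lazily
        System.out.println(hb);                     // toString() includes taskHbs only when set

        // Bytes via the same compact protocol writeObject() uses above; the
        // empty TaskHeartbeat is left out because writing a nested struct
        // triggers that struct's own validate().
        byte[] bytes = new TSerializer(new TCompactProtocol.Factory())
                .serialize(new TopologyTaskHbInfo("demo-topology-1", 3));
        System.out.println(bytes.length);
    }
}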

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/WorkerSummary.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/WorkerSummary.java b/jstorm-core/src/main/java/backtype/storm/generated/WorkerSummary.java
index 9faaee7..0fbe63f 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/WorkerSummary.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/WorkerSummary.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class WorkerSummary implements org.apache.thrift.TBase<WorkerSummary, WorkerSummary._Fields>, java.io.Serializable, Cloneable, Comparable<WorkerSummary> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("WorkerSummary");
 
@@ -505,11 +505,11 @@ public class WorkerSummary implements org.apache.thrift.TBase<WorkerSummary, Wor
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -545,22 +545,22 @@ public class WorkerSummary implements org.apache.thrift.TBase<WorkerSummary, Wor
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_port()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'port' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'port' is unset! Struct:" + toString());
     }
 
     if (!is_set_uptime()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'uptime' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'uptime' is unset! Struct:" + toString());
     }
 
     if (!is_set_topology()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'topology' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'topology' is unset! Struct:" + toString());
     }
 
     if (!is_set_tasks()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tasks' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'tasks' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -569,7 +569,7 @@ public class WorkerSummary implements org.apache.thrift.TBase<WorkerSummary, Wor
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -579,7 +579,7 @@ public class WorkerSummary implements org.apache.thrift.TBase<WorkerSummary, Wor
       // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
       __isset_bitfield = 0;
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -592,7 +592,7 @@ public class WorkerSummary implements org.apache.thrift.TBase<WorkerSummary, Wor
 
   private static class WorkerSummaryStandardScheme extends StandardScheme<WorkerSummary> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, WorkerSummary struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, WorkerSummary struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -654,7 +654,7 @@ public class WorkerSummary implements org.apache.thrift.TBase<WorkerSummary, Wor
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, WorkerSummary struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, WorkerSummary struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -696,7 +696,7 @@ public class WorkerSummary implements org.apache.thrift.TBase<WorkerSummary, Wor
   private static class WorkerSummaryTupleScheme extends TupleScheme<WorkerSummary> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, WorkerSummary struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, WorkerSummary struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       oprot.writeI32(struct.port);
       oprot.writeI32(struct.uptime);
@@ -711,7 +711,7 @@ public class WorkerSummary implements org.apache.thrift.TBase<WorkerSummary, Wor
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, WorkerSummary struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, WorkerSummary struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       struct.port = iprot.readI32();
       struct.set_port_isSet(true);

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/WorkerUploadMetrics.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/WorkerUploadMetrics.java b/jstorm-core/src/main/java/backtype/storm/generated/WorkerUploadMetrics.java
index 934cfb3..7258105 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/WorkerUploadMetrics.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/WorkerUploadMetrics.java
@@ -34,16 +34,14 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class WorkerUploadMetrics implements org.apache.thrift.TBase<WorkerUploadMetrics, WorkerUploadMetrics._Fields>, java.io.Serializable, Cloneable, Comparable<WorkerUploadMetrics> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("WorkerUploadMetrics");
 
-  private static final org.apache.thrift.protocol.TField TOPOLOGY_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("topology_id", org.apache.thrift.protocol.TType.STRING, (short)1);
-  private static final org.apache.thrift.protocol.TField SUPERVISOR_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("supervisor_id", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField TOPOLOGY_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("topologyId", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField SUPERVISOR_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("supervisorId", org.apache.thrift.protocol.TType.STRING, (short)2);
   private static final org.apache.thrift.protocol.TField PORT_FIELD_DESC = new org.apache.thrift.protocol.TField("port", org.apache.thrift.protocol.TType.I32, (short)3);
-  private static final org.apache.thrift.protocol.TField WORKER_METRIC_FIELD_DESC = new org.apache.thrift.protocol.TField("workerMetric", org.apache.thrift.protocol.TType.STRUCT, (short)4);
-  private static final org.apache.thrift.protocol.TField NETTY_METRIC_FIELD_DESC = new org.apache.thrift.protocol.TField("nettyMetric", org.apache.thrift.protocol.TType.STRUCT, (short)5);
-  private static final org.apache.thrift.protocol.TField TASK_METRIC_FIELD_DESC = new org.apache.thrift.protocol.TField("taskMetric", org.apache.thrift.protocol.TType.MAP, (short)6);
+  private static final org.apache.thrift.protocol.TField ALL_METRICS_FIELD_DESC = new org.apache.thrift.protocol.TField("allMetrics", org.apache.thrift.protocol.TType.STRUCT, (short)4);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -51,21 +49,17 @@ public class WorkerUploadMetrics implements org.apache.thrift.TBase<WorkerUpload
     schemes.put(TupleScheme.class, new WorkerUploadMetricsTupleSchemeFactory());
   }
 
-  private String topology_id; // required
-  private String supervisor_id; // required
+  private String topologyId; // required
+  private String supervisorId; // required
   private int port; // required
-  private MetricInfo workerMetric; // required
-  private NettyMetric nettyMetric; // required
-  private Map<Integer,MetricInfo> taskMetric; // required
+  private MetricInfo allMetrics; // required
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    TOPOLOGY_ID((short)1, "topology_id"),
-    SUPERVISOR_ID((short)2, "supervisor_id"),
+    TOPOLOGY_ID((short)1, "topologyId"),
+    SUPERVISOR_ID((short)2, "supervisorId"),
     PORT((short)3, "port"),
-    WORKER_METRIC((short)4, "workerMetric"),
-    NETTY_METRIC((short)5, "nettyMetric"),
-    TASK_METRIC((short)6, "taskMetric");
+    ALL_METRICS((short)4, "allMetrics");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -86,12 +80,8 @@ public class WorkerUploadMetrics implements org.apache.thrift.TBase<WorkerUpload
           return SUPERVISOR_ID;
         case 3: // PORT
           return PORT;
-        case 4: // WORKER_METRIC
-          return WORKER_METRIC;
-        case 5: // NETTY_METRIC
-          return NETTY_METRIC;
-        case 6: // TASK_METRIC
-          return TASK_METRIC;
+        case 4: // ALL_METRICS
+          return ALL_METRICS;
         default:
           return null;
       }
@@ -137,20 +127,14 @@ public class WorkerUploadMetrics implements org.apache.thrift.TBase<WorkerUpload
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.TOPOLOGY_ID, new org.apache.thrift.meta_data.FieldMetaData("topology_id", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+    tmpMap.put(_Fields.TOPOLOGY_ID, new org.apache.thrift.meta_data.FieldMetaData("topologyId", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.SUPERVISOR_ID, new org.apache.thrift.meta_data.FieldMetaData("supervisor_id", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+    tmpMap.put(_Fields.SUPERVISOR_ID, new org.apache.thrift.meta_data.FieldMetaData("supervisorId", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
     tmpMap.put(_Fields.PORT, new org.apache.thrift.meta_data.FieldMetaData("port", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
-    tmpMap.put(_Fields.WORKER_METRIC, new org.apache.thrift.meta_data.FieldMetaData("workerMetric", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+    tmpMap.put(_Fields.ALL_METRICS, new org.apache.thrift.meta_data.FieldMetaData("allMetrics", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, MetricInfo.class)));
-    tmpMap.put(_Fields.NETTY_METRIC, new org.apache.thrift.meta_data.FieldMetaData("nettyMetric", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, NettyMetric.class)));
-    tmpMap.put(_Fields.TASK_METRIC, new org.apache.thrift.meta_data.FieldMetaData("taskMetric", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
-            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32), 
-            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, MetricInfo.class))));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WorkerUploadMetrics.class, metaDataMap);
   }
@@ -159,21 +143,17 @@ public class WorkerUploadMetrics implements org.apache.thrift.TBase<WorkerUpload
   }
 
   public WorkerUploadMetrics(
-    String topology_id,
-    String supervisor_id,
+    String topologyId,
+    String supervisorId,
     int port,
-    MetricInfo workerMetric,
-    NettyMetric nettyMetric,
-    Map<Integer,MetricInfo> taskMetric)
+    MetricInfo allMetrics)
   {
     this();
-    this.topology_id = topology_id;
-    this.supervisor_id = supervisor_id;
+    this.topologyId = topologyId;
+    this.supervisorId = supervisorId;
     this.port = port;
     set_port_isSet(true);
-    this.workerMetric = workerMetric;
-    this.nettyMetric = nettyMetric;
-    this.taskMetric = taskMetric;
+    this.allMetrics = allMetrics;
   }
 
   /**
@@ -181,33 +161,15 @@ public class WorkerUploadMetrics implements org.apache.thrift.TBase<WorkerUpload
    */
   public WorkerUploadMetrics(WorkerUploadMetrics other) {
     __isset_bitfield = other.__isset_bitfield;
-    if (other.is_set_topology_id()) {
-      this.topology_id = other.topology_id;
+    if (other.is_set_topologyId()) {
+      this.topologyId = other.topologyId;
     }
-    if (other.is_set_supervisor_id()) {
-      this.supervisor_id = other.supervisor_id;
+    if (other.is_set_supervisorId()) {
+      this.supervisorId = other.supervisorId;
     }
     this.port = other.port;
-    if (other.is_set_workerMetric()) {
-      this.workerMetric = new MetricInfo(other.workerMetric);
-    }
-    if (other.is_set_nettyMetric()) {
-      this.nettyMetric = new NettyMetric(other.nettyMetric);
-    }
-    if (other.is_set_taskMetric()) {
-      Map<Integer,MetricInfo> __this__taskMetric = new HashMap<Integer,MetricInfo>(other.taskMetric.size());
-      for (Map.Entry<Integer, MetricInfo> other_element : other.taskMetric.entrySet()) {
-
-        Integer other_element_key = other_element.getKey();
-        MetricInfo other_element_value = other_element.getValue();
-
-        Integer __this__taskMetric_copy_key = other_element_key;
-
-        MetricInfo __this__taskMetric_copy_value = new MetricInfo(other_element_value);
-
-        __this__taskMetric.put(__this__taskMetric_copy_key, __this__taskMetric_copy_value);
-      }
-      this.taskMetric = __this__taskMetric;
+    if (other.is_set_allMetrics()) {
+      this.allMetrics = new MetricInfo(other.allMetrics);
     }
   }
 
@@ -217,58 +179,56 @@ public class WorkerUploadMetrics implements org.apache.thrift.TBase<WorkerUpload
 
   @Override
   public void clear() {
-    this.topology_id = null;
-    this.supervisor_id = null;
+    this.topologyId = null;
+    this.supervisorId = null;
     set_port_isSet(false);
     this.port = 0;
-    this.workerMetric = null;
-    this.nettyMetric = null;
-    this.taskMetric = null;
+    this.allMetrics = null;
   }
 
-  public String get_topology_id() {
-    return this.topology_id;
+  public String get_topologyId() {
+    return this.topologyId;
   }
 
-  public void set_topology_id(String topology_id) {
-    this.topology_id = topology_id;
+  public void set_topologyId(String topologyId) {
+    this.topologyId = topologyId;
   }
 
-  public void unset_topology_id() {
-    this.topology_id = null;
+  public void unset_topologyId() {
+    this.topologyId = null;
   }
 
-  /** Returns true if field topology_id is set (has been assigned a value) and false otherwise */
-  public boolean is_set_topology_id() {
-    return this.topology_id != null;
+  /** Returns true if field topologyId is set (has been assigned a value) and false otherwise */
+  public boolean is_set_topologyId() {
+    return this.topologyId != null;
   }
 
-  public void set_topology_id_isSet(boolean value) {
+  public void set_topologyId_isSet(boolean value) {
     if (!value) {
-      this.topology_id = null;
+      this.topologyId = null;
     }
   }
 
-  public String get_supervisor_id() {
-    return this.supervisor_id;
+  public String get_supervisorId() {
+    return this.supervisorId;
   }
 
-  public void set_supervisor_id(String supervisor_id) {
-    this.supervisor_id = supervisor_id;
+  public void set_supervisorId(String supervisorId) {
+    this.supervisorId = supervisorId;
   }
 
-  public void unset_supervisor_id() {
-    this.supervisor_id = null;
+  public void unset_supervisorId() {
+    this.supervisorId = null;
   }
 
-  /** Returns true if field supervisor_id is set (has been assigned a value) and false otherwise */
-  public boolean is_set_supervisor_id() {
-    return this.supervisor_id != null;
+  /** Returns true if field supervisorId is set (has been assigned a value) and false otherwise */
+  public boolean is_set_supervisorId() {
+    return this.supervisorId != null;
   }
 
-  public void set_supervisor_id_isSet(boolean value) {
+  public void set_supervisorId_isSet(boolean value) {
     if (!value) {
-      this.supervisor_id = null;
+      this.supervisorId = null;
     }
   }
 
@@ -294,83 +254,26 @@ public class WorkerUploadMetrics implements org.apache.thrift.TBase<WorkerUpload
     __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __PORT_ISSET_ID, value);
   }
 
-  public MetricInfo get_workerMetric() {
-    return this.workerMetric;
-  }
-
-  public void set_workerMetric(MetricInfo workerMetric) {
-    this.workerMetric = workerMetric;
-  }
-
-  public void unset_workerMetric() {
-    this.workerMetric = null;
-  }
-
-  /** Returns true if field workerMetric is set (has been assigned a value) and false otherwise */
-  public boolean is_set_workerMetric() {
-    return this.workerMetric != null;
-  }
-
-  public void set_workerMetric_isSet(boolean value) {
-    if (!value) {
-      this.workerMetric = null;
-    }
-  }
-
-  public NettyMetric get_nettyMetric() {
-    return this.nettyMetric;
-  }
-
-  public void set_nettyMetric(NettyMetric nettyMetric) {
-    this.nettyMetric = nettyMetric;
-  }
-
-  public void unset_nettyMetric() {
-    this.nettyMetric = null;
-  }
-
-  /** Returns true if field nettyMetric is set (has been assigned a value) and false otherwise */
-  public boolean is_set_nettyMetric() {
-    return this.nettyMetric != null;
-  }
-
-  public void set_nettyMetric_isSet(boolean value) {
-    if (!value) {
-      this.nettyMetric = null;
-    }
-  }
-
-  public int get_taskMetric_size() {
-    return (this.taskMetric == null) ? 0 : this.taskMetric.size();
-  }
-
-  public void put_to_taskMetric(int key, MetricInfo val) {
-    if (this.taskMetric == null) {
-      this.taskMetric = new HashMap<Integer,MetricInfo>();
-    }
-    this.taskMetric.put(key, val);
+  public MetricInfo get_allMetrics() {
+    return this.allMetrics;
   }
 
-  public Map<Integer,MetricInfo> get_taskMetric() {
-    return this.taskMetric;
+  public void set_allMetrics(MetricInfo allMetrics) {
+    this.allMetrics = allMetrics;
   }
 
-  public void set_taskMetric(Map<Integer,MetricInfo> taskMetric) {
-    this.taskMetric = taskMetric;
+  public void unset_allMetrics() {
+    this.allMetrics = null;
   }
 
-  public void unset_taskMetric() {
-    this.taskMetric = null;
+  /** Returns true if field allMetrics is set (has been assigned a value) and false otherwise */
+  public boolean is_set_allMetrics() {
+    return this.allMetrics != null;
   }
 
-  /** Returns true if field taskMetric is set (has been assigned a value) and false otherwise */
-  public boolean is_set_taskMetric() {
-    return this.taskMetric != null;
-  }
-
-  public void set_taskMetric_isSet(boolean value) {
+  public void set_allMetrics_isSet(boolean value) {
     if (!value) {
-      this.taskMetric = null;
+      this.allMetrics = null;
     }
   }
 
@@ -378,17 +281,17 @@ public class WorkerUploadMetrics implements org.apache.thrift.TBase<WorkerUpload
     switch (field) {
     case TOPOLOGY_ID:
       if (value == null) {
-        unset_topology_id();
+        unset_topologyId();
       } else {
-        set_topology_id((String)value);
+        set_topologyId((String)value);
       }
       break;
 
     case SUPERVISOR_ID:
       if (value == null) {
-        unset_supervisor_id();
+        unset_supervisorId();
       } else {
-        set_supervisor_id((String)value);
+        set_supervisorId((String)value);
       }
       break;
 
@@ -400,27 +303,11 @@ public class WorkerUploadMetrics implements org.apache.thrift.TBase<WorkerUpload
       }
       break;
 
-    case WORKER_METRIC:
-      if (value == null) {
-        unset_workerMetric();
-      } else {
-        set_workerMetric((MetricInfo)value);
-      }
-      break;
-
-    case NETTY_METRIC:
+    case ALL_METRICS:
       if (value == null) {
-        unset_nettyMetric();
+        unset_allMetrics();
       } else {
-        set_nettyMetric((NettyMetric)value);
-      }
-      break;
-
-    case TASK_METRIC:
-      if (value == null) {
-        unset_taskMetric();
-      } else {
-        set_taskMetric((Map<Integer,MetricInfo>)value);
+        set_allMetrics((MetricInfo)value);
       }
       break;
 
@@ -430,22 +317,16 @@ public class WorkerUploadMetrics implements org.apache.thrift.TBase<WorkerUpload
   public Object getFieldValue(_Fields field) {
     switch (field) {
     case TOPOLOGY_ID:
-      return get_topology_id();
+      return get_topologyId();
 
     case SUPERVISOR_ID:
-      return get_supervisor_id();
+      return get_supervisorId();
 
     case PORT:
       return Integer.valueOf(get_port());
 
-    case WORKER_METRIC:
-      return get_workerMetric();
-
-    case NETTY_METRIC:
-      return get_nettyMetric();
-
-    case TASK_METRIC:
-      return get_taskMetric();
+    case ALL_METRICS:
+      return get_allMetrics();
 
     }
     throw new IllegalStateException();
@@ -459,17 +340,13 @@ public class WorkerUploadMetrics implements org.apache.thrift.TBase<WorkerUpload
 
     switch (field) {
     case TOPOLOGY_ID:
-      return is_set_topology_id();
+      return is_set_topologyId();
     case SUPERVISOR_ID:
-      return is_set_supervisor_id();
+      return is_set_supervisorId();
     case PORT:
       return is_set_port();
-    case WORKER_METRIC:
-      return is_set_workerMetric();
-    case NETTY_METRIC:
-      return is_set_nettyMetric();
-    case TASK_METRIC:
-      return is_set_taskMetric();
+    case ALL_METRICS:
+      return is_set_allMetrics();
     }
     throw new IllegalStateException();
   }
@@ -487,21 +364,21 @@ public class WorkerUploadMetrics implements org.apache.thrift.TBase<WorkerUpload
     if (that == null)
       return false;
 
-    boolean this_present_topology_id = true && this.is_set_topology_id();
-    boolean that_present_topology_id = true && that.is_set_topology_id();
-    if (this_present_topology_id || that_present_topology_id) {
-      if (!(this_present_topology_id && that_present_topology_id))
+    boolean this_present_topologyId = true && this.is_set_topologyId();
+    boolean that_present_topologyId = true && that.is_set_topologyId();
+    if (this_present_topologyId || that_present_topologyId) {
+      if (!(this_present_topologyId && that_present_topologyId))
         return false;
-      if (!this.topology_id.equals(that.topology_id))
+      if (!this.topologyId.equals(that.topologyId))
         return false;
     }
 
-    boolean this_present_supervisor_id = true && this.is_set_supervisor_id();
-    boolean that_present_supervisor_id = true && that.is_set_supervisor_id();
-    if (this_present_supervisor_id || that_present_supervisor_id) {
-      if (!(this_present_supervisor_id && that_present_supervisor_id))
+    boolean this_present_supervisorId = true && this.is_set_supervisorId();
+    boolean that_present_supervisorId = true && that.is_set_supervisorId();
+    if (this_present_supervisorId || that_present_supervisorId) {
+      if (!(this_present_supervisorId && that_present_supervisorId))
         return false;
-      if (!this.supervisor_id.equals(that.supervisor_id))
+      if (!this.supervisorId.equals(that.supervisorId))
         return false;
     }
 
@@ -514,30 +391,12 @@ public class WorkerUploadMetrics implements org.apache.thrift.TBase<WorkerUpload
         return false;
     }
 
-    boolean this_present_workerMetric = true && this.is_set_workerMetric();
-    boolean that_present_workerMetric = true && that.is_set_workerMetric();
-    if (this_present_workerMetric || that_present_workerMetric) {
-      if (!(this_present_workerMetric && that_present_workerMetric))
+    boolean this_present_allMetrics = true && this.is_set_allMetrics();
+    boolean that_present_allMetrics = true && that.is_set_allMetrics();
+    if (this_present_allMetrics || that_present_allMetrics) {
+      if (!(this_present_allMetrics && that_present_allMetrics))
         return false;
-      if (!this.workerMetric.equals(that.workerMetric))
-        return false;
-    }
-
-    boolean this_present_nettyMetric = true && this.is_set_nettyMetric();
-    boolean that_present_nettyMetric = true && that.is_set_nettyMetric();
-    if (this_present_nettyMetric || that_present_nettyMetric) {
-      if (!(this_present_nettyMetric && that_present_nettyMetric))
-        return false;
-      if (!this.nettyMetric.equals(that.nettyMetric))
-        return false;
-    }
-
-    boolean this_present_taskMetric = true && this.is_set_taskMetric();
-    boolean that_present_taskMetric = true && that.is_set_taskMetric();
-    if (this_present_taskMetric || that_present_taskMetric) {
-      if (!(this_present_taskMetric && that_present_taskMetric))
-        return false;
-      if (!this.taskMetric.equals(that.taskMetric))
+      if (!this.allMetrics.equals(that.allMetrics))
         return false;
     }
 
@@ -548,35 +407,25 @@ public class WorkerUploadMetrics implements org.apache.thrift.TBase<WorkerUpload
   public int hashCode() {
     List<Object> list = new ArrayList<Object>();
 
-    boolean present_topology_id = true && (is_set_topology_id());
-    list.add(present_topology_id);
-    if (present_topology_id)
-      list.add(topology_id);
+    boolean present_topologyId = true && (is_set_topologyId());
+    list.add(present_topologyId);
+    if (present_topologyId)
+      list.add(topologyId);
 
-    boolean present_supervisor_id = true && (is_set_supervisor_id());
-    list.add(present_supervisor_id);
-    if (present_supervisor_id)
-      list.add(supervisor_id);
+    boolean present_supervisorId = true && (is_set_supervisorId());
+    list.add(present_supervisorId);
+    if (present_supervisorId)
+      list.add(supervisorId);
 
     boolean present_port = true;
     list.add(present_port);
     if (present_port)
       list.add(port);
 
-    boolean present_workerMetric = true && (is_set_workerMetric());
-    list.add(present_workerMetric);
-    if (present_workerMetric)
-      list.add(workerMetric);
-
-    boolean present_nettyMetric = true && (is_set_nettyMetric());
-    list.add(present_nettyMetric);
-    if (present_nettyMetric)
-      list.add(nettyMetric);
-
-    boolean present_taskMetric = true && (is_set_taskMetric());
-    list.add(present_taskMetric);
-    if (present_taskMetric)
-      list.add(taskMetric);
+    boolean present_allMetrics = true && (is_set_allMetrics());
+    list.add(present_allMetrics);
+    if (present_allMetrics)
+      list.add(allMetrics);
 
     return list.hashCode();
   }
@@ -589,22 +438,22 @@ public class WorkerUploadMetrics implements org.apache.thrift.TBase<WorkerUpload
 
     int lastComparison = 0;
 
-    lastComparison = Boolean.valueOf(is_set_topology_id()).compareTo(other.is_set_topology_id());
+    lastComparison = Boolean.valueOf(is_set_topologyId()).compareTo(other.is_set_topologyId());
     if (lastComparison != 0) {
       return lastComparison;
     }
-    if (is_set_topology_id()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.topology_id, other.topology_id);
+    if (is_set_topologyId()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.topologyId, other.topologyId);
       if (lastComparison != 0) {
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(is_set_supervisor_id()).compareTo(other.is_set_supervisor_id());
+    lastComparison = Boolean.valueOf(is_set_supervisorId()).compareTo(other.is_set_supervisorId());
     if (lastComparison != 0) {
       return lastComparison;
     }
-    if (is_set_supervisor_id()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.supervisor_id, other.supervisor_id);
+    if (is_set_supervisorId()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.supervisorId, other.supervisorId);
       if (lastComparison != 0) {
         return lastComparison;
       }
@@ -619,32 +468,12 @@ public class WorkerUploadMetrics implements org.apache.thrift.TBase<WorkerUpload
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(is_set_workerMetric()).compareTo(other.is_set_workerMetric());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_workerMetric()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.workerMetric, other.workerMetric);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_nettyMetric()).compareTo(other.is_set_nettyMetric());
+    lastComparison = Boolean.valueOf(is_set_allMetrics()).compareTo(other.is_set_allMetrics());
     if (lastComparison != 0) {
       return lastComparison;
     }
-    if (is_set_nettyMetric()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nettyMetric, other.nettyMetric);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_taskMetric()).compareTo(other.is_set_taskMetric());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_taskMetric()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.taskMetric, other.taskMetric);
+    if (is_set_allMetrics()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.allMetrics, other.allMetrics);
       if (lastComparison != 0) {
         return lastComparison;
       }
@@ -656,11 +485,11 @@ public class WorkerUploadMetrics implements org.apache.thrift.TBase<WorkerUpload
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -669,19 +498,19 @@ public class WorkerUploadMetrics implements org.apache.thrift.TBase<WorkerUpload
     StringBuilder sb = new StringBuilder("WorkerUploadMetrics(");
     boolean first = true;
 
-    sb.append("topology_id:");
-    if (this.topology_id == null) {
+    sb.append("topologyId:");
+    if (this.topologyId == null) {
       sb.append("null");
     } else {
-      sb.append(this.topology_id);
+      sb.append(this.topologyId);
     }
     first = false;
     if (!first) sb.append(", ");
-    sb.append("supervisor_id:");
-    if (this.supervisor_id == null) {
+    sb.append("supervisorId:");
+    if (this.supervisorId == null) {
       sb.append("null");
     } else {
-      sb.append(this.supervisor_id);
+      sb.append(this.supervisorId);
     }
     first = false;
     if (!first) sb.append(", ");
@@ -689,72 +518,45 @@ public class WorkerUploadMetrics implements org.apache.thrift.TBase<WorkerUpload
     sb.append(this.port);
     first = false;
     if (!first) sb.append(", ");
-    sb.append("workerMetric:");
-    if (this.workerMetric == null) {
+    sb.append("allMetrics:");
+    if (this.allMetrics == null) {
       sb.append("null");
     } else {
-      sb.append(this.workerMetric);
-    }
-    first = false;
-    if (!first) sb.append(", ");
-    sb.append("nettyMetric:");
-    if (this.nettyMetric == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.nettyMetric);
-    }
-    first = false;
-    if (!first) sb.append(", ");
-    sb.append("taskMetric:");
-    if (this.taskMetric == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.taskMetric);
+      sb.append(this.allMetrics);
     }
     first = false;
     sb.append(")");
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
-    if (!is_set_topology_id()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'topology_id' is unset! Struct:" + toString());
+    if (!is_set_topologyId()) {
+      throw new TProtocolException("Required field 'topologyId' is unset! Struct:" + toString());
     }
 
-    if (!is_set_supervisor_id()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'supervisor_id' is unset! Struct:" + toString());
+    if (!is_set_supervisorId()) {
+      throw new TProtocolException("Required field 'supervisorId' is unset! Struct:" + toString());
     }
 
     if (!is_set_port()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'port' is unset! Struct:" + toString());
-    }
-
-    if (!is_set_workerMetric()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'workerMetric' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'port' is unset! Struct:" + toString());
     }
 
-    if (!is_set_nettyMetric()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'nettyMetric' is unset! Struct:" + toString());
-    }
-
-    if (!is_set_taskMetric()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'taskMetric' is unset! Struct:" + toString());
+    if (!is_set_allMetrics()) {
+      throw new TProtocolException("Required field 'allMetrics' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
-    if (workerMetric != null) {
-      workerMetric.validate();
-    }
-    if (nettyMetric != null) {
-      nettyMetric.validate();
+    if (allMetrics != null) {
+      allMetrics.validate();
     }
   }
 
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -764,7 +566,7 @@ public class WorkerUploadMetrics implements org.apache.thrift.TBase<WorkerUpload
       // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
       __isset_bitfield = 0;
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -777,7 +579,7 @@ public class WorkerUploadMetrics implements org.apache.thrift.TBase<WorkerUpload
 
   private static class WorkerUploadMetricsStandardScheme extends StandardScheme<WorkerUploadMetrics> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, WorkerUploadMetrics struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, WorkerUploadMetrics struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -789,16 +591,16 @@ public class WorkerUploadMetrics implements org.apache.thrift.TBase<WorkerUpload
         switch (schemeField.id) {
           case 1: // TOPOLOGY_ID
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.topology_id = iprot.readString();
-              struct.set_topology_id_isSet(true);
+              struct.topologyId = iprot.readString();
+              struct.set_topologyId_isSet(true);
             } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
           case 2: // SUPERVISOR_ID
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.supervisor_id = iprot.readString();
-              struct.set_supervisor_id_isSet(true);
+              struct.supervisorId = iprot.readString();
+              struct.set_supervisorId_isSet(true);
             } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
@@ -811,41 +613,11 @@ public class WorkerUploadMetrics implements org.apache.thrift.TBase<WorkerUpload
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
-          case 4: // WORKER_METRIC
+          case 4: // ALL_METRICS
             if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-              struct.workerMetric = new MetricInfo();
-              struct.workerMetric.read(iprot);
-              struct.set_workerMetric_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 5: // NETTY_METRIC
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-              struct.nettyMetric = new NettyMetric();
-              struct.nettyMetric.read(iprot);
-              struct.set_nettyMetric_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 6: // TASK_METRIC
-            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
-              {
-                org.apache.thrift.protocol.TMap _map284 = iprot.readMapBegin();
-                struct.taskMetric = new HashMap<Integer,MetricInfo>(2*_map284.size);
-                int _key285;
-                MetricInfo _val286;
-                for (int _i287 = 0; _i287 < _map284.size; ++_i287)
-                {
-                  _key285 = iprot.readI32();
-                  _val286 = new MetricInfo();
-                  _val286.read(iprot);
-                  struct.taskMetric.put(_key285, _val286);
-                }
-                iprot.readMapEnd();
-              }
-              struct.set_taskMetric_isSet(true);
+              struct.allMetrics = new MetricInfo();
+              struct.allMetrics.read(iprot);
+              struct.set_allMetrics_isSet(true);
             } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
@@ -859,44 +631,26 @@ public class WorkerUploadMetrics implements org.apache.thrift.TBase<WorkerUpload
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, WorkerUploadMetrics struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, WorkerUploadMetrics struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
-      if (struct.topology_id != null) {
+      if (struct.topologyId != null) {
         oprot.writeFieldBegin(TOPOLOGY_ID_FIELD_DESC);
-        oprot.writeString(struct.topology_id);
+        oprot.writeString(struct.topologyId);
         oprot.writeFieldEnd();
       }
-      if (struct.supervisor_id != null) {
+      if (struct.supervisorId != null) {
         oprot.writeFieldBegin(SUPERVISOR_ID_FIELD_DESC);
-        oprot.writeString(struct.supervisor_id);
+        oprot.writeString(struct.supervisorId);
         oprot.writeFieldEnd();
       }
       oprot.writeFieldBegin(PORT_FIELD_DESC);
       oprot.writeI32(struct.port);
       oprot.writeFieldEnd();
-      if (struct.workerMetric != null) {
-        oprot.writeFieldBegin(WORKER_METRIC_FIELD_DESC);
-        struct.workerMetric.write(oprot);
-        oprot.writeFieldEnd();
-      }
-      if (struct.nettyMetric != null) {
-        oprot.writeFieldBegin(NETTY_METRIC_FIELD_DESC);
-        struct.nettyMetric.write(oprot);
-        oprot.writeFieldEnd();
-      }
-      if (struct.taskMetric != null) {
-        oprot.writeFieldBegin(TASK_METRIC_FIELD_DESC);
-        {
-          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I32, org.apache.thrift.protocol.TType.STRUCT, struct.taskMetric.size()));
-          for (Map.Entry<Integer, MetricInfo> _iter288 : struct.taskMetric.entrySet())
-          {
-            oprot.writeI32(_iter288.getKey());
-            _iter288.getValue().write(oprot);
-          }
-          oprot.writeMapEnd();
-        }
+      if (struct.allMetrics != null) {
+        oprot.writeFieldBegin(ALL_METRICS_FIELD_DESC);
+        struct.allMetrics.write(oprot);
         oprot.writeFieldEnd();
       }
       oprot.writeFieldStop();
@@ -914,52 +668,26 @@ public class WorkerUploadMetrics implements org.apache.thrift.TBase<WorkerUpload
   private static class WorkerUploadMetricsTupleScheme extends TupleScheme<WorkerUploadMetrics> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, WorkerUploadMetrics struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, WorkerUploadMetrics struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
-      oprot.writeString(struct.topology_id);
-      oprot.writeString(struct.supervisor_id);
+      oprot.writeString(struct.topologyId);
+      oprot.writeString(struct.supervisorId);
       oprot.writeI32(struct.port);
-      struct.workerMetric.write(oprot);
-      struct.nettyMetric.write(oprot);
-      {
-        oprot.writeI32(struct.taskMetric.size());
-        for (Map.Entry<Integer, MetricInfo> _iter289 : struct.taskMetric.entrySet())
-        {
-          oprot.writeI32(_iter289.getKey());
-          _iter289.getValue().write(oprot);
-        }
-      }
+      struct.allMetrics.write(oprot);
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, WorkerUploadMetrics struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, WorkerUploadMetrics struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
-      struct.topology_id = iprot.readString();
-      struct.set_topology_id_isSet(true);
-      struct.supervisor_id = iprot.readString();
-      struct.set_supervisor_id_isSet(true);
+      struct.topologyId = iprot.readString();
+      struct.set_topologyId_isSet(true);
+      struct.supervisorId = iprot.readString();
+      struct.set_supervisorId_isSet(true);
       struct.port = iprot.readI32();
       struct.set_port_isSet(true);
-      struct.workerMetric = new MetricInfo();
-      struct.workerMetric.read(iprot);
-      struct.set_workerMetric_isSet(true);
-      struct.nettyMetric = new NettyMetric();
-      struct.nettyMetric.read(iprot);
-      struct.set_nettyMetric_isSet(true);
-      {
-        org.apache.thrift.protocol.TMap _map290 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I32, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.taskMetric = new HashMap<Integer,MetricInfo>(2*_map290.size);
-        int _key291;
-        MetricInfo _val292;
-        for (int _i293 = 0; _i293 < _map290.size; ++_i293)
-        {
-          _key291 = iprot.readI32();
-          _val292 = new MetricInfo();
-          _val292.read(iprot);
-          struct.taskMetric.put(_key291, _val292);
-        }
-      }
-      struct.set_taskMetric_isSet(true);
+      struct.allMetrics = new MetricInfo();
+      struct.allMetrics.read(iprot);
+      struct.set_allMetrics_isSet(true);
     }
   }
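
For orientation: after this change a worker upload carries one consolidated MetricInfo instead of the old workerMetric/nettyMetric/taskMetric trio. Below is a minimal Java sketch of building and round-tripping the new struct through Thrift's binary protocol; the topology and supervisor values are made up, and it assumes an empty MetricInfo passes its own validate() (any required MetricInfo fields would have to be populated first).

    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.TException;
    import org.apache.thrift.TSerializer;
    import org.apache.thrift.protocol.TBinaryProtocol;

    public class WorkerUploadMetricsRoundTrip {
        public static void main(String[] args) throws TException {
            // One MetricInfo now stands in for the removed per-kind metric fields.
            WorkerUploadMetrics metrics = new WorkerUploadMetrics(
                    "demo-topology-1", "supervisor-a", 6800, new MetricInfo());

            // Serialize and deserialize the way an upload would travel on the wire.
            byte[] wire = new TSerializer(new TBinaryProtocol.Factory()).serialize(metrics);
            WorkerUploadMetrics decoded = new WorkerUploadMetrics();
            new TDeserializer(new TBinaryProtocol.Factory()).deserialize(decoded, wire);

            System.out.println(decoded.get_topologyId() + ":" + decoded.get_port());
        }
    }

Note that the new allMetrics field reuses id 4 with the same MetricInfo type, so the wire slot that previously held workerMetric now carries the consolidated metrics.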
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/grouping/CustomStreamGrouping.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/grouping/CustomStreamGrouping.java b/jstorm-core/src/main/java/backtype/storm/grouping/CustomStreamGrouping.java
index 0e599f5..d1ce29d 100755
--- a/jstorm-core/src/main/java/backtype/storm/grouping/CustomStreamGrouping.java
+++ b/jstorm-core/src/main/java/backtype/storm/grouping/CustomStreamGrouping.java
@@ -23,21 +23,19 @@ import java.io.Serializable;
 import java.util.List;
 
 public interface CustomStreamGrouping extends Serializable {
-    
-   /**
-     * Tells the stream grouping at runtime the tasks in the target bolt.
-     * This information should be used in chooseTasks to determine the target tasks.
+
+    /**
+     * Tells the stream grouping at runtime the tasks in the target bolt. This information should be used in chooseTasks to determine the target tasks.
      * 
      * It also tells the grouping the metadata on the stream this grouping will be used on.
      */
-   void prepare(WorkerTopologyContext context, GlobalStreamId stream, List<Integer> targetTasks);
-    
-   /**
-     * This function implements a custom stream grouping. It takes in as input
-     * the number of tasks in the target bolt in prepare and returns the
-     * tasks to send the tuples to.
+    void prepare(WorkerTopologyContext context, GlobalStreamId stream, List<Integer> targetTasks);
+
+    /**
+     * This function implements a custom stream grouping. It takes in as input the number of tasks in the target bolt in prepare and returns the tasks to send
+     * the tuples to.
      * 
      * @param values the values to group on
      */
-   List<Integer> chooseTasks(int taskId, List<Object> values); 
+    List<Integer> chooseTasks(int taskId, List<Object> values);
 }
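
As a concrete illustration of the prepare/chooseTasks contract documented above, here is a minimal grouping that routes each tuple on the hash of its first value. The class name and hashing choice are illustrative only, not part of this change.

    import java.util.Collections;
    import java.util.List;

    import backtype.storm.generated.GlobalStreamId;
    import backtype.storm.grouping.CustomStreamGrouping;
    import backtype.storm.task.WorkerTopologyContext;

    public class FirstFieldHashGrouping implements CustomStreamGrouping {
        private List<Integer> targetTasks;

        @Override
        public void prepare(WorkerTopologyContext context, GlobalStreamId stream, List<Integer> targetTasks) {
            this.targetTasks = targetTasks; // candidate tasks, fixed for the topology's lifetime
        }

        @Override
        public List<Integer> chooseTasks(int taskId, List<Object> values) {
            int hash = (values.isEmpty() || values.get(0) == null) ? 0 : values.get(0).hashCode();
            int index = ((hash % targetTasks.size()) + targetTasks.size()) % targetTasks.size(); // non-negative modulo
            return Collections.singletonList(targetTasks.get(index));
        }
    }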

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/grouping/PartialKeyGrouping.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/grouping/PartialKeyGrouping.java b/jstorm-core/src/main/java/backtype/storm/grouping/PartialKeyGrouping.java
index d1f534b..2cc936c 100755
--- a/jstorm-core/src/main/java/backtype/storm/grouping/PartialKeyGrouping.java
+++ b/jstorm-core/src/main/java/backtype/storm/grouping/PartialKeyGrouping.java
@@ -20,6 +20,7 @@ package backtype.storm.grouping;
 import java.io.Serializable;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 
 import backtype.storm.generated.GlobalStreamId;
@@ -40,7 +41,7 @@ public class PartialKeyGrouping implements CustomStreamGrouping, Serializable {
     private Fields outFields = null;
 
     public PartialKeyGrouping() {
-        //Empty
+        // Empty
     }
 
     public PartialKeyGrouping(Fields fields) {
@@ -65,12 +66,37 @@ public class PartialKeyGrouping implements CustomStreamGrouping, Serializable {
                 List<Object> selectedFields = outFields.select(fields, values);
                 ByteBuffer out = ByteBuffer.allocate(selectedFields.size() * 4);
                 for (Object o: selectedFields) {
-                    out.putInt(o.hashCode());
+                    if (o instanceof List) {
+                        out.putInt(Arrays.deepHashCode(((List) o).toArray()));
+                    } else if (o instanceof Object[]) {
+                        out.putInt(Arrays.deepHashCode((Object[])o));
+                    } else if (o instanceof byte[]) {
+                        out.putInt(Arrays.hashCode((byte[]) o));
+                    } else if (o instanceof short[]) {
+                        out.putInt(Arrays.hashCode((short[]) o));
+                    } else if (o instanceof int[]) {
+                        out.putInt(Arrays.hashCode((int[]) o));
+                    } else if (o instanceof long[]) {
+                        out.putInt(Arrays.hashCode((long[]) o));
+                    } else if (o instanceof char[]) {
+                        out.putInt(Arrays.hashCode((char[]) o));
+                    } else if (o instanceof float[]) {
+                        out.putInt(Arrays.hashCode((float[]) o));
+                    } else if (o instanceof double[]) {
+                        out.putInt(Arrays.hashCode((double[]) o));
+                    } else if (o instanceof boolean[]) {
+                        out.putInt(Arrays.hashCode((boolean[]) o));
+                    } else if (o != null) {
+                        out.putInt(o.hashCode());
+                    } else {
+                        out.putInt(0);
+                    }
                 }
                 raw = out.array();
             } else {
                 raw = values.get(0).toString().getBytes(); // assume key is the first field
             }
+
             int firstChoice = (int) (Math.abs(h1.hashBytes(raw).asLong()) % this.targetTasks.size());
             int secondChoice = (int) (Math.abs(h2.hashBytes(raw).asLong()) % this.targetTasks.size());
             int selected = targetTaskStats[firstChoice] > targetTaskStats[secondChoice] ? secondChoice : firstChoice;
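
Two things are going on in this hunk. The instanceof ladder exists because Java arrays inherit Object's identity hashCode, so value-based Arrays.hashCode/deepHashCode is substituted to keep routing stable. The selection at the end is the "power of two choices" at the heart of partial key grouping: each key is hashed by two functions (Guava murmur hashes h1 and h2 in the real code) and the tuple goes to the less-loaded of the two candidate tasks, bounding the skew a hot key can cause. A self-contained sketch of just that step, with simple seeded hashes standing in for murmur:

    import java.nio.charset.StandardCharsets;

    public class TwoChoicesDemo {
        // Simple seeded hash; purely a stand-in for Guava's murmur128 h1/h2.
        static int hash(byte[] data, int seed) {
            int h = seed;
            for (byte b : data) {
                h = h * 31 + b;
            }
            return h;
        }

        public static void main(String[] args) {
            int numTasks = 4;
            long[] taskLoad = new long[numTasks]; // mirrors targetTaskStats
            for (String key : new String[] {"a", "b", "a", "c", "a", "b", "a"}) {
                byte[] raw = key.getBytes(StandardCharsets.UTF_8);
                int first = ((hash(raw, 17) % numTasks) + numTasks) % numTasks;
                int second = ((hash(raw, 101) % numTasks) + numTasks) % numTasks;
                // Send to the less-loaded of the two candidates, as chooseTasks does.
                int selected = taskLoad[first] > taskLoad[second] ? second : first;
                taskLoad[selected]++;
                System.out.println(key + " -> task " + selected);
            }
        }
    }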

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/hooks/BaseTaskHook.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/hooks/BaseTaskHook.java b/jstorm-core/src/main/java/backtype/storm/hooks/BaseTaskHook.java
index 12386d5..2f51576 100755
--- a/jstorm-core/src/main/java/backtype/storm/hooks/BaseTaskHook.java
+++ b/jstorm-core/src/main/java/backtype/storm/hooks/BaseTaskHook.java
@@ -33,7 +33,7 @@ public class BaseTaskHook implements ITaskHook {
 
     @Override
     public void cleanup() {
-    }    
+    }
 
     @Override
     public void emit(EmitInfo info) {

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/hooks/ITaskHook.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/hooks/ITaskHook.java b/jstorm-core/src/main/java/backtype/storm/hooks/ITaskHook.java
index c2833ca..ab19d63 100755
--- a/jstorm-core/src/main/java/backtype/storm/hooks/ITaskHook.java
+++ b/jstorm-core/src/main/java/backtype/storm/hooks/ITaskHook.java
@@ -28,11 +28,18 @@ import java.util.Map;
 
 public interface ITaskHook {
     void prepare(Map conf, TopologyContext context);
+
     void cleanup();
+
     void emit(EmitInfo info);
+
     void spoutAck(SpoutAckInfo info);
+
     void spoutFail(SpoutFailInfo info);
+
     void boltExecute(BoltExecuteInfo info);
+
     void boltAck(BoltAckInfo info);
+
     void boltFail(BoltFailInfo info);
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/hooks/info/BoltAckInfo.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/hooks/info/BoltAckInfo.java b/jstorm-core/src/main/java/backtype/storm/hooks/info/BoltAckInfo.java
index 769a37c..228da94 100755
--- a/jstorm-core/src/main/java/backtype/storm/hooks/info/BoltAckInfo.java
+++ b/jstorm-core/src/main/java/backtype/storm/hooks/info/BoltAckInfo.java
@@ -23,7 +23,7 @@ public class BoltAckInfo {
     public Tuple tuple;
     public int ackingTaskId;
     public Long processLatencyMs; // null if it wasn't sampled
-    
+
     public BoltAckInfo(Tuple tuple, int ackingTaskId, Long processLatencyMs) {
         this.tuple = tuple;
         this.ackingTaskId = ackingTaskId;

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/hooks/info/BoltExecuteInfo.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/hooks/info/BoltExecuteInfo.java b/jstorm-core/src/main/java/backtype/storm/hooks/info/BoltExecuteInfo.java
index 52e2c70..d666322 100755
--- a/jstorm-core/src/main/java/backtype/storm/hooks/info/BoltExecuteInfo.java
+++ b/jstorm-core/src/main/java/backtype/storm/hooks/info/BoltExecuteInfo.java
@@ -23,7 +23,7 @@ public class BoltExecuteInfo {
     public Tuple tuple;
     public int executingTaskId;
     public Long executeLatencyMs; // null if it wasn't sampled
-    
+
     public BoltExecuteInfo(Tuple tuple, int executingTaskId, Long executeLatencyMs) {
         this.tuple = tuple;
         this.executingTaskId = executingTaskId;

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/hooks/info/BoltFailInfo.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/hooks/info/BoltFailInfo.java b/jstorm-core/src/main/java/backtype/storm/hooks/info/BoltFailInfo.java
index 7dc930d..d32416c 100755
--- a/jstorm-core/src/main/java/backtype/storm/hooks/info/BoltFailInfo.java
+++ b/jstorm-core/src/main/java/backtype/storm/hooks/info/BoltFailInfo.java
@@ -23,7 +23,7 @@ public class BoltFailInfo {
     public Tuple tuple;
     public int failingTaskId;
     public Long failLatencyMs; // null if it wasn't sampled
-    
+
     public BoltFailInfo(Tuple tuple, int failingTaskId, Long failLatencyMs) {
         this.tuple = tuple;
         this.failingTaskId = failingTaskId;

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/hooks/info/EmitInfo.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/hooks/info/EmitInfo.java b/jstorm-core/src/main/java/backtype/storm/hooks/info/EmitInfo.java
index 59c01fa..0e7b369 100755
--- a/jstorm-core/src/main/java/backtype/storm/hooks/info/EmitInfo.java
+++ b/jstorm-core/src/main/java/backtype/storm/hooks/info/EmitInfo.java
@@ -25,7 +25,7 @@ public class EmitInfo {
     public String stream;
     public int taskId;
     public Collection<Integer> outTasks;
-    
+
     public EmitInfo(List<Object> values, String stream, int taskId, Collection<Integer> outTasks) {
         this.values = values;
         this.stream = stream;

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/hooks/info/SpoutAckInfo.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/hooks/info/SpoutAckInfo.java b/jstorm-core/src/main/java/backtype/storm/hooks/info/SpoutAckInfo.java
index 962f998..9e2735b 100755
--- a/jstorm-core/src/main/java/backtype/storm/hooks/info/SpoutAckInfo.java
+++ b/jstorm-core/src/main/java/backtype/storm/hooks/info/SpoutAckInfo.java
@@ -21,7 +21,7 @@ public class SpoutAckInfo {
     public Object messageId;
     public int spoutTaskId;
     public Long completeLatencyMs; // null if it wasn't sampled
-    
+
     public SpoutAckInfo(Object messageId, int spoutTaskId, Long completeLatencyMs) {
         this.messageId = messageId;
         this.spoutTaskId = spoutTaskId;

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/hooks/info/SpoutFailInfo.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/hooks/info/SpoutFailInfo.java b/jstorm-core/src/main/java/backtype/storm/hooks/info/SpoutFailInfo.java
index 493d1e4..76ad7d1 100755
--- a/jstorm-core/src/main/java/backtype/storm/hooks/info/SpoutFailInfo.java
+++ b/jstorm-core/src/main/java/backtype/storm/hooks/info/SpoutFailInfo.java
@@ -21,7 +21,7 @@ public class SpoutFailInfo {
     public Object messageId;
     public int spoutTaskId;
     public Long failLatencyMs; // null if it wasn't sampled
-    
+
     public SpoutFailInfo(Object messageId, int spoutTaskId, Long failLatencyMs) {
         this.messageId = messageId;
         this.spoutTaskId = spoutTaskId;

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/messaging/ConnectionWithStatus.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/messaging/ConnectionWithStatus.java b/jstorm-core/src/main/java/backtype/storm/messaging/ConnectionWithStatus.java
index 37981ca..96bbb90 100644
--- a/jstorm-core/src/main/java/backtype/storm/messaging/ConnectionWithStatus.java
+++ b/jstorm-core/src/main/java/backtype/storm/messaging/ConnectionWithStatus.java
@@ -19,31 +19,28 @@ package backtype.storm.messaging;
 
 public abstract class ConnectionWithStatus implements IConnection {
 
-  public static enum Status {
+    public static enum Status {
 
-    /**
-     * we are establishing a active connection with target host. The new data
-     * sending request can be buffered for future sending, or dropped(cases like
-     * there is no enough memory). It varies with difference IConnection
-     * implementations.
-     */
-    Connecting,
+        /**
+         * We are establishing an active connection with the target host. New data sending requests may be buffered for future sending, or dropped (in cases
+         * where there is not enough memory). This varies across IConnection implementations.
+         */
+        Connecting,
 
-    /**
-     * We have a alive connection channel, which can be used to transfer data.
-     */
-    Ready,
+        /**
+         * We have a live connection channel, which can be used to transfer data.
+         */
+        Ready,
+
+        /**
+         * The connection channel is closed or being closed. We don't accept further data sending or receiving. All data sending requests will be dropped.
+         */
+        Closed
+    };
 
     /**
-     * The connection channel is closed or being closed. We don't accept further
-     * data sending or receiving. All data sending request will be dropped.
+     * Whether this connection is available to transfer data.
      */
-    Closed
-  };
-
-  /**
-   * whether this connection is available to transfer data
-   */
-  public abstract Status status();
+    public abstract Status status();
 
 }
\ No newline at end of file
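
The three states above suggest the obvious send guard; a minimal sketch, assuming the caller polls status() before handing off data (the helper name is illustrative):

    import backtype.storm.messaging.ConnectionWithStatus;
    import backtype.storm.messaging.ConnectionWithStatus.Status;
    import backtype.storm.messaging.TaskMessage;

    public final class SendIfReady {
        // Only Ready guarantees a live channel: Connecting may buffer or
        // drop the message, and Closed always drops it.
        static boolean trySend(ConnectionWithStatus conn, TaskMessage msg) {
            if (conn.status() == Status.Ready) {
                conn.send(msg);
                return true;
            }
            return false;
        }
    }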

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/messaging/IConnection.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/messaging/IConnection.java b/jstorm-core/src/main/java/backtype/storm/messaging/IConnection.java
index 24c404f..19c0cdc 100644
--- a/jstorm-core/src/main/java/backtype/storm/messaging/IConnection.java
+++ b/jstorm-core/src/main/java/backtype/storm/messaging/IConnection.java
@@ -32,8 +32,7 @@ public interface IConnection {
     public Object recv(Integer taskId, int flags);
 
     /**
-     * In the new design, receive flow is through registerQueue, then push
-     * message into queue
+     * In the new design, the receive flow goes through registerQueue, which then pushes messages into the queue.
      * 
      * @param recvQueu
      */
@@ -45,6 +44,8 @@ public interface IConnection {
 
     public void send(TaskMessage message);
 
+    public boolean available();
+
     /**
      * close this connection
      */

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/messaging/IContext.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/messaging/IContext.java b/jstorm-core/src/main/java/backtype/storm/messaging/IContext.java
index 2611366..2241faa 100644
--- a/jstorm-core/src/main/java/backtype/storm/messaging/IContext.java
+++ b/jstorm-core/src/main/java/backtype/storm/messaging/IContext.java
@@ -25,11 +25,9 @@ import backtype.storm.utils.DisruptorQueue;
 /**
  * This interface needs to be implemented for messaging plugin.
  * 
- * Messaging plugin is specified via Storm config parameter,
- * storm.messaging.transport.
+ * The messaging plugin is specified via the Storm config parameter storm.messaging.transport.
  * 
- * A messaging plugin should have a default constructor and implements IContext
- * interface. Upon construction, we will invoke IContext::prepare(storm_conf) to
+ * A messaging plugin should have a default constructor and implement the IContext interface. Upon construction, we will invoke IContext::prepare(storm_conf) to
  * enable context to be configured according to storm configuration.
  */
 public interface IContext {

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/messaging/TaskMessage.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/messaging/TaskMessage.java b/jstorm-core/src/main/java/backtype/storm/messaging/TaskMessage.java
index fd95f5d..5895e76 100755
--- a/jstorm-core/src/main/java/backtype/storm/messaging/TaskMessage.java
+++ b/jstorm-core/src/main/java/backtype/storm/messaging/TaskMessage.java
@@ -22,20 +22,20 @@ import java.nio.ByteBuffer;
 public class TaskMessage {
     private int _task;
     private byte[] _message;
-    
+
     public TaskMessage(int task, byte[] message) {
         _task = task;
         _message = message;
     }
-    
+
     public int task() {
         return _task;
     }
-    
+
     public byte[] message() {
         return _message;
     }
-    
+
     public static boolean isEmpty(TaskMessage message) {
         if (message == null) {
             return true;
@@ -44,10 +44,10 @@ public class TaskMessage {
         } else if (message.message().length == 0) {
             return true;
         }
-        
+
         return false;
     }
-    
+
     @Deprecated
     public ByteBuffer serialize() {
         ByteBuffer bb = ByteBuffer.allocate(_message.length + 2);
@@ -55,7 +55,7 @@ public class TaskMessage {
         bb.put(_message);
         return bb;
     }
-    
+
     @Deprecated
     public void deserialize(ByteBuffer packet) {
         if (packet == null)
@@ -64,5 +64,5 @@ public class TaskMessage {
         _message = new byte[packet.limit() - 2];
         packet.get(_message);
     }
-    
+
 }
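
The deprecated serialize/deserialize pair above fixes a tiny wire framing: a 2-byte task id followed by the raw payload. A sketch of a round trip; note that serialize() returns the buffer with its position at the end, so it must be rewound before reading:

    import java.nio.ByteBuffer;

    import backtype.storm.messaging.TaskMessage;

    public class TaskMessageFramingDemo {
        public static void main(String[] args) {
            TaskMessage out = new TaskMessage(7, "hello".getBytes());
            ByteBuffer wire = out.serialize(); // [short taskId][payload bytes]
            wire.rewind();

            TaskMessage in = new TaskMessage(0, null);
            in.deserialize(wire);
            System.out.println(in.task() + " -> " + new String(in.message()));
        }
    }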

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/messaging/TransportFactory.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/messaging/TransportFactory.java b/jstorm-core/src/main/java/backtype/storm/messaging/TransportFactory.java
index 4eddf4b..4cda654 100755
--- a/jstorm-core/src/main/java/backtype/storm/messaging/TransportFactory.java
+++ b/jstorm-core/src/main/java/backtype/storm/messaging/TransportFactory.java
@@ -28,13 +28,13 @@ import backtype.storm.Config;
 
 public class TransportFactory {
     public static final Logger LOG = LoggerFactory.getLogger(TransportFactory.class);
-    
+
     public static IContext makeContext(Map storm_conf) {
-        
+
         // get factory class name
         String transport_plugin_klassName = (String) storm_conf.get(Config.STORM_MESSAGING_TRANSPORT);
         LOG.info("JStorm peer transport plugin:" + transport_plugin_klassName);
-        
+
         IContext transport = null;
         try {
             // create a factory class
@@ -64,5 +64,5 @@ public class TransportFactory {
         }
         return transport;
     }
-    
+
 }
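
Tying IContext and TransportFactory together: the plugin is selected purely by configuration, instantiated reflectively through its default constructor, and then handed the config via prepare(). A sketch of the wiring; the plugin class name here is hypothetical, so running this verbatim would fail to load it:

    import java.util.HashMap;
    import java.util.Map;

    import backtype.storm.Config;
    import backtype.storm.messaging.IContext;
    import backtype.storm.messaging.TransportFactory;

    public class TransportWiring {
        public static void main(String[] args) {
            Map conf = new HashMap();
            // Any class with a default constructor implementing IContext works;
            // "com.example.MyTransportContext" is purely illustrative.
            conf.put(Config.STORM_MESSAGING_TRANSPORT, "com.example.MyTransportContext");
            IContext context = TransportFactory.makeContext(conf);
        }
    }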


[17/51] [partial] storm git commit: Update JStorm to latest release 2.1.0

Posted by pt...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/NimbusData.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/NimbusData.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/NimbusData.java
index 02b574f..3871074 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/NimbusData.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/NimbusData.java
@@ -17,36 +17,37 @@
  */
 package com.alibaba.jstorm.daemon.nimbus;
 
-import java.io.IOException;
-import java.nio.channels.Channel;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
+import backtype.storm.Config;
+import backtype.storm.generated.TopologyTaskHbInfo;
+import backtype.storm.scheduler.INimbus;
+import backtype.storm.utils.BufferFileInputStream;
+import backtype.storm.utils.TimeCacheMap;
 import com.alibaba.jstorm.cache.JStormCache;
+import com.alibaba.jstorm.callback.AsyncLoopThread;
+import com.alibaba.jstorm.client.ConfigExtension;
 import com.alibaba.jstorm.cluster.Cluster;
 import com.alibaba.jstorm.cluster.StormClusterState;
 import com.alibaba.jstorm.cluster.StormConfig;
 import com.alibaba.jstorm.cluster.StormZkClusterState;
+import com.alibaba.jstorm.metric.JStormMetricCache;
+import com.alibaba.jstorm.metric.JStormMetricsReporter;
 import com.alibaba.jstorm.task.TkHbCacheTime;
 import com.alibaba.jstorm.utils.JStormUtils;
 import com.alibaba.jstorm.utils.TimeUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import backtype.storm.Config;
-import backtype.storm.scheduler.INimbus;
-import backtype.storm.utils.BufferFileInputStream;
-import backtype.storm.utils.TimeCacheMap;
+import java.io.IOException;
+import java.nio.channels.Channel;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 
 /**
  * All nimbus data
- * 
  */
 public class NimbusData {
     private static final Logger LOG = LoggerFactory.getLogger(NimbusData.class);
@@ -62,7 +63,7 @@ public class NimbusData {
     private TimeCacheMap<Object, Object> downloaders;
     private TimeCacheMap<Object, Object> uploaders;
     // cache thrift response to avoid scan zk too frequently
-    private NimbusCache cache;
+    private NimbusCache nimbusCache;
 
     private int startTime;
 
@@ -82,17 +83,24 @@ public class NimbusData {
 
     private AtomicBoolean isShutdown = new AtomicBoolean(false);
 
-    private final TopologyMetricsRunnable metricRunnable;
+    private TopologyMetricsRunnable metricRunnable;
+    private AsyncLoopThread metricLoopThread;
 
     // The topologys which has been submitted, but the assignment is not
     // finished
     private TimeCacheMap<String, Object> pendingSubmitTopologys;
-
     private Map<String, Integer> topologyTaskTimeout;
-    
-    private TopologyNettyMgr topologyNettyMgr ;
 
-    @SuppressWarnings({ "unchecked", "rawtypes" })
+    // Map<TopologyId, TasksHeartbeat>
+    private Map<String, TopologyTaskHbInfo> tasksHeartbeat;
+
+    private final JStormMetricCache metricCache;
+
+    private final String clusterName;
+
+    private JStormMetricsReporter metricsReporter;
+
+    @SuppressWarnings({"unchecked", "rawtypes"})
     public NimbusData(Map conf, INimbus inimbus) throws Exception {
         this.conf = conf;
 
@@ -104,8 +112,7 @@ public class NimbusData {
 
         createCache();
 
-        this.taskHeartbeatsCache =
-                new ConcurrentHashMap<String, Map<Integer, TkHbCacheTime>>();
+        this.taskHeartbeatsCache = new ConcurrentHashMap<String, Map<Integer, TkHbCacheTime>>();
 
         this.scheduExec = Executors.newScheduledThreadPool(SCHEDULE_THREAD_NUM);
 
@@ -117,66 +124,63 @@ public class NimbusData {
 
         localMode = StormConfig.local_mode(conf);
 
-        this.topologyNettyMgr = new TopologyNettyMgr(conf);
+        this.metricCache = new JStormMetricCache(conf, this.stormClusterState);
+        this.clusterName = ConfigExtension.getClusterName(conf);
+
         this.metricRunnable = new TopologyMetricsRunnable(this);
+        this.metricRunnable.init();
 
-        pendingSubmitTopologys =
-                new TimeCacheMap<String, Object>(JStormUtils.MIN_30);
-        
+        pendingSubmitTopologys = new TimeCacheMap<String, Object>(JStormUtils.MIN_30);
         topologyTaskTimeout = new ConcurrentHashMap<String, Integer>();
+        tasksHeartbeat = new ConcurrentHashMap<String, TopologyTaskHbInfo>();
+
+        if (!localMode) {
+            startMetricThreads();
+        }
     }
 
-    /**
-     * Just for test
-     */
-    public NimbusData() {
-        scheduExec = Executors.newScheduledThreadPool(6);
+    public void startMetricThreads() {
+        this.metricRunnable.start();
 
-        inimubs = null;
-        conf = new HashMap<Object, Object>();
-        localMode = false;
-        this.metricRunnable = new TopologyMetricsRunnable(this);
+        // init nimbus metric reporter
+        this.metricsReporter = new JStormMetricsReporter(this);
+        this.metricsReporter.init();
     }
 
     public void createFileHandler() {
-        TimeCacheMap.ExpiredCallback<Object, Object> expiredCallback =
-                new TimeCacheMap.ExpiredCallback<Object, Object>() {
-                    @Override
-                    public void expire(Object key, Object val) {
-                        try {
-                            LOG.info("Close file " + String.valueOf(key));
-                            if (val != null) {
-                                if (val instanceof Channel) {
-                                    Channel channel = (Channel) val;
-                                    channel.close();
-                                } else if (val instanceof BufferFileInputStream) {
-                                    BufferFileInputStream is =
-                                            (BufferFileInputStream) val;
-                                    is.close();
-                                }
-                            }
-                        } catch (IOException e) {
-                            LOG.error(e.getMessage(), e);
+        TimeCacheMap.ExpiredCallback<Object, Object> expiredCallback = new TimeCacheMap.ExpiredCallback<Object, Object>() {
+            @Override
+            public void expire(Object key, Object val) {
+                try {
+                    LOG.info("Close file " + String.valueOf(key));
+                    if (val != null) {
+                        if (val instanceof Channel) {
+                            Channel channel = (Channel) val;
+                            channel.close();
+                        } else if (val instanceof BufferFileInputStream) {
+                            BufferFileInputStream is = (BufferFileInputStream) val;
+                            is.close();
                         }
-
                     }
-                };
+                } catch (IOException e) {
+                    LOG.error(e.getMessage(), e);
+                }
+
+            }
+        };
 
-        int file_copy_expiration_secs =
-                JStormUtils.parseInt(
-                        conf.get(Config.NIMBUS_FILE_COPY_EXPIRATION_SECS), 30);
-        uploaders =
-                new TimeCacheMap<Object, Object>(file_copy_expiration_secs,
-                        expiredCallback);
-        downloaders =
-                new TimeCacheMap<Object, Object>(file_copy_expiration_secs,
-                        expiredCallback);
+        int file_copy_expiration_secs = JStormUtils.parseInt(conf.get(Config.NIMBUS_FILE_COPY_EXPIRATION_SECS), 30);
+        uploaders = new TimeCacheMap<Object, Object>(file_copy_expiration_secs, expiredCallback);
+        downloaders = new TimeCacheMap<Object, Object>(file_copy_expiration_secs, expiredCallback);
     }
 
     public void createCache() throws IOException {
-        cache = new NimbusCache(conf, stormClusterState);
-        
-        ((StormZkClusterState) stormClusterState).setCache(cache.getMemCache());
+        nimbusCache = new NimbusCache(conf, stormClusterState);
+        ((StormZkClusterState) stormClusterState).setCache(nimbusCache.getMemCache());
+    }
+
+    public String getClusterName() {
+        return clusterName;
     }
 
     public int uptime() {
@@ -203,8 +207,7 @@ public class NimbusData {
         return taskHeartbeatsCache;
     }
 
-    public Map<Integer, TkHbCacheTime> getTaskHeartbeatsCache(
-            String topologyId, boolean createIfNotExist) {
+    public Map<Integer, TkHbCacheTime> getTaskHeartbeatsCache(String topologyId, boolean createIfNotExist) {
         Map<Integer, TkHbCacheTime> ret = null;
         ret = taskHeartbeatsCache.get(topologyId);
         if (ret == null && createIfNotExist) {
@@ -214,8 +217,7 @@ public class NimbusData {
         return ret;
     }
 
-    public void setTaskHeartbeatsCache(
-            ConcurrentHashMap<String, Map<Integer, TkHbCacheTime>> taskHeartbeatsCache) {
+    public void setTaskHeartbeatsCache(ConcurrentHashMap<String, Map<Integer, TkHbCacheTime>> taskHeartbeatsCache) {
         this.taskHeartbeatsCache = taskHeartbeatsCache;
     }
 
@@ -256,7 +258,7 @@ public class NimbusData {
     }
 
     public void cleanup() {
-        cache.cleanup();
+        nimbusCache.cleanup();
         LOG.info("Successfully shutdown Cache");
         try {
             stormClusterState.disconnect();
@@ -296,15 +298,19 @@ public class NimbusData {
     }
 
     public JStormCache getMemCache() {
-        return cache.getMemCache();
+        return nimbusCache.getMemCache();
     }
-    
+
     public JStormCache getDbCache() {
-        return cache.getDbCache();
+        return nimbusCache.getDbCache();
     }
-    
+
     public NimbusCache getNimbusCache() {
-        return cache;
+        return nimbusCache;
+    }
+
+    public JStormMetricCache getMetricCache() {
+        return metricCache;
     }
 
     public final TopologyMetricsRunnable getMetricRunnable() {
@@ -319,9 +325,7 @@ public class NimbusData {
         return topologyTaskTimeout;
     }
 
-	public TopologyNettyMgr getTopologyNettyMgr() {
-		return topologyNettyMgr;
-	}
-    
-    
+    public Map<String, TopologyTaskHbInfo> getTasksHeartbeat() {
+        return tasksHeartbeat;
+    }
 }

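The createFileHandler() hunk above keeps Nimbus's upload/download bookkeeping in TimeCacheMaps whose ExpiredCallback closes a handle once its entry times out. A minimal sketch of that pattern, assuming only the TimeCacheMap API already shown in the diff (the FileHandlerSketch class, the Closeable value, and the hard-coded 30-second window are illustrative, not part of JStorm):

    import backtype.storm.utils.TimeCacheMap;

    import java.io.ByteArrayInputStream;
    import java.io.Closeable;
    import java.io.IOException;

    public class FileHandlerSketch {
        public static void main(String[] args) {
            // Close whatever handle is still attached to an entry when it expires,
            // mirroring the Channel/BufferFileInputStream cases in createFileHandler().
            TimeCacheMap.ExpiredCallback<Object, Object> callback = new TimeCacheMap.ExpiredCallback<Object, Object>() {
                @Override
                public void expire(Object key, Object val) {
                    if (val instanceof Closeable) {
                        try {
                            ((Closeable) val).close();
                        } catch (IOException e) {
                            System.err.println("Failed to close " + key + ": " + e);
                        }
                    }
                }
            };

            // 30 is the NIMBUS_FILE_COPY_EXPIRATION_SECS default used in the diff.
            TimeCacheMap<Object, Object> uploaders = new TimeCacheMap<Object, Object>(30, callback);
            uploaders.put("upload-1", new ByteArrayInputStream(new byte[0]));
        }
    }
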
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/NimbusServer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/NimbusServer.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/NimbusServer.java
index b22088e..5d5e18c 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/NimbusServer.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/NimbusServer.java
@@ -17,59 +17,51 @@
  */
 package com.alibaba.jstorm.daemon.nimbus;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.thrift.protocol.TBinaryProtocol;
-import org.apache.thrift.server.THsHaServer;
-import org.apache.thrift.transport.TNonblockingServerSocket;
-import org.apache.thrift.transport.TTransportException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import backtype.storm.Config;
 import backtype.storm.generated.Nimbus;
 import backtype.storm.generated.Nimbus.Iface;
 import backtype.storm.scheduler.INimbus;
 import backtype.storm.utils.Utils;
-
 import com.alibaba.jstorm.callback.AsyncLoopRunnable;
 import com.alibaba.jstorm.callback.AsyncLoopThread;
 import com.alibaba.jstorm.client.ConfigExtension;
 import com.alibaba.jstorm.cluster.StormConfig;
 import com.alibaba.jstorm.daemon.supervisor.Httpserver;
 import com.alibaba.jstorm.daemon.worker.hearbeat.SyncContainerHb;
-import com.alibaba.jstorm.metric.SimpleJStormMetric;
 import com.alibaba.jstorm.schedule.CleanRunnable;
 import com.alibaba.jstorm.schedule.FollowerRunnable;
 import com.alibaba.jstorm.schedule.MonitorRunnable;
 import com.alibaba.jstorm.utils.JStormServerUtils;
 import com.alibaba.jstorm.utils.JStormUtils;
+import org.apache.thrift.protocol.TBinaryProtocol;
+import org.apache.thrift.server.THsHaServer;
+import org.apache.thrift.transport.TNonblockingServerSocket;
+import org.apache.thrift.transport.TTransportException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
 
 /**
  * 
- * NimbusServer work flow: 1. cleanup interrupted topology delete
- * /storm-local-dir/nimbus/topologyid/stormdis delete
- * /storm-zk-root/storms/topologyid
+ * NimbusServer workflow: 1. clean up interrupted topologies: delete /storm-local-dir/nimbus/topologyid/stormdist and delete /storm-zk-root/storms/topologyid
  * 
  * 2. set /storm-zk-root/storms/topology stats as run
  * 
- * 3. start one thread, every nimbus.monitor.reeq.secs set
- * /storm-zk-root/storms/ all topology as monitor. when the topology's status is
- * monitor, nimubs would reassign workers 4. start one threa, every
- * nimubs.cleanup.inbox.freq.secs cleanup useless jar
+ * 3. start one thread: every nimbus.monitor.freq.secs, set all topologies under /storm-zk-root/storms/ to monitor; when a topology's status is monitor, nimbus
+ * reassigns its workers. 4. start one thread: every nimbus.cleanup.inbox.freq.secs, clean up useless jars
  * 
  * @author version 1: Nathan Marz version 2: Lixin/Chenjun version 3: Longda
  * 
  */
 public class NimbusServer {
 
-    private static final Logger LOG = LoggerFactory
-            .getLogger(NimbusServer.class);
+    private static final Logger LOG = LoggerFactory.getLogger(NimbusServer.class);
 
     private NimbusData data;
 
@@ -83,8 +75,7 @@ public class NimbusServer {
 
     private Httpserver hs;
 
-    private List<AsyncLoopThread> smartThreads =
-            new ArrayList<AsyncLoopThread>();
+    private List<AsyncLoopThread> smartThreads = new ArrayList<AsyncLoopThread>();
 
     public static void main(String[] args) throws Exception {
         // read configuration files
@@ -134,8 +125,6 @@ public class NimbusServer {
             while (!data.isLeader())
                 Utils.sleep(5000);
 
-            initUploadMetricThread(data);
-
             init(conf);
         } catch (Throwable e) {
             LOG.error("Fail to run nimbus ", e);
@@ -146,8 +135,7 @@ public class NimbusServer {
         LOG.info("Quit nimbus");
     }
 
-    public ServiceHandler launcherLocalServer(final Map conf, INimbus inimbus)
-            throws Exception {
+    public ServiceHandler launcherLocalServer(final Map conf, INimbus inimbus) throws Exception {
         LOG.info("Begin to start nimbus on local model");
 
         StormConfig.validate_local_mode(conf);
@@ -156,9 +144,6 @@ public class NimbusServer {
 
         data = createNimbusData(conf, inimbus);
 
-        // @@@ testing
-        initUploadMetricThread(data);
-
         init(conf);
 
         return serviceHandler;
@@ -184,6 +169,8 @@ public class NimbusServer {
         serviceHandler = new ServiceHandler(data);
 
         if (!data.isLocalMode()) {
+
+            // data.startMetricThreads(); // started from the NimbusData constructor when not in local mode
 
             initMonitor(conf);
 
@@ -193,15 +180,11 @@ public class NimbusServer {
     }
 
     @SuppressWarnings("rawtypes")
-    private NimbusData createNimbusData(Map conf, INimbus inimbus)
-            throws Exception {
+    private NimbusData createNimbusData(Map conf, INimbus inimbus) throws Exception {
 
         // Callback callback=new TimerCallBack();
         // StormTimer timer=Timer.mkTimerTimer(callback);
-        NimbusData data = new NimbusData(conf, inimbus);
-
-        return data;
-
+        return new NimbusData(conf, inimbus);
     }
 
     private void initTopologyAssign() {
@@ -218,9 +201,9 @@ public class NimbusServer {
             for (String topologyid : active_ids) {
                 // set the topology status as startup
                 // in fact, startup won't change anything
-                NimbusUtils.transition(data, topologyid, false,
-                        StatusType.startup);
+                NimbusUtils.transition(data, topologyid, false, StatusType.startup);
                 NimbusUtils.updateTopologyTaskTimeout(data, topologyid);
+                NimbusUtils.updateTopologyTaskHb(data, topologyid);
             }
 
         }
@@ -235,20 +218,15 @@ public class NimbusServer {
         // Schedule Nimbus monitor
         MonitorRunnable r1 = new MonitorRunnable(data);
 
-        int monitor_freq_secs =
-                JStormUtils.parseInt(conf.get(Config.NIMBUS_MONITOR_FREQ_SECS),
-                        10);
-        scheduExec.scheduleAtFixedRate(r1, 0, monitor_freq_secs,
-                TimeUnit.SECONDS);
+        int monitor_freq_secs = JStormUtils.parseInt(conf.get(Config.NIMBUS_MONITOR_FREQ_SECS), 10);
+        scheduExec.scheduleAtFixedRate(r1, 0, monitor_freq_secs, TimeUnit.SECONDS);
 
         LOG.info("Successfully init Monitor thread");
     }
 
     /**
-     * Right now, every 600 seconds, nimbus will clean jar under
-     * /LOCAL-DIR/nimbus/inbox, which is the uploading topology directory
+     * Right now, every 600 seconds, nimbus cleans the jars under /LOCAL-DIR/nimbus/inbox, which is the topology upload directory
      * 
-     * @param conf
      * @throws IOException
      */
     @SuppressWarnings("rawtypes")
@@ -257,39 +235,25 @@ public class NimbusServer {
 
         // Schedule Nimbus inbox cleaner/nimbus/inbox jar
         String dir_location = StormConfig.masterInbox(conf);
-        int inbox_jar_expiration_secs =
-                JStormUtils
-                        .parseInt(conf
-                                .get(Config.NIMBUS_INBOX_JAR_EXPIRATION_SECS),
-                                3600);
-        CleanRunnable r2 =
-                new CleanRunnable(dir_location, inbox_jar_expiration_secs);
+        int inbox_jar_expiration_secs = JStormUtils.parseInt(conf.get(Config.NIMBUS_INBOX_JAR_EXPIRATION_SECS), 3600);
+        CleanRunnable r2 = new CleanRunnable(dir_location, inbox_jar_expiration_secs);
 
-        int cleanup_inbox_freq_secs =
-                JStormUtils.parseInt(
-                        conf.get(Config.NIMBUS_CLEANUP_INBOX_FREQ_SECS), 600);
-
-        scheduExec.scheduleAtFixedRate(r2, 0, cleanup_inbox_freq_secs,
-                TimeUnit.SECONDS);
+        int cleanup_inbox_freq_secs = JStormUtils.parseInt(conf.get(Config.NIMBUS_CLEANUP_INBOX_FREQ_SECS), 600);
 
+        scheduExec.scheduleAtFixedRate(r2, 0, cleanup_inbox_freq_secs, TimeUnit.SECONDS);
         LOG.info("Successfully init " + dir_location + " cleaner");
     }
 
     @SuppressWarnings("rawtypes")
     private void initThrift(Map conf) throws TTransportException {
-        Integer thrift_port =
-                JStormUtils.parseInt(conf.get(Config.NIMBUS_THRIFT_PORT));
-        TNonblockingServerSocket socket =
-                new TNonblockingServerSocket(thrift_port);
+        Integer thrift_port = JStormUtils.parseInt(conf.get(Config.NIMBUS_THRIFT_PORT));
+        TNonblockingServerSocket socket = new TNonblockingServerSocket(thrift_port);
 
-        Integer maxReadBufSize =
-                JStormUtils.parseInt(conf
-                        .get(Config.NIMBUS_THRIFT_MAX_BUFFER_SIZE));
+        Integer maxReadBufSize = JStormUtils.parseInt(conf.get(Config.NIMBUS_THRIFT_MAX_BUFFER_SIZE));
 
         THsHaServer.Args args = new THsHaServer.Args(socket);
         args.workerThreads(ServiceHandler.THREAD_NUM);
-        args.protocolFactory(new TBinaryProtocol.Factory(false, true,
-                maxReadBufSize, -1));
+        args.protocolFactory(new TBinaryProtocol.Factory(false, true, maxReadBufSize, -1));
 
         args.processor(new Nimbus.Processor<Iface>(serviceHandler));
         args.maxReadBufferBytes = maxReadBufSize;
@@ -317,53 +281,15 @@ public class NimbusServer {
         });
     }
 
-    private void initUploadMetricThread(NimbusData data) {
-        final TopologyMetricsRunnable metricRunnable = data.getMetricRunnable();
-        
-        int threadNum = ConfigExtension.getNimbusMetricThreadNum(data.getConf());
-        
-        for (int i = 0; i < threadNum; i++) {
-            AsyncLoopThread thread = new AsyncLoopThread(metricRunnable);
-            smartThreads.add(thread);
-        }
-        
-        Runnable pusher = new Runnable() {
-
-            @Override
-            public void run() {
-                // TODO Auto-generated method stub
-                TopologyMetricsRunnable.Upload event =
-                        new TopologyMetricsRunnable.Upload();
-                event.timeStamp = System.currentTimeMillis();
-
-                metricRunnable.pushEvent(event);
-            }
-
-        };
-
-        ScheduledExecutorService scheduleService = data.getScheduExec();
-        scheduleService.scheduleAtFixedRate(pusher, 120, 60,
-                TimeUnit.SECONDS);
-        
-        SimpleJStormMetric nimbusMetric = SimpleJStormMetric.mkInstance();
-        scheduleService.scheduleAtFixedRate(nimbusMetric, 120, 60,
-                        TimeUnit.SECONDS);
-        
-        //AsyncLoopThread nimbusCacheThread = new AsyncLoopThread(data.getNimbusCache().getCacheRunnable());
-        //smartThreads.add(nimbusCacheThread);
-
-        LOG.info("Successfully init metrics uploading thread");
-    }
-
     public void cleanup() {
-        if (data.getIsShutdown().getAndSet(true) == true) {
+        if (data.getIsShutdown().getAndSet(true)) {
             LOG.info("Notify to quit nimbus");
             return;
         }
 
         LOG.info("Begin to shutdown nimbus");
         AsyncLoopRunnable.getShutdown().set(true);
-        
+
         data.getScheduExec().shutdownNow();
 
         for (AsyncLoopThread t : smartThreads) {

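Both initMonitor() and initCleaner() above reduce to the same pattern: read a period from conf via JStormUtils.parseInt(..., default), then register a Runnable on the shared scheduler with scheduleAtFixedRate. A minimal JDK-only sketch using the default periods from the diff (10s monitor, 600s inbox cleanup); the printlns stand in for MonitorRunnable and CleanRunnable:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class NimbusScheduleSketch {
        public static void main(String[] args) {
            ScheduledExecutorService scheduExec = Executors.newScheduledThreadPool(2);

            // nimbus.monitor.freq.secs defaults to 10 in initMonitor()
            scheduExec.scheduleAtFixedRate(
                    () -> System.out.println("monitor pass"), 0, 10, TimeUnit.SECONDS);

            // nimbus.cleanup.inbox.freq.secs defaults to 600 in initCleaner()
            scheduExec.scheduleAtFixedRate(
                    () -> System.out.println("inbox cleanup pass"), 0, 600, TimeUnit.SECONDS);
        }
    }
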
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/NimbusUtils.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/NimbusUtils.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/NimbusUtils.java
index 7181e77..4e032e3 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/NimbusUtils.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/NimbusUtils.java
@@ -17,53 +17,31 @@
  */
 package com.alibaba.jstorm.daemon.nimbus;
 
-import java.io.BufferedReader;
-import java.io.Closeable;
-import java.io.File;
-import java.io.FileReader;
-import java.io.IOException;
-import java.security.InvalidParameterException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import backtype.storm.Config;
-import backtype.storm.generated.Bolt;
-import backtype.storm.generated.ComponentCommon;
-import backtype.storm.generated.NimbusStat;
-import backtype.storm.generated.NimbusSummary;
-import backtype.storm.generated.NotAliveException;
-import backtype.storm.generated.SpoutSpec;
-import backtype.storm.generated.StateSpoutSpec;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.generated.SupervisorSummary;
-import backtype.storm.generated.TopologySummary;
+import backtype.storm.generated.*;
 import backtype.storm.utils.ThriftTopologyUtils;
 import backtype.storm.utils.Utils;
-
 import com.alibaba.jstorm.client.ConfigExtension;
 import com.alibaba.jstorm.cluster.Cluster;
+import com.alibaba.jstorm.cluster.Common;
 import com.alibaba.jstorm.cluster.StormBase;
 import com.alibaba.jstorm.cluster.StormClusterState;
 import com.alibaba.jstorm.cluster.StormConfig;
 import com.alibaba.jstorm.daemon.supervisor.SupervisorInfo;
 import com.alibaba.jstorm.schedule.Assignment;
 import com.alibaba.jstorm.schedule.default_assign.ResourceWorkerSlot;
+import com.alibaba.jstorm.task.TaskInfo;
 import com.alibaba.jstorm.task.TkHbCacheTime;
-import com.alibaba.jstorm.task.heartbeat.TaskHeartbeat;
 import com.alibaba.jstorm.utils.JStormUtils;
 import com.alibaba.jstorm.utils.PathUtils;
 import com.alibaba.jstorm.utils.TimeUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.security.InvalidParameterException;
+import java.util.*;
+import java.util.Map.Entry;
 
 public class NimbusUtils {
 
@@ -71,7 +49,6 @@ public class NimbusUtils {
 
     /**
      * add custom KRYO serialization
-     * 
      */
     private static Map mapifySerializations(List sers) {
         Map rtn = new HashMap();
@@ -92,8 +69,6 @@ public class NimbusUtils {
     /**
      * Normalize stormConf
      * 
-     * 
-     * 
      * @param conf
      * @param stormConf
      * @param topology
@@ -101,8 +76,7 @@ public class NimbusUtils {
      * @throws Exception
      */
     @SuppressWarnings("rawtypes")
-    public static Map normalizeConf(Map conf, Map stormConf,
-            StormTopology topology) throws Exception {
+    public static Map normalizeConf(Map conf, Map stormConf, StormTopology topology) throws Exception {
 
         List kryoRegisterList = new ArrayList();
         List kryoDecoratorList = new ArrayList();
@@ -113,18 +87,14 @@ public class NimbusUtils {
 
         Object totalRegister = totalConf.get(Config.TOPOLOGY_KRYO_REGISTER);
         if (totalRegister != null) {
-            LOG.info("topology:" + stormConf.get(Config.TOPOLOGY_NAME)
-                    + ", TOPOLOGY_KRYO_REGISTER"
-                    + totalRegister.getClass().getName());
+            LOG.info("topology:" + stormConf.get(Config.TOPOLOGY_NAME) + ", TOPOLOGY_KRYO_REGISTER: " + totalRegister.getClass().getName());
 
             JStormUtils.mergeList(kryoRegisterList, totalRegister);
         }
 
         Object totalDecorator = totalConf.get(Config.TOPOLOGY_KRYO_DECORATORS);
         if (totalDecorator != null) {
-            LOG.info("topology:" + stormConf.get(Config.TOPOLOGY_NAME)
-                    + ", TOPOLOGY_KRYO_DECORATOR"
-                    + totalDecorator.getClass().getName());
+            LOG.info("topology:" + stormConf.get(Config.TOPOLOGY_NAME) + ", TOPOLOGY_KRYO_DECORATOR: " + totalDecorator.getClass().getName());
             JStormUtils.mergeList(kryoDecoratorList, totalDecorator);
         }
 
@@ -132,9 +102,7 @@ public class NimbusUtils {
         for (Iterator it = cids.iterator(); it.hasNext();) {
             String componentId = (String) it.next();
 
-            ComponentCommon common =
-                    ThriftTopologyUtils.getComponentCommon(topology,
-                            componentId);
+            ComponentCommon common = ThriftTopologyUtils.getComponentCommon(topology, componentId);
             String json = common.get_json_conf();
             if (json == null) {
                 continue;
@@ -150,24 +118,18 @@ public class NimbusUtils {
                 throw new Exception(sb.toString());
             }
 
-            Object componentKryoRegister =
-                    mtmp.get(Config.TOPOLOGY_KRYO_REGISTER);
+            Object componentKryoRegister = mtmp.get(Config.TOPOLOGY_KRYO_REGISTER);
 
             if (componentKryoRegister != null) {
-                LOG.info("topology:" + stormConf.get(Config.TOPOLOGY_NAME)
-                        + ", componentId:" + componentId
-                        + ", TOPOLOGY_KRYO_REGISTER"
+                LOG.info("topology:" + stormConf.get(Config.TOPOLOGY_NAME) + ", componentId:" + componentId + ", TOPOLOGY_KRYO_REGISTER: "
                         + componentKryoRegister.getClass().getName());
 
                 JStormUtils.mergeList(kryoRegisterList, componentKryoRegister);
             }
 
-            Object componentDecorator =
-                    mtmp.get(Config.TOPOLOGY_KRYO_DECORATORS);
+            Object componentDecorator = mtmp.get(Config.TOPOLOGY_KRYO_DECORATORS);
             if (componentDecorator != null) {
-                LOG.info("topology:" + stormConf.get(Config.TOPOLOGY_NAME)
-                        + ", componentId:" + componentId
-                        + ", TOPOLOGY_KRYO_DECORATOR"
+                LOG.info("topology:" + stormConf.get(Config.TOPOLOGY_NAME) + ", componentId:" + componentId + ", TOPOLOGY_KRYO_DECORATOR: "
                         + componentDecorator.getClass().getName());
                 JStormUtils.mergeList(kryoDecoratorList, componentDecorator);
             }
@@ -177,25 +139,23 @@ public class NimbusUtils {
         Map kryoRegisterMap = mapifySerializations(kryoRegisterList);
         List decoratorList = JStormUtils.distinctList(kryoDecoratorList);
 
-        Integer ackerNum =
-                JStormUtils.parseInt(totalConf
-                        .get(Config.TOPOLOGY_ACKER_EXECUTORS));
+        Integer ackerNum = JStormUtils.parseInt(totalConf.get(Config.TOPOLOGY_ACKER_EXECUTORS));
         if (ackerNum == null) {
             ackerNum = Integer.valueOf(1);
         }
 
         Map rtn = new HashMap();
+        // ensure cluster_mode is set
+        rtn.put(Config.STORM_CLUSTER_MODE, conf.get(Config.STORM_CLUSTER_MODE));
         rtn.putAll(stormConf);
-        rtn.put(Config.TOPOLOGY_KRYO_DECORATORS, decoratorList);
+        rtn.put(Config.TOPOLOGY_KRYO_DECORATORS, decoratorList);
         rtn.put(Config.TOPOLOGY_KRYO_REGISTER, kryoRegisterMap);
         rtn.put(Config.TOPOLOGY_ACKER_EXECUTORS, ackerNum);
-        rtn.put(Config.TOPOLOGY_MAX_TASK_PARALLELISM,
-                totalConf.get(Config.TOPOLOGY_MAX_TASK_PARALLELISM));
+        rtn.put(Config.TOPOLOGY_MAX_TASK_PARALLELISM, totalConf.get(Config.TOPOLOGY_MAX_TASK_PARALLELISM));
         return rtn;
     }
 
-    public static Integer componentParalism(Map stormConf,
-            ComponentCommon common) {
+    public static Integer componentParalism(Map stormConf, ComponentCommon common) {
         Map mergeMap = new HashMap();
         mergeMap.putAll(stormConf);
 
@@ -223,8 +183,7 @@ public class NimbusUtils {
         // }
         // }
 
-        Object maxTaskParalismObject =
-                mergeMap.get(Config.TOPOLOGY_MAX_TASK_PARALLELISM);
+        Object maxTaskParalismObject = mergeMap.get(Config.TOPOLOGY_MAX_TASK_PARALLELISM);
         if (maxTaskParalismObject == null) {
             return taskNum;
         } else {
@@ -239,24 +198,19 @@ public class NimbusUtils {
      * finalize component's task paralism
      * 
      * @param topology
-     * @param fromConf means if the paralism is read from conf file instead of
-     *            reading from topology code
+     * @param fromConf whether the parallelism is read from the conf file instead of from the topology code
      * @return
      */
-    public static StormTopology normalizeTopology(Map stormConf,
-            StormTopology topology, boolean fromConf) {
+    public static StormTopology normalizeTopology(Map stormConf, StormTopology topology, boolean fromConf) {
         StormTopology ret = topology.deepCopy();
 
-        Map<String, Object> rawComponents =
-                ThriftTopologyUtils.getComponents(topology);
+        Map<String, Object> rawComponents = ThriftTopologyUtils.getComponents(topology);
 
         Map<String, Object> components = ThriftTopologyUtils.getComponents(ret);
 
         if (rawComponents.keySet().equals(components.keySet()) == false) {
-            String errMsg =
-                    "Failed to normalize topology binary, maybe due to wrong dependency";
-            LOG.info(errMsg + " raw components:" + rawComponents.keySet()
-                    + ", normalized " + components.keySet());
+            String errMsg = "Failed to normalize topology binary, maybe due to wrong dependency";
+            LOG.info(errMsg + " raw components:" + rawComponents.keySet() + ", normalized " + components.keySet());
 
             throw new InvalidParameterException(errMsg);
         }
@@ -269,9 +223,7 @@ public class NimbusUtils {
             if (component instanceof Bolt) {
                 common = ((Bolt) component).get_common();
                 if (fromConf) {
-                    Integer paraNum =
-                            ConfigExtension.getBoltParallelism(stormConf,
-                                    componentName);
+                    Integer paraNum = ConfigExtension.getBoltParallelism(stormConf, componentName);
                     if (paraNum != null) {
                         LOG.info("Set " + componentName + " as " + paraNum);
                         common.set_parallelism_hint(paraNum);
@@ -281,9 +233,7 @@ public class NimbusUtils {
             if (component instanceof SpoutSpec) {
                 common = ((SpoutSpec) component).get_common();
                 if (fromConf) {
-                    Integer paraNum =
-                            ConfigExtension.getSpoutParallelism(stormConf,
-                                    componentName);
+                    Integer paraNum = ConfigExtension.getSpoutParallelism(stormConf, componentName);
                     if (paraNum != null) {
                         LOG.info("Set " + componentName + " as " + paraNum);
                         common.set_parallelism_hint(paraNum);
@@ -293,9 +243,7 @@ public class NimbusUtils {
             if (component instanceof StateSpoutSpec) {
                 common = ((StateSpoutSpec) component).get_common();
                 if (fromConf) {
-                    Integer paraNum =
-                            ConfigExtension.getSpoutParallelism(stormConf,
-                                    componentName);
+                    Integer paraNum = ConfigExtension.getSpoutParallelism(stormConf, componentName);
                     if (paraNum != null) {
                         LOG.info("Set " + componentName + " as " + paraNum);
                         common.set_parallelism_hint(paraNum);
@@ -307,8 +255,7 @@ public class NimbusUtils {
 
             String jsonConfString = common.get_json_conf();
             if (jsonConfString != null) {
-                componentMap
-                        .putAll((Map) JStormUtils.from_json(jsonConfString));
+                componentMap.putAll((Map) JStormUtils.from_json(jsonConfString));
             }
 
             Integer taskNum = componentParalism(stormConf, common);
@@ -328,20 +275,16 @@ public class NimbusUtils {
      * clean the topology which is in ZK but not in local dir
      * 
      * @throws Exception
-     * 
      */
-    public static void cleanupCorruptTopologies(NimbusData data)
-            throws Exception {
+    public static void cleanupCorruptTopologies(NimbusData data) throws Exception {
 
         StormClusterState stormClusterState = data.getStormClusterState();
 
         // get /local-storm-dir/nimbus/stormdist path
-        String master_stormdist_root =
-                StormConfig.masterStormdistRoot(data.getConf());
+        String master_stormdist_root = StormConfig.masterStormdistRoot(data.getConf());
 
         // listdir /local-storm-dir/nimbus/stormdist
-        List<String> code_ids =
-                PathUtils.read_dir_contents(master_stormdist_root);
+        List<String> code_ids = PathUtils.read_dir_contents(master_stormdist_root);
 
         // get topology in ZK /storms
         List<String> active_ids = data.getStormClusterState().active_storms();
@@ -352,9 +295,7 @@ public class NimbusUtils {
             }
 
             for (String corrupt : active_ids) {
-                LOG.info("Corrupt topology "
-                        + corrupt
-                        + " has state on zookeeper but doesn't have a local dir on Nimbus. Cleaning up...");
+                LOG.info("Corrupt topology " + corrupt + " has state on zookeeper but doesn't have a local dir on Nimbus. Cleaning up...");
 
                 /**
                  * Just removing the /STORMS is enough
@@ -368,53 +309,47 @@ public class NimbusUtils {
 
     }
 
-    public static boolean isTaskDead(NimbusData data, String topologyId,
-            Integer taskId) {
+    public static boolean isTaskDead(NimbusData data, String topologyId, Integer taskId) {
         String idStr = " topology:" + topologyId + ",taskid:" + taskId;
 
-        Integer zkReportTime = null;
+        TopologyTaskHbInfo topoTasksHbInfo = data.getTasksHeartbeat().get(topologyId);
+        Map<Integer, TaskHeartbeat> taskHbMap = null;
+        Integer taskReportTime = null;
 
-        StormClusterState stormClusterState = data.getStormClusterState();
-        TaskHeartbeat zkTaskHeartbeat = null;
-        try {
-            zkTaskHeartbeat =
-                    stormClusterState.task_heartbeat(topologyId, taskId);
-            if (zkTaskHeartbeat != null) {
-                zkReportTime = zkTaskHeartbeat.getTimeSecs();
+        if (topoTasksHbInfo != null) {
+            taskHbMap = topoTasksHbInfo.get_taskHbs();
+            if (taskHbMap != null) {
+                TaskHeartbeat tHb = taskHbMap.get(taskId);
+                taskReportTime = (tHb != null) ? tHb.get_time() : null;
             }
-        } catch (Exception e) {
-            LOG.error("Failed to get ZK task hearbeat " + idStr, e);
         }
 
-        Map<Integer, TkHbCacheTime> taskHBs =
-                data.getTaskHeartbeatsCache(topologyId, true);
+        Map<Integer, TkHbCacheTime> taskHBs = data.getTaskHeartbeatsCache(topologyId, true);
 
         TkHbCacheTime taskHB = taskHBs.get(taskId);
         if (taskHB == null) {
             LOG.info("No task heartbeat cache " + idStr);
 
-            if (zkTaskHeartbeat == null) {
-                LOG.info("No ZK task hearbeat " + idStr);
+            if (topoTasksHbInfo == null || taskHbMap == null) {
+                LOG.info("No task heartbeat was reported for " + idStr);
                 return true;
             }
 
             taskHB = new TkHbCacheTime();
-            taskHB.update(zkTaskHeartbeat);
+            taskHB.update(taskHbMap.get(taskId));
 
             taskHBs.put(taskId, taskHB);
 
             return false;
         }
 
-        if (zkReportTime == null) {
-            LOG.debug("No ZK task heartbeat " + idStr);
+        if (taskReportTime == null || taskReportTime < taskHB.getTaskAssignedTime()) {
+            LOG.debug("No task heartbeat was reported for " + idStr);
             // Task hasn't finished init yet
             int nowSecs = TimeUtils.current_time_secs();
             int assignSecs = taskHB.getTaskAssignedTime();
 
-            int waitInitTimeout =
-                    JStormUtils.parseInt(data.getConf().get(
-                            Config.NIMBUS_TASK_LAUNCH_SECS));
+            int waitInitTimeout = JStormUtils.parseInt(data.getConf().get(Config.NIMBUS_TASK_LAUNCH_SECS));
 
             if (nowSecs - assignSecs > waitInitTimeout) {
                 LOG.info(idStr + " failed to init ");
@@ -433,30 +368,29 @@ public class NimbusUtils {
         int nowSecs = TimeUtils.current_time_secs();
         if (nimbusTime == 0) {
             // taskHB no entry, first time
-            // update taskHB
+            // update taskHB with the reported time
             taskHB.setNimbusTime(nowSecs);
-            taskHB.setTaskReportedTime(zkReportTime);
+            taskHB.setTaskReportedTime(taskReportTime);
 
             LOG.info("Update taskheartbeat to nimbus cache " + idStr);
             return false;
         }
 
-        if (reportTime != zkReportTime.intValue()) {
+        if (reportTime != taskReportTime.intValue()) {
             // the reported time has advanced since the last check
             taskHB.setNimbusTime(nowSecs);
-            taskHB.setTaskReportedTime(zkReportTime);
+            taskHB.setTaskReportedTime(taskReportTime);
 
-            LOG.debug(idStr + ",nimbusTime " + nowSecs + ",zkReport:"
-                    + zkReportTime + ",report:" + reportTime);
+            LOG.debug(idStr + ",nimbusTime " + nowSecs + ",zkReport:" + taskReportTime + ",report:" + reportTime);
             return false;
         }
 
         // the following is (taskReportTime == reportTime)
         Integer taskHBTimeout = data.getTopologyTaskTimeout().get(topologyId);
         if (taskHBTimeout == null)
-            taskHBTimeout =
-                    JStormUtils.parseInt(data.getConf().get(
-                            Config.NIMBUS_TASK_TIMEOUT_SECS));
+            taskHBTimeout = JStormUtils.parseInt(data.getConf().get(Config.NIMBUS_TASK_TIMEOUT_SECS));
+        if (taskId == topoTasksHbInfo.get_topologyMasterId())
+            taskHBTimeout = (taskHBTimeout / 2);
         if (nowSecs - nimbusTime > taskHBTimeout) {
             // task is dead
             long ts = ((long) nimbusTime) * 1000;
@@ -470,7 +404,7 @@ public class NimbusUtils {
             sb.append(",current ");
             sb.append(nowSecs);
             sb.append(":").append(new Date(((long) nowSecs) * 1000));
-            LOG.info(sb.toString());
+            LOG.debug(sb.toString());
             return true;
         }
 
@@ -478,13 +412,10 @@ public class NimbusUtils {
 
     }
 
-    public static void updateTaskHbStartTime(NimbusData data,
-            Assignment assignment, String topologyId) {
-        Map<Integer, TkHbCacheTime> taskHBs =
-                data.getTaskHeartbeatsCache(topologyId, true);
+    public static void updateTaskHbStartTime(NimbusData data, Assignment assignment, String topologyId) {
+        Map<Integer, TkHbCacheTime> taskHBs = data.getTaskHeartbeatsCache(topologyId, true);
 
-        Map<Integer, Integer> taskStartTimes =
-                assignment.getTaskStartTimeSecs();
+        Map<Integer, Integer> taskStartTimes = assignment.getTaskStartTimeSecs();
         for (Entry<Integer, Integer> entry : taskStartTimes.entrySet()) {
             Integer taskId = entry.getKey();
             Integer taskStartTime = entry.getValue();
@@ -501,25 +432,19 @@ public class NimbusUtils {
         return;
     }
 
-    public static <T> void transitionName(NimbusData data, String topologyName,
-            boolean errorOnNoTransition, StatusType transition_status,
-            T... args) throws Exception {
+    public static <T> void transitionName(NimbusData data, String topologyName, boolean errorOnNoTransition, StatusType transition_status, T... args)
+            throws Exception {
         StormClusterState stormClusterState = data.getStormClusterState();
-        String topologyId =
-                Cluster.get_topology_id(stormClusterState, topologyName);
+        String topologyId = Cluster.get_topology_id(stormClusterState, topologyName);
         if (topologyId == null) {
             throw new NotAliveException(topologyName);
         }
-        transition(data, topologyId, errorOnNoTransition, transition_status,
-                args);
+        transition(data, topologyId, errorOnNoTransition, transition_status, args);
     }
 
-    public static <T> void transition(NimbusData data, String topologyid,
-            boolean errorOnNoTransition, StatusType transition_status,
-            T... args) {
+    public static <T> void transition(NimbusData data, String topologyid, boolean errorOnNoTransition, StatusType transition_status, T... args) {
         try {
-            data.getStatusTransition().transition(topologyid,
-                    errorOnNoTransition, transition_status, args);
+            data.getStatusTransition().transition(topologyid, errorOnNoTransition, transition_status, args);
         } catch (Exception e) {
             // TODO Auto-generated catch block
             LOG.error("Failed to do status transition,", e);
@@ -535,22 +460,17 @@ public class NimbusUtils {
         return numTasks;
     }
 
-    public static List<TopologySummary> getTopologySummary(
-            StormClusterState stormClusterState,
-            Map<String, Assignment> assignments) throws Exception {
-        List<TopologySummary> topologySummaries =
-                new ArrayList<TopologySummary>();
+    public static List<TopologySummary> getTopologySummary(StormClusterState stormClusterState, Map<String, Assignment> assignments) throws Exception {
+        List<TopologySummary> topologySummaries = new ArrayList<TopologySummary>();
 
         // get all active topology's StormBase
-        Map<String, StormBase> bases =
-                Cluster.get_all_StormBase(stormClusterState);
+        Map<String, StormBase> bases = Cluster.get_all_StormBase(stormClusterState);
         for (Entry<String, StormBase> entry : bases.entrySet()) {
 
             String topologyId = entry.getKey();
             StormBase base = entry.getValue();
 
-            Assignment assignment =
-                    stormClusterState.assignment_info(topologyId, null);
+            Assignment assignment = stormClusterState.assignment_info(topologyId, null);
             if (assignment == null) {
                 LOG.error("Failed to get assignment of " + topologyId);
                 continue;
@@ -571,11 +491,10 @@ public class NimbusUtils {
             topology.set_id(topologyId);
             topology.set_name(base.getStormName());
             topology.set_status(base.getStatusString());
-            topology.set_uptime_secs(TimeUtils.time_delta(base
-                    .getLanchTimeSecs()));
-            topology.set_num_workers(num_workers);
-            topology.set_num_tasks(num_tasks);
-            topology.set_error_info(errorString);
+            topology.set_uptimeSecs(TimeUtils.time_delta(base.getLanchTimeSecs()));
+            topology.set_numWorkers(num_workers);
+            topology.set_numTasks(num_tasks);
+            topology.set_errorInfo(errorString);
 
             topologySummaries.add(topology);
 
@@ -584,34 +503,26 @@ public class NimbusUtils {
         return topologySummaries;
     }
 
-    public static SupervisorSummary mkSupervisorSummary(
-            SupervisorInfo supervisorInfo, String supervisorId,
-            Map<String, Integer> supervisorToUsedSlotNum) {
+    public static SupervisorSummary mkSupervisorSummary(SupervisorInfo supervisorInfo, String supervisorId, Map<String, Integer> supervisorToUsedSlotNum) {
         Integer usedNum = supervisorToUsedSlotNum.get(supervisorId);
 
         SupervisorSummary summary =
-                new SupervisorSummary(supervisorInfo.getHostName(),
-                        supervisorId, supervisorInfo.getUptimeSecs(),
-                        supervisorInfo.getWorkerPorts().size(),
+                new SupervisorSummary(supervisorInfo.getHostName(), supervisorId, supervisorInfo.getUptimeSecs(), supervisorInfo.getWorkerPorts().size(),
                         usedNum == null ? 0 : usedNum);
 
         return summary;
     }
 
-    public static List<SupervisorSummary> mkSupervisorSummaries(
-            Map<String, SupervisorInfo> supervisorInfos,
-            Map<String, Assignment> assignments) {
+    public static List<SupervisorSummary> mkSupervisorSummaries(Map<String, SupervisorInfo> supervisorInfos, Map<String, Assignment> assignments) {
 
-        Map<String, Integer> supervisorToLeftSlotNum =
-                new HashMap<String, Integer>();
+        Map<String, Integer> supervisorToLeftSlotNum = new HashMap<String, Integer>();
         for (Entry<String, Assignment> entry : assignments.entrySet()) {
             Set<ResourceWorkerSlot> workers = entry.getValue().getWorkers();
 
             for (ResourceWorkerSlot worker : workers) {
 
                 String supervisorId = worker.getNodeId();
-                SupervisorInfo supervisorInfo =
-                        supervisorInfos.get(supervisorId);
+                SupervisorInfo supervisorInfo = supervisorInfos.get(supervisorId);
                 if (supervisorInfo == null) {
                     continue;
                 }
@@ -629,9 +540,7 @@ public class NimbusUtils {
             String supervisorId = entry.getKey();
             SupervisorInfo supervisorInfo = entry.getValue();
 
-            SupervisorSummary summary =
-                    mkSupervisorSummary(supervisorInfo, supervisorId,
-                            supervisorToLeftSlotNum);
+            SupervisorSummary summary = mkSupervisorSummary(supervisorInfo, supervisorId, supervisorToLeftSlotNum);
 
             ret.add(summary);
         }
@@ -648,27 +557,24 @@ public class NimbusUtils {
         return ret;
     }
 
-    public static NimbusSummary getNimbusSummary(
-            StormClusterState stormClusterState,
-            List<SupervisorSummary> supervisorSummaries, NimbusData data)
+    public static NimbusSummary getNimbusSummary(StormClusterState stormClusterState, List<SupervisorSummary> supervisorSummaries, NimbusData data)
             throws Exception {
         NimbusSummary ret = new NimbusSummary();
 
         String master = stormClusterState.get_leader_host();
         NimbusStat nimbusMaster = new NimbusStat();
         nimbusMaster.set_host(master);
-        nimbusMaster.set_uptime_secs(String.valueOf(data.uptime()));
-        ret.set_nimbus_master(nimbusMaster);
+        nimbusMaster.set_uptimeSecs(String.valueOf(data.uptime()));
+        ret.set_nimbusMaster(nimbusMaster);
 
         List<NimbusStat> nimbusSlaveList = new ArrayList<NimbusStat>();
-        ret.set_nimbus_slaves(nimbusSlaveList);
-        Map<String, String> nimbusSlaveMap =
-                Cluster.get_all_nimbus_slave(stormClusterState);
+        ret.set_nimbusSlaves(nimbusSlaveList);
+        Map<String, String> nimbusSlaveMap = Cluster.get_all_nimbus_slave(stormClusterState);
         if (nimbusSlaveMap != null) {
             for (Entry<String, String> entry : nimbusSlaveMap.entrySet()) {
                 NimbusStat slave = new NimbusStat();
                 slave.set_host(entry.getKey());
-                slave.set_uptime_secs(entry.getValue());
+                slave.set_uptimeSecs(entry.getValue());
 
                 nimbusSlaveList.add(slave);
             }
@@ -678,46 +584,75 @@ public class NimbusUtils {
         int usedPort = 0;
 
         for (SupervisorSummary supervisor : supervisorSummaries) {
-            totalPort += supervisor.get_num_workers();
-            usedPort += supervisor.get_num_used_workers();
+            totalPort += supervisor.get_numWorkers();
+            usedPort += supervisor.get_numUsedWorkers();
         }
 
-        ret.set_supervisor_num(supervisorSummaries.size());
-        ret.set_total_port_num(totalPort);
-        ret.set_used_port_num(usedPort);
-        ret.set_free_port_num(totalPort - usedPort);
+        ret.set_supervisorNum(supervisorSummaries.size());
+        ret.set_totalPortNum(totalPort);
+        ret.set_usedPortNum(usedPort);
+        ret.set_freePortNum(totalPort - usedPort);
         ret.set_version(Utils.getVersion());
 
         return ret;
 
     }
 
-    public static void updateTopologyTaskTimeout(NimbusData data,
-            String topologyId) {
+    public static void updateTopologyTaskTimeout(NimbusData data, String topologyId) {
         Map topologyConf = null;
         try {
-            topologyConf =
-                    StormConfig.read_nimbus_topology_conf(data.getConf(),
-                            topologyId);
+            topologyConf = StormConfig.read_nimbus_topology_conf(data.getConf(), topologyId);
         } catch (IOException e) {
-            LOG.warn("Failed to read configuration of " + topologyId + ", "
-                    + e.getMessage());
+            LOG.warn("Failed to read configuration of " + topologyId + ", " + e.getMessage());
         }
 
-        Integer timeout =
-                JStormUtils.parseInt(topologyConf
-                        .get(Config.NIMBUS_TASK_TIMEOUT_SECS));
+        Integer timeout = JStormUtils.parseInt(topologyConf.get(Config.NIMBUS_TASK_TIMEOUT_SECS));
         if (timeout == null) {
-            timeout =
-                    JStormUtils.parseInt(data.getConf().get(
-                            Config.NIMBUS_TASK_TIMEOUT_SECS));
+            timeout = JStormUtils.parseInt(data.getConf().get(Config.NIMBUS_TASK_TIMEOUT_SECS));
         }
         LOG.info("Setting taskTimeout:" + timeout + " for " + topologyId);
         data.getTopologyTaskTimeout().put(topologyId, timeout);
     }
 
-    public static void removeTopologyTaskTimeout(NimbusData data,
-            String topologyId) {
+    public static void removeTopologyTaskTimeout(NimbusData data, String topologyId) {
         data.getTopologyTaskTimeout().remove(topologyId);
     }
+
+    public static void updateTopologyTaskHb(NimbusData data, String topologyId) {
+        StormClusterState clusterState = data.getStormClusterState();
+        TopologyTaskHbInfo topologyTaskHb = null;
+
+        try {
+            topologyTaskHb = clusterState.topology_heartbeat(topologyId);
+        } catch (Exception e) {
+            LOG.error("updateTopologyTaskHb: Failed to get topology task heartbeat info", e);
+        }
+
+        if (topologyTaskHb != null) {
+            data.getTasksHeartbeat().put(topologyId, topologyTaskHb);
+        }
+    }
+
+    public static void removeTopologyTaskHb(NimbusData data, String topologyId, int taskId) {
+        TopologyTaskHbInfo topologyTaskHbs = data.getTasksHeartbeat().get(topologyId);
+
+        if (topologyTaskHbs != null) {
+            Map<Integer, TaskHeartbeat> taskHbs = topologyTaskHbs.get_taskHbs();
+            if (taskHbs != null) {
+                taskHbs.remove(taskId);
+            }
+        }
+    }
+
+    public static int getTopologyMasterId(Map<Integer, TaskInfo> tasksInfo) {
+        int ret = 0;
+        for (Entry<Integer, TaskInfo> entry : tasksInfo.entrySet()) {
+            if (entry.getValue().getComponentId().equalsIgnoreCase(Common.TOPOLOGY_MASTER_COMPONENT_ID)) {
+                ret = entry.getKey();
+                break;
+            }
+        }
+
+        return ret;
+    }
 }

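The isTaskDead() rewrite above swaps the per-task ZK reads for the in-memory tasksHeartbeat map, but the liveness rule itself is unchanged: a task is dead once its reported heartbeat stops advancing for longer than the topology's task timeout, and the topology master is checked against half that timeout. A condensed sketch of that rule (class, names, and the 120s default are illustrative, not JStorm API):

    public class TaskLivenessSketch {
        // Mirrors the final check in NimbusUtils.isTaskDead(): stale once the
        // heartbeat has not advanced for more than the timeout; the topology
        // master task gets half the configured timeout.
        static boolean isDead(int nowSecs, int lastAdvanceSecs, int taskHBTimeout, boolean isTopologyMaster) {
            int timeout = isTopologyMaster ? taskHBTimeout / 2 : taskHBTimeout;
            return nowSecs - lastAdvanceSecs > timeout;
        }

        public static void main(String[] args) {
            System.out.println(isDead(300, 100, 120, false)); // true: 200s > 120s
            System.out.println(isDead(250, 200, 120, true));  // false: 50s <= 60s
            System.out.println(isDead(270, 200, 120, true));  // true: 70s > 60s
        }
    }
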

[51/51] [partial] storm git commit: Update JStorm to latest release 2.1.0

Posted by pt...@apache.org.
Update JStorm to latest release 2.1.0


Project: http://git-wip-us.apache.org/repos/asf/storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/storm/commit/7eaf0651
Tree: http://git-wip-us.apache.org/repos/asf/storm/tree/7eaf0651
Diff: http://git-wip-us.apache.org/repos/asf/storm/diff/7eaf0651

Branch: refs/heads/jstorm-import
Commit: 7eaf06513183228d8c0a8ac032733f0ec47ba9a7
Parents: 27fb31c
Author: basti.lj <ba...@alibaba-inc.com>
Authored: Wed Nov 25 20:04:48 2015 +0800
Committer: basti.lj <ba...@alibaba-inc.com>
Committed: Wed Nov 25 20:04:48 2015 +0800

----------------------------------------------------------------------
 bin/jstorm.py                                   |    37 +-
 bin/start.sh                                    |    16 +-
 conf/client_logback.xml                         |     7 +-
 conf/jstorm.log4j.properties                    |     4 +-
 conf/jstorm.logback.xml                         |   130 +-
 conf/storm.yaml                                 |    44 +-
 history.md                                      |   142 +-
 history_cn.md                                   |   148 +-
 jstorm-core/pom.xml                             |    34 +-
 .../src/main/java/backtype/storm/Config.java    |   569 +-
 .../java/backtype/storm/ConfigValidation.java   |   107 +-
 .../src/main/java/backtype/storm/Constants.java |     5 +-
 .../backtype/storm/GenericOptionsParser.java    |    76 +-
 .../backtype/storm/ICredentialsListener.java    |     3 +-
 .../main/java/backtype/storm/ILocalCluster.java |    17 +-
 .../main/java/backtype/storm/ILocalDRPC.java    |     3 +-
 .../main/java/backtype/storm/LocalCluster.java  |    77 +-
 .../java/backtype/storm/LocalClusterMap.java    |    50 +-
 .../src/main/java/backtype/storm/LocalDRPC.java |    26 +-
 .../main/java/backtype/storm/LocalUtils.java    |    24 +-
 .../java/backtype/storm/StormSubmitter.java     |   160 +-
 .../src/main/java/backtype/storm/Tool.java      |     6 +-
 .../main/java/backtype/storm/ToolRunner.java    |    11 +-
 .../backtype/storm/clojure/ClojureBolt.java     |    45 +-
 .../backtype/storm/clojure/ClojureSpout.java    |    59 +-
 .../backtype/storm/clojure/RichShellBolt.java   |    12 +-
 .../backtype/storm/clojure/RichShellSpout.java  |     4 +-
 .../java/backtype/storm/command/activate.java   |    14 +-
 .../backtype/storm/command/config_value.java    |    10 +-
 .../java/backtype/storm/command/deactivate.java |    14 +-
 .../backtype/storm/command/kill_topology.java   |    28 +-
 .../main/java/backtype/storm/command/list.java  |    16 +-
 .../backtype/storm/command/metrics_monitor.java |    24 +-
 .../java/backtype/storm/command/rebalance.java  |    33 +-
 .../java/backtype/storm/command/restart.java    |    14 +-
 .../backtype/storm/command/update_config.java   |    69 -
 .../backtype/storm/command/update_topology.java |   164 +
 .../storm/coordination/BatchBoltExecutor.java   |    27 +-
 .../coordination/BatchOutputCollector.java      |    13 +-
 .../coordination/BatchOutputCollectorImpl.java  |     8 +-
 .../coordination/BatchSubtopologyBuilder.java   |   153 +-
 .../storm/coordination/CoordinatedBolt.java     |   155 +-
 .../backtype/storm/coordination/IBatchBolt.java |     2 +
 .../storm/drpc/DRPCInvocationsClient.java       |    35 +-
 .../java/backtype/storm/drpc/DRPCSpout.java     |    86 +-
 .../java/backtype/storm/drpc/JoinResult.java    |    11 +-
 .../java/backtype/storm/drpc/KeyedFairBolt.java |    12 +-
 .../storm/drpc/LinearDRPCInputDeclarer.java     |    13 +-
 .../storm/drpc/LinearDRPCTopologyBuilder.java   |   139 +-
 .../backtype/storm/drpc/PrepareRequest.java     |     1 -
 .../java/backtype/storm/drpc/ReturnResults.java |    37 +-
 .../storm/generated/AlreadyAliveException.java  |    22 +-
 .../storm/generated/AuthorizationException.java |    22 +-
 .../java/backtype/storm/generated/Bolt.java     |    24 +-
 .../storm/generated/ClusterSummary.java         |    26 +-
 .../storm/generated/ComponentCommon.java        |    24 +-
 .../storm/generated/ComponentObject.java        |    12 +-
 .../storm/generated/ComponentSummary.java       |   204 +-
 .../backtype/storm/generated/Credentials.java   |    66 +-
 .../storm/generated/DRPCExecutionException.java |    22 +-
 .../backtype/storm/generated/DRPCRequest.java   |    24 +-
 .../storm/generated/DistributedRPC.java         |   186 +-
 .../generated/DistributedRPCInvocations.java    |   573 +-
 .../backtype/storm/generated/ErrorInfo.java     |   120 +-
 .../storm/generated/GlobalStreamId.java         |    24 +-
 .../java/backtype/storm/generated/Grouping.java |    12 +-
 .../generated/InvalidTopologyException.java     |    22 +-
 .../backtype/storm/generated/JavaObject.java    |    24 +-
 .../backtype/storm/generated/JavaObjectArg.java |    12 +-
 .../backtype/storm/generated/KillOptions.java   |    20 +-
 .../storm/generated/LocalStateData.java         |    70 +-
 .../backtype/storm/generated/MetricInfo.java    |   653 +-
 .../storm/generated/MetricSnapshot.java         |  2221 ++
 .../backtype/storm/generated/MetricWindow.java  |    22 +-
 .../storm/generated/MonitorOptions.java         |    20 +-
 .../backtype/storm/generated/NettyMetric.java   |   553 -
 .../java/backtype/storm/generated/Nimbus.java   | 25096 +++++++++++------
 .../backtype/storm/generated/NimbusStat.java    |   118 +-
 .../backtype/storm/generated/NimbusSummary.java |   644 +-
 .../storm/generated/NotAliveException.java      |    22 +-
 .../backtype/storm/generated/NullStruct.java    |    20 +-
 .../storm/generated/RebalanceOptions.java       |    20 +-
 .../storm/generated/ShellComponent.java         |    20 +-
 .../backtype/storm/generated/SpoutSpec.java     |    24 +-
 .../storm/generated/StateSpoutSpec.java         |    24 +-
 .../backtype/storm/generated/StormTopology.java |    26 +-
 .../backtype/storm/generated/StreamInfo.java    |    24 +-
 .../backtype/storm/generated/SubmitOptions.java |   139 +-
 .../storm/generated/SupervisorSummary.java      |   412 +-
 .../storm/generated/SupervisorWorkers.java      |   110 +-
 .../backtype/storm/generated/TaskComponent.java |    24 +-
 .../backtype/storm/generated/TaskHeartbeat.java |   482 +
 .../backtype/storm/generated/TaskSummary.java   |   162 +-
 .../storm/generated/ThriftSerializedObject.java |    24 +-
 .../generated/TopologyAssignException.java      |    22 +-
 .../backtype/storm/generated/TopologyInfo.java  |   100 +-
 .../storm/generated/TopologyInitialStatus.java  |     2 +-
 .../storm/generated/TopologyMetric.java         |   530 +-
 .../storm/generated/TopologySummary.java        |   416 +-
 .../storm/generated/TopologyTaskHbInfo.java     |   663 +
 .../backtype/storm/generated/WorkerSummary.java |    28 +-
 .../storm/generated/WorkerUploadMetrics.java    |   604 +-
 .../storm/grouping/CustomStreamGrouping.java    |    20 +-
 .../storm/grouping/PartialKeyGrouping.java      |    30 +-
 .../java/backtype/storm/hooks/BaseTaskHook.java |     2 +-
 .../java/backtype/storm/hooks/ITaskHook.java    |     7 +
 .../backtype/storm/hooks/info/BoltAckInfo.java  |     2 +-
 .../storm/hooks/info/BoltExecuteInfo.java       |     2 +-
 .../backtype/storm/hooks/info/BoltFailInfo.java |     2 +-
 .../backtype/storm/hooks/info/EmitInfo.java     |     2 +-
 .../backtype/storm/hooks/info/SpoutAckInfo.java |     2 +-
 .../storm/hooks/info/SpoutFailInfo.java         |     2 +-
 .../storm/messaging/ConnectionWithStatus.java   |    39 +-
 .../backtype/storm/messaging/IConnection.java   |     5 +-
 .../java/backtype/storm/messaging/IContext.java |     6 +-
 .../backtype/storm/messaging/TaskMessage.java   |    16 +-
 .../storm/messaging/TransportFactory.java       |     8 +-
 .../storm/metric/LoggingMetricsConsumer.java    |    18 +-
 .../storm/metric/MetricsConsumerBolt.java       |    14 +-
 .../java/backtype/storm/metric/SystemBolt.java  |    20 +-
 .../storm/metric/api/CombinedMetric.java        |     2 +-
 .../backtype/storm/metric/api/CountMetric.java  |     2 +-
 .../backtype/storm/metric/api/ICombiner.java    |     1 +
 .../storm/metric/api/IMetricsConsumer.java      |    28 +-
 .../backtype/storm/metric/api/IReducer.java     |     2 +
 .../backtype/storm/metric/api/MeanReducer.java  |    21 +-
 .../storm/metric/api/MultiCountMetric.java      |     6 +-
 .../storm/metric/api/MultiReducedMetric.java    |     8 +-
 .../storm/metric/api/rpc/CountShellMetric.java  |     7 +-
 .../storm/metric/api/rpc/IShellMetric.java      |     8 +-
 .../java/backtype/storm/multilang/BoltMsg.java  |    13 +-
 .../backtype/storm/multilang/ISerializer.java   |    23 +-
 .../storm/multilang/JsonSerializer.java         |    16 +-
 .../storm/multilang/NoOutputException.java      |     3 +-
 .../java/backtype/storm/multilang/ShellMsg.java |    49 +-
 .../java/backtype/storm/multilang/SpoutMsg.java |    13 +-
 .../storm/nimbus/DefaultTopologyValidator.java  |     7 +-
 .../storm/nimbus/ITopologyValidator.java        |     4 +-
 .../backtype/storm/planner/CompoundSpout.java   |     3 +-
 .../backtype/storm/planner/CompoundTask.java    |     3 +-
 .../java/backtype/storm/planner/TaskBundle.java |     5 +-
 .../java/backtype/storm/scheduler/Cluster.java  |    87 +-
 .../storm/scheduler/ExecutorDetails.java        |    12 +-
 .../java/backtype/storm/scheduler/INimbus.java  |    14 +-
 .../backtype/storm/scheduler/IScheduler.java    |    22 +-
 .../backtype/storm/scheduler/ISupervisor.java   |    15 +-
 .../storm/scheduler/SchedulerAssignment.java    |     8 +-
 .../scheduler/SchedulerAssignmentImpl.java      |    16 +-
 .../storm/scheduler/SupervisorDetails.java      |    14 +-
 .../backtype/storm/scheduler/Topologies.java    |    15 +-
 .../storm/scheduler/TopologyDetails.java        |    21 +-
 .../backtype/storm/scheduler/WorkerSlot.java    |    24 +-
 .../scheduler/multitenant/DefaultPool.java      |   333 +-
 .../storm/scheduler/multitenant/FreePool.java   |   158 +-
 .../scheduler/multitenant/IsolatedPool.java     |   538 +-
 .../multitenant/MultitenantScheduler.java       |   115 +-
 .../storm/scheduler/multitenant/Node.java       |   540 +-
 .../storm/scheduler/multitenant/NodePool.java   |   466 +-
 .../storm/security/INimbusCredentialPlugin.java |    12 +-
 .../backtype/storm/security/auth/AuthUtils.java |    83 +-
 .../auth/DefaultHttpCredentialsPlugin.java      |    20 +-
 .../security/auth/DefaultPrincipalToLocal.java  |    14 +-
 .../storm/security/auth/IAuthorizer.java        |    22 +-
 .../storm/security/auth/IAutoCredentials.java   |    15 +-
 .../security/auth/ICredentialsRenewer.java      |    14 +-
 .../auth/IGroupMappingServiceProvider.java      |     5 +-
 .../security/auth/IHttpCredentialsPlugin.java   |     6 +-
 .../storm/security/auth/IPrincipalToLocal.java  |     9 +-
 .../storm/security/auth/ITransportPlugin.java   |    13 +-
 .../security/auth/KerberosPrincipalToLocal.java |    15 +-
 .../storm/security/auth/ReqContext.java         |    29 +-
 .../security/auth/SaslTransportPlugin.java      |    43 +-
 .../security/auth/ShellBasedGroupsMapping.java  |    20 +-
 .../security/auth/SimpleTransportPlugin.java    |    66 +-
 .../security/auth/SingleUserPrincipal.java      |     2 +-
 .../storm/security/auth/TBackoffConnect.java    |    10 +-
 .../storm/security/auth/ThriftClient.java       |    75 +-
 .../security/auth/ThriftConnectionType.java     |    20 +-
 .../storm/security/auth/ThriftServer.java       |    27 +-
 .../auth/authorizer/DRPCAuthorizerBase.java     |    10 +-
 .../authorizer/DRPCSimpleACLAuthorizer.java     |    74 +-
 .../auth/authorizer/DenyAuthorizer.java         |    20 +-
 .../authorizer/ImpersonationAuthorizer.java     |    41 +-
 .../auth/authorizer/NoopAuthorizer.java         |    18 +-
 .../auth/authorizer/SimpleACLAuthorizer.java    |    38 +-
 .../authorizer/SimpleWhitelistAuthorizer.java   |    21 +-
 .../auth/digest/ClientCallbackHandler.java      |    21 +-
 .../auth/digest/DigestSaslTransportPlugin.java  |    15 +-
 .../auth/digest/ServerCallbackHandler.java      |    29 +-
 .../storm/security/auth/kerberos/AutoTGT.java   |    58 +-
 .../auth/kerberos/AutoTGTKrb5LoginModule.java   |     6 +-
 .../kerberos/AutoTGTKrb5LoginModuleTest.java    |     2 +-
 .../auth/kerberos/ClientCallbackHandler.java    |    37 +-
 .../kerberos/KerberosSaslTransportPlugin.java   |   107 +-
 .../auth/kerberos/ServerCallbackHandler.java    |    15 +-
 .../serialization/BlowfishTupleSerializer.java  |    18 +-
 .../storm/serialization/DefaultKryoFactory.java |    25 +-
 .../DefaultSerializationDelegate.java           |     6 +-
 .../GzipBridgeSerializationDelegate.java        |     9 +-
 .../GzipBridgeThriftSerializationDelegate.java  |     9 +-
 .../GzipSerializationDelegate.java              |     6 +-
 .../GzipThriftSerializationDelegate.java        |     2 +-
 .../storm/serialization/IKryoDecorator.java     |     1 +
 .../storm/serialization/IKryoFactory.java       |    16 +-
 .../storm/serialization/ITupleDeserializer.java |     2 +-
 .../storm/serialization/ITupleSerializer.java   |     3 +-
 .../serialization/KryoTupleDeserializer.java    |    51 +-
 .../serialization/KryoTupleSerializer.java      |    66 +-
 .../serialization/KryoValuesDeserializer.java   |    12 +-
 .../serialization/KryoValuesSerializer.java     |    12 +-
 .../serialization/SerializableSerializer.java   |     4 +-
 .../serialization/SerializationFactory.java     |    52 +-
 .../ThriftSerializationDelegate.java            |     4 +-
 .../types/ArrayListSerializer.java              |     3 +-
 .../serialization/types/HashMapSerializer.java  |     1 -
 .../serialization/types/HashSetSerializer.java  |     3 +-
 .../types/ListDelegateSerializer.java           |     3 +-
 .../storm/spout/IMultiSchemableSpout.java       |     5 +-
 .../backtype/storm/spout/ISchemableSpout.java   |     6 +-
 .../main/java/backtype/storm/spout/ISpout.java  |   104 +-
 .../storm/spout/ISpoutOutputCollector.java      |     7 +-
 .../storm/spout/ISpoutWaitStrategy.java         |     7 +-
 .../java/backtype/storm/spout/MultiScheme.java  |     5 +-
 .../storm/spout/NothingEmptyEmitStrategy.java   |     2 +-
 .../backtype/storm/spout/RawMultiScheme.java    |    17 +-
 .../main/java/backtype/storm/spout/Scheme.java  |     2 +-
 .../storm/spout/SchemeAsMultiScheme.java        |    28 +-
 .../java/backtype/storm/spout/ShellSpout.java   |    81 +-
 .../storm/spout/SleepSpoutWaitStrategy.java     |     3 +-
 .../storm/spout/SpoutOutputCollector.java       |    80 +-
 .../java/backtype/storm/state/IStateSpout.java  |     3 +
 .../backtype/storm/state/ISubscribedState.java  |     1 +
 .../state/ISynchronizeOutputCollector.java      |     2 +-
 .../storm/state/StateSpoutOutputCollector.java  |     1 -
 .../storm/state/SynchronizeOutputCollector.java |     1 -
 .../storm/task/GeneralTopologyContext.java      |    52 +-
 .../main/java/backtype/storm/task/IBolt.java    |    91 +-
 .../backtype/storm/task/IMetricsContext.java    |     5 +-
 .../backtype/storm/task/IOutputCollector.java   |     5 +-
 .../backtype/storm/task/OutputCollector.java    |   128 +-
 .../java/backtype/storm/task/ShellBolt.java     |   133 +-
 .../backtype/storm/task/TopologyContext.java    |   239 +-
 .../storm/task/WorkerTopologyContext.java       |    43 +-
 .../backtype/storm/testing/AckFailDelegate.java |     1 +
 .../storm/testing/AckFailMapTracker.java        |    18 +-
 .../java/backtype/storm/testing/AckTracker.java |    12 +-
 .../backtype/storm/testing/BatchNumberList.java |    13 +-
 .../storm/testing/BatchProcessWord.java         |     2 +-
 .../backtype/storm/testing/BatchRepeatA.java    |    13 +-
 .../backtype/storm/testing/BoltTracker.java     |     1 -
 .../storm/testing/CompleteTopologyParam.java    |   100 +-
 .../storm/testing/CountingBatchBolt.java        |     8 +-
 .../storm/testing/CountingCommitBolt.java       |     6 +-
 .../backtype/storm/testing/FeederSpout.java     |    17 +-
 .../backtype/storm/testing/FixedTupleSpout.java |    48 +-
 .../testing/ForwardingMetricsConsumer.java      |    37 +-
 .../backtype/storm/testing/IdentityBolt.java    |     4 +-
 .../storm/testing/KeyedCountingBatchBolt.java   |     8 +-
 .../storm/testing/KeyedSummingBatchBolt.java    |    10 +-
 .../storm/testing/MemoryTransactionalSpout.java |    76 +-
 .../testing/MemoryTransactionalSpoutMeta.java   |     8 +-
 .../backtype/storm/testing/MkClusterParam.java  |    67 +-
 .../backtype/storm/testing/MkTupleParam.java    |    57 +-
 .../backtype/storm/testing/MockedSources.java   |    14 +-
 .../java/backtype/storm/testing/NGrouping.java  |     8 +-
 .../storm/testing/NonRichBoltTracker.java       |     1 -
 .../testing/OpaqueMemoryTransactionalSpout.java |    64 +-
 .../storm/testing/PrepareBatchBolt.java         |     6 +-
 .../storm/testing/PythonShellMetricsBolt.java   |    38 +-
 .../storm/testing/PythonShellMetricsSpout.java  |    42 +-
 .../testing/SingleUserSimpleTransport.java      |    24 +-
 .../backtype/storm/testing/SpoutTracker.java    |    11 +-
 .../storm/testing/TestAggregatesCounter.java    |     5 +-
 .../backtype/storm/testing/TestConfBolt.java    |    11 +-
 .../storm/testing/TestEventLogSpout.java        |    66 +-
 .../storm/testing/TestEventOrderCheckBolt.java  |     7 +-
 .../backtype/storm/testing/TestGlobalCount.java |     1 -
 .../java/backtype/storm/testing/TestJob.java    |    16 +-
 .../backtype/storm/testing/TestPlannerBolt.java |     5 +-
 .../storm/testing/TestPlannerSpout.java         |    30 +-
 .../backtype/storm/testing/TestSerObject.java   |     8 +-
 .../backtype/storm/testing/TestWordCounter.java |    11 +-
 .../backtype/storm/testing/TestWordSpout.java   |    21 +-
 .../backtype/storm/testing/TrackedTopology.java |    16 +-
 .../storm/testing/TupleCaptureBolt.java         |     9 +-
 .../topology/BaseConfigurationDeclarer.java     |    11 +-
 .../storm/topology/BasicBoltExecutor.java       |    11 +-
 .../storm/topology/BasicOutputCollector.java    |     1 -
 .../backtype/storm/topology/BoltDeclarer.java   |     3 +-
 .../ComponentConfigurationDeclarer.java         |     5 +
 .../storm/topology/FailedException.java         |     4 +-
 .../backtype/storm/topology/IBasicBolt.java     |     2 +
 .../storm/topology/IBasicOutputCollector.java   |     2 +
 .../backtype/storm/topology/IComponent.java     |    12 +-
 .../java/backtype/storm/topology/IConfig.java   |    28 -
 .../storm/topology/IDynamicComponent.java       |    13 +
 .../java/backtype/storm/topology/IRichBolt.java |     5 +-
 .../backtype/storm/topology/IRichSpout.java     |     5 +-
 .../storm/topology/IRichStateSpout.java         |     1 -
 .../backtype/storm/topology/InputDeclarer.java  |    66 +-
 .../storm/topology/OutputFieldsDeclarer.java    |     5 +-
 .../storm/topology/OutputFieldsGetter.java      |     3 +-
 .../storm/topology/ReportedFailedException.java |     4 +-
 .../backtype/storm/topology/SpoutDeclarer.java  |     2 +-
 .../storm/topology/TopologyBuilder.java         |   181 +-
 .../storm/topology/base/BaseBasicBolt.java      |     2 +-
 .../storm/topology/base/BaseBatchBolt.java      |     2 +-
 .../storm/topology/base/BaseComponent.java      |     2 +-
 ...BaseOpaquePartitionedTransactionalSpout.java |     3 +-
 .../storm/topology/base/BaseRichBolt.java       |     2 +-
 .../storm/topology/base/BaseRichSpout.java      |     2 +-
 .../topology/base/BaseTransactionalBolt.java    |     2 +-
 .../storm/transactional/ICommitter.java         |     5 +-
 .../ICommitterTransactionalSpout.java           |     7 +-
 .../transactional/ITransactionalSpout.java      |    43 +-
 .../storm/transactional/TransactionAttempt.java |    16 +-
 .../TransactionalSpoutBatchExecutor.java        |    18 +-
 .../TransactionalSpoutCoordinator.java          |    80 +-
 .../TransactionalTopologyBuilder.java           |   179 +-
 .../IOpaquePartitionedTransactionalSpout.java   |    23 +-
 .../IPartitionedTransactionalSpout.java         |    33 +-
 ...uePartitionedTransactionalSpoutExecutor.java |    46 +-
 .../PartitionedTransactionalSpoutExecutor.java  |    47 +-
 .../state/RotatingTransactionalState.java       |    51 +-
 .../state/TestTransactionalState.java           |     9 +-
 .../transactional/state/TransactionalState.java |    71 +-
 .../java/backtype/storm/tuple/BatchTuple.java   |    26 +-
 .../main/java/backtype/storm/tuple/Fields.java  |    26 +-
 .../main/java/backtype/storm/tuple/ITuple.java  |    30 +-
 .../java/backtype/storm/tuple/ITupleExt.java    |    25 +
 .../java/backtype/storm/tuple/MessageId.java    |    20 +-
 .../main/java/backtype/storm/tuple/Tuple.java   |    23 +-
 .../java/backtype/storm/tuple/TupleExt.java     |     9 +-
 .../java/backtype/storm/tuple/TupleImpl.java    |   109 +-
 .../java/backtype/storm/tuple/TupleImplExt.java |    23 +-
 .../main/java/backtype/storm/tuple/Values.java  |    11 +-
 .../storm/utils/BufferFileInputStream.java      |     7 +-
 .../backtype/storm/utils/CRC32OutputStream.java |     6 +-
 .../backtype/storm/utils/ClojureTimerTask.java  |     6 +-
 .../java/backtype/storm/utils/Container.java    |     2 +-
 .../java/backtype/storm/utils/DRPCClient.java   |     6 +-
 .../backtype/storm/utils/DisruptorQueue.java    |    38 +-
 .../storm/utils/DisruptorQueueImpl.java         |    70 +-
 .../storm/utils/DisruptorWrapBlockingQueue.java |    58 +-
 .../storm/utils/ExtendedThreadPoolExecutor.java |    65 +-
 .../storm/utils/IndifferentAccessMap.java       |    30 +-
 .../backtype/storm/utils/InprocMessaging.java   |    20 +-
 .../storm/utils/KeyedRoundRobinQueue.java       |    10 +-
 .../java/backtype/storm/utils/ListDelegate.java |    10 +-
 .../java/backtype/storm/utils/LocalState.java   |    19 +-
 .../main/java/backtype/storm/utils/Monitor.java |    84 +-
 .../java/backtype/storm/utils/MutableInt.java   |    10 +-
 .../java/backtype/storm/utils/MutableLong.java  |    10 +-
 .../backtype/storm/utils/MutableObject.java     |     8 +-
 .../java/backtype/storm/utils/NimbusClient.java |    36 +-
 .../storm/utils/RegisteredGlobalState.java      |    30 +-
 .../java/backtype/storm/utils/RotatingMap.java  |    52 +-
 .../backtype/storm/utils/ServiceRegistry.java   |    16 +-
 .../java/backtype/storm/utils/ShellProcess.java |    32 +-
 .../java/backtype/storm/utils/ShellUtils.java   |   200 +-
 .../StormBoundedExponentialBackoffRetry.java    |    22 +-
 .../java/backtype/storm/utils/TestUtils.java    |     4 +-
 .../storm/utils/ThreadResourceManager.java      |     8 +-
 .../storm/utils/ThriftTopologyUtils.java        |    12 +-
 .../main/java/backtype/storm/utils/Time.java    |    65 +-
 .../java/backtype/storm/utils/TimeCacheMap.java |    32 +-
 .../backtype/storm/utils/TransferDrainer.java   |   160 +-
 .../java/backtype/storm/utils/TupleHelpers.java |     4 +-
 .../java/backtype/storm/utils/TupleUtils.java   |    15 +-
 .../main/java/backtype/storm/utils/Utils.java   |   404 +-
 .../java/backtype/storm/utils/VersionInfo.java  |   202 +-
 .../backtype/storm/utils/VersionedStore.java    |    53 +-
 .../storm/utils/WindowedTimeThrottler.java      |    14 +-
 .../backtype/storm/utils/WorkerClassLoader.java |    77 +-
 .../backtype/storm/utils/WritableUtils.java     |   584 +-
 .../backtype/storm/utils/ZookeeperAuthInfo.java |     9 +-
 .../storm/utils/ZookeeperServerCnxnFactory.java |   104 +-
 .../utils/disruptor/AbstractSequencerExt.java   |     8 +-
 .../utils/disruptor/MultiProducerSequencer.java |    74 +-
 .../storm/utils/disruptor/RingBuffer.java       |   192 +-
 .../disruptor/SingleProducerSequencer.java      |    54 +-
 .../java/com/alibaba/jstorm/batch/BatchId.java  |     3 +-
 .../jstorm/batch/BatchTopologyBuilder.java      |    28 +-
 .../com/alibaba/jstorm/batch/IBatchSpout.java   |     6 +-
 .../com/alibaba/jstorm/batch/ICommitter.java    |     6 +-
 .../alibaba/jstorm/batch/IPrepareCommit.java    |     3 +-
 .../jstorm/batch/impl/BatchSpoutMsgId.java      |     3 +-
 .../jstorm/batch/impl/BatchSpoutTrigger.java    |    33 +-
 .../jstorm/batch/impl/CoordinatedBolt.java      |    19 +-
 .../alibaba/jstorm/batch/util/BatchCommon.java  |    23 +-
 .../com/alibaba/jstorm/cache/JStormCache.java   |    21 +-
 .../com/alibaba/jstorm/cache/RocksDBCache.java  |   148 +-
 .../alibaba/jstorm/cache/RocksTTLDBCache.java   |   315 +-
 .../alibaba/jstorm/cache/TimeoutMemCache.java   |    84 +-
 .../jstorm/callback/AsyncLoopRunnable.java      |     3 +-
 .../jstorm/callback/AsyncLoopThread.java        |    18 +-
 .../jstorm/callback/DefaultWatcherCallBack.java |     6 +-
 .../impl/DelayStatusTransitionCallback.java     |    25 +-
 .../impl/DoRebalanceTransitionCallback.java     |   176 +-
 .../callback/impl/KillTransitionCallback.java   |    18 +-
 .../impl/ReassignTransitionCallback.java        |     9 +-
 .../impl/RebalanceTransitionCallback.java       |    12 +-
 .../callback/impl/RemoveTransitionCallback.java |     7 +-
 .../impl/UpdateConfTransitionCallback.java      |    79 -
 .../impl/UpdateTopologyTransitionCallback.java  |    74 +
 .../alibaba/jstorm/client/ConfigExtension.java  |   463 +-
 .../alibaba/jstorm/client/WorkerAssignment.java |    28 +-
 .../jstorm/client/spout/ConfigExtension.java    |   943 +
 .../jstorm/client/spout/IAckValueSpout.java     |     3 +-
 .../jstorm/client/spout/IFailValueSpout.java    |     3 +-
 .../com/alibaba/jstorm/cluster/Cluster.java     |   187 +-
 .../alibaba/jstorm/cluster/ClusterState.java    |     3 +-
 .../java/com/alibaba/jstorm/cluster/Common.java |   444 +-
 .../jstorm/cluster/DistributedClusterState.java |    29 +-
 .../com/alibaba/jstorm/cluster/StormBase.java   |    10 +-
 .../jstorm/cluster/StormClusterState.java       |    98 +-
 .../com/alibaba/jstorm/cluster/StormConfig.java |   220 +-
 .../alibaba/jstorm/cluster/StormMonitor.java    |     3 +-
 .../com/alibaba/jstorm/cluster/StormStatus.java |    17 +-
 .../jstorm/cluster/StormZkClusterState.java     |   299 +-
 .../jstorm/common/metric/AsmCounter.java        |    92 +
 .../alibaba/jstorm/common/metric/AsmGauge.java  |    72 +
 .../jstorm/common/metric/AsmHistogram.java      |   101 +
 .../alibaba/jstorm/common/metric/AsmMeter.java  |    74 +
 .../alibaba/jstorm/common/metric/AsmMetric.java |   267 +
 .../alibaba/jstorm/common/metric/AsmTimer.java  |   107 +
 .../alibaba/jstorm/common/metric/Counter.java   |    55 -
 .../jstorm/common/metric/CounterData.java       |    34 +
 .../com/alibaba/jstorm/common/metric/Gauge.java |    61 -
 .../alibaba/jstorm/common/metric/GaugeData.java |    34 +
 .../alibaba/jstorm/common/metric/Histogram.java |   104 -
 .../jstorm/common/metric/Histogram.java.bak     |    71 -
 .../jstorm/common/metric/HistogramData.java     |   135 +
 .../jstorm/common/metric/LongCounter.java       |    39 -
 .../com/alibaba/jstorm/common/metric/Meter.java |    50 -
 .../alibaba/jstorm/common/metric/MeterData.java |    71 +
 .../jstorm/common/metric/MetricBaseData.java    |    59 +
 .../jstorm/common/metric/MetricFilter.java      |    46 -
 .../jstorm/common/metric/MetricMeta.java        |   213 +
 .../jstorm/common/metric/MetricMetaParser.java  |    58 +
 .../jstorm/common/metric/MetricRegistry.java    |   316 -
 .../alibaba/jstorm/common/metric/MetricSet.java |    28 -
 .../jstorm/common/metric/QueueGauge.java        |     9 +-
 .../alibaba/jstorm/common/metric/TaskTrack.java |   180 +
 .../com/alibaba/jstorm/common/metric/Timer.java |   108 -
 .../alibaba/jstorm/common/metric/TimerData.java |   169 +
 .../jstorm/common/metric/TimerRatio.java        |     6 +-
 .../com/alibaba/jstorm/common/metric/Top.java   |   157 -
 .../jstorm/common/metric/TopologyHistory.java   |   153 +
 .../jstorm/common/metric/old/Counter.java       |    51 +
 .../alibaba/jstorm/common/metric/old/Gauge.java |    61 +
 .../jstorm/common/metric/old/Histogram.java     |   102 +
 .../jstorm/common/metric/old/LongCounter.java   |    39 +
 .../alibaba/jstorm/common/metric/old/Meter.java |    49 +
 .../jstorm/common/metric/old/MetricFilter.java  |    45 +
 .../jstorm/common/metric/old/MetricSet.java     |    28 +
 .../jstorm/common/metric/old/MetricThrift.java  |   106 +
 .../jstorm/common/metric/old/RegistryType.java  |     9 +
 .../jstorm/common/metric/old/StaticsType.java   |    22 +
 .../alibaba/jstorm/common/metric/old/Timer.java |   106 +
 .../alibaba/jstorm/common/metric/old/Top.java   |   154 +
 .../common/metric/old/operator/Sampling.java    |    38 +
 .../common/metric/old/operator/StartTime.java   |    22 +
 .../old/operator/convert/AtomicLongToLong.java  |    35 +
 .../metric/old/operator/convert/Convertor.java  |    25 +
 .../old/operator/convert/DefaultConvertor.java  |    29 +
 .../metric/old/operator/convert/SetToList.java  |    39 +
 .../metric/old/operator/merger/AvgMerger.java   |    49 +
 .../old/operator/merger/LongSumMerger.java      |    42 +
 .../metric/old/operator/merger/Merger.java      |    25 +
 .../metric/old/operator/merger/SumMerger.java   |    39 +
 .../metric/old/operator/merger/TpsMerger.java   |    64 +
 .../metric/old/operator/updater/AddUpdater.java |    38 +
 .../metric/old/operator/updater/AvgUpdater.java |    57 +
 .../old/operator/updater/DoubleAddUpdater.java  |    43 +
 .../old/operator/updater/LongAddUpdater.java    |    44 +
 .../metric/old/operator/updater/Updater.java    |    26 +
 .../common/metric/old/window/AllWindow.java     |    78 +
 .../jstorm/common/metric/old/window/Metric.java |   224 +
 .../common/metric/old/window/RollingWindow.java |   189 +
 .../common/metric/old/window/StatBuckets.java   |   176 +
 .../jstorm/common/metric/operator/Sampling.java |    38 -
 .../common/metric/operator/StartTime.java       |    22 -
 .../operator/convert/AtomicLongToLong.java      |    35 -
 .../metric/operator/convert/Convertor.java      |    25 -
 .../operator/convert/DefaultConvertor.java      |    29 -
 .../metric/operator/convert/SetToList.java      |    39 -
 .../metric/operator/merger/AvgMerger.java       |    52 -
 .../metric/operator/merger/AvgMerger.java.bak   |    53 -
 .../metric/operator/merger/LongSumMerger.java   |    43 -
 .../common/metric/operator/merger/Merger.java   |    25 -
 .../metric/operator/merger/SumMerger.java       |    39 -
 .../metric/operator/merger/TpsMerger.java       |    65 -
 .../metric/operator/updater/AddUpdater.java     |    38 -
 .../metric/operator/updater/AvgUpdater.java     |    62 -
 .../metric/operator/updater/AvgUpdater.java.bak |    73 -
 .../operator/updater/DoubleAddUpdater.java      |    45 -
 .../metric/operator/updater/LongAddUpdater.java |    45 -
 .../common/metric/operator/updater/Updater.java |    25 -
 .../metric/snapshot/AsmCounterSnapshot.java     |    20 +
 .../metric/snapshot/AsmGaugeSnapshot.java       |    20 +
 .../metric/snapshot/AsmHistogramSnapshot.java   |    22 +
 .../metric/snapshot/AsmMeterSnapshot.java       |    50 +
 .../common/metric/snapshot/AsmSnapshot.java     |    32 +
 .../metric/snapshot/AsmTimerSnapshot.java       |    32 +
 .../jstorm/common/metric/window/AllWindow.java  |    78 -
 .../jstorm/common/metric/window/Metric.java     |   231 -
 .../common/metric/window/RollingWindow.java     |   194 -
 .../common/metric/window/StatBuckets.java       |   153 -
 .../jstorm/common/stats/StaticsType.java        |    22 -
 .../alibaba/jstorm/container/CgroupCenter.java  |    11 +-
 .../alibaba/jstorm/container/CgroupUtils.java   |     9 +-
 .../com/alibaba/jstorm/container/SubSystem.java |     3 +-
 .../jstorm/container/cgroup/CgroupCommon.java   |    54 +-
 .../container/cgroup/CgroupCommonOperation.java |     3 +-
 .../container/cgroup/CgroupCoreFactory.java     |     6 +-
 .../jstorm/container/cgroup/core/BlkioCore.java |   130 +-
 .../jstorm/container/cgroup/core/CpuCore.java   |    37 +-
 .../container/cgroup/core/CpuacctCore.java      |    12 +-
 .../container/cgroup/core/CpusetCore.java       |   117 +-
 .../container/cgroup/core/DevicesCore.java      |    19 +-
 .../container/cgroup/core/FreezerCore.java      |     6 +-
 .../container/cgroup/core/MemoryCore.java       |    83 +-
 .../container/cgroup/core/NetClsCore.java       |     7 +-
 .../container/cgroup/core/NetPrioCore.java      |    10 +-
 .../jstorm/daemon/nimbus/DefaultInimbus.java    |    11 +-
 .../jstorm/daemon/nimbus/NimbusCache.java       |    93 +-
 .../jstorm/daemon/nimbus/NimbusData.java        |   176 +-
 .../jstorm/daemon/nimbus/NimbusServer.java      |   150 +-
 .../jstorm/daemon/nimbus/NimbusUtils.java       |   349 +-
 .../jstorm/daemon/nimbus/ServiceHandler.java    |  1095 +-
 .../jstorm/daemon/nimbus/StatusTransition.java  |   177 +-
 .../jstorm/daemon/nimbus/StatusType.java        |    29 +-
 .../jstorm/daemon/nimbus/TopologyAssign.java    |   379 +-
 .../daemon/nimbus/TopologyAssignEvent.java      |    10 +-
 .../daemon/nimbus/TopologyMetricsRunnable.java  |  1428 +-
 .../jstorm/daemon/nimbus/TopologyNettyMgr.java  |   163 +-
 .../metric/uploader/AlimonitorClient.java       |   226 +
 .../metric/uploader/DefaultMetricUploader.java  |    71 +
 .../nimbus/metric/uploader/MetricUploader.java  |    46 +
 .../jstorm/daemon/supervisor/CgroupManager.java |    43 +-
 .../jstorm/daemon/supervisor/Heartbeat.java     |    29 +-
 .../jstorm/daemon/supervisor/Httpserver.java    |    79 +-
 .../jstorm/daemon/supervisor/SandBoxMaker.java  |    37 +-
 .../jstorm/daemon/supervisor/ShutdownWork.java  |    40 +-
 .../daemon/supervisor/StateHeartbeat.java       |     3 +-
 .../jstorm/daemon/supervisor/Supervisor.java    |    79 +-
 .../daemon/supervisor/SupervisorInfo.java       |    37 +-
 .../daemon/supervisor/SupervisorManger.java     |    20 +-
 .../daemon/supervisor/SyncProcessEvent.java     |   377 +-
 .../daemon/supervisor/SyncSupervisorEvent.java  |   305 +-
 .../daemon/worker/BatchDrainerRunable.java      |     6 +-
 .../jstorm/daemon/worker/ContextMaker.java      |    57 +-
 .../jstorm/daemon/worker/DrainerRunable.java    |    13 +-
 .../jstorm/daemon/worker/LocalAssignment.java   |    15 +-
 .../jstorm/daemon/worker/ProcessSimulator.java  |     3 +-
 .../jstorm/daemon/worker/RefreshActive.java     |    13 +-
 .../daemon/worker/RefreshConnections.java       |   191 +-
 .../daemon/worker/ShutdownableDameon.java       |     3 +-
 .../daemon/worker/VirtualPortDispatch.java      |    13 +-
 .../alibaba/jstorm/daemon/worker/Worker.java    |   169 +-
 .../jstorm/daemon/worker/WorkerData.java        |   263 +-
 .../jstorm/daemon/worker/WorkerHeartbeat.java   |     9 +-
 .../jstorm/daemon/worker/WorkerReportError.java |    38 +
 .../jstorm/daemon/worker/WorkerShutdown.java    |     7 +-
 .../daemon/worker/hearbeat/SyncContainerHb.java |    49 +-
 .../worker/hearbeat/WorkerHeartbeatRunable.java |    12 +-
 .../worker/timer/BackpressureCheckTrigger.java  |    51 +
 .../daemon/worker/timer/RotatingMapTrigger.java |    12 +-
 .../worker/timer/TaskBatchCheckTrigger.java     |    32 +
 .../worker/timer/TaskBatchFlushTrigger.java     |     4 +-
 .../worker/timer/TaskHeartbeatTrigger.java      |   112 +-
 .../daemon/worker/timer/TickTupleTrigger.java   |    11 +-
 .../daemon/worker/timer/TimerTrigger.java       |    14 +-
 .../com/alibaba/jstorm/drpc/ClearThread.java    |    11 +-
 .../main/java/com/alibaba/jstorm/drpc/Drpc.java |    74 +-
 .../alibaba/jstorm/event/EventManagerImp.java   |     6 +-
 .../jstorm/event/EventManagerPusher.java        |     3 +-
 .../jstorm/message/netty/ControlMessage.java    |     7 +-
 .../jstorm/message/netty/MessageBatch.java      |    22 +-
 .../jstorm/message/netty/MessageDecoder.java    |   140 +-
 .../jstorm/message/netty/MessageEncoder.java    |     6 +-
 .../jstorm/message/netty/NettyClient.java       |   325 +-
 .../jstorm/message/netty/NettyClientAsync.java  |    86 +-
 .../jstorm/message/netty/NettyClientSync.java   |   141 +-
 .../jstorm/message/netty/NettyConnection.java   |    29 +-
 .../jstorm/message/netty/NettyContext.java      |    42 +-
 .../message/netty/NettyRenameThreadFactory.java |    10 +-
 .../jstorm/message/netty/NettyServer.java       |    52 +-
 .../jstorm/message/netty/ReconnectRunnable.java |     6 +-
 .../message/netty/StormClientHandler.java       |    44 +-
 .../netty/StormClientPipelineFactory.java       |     4 +-
 .../message/netty/StormServerHandler.java       |    21 +-
 .../netty/StormServerPipelineFactory.java       |     2 +-
 .../alibaba/jstorm/metric/AlimonitorClient.java |   267 -
 .../alibaba/jstorm/metric/AsmMetricFilter.java  |    49 +
 .../jstorm/metric/AsmMetricRegistry.java        |   205 +
 .../com/alibaba/jstorm/metric/AsmMetricSet.java |    31 +
 .../com/alibaba/jstorm/metric/AsmWindow.java    |    41 +
 .../java/com/alibaba/jstorm/metric/Bytes.java   |   842 +
 .../jstorm/metric/DefaultMetricIDGenerator.java |    15 +
 .../jstorm/metric/DefaultMetricQueryClient.java |    84 +
 .../jstorm/metric/JStormHealthCheck.java        |    21 +-
 .../jstorm/metric/JStormHealthReporter.java     |    59 +
 .../jstorm/metric/JStormMetricCache.java        |   351 +
 .../alibaba/jstorm/metric/JStormMetrics.java    |   534 +-
 .../jstorm/metric/JStormMetricsReporter.java    |   544 +-
 .../alibaba/jstorm/metric/KVSerializable.java   |    17 +
 .../com/alibaba/jstorm/metric/MetaFilter.java   |    11 +
 .../com/alibaba/jstorm/metric/MetaType.java     |    50 +
 .../com/alibaba/jstorm/metric/MetricClient.java |    92 +
 .../jstorm/metric/MetricDataConverter.java      |    87 +
 .../com/alibaba/jstorm/metric/MetricDef.java    |    55 +-
 .../jstorm/metric/MetricIDGenerator.java        |     9 +
 .../com/alibaba/jstorm/metric/MetricJstack.java |    51 +-
 .../jstorm/metric/MetricQueryClient.java        |   148 +
 .../alibaba/jstorm/metric/MetricSendClient.java |    18 -
 .../com/alibaba/jstorm/metric/MetricThrift.java |   129 -
 .../com/alibaba/jstorm/metric/MetricType.java   |    50 +
 .../com/alibaba/jstorm/metric/MetricUtils.java  |   600 +
 .../jstorm/metric/SimpleJStormMetric.java       |   147 +-
 .../com/alibaba/jstorm/metric/TimeTicker.java   |    52 +
 .../jstorm/metric/TopologyMetricContext.java    |   528 +
 .../jstorm/queue/disruptor/JstormEvent.java     |    11 +-
 .../queue/disruptor/JstormEventHandler.java     |     3 +-
 .../com/alibaba/jstorm/schedule/Assignment.java |    59 +-
 .../alibaba/jstorm/schedule/AssignmentBak.java  |     6 +-
 .../alibaba/jstorm/schedule/CleanRunnable.java  |     6 +-
 .../jstorm/schedule/DelayEventRunnable.java     |     3 +-
 .../jstorm/schedule/FollowerRunnable.java       |   235 +-
 .../jstorm/schedule/IToplogyScheduler.java      |     3 +-
 .../jstorm/schedule/MonitorRunnable.java        |   117 +-
 .../jstorm/schedule/TopologyAssignContext.java  |    38 +-
 .../DefaultTopologyAssignContext.java           |    66 +-
 .../DefaultTopologyScheduler.java               |    59 +-
 .../default_assign/ResourceWorkerSlot.java      |    15 +-
 .../Selector/AbstractSelector.java              |     6 +-
 .../Selector/ComponentNumSelector.java          |     8 +-
 .../Selector/InputComponentNumSelector.java     |     8 +-
 .../default_assign/Selector/Selector.java       |     3 +-
 .../Selector/WorkerComparator.java              |     3 +-
 .../default_assign/TaskAssignContext.java       |    35 +-
 .../schedule/default_assign/TaskScheduler.java  |   289 +-
 .../default_assign/WorkerScheduler.java         |   715 +-
 .../main/java/com/alibaba/jstorm/task/Task.java |   305 +-
 .../com/alibaba/jstorm/task/TaskBaseMetric.java |   150 +-
 .../alibaba/jstorm/task/TaskBatchReceiver.java  |    25 +-
 .../alibaba/jstorm/task/TaskBatchTransfer.java  |   141 +-
 .../java/com/alibaba/jstorm/task/TaskInfo.java  |    13 +-
 .../com/alibaba/jstorm/task/TaskReceiver.java   |    87 +-
 .../alibaba/jstorm/task/TaskShutdownDameon.java |    52 +-
 .../com/alibaba/jstorm/task/TaskTransfer.java   |   153 +-
 .../com/alibaba/jstorm/task/TkHbCacheTime.java  |    19 +-
 .../com/alibaba/jstorm/task/acker/Acker.java    |    42 +-
 .../jstorm/task/backpressure/Backpressure.java  |    88 +
 .../backpressure/BackpressureController.java    |   182 +
 .../backpressure/BackpressureCoordinator.java   |   415 +
 .../task/backpressure/BackpressureTrigger.java  |   216 +
 .../backpressure/SourceBackpressureInfo.java    |    97 +
 .../backpressure/TargetBackpressureInfo.java    |    84 +
 .../jstorm/task/comm/TaskSendTargets.java       |    37 +-
 .../com/alibaba/jstorm/task/comm/TupleInfo.java |     3 +-
 .../jstorm/task/comm/UnanchoredSend.java        |     9 +-
 .../jstorm/task/error/ITaskReportErr.java       |     2 +
 .../jstorm/task/error/TaskErrorRunable.java     |     3 +-
 .../jstorm/task/error/TaskReportError.java      |    20 +-
 .../task/error/TaskReportErrorAndDie.java       |     9 +-
 .../jstorm/task/execute/BaseExecutors.java      |   188 +-
 .../jstorm/task/execute/BoltCollector.java      |   145 +-
 .../jstorm/task/execute/BoltExecutors.java      |   195 +-
 .../jstorm/task/execute/spout/AckSpoutMsg.java  |    26 +-
 .../jstorm/task/execute/spout/FailSpoutMsg.java |     6 +-
 .../spout/MultipleThreadSpoutExecutors.java     |    64 +-
 .../spout/SingleThreadSpoutExecutors.java       |    47 +-
 .../task/execute/spout/SpoutCollector.java      |   103 +-
 .../task/execute/spout/SpoutExecutors.java      |   295 +-
 .../execute/spout/SpoutTimeoutCallBack.java     |    15 +-
 .../jstorm/task/group/MkCustomGrouper.java      |     4 +-
 .../jstorm/task/group/MkFieldsGrouper.java      |    10 +-
 .../alibaba/jstorm/task/group/MkGrouper.java    |    44 +-
 .../alibaba/jstorm/task/group/MkLocalFirst.java |    11 +-
 .../jstorm/task/group/MkLocalShuffer.java       |    60 +-
 .../com/alibaba/jstorm/task/group/Shuffer.java  |     3 +-
 .../jstorm/task/heartbeat/TaskHeartbeat.java    |    98 -
 .../task/heartbeat/TaskHeartbeatRunable.java    |   191 -
 .../task/heartbeat/TaskHeartbeatUpdater.java    |   156 +
 .../jstorm/task/master/TopoMasterCtrlEvent.java |    75 +
 .../jstorm/task/master/TopologyMaster.java      |   359 +
 .../alibaba/jstorm/utils/DisruptorQueue.java    |     2 +-
 .../alibaba/jstorm/utils/DisruptorRunable.java  |    56 +-
 .../com/alibaba/jstorm/utils/EPlatform.java     |    46 +-
 .../com/alibaba/jstorm/utils/FileAttribute.java |     6 +-
 .../alibaba/jstorm/utils/HttpserverUtils.java   |     6 +-
 .../com/alibaba/jstorm/utils/IntervalCheck.java |   157 +-
 .../alibaba/jstorm/utils/JStormServerUtils.java |    25 +-
 .../com/alibaba/jstorm/utils/JStormUtils.java   |   332 +-
 .../java/com/alibaba/jstorm/utils/LoadConf.java |   232 +-
 .../com/alibaba/jstorm/utils/NetWorkUtils.java  |    75 +-
 .../java/com/alibaba/jstorm/utils/OSInfo.java   |   277 +-
 .../alibaba/jstorm/utils/OlderFileFilter.java   |    21 +-
 .../java/com/alibaba/jstorm/utils/Pair.java     |     6 +-
 .../com/alibaba/jstorm/utils/PathUtils.java     |     2 +-
 .../com/alibaba/jstorm/utils/RandomRange.java   |     3 +-
 .../com/alibaba/jstorm/utils/RotatingMap.java   |    11 +-
 .../alibaba/jstorm/utils/SystemOperation.java   |    16 +-
 .../java/com/alibaba/jstorm/utils/Thrift.java   |    61 +-
 .../com/alibaba/jstorm/utils/TimeCacheMap.java  |     8 +-
 .../alibaba/jstorm/utils/TimeCacheQueue.java    |    20 +-
 .../com/alibaba/jstorm/utils/TimeFormat.java    |    25 +-
 .../com/alibaba/jstorm/utils/TimeUtils.java     |   105 +-
 .../com/alibaba/jstorm/zk/ZkEventTypes.java     |     3 +-
 .../main/java/com/alibaba/jstorm/zk/ZkTool.java |   164 +-
 .../java/com/alibaba/jstorm/zk/Zookeeper.java   |    89 +-
 .../src/main/java/storm/trident/JoinType.java   |     5 +-
 .../src/main/java/storm/trident/Stream.java     |   153 +-
 .../java/storm/trident/TridentTopology.java     |    36 +-
 .../trident/drpc/ReturnResultsReducer.java      |    31 +-
 .../fluent/ChainedAggregatorDeclarer.java       |    63 +-
 .../fluent/ChainedFullAggregatorDeclarer.java   |     5 +
 .../ChainedPartitionAggregatorDeclarer.java     |     7 +-
 .../trident/fluent/GlobalAggregationScheme.java |     2 +-
 .../storm/trident/fluent/GroupedStream.java     |    57 +-
 .../trident/fluent/IAggregatableStream.java     |     3 +
 .../java/storm/trident/fluent/UniqueIdGen.java  |     6 +-
 .../java/storm/trident/graph/GraphGrouper.java  |    65 +-
 .../main/java/storm/trident/graph/Group.java    |    23 +-
 .../storm/trident/operation/Aggregator.java     |     2 +
 .../java/storm/trident/operation/Assembly.java  |     1 -
 .../storm/trident/operation/BaseAggregator.java |     3 +-
 .../storm/trident/operation/BaseFilter.java     |     3 +-
 .../storm/trident/operation/BaseFunction.java   |     3 +-
 .../trident/operation/BaseMultiReducer.java     |     3 +-
 .../storm/trident/operation/BaseOperation.java  |     2 +-
 .../trident/operation/CombinerAggregator.java   |     2 +
 .../storm/trident/operation/EachOperation.java  |     2 +-
 .../java/storm/trident/operation/Filter.java    |     1 -
 .../trident/operation/GroupedMultiReducer.java  |     5 +-
 .../storm/trident/operation/MultiReducer.java   |     5 +-
 .../java/storm/trident/operation/Operation.java |     1 +
 .../trident/operation/ReducerAggregator.java    |     1 +
 .../trident/operation/TridentCollector.java     |     2 +-
 .../operation/TridentMultiReducerContext.java   |     9 +-
 .../operation/TridentOperationContext.java      |    16 +-
 .../storm/trident/operation/builtin/Count.java  |     3 +-
 .../storm/trident/operation/builtin/Equals.java |     9 +-
 .../trident/operation/builtin/FilterNull.java   |     5 +-
 .../storm/trident/operation/builtin/FirstN.java |    38 +-
 .../storm/trident/operation/builtin/MapGet.java |     7 +-
 .../storm/trident/operation/builtin/Negate.java |     6 +-
 .../trident/operation/builtin/SnapshotGet.java  |     2 +-
 .../storm/trident/operation/builtin/Sum.java    |     3 +-
 .../operation/builtin/TupleCollectionGet.java   |     6 +-
 .../operation/impl/CaptureCollector.java        |     6 +-
 .../operation/impl/ChainedAggregatorImpl.java   |    53 +-
 .../trident/operation/impl/ChainedResult.java   |    17 +-
 .../operation/impl/CombinerAggStateUpdater.java |     9 +-
 .../impl/CombinerAggregatorCombineImpl.java     |    20 +-
 .../impl/CombinerAggregatorInitImpl.java        |     6 +-
 .../trident/operation/impl/FilterExecutor.java  |     6 +-
 .../operation/impl/GlobalBatchToPartition.java  |     3 +-
 .../trident/operation/impl/GroupCollector.java  |     8 +-
 .../operation/impl/GroupedAggregator.java       |    16 +-
 .../impl/GroupedMultiReducerExecutor.java       |    19 +-
 .../operation/impl/IdentityMultiReducer.java    |     3 +-
 .../impl/IndexHashBatchToPartition.java         |     2 +-
 .../operation/impl/JoinerMultiReducer.java      |    77 +-
 .../operation/impl/ReducerAggStateUpdater.java  |     7 +-
 .../operation/impl/ReducerAggregatorImpl.java   |    18 +-
 .../storm/trident/operation/impl/Result.java    |     2 +-
 .../operation/impl/SingleEmitAggregator.java    |    23 +-
 .../trident/operation/impl/TrueFilter.java      |     2 +-
 .../storm/trident/partition/GlobalGrouping.java |     5 +-
 .../trident/partition/IdentityGrouping.java     |    11 +-
 .../trident/partition/IndexHashGrouping.java    |    12 +-
 .../storm/trident/planner/BridgeReceiver.java   |     7 +-
 .../main/java/storm/trident/planner/Node.java   |    10 +-
 .../storm/trident/planner/NodeStateInfo.java    |     2 +-
 .../storm/trident/planner/PartitionNode.java    |     7 +-
 .../storm/trident/planner/ProcessorContext.java |     3 +-
 .../storm/trident/planner/ProcessorNode.java    |     4 +-
 .../java/storm/trident/planner/SpoutNode.java   |    10 +-
 .../storm/trident/planner/SubtopologyBolt.java  |    90 +-
 .../storm/trident/planner/TridentProcessor.java |     9 +-
 .../storm/trident/planner/TupleReceiver.java    |     5 +-
 .../planner/processor/AggregateProcessor.java   |    11 +-
 .../planner/processor/AppendCollector.java      |    11 +-
 .../planner/processor/EachProcessor.java        |    11 +-
 .../planner/processor/FreshCollector.java       |    13 +-
 .../processor/MultiReducerProcessor.java        |    15 +-
 .../processor/PartitionPersistProcessor.java    |    19 +-
 .../planner/processor/ProjectedProcessor.java   |     9 +-
 .../planner/processor/StateQueryProcessor.java  |    21 +-
 .../planner/processor/TridentContext.java       |    26 +-
 .../storm/trident/spout/BatchSpoutExecutor.java |    16 +-
 .../main/java/storm/trident/spout/IBatchID.java |     2 +-
 .../java/storm/trident/spout/IBatchSpout.java   |     5 +
 .../trident/spout/ICommitterTridentSpout.java   |     6 +-
 .../spout/IOpaquePartitionedTridentSpout.java   |    32 +-
 .../trident/spout/IPartitionedTridentSpout.java |    40 +-
 .../java/storm/trident/spout/ITridentSpout.java |    48 +-
 .../OpaquePartitionedTridentSpoutExecutor.java  |    67 +-
 .../spout/PartitionedTridentSpoutExecutor.java  |    59 +-
 .../trident/spout/RichSpoutBatchExecutor.java   |    70 +-
 .../storm/trident/spout/RichSpoutBatchId.java   |     9 +-
 .../spout/RichSpoutBatchIdSerializer.java       |     3 +-
 .../trident/spout/RichSpoutBatchTriggerer.java  |    41 +-
 .../trident/spout/TridentSpoutCoordinator.java  |    10 +-
 .../trident/spout/TridentSpoutExecutor.java     |    31 +-
 .../storm/trident/state/BaseQueryFunction.java  |     3 +-
 .../storm/trident/state/BaseStateUpdater.java   |     3 +-
 .../trident/state/CombinerValueUpdater.java     |     8 +-
 .../state/JSONNonTransactionalSerializer.java   |     3 +-
 .../trident/state/JSONOpaqueSerializer.java     |     3 +-
 .../state/JSONTransactionalSerializer.java      |     3 +-
 .../java/storm/trident/state/OpaqueValue.java   |    20 +-
 .../java/storm/trident/state/QueryFunction.java |     1 +
 .../java/storm/trident/state/ReadOnlyState.java |     2 +-
 .../trident/state/ReducerValueUpdater.java      |     8 +-
 .../java/storm/trident/state/Serializer.java    |     2 +-
 .../main/java/storm/trident/state/State.java    |    15 +-
 .../java/storm/trident/state/StateSpec.java     |     3 +-
 .../java/storm/trident/state/StateType.java     |     5 +-
 .../java/storm/trident/state/StateUpdater.java  |     1 -
 .../storm/trident/state/TransactionalValue.java |     9 +-
 .../trident/state/map/CachedBatchReadsMap.java  |    17 +-
 .../java/storm/trident/state/map/CachedMap.java |    11 +-
 .../storm/trident/state/map/IBackingMap.java    |     6 +-
 .../state/map/MapCombinerAggStateUpdater.java   |    22 +-
 .../state/map/MapReducerAggStateUpdater.java    |    18 +-
 .../state/snapshot/ReadOnlySnapshottable.java   |     2 +-
 .../storm/trident/testing/FeederBatchSpout.java |    73 +-
 .../testing/FeederCommitterBatchSpout.java      |     2 +-
 .../trident/testing/LRUMemoryMapState.java      |     4 +-
 .../storm/trident/testing/MemoryMapState.java   |     4 +-
 .../topology/TridentTopologyBuilder.java        |    28 +-
 .../java/storm/trident/util/TridentUtils.java   |    12 +-
 jstorm-core/src/main/py/storm/DistributedRPC.py |    20 +-
 .../main/py/storm/DistributedRPCInvocations.py  |    77 +-
 jstorm-core/src/main/py/storm/Nimbus-remote     |    98 +-
 jstorm-core/src/main/py/storm/Nimbus.py         |  2064 +-
 jstorm-core/src/main/py/storm/ttypes.py         |  2798 +-
 jstorm-core/src/main/resources/defaults.yaml    |    37 +-
 jstorm-core/src/main/resources/logback-test.xml |    26 +-
 jstorm-core/src/main/resources/version          |     2 +-
 .../com/alibaba/jstorm/cache/RocksDBTest.java   |   270 +-
 .../jstorm/common/metric/AsmMetricTest.java     |    29 +
 .../metric/old/window/StatBucketsTest.java      |    27 +
 .../jstorm/message/context/ContextTest.java     |     3 +-
 .../jstorm/message/netty/NettyUnitTest.java     |    78 +-
 .../alibaba/jstorm/message/zmq/ZmqUnitTest.java |     6 +-
 .../com/alibaba/jstorm/metric/MetricTest.java   |   380 -
 .../superivosr/deamon/HttpserverTest.java       |     7 +-
 .../alibaba/jstorm/topology/SingleJoinBolt.java |    38 +-
 .../alibaba/jstorm/topology/SingleJoinTest.java |    10 +-
 .../jstorm/topology/TransactionalWordsTest.java |   119 +-
 .../jstorm/util/queue/DisruptorTest.java        |   170 +-
 .../alibaba/jstorm/utils/JStormUtilsTest.java   |    14 +-
 .../com/alibaba/jstorm/utils/TestThrift.java    |     5 -
 jstorm-ui/pom.xml                               |   235 +-
 .../alibaba/jstorm/ui/DescendComparator.java    |    42 -
 .../alibaba/jstorm/ui/NimbusClientManager.java  |   113 -
 .../main/java/com/alibaba/jstorm/ui/UIDef.java  |   114 -
 .../java/com/alibaba/jstorm/ui/UIMetrics.java   |    62 -
 .../java/com/alibaba/jstorm/ui/UIUtils.java     |   549 -
 .../jstorm/ui/api/ClusterAPIController.java     |    61 +
 .../jstorm/ui/api/ConfAPIController.java        |    56 +
 .../jstorm/ui/api/TopologyAPIController.java    |   107 +
 .../jstorm/ui/controller/ClusterController.java |   152 +
 .../ui/controller/ClustersController.java       |    60 +
 .../ui/controller/ComponentController.java      |   215 +
 .../jstorm/ui/controller/ConfController.java    |    69 +
 .../jstorm/ui/controller/FilesController.java   |   132 +
 .../jstorm/ui/controller/JStackController.java  |    73 +
 .../jstorm/ui/controller/LogController.java     |   260 +
 .../jstorm/ui/controller/NettyController.java   |   135 +
 .../ui/controller/SupervisorController.java     |   136 +
 .../jstorm/ui/controller/TaskController.java    |   191 +
 .../ui/controller/TopologyController.java       |   249 +
 .../alibaba/jstorm/ui/model/ClusterConfig.java  |   103 +
 .../alibaba/jstorm/ui/model/ClusterEntity.java  |   108 +
 .../com/alibaba/jstorm/ui/model/ColumnData.java |    59 -
 .../com/alibaba/jstorm/ui/model/LinkData.java   |    65 -
 .../alibaba/jstorm/ui/model/NimbusEntity.java   |   152 +
 .../alibaba/jstorm/ui/model/PageGenerator.java  |    32 -
 .../com/alibaba/jstorm/ui/model/PageIndex.java  |   151 -
 .../com/alibaba/jstorm/ui/model/Response.java   |    48 +
 .../com/alibaba/jstorm/ui/model/TableData.java  |    41 -
 .../com/alibaba/jstorm/ui/model/TaskEntity.java |   121 +
 .../alibaba/jstorm/ui/model/UIBasicMetric.java  |    57 +
 .../jstorm/ui/model/UIComponentMetric.java      |   173 +
 .../alibaba/jstorm/ui/model/UINettyMetric.java  |    66 +
 .../alibaba/jstorm/ui/model/UIStreamMetric.java |    44 +
 .../jstorm/ui/model/UISummaryMetric.java        |    43 +
 .../alibaba/jstorm/ui/model/UITaskMetric.java   |    72 +
 .../jstorm/ui/model/UIUserDefinedMetric.java    |    89 +
 .../alibaba/jstorm/ui/model/UIWorkerMetric.java |    73 +
 .../jstorm/ui/model/ZooKeeperEntity.java        |    58 +
 .../jstorm/ui/model/graph/ChartSeries.java      |    89 +
 .../jstorm/ui/model/graph/TopologyEdge.java     |   130 +
 .../jstorm/ui/model/graph/TopologyGraph.java    |    67 +
 .../jstorm/ui/model/graph/TopologyNode.java     |   118 +
 .../alibaba/jstorm/ui/model/graph/TreeNode.java |   135 +
 .../jstorm/ui/model/pages/ClusterPage.java      |   324 -
 .../jstorm/ui/model/pages/ComponentPage.java    |   473 -
 .../alibaba/jstorm/ui/model/pages/ConfPage.java |   167 -
 .../alibaba/jstorm/ui/model/pages/HomePage.java |   182 -
 .../jstorm/ui/model/pages/JStackPage.java       |   135 -
 .../jstorm/ui/model/pages/ListLogsPage.java     |   232 -
 .../alibaba/jstorm/ui/model/pages/LogPage.java  |   300 -
 .../jstorm/ui/model/pages/NettyPage.java        |   131 -
 .../jstorm/ui/model/pages/SupervisorPage.java   |   281 -
 .../jstorm/ui/model/pages/TablePage.java        |   154 -
 .../jstorm/ui/model/pages/TopologyPage.java     |   211 -
 .../jstorm/ui/model/pages/WindowTablePage.java  |   105 -
 .../java/com/alibaba/jstorm/ui/tags/CpuTag.java |    97 +
 .../com/alibaba/jstorm/ui/tags/ErrorTag.java    |   121 +
 .../com/alibaba/jstorm/ui/tags/HostTag.java     |    47 +
 .../alibaba/jstorm/ui/tags/PaginationTag.java   |   115 +
 .../com/alibaba/jstorm/ui/tags/PrettyTag.java   |    88 +
 .../com/alibaba/jstorm/ui/tags/StatusTag.java   |    69 +
 .../alibaba/jstorm/ui/tags/SubMetricTag.java    |    87 +
 .../jstorm/ui/utils/NimbusClientManager.java    |    86 +
 .../java/com/alibaba/jstorm/ui/utils/UIDef.java |    59 +
 .../alibaba/jstorm/ui/utils/UIMetricUtils.java  |   170 +
 .../com/alibaba/jstorm/ui/utils/UIUtils.java    |   818 +
 jstorm-ui/src/main/resources/log4j.properties   |     4 +-
 jstorm-ui/src/main/resources/logback-test.xml   |    65 -
 jstorm-ui/src/main/resources/logback.xml        |    59 +
 jstorm-ui/src/main/webapp/META-INF/context.xml  |     2 -
 .../src/main/webapp/WEB-INF/faces-config.xml    |    26 -
 .../webapp/WEB-INF/mvc-dispatcher-servlet.xml   |    39 +
 .../src/main/webapp/WEB-INF/pages/cluster.jsp   |   268 +
 .../src/main/webapp/WEB-INF/pages/clusters.jsp  |    65 +
 .../src/main/webapp/WEB-INF/pages/component.jsp |   249 +
 .../src/main/webapp/WEB-INF/pages/conf.jsp      |    48 +
 .../src/main/webapp/WEB-INF/pages/files.jsp     |   108 +
 .../src/main/webapp/WEB-INF/pages/jstack.jsp    |    54 +
 .../webapp/WEB-INF/pages/layout/_breadcrumb.jsp |    51 +
 .../webapp/WEB-INF/pages/layout/_footer.jsp     |    67 +
 .../main/webapp/WEB-INF/pages/layout/_head.jsp  |    32 +
 .../webapp/WEB-INF/pages/layout/_header.jsp     |    86 +
 jstorm-ui/src/main/webapp/WEB-INF/pages/log.jsp |    75 +
 .../src/main/webapp/WEB-INF/pages/netty.jsp     |   120 +
 .../main/webapp/WEB-INF/pages/supervisor.jsp    |   215 +
 .../src/main/webapp/WEB-INF/pages/task.jsp      |   187 +
 .../src/main/webapp/WEB-INF/pages/topology.jsp  |   535 +
 jstorm-ui/src/main/webapp/WEB-INF/tag.tld       |   136 +
 jstorm-ui/src/main/webapp/WEB-INF/web.xml       |    48 +-
 jstorm-ui/src/main/webapp/assets/css/aloha.css  |    42 -
 .../main/webapp/assets/css/bootstrap-theme.css  |   476 -
 .../webapp/assets/css/bootstrap-theme.css.map   |     1 -
 .../webapp/assets/css/bootstrap-theme.min.css   |     4 +-
 .../src/main/webapp/assets/css/bootstrap.css    |  6584 -----
 .../main/webapp/assets/css/bootstrap.css.map    |     1 -
 .../main/webapp/assets/css/bootstrap.min.css    |     4 +-
 .../webapp/assets/css/dataTables.bootstrap.css  |   176 +
 .../src/main/webapp/assets/css/json.human.css   |   117 +
 jstorm-ui/src/main/webapp/assets/css/storm.css  |   197 +
 .../src/main/webapp/assets/css/vis.min.css      |     1 +
 .../src/main/webapp/assets/imgs/favicon.png     |   Bin 0 -> 55144 bytes
 .../src/main/webapp/assets/imgs/jstorm.png      |   Bin 0 -> 6375 bytes
 .../src/main/webapp/assets/js/bootstrap.js      |  2317 --
 .../src/main/webapp/assets/js/bootstrap.min.js  |     8 +-
 .../assets/js/dataTables.bootstrap.min.js       |     8 +
 .../src/main/webapp/assets/js/highcharts.js     |   327 +
 jstorm-ui/src/main/webapp/assets/js/hilitor.js  |   104 +
 .../webapp/assets/js/jquery.dataTables.min.js   |   163 +
 .../src/main/webapp/assets/js/jquery.min.js     |     9 +-
 .../src/main/webapp/assets/js/json.human.js     |   378 +
 jstorm-ui/src/main/webapp/assets/js/npm.js      |    13 -
 jstorm-ui/src/main/webapp/assets/js/storm.js    |   371 +
 jstorm-ui/src/main/webapp/assets/js/vis.min.js  |    43 +
 jstorm-ui/src/main/webapp/assets/js/vue.min.js  |     8 +
 jstorm-ui/src/main/webapp/clusters.xhtml        |   122 -
 jstorm-ui/src/main/webapp/log.xhtml             |   102 -
 jstorm-ui/src/main/webapp/table.xhtml           |   105 -
 jstorm-ui/src/main/webapp/windowtable.xhtml     |   146 -
 jstorm-ui/src/test/java/UIUtilsTest.java        |    37 +
 other/storm.thrift                              |   203 +-
 pom.xml                                         |   416 +-
 release.xml                                     |   205 +-
 version                                         |     2 +-
 982 files changed, 63491 insertions(+), 48749 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/bin/jstorm.py
----------------------------------------------------------------------
diff --git a/bin/jstorm.py b/bin/jstorm.py
index c4b3fe7..b82d2ff 100755
--- a/bin/jstorm.py
+++ b/bin/jstorm.py
@@ -69,7 +69,9 @@ def get_client_childopts():
     return ret
 
 def get_server_childopts(log_name):
-    ret = (" -Dlogfile.name=%s -Dlogback.configurationFile=%s"  %(log_name, LOGBACK_CONF))
+    jstorm_log_dir = get_log_dir()
+    gc_log_path = jstorm_log_dir + "/" + log_name + ".gc"
+    ret = (" -Xloggc:%s -Dlogfile.name=%s -Dlogback.configurationFile=%s -Djstorm.log.dir=%s "  %(gc_log_path, log_name, LOGBACK_CONF, jstorm_log_dir))
     return ret
 
 if not os.path.exists(JSTORM_DIR + "/RELEASE"):
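
For readers skimming the patch: get_server_childopts now derives a per-daemon GC log path from the configured log directory. A minimal sketch of the resulting option string, assuming hypothetical paths (LOGBACK_CONF and the log dir below are illustrative, not values from this commit):

    # Illustrative sketch only; mirrors the patched helper with assumed paths.
    LOGBACK_CONF = "/opt/jstorm/conf/jstorm.logback.xml"  # hypothetical location

    def sketch_server_childopts(log_name, jstorm_log_dir):
        gc_log_path = jstorm_log_dir + "/" + log_name + ".gc"
        return (" -Xloggc:%s -Dlogfile.name=%s -Dlogback.configurationFile=%s"
                " -Djstorm.log.dir=%s " % (gc_log_path, log_name, LOGBACK_CONF, jstorm_log_dir))

    print sketch_server_childopts("nimbus.log", "/home/admin/logs")
    # -> " -Xloggc:/home/admin/logs/nimbus.log.gc -Dlogfile.name=nimbus.log ..."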
@@ -103,8 +105,8 @@ def get_classpath(extrajars):
     ret.extend(get_jars_full(JSTORM_DIR))
     ret.extend(get_jars_full(JSTORM_DIR + "/lib"))
     ret.extend(INCLUDE_JARS)
-    
     return normclasspath(":".join(ret))
+    
 
 def confvalue(name, extrapaths):
     command = [
@@ -130,6 +132,18 @@ def print_localconfvalue(name):
     """
     print name + ": " + confvalue(name, [JSTORM_CONF_DIR])
 
+def get_log_dir():
+    cppaths = [JSTORM_CONF_DIR]
+    jstorm_log_dir = confvalue("jstorm.log.dir", cppaths)
+    if not jstorm_log_dir == "null":
+        if not os.path.exists(jstorm_log_dir):
+            os.mkdir(jstorm_log_dir)
+    else:
+        jstorm_log_dir = JSTORM_DIR + "/logs"
+        if not os.path.exists(jstorm_log_dir):
+            os.mkdir(jstorm_log_dir)
+    return jstorm_log_dir
+
 def print_remoteconfvalue(name):
     """Syntax: [jstorm remoteconfvalue conf-name]
 
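The new get_log_dir helper treats the literal string "null" from confvalue as unset and falls back to $JSTORM_HOME/logs, creating the directory on first use. A condensed sketch of that fallback, with an assumed install dir:

    import os

    # Condensed sketch (not from this commit); confvalue() yields the string
    # "null" when jstorm.log.dir is not configured.
    def sketch_log_dir(configured, jstorm_dir="/opt/jstorm"):
        log_dir = configured if configured != "null" else jstorm_dir + "/logs"
        if not os.path.exists(log_dir):
            os.mkdir(log_dir)  # like the patch, this fails if the parent is missing
        return log_dir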
@@ -141,11 +155,17 @@ def print_remoteconfvalue(name):
     """
     print name + ": " + confvalue(name, [JSTORM_CONF_DIR])
 
+
 def exec_storm_class(klass, jvmtype="-server", childopts="", extrajars=[], args=[]):
     nativepath = confvalue("java.library.path", extrajars)
     args_str = " ".join(map(lambda s: "\"" + s + "\"", args))
-    command = "java " + jvmtype + " -Djstorm.home=" + JSTORM_DIR + " " + get_config_opts() + " -Djava.library.path=" + nativepath + " " + childopts + " -cp " + get_classpath(extrajars) + " " + klass + " " + args_str
-    print "Running: " + command    
+    print args_str
+    if "NimbusServer" in klass:
+        # fix: command line can exceed the ~4096-char limit, so put a lib/ext/* directory wildcard in the classpath; NimbusServer only
+        command = "java " + jvmtype + " -Djstorm.home=" + JSTORM_DIR + " " + get_config_opts() + " -Djava.library.path=" + nativepath + " " + childopts + " -cp " + get_classpath(extrajars) + ":" + JSTORM_DIR + "/lib/ext/* " + klass + " " + args_str
+    else:
+        command = "java " + jvmtype + " -Djstorm.home=" + JSTORM_DIR + " " + get_config_opts() + " -Djava.library.path=" + nativepath + " " + childopts + " -cp " + get_classpath(extrajars) + " " + klass + " " + args_str
+    print "Running: " + command
     global STATUS
     STATUS = os.system(command)
 
@@ -263,12 +283,12 @@ def restart(*args):
         extrajars=[JSTORM_CONF_DIR, JSTORM_DIR + "/bin", CLIENT_CONF_FILE],
         childopts=childopts)
 
-def update_config(*args):
-    """Syntax: [jstorm restart topology-name [conf]]
+def update_topology(*args):
+    """Syntax: [jstorm update_topology topology-name -jar [jarpath] -conf [confpath]]
     """
     childopts = get_client_childopts()
     exec_storm_class(
-        "backtype.storm.command.update_config",
+        "backtype.storm.command.update_topology",
         args=args,
         jvmtype="-client -Xms256m -Xmx256m",
         extrajars=[JSTORM_CONF_DIR, JSTORM_DIR + "/bin", CLIENT_CONF_FILE],
@@ -309,7 +329,6 @@ def supervisor():
         extrajars=cppaths, 
         childopts=childopts)
 
-
 def drpc():
     """Syntax: [jstorm drpc]
 
@@ -388,7 +407,7 @@ COMMANDS = {"jar": jar, "kill": kill, "nimbus": nimbus, "zktool": zktool,
             "drpc": drpc, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
             "remoteconfvalue": print_remoteconfvalue, "classpath": print_classpath,
             "activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
-            "metricsMonitor": metrics_Monitor, "list": list, "restart": restart, "update_config": update_config}
+            "metricsMonitor": metrics_Monitor, "list": list, "restart": restart, "update_topology": update_topology}
 
 def parse_config(config_list):
     global CONFIG_OPTS

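For readers tracing the jstorm.py changes above: the new get_log_dir()/get_server_childopts() pair resolves the log directory once, creates it on demand, and threads it into the daemon JVM options together with a GC log path. A minimal standalone sketch of that flow, assuming (as the patch's own check implies) that confvalue() returns the literal string "null" for unset keys:

    import os

    def resolve_log_dir(confvalue, jstorm_dir, conf_dir):
        # Fall back to $JSTORM_HOME/logs when jstorm.log.dir is unset.
        log_dir = confvalue("jstorm.log.dir", [conf_dir])
        if log_dir == "null":
            log_dir = jstorm_dir + "/logs"
        if not os.path.exists(log_dir):
            # The patch uses os.mkdir, so a missing parent still raises OSError.
            os.mkdir(log_dir)
        return log_dir

    def server_childopts(log_name, log_dir, logback_conf):
        # Mirrors get_server_childopts(): the GC log sits beside the daemon log.
        gc_log_path = log_dir + "/" + log_name + ".gc"
        return (" -Xloggc:%s -Dlogfile.name=%s -Dlogback.configurationFile=%s"
                " -Djstorm.log.dir=%s " % (gc_log_path, log_name, logback_conf, log_dir))

The -Djstorm.log.dir value produced here is what the reworked log4j/logback configurations further down consume.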
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/bin/start.sh
----------------------------------------------------------------------
diff --git a/bin/start.sh b/bin/start.sh
index 483a97b..c7a6e49 100644
--- a/bin/start.sh
+++ b/bin/start.sh
@@ -41,13 +41,13 @@ else
 fi
 echo "JSTORM_HOME =" $JSTORM_HOME
 
-if [ "x$JSTORM_CONF_DIR_PATH" != "x" ]
+if [ "x$JSTORM_CONF_DIR" != "x" ]
 then
-    echo "JSTORM_CONF_DIR_PATH has been set " 
+    echo "JSTORM_CONF_DIR has been set " 
 else
-    export JSTORM_CONF_DIR_PATH=$JSTORM_HOME/conf
+    export JSTORM_CONF_DIR=$JSTORM_HOME/conf
 fi
-echo "JSTORM_CONF_DIR_PATH =" $JSTORM_CONF_DIR_PATH
+echo "JSTORM_CONF_DIR =" $JSTORM_CONF_DIR
 
 
 
@@ -77,16 +77,18 @@ function startJStorm()
 
 
 HOSTNAME=`hostname -i`
-NIMBUS_HOST=`grep "nimbus.host:" $JSTORM_CONF_DIR_PATH/storm.yaml  | grep -w $HOSTNAME`
-SUPERVISOR_HOST_START=`grep "supervisor.host.start:" $JSTORM_CONF_DIR_PATH/storm.yaml  | grep -w "false"`
+NIMBUS_HOST=`grep "nimbus.host:" $JSTORM_CONF_DIR/storm.yaml |grep -v "#" | grep -w $HOSTNAME`
+NIMBUS_HOST_START_SUPERVISOR=`grep "nimbus.host.start.supervisor:" $JSTORM_CONF_DIR/storm.yaml |grep -v "#" | grep -wi "false"`
 
 if [ "X${NIMBUS_HOST}" != "X" ]
 then
 	startJStorm "nimbus" "NimbusServer"
 fi
 
-if [ "X${SUPERVISOR_HOST_START}" == "X" ]
+if [ "X${NIMBUS_HOST}" != "X" ] && [ "X${NIMBUS_HOST_START_SUPERVISOR}" != "X" ]
 then
+	echo "Skip start Supervisor on nimbus host"
+else
 	startJStorm "supervisor" "Supervisor"
 fi
 

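The net effect of the start.sh rewrite is a new decision table: nimbus still starts only on hosts listed in nimbus.host, and the supervisor is now skipped only on a nimbus host that explicitly sets nimbus.host.start.supervisor to false (commented lines no longer match, thanks to grep -v "#"). A rough restatement in Python, with invented inputs:

    def plan_start(hostname, nimbus_hosts, start_supervisor_on_nimbus):
        # nimbus_hosts: IPs from the uncommented nimbus.host line.
        # start_supervisor_on_nimbus is False only when the uncommented
        # nimbus.host.start.supervisor line literally says "false".
        is_nimbus = hostname in nimbus_hosts
        start_supervisor = not (is_nimbus and not start_supervisor_on_nimbus)
        return is_nimbus, start_supervisor

    print(plan_start("10.0.0.1", ["10.0.0.1"], False))  # (True, False): nimbus only
    print(plan_start("10.0.0.2", ["10.0.0.1"], False))  # (False, True): supervisor only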
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/conf/client_logback.xml
----------------------------------------------------------------------
diff --git a/conf/client_logback.xml b/conf/client_logback.xml
index 27799bf..2ed3445 100755
--- a/conf/client_logback.xml
+++ b/conf/client_logback.xml
@@ -23,10 +23,11 @@
 		</encoder>
 	</appender>
 	<logger name="org.apache.zookeeper" level="ERROR" />
+	<logger name="org.apache.curator" level="ERROR" />
 	<logger name="com.netflix.curator" level="ERROR" />
-	<logger name="com.alibaba.jstorm.common.metric" level="ERROR" />	
-	<logger name="com.alibaba.jstorm.daemon.nimbus.TopologyMetricsRunnable" level="ERROR" />
-	<logger name="com.alibaba.jstorm.metric" level="ERROR" />
+	<logger name="com.alibaba.jstorm.common.metric" level="INFO" />
+	<logger name="com.alibaba.jstorm.daemon.nimbus.TopologyMetricsRunnable" level="INFO" />
+	<logger name="com.alibaba.jstorm.metric" level="INFO" />
 
 	<root level="INFO">
 		<appender-ref ref="A1" />

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/conf/jstorm.log4j.properties
----------------------------------------------------------------------
diff --git a/conf/jstorm.log4j.properties b/conf/jstorm.log4j.properties
index 7a3700e..3d67db9 100755
--- a/conf/jstorm.log4j.properties
+++ b/conf/jstorm.log4j.properties
@@ -33,7 +33,7 @@ log4j.appender.stdout.layout.ConversionPattern =  [%p  %d{yyyy-MM-dd HH:mm:ss} %
 
 ### output to file ###
 log4j.appender.D = org.apache.log4j.RollingFileAppender
-log4j.appender.D.File = ${jstorm.home}/logs/${logfile.name}
+log4j.appender.D.File = ${jstorm.log.dir}/${logfile.name}
 log4j.appender.D.Append = true
 log4j.appender.D.Threshold = INFO
 log4j.appender.D.MaxFileSize=1GB
@@ -54,7 +54,7 @@ log4j.logger.com.alibaba.jstorm.metric= INFO, M
 log4j.additivity.com.alibaba.jstorm.metric=false
 
 log4j.appender.M = org.apache.log4j.RollingFileAppender
-log4j.appender.M.File = ${jstorm.home}/logs/${logfile.name}.metrics
+log4j.appender.M.File = ${jstorm.log.dir}/${logfile.name}.metrics
 log4j.appender.M.Append = true
 log4j.appender.M.Threshold = INFO 
 log4j.appender.M.MaxFileSize=100MB

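Both appender paths now key off the jstorm.log.dir system property injected by jstorm.py, instead of hardcoding ${jstorm.home}/logs. A toy expansion shows where the files land; log4j performs this substitution itself, and the property values below are made up:

    def expand(pattern, props):
        # Stand-in for log4j's ${...} substitution, for illustration only.
        for key, value in props.items():
            pattern = pattern.replace("${%s}" % key, value)
        return pattern

    print(expand("${jstorm.log.dir}/${logfile.name}",
                 {"jstorm.log.dir": "/data/jstorm/logs",
                  "logfile.name": "nimbus.log"}))
    # -> /data/jstorm/logs/nimbus.log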
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/conf/jstorm.logback.xml
----------------------------------------------------------------------
diff --git a/conf/jstorm.logback.xml b/conf/jstorm.logback.xml
index 0b85a00..419565a 100755
--- a/conf/jstorm.logback.xml
+++ b/conf/jstorm.logback.xml
@@ -17,84 +17,76 @@
 -->
 
 <configuration scan="true" scanPeriod="60 seconds">
-	<appender name="A1"
-		class="ch.qos.logback.core.rolling.RollingFileAppender">
-		<file>${jstorm.home}/logs/${logfile.name}</file>
-		<rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
-			<fileNamePattern>${jstorm.home}/logs/${logfile.name}.%i</fileNamePattern>
-			<minIndex>1</minIndex>
-			<maxIndex>5</maxIndex>
-		</rollingPolicy>
+    <appender name="A1" class="ch.qos.logback.core.rolling.RollingFileAppender">
+        <file>${jstorm.log.dir}/${topology.name:-.}/${logfile.name}</file>
+        <rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+            <fileNamePattern>${jstorm.log.dir}/${topology.name:-.}/${logfile.name}.%i</fileNamePattern>
+            <minIndex>1</minIndex>
+            <maxIndex>5</maxIndex>
+        </rollingPolicy>
 
-		<triggeringPolicy
-			class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
-			<maxFileSize>1GB</maxFileSize>
-		</triggeringPolicy>
+        <triggeringPolicy class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+            <maxFileSize>1GB</maxFileSize>
+        </triggeringPolicy>
 
-		<encoder>
-			<pattern>[%p %d{yyyy-MM-dd HH:mm:ss} %c{1}:%L %t] %m%n</pattern>
+        <encoder>
+            <pattern>[%p %d{yyyy-MM-dd HH:mm:ss} %c{1}:%L %t] %m%n</pattern>
+        </encoder>
+    </appender>
 
-		</encoder>
-	</appender>
+    <appender name="METRICS" class="ch.qos.logback.core.rolling.RollingFileAppender">
+        <file>${jstorm.log.dir}/${topology.name:-.}/${logfile.name}.metrics</file>
+        <rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+            <fileNamePattern>${jstorm.log.dir}/${topology.name:-.}/${logfile.name}.metrics.%i</fileNamePattern>
+            <minIndex>1</minIndex>
+            <maxIndex>5</maxIndex>
+        </rollingPolicy>
 
-	<appender name="METRICS"
-		class="ch.qos.logback.core.rolling.RollingFileAppender">
-		<file>${jstorm.home}/logs/${logfile.name}.metrics</file>
-		<rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
-			<fileNamePattern>${jstorm.home}/logs/${logfile.name}.metrics.%i</fileNamePattern>
-			<minIndex>1</minIndex>
-			<maxIndex>5</maxIndex>
-		</rollingPolicy>
+        <triggeringPolicy class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+            <maxFileSize>100MB</maxFileSize>
+        </triggeringPolicy>
 
-		<triggeringPolicy
-			class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
-			<maxFileSize>100MB</maxFileSize>
-		</triggeringPolicy>
+        <encoder>
+            <!--<pattern>[%p %d{yyyy-MM-dd HH:mm:ss} %c{1}:%L %t] %m%n</pattern>-->
+            <pattern>[%d{yyyy-MM-dd HH:mm:ss} %t] %m%n</pattern>
+        </encoder>
+    </appender>
 
-		<encoder>
-			<pattern>[%p %d{yyyy-MM-dd HH:mm:ss} %c{1}:%L %t] %m%n</pattern>
-		</encoder>
-	</appender>
+    <root level="INFO">
+        <appender-ref ref="A1"/>
+    </root>
+    <logger name="org.apache.zookeeper" level="ERROR" />
+    <logger name="org.apache.curator" level="ERROR" />
+    <logger name="com.netflix.curator" level="ERROR" />
 
-	<root level="INFO">
-		<appender-ref ref="A1" />
-	</root>
+    <logger name="com.alibaba.jstorm" additivity="false">
+        <level value="INFO"/>
+        <appender-ref ref="A1"/>
+    </logger>
 
-	<logger name="com.alibaba.jstorm"
-		additivity="false">
-		<level value="INFO" />
-		<appender-ref ref="A1" />
-	</logger>
+    <logger name="com.alibaba.jstorm.common.metric" additivity="false">
+        <level value="INFO"/>
+        <appender-ref ref="METRICS"/>
+    </logger>
 
-	<logger name="com.alibaba.jstorm.common.metric"
-		additivity="false">
-		<level value="INFO" />
-		<appender-ref ref="METRICS" />
-	</logger>
-	
-	<logger name="com.alibaba.jstorm.task.heartbeat"
-		additivity="false">
-		<level value="INFO" />
-		<appender-ref ref="METRICS" />
-	</logger>
-	
-	<logger name="com.alibaba.jstorm.daemon.worker.hearbeat"
-		additivity="false">
-		<level value="INFO" />
-		<appender-ref ref="METRICS" />
-	</logger>
-	
-	<logger name="com.alibaba.jstorm.daemon.nimbus.TopologyMetricsRunnable"
-		additivity="false">
-		<level value="INFO" />
-		<appender-ref ref="METRICS" />
-	</logger>
-	
-	<logger name="com.alibaba.jstorm.metric"
-		additivity="false">
-		<level value="INFO" />
-		<appender-ref ref="METRICS" />
-	</logger>
+    <logger name="com.alibaba.jstorm.task.heartbeat" additivity="false">
+        <level value="WARN"/>
+        <appender-ref ref="METRICS"/>
+    </logger>
 
+    <logger name="com.alibaba.jstorm.daemon.worker.hearbeat" additivity="false">
+        <level value="WARN"/>
+        <appender-ref ref="METRICS"/>
+    </logger>
+
+    <logger name="com.alibaba.jstorm.daemon.nimbus.metric" additivity="false">
+        <level value="INFO"/>
+        <appender-ref ref="METRICS"/>
+    </logger>
+
+    <logger name="com.alibaba.jstorm.metric" additivity="false">
+        <level value="INFO"/>
+        <appender-ref ref="METRICS"/>
+    </logger>
 </configuration>
  
\ No newline at end of file

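The new appender paths lean on logback's ":-" default syntax: ${topology.name:-.} resolves to "." for daemons, which set no topology.name, so their logs stay directly under jstorm.log.dir, while each worker writes into a per-topology subdirectory (the "classify all topology logs by topologyName" item in history.md below). A minimal re-implementation of just that syntax, for illustration only:

    import re

    def resolve(pattern, props):
        # Toy ${key:-default} expansion in the spirit of logback's resolver.
        def sub(match):
            key, _, default = match.group(1).partition(":-")
            return props.get(key, default)
        return re.sub(r"\$\{([^}]+)\}", sub, pattern)

    pattern = "${jstorm.log.dir}/${topology.name:-.}/${logfile.name}"
    print(resolve(pattern, {"jstorm.log.dir": "/data/logs",
                            "logfile.name": "nimbus.log"}))
    # -> /data/logs/./nimbus.log        (daemon: no topology.name set)
    print(resolve(pattern, {"jstorm.log.dir": "/data/logs",
                            "topology.name": "wordcount",
                            "logfile.name": "worker-6800.log"}))
    # -> /data/logs/wordcount/worker-6800.log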
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/conf/storm.yaml
----------------------------------------------------------------------
diff --git a/conf/storm.yaml b/conf/storm.yaml
index c45d850..2ec2303 100755
--- a/conf/storm.yaml
+++ b/conf/storm.yaml
@@ -20,30 +20,36 @@
      - "localhost"
 
  storm.zookeeper.root: "/jstorm"
- 
- #nimbus.host is being used by $JSTORM_HOME/bin/start.sh
+
+# cluster.name: "default"
+
+ #nimbus.host/nimbus.host.start.supervisor are used by $JSTORM_HOME/bin/start.sh
 #it only supports IPs, please don't set a hostname
- # For example 
+ # For example
  # nimbus.host: "10.132.168.10, 10.132.168.45"
  #nimbus.host: "localhost"
+ #nimbus.host.start.supervisor: false
  
 # %JSTORM_HOME% is the jstorm home directory
  storm.local.dir: "%JSTORM_HOME%/data"
+ # please set absolute path, default path is JSTORM_HOME/logs
+# jstorm.log.dir: "absolute path"
  
- java.library.path: "/usr/local/lib:/opt/local/lib:/usr/lib"
+# java.library.path: "/usr/local/lib:/opt/local/lib:/usr/lib"
 
 
 
 # if supervisor.slots.ports is null, 
 # the port list will be generated by cpu cores and system memory size 
-# for example, if there are 24 cpu cores and supervisor.slots.port.cpu.weight is 1.2
-# then there are 24/1.2 ports for cpu, 
-# there are system_physical_memory_size/worker.memory.size ports for memory 
-# The final port number is min(cpu_ports, memory_port)
- supervisor.slots.ports.base: 6800
- supervisor.slots.port.cpu.weight: 1
- supervisor.slots.ports: null
-#supervisor.slots.ports:
+# for example,
+# cpu_num = system_physical_cpu_num / supervisor.slots.port.cpu.weight
+# mem_num = system_physical_memory_size / (worker.memory.size * supervisor.slots.port.mem.weight)
+# The final port number is min(cpu_num, mem_num)
+# supervisor.slots.ports.base: 6800
+# supervisor.slots.port.cpu.weight: 1.2
+# supervisor.slots.port.mem.weight: 0.7
+# supervisor.slots.ports: null
+# supervisor.slots.ports:
 #    - 6800
 #    - 6801
 #    - 6802
@@ -52,7 +58,7 @@
 # The user-defined classloader is disabled by default
 # If there are jar conflicts between jstorm and the application,
 # please enable it
- topology.enable.classloader: false
+# topology.enable.classloader: false
 
 # enable supervisor use cgroup to make resource isolation
 # Before enable it, you should make sure:
@@ -61,22 +67,22 @@
 #	3. You should start your supervisor on root
 # You can get more about cgroup:
 #   http://t.cn/8s7nexU
- supervisor.enable.cgroup: false
+# supervisor.enable.cgroup: false
 
 
 ### Netty will send multiple messages in one batch
 ### Setting this to true improves throughput but adds latency
- storm.messaging.netty.transfer.async.batch: true
+# storm.messaging.netty.transfer.async.batch: true
 
 ### if this setting is true, it will use disruptor as the internal queue, whose size is limited
 ### otherwise, it will use LinkedBlockingDeque as the internal queue, whose size is unlimited
 ### generally, when this setting is true, the topology will be more stable,
 ### but when there is a data loop flow, for example A -> B -> C -> A,
 ### and the data flow blocks, please set this to false
- topology.buffer.size.limited: true
+# topology.buffer.size.limited: true
  
 ### default worker memory size, unit is byte
- worker.memory.size: 2147483648
+# worker.memory.size: 2147483648
 
 # Metrics Monitor
 # topology.performance.metrics: it is the switch flag for performance 
@@ -85,8 +91,8 @@
 # topology.alimonitor.metrics.post: If it is disable, metrics data
 # will only be printed to log. If it is enabled, the metrics data will be
 # posted to alimonitor besides printing to log.
- topology.performance.metrics: true
- topology.alimonitor.metrics.post: false
+# topology.performance.metrics: true
+# topology.alimonitor.metrics.post: false
 
 # UI MultiCluster
 # Following is an example of multicluster UI configuration

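The rewritten comment block documents the automatic slot-count formula. A worked example with invented hardware (24 physical cores, 64 GB RAM) and the default weights shown in the comments:

    cpu_num = 24 / 1.2                          # cores / supervisor.slots.port.cpu.weight -> 20.0
    mem_num = 64 * 2**30 / (2147483648 * 0.7)   # RAM / (worker.memory.size * mem.weight) -> ~45.7
    ports = int(min(cpu_num, mem_num))          # -> 20 slots, presumably 6800..6819 from ports.base
    print("cpu_num=%.1f mem_num=%.1f ports=%d" % (cpu_num, mem_num, ports))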
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/history.md
----------------------------------------------------------------------
diff --git a/history.md b/history.md
index efc2056..bfdf75f 100755
--- a/history.md
+++ b/history.md
@@ -1,22 +1,93 @@
 [JStorm English introduction](http://42.121.19.155/jstorm/JStorm-introduce-en.pptx)
 [JStorm Chinese introduction](http://42.121.19.155/jstorm/JStorm-introduce.pptx)
 
-#Release 2.0.4-SNAPSHOT
+# Release 2.1.0
+
+## New features
+
+1. Totally redesign the Web UI
+	1.	Make the UI more beautiful
+	1.	Greatly improve Web UI speed
+	1.	Add cluster/topology level summarized metrics for the most recent 30 minutes
+	1.	Add a DAG to the Web UI, with user interaction to get key information such as emit, tuple lifecycle, tps
+1. Redesign the Metrics/Monitor System
+	1.	New metrics core: supports sampling with more metrics, avoids noise, and merges metrics automatically for users
+	1.	No metrics are stored in ZK
+	1.	Support metrics HA
+	1.	Add more useful metrics, such as tuple lifecycle time, netty metrics, disk space, etc.; accurately measure worker memory
+	1.	Support external storage plugins for metrics
+1. Implement Smart Backpressure
+	1.	Smart backpressure keeps the dataflow more stable and avoids spurious triggering
+	1.	Easy manual control of backpressure
+1. Implement TopologyMaster
+	1.	Redesign the heartbeat mechanism to easily support 6000+ tasks
+	1.	Collect all tasks' metrics and merge them, relieving Nimbus pressure
+	1.	Central control coordinator that issues control commands
+1. Redesign ZK usage: one ZK ensemble supports 2000+ hardware nodes
+	1.	No dynamic data in ZK, such as heartbeats, metrics, monitor status
+	1.	Nimbus reduces ZK access frequency when serving the thrift API
+	1.	Reduce ZK access frequency by merging some task-level ZK nodes
+	1.	Reduce ZK access frequency by removing useless ZK nodes, such as empty taskerror nodes
+	1.	Tune the ZK cache
+	1.	Optimize the ZK reconnect mechanism
+1. Tune executor batch performance
+	1.	Add a smart batch size setting
+	1.	Remove memory copies
+	1.	Directly issue tuples without batching on internal channels
+	1.	Set the default serialize/deserialize method to Kryo
+1. Set the default serialize/deserialize method to Kryo to improve performance
+1. Support dynamic reload of binary/configuration
+1. Tune LocalShuffle performance: set a 3-level priority (local worker, local node, other node), and add dynamic checks of queue and connection status
+1. Optimize Nimbus HA: only the highest-priority nimbus can be promoted to master
+
+## Improvement
+1. Supervisor automatically dumps worker jstack/jmap when the worker's status is invalid.
+1. Supervisor can generate more ports according to memory.
+1. Supervisor can retry the binary download more times.
+1. Support setting the log dir in configuration.
+1. Add configuration "nimbus.host.start.supervisor".
+1. Add supervisor/nimbus/drpc GC logs.
+1. Adjust JVM parameters: 1. set -Xmn to 1/2 of heap memory; 2. set PermSize to 1/32 and MaxPermSize to 1/16 of heap memory; 3. set -Xms by "worker.memory.min.size".
+1. Refine the ZK error schema: when a worker is dead, the UI will report an error.
+1. Add functions to the zktool utility: support removing all topology znodes, support list
+1. Optimize the netty client.
+1. Dynamically update connected task status via the network connection, not via ZK znodes.
+1. Add configuration "topology.enable.metrics".
+1. Classify all topology logs into one directory per topologyName.
+
+## Bug fix
+1. Skip downloading the same binary when the assignment has changed.
+1. Skip starting the worker when the binary is invalid.
+1. Use the correct configuration map in many worker threads.
+1. Nimbus now checks whether the topologyName exists as the first step of topology submission.
+1. Support fieldGrouping for Object[].
+1. Make DRPC a single instance under one configuration.
+1. In the client topologyNameExists interface, directly use the thrift API.
+1. Fix a restart failure caused by contention with the topology cleanup thread.
+1. Fix the bug that backpressure might be lost when the triggering bolt failed.
+1. Fix the bug that DefaultMetricUploader did not delete metrics data in rocksdb, preventing new metrics data from being appended.
+
+## Deploy and scripts
+1. Optimize cleandisk.sh to avoid deleting useful worker logs
+
+# Release 2.0.4-SNAPSHOT
+
 ## New features
-1.Redesign Metric/Monitor system, new RollingWindow/Metrics/NettyMetrics, all data will send/recv through thrift
-2.Redesign Web-UI, the new Web-UI code is clear and clean
-3.Add NimbusCache Layer, using RocksDB and TimeCacheWindow
-4.Refactoring all ZK structure and ZK operation
-5.Refactoring all thrift structure
-6.Merge jstorm-client/jstorm-client-extension/jstorm-core 3 modules into jstorm-core
-7.set the dependency version same as storm
-8.Sync apache-storm-0.10.0-beta1 all java code
-9.Switch log system to logback
-10.Upgrade thrift to apache thrift 0.9.2
+1. Redesign Metric/Monitor system, new RollingWindow/Metrics/NettyMetrics, all data will send/recv through thrift
+2. Redesign Web-UI, the new Web-UI code is clear and clean
+3. Add NimbusCache Layer, using RocksDB and TimeCacheWindow
+4. Refactoring all ZK structure and ZK operation
+5. Refactoring all thrift structure
+6. Merge jstorm-client/jstorm-client-extension/jstorm-core 3 modules into jstorm-core
+7. Set the dependency version same as storm
+8. Sync apache-storm-0.10.0-beta1 all java code
+9. Switch log system to logback
+10. Upgrade thrift to apache thrift 0.9.2
 11. Performance tuning: huge topologies with more than 600 workers or 2000 tasks
 12. Require jdk7 or higher
 
-#Release 0.9.7.1
+# Release 0.9.7.1
+
 ## New Features
 1. Batch the tuples whose target task is the same before sending them out (task.batch.tuple=true, task.msg.batch.size=4).
 2. LocalFirst grouping is updated: if all local tasks are busy, tasks on other nodes will be chosen as targets instead of waiting on the busy local tasks.
@@ -29,21 +100,25 @@
 9. Nimbus or Supervisor suicides when the local ip is 127.0.0.0
 10. Add user-define-scheduler example
 11. Merge Supervisor's syncSupervisor and syncProcess
+
 ## Bug Fix
 1. Improve the GC setting.
 2. Fix the bug that the task heartbeat might not be updated in time in some scenarios.
 3. Fix the bug that the reconnection operation might get stuck for an unexpected period when the connection to a remote worker is shut down and some messages are buffered in netty.
 4. Reuse the thrift client when submitting a topology
 5. Avoid repeatedly downloading the binary when failing to start a worker.
+
 ## Changed setting
 1. Change task's heartbeat timeout to 4 minutes
 2. Set the netty client thread pool(clientScheduleService) size as 5 
+
 ## Deploy and scripts
 1. Improve cleandisk.sh to avoid deleting the current directory and /tmp/hsperfdata_admin
 2. Add executable attribute for the script under example
 3. Add parameter to stat.sh, which can be used to start supervisor or not. This is useful under virtual 
 
-#Release 0.9.7
+# Release 0.9.7
+
 ## New Features
 1. Support dynamic scale-out/scale-in of worker, spout, bolt or acker without stopping the service of topology.
 2. When enable cgroup, Support the upper limit control of cpu core usage. Default setting is 3 cpu cores.
@@ -56,6 +131,7 @@
 9. Add thrift api getVersion, it will be used check between the client jstorm version and the server jstorm version.  
 10. Update the metrics' structure to Alimonitor
 11. Add exclude-jar parameter into jstorm.py, which avoid class conflict when submit topology
+
 ## Bug Fix
 1. Fix the supervisor process not responding when submitting a large number of topologies in a short time
 2. When submitting two or more topologies at the same time, the later one might fail.
@@ -67,17 +143,20 @@
 8. Fix failure to read the ZK monitor znode through zktool
 9. Fix exception when enabling the classloader and local mode
 10. Fix duplicate logs when enabling user-defined logback in local mode
+
 ## Changed Setting
 1. Set Nimbus jvm memory size as 4G
 2. Set the supervisor-to-nimbus heartbeat timeout from 60s to 180s
 3. In order to avoid OOM, set storm.messaging.netty.max.pending as 4
 4. Set task queue size as 1024, worker's total send/receive queue size as 2048
+
 ## Deploy and scripts
 1. Add rpm build spec
 2. Add deploy files of jstorm for rpm package building
 3. Enable the cleandisk cronjob every hour, reserve coredump for only one hour.
 
-#Release 0.9.6.3
+# Release 0.9.6.3
+
 ## New features
 1. Implement tick tuple
 2. Support logback
@@ -87,6 +166,7 @@
 6. Support the use of ip and hostname at the same time for user-defined scheduling
 7. Support junit tests in local mode
 8. Enable client commands (e.g. jstorm jar) to load a self-defined storm.yaml
+
 ## Bug fix
 1. Add activate and deactivate api of spout, which are used in nextTuple prepare phase
 2. Update the support of multi language
@@ -115,7 +195,7 @@
 25. Configure local temporary ports during rpm installation
 26. Add noarch rpm package
 
-#Release 0.9.6.2
+# Release 0.9.6.2
 1. Add option to switch between BlockingQueue and Disruptor
 2. Fix the bug where, under sync netty mode, the client failed to send messages to the server
 3. Fix the bug so that the web UI can display a 0.9.6.1 cluster
@@ -125,7 +205,7 @@
 7. Add the validation of topology name, component name... Only A-Z, a-z, 0-9, '_', '-', '.' are valid now.
 8. Fix the bug close thrift client
 
-#Release 0.9.6.2-rc
+# Release 0.9.6.2-rc
 1. Improve user experience from Web UI
 1.1 Add jstack link
 1.2 Add worker log link in supervisor page
@@ -145,7 +225,7 @@
 11. Add tcp option "reuseAddress" in netty framework
 12. Fix the bug: when a spout does not implement the ICommitterTrident interface, MasterCoordinatorSpout will get stuck in the commit phase.
 
-#Release 0.9.6.2-rc
+# Release 0.9.6.2-rc
 1. Improve user experience from Web UI
 1.1 Add jstack link
 1.2 Add worker log link in supervisor page
@@ -165,7 +245,7 @@
 11. Add tcp option "reuseAddress" in netty framework
 12. Fix the bug: when a spout does not implement the ICommitterTrident interface, MasterCoordinatorSpout will get stuck in the commit phase.
 
-#Release 0.9.6.1
+# Release 0.9.6.1
 1. Add management tools for multiclusters to the Web UI.
 2. Merged Trident API from storm-0.9.3
 3. Replaced gson with fastjson
@@ -191,7 +271,7 @@
 23. Support assign topology to user-defined supervisors
 
 
-#Release 0.9.6
+# Release 0.9.6
 1. Update UI 
   - Display the metrics information of task and worker
   - Add warning flag when errors occur for a topology
@@ -205,7 +285,7 @@
 8. Add closing channel check in netty client to avoid double close
 9. Add connecting check in the netty client to avoid connecting to one server twice at the same time
 
-#Release 0.9.5.1
+# Release 0.9.5.1
 1. Add netty sync mode
 2. Add block operation in netty async mode
 3. Replace exception with Throwable in executor layer
@@ -213,16 +293,18 @@
 5. Add more netty junit test
 6. Add log when queue is full
 
-#Release 0.9.5
-##Big feature:
+# Release 0.9.5
+
+## Big feature:
 1. Redesign the scheduler algorithm to be based on workers, not tasks.
 
 ## Bug fix
 1. Fix disruptor using too much cpu
 2. Log the target NettyServer when failing to send data by netty
 
-#Release 0.9.4.1
-##Bug fix:
+# Release 0.9.4.1
+
+## Bug fix:
 1. Improve speed between tasks running in one worker
 2. Fix wrong timeout seconds
 3. Check the port when the worker initializes and begins to kill the old worker
@@ -241,7 +323,7 @@
 
 
 
-#Release 0.9.4
+# Release 0.9.4
 
 ## Big features
 1. Add transaction programming mode
@@ -257,7 +339,7 @@
 
 
 
-##Bug fix:
+## Bug fix:
 1. Set the buffer size when uploading a jar
 2. Add a lock between the ZK watch and the timer thread when refreshing connections
 3. Enable nimbus monitor thread only when topology is running in cluster mode
@@ -265,7 +347,7 @@
 5. Classloader fix for when both the parent and current classloaders load the same class
 6. Fix log view null pointer exception
 
-#Release 0.9.3.1
+# Release 0.9.3.1
 
 ## Enhancement
 1. switch apache thrift7 to storm thrift7
@@ -276,7 +358,8 @@
 6. Set gc dump dir as log's dir
 
 
-#Release 0.9.3
+# Release 0.9.3
+
 ## New feature
 1. Support Aliyun Apsara/Hadoop Yarn
 
@@ -308,7 +391,8 @@
  
 
 
-#Release 0.9.2
+# Release 0.9.2
+
 ## New feature
 1. Support LocalCluster/LocalDrpc mode, support debugging topology under local mode
 2. Support CGroups, assigning CPU at the hardware level.


[32/51] [partial] storm git commit: Update JStorm to latest release 2.1.0

Posted by pt...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/digest/ServerCallbackHandler.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/digest/ServerCallbackHandler.java b/jstorm-core/src/main/java/backtype/storm/security/auth/digest/ServerCallbackHandler.java
index 1788dab..d681236 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/digest/ServerCallbackHandler.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/digest/ServerCallbackHandler.java
@@ -48,26 +48,27 @@ public class ServerCallbackHandler implements CallbackHandler {
     private static final String SYSPROP_SUPER_PASSWORD = "storm.SASLAuthenticationProvider.superPassword";
 
     private String userName;
-    private final Map<String,String> credentials = new HashMap<String,String>();
+    private final Map<String, String> credentials = new HashMap<String, String>();
 
     public ServerCallbackHandler(Configuration configuration) throws IOException {
-        if (configuration==null) return;
+        if (configuration == null)
+            return;
 
         AppConfigurationEntry configurationEntries[] = configuration.getAppConfigurationEntry(AuthUtils.LOGIN_CONTEXT_SERVER);
         if (configurationEntries == null) {
-            String errorMessage = "Could not find a '"+AuthUtils.LOGIN_CONTEXT_SERVER+"' entry in this configuration: Server cannot start.";
+            String errorMessage = "Could not find a '" + AuthUtils.LOGIN_CONTEXT_SERVER + "' entry in this configuration: Server cannot start.";
             throw new IOException(errorMessage);
         }
         credentials.clear();
-        for(AppConfigurationEntry entry: configurationEntries) {
-            Map<String,?> options = entry.getOptions();
+        for (AppConfigurationEntry entry : configurationEntries) {
+            Map<String, ?> options = entry.getOptions();
             // Populate DIGEST-MD5 user -> password map with JAAS configuration entries from the "Server" section.
             // Usernames are distinguished from other options by prefixing the username with a "user_" prefix.
-            for(Map.Entry<String, ?> pair : options.entrySet()) {
+            for (Map.Entry<String, ?> pair : options.entrySet()) {
                 String key = pair.getKey();
                 if (key.startsWith(USER_PREFIX)) {
                     String userName = key.substring(USER_PREFIX.length());
-                    credentials.put(userName,(String)pair.getValue());
+                    credentials.put(userName, (String) pair.getValue());
                 }
             }
         }
@@ -98,7 +99,7 @@ public class ServerCallbackHandler implements CallbackHandler {
         if ("super".equals(this.userName) && System.getProperty(SYSPROP_SUPER_PASSWORD) != null) {
             // superuser: use Java system property for password, if available.
             pc.setPassword(System.getProperty(SYSPROP_SUPER_PASSWORD).toCharArray());
-        } else if (credentials.containsKey(userName) ) {
+        } else if (credentials.containsKey(userName)) {
             pc.setPassword(credentials.get(userName).toCharArray());
         } else {
             LOG.warn("No password found for user: " + userName);
@@ -106,7 +107,7 @@ public class ServerCallbackHandler implements CallbackHandler {
     }
 
     private void handleRealmCallback(RealmCallback rc) {
-        LOG.debug("handleRealmCallback: "+ rc.getDefaultText());
+        LOG.debug("handleRealmCallback: " + rc.getDefaultText());
         rc.setText(rc.getDefaultText());
     }
 
@@ -114,14 +115,14 @@ public class ServerCallbackHandler implements CallbackHandler {
         String authenticationID = ac.getAuthenticationID();
         LOG.info("Successfully authenticated client: authenticationID = " + authenticationID + " authorizationID = " + ac.getAuthorizationID());
 
-        //if authorizationId is not set, set it to authenticationId.
-        if(ac.getAuthorizationID() == null) {
+        // if authorizationId is not set, set it to authenticationId.
+        if (ac.getAuthorizationID() == null) {
             ac.setAuthorizedID(authenticationID);
         }
 
-        //When authNid and authZid are not equal , authNId is attempting to impersonate authZid, We
-        //add the authNid as the real user in reqContext's subject which will be used during authorization.
-        if(!authenticationID.equals(ac.getAuthorizationID())) {
+        // When authNid and authZid are not equal , authNId is attempting to impersonate authZid, We
+        // add the authNid as the real user in reqContext's subject which will be used during authorization.
+        if (!authenticationID.equals(ac.getAuthorizationID())) {
             LOG.info("Impersonation attempt  authenticationID = " + ac.getAuthenticationID() + " authorizationID = " + ac.getAuthorizationID());
             ReqContext.context().setRealPrincipal(new SaslTransportPlugin.User(ac.getAuthenticationID()));
         }

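For context on the loop being reformatted above: DIGEST-MD5 users come from the JAAS "Server" section, where any option key carrying the "user_" prefix maps a username to its password. The same extraction in a few lines of Python, with a made-up options map:

    options = {"user_alice": "s3cret", "user_bob": "hunter2", "debug": "true"}
    USER_PREFIX = "user_"
    # Keep only the "user_*" entries, stripping the prefix to get the username.
    credentials = {key[len(USER_PREFIX):]: value
                   for key, value in options.items()
                   if key.startswith(USER_PREFIX)}
    print(credentials)  # {'alice': 's3cret', 'bob': 'hunter2'}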
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/kerberos/AutoTGT.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/kerberos/AutoTGT.java b/jstorm-core/src/main/java/backtype/storm/security/auth/kerberos/AutoTGT.java
index aed1c4f..116febb 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/kerberos/AutoTGT.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/kerberos/AutoTGT.java
@@ -63,7 +63,7 @@ public class AutoTGT implements IAutoCredentials, ICredentialsRenewer {
 
     private static KerberosTicket getTGT(Subject subject) {
         Set<KerberosTicket> tickets = subject.getPrivateCredentials(KerberosTicket.class);
-        for(KerberosTicket ticket: tickets) {
+        for (KerberosTicket ticket : tickets) {
             KerberosPrincipal server = ticket.getServer();
             if (server.getName().equals("krbtgt/" + server.getRealm() + "@" + server.getRealm())) {
                 tickets = null;
@@ -72,26 +72,26 @@ public class AutoTGT implements IAutoCredentials, ICredentialsRenewer {
         }
         tickets = null;
         return null;
-    } 
+    }
 
     @Override
     public void populateCredentials(Map<String, String> credentials) {
-        //Log the user in and get the TGT
+        // Log the user in and get the TGT
         try {
             Configuration login_conf = AuthUtils.GetConfiguration(conf);
             ClientCallbackHandler client_callback_handler = new ClientCallbackHandler(login_conf);
-        
-            //login our user
-            Configuration.setConfiguration(login_conf); 
+
+            // login our user
+            Configuration.setConfiguration(login_conf);
             LoginContext lc = new LoginContext(AuthUtils.LOGIN_CONTEXT_CLIENT, client_callback_handler);
             try {
                 lc.login();
                 final Subject subject = lc.getSubject();
                 KerberosTicket tgt = getTGT(subject);
 
-                if (tgt == null) { //error
-                    throw new RuntimeException("Fail to verify user principal with section \""
-                            +AuthUtils.LOGIN_CONTEXT_CLIENT+"\" in login configuration file "+ login_conf);
+                if (tgt == null) { // error
+                    throw new RuntimeException("Fail to verify user principal with section \"" + AuthUtils.LOGIN_CONTEXT_CLIENT
+                            + "\" in login configuration file " + login_conf);
                 }
 
                 if (!tgt.isForwardable()) {
@@ -102,7 +102,7 @@ public class AutoTGT implements IAutoCredentials, ICredentialsRenewer {
                     throw new RuntimeException("The TGT found is not renewable");
                 }
 
-                LOG.info("Pushing TGT for "+tgt.getClient()+" to topology.");
+                LOG.info("Pushing TGT for " + tgt.getClient() + " to topology.");
                 saveTGT(tgt, credentials);
             } finally {
                 lc.logout();
@@ -131,7 +131,7 @@ public class AutoTGT implements IAutoCredentials, ICredentialsRenewer {
             try {
                 ByteArrayInputStream bin = new ByteArrayInputStream(DatatypeConverter.parseBase64Binary(credentials.get("TGT")));
                 ObjectInputStream in = new ObjectInputStream(bin);
-                ret = (KerberosTicket)in.readObject();
+                ret = (KerberosTicket) in.readObject();
                 in.close();
             } catch (Exception e) {
                 throw new RuntimeException(e);
@@ -155,16 +155,16 @@ public class AutoTGT implements IAutoCredentials, ICredentialsRenewer {
         KerberosTicket tgt = getTGT(credentials);
         if (tgt != null) {
             Set<Object> creds = subject.getPrivateCredentials();
-            synchronized(creds) {
+            synchronized (creds) {
                 Iterator<Object> iterator = creds.iterator();
                 while (iterator.hasNext()) {
                     Object o = iterator.next();
                     if (o instanceof KerberosTicket) {
-                        KerberosTicket t = (KerberosTicket)o;
+                        KerberosTicket t = (KerberosTicket) o;
                         iterator.remove();
                         try {
                             t.destroy();
-                        } catch (DestroyFailedException  e) {
+                        } catch (DestroyFailedException e) {
                             LOG.warn("Failed to destory ticket ", e);
                         }
                     }
@@ -179,8 +179,8 @@ public class AutoTGT implements IAutoCredentials, ICredentialsRenewer {
     }
 
     /**
-     * Hadoop does not just go off of a TGT, it needs a bit more.  This
-     * should fill in the rest.
+     * Hadoop does not just go off of a TGT, it needs a bit more. This should fill in the rest.
+     * 
      * @param subject the subject that should have a TGT in it.
      */
     private void loginHadoopUser(Subject subject) {
@@ -193,23 +193,21 @@ public class AutoTGT implements IAutoCredentials, ICredentialsRenewer {
         }
         try {
             Method isSecEnabled = ugi.getMethod("isSecurityEnabled");
-            if (!((Boolean)isSecEnabled.invoke(null))) {
-                LOG.warn("Hadoop is on the classpath but not configured for " +
-                  "security, if you want security you need to be sure that " +
-                  "hadoop.security.authentication=kerberos in core-site.xml " +
-                  "in your jar");
+            if (!((Boolean) isSecEnabled.invoke(null))) {
+                LOG.warn("Hadoop is on the classpath but not configured for " + "security, if you want security you need to be sure that "
+                        + "hadoop.security.authentication=kerberos in core-site.xml " + "in your jar");
                 return;
             }
- 
+
             try {
                 Method login = ugi.getMethod("loginUserFromSubject", Subject.class);
                 login.invoke(null, subject);
             } catch (NoSuchMethodException me) {
-                //The version of Hadoop does not have the needed client changes.
+                // The version of Hadoop does not have the needed client changes.
                 // So don't look now, but do something really ugly to work around it.
                 // This is because we are reaching into the hidden bits of Hadoop security, and it works for now, but may stop at any point in time.
 
-                //We are just trying to do the following
+                // We are just trying to do the following
                 // Configuration conf = new Configuration();
                 // HadoopKerberosName.setConfiguration(conf);
                 // subject.getPrincipals().add(new User(tgt.getClient().toString(), AuthenticationMethod.KERBEROS, null));
@@ -220,7 +218,7 @@ public class AutoTGT implements IAutoCredentials, ICredentialsRenewer {
                 Constructor confCons = confClass.getConstructor();
                 Object conf = confCons.newInstance();
                 Class<?> hknClass = Class.forName("org.apache.hadoop.security.HadoopKerberosName");
-                Method hknSetConf = hknClass.getMethod("setConfiguration",confClass);
+                Method hknSetConf = hknClass.getMethod("setConfiguration", confClass);
                 hknSetConf.invoke(null, conf);
 
                 Class<?> authMethodClass = Class.forName("org.apache.hadoop.security.UserGroupInformation$AuthenticationMethod");
@@ -236,7 +234,7 @@ public class AutoTGT implements IAutoCredentials, ICredentialsRenewer {
                 Constructor userCons = userClass.getConstructor(String.class, authMethodClass, LoginContext.class);
                 userCons.setAccessible(true);
                 Object user = userCons.newInstance(name, kerbAuthMethod, null);
-                subject.getPrincipals().add((Principal)user);
+                subject.getPrincipals().add((Principal) user);
             }
         } catch (Exception e) {
             LOG.warn("Something went wrong while trying to initialize Hadoop through reflection. This version of hadoop may not be compatible.", e);
@@ -250,14 +248,14 @@ public class AutoTGT implements IAutoCredentials, ICredentialsRenewer {
     }
 
     @Override
-    public void renew(Map<String,String> credentials, Map topologyConf) {
+    public void renew(Map<String, String> credentials, Map topologyConf) {
         KerberosTicket tgt = getTGT(credentials);
         if (tgt != null) {
             long refreshTime = getRefreshTime(tgt);
             long now = System.currentTimeMillis();
             if (now >= refreshTime) {
                 try {
-                    LOG.info("Renewing TGT for "+tgt.getClient());
+                    LOG.info("Renewing TGT for " + tgt.getClient());
                     tgt.refresh();
                     saveTGT(tgt, credentials);
                 } catch (RefreshFailedException e) {
@@ -272,10 +270,10 @@ public class AutoTGT implements IAutoCredentials, ICredentialsRenewer {
         Map conf = new java.util.HashMap();
         conf.put("java.security.auth.login.config", args[0]);
         at.prepare(conf);
-        Map<String,String> creds = new java.util.HashMap<String,String>();
+        Map<String, String> creds = new java.util.HashMap<String, String>();
         at.populateCredentials(creds);
         Subject s = new Subject();
         at.populateSubject(s, creds);
-        LOG.info("Got a Subject "+s);
+        LOG.info("Got a Subject " + s);
     }
 }

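A quick gloss on getTGT(), since the reformatting makes the loop easier to read: among the subject's Kerberos tickets, the TGT is the one whose server principal is krbtgt/<realm>@<realm>. The matching rule restated in Python (the principal strings here are invented):

    def is_tgt(server_principal, realm):
        # AutoTGT keeps the ticket whose server is krbtgt/REALM@REALM.
        return server_principal == "krbtgt/%s@%s" % (realm, realm)

    print(is_tgt("krbtgt/EXAMPLE.COM@EXAMPLE.COM", "EXAMPLE.COM"))  # True
    print(is_tgt("host/nimbus@EXAMPLE.COM", "EXAMPLE.COM"))         # False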
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/kerberos/AutoTGTKrb5LoginModule.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/kerberos/AutoTGTKrb5LoginModule.java b/jstorm-core/src/main/java/backtype/storm/security/auth/kerberos/AutoTGTKrb5LoginModule.java
index 807abe3..647e240 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/kerberos/AutoTGTKrb5LoginModule.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/kerberos/AutoTGTKrb5LoginModule.java
@@ -29,7 +29,6 @@ import javax.security.auth.kerberos.KerberosTicket;
 import javax.security.auth.login.LoginException;
 import javax.security.auth.spi.LoginModule;
 
-
 /**
  * Custom LoginModule to enable Auto Login based on cached ticket
  */
@@ -41,10 +40,7 @@ public class AutoTGTKrb5LoginModule implements LoginModule {
 
     protected KerberosTicket kerbTicket = null;
 
-    public void initialize(Subject subject,
-                           CallbackHandler callbackHandler,
-                           Map<String, ?> sharedState,
-                           Map<String, ?> options) {
+    public void initialize(Subject subject, CallbackHandler callbackHandler, Map<String, ?> sharedState, Map<String, ?> options) {
 
         this.subject = subject;
     }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/kerberos/AutoTGTKrb5LoginModuleTest.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/kerberos/AutoTGTKrb5LoginModuleTest.java b/jstorm-core/src/main/java/backtype/storm/security/auth/kerberos/AutoTGTKrb5LoginModuleTest.java
index ba34fc9..6188566 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/kerberos/AutoTGTKrb5LoginModuleTest.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/kerberos/AutoTGTKrb5LoginModuleTest.java
@@ -31,7 +31,7 @@ public class AutoTGTKrb5LoginModuleTest extends AutoTGTKrb5LoginModule {
     public void setKerbTicket(KerberosTicket ticket) {
         this.kerbTicket = ticket;
     }
-    
+
     @Override
     protected void getKerbTicketFromCache() {
         // Do nothing.

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/kerberos/ClientCallbackHandler.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/kerberos/ClientCallbackHandler.java b/jstorm-core/src/main/java/backtype/storm/security/auth/kerberos/ClientCallbackHandler.java
index d46aa8b..13a2cba 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/kerberos/ClientCallbackHandler.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/kerberos/ClientCallbackHandler.java
@@ -49,11 +49,11 @@ public class ClientCallbackHandler implements CallbackHandler {
      * @throws IOException
      */
     public ClientCallbackHandler(Configuration configuration) throws IOException {
-        if (configuration == null) return;
+        if (configuration == null)
+            return;
         AppConfigurationEntry configurationEntries[] = configuration.getAppConfigurationEntry(AuthUtils.LOGIN_CONTEXT_CLIENT);
         if (configurationEntries == null) {
-            String errorMessage = "Could not find a '"+AuthUtils.LOGIN_CONTEXT_CLIENT
-                    + "' entry in this configuration: Client cannot start.";
+            String errorMessage = "Could not find a '" + AuthUtils.LOGIN_CONTEXT_CLIENT + "' entry in this configuration: Client cannot start.";
             LOG.error(errorMessage);
             throw new IOException(errorMessage);
         }
@@ -61,7 +61,8 @@ public class ClientCallbackHandler implements CallbackHandler {
 
     /**
      * This method is invoked by SASL for authentication challenges
-     * @param callbacks a collection of challenge callbacks 
+     * 
+     * @param callbacks a collection of challenge callbacks
      */
     public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
         for (Callback c : callbacks) {
@@ -69,20 +70,18 @@ public class ClientCallbackHandler implements CallbackHandler {
                 LOG.debug("name callback");
             } else if (c instanceof PasswordCallback) {
                 LOG.debug("password callback");
-                LOG.warn("Could not login: the client is being asked for a password, but the " +
-                        " client code does not currently support obtaining a password from the user." +
-                        " Make sure that the client is configured to use a ticket cache (using" +
-                        " the JAAS configuration setting 'useTicketCache=true)' and restart the client. If" +
-                        " you still get this message after that, the TGT in the ticket cache has expired and must" +
-                        " be manually refreshed. To do so, first determine if you are using a password or a" +
-                        " keytab. If the former, run kinit in a Unix shell in the environment of the user who" +
-                        " is running this client using the command" +
-                        " 'kinit <princ>' (where <princ> is the name of the client's Kerberos principal)." +
-                        " If the latter, do" +
-                        " 'kinit -k -t <keytab> <princ>' (where <princ> is the name of the Kerberos principal, and" +
-                        " <keytab> is the location of the keytab file). After manually refreshing your cache," +
-                        " restart this client. If you continue to see this message after manually refreshing" +
-                        " your cache, ensure that your KDC host's clock is in sync with this host's clock.");
+                LOG.warn("Could not login: the client is being asked for a password, but the "
+                        + " client code does not currently support obtaining a password from the user."
+                        + " Make sure that the client is configured to use a ticket cache (using"
+                        + " the JAAS configuration setting 'useTicketCache=true)' and restart the client. If"
+                        + " you still get this message after that, the TGT in the ticket cache has expired and must"
+                        + " be manually refreshed. To do so, first determine if you are using a password or a"
+                        + " keytab. If the former, run kinit in a Unix shell in the environment of the user who" + " is running this client using the command"
+                        + " 'kinit <princ>' (where <princ> is the name of the client's Kerberos principal)." + " If the latter, do"
+                        + " 'kinit -k -t <keytab> <princ>' (where <princ> is the name of the Kerberos principal, and"
+                        + " <keytab> is the location of the keytab file). After manually refreshing your cache,"
+                        + " restart this client. If you continue to see this message after manually refreshing"
+                        + " your cache, ensure that your KDC host's clock is in sync with this host's clock.");
             } else if (c instanceof AuthorizeCallback) {
                 LOG.debug("authorization callback");
                 AuthorizeCallback ac = (AuthorizeCallback) c;
@@ -96,7 +95,7 @@ public class ClientCallbackHandler implements CallbackHandler {
                 if (ac.isAuthorized()) {
                     ac.setAuthorizedID(authzid);
                 }
-            }  else {
+            } else {
                 throw new UnsupportedCallbackException(c);
             }
         }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/kerberos/KerberosSaslTransportPlugin.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/kerberos/KerberosSaslTransportPlugin.java b/jstorm-core/src/main/java/backtype/storm/security/auth/kerberos/KerberosSaslTransportPlugin.java
index ecb0daf..e257a8a 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/kerberos/KerberosSaslTransportPlugin.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/kerberos/KerberosSaslTransportPlugin.java
@@ -48,19 +48,19 @@ import backtype.storm.security.auth.AuthUtils;
 import backtype.storm.security.auth.SaslTransportPlugin;
 
 public class KerberosSaslTransportPlugin extends SaslTransportPlugin {
-    public static final String KERBEROS = "GSSAPI"; 
+    public static final String KERBEROS = "GSSAPI";
     private static final Logger LOG = LoggerFactory.getLogger(KerberosSaslTransportPlugin.class);
 
     public TTransportFactory getServerTransportFactory() throws IOException {
-        //create an authentication callback handler
+        // create an authentication callback handler
         CallbackHandler server_callback_handler = new ServerCallbackHandler(login_conf, storm_conf);
-        
-        //login our principal
+
+        // login our principal
         Subject subject = null;
         try {
-            //specify a configuration object to be used
-            Configuration.setConfiguration(login_conf); 
-            //now login
+            // specify a configuration object to be used
+            Configuration.setConfiguration(login_conf);
+            // now login
             Login login = new Login(AuthUtils.LOGIN_CONTEXT_SERVER, server_callback_handler);
             subject = login.getSubject();
         } catch (LoginException ex) {
@@ -68,27 +68,27 @@ public class KerberosSaslTransportPlugin extends SaslTransportPlugin {
             throw new RuntimeException(ex);
         }
 
-        //check the credential of our principal
-        if (subject.getPrivateCredentials(KerberosTicket.class).isEmpty()) { 
-            throw new RuntimeException("Fail to verify user principal with section \""
-                    +AuthUtils.LOGIN_CONTEXT_SERVER+"\" in login configuration file "+ login_conf);
+        // check the credential of our principal
+        if (subject.getPrivateCredentials(KerberosTicket.class).isEmpty()) {
+            throw new RuntimeException("Fail to verify user principal with section \"" + AuthUtils.LOGIN_CONTEXT_SERVER + "\" in login configuration file "
+                    + login_conf);
         }
 
-        String principal = AuthUtils.get(login_conf, AuthUtils.LOGIN_CONTEXT_SERVER, "principal"); 
-        LOG.debug("principal:"+principal);  
+        String principal = AuthUtils.get(login_conf, AuthUtils.LOGIN_CONTEXT_SERVER, "principal");
+        LOG.debug("principal:" + principal);
         KerberosName serviceKerberosName = new KerberosName(principal);
         String serviceName = serviceKerberosName.getServiceName();
         String hostName = serviceKerberosName.getHostName();
-        Map<String, String> props = new TreeMap<String,String>();
+        Map<String, String> props = new TreeMap<String, String>();
         props.put(Sasl.QOP, "auth");
         props.put(Sasl.SERVER_AUTH, "false");
 
-        //create a transport factory that will invoke our auth callback for digest
+        // create a transport factory that will invoke our auth callback for digest
         TSaslServerTransport.Factory factory = new TSaslServerTransport.Factory();
         factory.addServerDefinition(KERBEROS, serviceName, hostName, props, server_callback_handler);
 
-        //create a wrap transport factory so that we could apply user credential during connections
-        TUGIAssumingTransportFactory wrapFactory = new TUGIAssumingTransportFactory(factory, subject); 
+        // create a wrap transport factory so that we could apply user credential during connections
+        TUGIAssumingTransportFactory wrapFactory = new TUGIAssumingTransportFactory(factory, subject);
 
         LOG.info("SASL GSSAPI transport factory will be used");
         return wrapFactory;
@@ -96,55 +96,47 @@ public class KerberosSaslTransportPlugin extends SaslTransportPlugin {
 
     @Override
     public TTransport connect(TTransport transport, String serverHost, String asUser) throws TTransportException, IOException {
-        //create an authentication callback handler
+        // create an authentication callback handler
         ClientCallbackHandler client_callback_handler = new ClientCallbackHandler(login_conf);
-        
-        //login our user
+
+        // login our user
         Login login = null;
-        try { 
-            //specify a configuration object to be used
-            Configuration.setConfiguration(login_conf); 
-            //now login
-            login  = new Login(AuthUtils.LOGIN_CONTEXT_CLIENT, client_callback_handler);
+        try {
+            // specify a configuration object to be used
+            Configuration.setConfiguration(login_conf);
+            // now login
+            login = new Login(AuthUtils.LOGIN_CONTEXT_CLIENT, client_callback_handler);
         } catch (LoginException ex) {
             LOG.error("Server failed to login in principal:" + ex, ex);
             throw new RuntimeException(ex);
         }
 
         final Subject subject = login.getSubject();
-        if (subject.getPrivateCredentials(KerberosTicket.class).isEmpty()) { //error
-            throw new RuntimeException("Fail to verify user principal with section \""
-                        +AuthUtils.LOGIN_CONTEXT_CLIENT+"\" in login configuration file "+ login_conf);
+        if (subject.getPrivateCredentials(KerberosTicket.class).isEmpty()) { // error
+            throw new RuntimeException("Fail to verify user principal with section \"" + AuthUtils.LOGIN_CONTEXT_CLIENT + "\" in login configuration file "
+                    + login_conf);
         }
 
         final String principal = StringUtils.isBlank(asUser) ? getPrincipal(subject) : asUser;
         String serviceName = AuthUtils.get(login_conf, AuthUtils.LOGIN_CONTEXT_CLIENT, "serviceName");
         if (serviceName == null) {
-            serviceName = AuthUtils.SERVICE; 
+            serviceName = AuthUtils.SERVICE;
         }
-        Map<String, String> props = new TreeMap<String,String>();
+        Map<String, String> props = new TreeMap<String, String>();
         props.put(Sasl.QOP, "auth");
         props.put(Sasl.SERVER_AUTH, "false");
 
         LOG.debug("SASL GSSAPI client transport is being established");
-        final TTransport sasalTransport = new TSaslClientTransport(KERBEROS, 
-                principal, 
-                serviceName, 
-                serverHost,
-                props,
-                null, 
-                transport);
-
-        //open Sasl transport with the login credential
+        final TTransport sasalTransport = new TSaslClientTransport(KERBEROS, principal, serviceName, serverHost, props, null, transport);
+
+        // open Sasl transport with the login credential
         try {
-            Subject.doAs(subject,
-                    new PrivilegedExceptionAction<Void>() {
+            Subject.doAs(subject, new PrivilegedExceptionAction<Void>() {
                 public Void run() {
                     try {
-                        LOG.debug("do as:"+ principal);
+                        LOG.debug("do as:" + principal);
                         sasalTransport.open();
-                    }
-                    catch (Exception e) {
+                    } catch (Exception e) {
                         LOG.error("Client failed to open SaslClientTransport to interact with a server during session initiation: " + e, e);
                     }
                     return null;
@@ -158,19 +150,18 @@ public class KerberosSaslTransportPlugin extends SaslTransportPlugin {
     }
 
     private String getPrincipal(Subject subject) {
-        Set<Principal> principals = (Set<Principal>)subject.getPrincipals();
-        if (principals==null || principals.size()<1) {
+        Set<Principal> principals = (Set<Principal>) subject.getPrincipals();
+        if (principals == null || principals.size() < 1) {
             LOG.info("No principal found in login subject");
             return null;
         }
-        return ((Principal)(principals.toArray()[0])).getName();
+        return ((Principal) (principals.toArray()[0])).getName();
     }
 
-    /** A TransportFactory that wraps another one, but assumes a specified UGI
-     * before calling through.                                                                                                                                                      
-     *                                                                                                                                                                              
-     * This is used on the server side to assume the server's Principal when accepting                                                                                              
-     * clients.                                                                                                                                                                     
+    /**
+     * A TransportFactory that wraps another one, but assumes a specified UGI before calling through.
+     * 
+     * This is used on the server side to assume the server's Principal when accepting clients.
      */
     static class TUGIAssumingTransportFactory extends TTransportFactory {
         private final Subject subject;
@@ -180,21 +171,19 @@ public class KerberosSaslTransportPlugin extends SaslTransportPlugin {
             this.wrapped = wrapped;
             this.subject = subject;
 
-            Set<Principal> principals = (Set<Principal>)subject.getPrincipals();
-            if (principals.size()>0) 
-                LOG.info("Service principal:"+ ((Principal)(principals.toArray()[0])).getName());
+            Set<Principal> principals = (Set<Principal>) subject.getPrincipals();
+            if (principals.size() > 0)
+                LOG.info("Service principal:" + ((Principal) (principals.toArray()[0])).getName());
         }
 
         @Override
         public TTransport getTransport(final TTransport trans) {
             try {
-                return Subject.doAs(subject,
-                        new PrivilegedExceptionAction<TTransport>() {
+                return Subject.doAs(subject, new PrivilegedExceptionAction<TTransport>() {
                     public TTransport run() {
                         try {
                             return wrapped.getTransport(trans);
-                        }
-                        catch (Exception e) {
+                        } catch (Exception e) {
                             LOG.error("Storm server failed to open transport to interact with a client during session initiation: " + e, e);
                             return null;
                         }
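
For readers skimming the hunk: both the client-side connect() and the server-side TUGIAssumingTransportFactory funnel their transport work through Subject.doAs, so the Kerberos credentials obtained by the JAAS login ride along on the access-control context. A minimal standalone sketch of that pattern (the empty Subject stands in for the one a real Login would produce; class name illustrative):

    import java.security.PrivilegedExceptionAction;
    import javax.security.auth.Subject;

    public final class DoAsSketch {
        public static void main(String[] args) throws Exception {
            // In the plugin this Subject comes from a JAAS Login; empty here for illustration.
            final Subject subject = new Subject();

            // Subject.doAs binds the subject's credentials to everything run() does;
            // this is the point where the plugin opens its SASL transport.
            Subject.doAs(subject, new PrivilegedExceptionAction<Void>() {
                public Void run() {
                    System.out.println("running with subject: " + subject);
                    return null;
                }
            });
        }
    }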

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/kerberos/ServerCallbackHandler.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/kerberos/ServerCallbackHandler.java b/jstorm-core/src/main/java/backtype/storm/security/auth/kerberos/ServerCallbackHandler.java
index 7b143f0..0e32e0b 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/kerberos/ServerCallbackHandler.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/kerberos/ServerCallbackHandler.java
@@ -41,11 +41,12 @@ public class ServerCallbackHandler implements CallbackHandler {
     private String userName;
 
     public ServerCallbackHandler(Configuration configuration, Map stormConf) throws IOException {
-        if (configuration==null) return;
+        if (configuration == null)
+            return;
 
         AppConfigurationEntry configurationEntries[] = configuration.getAppConfigurationEntry(AuthUtils.LOGIN_CONTEXT_SERVER);
         if (configurationEntries == null) {
-            String errorMessage = "Could not find a '"+AuthUtils.LOGIN_CONTEXT_SERVER+"' entry in this configuration: Server cannot start.";
+            String errorMessage = "Could not find a '" + AuthUtils.LOGIN_CONTEXT_SERVER + "' entry in this configuration: Server cannot start.";
             LOG.error(errorMessage);
             throw new IOException(errorMessage);
         }
@@ -78,14 +79,14 @@ public class ServerCallbackHandler implements CallbackHandler {
         String authenticationID = ac.getAuthenticationID();
         LOG.info("Successfully authenticated client: authenticationID=" + authenticationID + " authorizationID= " + ac.getAuthorizationID());
 
-        //if authorizationId is not set, set it to authenticationId.
-        if(ac.getAuthorizationID() == null) {
+        // if authorizationId is not set, set it to authenticationId.
+        if (ac.getAuthorizationID() == null) {
             ac.setAuthorizedID(authenticationID);
         }
 
-        //When authNid and authZid are not equal , authNId is attempting to impersonate authZid, We
-        //add the authNid as the real user in reqContext's subject which will be used during authorization.
-        if(!ac.getAuthenticationID().equals(ac.getAuthorizationID())) {
+        // When authNid and authZid are not equal, authNid is attempting to impersonate authZid. We
+        // add authNid as the real user in reqContext's subject, which will be used during authorization.
+        if (!ac.getAuthenticationID().equals(ac.getAuthorizationID())) {
             ReqContext.context().setRealPrincipal(new SaslTransportPlugin.User(ac.getAuthenticationID()));
         }
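
The AuthorizeCallback handling above follows the usual SASL shape: authentication has already succeeded, and the handler only decides whether authNid may act as authZid, remembering the real caller for later authorization. A stripped-down sketch of that decision (the always-allow policy and the println are illustrative; the real handler records the principal in ReqContext instead):

    import javax.security.sasl.AuthorizeCallback;

    public class AuthorizeSketch {
        static void handle(AuthorizeCallback ac) {
            ac.setAuthorized(true); // illustrative policy: accept, but record impersonation
            if (ac.getAuthorizationID() == null) {
                ac.setAuthorizedID(ac.getAuthenticationID());
            }
            if (!ac.getAuthenticationID().equals(ac.getAuthorizationID())) {
                System.out.println(ac.getAuthenticationID()
                        + " is impersonating " + ac.getAuthorizationID());
            }
        }

        public static void main(String[] args) {
            handle(new AuthorizeCallback("alice@EXAMPLE.COM", "bob@EXAMPLE.COM"));
        }
    }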
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/serialization/BlowfishTupleSerializer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/serialization/BlowfishTupleSerializer.java b/jstorm-core/src/main/java/backtype/storm/security/serialization/BlowfishTupleSerializer.java
index 8e66cdf..437cdbb 100644
--- a/jstorm-core/src/main/java/backtype/storm/security/serialization/BlowfishTupleSerializer.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/serialization/BlowfishTupleSerializer.java
@@ -40,8 +40,9 @@ import backtype.storm.Config;
  */
 public class BlowfishTupleSerializer extends Serializer<ListDelegate> {
     /**
-     * The secret key (if any) for data encryption by blowfish payload serialization factory (BlowfishSerializationFactory). 
-     * You should use in via "storm -c topology.tuple.serializer.blowfish.key=YOURKEY -c topology.tuple.serializer=backtype.storm.security.serialization.BlowfishTupleSerializer jar ...".
+     * The secret key (if any) for data encryption by the blowfish payload serialization factory (BlowfishSerializationFactory). You should use it via
+     * "storm -c topology.tuple.serializer.blowfish.key=YOURKEY -c topology.tuple.serializer=backtype.storm.security.serialization.BlowfishTupleSerializer jar ...".
      */
     public static String SECRET_KEY = "topology.tuple.serializer.blowfish.key";
     private static final Logger LOG = LoggerFactory.getLogger(BlowfishTupleSerializer.class);
@@ -50,12 +51,12 @@ public class BlowfishTupleSerializer extends Serializer<ListDelegate> {
     public BlowfishTupleSerializer(Kryo kryo, Map storm_conf) {
         String encryption_key = null;
         try {
-            encryption_key = (String)storm_conf.get(SECRET_KEY);
+            encryption_key = (String) storm_conf.get(SECRET_KEY);
             LOG.debug("Blowfish serializer being constructed ...");
             if (encryption_key == null) {
                 throw new RuntimeException("Blowfish encryption key not specified");
             }
-            byte[] bytes =  Hex.decodeHex(encryption_key.toCharArray());
+            byte[] bytes = Hex.decodeHex(encryption_key.toCharArray());
             _serializer = new BlowfishSerializer(new ListDelegateSerializer(), bytes);
         } catch (org.apache.commons.codec.DecoderException ex) {
             throw new RuntimeException("Blowfish encryption key invalid", ex);
@@ -69,22 +70,23 @@ public class BlowfishTupleSerializer extends Serializer<ListDelegate> {
 
     @Override
     public ListDelegate read(Kryo kryo, Input input, Class<ListDelegate> type) {
-        return (ListDelegate)_serializer.read(kryo, input, type);
+        return (ListDelegate) _serializer.read(kryo, input, type);
     }
 
     /**
      * Produce a blowfish key to be used in "Storm jar" command
      */
     public static void main(String[] args) {
-        try{
+        try {
             KeyGenerator kgen = KeyGenerator.getInstance("Blowfish");
             SecretKey skey = kgen.generateKey();
             byte[] raw = skey.getEncoded();
             String keyString = new String(Hex.encodeHex(raw));
-            System.out.println("storm -c "+SECRET_KEY+"="+keyString+" -c "+Config.TOPOLOGY_TUPLE_SERIALIZER+"="+BlowfishTupleSerializer.class.getName() + " ..." );
+            System.out.println("storm -c " + SECRET_KEY + "=" + keyString + " -c " + Config.TOPOLOGY_TUPLE_SERIALIZER + "="
+                    + BlowfishTupleSerializer.class.getName() + " ...");
         } catch (Exception ex) {
             LOG.error(ex.getMessage());
             ex.printStackTrace();
         }
-    }    
+    }
 }
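
The constructor and main() above are two halves of one round trip: main() hex-encodes a freshly generated key, and the constructor hex-decodes it back into raw bytes. A self-contained sketch of that round trip, using the same JCE and commons-codec calls as the file (class name illustrative):

    import javax.crypto.KeyGenerator;
    import javax.crypto.SecretKey;
    import org.apache.commons.codec.binary.Hex;

    public class BlowfishKeyRoundTrip {
        public static void main(String[] args) throws Exception {
            // Generate a Blowfish key, as BlowfishTupleSerializer.main() does.
            KeyGenerator kgen = KeyGenerator.getInstance("Blowfish");
            SecretKey skey = kgen.generateKey();
            String keyString = new String(Hex.encodeHex(skey.getEncoded()));

            // The serializer's constructor reverses this with Hex.decodeHex.
            byte[] raw = Hex.decodeHex(keyString.toCharArray());
            System.out.println("key=" + keyString + " (" + raw.length + " bytes)");
        }
    }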

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/serialization/DefaultKryoFactory.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/serialization/DefaultKryoFactory.java b/jstorm-core/src/main/java/backtype/storm/serialization/DefaultKryoFactory.java
index a055eb2..91e629a 100755
--- a/jstorm-core/src/main/java/backtype/storm/serialization/DefaultKryoFactory.java
+++ b/jstorm-core/src/main/java/backtype/storm/serialization/DefaultKryoFactory.java
@@ -22,30 +22,29 @@ import com.esotericsoftware.kryo.Kryo;
 import com.esotericsoftware.kryo.Serializer;
 import java.util.Map;
 
-
 public class DefaultKryoFactory implements IKryoFactory {
 
     public static class KryoSerializableDefault extends Kryo {
         boolean _override = false;
-        
+
         public void overrideDefault(boolean value) {
             _override = value;
-        }                
-        
+        }
+
         @Override
         public Serializer getDefaultSerializer(Class type) {
-            if(_override) {
+            if (_override) {
                 return new SerializableSerializer();
             } else {
                 return super.getDefaultSerializer(type);
             }
-        }        
-    }    
-    
+        }
+    }
+
     @Override
     public Kryo getKryo(Map conf) {
         KryoSerializableDefault k = new KryoSerializableDefault();
-        k.setRegistrationRequired(!((Boolean) conf.get(Config.TOPOLOGY_FALL_BACK_ON_JAVA_SERIALIZATION)));        
+        k.setRegistrationRequired((Boolean) conf.get(Config.TOPOLOGY_KRYO_REGISTER_REQUIRED));
         k.setReferences(false);
         return k;
     }
@@ -53,12 +52,12 @@ public class DefaultKryoFactory implements IKryoFactory {
     @Override
     public void preRegister(Kryo k, Map conf) {
     }
-    
+
     public void postRegister(Kryo k, Map conf) {
-        ((KryoSerializableDefault)k).overrideDefault(true);
+        ((KryoSerializableDefault) k).overrideDefault((Boolean) conf.get(Config.TOPOLOGY_FALL_BACK_ON_JAVA_SERIALIZATION));
     }
 
     @Override
-    public void postDecorate(Kryo k, Map conf) {        
-    }    
+    public void postDecorate(Kryo k, Map conf) {
+    }
 }
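
Note that this hunk changes behavior, not just formatting: whether Kryo rejects unregistered classes is now keyed off Config.TOPOLOGY_KRYO_REGISTER_REQUIRED, and the Java-serialization fallback is only enabled in postRegister when TOPOLOGY_FALL_BACK_ON_JAVA_SERIALIZATION says so. As a reminder of what setRegistrationRequired(true) enforces in plain Kryo 2.x (demo class name illustrative):

    import com.esotericsoftware.kryo.Kryo;
    import com.esotericsoftware.kryo.io.Output;

    public class RegistrationDemo {
        static class Unregistered { int x = 42; }

        public static void main(String[] args) {
            Kryo kryo = new Kryo();
            kryo.setRegistrationRequired(true);
            Output out = new Output(64, -1);
            try {
                // Throws IllegalArgumentException: Unregistered was never registered.
                kryo.writeObject(out, new Unregistered());
            } catch (IllegalArgumentException e) {
                System.out.println("rejected as expected: " + e.getMessage());
            }
        }
    }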

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/serialization/DefaultSerializationDelegate.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/serialization/DefaultSerializationDelegate.java b/jstorm-core/src/main/java/backtype/storm/serialization/DefaultSerializationDelegate.java
index 6d986af..c97470f 100755
--- a/jstorm-core/src/main/java/backtype/storm/serialization/DefaultSerializationDelegate.java
+++ b/jstorm-core/src/main/java/backtype/storm/serialization/DefaultSerializationDelegate.java
@@ -48,10 +48,10 @@ public class DefaultSerializationDelegate implements SerializationDelegate {
             ObjectInputStream ois = new ObjectInputStream(bis);
             Object ret = ois.readObject();
             ois.close();
-            return (T)ret;
-        } catch(IOException ioe) {
+            return (T) ret;
+        } catch (IOException ioe) {
             throw new RuntimeException(ioe);
-        } catch(ClassNotFoundException e) {
+        } catch (ClassNotFoundException e) {
             throw new RuntimeException(e);
         }
     }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/serialization/GzipBridgeSerializationDelegate.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/serialization/GzipBridgeSerializationDelegate.java b/jstorm-core/src/main/java/backtype/storm/serialization/GzipBridgeSerializationDelegate.java
index c8377c3..4b7951e 100755
--- a/jstorm-core/src/main/java/backtype/storm/serialization/GzipBridgeSerializationDelegate.java
+++ b/jstorm-core/src/main/java/backtype/storm/serialization/GzipBridgeSerializationDelegate.java
@@ -22,8 +22,8 @@ import java.util.zip.GZIPInputStream;
 
 /**
  * Always writes gzip out, but tests incoming to see if it's gzipped. If it is, deserializes with gzip. If not, uses
- * {@link backtype.storm.serialization.DefaultSerializationDelegate} to deserialize. Any logic needing to be enabled
- * via {@link #prepare(java.util.Map)} is passed through to both delegates.
+ * {@link DefaultSerializationDelegate} to deserialize. Any logic needing to be enabled via {@link #prepare(Map)} is
+ * passed through to both delegates.
  */
 @Deprecated
 public class GzipBridgeSerializationDelegate implements SerializationDelegate {
@@ -47,7 +47,7 @@ public class GzipBridgeSerializationDelegate implements SerializationDelegate {
         if (isGzipped(bytes)) {
             return gzipDelegate.deserialize(bytes, clazz);
         } else {
-            return defaultDelegate.deserialize(bytes,clazz);
+            return defaultDelegate.deserialize(bytes, clazz);
         }
     }
 
@@ -59,7 +59,6 @@ public class GzipBridgeSerializationDelegate implements SerializationDelegate {
      * Looks ahead to see if the GZIP magic constant is heading {@code bytes}
      */
     private boolean isGzipped(byte[] bytes) {
-        return (bytes.length > 1) && (bytes[0] == GZIP_MAGIC_FIRST_BYTE)
-               && (bytes[1] == GZIP_MAGIC_SECOND_BYTE);
+        return (bytes.length > 1) && (bytes[0] == GZIP_MAGIC_FIRST_BYTE) && (bytes[1] == GZIP_MAGIC_SECOND_BYTE);
     }
 }
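
The isGzipped look-ahead above keys on the two-byte GZIP magic header (0x1f, 0x8b). The same check in isolation, derived from GZIPInputStream.GZIP_MAGIC, which holds the magic in little-endian int form (demo class name illustrative):

    import java.util.zip.GZIPInputStream;

    public class GzipSniff {
        static boolean isGzipped(byte[] bytes) {
            return bytes.length > 1
                    && bytes[0] == (byte) GZIPInputStream.GZIP_MAGIC          // 0x1f
                    && bytes[1] == (byte) (GZIPInputStream.GZIP_MAGIC >> 8);  // 0x8b
        }

        public static void main(String[] args) {
            System.out.println(isGzipped(new byte[] { 0x1f, (byte) 0x8b, 8 })); // true
            System.out.println(isGzipped(new byte[] { 'P', 'K', 3, 4 }));       // false
        }
    }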

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/serialization/GzipBridgeThriftSerializationDelegate.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/serialization/GzipBridgeThriftSerializationDelegate.java b/jstorm-core/src/main/java/backtype/storm/serialization/GzipBridgeThriftSerializationDelegate.java
index e5e77c3..6d580db 100755
--- a/jstorm-core/src/main/java/backtype/storm/serialization/GzipBridgeThriftSerializationDelegate.java
+++ b/jstorm-core/src/main/java/backtype/storm/serialization/GzipBridgeThriftSerializationDelegate.java
@@ -22,8 +22,8 @@ import java.util.zip.GZIPInputStream;
 
 /**
  * Always writes gzip out, but tests incoming to see if it's gzipped. If it is, deserializes with gzip. If not, uses
- * {@link backtype.storm.serialization.ThriftSerializationDelegate} to deserialize. Any logic needing to be enabled
- * via {@link #prepare(java.util.Map)} is passed through to both delegates.
+ * {@link ThriftSerializationDelegate} to deserialize. Any logic needing to be enabled via {@link #prepare(Map)} is
+ * passed through to both delegates.
  */
 public class GzipBridgeThriftSerializationDelegate implements SerializationDelegate {
 
@@ -46,7 +46,7 @@ public class GzipBridgeThriftSerializationDelegate implements SerializationDeleg
         if (isGzipped(bytes)) {
             return gzipDelegate.deserialize(bytes, clazz);
         } else {
-            return defaultDelegate.deserialize(bytes,clazz);
+            return defaultDelegate.deserialize(bytes, clazz);
         }
     }
 
@@ -58,7 +58,6 @@ public class GzipBridgeThriftSerializationDelegate implements SerializationDeleg
      * Looks ahead to see if the GZIP magic constant is heading {@code bytes}
      */
     private boolean isGzipped(byte[] bytes) {
-        return (bytes.length > 1) && (bytes[0] == GZIP_MAGIC_FIRST_BYTE)
-               && (bytes[1] == GZIP_MAGIC_SECOND_BYTE);
+        return (bytes.length > 1) && (bytes[0] == GZIP_MAGIC_FIRST_BYTE) && (bytes[1] == GZIP_MAGIC_SECOND_BYTE);
     }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/serialization/GzipSerializationDelegate.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/serialization/GzipSerializationDelegate.java b/jstorm-core/src/main/java/backtype/storm/serialization/GzipSerializationDelegate.java
index 3c8ee8b..2b27af0 100755
--- a/jstorm-core/src/main/java/backtype/storm/serialization/GzipSerializationDelegate.java
+++ b/jstorm-core/src/main/java/backtype/storm/serialization/GzipSerializationDelegate.java
@@ -54,10 +54,10 @@ public class GzipSerializationDelegate implements SerializationDelegate {
             ObjectInputStream ois = new ObjectInputStream(gis);
             Object ret = ois.readObject();
             ois.close();
-            return (T)ret;
-        } catch(IOException ioe) {
+            return (T) ret;
+        } catch (IOException ioe) {
             throw new RuntimeException(ioe);
-        } catch(ClassNotFoundException e) {
+        } catch (ClassNotFoundException e) {
             throw new RuntimeException(e);
         }
     }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/serialization/GzipThriftSerializationDelegate.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/serialization/GzipThriftSerializationDelegate.java b/jstorm-core/src/main/java/backtype/storm/serialization/GzipThriftSerializationDelegate.java
index 933a125..a76c080 100755
--- a/jstorm-core/src/main/java/backtype/storm/serialization/GzipThriftSerializationDelegate.java
+++ b/jstorm-core/src/main/java/backtype/storm/serialization/GzipThriftSerializationDelegate.java
@@ -49,7 +49,7 @@ public class GzipThriftSerializationDelegate implements SerializationDelegate {
         try {
             TBase instance = (TBase) clazz.newInstance();
             new TDeserializer().deserialize(instance, Utils.gunzip(bytes));
-            return (T)instance;
+            return (T) instance;
         } catch (Exception e) {
             throw new RuntimeException(e);
         }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/serialization/IKryoDecorator.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/serialization/IKryoDecorator.java b/jstorm-core/src/main/java/backtype/storm/serialization/IKryoDecorator.java
index b154a36..36e59a5 100755
--- a/jstorm-core/src/main/java/backtype/storm/serialization/IKryoDecorator.java
+++ b/jstorm-core/src/main/java/backtype/storm/serialization/IKryoDecorator.java
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 package backtype.storm.serialization;
+
 import com.esotericsoftware.kryo.Kryo;
 
 public interface IKryoDecorator {

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/serialization/IKryoFactory.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/serialization/IKryoFactory.java b/jstorm-core/src/main/java/backtype/storm/serialization/IKryoFactory.java
index 60a847d..b5c4522 100755
--- a/jstorm-core/src/main/java/backtype/storm/serialization/IKryoFactory.java
+++ b/jstorm-core/src/main/java/backtype/storm/serialization/IKryoFactory.java
@@ -21,20 +21,18 @@ import com.esotericsoftware.kryo.Kryo;
 import java.util.Map;
 
 /**
- * An interface that controls the Kryo instance used by Storm for serialization.
- * The lifecycle is:
+ * An interface that controls the Kryo instance used by Storm for serialization. The lifecycle is:
  * 
- * 1. The Kryo instance is constructed using getKryo
- * 2. Storm registers the default classes (e.g. arrays, lists, maps, etc.)
- * 3. Storm calls preRegister hook
- * 4. Storm registers all user-defined registrations through topology.kryo.register
- * 5. Storm calls postRegister hook
- * 6. Storm calls all user-defined decorators through topology.kryo.decorators
- * 7. Storm calls postDecorate hook
+ * 1. The Kryo instance is constructed using getKryo
+ * 2. Storm registers the default classes (e.g. arrays, lists, maps, etc.)
+ * 3. Storm calls preRegister hook
+ * 4. Storm registers all user-defined registrations through topology.kryo.register
+ * 5. Storm calls postRegister hook
+ * 6. Storm calls all user-defined decorators through topology.kryo.decorators
+ * 7. Storm calls postDecorate hook
  */
 public interface IKryoFactory {
     Kryo getKryo(Map conf);
+
     void preRegister(Kryo k, Map conf);
+
     void postRegister(Kryo k, Map conf);
+
     void postDecorate(Kryo k, Map conf);
 }
\ No newline at end of file
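
The seven lifecycle steps map one-to-one onto the interface: getKryo covers step 1, and the three hooks are steps 3, 5, and 7 (Storm itself performs steps 2, 4, and 6). A minimal no-op factory sketching the contract; it compiles against jstorm-core, builds a bare Kryo, and leaves every hook empty:

    import java.util.Map;

    import com.esotericsoftware.kryo.Kryo;

    import backtype.storm.serialization.IKryoFactory;

    public class PlainKryoFactory implements IKryoFactory {
        @Override
        public Kryo getKryo(Map conf) {
            Kryo k = new Kryo(); // step 1 of the lifecycle
            k.setReferences(false);
            return k;
        }

        @Override
        public void preRegister(Kryo k, Map conf) { } // step 3

        @Override
        public void postRegister(Kryo k, Map conf) { } // step 5

        @Override
        public void postDecorate(Kryo k, Map conf) { } // step 7
    }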

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/serialization/ITupleDeserializer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/serialization/ITupleDeserializer.java b/jstorm-core/src/main/java/backtype/storm/serialization/ITupleDeserializer.java
index 4e68658..641a472 100755
--- a/jstorm-core/src/main/java/backtype/storm/serialization/ITupleDeserializer.java
+++ b/jstorm-core/src/main/java/backtype/storm/serialization/ITupleDeserializer.java
@@ -21,5 +21,5 @@ import backtype.storm.tuple.Tuple;
 import java.io.IOException;
 
 public interface ITupleDeserializer {
-    Tuple deserialize(byte[] ser);        
+    Tuple deserialize(byte[] ser);
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/serialization/ITupleSerializer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/serialization/ITupleSerializer.java b/jstorm-core/src/main/java/backtype/storm/serialization/ITupleSerializer.java
index 90ad932..68df8bf 100755
--- a/jstorm-core/src/main/java/backtype/storm/serialization/ITupleSerializer.java
+++ b/jstorm-core/src/main/java/backtype/storm/serialization/ITupleSerializer.java
@@ -19,8 +19,7 @@ package backtype.storm.serialization;
 
 import backtype.storm.tuple.Tuple;
 
-
 public interface ITupleSerializer {
     byte[] serialize(Tuple tuple);
-//    long crc32(Tuple tuple);
+    // long crc32(Tuple tuple);
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/serialization/KryoTupleDeserializer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/serialization/KryoTupleDeserializer.java b/jstorm-core/src/main/java/backtype/storm/serialization/KryoTupleDeserializer.java
index 3496e68..bb8bcb4 100644
--- a/jstorm-core/src/main/java/backtype/storm/serialization/KryoTupleDeserializer.java
+++ b/jstorm-core/src/main/java/backtype/storm/serialization/KryoTupleDeserializer.java
@@ -40,55 +40,65 @@ import java.util.Map;
 
 public class KryoTupleDeserializer implements ITupleDeserializer {
     private static final Logger LOG = LoggerFactory.getLogger(KryoTupleDeserializer.class);
-    
+
     public static final boolean USE_RAW_PACKET = true;
-    
+
     GeneralTopologyContext _context;
     KryoValuesDeserializer _kryo;
     SerializationFactory.IdDictionary _ids;
     Input _kryoInput;
-    
+
     public KryoTupleDeserializer(final Map conf, final GeneralTopologyContext context) {
         _kryo = new KryoValuesDeserializer(conf);
         _context = context;
         _ids = new SerializationFactory.IdDictionary(context.getRawTopology());
         _kryoInput = new Input(1);
     }
-    
+
     public Tuple deserialize(byte[] ser) {
-        
+        _kryoInput.setBuffer(ser);
+        return deserialize(_kryoInput);
+    }
+
+    public Tuple deserialize(byte[] ser, int offset, int count) {
+        _kryoInput.setBuffer(ser, offset, count);
+        return deserialize(_kryoInput);
+    }
+
+    public Tuple deserialize(Input input) {
         int targetTaskId = 0;
+        long timeStamp = 0L;
         int taskId = 0;
         int streamId = 0;
         String componentName = null;
         String streamName = null;
         MessageId id = null;
-        
+
         try {
-            
-            _kryoInput.setBuffer(ser);
-            
-            targetTaskId = _kryoInput.readInt();
-            taskId = _kryoInput.readInt(true);
-            streamId = _kryoInput.readInt(true);
+            targetTaskId = input.readInt();
+            timeStamp = input.readLong();
+            taskId = input.readInt(true);
+            streamId = input.readInt(true);
             componentName = _context.getComponentId(taskId);
             streamName = _ids.getStreamName(componentName, streamId);
-            id = MessageId.deserialize(_kryoInput);
-            List<Object> values = _kryo.deserializeFrom(_kryoInput);
+            id = MessageId.deserialize(input);
+            List<Object> values = _kryo.deserializeFrom(input);
             TupleImplExt tuple = new TupleImplExt(_context, values, taskId, streamName, id);
             tuple.setTargetTaskId(targetTaskId);
+            tuple.setCreationTimeStamp(timeStamp);
             return tuple;
         } catch (Throwable e) {
             StringBuilder sb = new StringBuilder();
-            
+
             sb.append("Deserialize error:");
             sb.append("targetTaskId:").append(targetTaskId);
+            sb.append(",creationTimeStamp:").append(timeStamp);
             sb.append(",taskId:").append(taskId);
             sb.append(",streamId:").append(streamId);
             sb.append(",componentName:").append(componentName);
             sb.append(",streamName:").append(streamName);
             sb.append(",MessageId").append(id);
-            
+
             LOG.info(sb.toString(), e);
             throw new RuntimeException(e);
         }
@@ -99,15 +109,14 @@ public class KryoTupleDeserializer implements ITupleDeserializer {
         
         int offset = 0;
         while(offset < ser.length) {
-            int tupleSize = Utils.readIntFromByteArray(ser, offset);
+            _kryoInput.setBuffer(ser, offset, offset + 4);
+            int tupleSize = _kryoInput.readInt();
             offset += 4;
 
-            ByteBuffer buff = ByteBuffer.allocate(tupleSize);
-            buff.put(ser, offset, tupleSize);
-            ret.addToBatch(deserialize(buff.array()));
+            ret.addToBatch(deserialize(ser, offset, offset + tupleSize));
             offset += tupleSize;
         }
-        
+
         return ret;
     }
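
The interesting part of this hunk is the wire format: a fixed 4-byte target task id, then the new 8-byte creation timestamp, then variable-length ints for the source task and stream ids, followed by the message id and values. A sketch of just that header, written and read back with Kryo's Input/Output (field values illustrative; the payload is elided):

    import com.esotericsoftware.kryo.io.Input;
    import com.esotericsoftware.kryo.io.Output;

    public class TupleHeaderDemo {
        public static void main(String[] args) {
            Output out = new Output(64, -1);
            out.writeInt(7);                           // targetTaskId, fixed 4 bytes
            out.writeLong(System.currentTimeMillis()); // creation timestamp, the new field
            out.writeInt(3, true);                     // source taskId, variable-length
            out.writeInt(1, true);                     // stream id, variable-length

            Input in = new Input(out.toBytes());
            System.out.println("target=" + in.readInt()
                    + " ts=" + in.readLong()
                    + " task=" + in.readInt(true)
                    + " stream=" + in.readInt(true));
        }
    }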
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/serialization/KryoTupleSerializer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/serialization/KryoTupleSerializer.java b/jstorm-core/src/main/java/backtype/storm/serialization/KryoTupleSerializer.java
index 1c53d5d..e49e58b 100644
--- a/jstorm-core/src/main/java/backtype/storm/serialization/KryoTupleSerializer.java
+++ b/jstorm-core/src/main/java/backtype/storm/serialization/KryoTupleSerializer.java
@@ -33,30 +33,35 @@ public class KryoTupleSerializer implements ITupleSerializer {
     KryoValuesSerializer _kryo;
     SerializationFactory.IdDictionary _ids;
     Output _kryoOut;
-    
+
     public KryoTupleSerializer(final Map conf, final GeneralTopologyContext context) {
         _kryo = new KryoValuesSerializer(conf);
         _kryoOut = new Output(2000, 2000000000);
         _ids = new SerializationFactory.IdDictionary(context.getRawTopology());
     }
-    
+
+    public byte[] serialize(Tuple tuple) {
+        _kryoOut.clear();
+        serializeTuple(_kryoOut, tuple);
+        return _kryoOut.toBytes();
+    }
     /**
      * @@@ in the future, it will skip serializing 'targetTask' based on a flag check
-     * @see backtype.storm.serialization.ITupleSerializer#serialize(int, backtype.storm.tuple.Tuple)
+     * @see ITupleSerializer#serialize(int, Tuple)
      */
-    public byte[] serialize(Tuple tuple) {
+    private void serializeTuple(Output output, Tuple tuple) {
         try {
-            
-            _kryoOut.clear();
             if (tuple instanceof TupleExt) {
-                _kryoOut.writeInt(((TupleExt) tuple).getTargetTaskId());
+                output.writeInt(((TupleExt) tuple).getTargetTaskId());
+                output.writeLong(((TupleExt) tuple).getCreationTimeStamp());
             }
-            
-            _kryoOut.writeInt(tuple.getSourceTask(), true);
-            _kryoOut.writeInt(_ids.getStreamId(tuple.getSourceComponent(), tuple.getSourceStreamId()), true);
-            tuple.getMessageId().serialize(_kryoOut);
-            _kryo.serializeInto(tuple.getValues(), _kryoOut);
-            return _kryoOut.toBytes();
+
+            output.writeInt(tuple.getSourceTask(), true);
+            output.writeInt(
+                    _ids.getStreamId(tuple.getSourceComponent(),
+                            tuple.getSourceStreamId()), true);
+            tuple.getMessageId().serialize(output);
+            _kryo.serializeInto(tuple.getValues(), output);
         } catch (IOException e) {
             throw new RuntimeException(e);
         }
@@ -66,31 +71,28 @@ public class KryoTupleSerializer implements ITupleSerializer {
         if (batch == null || batch.currBatchSize() == 0)
             return null;
 
-        byte[][] bytes = new byte[batch.currBatchSize()][];
-        int i = 0, len = 0;
+        _kryoOut.clear();
         for (Tuple tuple : batch.getTuples()) {
-            /* byte structure: 
+            /* 
+             * byte structure: 
              * 1st tuple: length + tuple bytes
              * 2nd tuple: length + tuple bytes
              * ......
              */
-            bytes[i] = serialize(tuple);
-            len += bytes[i].length;
-            // add length bytes (int)
-            len += 4;
-            i++;
-        }
-
-        byte[] ret = new byte[len];
-        int index = 0;
-        for (i = 0; i < bytes.length; i++) {
-            Utils.writeIntToByteArray(ret, index, bytes[i].length);
-            index += 4;
-            for (int j = 0; j < bytes[i].length; j++) {
-                ret[index++] = bytes[i][j];
-            }
+            int startPos = _kryoOut.position();
+            
+            // Set initial value of tuple length, which will be updated accordingly after serialization
+            _kryoOut.writeInt(0);
+            
+            serializeTuple(_kryoOut, tuple);
+            
+            // Update the tuple length
+            int endPos = _kryoOut.position();
+            _kryoOut.setPosition(startPos);
+            _kryoOut.writeInt(endPos - startPos - 4);
+            _kryoOut.setPosition(endPos);
         }
-        return ret;
+        return _kryoOut.toBytes();
     }
 
     public static byte[] serialize(int targetTask) {
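
The batch hunk above swaps the old serialize-then-copy loop for a write-then-backpatch pattern: reserve four bytes, serialize the tuple in place, then rewind and fill in the real length. The same trick in isolation, using the Output calls the hunk relies on (the string payload is a stand-in for serializeTuple):

    import com.esotericsoftware.kryo.io.Output;

    public class LengthPrefixDemo {
        public static void main(String[] args) {
            Output out = new Output(64, -1);

            int startPos = out.position();
            out.writeInt(0);                 // placeholder for the frame length
            out.writeString("hello tuple");  // stand-in for serializeTuple(out, tuple)

            int endPos = out.position();
            out.setPosition(startPos);
            out.writeInt(endPos - startPos - 4); // backpatch the real payload length
            out.setPosition(endPos);

            byte[] framed = out.toBytes();
            System.out.println("total=" + framed.length + " payload=" + (framed.length - 4));
        }
    }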

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/serialization/KryoValuesDeserializer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/serialization/KryoValuesDeserializer.java b/jstorm-core/src/main/java/backtype/storm/serialization/KryoValuesDeserializer.java
index 209ae53..45a7376 100755
--- a/jstorm-core/src/main/java/backtype/storm/serialization/KryoValuesDeserializer.java
+++ b/jstorm-core/src/main/java/backtype/storm/serialization/KryoValuesDeserializer.java
@@ -28,22 +28,22 @@ import java.util.Map;
 public class KryoValuesDeserializer {
     Kryo _kryo;
     Input _kryoInput;
-    
+
     public KryoValuesDeserializer(Map conf) {
         _kryo = SerializationFactory.getKryo(conf);
         _kryoInput = new Input(1);
     }
-    
+
     public List<Object> deserializeFrom(Input input) {
-    	ListDelegate delegate = (ListDelegate) _kryo.readObject(input, ListDelegate.class);
-   	return delegate.getDelegate();
+        ListDelegate delegate = (ListDelegate) _kryo.readObject(input, ListDelegate.class);
+        return delegate.getDelegate();
     }
-    
+
     public List<Object> deserialize(byte[] ser) throws IOException {
         _kryoInput.setBuffer(ser);
         return deserializeFrom(_kryoInput);
     }
-    
+
     public Object deserializeObject(byte[] ser) throws IOException {
         _kryoInput.setBuffer(ser);
         return _kryo.readClassAndObject(_kryoInput);

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/serialization/KryoValuesSerializer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/serialization/KryoValuesSerializer.java b/jstorm-core/src/main/java/backtype/storm/serialization/KryoValuesSerializer.java
index c4a2f71..d53f1bd 100755
--- a/jstorm-core/src/main/java/backtype/storm/serialization/KryoValuesSerializer.java
+++ b/jstorm-core/src/main/java/backtype/storm/serialization/KryoValuesSerializer.java
@@ -28,28 +28,28 @@ public class KryoValuesSerializer {
     Kryo _kryo;
     ListDelegate _delegate;
     Output _kryoOut;
-    
+
     public KryoValuesSerializer(Map conf) {
         _kryo = SerializationFactory.getKryo(conf);
         _delegate = new ListDelegate();
         _kryoOut = new Output(2000, 2000000000);
     }
-    
+
     public void serializeInto(List<Object> values, Output out) throws IOException {
         // this ensures that list of values is always written the same way, regardless
-        // of whether it's a java collection or one of clojure's persistent collections 
+        // of whether it's a java collection or one of clojure's persistent collections
         // (which have different serializers)
         // Doing this lets us deserialize as ArrayList and avoid writing the class here
         _delegate.setDelegate(values);
-        _kryo.writeObject(out, _delegate); 
+        _kryo.writeObject(out, _delegate);
     }
-    
+
     public byte[] serialize(List<Object> values) throws IOException {
         _kryoOut.clear();
         serializeInto(values, _kryoOut);
         return _kryoOut.toBytes();
     }
-    
+
     public byte[] serializeObject(Object obj) {
         _kryoOut.clear();
         _kryo.writeClassAndObject(_kryoOut, obj);

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/serialization/SerializableSerializer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/serialization/SerializableSerializer.java b/jstorm-core/src/main/java/backtype/storm/serialization/SerializableSerializer.java
index 376ad2a..b60e8b8 100755
--- a/jstorm-core/src/main/java/backtype/storm/serialization/SerializableSerializer.java
+++ b/jstorm-core/src/main/java/backtype/storm/serialization/SerializableSerializer.java
@@ -30,7 +30,7 @@ import java.io.ObjectOutputStream;
 import org.apache.commons.io.input.ClassLoaderObjectInputStream;
 
 public class SerializableSerializer extends Serializer<Object> {
-    
+
     @Override
     public void write(Kryo kryo, Output output, Object object) {
         ByteArrayOutputStream bos = new ByteArrayOutputStream();
@@ -45,7 +45,7 @@ public class SerializableSerializer extends Serializer<Object> {
         output.writeInt(ser.length);
         output.writeBytes(ser);
     }
-    
+
     @Override
     public Object read(Kryo kryo, Input input, Class c) {
         int len = input.readInt();

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/serialization/SerializationFactory.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/serialization/SerializationFactory.java b/jstorm-core/src/main/java/backtype/storm/serialization/SerializationFactory.java
index ef859be..ebf6158 100755
--- a/jstorm-core/src/main/java/backtype/storm/serialization/SerializationFactory.java
+++ b/jstorm-core/src/main/java/backtype/storm/serialization/SerializationFactory.java
@@ -21,7 +21,6 @@ import backtype.storm.Config;
 import backtype.storm.generated.ComponentCommon;
 import backtype.storm.generated.StormTopology;
 import backtype.storm.serialization.types.ArrayListSerializer;
-import backtype.storm.serialization.types.ListDelegateSerializer;
 import backtype.storm.serialization.types.HashMapSerializer;
 import backtype.storm.serialization.types.HashSetSerializer;
 import backtype.storm.transactional.TransactionAttempt;
@@ -33,27 +32,22 @@ import carbonite.JavaBridge;
 import com.esotericsoftware.kryo.Kryo;
 import com.esotericsoftware.kryo.Serializer;
 import com.esotericsoftware.kryo.serializers.DefaultSerializers.BigIntegerSerializer;
-import java.math.BigInteger;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.math.BigInteger;
+import java.util.*;
+
 public class SerializationFactory {
     public static final Logger LOG = LoggerFactory.getLogger(SerializationFactory.class);
-    
+
     public static Kryo getKryo(Map conf) {
         IKryoFactory kryoFactory = (IKryoFactory) Utils.newInstance((String) conf.get(Config.TOPOLOGY_KRYO_FACTORY));
         Kryo k = kryoFactory.getKryo(conf);
         if (WorkerClassLoader.getInstance() != null)
             k.setClassLoader(WorkerClassLoader.getInstance());
         k.register(byte[].class);
-        
+
         /* tuple payload serializer is specified via configuration */
         String payloadSerializerName = (String) conf.get(Config.TOPOLOGY_TUPLE_SERIALIZER);
         try {
@@ -63,7 +57,7 @@ public class SerializationFactory {
         } catch (ClassNotFoundException ex) {
             throw new RuntimeException(ex);
         }
-        
+
         k.register(ArrayList.class, new ArrayListSerializer());
         k.register(HashMap.class, new HashMapSerializer());
         k.register(HashSet.class, new HashSetSerializer());
@@ -78,17 +72,17 @@ public class SerializationFactory {
         } catch (Exception e) {
             throw new RuntimeException(e);
         }
-        
+
         Map<String, String> registrations = normalizeKryoRegister(conf);
-        
+
         kryoFactory.preRegister(k, conf);
-        
+
         boolean skipMissing = (Boolean) conf.get(Config.TOPOLOGY_SKIP_MISSING_KRYO_REGISTRATIONS);
         for (String klassName : registrations.keySet()) {
             String serializerClassName = registrations.get(klassName);
             try {
                 Class klass = Class.forName(klassName, true, k.getClassLoader());
-                
+
                 Class serializerClass = null;
                 if (serializerClassName != null)
                     serializerClass = Class.forName(serializerClassName, true, k.getClassLoader());
@@ -105,9 +99,9 @@ public class SerializationFactory {
                 }
             }
         }
-        
+
         kryoFactory.postRegister(k, conf);
-        
+
         if (conf.get(Config.TOPOLOGY_KRYO_DECORATORS) != null) {
             for (String klassName : (List<String>) conf.get(Config.TOPOLOGY_KRYO_DECORATORS)) {
                 try {
@@ -127,21 +121,21 @@ public class SerializationFactory {
                 }
             }
         }
-        
+
         kryoFactory.postDecorate(k, conf);
-        
+
         return k;
     }
-    
+
     public static class IdDictionary {
         Map<String, Map<String, Integer>> streamNametoId = new HashMap<String, Map<String, Integer>>();
         Map<String, Map<Integer, String>> streamIdToName = new HashMap<String, Map<Integer, String>>();
-        
+
         public IdDictionary(StormTopology topology) {
             List<String> componentNames = new ArrayList<String>(topology.get_spouts().keySet());
             componentNames.addAll(topology.get_bolts().keySet());
             componentNames.addAll(topology.get_state_spouts().keySet());
-            
+
             for (String name : componentNames) {
                 ComponentCommon common = Utils.getComponentCommon(topology, name);
                 List<String> streams = new ArrayList<String>(common.get_streams().keySet());
@@ -149,15 +143,15 @@ public class SerializationFactory {
                 streamIdToName.put(name, Utils.reverseMap(streamNametoId.get(name)));
             }
         }
-        
+
         public int getStreamId(String component, String stream) {
             return streamNametoId.get(component).get(stream);
         }
-        
+
         public String getStreamName(String component, int stream) {
             return streamIdToName.get(component).get(stream);
         }
-        
+
         private static Map<String, Integer> idify(List<String> names) {
             Collections.sort(names);
             Map<String, Integer> ret = new HashMap<String, Integer>();
@@ -169,7 +163,7 @@ public class SerializationFactory {
             return ret;
         }
     }
-    
+
     private static Serializer resolveSerializerInstance(Kryo k, Class superClass, Class<? extends Serializer> serializerClass, Map conf) {
         try {
             try {
@@ -201,7 +195,7 @@ public class SerializationFactory {
             throw new IllegalArgumentException("Unable to create serializer \"" + serializerClass.getName() + "\" for class: " + superClass.getName(), ex);
         }
     }
-    
+
     private static Map<String, String> normalizeKryoRegister(Map conf) {
         // TODO: de-duplicate this logic with the code in nimbus
         Object res = conf.get(Config.TOPOLOGY_KRYO_REGISTER);
@@ -219,7 +213,7 @@ public class SerializationFactory {
                 }
             }
         }
-        
+
         // ensure always same order for registrations with TreeMap
         return new TreeMap<String, String>(ret);
     }
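
IdDictionary works because both ends of the wire derive the same stream-name-to-id mapping independently: idify sorts the names first, so equal inputs always yield equal ids. A JDK-only sketch of that idea (the starting index and class name are illustrative):

    import java.util.*;

    public class IdifyDemo {
        static Map<String, Integer> idify(List<String> names) {
            Collections.sort(names); // stable order => stable ids on every node
            Map<String, Integer> ret = new HashMap<String, Integer>();
            int i = 1;
            for (String name : names) {
                ret.put(name, i++);
            }
            return ret;
        }

        public static void main(String[] args) {
            List<String> streams = new ArrayList<String>(Arrays.asList("default", "ack", "metrics"));
            System.out.println(idify(streams)); // e.g. ack=1, default=2, metrics=3
        }
    }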

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/serialization/ThriftSerializationDelegate.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/serialization/ThriftSerializationDelegate.java b/jstorm-core/src/main/java/backtype/storm/serialization/ThriftSerializationDelegate.java
index f5d03e4..ba37614 100755
--- a/jstorm-core/src/main/java/backtype/storm/serialization/ThriftSerializationDelegate.java
+++ b/jstorm-core/src/main/java/backtype/storm/serialization/ThriftSerializationDelegate.java
@@ -33,7 +33,7 @@ public class ThriftSerializationDelegate implements SerializationDelegate {
     @Override
     public byte[] serialize(Object object) {
         try {
-            return  new TSerializer().serialize((TBase) object);
+            return new TSerializer().serialize((TBase) object);
         } catch (TException e) {
             throw new RuntimeException(e);
         }
@@ -44,7 +44,7 @@ public class ThriftSerializationDelegate implements SerializationDelegate {
         try {
             TBase instance = (TBase) clazz.newInstance();
             new TDeserializer().deserialize(instance, bytes);
-            return (T)instance;
+            return (T) instance;
         } catch (Exception e) {
             throw new RuntimeException(e);
         }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/serialization/types/ArrayListSerializer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/serialization/types/ArrayListSerializer.java b/jstorm-core/src/main/java/backtype/storm/serialization/types/ArrayListSerializer.java
index 6b7e308..a4bac2f 100755
--- a/jstorm-core/src/main/java/backtype/storm/serialization/types/ArrayListSerializer.java
+++ b/jstorm-core/src/main/java/backtype/storm/serialization/types/ArrayListSerializer.java
@@ -23,10 +23,9 @@ import com.esotericsoftware.kryo.serializers.CollectionSerializer;
 import java.util.ArrayList;
 import java.util.Collection;
 
-
 public class ArrayListSerializer extends CollectionSerializer {
     @Override
     public Collection create(Kryo kryo, Input input, Class<Collection> type) {
         return new ArrayList();
-    }    
+    }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/serialization/types/HashMapSerializer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/serialization/types/HashMapSerializer.java b/jstorm-core/src/main/java/backtype/storm/serialization/types/HashMapSerializer.java
index 662211b..00af80d 100755
--- a/jstorm-core/src/main/java/backtype/storm/serialization/types/HashMapSerializer.java
+++ b/jstorm-core/src/main/java/backtype/storm/serialization/types/HashMapSerializer.java
@@ -23,7 +23,6 @@ import com.esotericsoftware.kryo.serializers.MapSerializer;
 import java.util.HashMap;
 import java.util.Map;
 
-
 public class HashMapSerializer extends MapSerializer {
     @Override
     public Map create(Kryo kryo, Input input, Class<Map> type) {

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/serialization/types/HashSetSerializer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/serialization/types/HashSetSerializer.java b/jstorm-core/src/main/java/backtype/storm/serialization/types/HashSetSerializer.java
index 77fc353..eb3aab0 100755
--- a/jstorm-core/src/main/java/backtype/storm/serialization/types/HashSetSerializer.java
+++ b/jstorm-core/src/main/java/backtype/storm/serialization/types/HashSetSerializer.java
@@ -23,10 +23,9 @@ import com.esotericsoftware.kryo.serializers.CollectionSerializer;
 import java.util.Collection;
 import java.util.HashSet;
 
-
 public class HashSetSerializer extends CollectionSerializer {
     @Override
     public Collection create(Kryo kryo, Input input, Class<Collection> type) {
         return new HashSet();
-    }       
+    }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/serialization/types/ListDelegateSerializer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/serialization/types/ListDelegateSerializer.java b/jstorm-core/src/main/java/backtype/storm/serialization/types/ListDelegateSerializer.java
index c71a19d..c65f16a 100755
--- a/jstorm-core/src/main/java/backtype/storm/serialization/types/ListDelegateSerializer.java
+++ b/jstorm-core/src/main/java/backtype/storm/serialization/types/ListDelegateSerializer.java
@@ -23,10 +23,9 @@ import com.esotericsoftware.kryo.serializers.CollectionSerializer;
 import backtype.storm.utils.ListDelegate;
 import java.util.Collection;
 
-
 public class ListDelegateSerializer extends CollectionSerializer {
     @Override
     public Collection create(Kryo kryo, Input input, Class<Collection> type) {
         return new ListDelegate();
-    }    
+    }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/spout/IMultiSchemableSpout.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/spout/IMultiSchemableSpout.java b/jstorm-core/src/main/java/backtype/storm/spout/IMultiSchemableSpout.java
index 5999fbb..9b837ba 100755
--- a/jstorm-core/src/main/java/backtype/storm/spout/IMultiSchemableSpout.java
+++ b/jstorm-core/src/main/java/backtype/storm/spout/IMultiSchemableSpout.java
@@ -18,6 +18,7 @@
 package backtype.storm.spout;
 
 public interface IMultiSchemableSpout {
-  MultiScheme getScheme();
-  void setScheme(MultiScheme scheme);
+    MultiScheme getScheme();
+
+    void setScheme(MultiScheme scheme);
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/spout/ISchemableSpout.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/spout/ISchemableSpout.java b/jstorm-core/src/main/java/backtype/storm/spout/ISchemableSpout.java
index df455d9..7eca980 100755
--- a/jstorm-core/src/main/java/backtype/storm/spout/ISchemableSpout.java
+++ b/jstorm-core/src/main/java/backtype/storm/spout/ISchemableSpout.java
@@ -17,8 +17,8 @@
  */
 package backtype.storm.spout;
 
-
 public interface ISchemableSpout {
-     Scheme getScheme();
-     void setScheme(Scheme scheme);
+    Scheme getScheme();
+
+    void setScheme(Scheme scheme);
 }


http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/comm/TaskSendTargets.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/comm/TaskSendTargets.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/comm/TaskSendTargets.java
index 8a55ec5..9b86458 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/comm/TaskSendTargets.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/comm/TaskSendTargets.java
@@ -35,8 +35,7 @@ import com.alibaba.jstorm.utils.JStormUtils;
 
 /**
  * 
- * tuple sending object, which get which task should tuple be send to, and
- * update statics
+ * tuple sending object, which determines which tasks a tuple should be sent to, and updates statistics
  * 
  * @author yannian/Longda
  * 
@@ -58,8 +57,7 @@ public class TaskSendTargets {
     private boolean isDebuging = false;
     private String debugIdStr;
 
-    public TaskSendTargets(Map<Object, Object> _storm_conf, String _component,
-            Map<String, Map<String, MkGrouper>> _stream_component_grouper,
+    public TaskSendTargets(Map<Object, Object> _storm_conf, String _component, Map<String, Map<String, MkGrouper>> _stream_component_grouper,
             TopologyContext _topology_context, TaskBaseMetric _task_stats) {
         this.stormConf = _storm_conf;
         this.componentId = _component;
@@ -67,17 +65,14 @@ public class TaskSendTargets {
         this.topologyContext = _topology_context;
         this.taskStats = _task_stats;
 
-        isDebuging =
-                JStormUtils.parseBoolean(stormConf.get(Config.TOPOLOGY_DEBUG),
-                        false);
+        isDebuging = JStormUtils.parseBoolean(stormConf.get(Config.TOPOLOGY_DEBUG), false);
 
         taskId = topologyContext.getThisTaskId();
         debugIdStr = " Emit from " + componentId + ":" + taskId + " ";
     }
 
     // directly send the tuple to a specific task
-    public java.util.List<Integer> get(Integer out_task_id, String stream,
-            List<Object> tuple) {
+    public List<Integer> get(Integer out_task_id, String stream, List<Object> tuple) {
 
         // in order to improve acker's speed, skip checking
         // String target_component =
@@ -92,29 +87,26 @@ public class TaskSendTargets {
         // }
 
         if (isDebuging) {
-            LOG.info(debugIdStr + stream + " to " + out_task_id + ":"
-                    + tuple.toString());
+            LOG.info(debugIdStr + stream + " to " + out_task_id + ":" + tuple.toString());
         }
 
         taskStats.send_tuple(stream, 1);
 
-        java.util.List<Integer> out_tasks = new ArrayList<Integer>();
+        List<Integer> out_tasks = new ArrayList<Integer>();
         out_tasks.add(out_task_id);
         return out_tasks;
     }
 
     // send tuple according to grouping
-    public java.util.List<Integer> get(String stream, List<Object> tuple) {
-        java.util.List<Integer> out_tasks = new ArrayList<Integer>();
+    public List<Integer> get(String stream, List<Object> tuple) {
+        List<Integer> out_tasks = new ArrayList<Integer>();
 
         // get grouper, then get which task should tuple be sent to.
-        Map<String, MkGrouper> componentCrouping =
-                streamComponentgrouper.get(stream);
+        Map<String, MkGrouper> componentCrouping = streamComponentgrouper.get(stream);
         if (componentCrouping == null) {
             // if the target component's parallelism is 0, no need to send to them
-            LOG.debug("Failed to get Grouper of " + stream + " in "
-                    + debugIdStr);
+            LOG.debug("Failed to get Grouper of " + stream + " in " + debugIdStr);
             return out_tasks;
         }
 
@@ -123,8 +115,7 @@ public class TaskSendTargets {
             MkGrouper g = ee.getValue();
 
             if (GrouperType.direct.equals(g.gettype())) {
-                throw new IllegalArgumentException(
-                        "Cannot do regular emit to direct stream");
+                throw new IllegalArgumentException("Cannot do regular emit to direct stream");
             }
 
             out_tasks.addAll(g.grouper(tuple));
@@ -133,8 +124,7 @@ public class TaskSendTargets {
 
         if (isDebuging) {
 
-            LOG.info(debugIdStr + stream + " to " + out_tasks + ":"
-                    + tuple.toString());
+            LOG.info(debugIdStr + stream + " to " + out_tasks + ":" + tuple.toString());
         }
 
         int num_out_tasks = out_tasks.size();
@@ -144,8 +134,7 @@ public class TaskSendTargets {
         return out_tasks;
     }
 
-    public void updateStreamCompGrouper(
-            Map<String, Map<String, MkGrouper>> streamComponentgrouper) {
+    public void updateStreamCompGrouper(Map<String, Map<String, MkGrouper>> streamComponentgrouper) {
         this.streamComponentgrouper = streamComponentgrouper;
     }
 }
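
The dispatch above is a two-level lookup: stream name, then each subscribed downstream component's grouper, with every grouper contributing target task ids. A JDK-only sketch of that shape (Grouper is a stand-in for JStorm's MkGrouper, and the toy grouper is illustrative):

    import java.util.*;

    public class DispatchSketch {
        interface Grouper { List<Integer> grouper(List<Object> tuple); }

        static List<Integer> get(Map<String, Map<String, Grouper>> byStream, String stream, List<Object> tuple) {
            List<Integer> outTasks = new ArrayList<Integer>();
            Map<String, Grouper> byComponent = byStream.get(stream);
            if (byComponent == null) {
                return outTasks; // no consumers of this stream, nothing to send
            }
            for (Grouper g : byComponent.values()) {
                outTasks.addAll(g.grouper(tuple));
            }
            return outTasks;
        }

        public static void main(String[] args) {
            Map<String, Map<String, Grouper>> m = new HashMap<String, Map<String, Grouper>>();
            m.put("default", Collections.<String, Grouper> singletonMap("bolt-a", new Grouper() {
                public List<Integer> grouper(List<Object> tuple) {
                    // toy "shuffle": route on the first field's hash
                    return Arrays.asList(Math.abs(tuple.get(0).hashCode()) % 2 == 0 ? 2 : 3);
                }
            }));
            System.out.println(get(m, "default", Arrays.<Object> asList("k")));
        }
    }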

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/comm/TupleInfo.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/comm/TupleInfo.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/comm/TupleInfo.java
index a6a3406..dc2a2bf 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/comm/TupleInfo.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/comm/TupleInfo.java
@@ -66,8 +66,7 @@ public class TupleInfo implements Serializable {
 
     @Override
     public String toString() {
-        return ToStringBuilder.reflectionToString(this,
-                ToStringStyle.SHORT_PREFIX_STYLE);
+        return ToStringBuilder.reflectionToString(this, ToStringStyle.SHORT_PREFIX_STYLE);
     }
 
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/comm/UnanchoredSend.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/comm/UnanchoredSend.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/comm/UnanchoredSend.java
index 72c4061..6b0ed2d 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/comm/UnanchoredSend.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/comm/UnanchoredSend.java
@@ -32,11 +32,9 @@ import com.alibaba.jstorm.task.TaskTransfer;
  */
 
 public class UnanchoredSend {
-    public static void send(TopologyContext topologyContext,
-            TaskSendTargets taskTargets, TaskTransfer transfer_fn,
-            String stream, List<Object> values) {
+    public static void send(TopologyContext topologyContext, TaskSendTargets taskTargets, TaskTransfer transfer_fn, String stream, List<Object> values) {
 
-        java.util.List<Integer> tasks = taskTargets.get(stream, values);
+        List<Integer> tasks = taskTargets.get(stream, values);
         if (tasks.size() == 0) {
             return;
         }
@@ -44,8 +42,7 @@ public class UnanchoredSend {
         Integer taskId = topologyContext.getThisTaskId();
 
         for (Integer task : tasks) {
-            TupleImplExt tup =
-                    new TupleImplExt(topologyContext, values, taskId, stream);
+            TupleImplExt tup = new TupleImplExt(topologyContext, values, taskId, stream);
             tup.setTargetTaskId(task);
 
             transfer_fn.transfer(tup);
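
UnanchoredSend builds each tuple without a MessageId, which is what makes the send unanchored: the acker never learns about these tuples, so this path suits system traffic such as the ack/fail streams seen later in BoltCollector. A standalone sketch of the loop's shape (stand-in types and made-up values, not JStorm API):

    import java.util.Arrays;
    import java.util.List;

    // Stand-in for the unanchored send loop: one tuple per target task,
    // built with no message id, so the acker never tracks it.
    public class UnanchoredSendSketch {
        static class SketchTuple {
            final int targetTask; final String stream; final List<Object> values;
            SketchTuple(int targetTask, String stream, List<Object> values) {
                this.targetTask = targetTask; this.stream = stream; this.values = values;
                // note: no MessageId field at all -> invisible to the acker
            }
        }

        public static void main(String[] args) {
            List<Integer> tasks = Arrays.asList(3, 4); // as returned by TaskSendTargets.get(stream, values)
            for (Integer task : tasks) {
                SketchTuple tup = new SketchTuple(task, "__acker_ack", Arrays.asList((Object) 123L, 0L));
                System.out.println("transfer -> task " + tup.targetTask + " on " + tup.stream + " " + tup.values);
            }
        }
    }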

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/error/ITaskReportErr.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/error/ITaskReportErr.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/error/ITaskReportErr.java
index d0d70be..2475ede 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/error/ITaskReportErr.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/error/ITaskReportErr.java
@@ -25,4 +25,6 @@ package com.alibaba.jstorm.task.error;
  */
 public interface ITaskReportErr {
     public void report(Throwable error);
+
+    public void report(String error);
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/error/TaskErrorRunable.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/error/TaskErrorRunable.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/error/TaskErrorRunable.java
index d460e5a..1b41037 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/error/TaskErrorRunable.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/error/TaskErrorRunable.java
@@ -20,8 +20,7 @@ package com.alibaba.jstorm.task.error;
 import com.alibaba.jstorm.callback.RunnableCallback;
 
 /**
- * The callback will be called, when task occur error It just call
- * TaskReportErrorAndDie
+ * This callback is invoked when a task error occurs; it just calls TaskReportErrorAndDie
  * 
  * @author yannian
  * 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/error/TaskReportError.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/error/TaskReportError.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/error/TaskReportError.java
index e7506b2..bf177f6 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/error/TaskReportError.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/error/TaskReportError.java
@@ -34,8 +34,7 @@ public class TaskReportError implements ITaskReportErr {
     private String topology_id;
     private int task_id;
 
-    public TaskReportError(StormClusterState _storm_cluster_state,
-            String _topology_id, int _task_id) {
+    public TaskReportError(StormClusterState _storm_cluster_state, String _topology_id, int _task_id) {
         this.zkCluster = _storm_cluster_state;
         this.topology_id = _topology_id;
         this.task_id = _task_id;
@@ -44,16 +43,23 @@ public class TaskReportError implements ITaskReportErr {
     @Override
     public void report(Throwable error) {
 
-        LOG.error("Report error to /ZK/taskerrors/" + topology_id + "/"
-                + task_id + "\n", error);
+        LOG.error("Report error to /ZK/taskerrors/" + topology_id + "/" + task_id + "\n", error);
         try {
             zkCluster.report_task_error(topology_id, task_id, error);
         } catch (Exception e) {
             // TODO Auto-generated catch block
-            LOG.error("Failed update error to /ZK/taskerrors/" + topology_id
-                    + "/" + task_id + "\n", e);
+            LOG.error("Failed update error to /ZK/taskerrors/" + topology_id + "/" + task_id + "\n", e);
         }
-
     }
 
+    @Override
+    public void report(String error) {
+
+        LOG.error("Report error to /ZK/taskerrors/" + topology_id + "/" + task_id + ": " + error);
+        try {
+            zkCluster.report_task_error(topology_id, task_id, error, null);
+        } catch (Exception e) {
+            LOG.error("Failed update error to /ZK/taskerrors/" + topology_id + "/" + task_id + "\n", e);
+        }
+    }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/error/TaskReportErrorAndDie.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/error/TaskReportErrorAndDie.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/error/TaskReportErrorAndDie.java
index 4f4eab3..e8596de 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/error/TaskReportErrorAndDie.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/error/TaskReportErrorAndDie.java
@@ -29,15 +29,20 @@ public class TaskReportErrorAndDie implements ITaskReportErr {
     private ITaskReportErr reporterror;
     private RunnableCallback haltfn;
 
-    public TaskReportErrorAndDie(ITaskReportErr _reporterror,
-            RunnableCallback _haltfn) {
+    public TaskReportErrorAndDie(ITaskReportErr _reporterror, RunnableCallback _haltfn) {
         this.reporterror = _reporterror;
         this.haltfn = _haltfn;
     }
 
+    // If a throwable is caught, the error is reported and the current task is shut down.
     @Override
     public void report(Throwable error) {
         this.reporterror.report(error);
         this.haltfn.run();
     }
+
+    @Override
+    public void report(String error) {
+        this.reporterror.report(error);
+    }
 }
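
Note the asymmetry the new overload introduces: report(Throwable) still runs the halt callback, while report(String) only forwards the message, so string reports are non-fatal by design. A self-contained sketch of that control flow, using stub types rather than the real reporter classes:

    // Self-contained sketch of the control flow: stub reporter and halt callback.
    public class ReportAndDieDemo {
        interface Reporter { void report(Throwable t); void report(String s); }

        public static void main(String[] args) {
            final Runnable haltFn = new Runnable() {
                public void run() { System.out.println("halting task"); }
            };
            final Reporter zk = new Reporter() {
                public void report(Throwable t) { System.out.println("to ZK: " + t); }
                public void report(String s) { System.out.println("to ZK: " + s); }
            };
            Reporter reportAndDie = new Reporter() {
                public void report(Throwable t) { zk.report(t); haltFn.run(); } // fatal path
                public void report(String s) { zk.report(s); }                  // non-fatal path
            };
            reportAndDie.report("queue is nearly full");       // task keeps running
            reportAndDie.report(new RuntimeException("boom")); // task would halt here
        }
    }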

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/BaseExecutors.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/BaseExecutors.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/BaseExecutors.java
index 3f4c18f..7e4495a 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/BaseExecutors.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/BaseExecutors.java
@@ -17,43 +17,40 @@
  */
 package com.alibaba.jstorm.task.execute;
 
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.LinkedBlockingDeque;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import backtype.storm.Config;
-import backtype.storm.serialization.KryoTupleDeserializer;
+import backtype.storm.Constants;
+import backtype.storm.task.OutputCollector;
 import backtype.storm.task.TopologyContext;
-import backtype.storm.tuple.Tuple;
 import backtype.storm.utils.DisruptorQueue;
 import backtype.storm.utils.Utils;
 import backtype.storm.utils.WorkerClassLoader;
-
-import com.alibaba.jstorm.callback.AsyncLoopRunnable;
-import com.alibaba.jstorm.callback.AsyncLoopThread;
 import com.alibaba.jstorm.callback.RunnableCallback;
 import com.alibaba.jstorm.client.ConfigExtension;
-import com.alibaba.jstorm.common.metric.Histogram;
+import com.alibaba.jstorm.common.metric.AsmGauge;
 import com.alibaba.jstorm.common.metric.QueueGauge;
 import com.alibaba.jstorm.daemon.worker.timer.RotatingMapTrigger;
+import com.alibaba.jstorm.daemon.worker.timer.TaskBatchFlushTrigger;
 import com.alibaba.jstorm.daemon.worker.timer.TaskHeartbeatTrigger;
-import com.alibaba.jstorm.metric.JStormHealthCheck;
-import com.alibaba.jstorm.metric.JStormMetrics;
-import com.alibaba.jstorm.metric.MetricDef;
+import com.alibaba.jstorm.metric.*;
 import com.alibaba.jstorm.task.Task;
 import com.alibaba.jstorm.task.TaskBaseMetric;
+import com.alibaba.jstorm.task.TaskBatchTransfer;
 import com.alibaba.jstorm.task.TaskStatus;
 import com.alibaba.jstorm.task.TaskTransfer;
 import com.alibaba.jstorm.task.error.ITaskReportErr;
 import com.alibaba.jstorm.utils.JStormServerUtils;
 import com.alibaba.jstorm.utils.JStormUtils;
-import com.lmax.disruptor.EventHandler;
 import com.lmax.disruptor.WaitStrategy;
 import com.lmax.disruptor.dsl.ProducerType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Map;
+import java.util.SortedSet;
+import java.util.TreeSet;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingDeque;
+import java.util.concurrent.TimeUnit;
 
 //import com.alibaba.jstorm.message.zeroMq.IRecvConnection;
 
@@ -67,15 +64,17 @@ import com.lmax.disruptor.dsl.ProducerType;
 public class BaseExecutors extends RunnableCallback {
     private static Logger LOG = LoggerFactory.getLogger(BaseExecutors.class);
 
-    protected final String component_id;
+    protected final String topologyId;
+    protected final String componentId;
     protected final int taskId;
     protected final String idStr;
 
     protected Map storm_conf;
-    
+
     protected final boolean isDebug;
 
     protected TopologyContext userTopologyCtx;
+    protected TopologyContext sysTopologyCtx;
     protected TaskBaseMetric task_stats;
 
     protected volatile TaskStatus taskStatus;
@@ -93,74 +92,91 @@ public class BaseExecutors extends RunnableCallback {
     protected Task task;
     protected long assignmentTs;
     protected TaskTransfer taskTransfer;
+   
+    protected JStormMetricsReporter metricsReporter;
+    
+    protected boolean isFinishInit = false;
+
+    protected RotatingMapTrigger rotatingMapTrigger;
+    protected TaskHeartbeatTrigger taskHbTrigger;
 
     // protected IntervalCheck intervalCheck = new IntervalCheck();
 
-    public BaseExecutors(Task task, TaskTransfer _transfer_fn, Map _storm_conf,
-            Map<Integer, DisruptorQueue> innerTaskTransfer,
-            TopologyContext topology_context, TopologyContext _user_context,
-            TaskBaseMetric _task_stats, TaskStatus taskStatus,
-            ITaskReportErr _report_error) {
+    public BaseExecutors(Task task) {
 
         this.task = task;
-        this.storm_conf = _storm_conf;
-
-        this.userTopologyCtx = _user_context;
-        this.task_stats = _task_stats;
-        this.taskId = topology_context.getThisTaskId();
-        this.innerTaskTransfer = innerTaskTransfer;
-        this.component_id = topology_context.getThisComponentId();
-        this.idStr = JStormServerUtils.getName(component_id, taskId);
-
-        this.taskStatus = taskStatus;
-        this.report_error = _report_error;
-
-        this.isDebug =
-                JStormUtils.parseBoolean(storm_conf.get(Config.TOPOLOGY_DEBUG),
-                        false);
-
-        message_timeout_secs =
-                JStormUtils.parseInt(
-                        storm_conf.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS),
-                        30);
-
-        int queue_size =
-                Utils.getInt(storm_conf
-                        .get(Config.TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE), 256);
-        WaitStrategy waitStrategy =
-                (WaitStrategy) JStormUtils.createDisruptorWaitStrategy(storm_conf);
-        this.exeQueue =
-                DisruptorQueue.mkInstance(idStr, ProducerType.MULTI,
-                        queue_size, waitStrategy);
+        this.storm_conf = task.getStormConf();
+
+        this.userTopologyCtx = task.getUserContext();
+        this.sysTopologyCtx = task.getTopologyContext();
+        this.task_stats = task.getTaskStats();
+        this.taskId = sysTopologyCtx.getThisTaskId();
+        this.innerTaskTransfer = task.getInnerTaskTransfer();
+        this.topologyId = sysTopologyCtx.getTopologyId();
+        this.componentId = sysTopologyCtx.getThisComponentId();
+        this.idStr = JStormServerUtils.getName(componentId, taskId);
+
+        this.taskStatus = task.getTaskStatus();
+        this.report_error = task.getReportErrorDie();
+        this.taskTransfer = task.getTaskTransfer();
+        this.metricsReporter = task.getWorkerData().getMetricsReporter();
+
+        this.isDebug = JStormUtils.parseBoolean(storm_conf.get(Config.TOPOLOGY_DEBUG), false);
+
+        message_timeout_secs = JStormUtils.parseInt(storm_conf.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS), 30);
+
+        int queue_size = Utils.getInt(storm_conf.get(Config.TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE), 256);
+        WaitStrategy waitStrategy = (WaitStrategy) JStormUtils.createDisruptorWaitStrategy(storm_conf);
+        this.exeQueue = DisruptorQueue.mkInstance(idStr, ProducerType.MULTI, queue_size, waitStrategy);
         this.exeQueue.consumerStarted();
-        this.controlQueue = new LinkedBlockingDeque<Object>(8);
+        this.controlQueue = new LinkedBlockingDeque<Object>();
 
         this.registerInnerTransfer(exeQueue);
 
-        QueueGauge exeQueueGauge =
-                new QueueGauge(idStr + MetricDef.EXECUTE_QUEUE, exeQueue);
-        JStormMetrics.registerTaskGauge(exeQueueGauge, taskId,
-                MetricDef.EXECUTE_QUEUE);
-        JStormHealthCheck.registerTaskHealthCheck(taskId,
-                MetricDef.EXECUTE_QUEUE, exeQueueGauge);
+        QueueGauge exeQueueGauge = new QueueGauge(exeQueue, idStr, MetricDef.EXECUTE_QUEUE);
+        JStormMetrics.registerTaskMetric(MetricUtils.taskMetricName(topologyId, componentId, taskId, MetricDef.EXECUTE_QUEUE, MetricType.GAUGE), new AsmGauge(
+                exeQueueGauge));
+        JStormHealthCheck.registerTaskHealthCheck(taskId, MetricDef.EXECUTE_QUEUE, exeQueueGauge);
 
-        RotatingMapTrigger rotatingMapTrigger =
-                new RotatingMapTrigger(storm_conf, idStr + "_rotating",
-                        exeQueue);
+        rotatingMapTrigger = new RotatingMapTrigger(storm_conf, idStr + "_rotating", exeQueue);
         rotatingMapTrigger.register();
-        TaskHeartbeatTrigger taskHbTrigger =
-                new TaskHeartbeatTrigger(storm_conf, idStr + "_taskHeartbeat",
-                        exeQueue, controlQueue, taskId);
+        taskHbTrigger = new TaskHeartbeatTrigger(storm_conf, idStr + "_taskHeartbeat", exeQueue, controlQueue, taskId, componentId, sysTopologyCtx, report_error);
         taskHbTrigger.register();
-
+        
         assignmentTs = System.currentTimeMillis();
         
-        this.taskTransfer = _transfer_fn;
+    }
+    
+    public void init() throws Exception {
+    	// this function will be overridden by SpoutExecutors or BoltExecutors
+        throw new RuntimeException("Should implement this function");
+    }
+    
+    public void initWrapper() {
+    	try {
+            LOG.info("{} begin to init", idStr);
+            
+            init();
+            
+            if (taskId == getMinTaskIdOfWorker()) {
+                metricsReporter.setOutputCollector(getOutputCollector());
+            }
+            
+            isFinishInit = true;
+        } catch (Throwable e) {
+            error = e;
+            LOG.error("Init error ", e);
+            report_error.report(e);
+        } finally {
+
+            LOG.info("{} initialization finished", idStr);
+            
+        }
     }
 
     @Override
     public void preRun() {
-        WorkerClassLoader.switchThreadContext();  
+        WorkerClassLoader.switchThreadContext();
     }
 
     @Override
@@ -174,28 +190,11 @@ public class BaseExecutors extends RunnableCallback {
         throw new RuntimeException("Should implement this function");
     }
 
-    // @Override
-    // public Object getResult() {
-    // if (taskStatus.isRun()) {
-    // return 0;
-    // } else if (taskStatus.isPause()) {
-    // return 0;
-    // } else if (taskStatus.isShutdown()) {
-    // this.shutdown();
-    // return -1;
-    // } else {
-    // LOG.info("Unknow TaskStatus, shutdown executing thread of " + idStr);
-    // this.shutdown();
-    // return -1;
-    // }
-    // }
-
     @Override
     public Exception error() {
         if (error == null) {
             return null;
         }
-
         return new Exception(error);
     }
 
@@ -213,12 +212,9 @@ public class BaseExecutors extends RunnableCallback {
         LOG.info("Registor inner transfer for executor thread of " + idStr);
         DisruptorQueue existInnerTransfer = innerTaskTransfer.get(taskId);
         if (existInnerTransfer != null) {
-            LOG.info("Exist inner task transfer for executing thread of "
-                    + idStr);
+            LOG.info("Exist inner task transfer for executing thread of " + idStr);
             if (existInnerTransfer != disruptorQueue) {
-                throw new RuntimeException(
-                        "Inner task transfer must be only one in executing thread of "
-                                + idStr);
+                throw new RuntimeException("Inner task transfer must be only one in executing thread of " + idStr);
             }
         }
         innerTaskTransfer.put(taskId, disruptorQueue);
@@ -229,4 +225,12 @@ public class BaseExecutors extends RunnableCallback {
         innerTaskTransfer.remove(taskId);
     }
 
+    protected int getMinTaskIdOfWorker() {
+        SortedSet<Integer> tasks = new TreeSet<Integer>(sysTopologyCtx.getThisWorkerTasks());
+        return tasks.first();
+    }
+    
+    public Object getOutputCollector() {
+    	return null;
+    }
 }
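
The dominant change above is the constructor slimming down to a Task facade while the heavyweight setup moves into init(), which initWrapper() runs lazily on the executor's own thread the first time run() is invoked; the task with the smallest id in the worker additionally hands its output collector to the metrics reporter. A standalone skeleton of the lazy-init pattern (illustrative names, not JStorm's classes):

    // Minimal lazy-init executor skeleton: construct cheaply, initialize on first run.
    abstract class LazyInitExecutor implements Runnable {
        private volatile boolean finishedInit = false;

        protected abstract void init() throws Exception; // overridden by subclasses

        protected final void initWrapper() {
            try {
                init();
                finishedInit = true;
            } catch (Throwable t) {
                // in BaseExecutors this is also reported via ITaskReportErr
                throw new RuntimeException("executor init failed", t);
            }
        }

        @Override
        public void run() {
            if (!finishedInit) {
                initWrapper();
            }
            // ... consume from the executor queue ...
        }
    }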

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/BoltCollector.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/BoltCollector.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/BoltCollector.java
index a51d09a..c4ee4ad 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/BoltCollector.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/BoltCollector.java
@@ -28,16 +28,13 @@ import java.util.Random;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import backtype.storm.Config;
-import backtype.storm.task.IOutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.tuple.MessageId;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.TupleImplExt;
-
-import com.alibaba.jstorm.common.metric.Histogram;
+import com.alibaba.jstorm.common.metric.AsmHistogram;
+import com.alibaba.jstorm.common.metric.AsmMetric;
 import com.alibaba.jstorm.metric.JStormMetrics;
 import com.alibaba.jstorm.metric.MetricDef;
+import com.alibaba.jstorm.metric.MetricType;
+import com.alibaba.jstorm.metric.MetricUtils;
+import com.alibaba.jstorm.task.Task;
 import com.alibaba.jstorm.task.TaskBaseMetric;
 import com.alibaba.jstorm.task.TaskTransfer;
 import com.alibaba.jstorm.task.acker.Acker;
@@ -48,11 +45,18 @@ import com.alibaba.jstorm.utils.JStormUtils;
 import com.alibaba.jstorm.utils.RotatingMap;
 import com.alibaba.jstorm.utils.TimeUtils;
 
+import backtype.storm.Config;
+import backtype.storm.task.IOutputCollector;
+import backtype.storm.task.TopologyContext;
+import backtype.storm.tuple.MessageId;
+import backtype.storm.tuple.Tuple;
+import backtype.storm.tuple.TupleExt;
+import backtype.storm.tuple.TupleImplExt;
+
 /**
  * bolt output interface, do emit/ack/fail
  * 
  * @author yannian/Longda
- * 
  */
 public class BoltCollector implements IOutputCollector {
     private static Logger LOG = LoggerFactory.getLogger(BoltCollector.class);
@@ -72,62 +76,56 @@ public class BoltCollector implements IOutputCollector {
 
     private Map storm_conf;
     private Integer ackerNum;
-    private Histogram timer;
+    private AsmMetric timer;
     private Random random;
-
-    public BoltCollector(int message_timeout_secs, ITaskReportErr report_error,
-            TaskSendTargets _send_fn, Map _storm_conf,
-            TaskTransfer _transfer_fn, TopologyContext _topology_context,
-            Integer task_id, RotatingMap<Tuple, Long> tuple_start_times,
-            TaskBaseMetric _task_stats) {
-
-        this.rotateTime =
-                1000L * message_timeout_secs / (Acker.TIMEOUT_BUCKET_NUM - 1);
-        this.reportError = report_error;
-        this.sendTargets = _send_fn;
-        this.storm_conf = _storm_conf;
-        this.taskTransfer = _transfer_fn;
-        this.topologyContext = _topology_context;
-        this.task_id = task_id;
-        this.task_stats = _task_stats;
-
-        this.pending_acks =
-                new RotatingMap<Tuple, Long>(Acker.TIMEOUT_BUCKET_NUM);
+    
+
+    //ITaskReportErr report_error, TaskSendTargets _send_fn, Map _storm_conf, TaskTransfer _transfer_fn,
+    //TopologyContext _topology_context, Integer task_id,  TaskBaseMetric _task_stats
+    public BoltCollector(Task task, RotatingMap<Tuple, Long> tuple_start_times, int message_timeout_secs) {
+
+        this.rotateTime = 1000L * message_timeout_secs / (Acker.TIMEOUT_BUCKET_NUM - 1);
+        this.reportError = task.getReportErrorDie();
+        this.sendTargets = task.getTaskSendTargets();
+        this.storm_conf = task.getStormConf();
+        this.taskTransfer = task.getTaskTransfer();
+        this.topologyContext = task.getTopologyContext();
+        this.task_id = task.getTaskId();
+        this.task_stats = task.getTaskStats();
+
+        this.pending_acks = new RotatingMap<Tuple, Long>(Acker.TIMEOUT_BUCKET_NUM);
         // this.pending_acks = new TimeCacheMap<Tuple,
         // Long>(message_timeout_secs,
         // Acker.TIMEOUT_BUCKET_NUM);
         this.tuple_start_times = tuple_start_times;
 
-        this.ackerNum =
-                JStormUtils.parseInt(storm_conf
-                        .get(Config.TOPOLOGY_ACKER_EXECUTORS));
+        this.ackerNum = JStormUtils.parseInt(storm_conf.get(Config.TOPOLOGY_ACKER_EXECUTORS));
 
         String componentId = topologyContext.getThisComponentId();
-        timer =
-                JStormMetrics.registerTaskHistogram(task_id,
-                        MetricDef.COLLECTOR_EMIT_TIME);
+        this.timer =
+                JStormMetrics.registerTaskMetric(
+                        MetricUtils.taskMetricName(topologyContext.getTopologyId(), componentId, task_id, MetricDef.COLLECTOR_EMIT_TIME, MetricType.HISTOGRAM),
+                        new AsmHistogram());
 
         random = new Random();
         random.setSeed(System.currentTimeMillis());
+
     }
 
     @Override
-    public List<Integer> emit(String streamId, Collection<Tuple> anchors,
-            List<Object> tuple) {
+    public List<Integer> emit(String streamId, Collection<Tuple> anchors, List<Object> tuple) {
         return boltEmit(streamId, anchors, tuple, null);
     }
 
     @Override
-    public void emitDirect(int taskId, String streamId,
-            Collection<Tuple> anchors, List<Object> tuple) {
+    public void emitDirect(int taskId, String streamId, Collection<Tuple> anchors, List<Object> tuple) {
         boltEmit(streamId, anchors, tuple, taskId);
     }
 
-    private List<Integer> boltEmit(String out_stream_id,
-            Collection<Tuple> anchors, List<Object> values, Integer out_task_id) {
-        long start = System.nanoTime();
+    private List<Integer> boltEmit(String out_stream_id, Collection<Tuple> anchors, List<Object> values, Integer out_task_id) {
+        final long start = System.nanoTime();
         try {
-            java.util.List<Integer> out_tasks = null;
+            List<Integer> out_tasks;
             if (out_task_id != null) {
                 out_tasks = sendTargets.get(out_task_id, out_stream_id, values);
             } else {
@@ -146,59 +144,49 @@ public class BoltCollector implements IOutputCollector {
                             lastRotate = now;
                         }
                         put_xor(pending_acks, a, edge_id);
-                        for (Long root_id : a.getMessageId().getAnchorsToIds()
-                                .keySet()) {
+                        for (Long root_id : a.getMessageId().getAnchorsToIds().keySet()) {
                             put_xor(anchors_to_ids, root_id, edge_id);
                         }
                     }
                 }
                 MessageId msgid = MessageId.makeId(anchors_to_ids);
-                TupleImplExt tupleExt =
-                        new TupleImplExt(topologyContext, values, task_id,
-                                out_stream_id, msgid);
+                TupleImplExt tupleExt = new TupleImplExt(topologyContext, values, task_id, out_stream_id, msgid);
                 tupleExt.setTargetTaskId(t);
 
                 taskTransfer.transfer(tupleExt);
-
             }
             return out_tasks;
         } catch (Exception e) {
             LOG.error("bolt emit", e);
         } finally {
             long end = System.nanoTime();
-            timer.update((end - start)/1000000.0d);
+            timer.update((end - start) / TimeUtils.NS_PER_US);
         }
         return new ArrayList<Integer>();
     }
 
     @Override
     public void ack(Tuple input) {
-
         if (ackerNum > 0) {
-
-            Long ack_val = Long.valueOf(0);
+            Long ack_val = 0L;
             Object pend_val = pending_acks.remove(input);
             if (pend_val != null) {
                 ack_val = (Long) (pend_val);
             }
 
-            for (Entry<Long, Long> e : input.getMessageId().getAnchorsToIds()
-                    .entrySet()) {
-
-                UnanchoredSend.send(
-                        topologyContext,
-                        sendTargets,
-                        taskTransfer,
-                        Acker.ACKER_ACK_STREAM_ID,
-                        JStormUtils.mk_list((Object) e.getKey(),
-                                JStormUtils.bit_xor(e.getValue(), ack_val)));
+            for (Entry<Long, Long> e : input.getMessageId().getAnchorsToIds().entrySet()) {
+                UnanchoredSend.send(topologyContext, sendTargets, taskTransfer, Acker.ACKER_ACK_STREAM_ID,
+                        JStormUtils.mk_list((Object) e.getKey(), JStormUtils.bit_xor(e.getValue(), ack_val)));
             }
         }
 
-        Long delta = tuple_time_delta(tuple_start_times, input);
-        if (delta != null) {
-            task_stats.bolt_acked_tuple(input.getSourceComponent(),
-                    input.getSourceStreamId(), Double.valueOf(delta));
+        Long startTime = (Long) tuple_start_times.remove(input);
+        if (startTime != null) {
+        	Long endTime = System.nanoTime();
+        	long latency = (endTime - startTime)/TimeUtils.NS_PER_US;
+        	long lifeCycle = (System.currentTimeMillis() - ((TupleExt) input).getCreationTimeStamp()) * TimeUtils.NS_PER_US;
+        	
+            task_stats.bolt_acked_tuple(input.getSourceComponent(), input.getSourceStreamId(), latency, lifeCycle);
         }
     }
 
@@ -207,17 +195,12 @@ public class BoltCollector implements IOutputCollector {
         // if ackerNum == 0, we can just return
         if (ackerNum > 0) {
             pending_acks.remove(input);
-            for (Entry<Long, Long> e : input.getMessageId().getAnchorsToIds()
-                    .entrySet()) {
-                UnanchoredSend.send(topologyContext, sendTargets, taskTransfer,
-                        Acker.ACKER_FAIL_STREAM_ID,
-                        JStormUtils.mk_list((Object) e.getKey()));
+            for (Entry<Long, Long> e : input.getMessageId().getAnchorsToIds().entrySet()) {
+                UnanchoredSend.send(topologyContext, sendTargets, taskTransfer, Acker.ACKER_FAIL_STREAM_ID, JStormUtils.mk_list((Object) e.getKey()));
             }
         }
 
-        task_stats.bolt_failed_tuple(input.getSourceComponent(),
-                input.getSourceStreamId());
-
+        task_stats.bolt_failed_tuple(input.getSourceComponent(), input.getSourceStreamId());
     }
 
     @Override
@@ -226,21 +209,19 @@ public class BoltCollector implements IOutputCollector {
     }
 
     // Utility functions, just used here
-    public static Long tuple_time_delta(RotatingMap<Tuple, Long> start_times,
-            Tuple tuple) {
+    public static Long tuple_time_delta(RotatingMap<Tuple, Long> start_times, Tuple tuple) {
         Long start_time = (Long) start_times.remove(tuple);
         if (start_time != null) {
-            return TimeUtils.time_delta_ms(start_time);
+            return (System.nanoTime() - start_time)/TimeUtils.NS_PER_US;
         }
         return null;
     }
 
-    public static void put_xor(RotatingMap<Tuple, Long> pending, Tuple key,
-            Long id) {
+    public static void put_xor(RotatingMap<Tuple, Long> pending, Tuple key, Long id) {
         // synchronized (pending) {
         Long curr = pending.get(key);
         if (curr == null) {
-            curr = Long.valueOf(0);
+            curr = 0L;
         }
         pending.put(key, JStormUtils.bit_xor(curr, id));
         // }
@@ -250,7 +231,7 @@ public class BoltCollector implements IOutputCollector {
         // synchronized (pending) {
         Long curr = pending.get(key);
         if (curr == null) {
-            curr = Long.valueOf(0);
+            curr = 0L;
         }
         pending.put(key, JStormUtils.bit_xor(curr, id));
         // }
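
The put_xor/ack path above is the standard Storm XOR trick: each tuple edge id is XORed into the value tracked per anchor, so a value that returns to zero means every edge was both emitted and acked. A tiny self-contained illustration with made-up edge ids:

    // XOR bookkeeping demo: xor-ing each edge id in on emit and out on ack
    // leaves zero exactly when everything emitted was acked.
    public class XorAckDemo {
        public static void main(String[] args) {
            long tracked = 0L;
            long edge1 = 0x9e3779b97f4a7c15L, edge2 = 0x7f4a7c159e3779b9L; // arbitrary ids

            tracked ^= edge1; // emit edge1
            tracked ^= edge2; // emit edge2
            tracked ^= edge1; // ack edge1
            tracked ^= edge2; // ack edge2

            System.out.println(tracked == 0); // true: tuple tree fully acked
        }
    }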

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/BoltExecutors.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/BoltExecutors.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/BoltExecutors.java
index 15adbf2..5c4413e 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/BoltExecutors.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/BoltExecutors.java
@@ -17,13 +17,6 @@
  */
 package com.alibaba.jstorm.task.execute;
 
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.TimeUnit;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import backtype.storm.Config;
 import backtype.storm.Constants;
 import backtype.storm.task.IBolt;
@@ -32,37 +25,48 @@ import backtype.storm.task.OutputCollector;
 import backtype.storm.task.TopologyContext;
 import backtype.storm.tuple.BatchTuple;
 import backtype.storm.tuple.Tuple;
+import backtype.storm.tuple.TupleExt;
+import backtype.storm.tuple.TupleImplExt;
+import backtype.storm.tuple.Values;
 import backtype.storm.utils.DisruptorQueue;
 import backtype.storm.utils.WorkerClassLoader;
 
 import com.alibaba.jstorm.client.ConfigExtension;
-import com.alibaba.jstorm.common.metric.Histogram;
+import com.alibaba.jstorm.cluster.Common;
+import com.alibaba.jstorm.common.metric.AsmHistogram;
+import com.alibaba.jstorm.common.metric.AsmMetric;
+import com.alibaba.jstorm.daemon.worker.timer.BackpressureCheckTrigger;
 import com.alibaba.jstorm.daemon.worker.timer.TaskBatchFlushTrigger;
 import com.alibaba.jstorm.daemon.worker.timer.TickTupleTrigger;
 import com.alibaba.jstorm.daemon.worker.timer.TimerConstants;
 import com.alibaba.jstorm.daemon.worker.timer.TimerTrigger;
 import com.alibaba.jstorm.metric.JStormMetrics;
+import com.alibaba.jstorm.metric.JStormMetricsReporter;
 import com.alibaba.jstorm.metric.MetricDef;
-import com.alibaba.jstorm.task.Task;
-import com.alibaba.jstorm.task.TaskBaseMetric;
-import com.alibaba.jstorm.task.TaskStatus;
-import com.alibaba.jstorm.task.TaskTransfer;
-import com.alibaba.jstorm.task.TaskBatchTransfer;
+import com.alibaba.jstorm.metric.MetricType;
+import com.alibaba.jstorm.metric.MetricUtils;
+import com.alibaba.jstorm.task.*;
 import com.alibaba.jstorm.task.acker.Acker;
+import com.alibaba.jstorm.task.backpressure.BackpressureTrigger;
 import com.alibaba.jstorm.task.comm.TaskSendTargets;
 import com.alibaba.jstorm.task.error.ITaskReportErr;
-import com.alibaba.jstorm.task.heartbeat.TaskHeartbeatRunable;
 import com.alibaba.jstorm.utils.JStormUtils;
 import com.alibaba.jstorm.utils.RotatingMap;
 import com.alibaba.jstorm.utils.TimeUtils;
 import com.lmax.disruptor.EventHandler;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.TimeUnit;
+
 /**
- * 
  * BoltExecutor
  * 
  * @author yannian/Longda
- * 
  */
 public class BoltExecutors extends BaseExecutors implements EventHandler {
     private static Logger LOG = LoggerFactory.getLogger(BoltExecutors.class);
@@ -76,89 +80,56 @@ public class BoltExecutors extends BaseExecutors implements EventHandler {
     // internal outputCollector is BoltCollector
     private OutputCollector outputCollector;
 
-    private Histogram boltExeTimer;
-
-    public BoltExecutors(Task task, IBolt _bolt, TaskTransfer _transfer_fn,
-            Map<Integer, DisruptorQueue> innerTaskTransfer, Map storm_conf,
-            TaskSendTargets _send_fn, TaskStatus taskStatus,
-            TopologyContext sysTopologyCxt, TopologyContext userTopologyCxt,
-            TaskBaseMetric _task_stats, ITaskReportErr _report_error) {
+    private AsmMetric boltExeTimer;
+    private volatile double exeTime;
 
-        super(task, _transfer_fn, storm_conf, innerTaskTransfer,
-                sysTopologyCxt, userTopologyCxt, _task_stats, taskStatus,
-                _report_error);
+    private BackpressureTrigger backpressureTrigger;
+    private boolean isSystemBolt;
 
-        this.bolt = _bolt;
+    //, IBolt _bolt, TaskTransfer _transfer_fn, Map<Integer, DisruptorQueue> innerTaskTransfer, Map storm_conf,
+    //TaskSendTargets _send_fn, TaskStatus taskStatus, TopologyContext sysTopologyCxt, TopologyContext userTopologyCxt, TaskBaseMetric _task_stats,
+    //ITaskReportErr _report_error, JStormMetricsReporter metricReport
+    public BoltExecutors(Task task) {
 
-        // create TimeCacheMap
+        super(task);
 
-        this.tuple_start_times =
-                new RotatingMap<Tuple, Long>(Acker.TIMEOUT_BUCKET_NUM);
+        this.bolt = (IBolt)task.getTaskObj();
 
-        this.ackerNum =
-                JStormUtils.parseInt(storm_conf
-                        .get(Config.TOPOLOGY_ACKER_EXECUTORS));
-
-        // don't use TimeoutQueue for recv_tuple_queue,
-        // then other place should check the queue size
-        // TimeCacheQueue.DefaultExpiredCallback<Tuple> logExpireCb = new
-        // TimeCacheQueue.DefaultExpiredCallback<Tuple>(
-        // idStr);
-        // this.recv_tuple_queue = new
-        // TimeCacheQueue<Tuple>(message_timeout_secs,
-        // TimeCacheQueue.DEFAULT_NUM_BUCKETS, logExpireCb);
+        // create TimeCacheMap
+        this.tuple_start_times = new RotatingMap<Tuple, Long>(Acker.TIMEOUT_BUCKET_NUM);
+        this.ackerNum = JStormUtils.parseInt(storm_conf.get(Config.TOPOLOGY_ACKER_EXECUTORS));
 
         // create BoltCollector
-        IOutputCollector output_collector =
-                new BoltCollector(message_timeout_secs, _report_error,
-                        _send_fn, storm_conf, _transfer_fn, sysTopologyCxt,
-                        taskId, tuple_start_times, _task_stats);
-
+        IOutputCollector output_collector = new BoltCollector(task, tuple_start_times, message_timeout_secs);
         outputCollector = new OutputCollector(output_collector);
+        taskHbTrigger.setBoltOutputCollector(outputCollector);
 
-        boltExeTimer =
-                JStormMetrics.registerTaskHistogram(taskId,
-                        MetricDef.EXECUTE_TIME);
+        String metricName = MetricUtils.taskMetricName(topologyId, componentId, taskId, MetricDef.EXECUTE_TIME, MetricType.HISTOGRAM);
+        this.boltExeTimer = JStormMetrics.registerTaskMetric(metricName, new AsmHistogram());
 
-        Object tickFrequence =
-                storm_conf.get(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS);
+        Object tickFrequence = storm_conf.get(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS);
         if (tickFrequence != null) {
             Integer frequence = JStormUtils.parseInt(tickFrequence);
-            TickTupleTrigger tickTupleTrigger =
-                    new TickTupleTrigger(sysTopologyCxt, frequence, idStr
-                            + Constants.SYSTEM_TICK_STREAM_ID, exeQueue);
+            TickTupleTrigger tickTupleTrigger = new TickTupleTrigger(sysTopologyCtx, frequence, idStr + Constants.SYSTEM_TICK_STREAM_ID, exeQueue);
             tickTupleTrigger.register();
         }
-
-        if (ConfigExtension.isTaskBatchTuple(storm_conf)) {
-            TaskBatchFlushTrigger batchFlushTrigger =
-                    new TaskBatchFlushTrigger(5, idStr
-                            + Constants.SYSTEM_COMPONENT_ID,
-                            (TaskBatchTransfer) _transfer_fn);
-            batchFlushTrigger.register(TimeUnit.MILLISECONDS);
-        }
-
-        try {
-            // do prepare
-            WorkerClassLoader.switchThreadContext();
-
-            // Method method = IBolt.class.getMethod("prepare", new Class[]
-            // {Map.class, TopologyContext.class,
-            // OutputCollector.class});
-            // method.invoke(bolt, new Object[] {storm_conf, userTopologyCxt,
-            // outputCollector});
-            bolt.prepare(storm_conf, userTopologyCtx, outputCollector);
-
-        } catch (Throwable e) {
-            error = e;
-            LOG.error("bolt prepare error ", e);
-            report_error.report(e);
-        } finally {
-            WorkerClassLoader.restoreThreadContext();
+       
+        
+        isSystemBolt = Common.isSystemComponent(componentId);
+        if (isSystemBolt == false) {
+            backpressureTrigger = new BackpressureTrigger(task, this, storm_conf, outputCollector);
+            int backpressureCheckFrequence = ConfigExtension.getBackpressureCheckIntervl(storm_conf);
+            BackpressureCheckTrigger backpressureCheckTrigger =
+                    new BackpressureCheckTrigger(30, backpressureCheckFrequence, idStr + " backpressure check trigger", backpressureTrigger);
+            backpressureCheckTrigger.register(TimeUnit.MILLISECONDS);
         }
 
         LOG.info("Successfully create BoltExecutors " + idStr);
-
+    }
+    
+    @Override
+    public void init() {
+    	bolt.prepare(storm_conf, userTopologyCtx, outputCollector);
     }
 
     @Override
@@ -168,10 +139,14 @@ public class BoltExecutors extends BaseExecutors implements EventHandler {
 
     @Override
     public void run() {
+    	if (isFinishInit == false) {
+    		initWrapper();
+    	}
         while (taskStatus.isShutdown() == false) {
             try {
+                //if (backpressureTrigger != null)
+                //    backpressureTrigger.checkAndTrigger();
                 exeQueue.consumeBatchWhenAvailable(this);
-
                 processControlEvent();
             } catch (Throwable e) {
                 if (taskStatus.isShutdown() == false) {
@@ -182,20 +157,19 @@ public class BoltExecutors extends BaseExecutors implements EventHandler {
     }
 
     @Override
-    public void onEvent(Object event, long sequence, boolean endOfBatch)
-            throws Exception {
-
+    public void onEvent(Object event, long sequence, boolean endOfBatch) throws Exception {
         if (event == null) {
             return;
         }
 
         long start = System.nanoTime();
-
         try {
             if (event instanceof Tuple) {
+                processControlEvent();
                 processTupleEvent((Tuple) event);
             } else if (event instanceof BatchTuple) {
                 for (Tuple tuple : ((BatchTuple) event).getTuples()) {
+                    processControlEvent();
                     processTupleEvent((Tuple) tuple);
                 }
             } else if (event instanceof TimerTrigger.TimerEvent) {
@@ -205,18 +179,21 @@ public class BoltExecutors extends BaseExecutors implements EventHandler {
             }
         } finally {
             long end = System.nanoTime();
-            boltExeTimer.update((end - start) / 1000000.0d);
+            exeTime = (end - start) / TimeUtils.NS_PER_US;
+            boltExeTimer.update(exeTime);
         }
     }
 
     private void processTupleEvent(Tuple tuple) {
-        task_stats.recv_tuple(tuple.getSourceComponent(),
-                tuple.getSourceStreamId());
-
-        tuple_start_times.put(tuple, System.currentTimeMillis());
+        task_stats.recv_tuple(tuple.getSourceComponent(), tuple.getSourceStreamId());
+        tuple_start_times.put(tuple, System.nanoTime());
 
         try {
-            bolt.execute(tuple);
+            if (isSystemBolt == false && tuple.getSourceStreamId().equals(Common.TOPOLOGY_MASTER_CONTROL_STREAM_ID)) {
+                backpressureTrigger.handle(tuple);
+            } else {
+                bolt.execute(tuple);
+            }
         } catch (Throwable e) {
             error = e;
             LOG.error("bolt execute error ", e);
@@ -226,11 +203,12 @@ public class BoltExecutors extends BaseExecutors implements EventHandler {
         if (ackerNum == 0) {
             // only when acker is disable
             // get tuple process latency
-            Long start_time = (Long) tuple_start_times.remove(tuple);
-            if (start_time != null) {
-                Long delta = TimeUtils.time_delta_ms(start_time);
-                task_stats.bolt_acked_tuple(tuple.getSourceComponent(),
-                        tuple.getSourceStreamId(), Double.valueOf(delta));
+            Long startTime = (Long) tuple_start_times.remove(tuple);
+            if (startTime != null) {
+                Long endTime = System.nanoTime();
+                long latency = (endTime - startTime)/TimeUtils.NS_PER_US;
+                long lifeCycle  = (System.currentTimeMillis() - ((TupleExt) tuple).getCreationTimeStamp()) * TimeUtils.NS_PER_US;
+                task_stats.bolt_acked_tuple(tuple.getSourceComponent(), tuple.getSourceStreamId(), latency, lifeCycle);
             }
         }
     }
@@ -244,8 +222,7 @@ public class BoltExecutors extends BaseExecutors implements EventHandler {
                 // only when acker is enable
                 for (Entry<Tuple, Long> entry : timeoutMap.entrySet()) {
                     Tuple input = entry.getKey();
-                    task_stats.bolt_failed_tuple(input.getSourceComponent(),
-                            input.getSourceStreamId());
+                    task_stats.bolt_failed_tuple(input.getSourceComponent(), input.getSourceStreamId());
                 }
             }
             break;
@@ -262,13 +239,11 @@ public class BoltExecutors extends BaseExecutors implements EventHandler {
             break;
         }
         case TimerConstants.TASK_HEARTBEAT: {
-            Integer taskId = (Integer) event.getMsg();
-            TaskHeartbeatRunable.updateTaskHbStats(taskId, task);
+            taskHbTrigger.setExeThreadHbTime(TimeUtils.current_time_secs());
             break;
         }
         default: {
-            LOG.warn("Receive unsupported timer event, opcode="
-                    + event.getOpCode());
+            LOG.warn("Receive unsupported timer event, opcode=" + event.getOpCode());
             break;
         }
         }
@@ -282,9 +257,17 @@ public class BoltExecutors extends BaseExecutors implements EventHandler {
                 processTimerEvent((TimerTrigger.TimerEvent) event);
                 LOG.debug("Received one event from control queue");
             } else {
-                LOG.warn("Received unknown control event, "
-                        + event.getClass().getName());
+                LOG.warn("Received unknown control event, " + event.getClass().getName());
             }
         }
     }
+    
+    public double getExecuteTime() {
+        return exeTime;
+    }
+    
+    @Override
+    public Object getOutputCollector() {
+    	return outputCollector;
+    }
 }
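
Two behavioral points in this file: tick tuples are only scheduled when topology.tick.tuple.freq.secs is set, and non-system bolts now register a periodic backpressure check; the event loop also drains the control queue before each tuple. A minimal sketch of enabling tick tuples from topology code, using the standard config key the constructor reads (the demo class name is illustrative):

    import backtype.storm.Config;

    public class TickConfigDemo {
        public static void main(String[] args) {
            // Enable a tick tuple every 10 seconds; BoltExecutors turns this
            // into a registered TickTupleTrigger for each bolt task.
            Config conf = new Config();
            conf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 10);
            System.out.println(conf);
        }
    }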

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/AckSpoutMsg.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/AckSpoutMsg.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/AckSpoutMsg.java
index d554efc..d3bc53a 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/AckSpoutMsg.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/AckSpoutMsg.java
@@ -23,10 +23,13 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import backtype.storm.spout.ISpout;
+import backtype.storm.tuple.Tuple;
+import backtype.storm.tuple.TupleExt;
 
 import com.alibaba.jstorm.client.spout.IAckValueSpout;
 import com.alibaba.jstorm.task.TaskBaseMetric;
 import com.alibaba.jstorm.task.comm.TupleInfo;
+import com.alibaba.jstorm.utils.TimeUtils;
 
 /**
  * The action after spout receive one ack tuple
@@ -38,15 +41,15 @@ public class AckSpoutMsg implements IAckMsg {
     private static Logger LOG = LoggerFactory.getLogger(AckSpoutMsg.class);
 
     private ISpout spout;
+    private Tuple tuple;
+    private TupleInfo tupleInfo;
     private Object msgId;
     private String stream;
-    private long timeStamp;
     private List<Object> values;
     private TaskBaseMetric task_stats;
     private boolean isDebug = false;
 
-    public AckSpoutMsg(ISpout _spout, TupleInfo tupleInfo,
-            TaskBaseMetric _task_stats, boolean _isDebug) {
+    public AckSpoutMsg(ISpout _spout, Tuple tuple, TupleInfo tupleInfo, TaskBaseMetric _task_stats, boolean _isDebug) {
 
         this.task_stats = _task_stats;
 
@@ -55,10 +58,11 @@ public class AckSpoutMsg implements IAckMsg {
 
         this.msgId = tupleInfo.getMessageId();
         this.stream = tupleInfo.getStream();
-        if (tupleInfo.getTimestamp() != 0) {
-            this.timeStamp = System.currentTimeMillis() - tupleInfo.getTimestamp(); 
-        }
+        
         this.values = tupleInfo.getValues();
+
+        this.tuple = tuple;
+        this.tupleInfo = tupleInfo;
     }
 
     public void run() {
@@ -73,7 +77,15 @@ public class AckSpoutMsg implements IAckMsg {
             spout.ack(msgId);
         }
 
-        task_stats.spout_acked_tuple(stream, timeStamp);
+        long latency = 0, lifeCycle = 0;
+        if (tupleInfo.getTimestamp() != 0) {
+        	long endTime = System.nanoTime();
+        	latency = (endTime - tupleInfo.getTimestamp())/TimeUtils.NS_PER_US;
+        	if (tuple != null && tuple instanceof TupleExt) {
+        		lifeCycle = (System.currentTimeMillis() - ((TupleExt) tuple).getCreationTimeStamp()) * TimeUtils.NS_PER_US;
+        	}
+        }
+        task_stats.spout_acked_tuple(stream, latency, lifeCycle);
     }
 
 }
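
Both metrics recorded on ack are normalized to microseconds: latency divides a System.nanoTime() delta by TimeUtils.NS_PER_US, and life cycle multiplies a millisecond wall-clock delta by the same constant, which works because 1 ms equals 1000 us. A self-contained check of that arithmetic, assuming NS_PER_US is 1000 as those conversions imply:

    public class LatencyUnitsDemo {
        static final long NS_PER_US = 1000L; // assumed value of TimeUtils.NS_PER_US

        public static void main(String[] args) throws InterruptedException {
            long startNs = System.nanoTime();
            long startMs = System.currentTimeMillis();
            Thread.sleep(5);

            long latencyUs = (System.nanoTime() - startNs) / NS_PER_US;           // ns -> us
            long lifeCycleUs = (System.currentTimeMillis() - startMs) * NS_PER_US; // ms -> us

            System.out.println(latencyUs + " us latency, " + lifeCycleUs + " us life cycle");
        }
    }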

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/FailSpoutMsg.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/FailSpoutMsg.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/FailSpoutMsg.java
index 7b5d37b..f570a74 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/FailSpoutMsg.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/FailSpoutMsg.java
@@ -40,8 +40,7 @@ public class FailSpoutMsg implements IAckMsg {
     private TaskBaseMetric task_stats;
     private boolean isDebug = false;
 
-    public FailSpoutMsg(Object id, ISpout _spout, TupleInfo _tupleInfo,
-            TaskBaseMetric _task_stats, boolean _isDebug) {
+    public FailSpoutMsg(Object id, ISpout _spout, TupleInfo _tupleInfo, TaskBaseMetric _task_stats, boolean _isDebug) {
         this.id = id;
         this.spout = _spout;
         this.tupleInfo = _tupleInfo;
@@ -63,8 +62,7 @@ public class FailSpoutMsg implements IAckMsg {
         task_stats.spout_failed_tuple(tupleInfo.getStream());
 
         if (isDebug) {
-            LOG.info("Failed message rootId: {}, messageId:{} : {}", id,
-                    msg_id, tupleInfo.getValues().toString());
+            LOG.info("Failed message rootId: {}, messageId:{} : {}", id, msg_id, tupleInfo.getValues().toString());
         }
     }
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/MultipleThreadSpoutExecutors.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/MultipleThreadSpoutExecutors.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/MultipleThreadSpoutExecutors.java
index a74079f..8edd3cc 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/MultipleThreadSpoutExecutors.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/MultipleThreadSpoutExecutors.java
@@ -17,19 +17,13 @@
  */
 package com.alibaba.jstorm.task.execute.spout;
 
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import backtype.storm.task.TopologyContext;
 import backtype.storm.utils.DisruptorQueue;
 import backtype.storm.utils.WorkerClassLoader;
-
 import com.alibaba.jstorm.callback.AsyncLoopRunnable;
 import com.alibaba.jstorm.callback.AsyncLoopThread;
 import com.alibaba.jstorm.callback.RunnableCallback;
+import com.alibaba.jstorm.metric.JStormMetricsReporter;
 import com.alibaba.jstorm.task.Task;
 import com.alibaba.jstorm.task.TaskBaseMetric;
 import com.alibaba.jstorm.task.TaskStatus;
@@ -39,35 +33,36 @@ import com.alibaba.jstorm.task.comm.TaskSendTargets;
 import com.alibaba.jstorm.task.comm.TupleInfo;
 import com.alibaba.jstorm.task.error.ITaskReportErr;
 import com.alibaba.jstorm.utils.RotatingMap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 /**
  * spout executor
- * 
+ * <p/>
  * All spout actions will be done here
  * 
  * @author yannian/Longda
- * 
  */
 public class MultipleThreadSpoutExecutors extends SpoutExecutors {
-    private static Logger LOG = LoggerFactory
-            .getLogger(MultipleThreadSpoutExecutors.class);
-
-    public MultipleThreadSpoutExecutors(Task task,
-            backtype.storm.spout.ISpout _spout, TaskTransfer _transfer_fn,
-            Map<Integer, DisruptorQueue> innerTaskTransfer, Map _storm_conf,
-            TaskSendTargets sendTargets, TaskStatus taskStatus,
-            TopologyContext topology_context, TopologyContext _user_context,
-            TaskBaseMetric _task_stats, ITaskReportErr _report_error) {
-        super(task, _spout, _transfer_fn, innerTaskTransfer, _storm_conf,
-                sendTargets, taskStatus, topology_context, _user_context,
-                _task_stats, _report_error);
-
-        ackerRunnableThread = new AsyncLoopThread(new AckerRunnable());
-        pending =
-                new RotatingMap<Long, TupleInfo>(Acker.TIMEOUT_BUCKET_NUM,
-                        null, false);
-
-        super.prepare(sendTargets, _transfer_fn, topology_context);
+    private static Logger LOG = LoggerFactory.getLogger(MultipleThreadSpoutExecutors.class);
+
+    public MultipleThreadSpoutExecutors(Task task) {
+        super(task);
+
+        ackerRunnableThread = new AsyncLoopThread(new AckerRunnable(), false, Thread.NORM_PRIORITY, false);
+    }
+    
+    public void mkPending() {
+    	pending = new RotatingMap<Long, TupleInfo>(Acker.TIMEOUT_BUCKET_NUM, null, false);
+    }
+    
+    @Override 
+    public void init() throws Exception {
+    	super.init();
+    	ackerRunnableThread.start();
     }
 
     @Override
@@ -75,11 +70,14 @@ public class MultipleThreadSpoutExecutors extends SpoutExecutors {
         return idStr + "-" + MultipleThreadSpoutExecutors.class.getSimpleName();
     }
 
-    @Override
-    public void run() {
+	@Override
+	public void run() {
+		if (isFinishInit == false) {
+			initWrapper();
+		}
 
-        super.nextTuple();
-    }
+		super.nextTuple();
+	}
 
     class AckerRunnable extends RunnableCallback {
 
@@ -117,7 +115,7 @@ public class MultipleThreadSpoutExecutors extends SpoutExecutors {
                 }
 
             }
-            
+
             LOG.info("Successfully shutdown Spout's acker thread " + idStr);
         }
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/SingleThreadSpoutExecutors.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/SingleThreadSpoutExecutors.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/SingleThreadSpoutExecutors.java
index 9e4dd21..144b041 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/SingleThreadSpoutExecutors.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/SingleThreadSpoutExecutors.java
@@ -17,15 +17,9 @@
  */
 package com.alibaba.jstorm.task.execute.spout;
 
-import java.util.Map;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import backtype.storm.task.TopologyContext;
 import backtype.storm.utils.DisruptorQueue;
-import backtype.storm.utils.WorkerClassLoader;
-
+import com.alibaba.jstorm.metric.JStormMetricsReporter;
 import com.alibaba.jstorm.task.Task;
 import com.alibaba.jstorm.task.TaskBaseMetric;
 import com.alibaba.jstorm.task.TaskStatus;
@@ -35,35 +29,30 @@ import com.alibaba.jstorm.task.comm.TaskSendTargets;
 import com.alibaba.jstorm.task.comm.TupleInfo;
 import com.alibaba.jstorm.task.error.ITaskReportErr;
 import com.alibaba.jstorm.utils.RotatingMap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Map;
 
 /**
  * spout executor
- * 
+ * <p/>
  * All spout actions will be done here
  * 
  * @author yannian/Longda
- * 
  */
 public class SingleThreadSpoutExecutors extends SpoutExecutors {
-    private static Logger LOG = LoggerFactory
-            .getLogger(SingleThreadSpoutExecutors.class);
+    private static Logger LOG = LoggerFactory.getLogger(SingleThreadSpoutExecutors.class);
 
-    public SingleThreadSpoutExecutors(Task task,
-            backtype.storm.spout.ISpout _spout, TaskTransfer _transfer_fn,
-            Map<Integer, DisruptorQueue> innerTaskTransfer, Map _storm_conf,
-            TaskSendTargets sendTargets, TaskStatus taskStatus,
-            TopologyContext topology_context, TopologyContext _user_context,
-            TaskBaseMetric _task_stats, ITaskReportErr _report_error) {
-        super(task, _spout, _transfer_fn, innerTaskTransfer, _storm_conf,
-                sendTargets, taskStatus, topology_context, _user_context,
-                _task_stats, _report_error);
+    public SingleThreadSpoutExecutors(Task task) {
+        super(task);
 
-        // sending Tuple's TimeCacheMap
-        pending =
-                new RotatingMap<Long, TupleInfo>(Acker.TIMEOUT_BUCKET_NUM,
-                        null, true);
-
-        super.prepare(sendTargets, _transfer_fn, topology_context);
+    }
+    
+    @Override
+    public void mkPending() {
+    	// sending Tuple's TimeCacheMap
+        pending = new RotatingMap<Long, TupleInfo>(Acker.TIMEOUT_BUCKET_NUM, null, true);
     }
 
     @Override
@@ -73,10 +62,14 @@ public class SingleThreadSpoutExecutors extends SpoutExecutors {
 
     @Override
     public void run() {
+        if (!isFinishInit) {
+            initWrapper();
+        }
+
         executeEvent();
 
         super.nextTuple();
-            
+
         processControlEvent();
 
     }
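
Both executors now share the same lazy-init shape: the constructor just stores the Task, and run() checks isFinishInit and calls initWrapper() before the first nextTuple(). A minimal sketch of that guard, assuming initWrapper() flips the flag once setup succeeds (the names mirror the diff, the body is illustrative):

    // Lazy one-time initialization on the executor thread, as in the run()
    // methods above; initWrapper() is assumed to set isFinishInit = true.
    public abstract class LazyInitExecutor implements Runnable {
        protected volatile boolean isFinishInit = false;

        protected void initWrapper() {
            // heavyweight setup (open the spout, register metrics, ...) runs
            // here, on the executor thread instead of in the constructor
            isFinishInit = true;
        }

        @Override
        public void run() {
            if (!isFinishInit) {
                initWrapper();
            }
            doWork(); // e.g. executeEvent() + nextTuple() in the real executors
        }

        protected abstract void doWork();
    }

Deferring the setup keeps the constructor cheap and moves thread-affine work onto the thread that will actually drive the spout.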

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/SpoutCollector.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/SpoutCollector.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/SpoutCollector.java
index d913a9e..baf709f 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/SpoutCollector.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/SpoutCollector.java
@@ -25,16 +25,12 @@ import java.util.Random;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import backtype.storm.Config;
-import backtype.storm.spout.ISpoutOutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.tuple.MessageId;
-import backtype.storm.tuple.TupleImplExt;
-import backtype.storm.utils.DisruptorQueue;
-
-import com.alibaba.jstorm.common.metric.Histogram;
+import com.alibaba.jstorm.common.metric.AsmHistogram;
 import com.alibaba.jstorm.metric.JStormMetrics;
 import com.alibaba.jstorm.metric.MetricDef;
+import com.alibaba.jstorm.metric.MetricType;
+import com.alibaba.jstorm.metric.MetricUtils;
+import com.alibaba.jstorm.task.Task;
 import com.alibaba.jstorm.task.TaskBaseMetric;
 import com.alibaba.jstorm.task.TaskTransfer;
 import com.alibaba.jstorm.task.acker.Acker;
@@ -44,12 +40,20 @@ import com.alibaba.jstorm.task.comm.UnanchoredSend;
 import com.alibaba.jstorm.task.error.ITaskReportErr;
 import com.alibaba.jstorm.utils.JStormUtils;
 import com.alibaba.jstorm.utils.TimeOutMap;
+import com.alibaba.jstorm.utils.TimeUtils;
+
+import backtype.storm.Config;
+import backtype.storm.spout.ISpout;
+import backtype.storm.spout.ISpoutOutputCollector;
+import backtype.storm.task.TopologyContext;
+import backtype.storm.tuple.MessageId;
+import backtype.storm.tuple.TupleImplExt;
+import backtype.storm.utils.DisruptorQueue;
 
 /**
  * spout collector, sending tuple through this Object
  * 
  * @author yannian/Longda
- * 
  */
 public class SpoutCollector implements ISpoutOutputCollector {
     private static Logger LOG = LoggerFactory.getLogger(SpoutCollector.class);
@@ -71,64 +75,53 @@ public class SpoutCollector implements ISpoutOutputCollector {
     private Integer ackerNum;
     private boolean isDebug = false;
 
-    private Histogram emitTotalTimer;
+    private AsmHistogram emitTotalTimer;
     Random random;
 
-    public SpoutCollector(Integer task_id, backtype.storm.spout.ISpout spout,
-            TaskBaseMetric task_stats, TaskSendTargets sendTargets,
-            Map _storm_conf, TaskTransfer _transfer_fn,
-            TimeOutMap<Long, TupleInfo> pending,
-            TopologyContext topology_context,
-            DisruptorQueue disruptorAckerQueue, ITaskReportErr _report_error) {
-        this.sendTargets = sendTargets;
-        this.storm_conf = _storm_conf;
-        this.transfer_fn = _transfer_fn;
+    // the collaborators the old wide constructor took individually now all come from Task
+    public SpoutCollector(Task task, TimeOutMap<Long, TupleInfo> pending, DisruptorQueue disruptorAckerQueue) {
+        this.sendTargets = task.getTaskSendTargets();
+        this.storm_conf = task.getStormConf();
+        this.transfer_fn = task.getTaskTransfer();
         this.pending = pending;
-        this.topology_context = topology_context;
+        this.topology_context = task.getTopologyContext();
 
         this.disruptorAckerQueue = disruptorAckerQueue;
 
-        this.task_stats = task_stats;
-        this.spout = spout;
-        this.task_id = task_id;
-        this.report_error = _report_error;
+        this.task_stats = task.getTaskStats();
+        this.spout = (ISpout)task.getTaskObj();
+        this.task_id = task.getTaskId();
+        this.report_error = task.getReportErrorDie();
 
-        ackerNum =
-                JStormUtils.parseInt(storm_conf
-                        .get(Config.TOPOLOGY_ACKER_EXECUTORS));
-        isDebug =
-                JStormUtils.parseBoolean(storm_conf.get(Config.TOPOLOGY_DEBUG),
-                        false);
+        ackerNum = JStormUtils.parseInt(storm_conf.get(Config.TOPOLOGY_ACKER_EXECUTORS));
+        isDebug = JStormUtils.parseBoolean(storm_conf.get(Config.TOPOLOGY_DEBUG), false);
 
         random = new Random();
         random.setSeed(System.currentTimeMillis());
 
         String componentId = topology_context.getThisComponentId();
         emitTotalTimer =
-                JStormMetrics.registerTaskHistogram(task_id,
-                        MetricDef.COLLECTOR_EMIT_TIME);
-
+                (AsmHistogram) JStormMetrics
+                        .registerTaskMetric(MetricUtils.taskMetricName(topology_context.getTopologyId(), componentId, task_id, MetricDef.COLLECTOR_EMIT_TIME,
+                                MetricType.HISTOGRAM), new AsmHistogram());
     }
 
     @Override
-    public List<Integer> emit(String streamId, List<Object> tuple,
-            Object messageId) {
+    public List<Integer> emit(String streamId, List<Object> tuple, Object messageId) {
         return sendSpoutMsg(streamId, tuple, messageId, null);
     }
 
     @Override
-    public void emitDirect(int taskId, String streamId, List<Object> tuple,
-            Object messageId) {
+    public void emitDirect(int taskId, String streamId, List<Object> tuple, Object messageId) {
         sendSpoutMsg(streamId, tuple, messageId, taskId);
     }
 
-    private List<Integer> sendSpoutMsg(String out_stream_id,
-            List<Object> values, Object message_id, Integer out_task_id) {
-
-        long startTime = System.nanoTime();
-
+    private List<Integer> sendSpoutMsg(String out_stream_id, List<Object> values, Object message_id, Integer out_task_id) {
+        final long startTime = System.nanoTime();
         try {
-            java.util.List<Integer> out_tasks = null;
+            List<Integer> out_tasks;
             if (out_task_id != null) {
                 out_tasks = sendTargets.get(out_task_id, out_stream_id, values);
             } else {
@@ -148,7 +141,7 @@ public class SpoutCollector implements ISpoutOutputCollector {
             // if a root_id collided with a pending id, its ack/fail callbacks would be missed
             Long root_id = MessageId.generateId(random);
             if (needAck) {
-                while (pending.containsKey(root_id) == true) {
+                while (pending.containsKey(root_id)) {
                     root_id = MessageId.generateId(random);
                 }
             }
@@ -163,30 +156,23 @@ public class SpoutCollector implements ISpoutOutputCollector {
                     msgid = MessageId.makeUnanchored();
                 }
 
-                TupleImplExt tp =
-                        new TupleImplExt(topology_context, values, task_id,
-                                out_stream_id, msgid);
+                TupleImplExt tp = new TupleImplExt(topology_context, values, task_id, out_stream_id, msgid);
                 tp.setTargetTaskId(t);
                 transfer_fn.transfer(tp);
-
             }
 
             if (needAck) {
-
                 TupleInfo info = new TupleInfo();
                 info.setStream(out_stream_id);
                 info.setValues(values);
                 info.setMessageId(message_id);
-                info.setTimestamp(System.currentTimeMillis());
+                info.setTimestamp(System.nanoTime());
 
                 pending.putHead(root_id, info);
 
-                List<Object> ackerTuple =
-                        JStormUtils.mk_list((Object) root_id,
-                                JStormUtils.bit_xor_vals(ackSeq), task_id);
+                List<Object> ackerTuple = JStormUtils.mk_list((Object) root_id, JStormUtils.bit_xor_vals(ackSeq), task_id);
 
-                UnanchoredSend.send(topology_context, sendTargets, transfer_fn,
-                        Acker.ACKER_INIT_STREAM_ID, ackerTuple);
+                UnanchoredSend.send(topology_context, sendTargets, transfer_fn, Acker.ACKER_INIT_STREAM_ID, ackerTuple);
 
             } else if (message_id != null) {
                 TupleInfo info = new TupleInfo();
@@ -195,23 +181,20 @@ public class SpoutCollector implements ISpoutOutputCollector {
                 info.setMessageId(message_id);
                 info.setTimestamp(0);
 
-                AckSpoutMsg ack =
-                        new AckSpoutMsg(spout, info, task_stats, isDebug);
+                AckSpoutMsg ack = new AckSpoutMsg(spout, null, info, task_stats, isDebug);
                 ack.run();
-
             }
 
             return out_tasks;
         } finally {
             long endTime = System.nanoTime();
-            emitTotalTimer.update((endTime - startTime)/1000000.0d);
+            emitTotalTimer.update((endTime - startTime) / TimeUtils.NS_PER_US);
         }
 
     }
 
     @Override
     public void reportError(Throwable error) {
-        // TODO Auto-generated method stub
         report_error.report(error);
     }
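
sendSpoutMsg above drives JStorm's at-least-once machinery: it draws a random root_id (retrying while a pending entry with the same id exists), records the TupleInfo in the pending map, and seeds the acker by sending (root_id, XOR of the ids accumulated in ackSeq, task_id) on Acker.ACKER_INIT_STREAM_ID. The XOR bookkeeping works because XOR-ing every edge id in twice, once on emit and once on ack, cancels back to zero exactly when the whole tuple tree is done. A self-contained sketch of that invariant (values are illustrative):

    import java.util.Random;

    public class XorAckSketch {
        public static void main(String[] args) {
            Random random = new Random();
            long[] edgeIds = { random.nextLong(), random.nextLong(), random.nextLong() };

            long ackVal = 0;
            for (long id : edgeIds) {
                ackVal ^= id; // emit: XOR the new edge into the tracked value
            }
            for (long id : edgeIds) {
                ackVal ^= id; // ack: XOR the same edge in again
            }

            // zero <=> every emitted edge has been acked
            System.out.println(ackVal == 0 ? "tree fully acked" : "still pending");
        }
    }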
 


http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/spout/ISpout.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/spout/ISpout.java b/jstorm-core/src/main/java/backtype/storm/spout/ISpout.java
index c421fed..ae86448 100755
--- a/jstorm-core/src/main/java/backtype/storm/spout/ISpout.java
+++ b/jstorm-core/src/main/java/backtype/storm/spout/ISpout.java
@@ -22,83 +22,83 @@ import java.util.Map;
 import java.io.Serializable;
 
 /**
- * ISpout is the core interface for implementing spouts. A Spout is responsible
- * for feeding messages into the topology for processing. For every tuple emitted by
- * a spout, Storm will track the (potentially very large) DAG of tuples generated
- * based on a tuple emitted by the spout. When Storm detects that every tuple in
- * that DAG has been successfully processed, it will send an ack message to the Spout.
- *
- * <p>If a tuple fails to be fully processed within the configured timeout for the
- * topology (see {@link backtype.storm.Config}), Storm will send a fail message to the spout
- * for the message.</p>
- *
- * <p> When a Spout emits a tuple, it can tag the tuple with a message id. The message id
- * can be any type. When Storm acks or fails a message, it will pass back to the
- * spout the same message id to identify which tuple it's referring to. If the spout leaves out
- * the message id, or sets it to null, then Storm will not track the message and the spout
- * will not receive any ack or fail callbacks for the message.</p>
- *
- * <p>Storm executes ack, fail, and nextTuple all on the same thread. This means that an implementor
- * of an ISpout does not need to worry about concurrency issues between those methods. However, it 
- * also means that an implementor must ensure that nextTuple is non-blocking: otherwise 
- * the method could block acks and fails that are pending to be processed.</p>
+ * ISpout is the core interface for implementing spouts. A Spout is responsible for feeding messages into the topology for processing. For every tuple emitted
+ * by a spout, Storm will track the (potentially very large) DAG of tuples generated based on a tuple emitted by the spout. When Storm detects that every tuple
+ * in that DAG has been successfully processed, it will send an ack message to the Spout.
+ * 
+ * <p>
+ * If a tuple fails to be fully processed within the configured timeout for the topology (see {@link backtype.storm.Config}), Storm will send a fail message to
+ * the spout for the message.
+ * </p>
+ * 
+ * <p>
+ * When a Spout emits a tuple, it can tag the tuple with a message id. The message id can be any type. When Storm acks or fails a message, it will pass back to
+ * the spout the same message id to identify which tuple it's referring to. If the spout leaves out the message id, or sets it to null, then Storm will not
+ * track the message and the spout will not receive any ack or fail callbacks for the message.
+ * </p>
+ * 
+ * <p>
+ * Storm executes ack, fail, and nextTuple all on the same thread. This means that an implementor of an ISpout does not need to worry about concurrency issues
+ * between those methods. However, it also means that an implementor must ensure that nextTuple is non-blocking: otherwise the method could block acks and fails
+ * that are pending to be processed.
+ * </p>
  */
 public interface ISpout extends Serializable {
     /**
-     * Called when a task for this component is initialized within a worker on the cluster.
-     * It provides the spout with the environment in which the spout executes.
-     *
-     * <p>This includes the:</p>
-     *
-     * @param conf The Storm configuration for this spout. This is the configuration provided to the topology merged in with cluster configuration on this machine.
-     * @param context This object can be used to get information about this task's place within the topology, including the task id and component id of this task, input and output information, etc.
-     * @param collector The collector is used to emit tuples from this spout. Tuples can be emitted at any time, including the open and close methods. The collector is thread-safe and should be saved as an instance variable of this spout object.
+     * Called when a task for this component is initialized within a worker on the cluster. It provides the spout with the environment in which the spout
+     * executes.
+     * 
+     * <p>
+     * This includes the:
+     * </p>
+     * 
+     * @param conf The Storm configuration for this spout. This is the configuration provided to the topology merged in with cluster configuration on this
+     *            machine.
+     * @param context This object can be used to get information about this task's place within the topology, including the task id and component id of this
+     *            task, input and output information, etc.
+     * @param collector The collector is used to emit tuples from this spout. Tuples can be emitted at any time, including the open and close methods. The
+     *            collector is thread-safe and should be saved as an instance variable of this spout object.
      */
     void open(Map conf, TopologyContext context, SpoutOutputCollector collector);
 
     /**
-     * Called when an ISpout is going to be shutdown. There is no guarentee that close
-     * will be called, because the supervisor kill -9's worker processes on the cluster.
-     *
-     * <p>The one context where close is guaranteed to be called is a topology is
-     * killed when running Storm in local mode.</p>
+     * Called when an ISpout is going to be shut down. There is no guarantee that close will be called, because the supervisor kill -9's worker processes on the
+     * cluster.
+     * 
+     * <p>
+     * The one context where close is guaranteed to be called is when a topology is killed while running Storm in local mode.
+     * </p>
      */
     void close();
-    
+
     /**
-     * Called when a spout has been activated out of a deactivated mode.
-     * nextTuple will be called on this spout soon. A spout can become activated
-     * after having been deactivated when the topology is manipulated using the 
-     * `storm` client. 
+     * Called when a spout has been activated out of a deactivated mode. nextTuple will be called on this spout soon. A spout can become activated after having
+     * been deactivated when the topology is manipulated using the `storm` client.
      */
     void activate();
-    
+
     /**
-     * Called when a spout has been deactivated. nextTuple will not be called while
-     * a spout is deactivated. The spout may or may not be reactivated in the future.
+     * Called when a spout has been deactivated. nextTuple will not be called while a spout is deactivated. The spout may or may not be reactivated in the
+     * future.
      */
     void deactivate();
 
     /**
-     * When this method is called, Storm is requesting that the Spout emit tuples to the 
-     * output collector. This method should be non-blocking, so if the Spout has no tuples
-     * to emit, this method should return. nextTuple, ack, and fail are all called in a tight
-     * loop in a single thread in the spout task. When there are no tuples to emit, it is courteous
-     * to have nextTuple sleep for a short amount of time (like a single millisecond)
-     * so as not to waste too much CPU.
+     * When this method is called, Storm is requesting that the Spout emit tuples to the output collector. This method should be non-blocking, so if the Spout
+     * has no tuples to emit, this method should return. nextTuple, ack, and fail are all called in a tight loop in a single thread in the spout task. When
+     * there are no tuples to emit, it is courteous to have nextTuple sleep for a short amount of time (like a single millisecond) so as not to waste too much
+     * CPU.
      */
     void nextTuple();
 
     /**
-     * Storm has determined that the tuple emitted by this spout with the msgId identifier
-     * has been fully processed. Typically, an implementation of this method will take that
-     * message off the queue and prevent it from being replayed.
+     * Storm has determined that the tuple emitted by this spout with the msgId identifier has been fully processed. Typically, an implementation of this method
+     * will take that message off the queue and prevent it from being replayed.
      */
     void ack(Object msgId);
 
     /**
-     * The tuple emitted by this spout with the msgId identifier has failed to be
-     * fully processed. Typically, an implementation of this method will put that
+     * The tuple emitted by this spout with the msgId identifier has failed to be fully processed. Typically, an implementation of this method will put that
      * message back on the queue to be replayed at a later time.
      */
     void fail(Object msgId);
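
The Javadoc above pins down the whole reliability contract: tag emits with a message id, keep nextTuple non-blocking, and rely on ack/fail arriving on the same thread. A minimal in-memory spout that follows those rules, as a sketch (the queue-backed source and replay-on-fail policy are illustrative, not part of this commit):

    import java.util.Map;
    import java.util.UUID;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentLinkedQueue;

    import backtype.storm.spout.ISpout;
    import backtype.storm.spout.SpoutOutputCollector;
    import backtype.storm.task.TopologyContext;
    import backtype.storm.utils.Utils;

    public class QueueSpout implements ISpout {
        private final ConcurrentLinkedQueue<String> source = new ConcurrentLinkedQueue<String>();
        private final Map<Object, String> pending = new ConcurrentHashMap<Object, String>();
        private SpoutOutputCollector collector;

        public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
            this.collector = collector; // thread-safe; keep as an instance variable
        }

        public void nextTuple() {
            String msg = source.poll();
            if (msg == null) {
                Utils.sleep(1); // stay non-blocking; yield briefly when idle
                return;
            }
            Object msgId = UUID.randomUUID(); // non-null id => Storm tracks the tuple
            pending.put(msgId, msg);
            collector.emit(Utils.tuple(msg), msgId);
        }

        public void ack(Object msgId) { // fully processed: forget it
            pending.remove(msgId);
        }

        public void fail(Object msgId) { // failed: put it back for replay
            String msg = pending.remove(msgId);
            if (msg != null) {
                source.offer(msg);
            }
        }

        public void close() {}
        public void activate() {}
        public void deactivate() {}
    }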

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/spout/ISpoutOutputCollector.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/spout/ISpoutOutputCollector.java b/jstorm-core/src/main/java/backtype/storm/spout/ISpoutOutputCollector.java
index 3cebe43..00640ea 100755
--- a/jstorm-core/src/main/java/backtype/storm/spout/ISpoutOutputCollector.java
+++ b/jstorm-core/src/main/java/backtype/storm/spout/ISpoutOutputCollector.java
@@ -21,10 +21,11 @@ import java.util.List;
 
 public interface ISpoutOutputCollector {
     /**
-        Returns the task ids that received the tuples.
-    */
+     * Returns the task ids that received the tuples.
+     */
     List<Integer> emit(String streamId, List<Object> tuple, Object messageId);
+
     void emitDirect(int taskId, String streamId, List<Object> tuple, Object messageId);
+
     void reportError(Throwable error);
 }
-

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/spout/ISpoutWaitStrategy.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/spout/ISpoutWaitStrategy.java b/jstorm-core/src/main/java/backtype/storm/spout/ISpoutWaitStrategy.java
index d0bdfa8..7fc288a 100755
--- a/jstorm-core/src/main/java/backtype/storm/spout/ISpoutWaitStrategy.java
+++ b/jstorm-core/src/main/java/backtype/storm/spout/ISpoutWaitStrategy.java
@@ -20,15 +20,14 @@ package backtype.storm.spout;
 import java.util.Map;
 
 /**
- * The strategy a spout needs to use when its waiting. Waiting is
- * triggered in one of two conditions:
+ * The strategy a spout needs to use when it's waiting. Waiting is triggered in one of two conditions:
  * 
- * 1. nextTuple emits no tuples
- * 2. The spout has hit maxSpoutPending and can't emit any more tuples
+ * 1. nextTuple emits no tuples
+ * 2. The spout has hit maxSpoutPending and can't emit any more tuples
  * 
  * The default strategy sleeps for one millisecond.
  */
 public interface ISpoutWaitStrategy {
     void prepare(Map conf);
+
     void emptyEmit(long streak);
 }
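
Both trigger conditions funnel into emptyEmit(streak), where streak counts consecutive empty emits, so a strategy can scale its back-off with idleness. A sketch of an exponential variant (the 50 ms cap is an assumption, not something this release ships):

    import java.util.Map;

    import backtype.storm.spout.ISpoutWaitStrategy;
    import backtype.storm.utils.Utils;

    // Sleeps longer the longer the spout has been idle, capped so a burst of
    // new tuples is still picked up within ~50 ms.
    public class BackoffSpoutWaitStrategy implements ISpoutWaitStrategy {
        private static final long MAX_SLEEP_MS = 50;

        public void prepare(Map conf) {
            // nothing to configure in this sketch
        }

        public void emptyEmit(long streak) {
            long sleepMs = Math.min(1L << Math.min(streak, 6L), MAX_SLEEP_MS);
            Utils.sleep(sleepMs);
        }
    }

Such a class would be wired in through the topology.spout.wait.strategy config entry, the same hook SleepSpoutWaitStrategy uses.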

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/spout/MultiScheme.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/spout/MultiScheme.java b/jstorm-core/src/main/java/backtype/storm/spout/MultiScheme.java
index ca2ce91..b75bd51 100755
--- a/jstorm-core/src/main/java/backtype/storm/spout/MultiScheme.java
+++ b/jstorm-core/src/main/java/backtype/storm/spout/MultiScheme.java
@@ -23,6 +23,7 @@ import java.io.Serializable;
 import backtype.storm.tuple.Fields;
 
 public interface MultiScheme extends Serializable {
-  public Iterable<List<Object>> deserialize(byte[] ser);
-  public Fields getOutputFields();
+    public Iterable<List<Object>> deserialize(byte[] ser);
+
+    public Fields getOutputFields();
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/spout/NothingEmptyEmitStrategy.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/spout/NothingEmptyEmitStrategy.java b/jstorm-core/src/main/java/backtype/storm/spout/NothingEmptyEmitStrategy.java
index 36bea94..7f0df27 100755
--- a/jstorm-core/src/main/java/backtype/storm/spout/NothingEmptyEmitStrategy.java
+++ b/jstorm-core/src/main/java/backtype/storm/spout/NothingEmptyEmitStrategy.java
@@ -21,7 +21,7 @@ import java.util.Map;
 
 public class NothingEmptyEmitStrategy implements ISpoutWaitStrategy {
     @Override
-    public void emptyEmit(long streak) {        
+    public void emptyEmit(long streak) {
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/spout/RawMultiScheme.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/spout/RawMultiScheme.java b/jstorm-core/src/main/java/backtype/storm/spout/RawMultiScheme.java
index 7f73975..8c31097 100755
--- a/jstorm-core/src/main/java/backtype/storm/spout/RawMultiScheme.java
+++ b/jstorm-core/src/main/java/backtype/storm/spout/RawMultiScheme.java
@@ -21,18 +21,17 @@ import java.util.List;
 
 import backtype.storm.tuple.Fields;
 
-
 import static backtype.storm.utils.Utils.tuple;
 import static java.util.Arrays.asList;
 
 public class RawMultiScheme implements MultiScheme {
-  @Override
-  public Iterable<List<Object>> deserialize(byte[] ser) {
-    return asList(tuple(ser));
-  }
+    @Override
+    public Iterable<List<Object>> deserialize(byte[] ser) {
+        return asList(tuple(ser));
+    }
 
-  @Override
-  public Fields getOutputFields() {
-    return new Fields("bytes");
-  }
+    @Override
+    public Fields getOutputFields() {
+        return new Fields("bytes");
+    }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/spout/Scheme.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/spout/Scheme.java b/jstorm-core/src/main/java/backtype/storm/spout/Scheme.java
index ca68954..6c8aeed 100755
--- a/jstorm-core/src/main/java/backtype/storm/spout/Scheme.java
+++ b/jstorm-core/src/main/java/backtype/storm/spout/Scheme.java
@@ -21,8 +21,8 @@ import backtype.storm.tuple.Fields;
 import java.io.Serializable;
 import java.util.List;
 
-
 public interface Scheme extends Serializable {
     public List<Object> deserialize(byte[] ser);
+
     public Fields getOutputFields();
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/spout/SchemeAsMultiScheme.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/spout/SchemeAsMultiScheme.java b/jstorm-core/src/main/java/backtype/storm/spout/SchemeAsMultiScheme.java
index 29f7fce..3414533 100755
--- a/jstorm-core/src/main/java/backtype/storm/spout/SchemeAsMultiScheme.java
+++ b/jstorm-core/src/main/java/backtype/storm/spout/SchemeAsMultiScheme.java
@@ -23,19 +23,23 @@ import java.util.List;
 import backtype.storm.tuple.Fields;
 
 public class SchemeAsMultiScheme implements MultiScheme {
-  public final Scheme scheme;
+    public final Scheme scheme;
 
-  public SchemeAsMultiScheme(Scheme scheme) {
-    this.scheme = scheme;
-  }
+    public SchemeAsMultiScheme(Scheme scheme) {
+        this.scheme = scheme;
+    }
 
-  @Override public Iterable<List<Object>> deserialize(final byte[] ser) {
-    List<Object> o = scheme.deserialize(ser);
-    if(o == null) return null;
-    else return Arrays.asList(o);
-  }
+    @Override
+    public Iterable<List<Object>> deserialize(final byte[] ser) {
+        List<Object> o = scheme.deserialize(ser);
+        if (o == null)
+            return null;
+        else
+            return Arrays.asList(o);
+    }
 
-  @Override public Fields getOutputFields() {
-    return scheme.getOutputFields();
-  }
+    @Override
+    public Fields getOutputFields() {
+        return scheme.getOutputFields();
+    }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/spout/ShellSpout.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/spout/ShellSpout.java b/jstorm-core/src/main/java/backtype/storm/spout/ShellSpout.java
index 06c1647..f680550 100755
--- a/jstorm-core/src/main/java/backtype/storm/spout/ShellSpout.java
+++ b/jstorm-core/src/main/java/backtype/storm/spout/ShellSpout.java
@@ -25,29 +25,28 @@ import backtype.storm.multilang.ShellMsg;
 import backtype.storm.multilang.SpoutMsg;
 import backtype.storm.task.TopologyContext;
 import backtype.storm.utils.ShellProcess;
-import java.util.Map;
+import clojure.lang.RT;
+import com.google.common.util.concurrent.MoreExecutors;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import java.util.List;
+import java.util.Map;
 import java.util.TimerTask;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 
-import clojure.lang.RT;
-import com.google.common.util.concurrent.MoreExecutors;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
 public class ShellSpout implements ISpout {
     public static Logger LOG = LoggerFactory.getLogger(ShellSpout.class);
 
     private SpoutOutputCollector _collector;
     private String[] _command;
     private ShellProcess _process;
-    
+
     private TopologyContext _context;
-    
+
     private SpoutMsg _spoutMsg;
 
     private int workerTimeoutMills;
@@ -62,8 +61,7 @@ public class ShellSpout implements ISpout {
         _command = command;
     }
 
-    public void open(Map stormConf, TopologyContext context,
-                     SpoutOutputCollector collector) {
+    public void open(Map stormConf, TopologyContext context, SpoutOutputCollector collector) {
         _collector = collector;
         _context = context;
 
@@ -108,25 +106,25 @@ public class ShellSpout implements ISpout {
         _spoutMsg.setId(msgId);
         querySubprocess();
     }
-    
+
     private void handleMetrics(ShellMsg shellMsg) {
-        //get metric name
+        // get metric name
         String name = shellMsg.getMetricName();
         if (name.isEmpty()) {
             throw new RuntimeException("Receive Metrics name is empty");
         }
-        
-        //get metric by name
+
+        // get metric by name
         IMetric iMetric = _context.getRegisteredMetricByName(name);
         if (iMetric == null) {
-            throw new RuntimeException("Could not find metric by name["+name+"] ");
+            throw new RuntimeException("Could not find metric by name[" + name + "] ");
         }
-        if ( !(iMetric instanceof IShellMetric)) {
-            throw new RuntimeException("Metric["+name+"] is not IShellMetric, can not call by RPC");
+        if (!(iMetric instanceof IShellMetric)) {
+            throw new RuntimeException("Metric[" + name + "] is not IShellMetric, can not call by RPC");
         }
-        IShellMetric iShellMetric = (IShellMetric)iMetric;
-        
-        //call updateMetricFromRPC with params
+        IShellMetric iShellMetric = (IShellMetric) iMetric;
+
+        // call updateMetricFromRPC with params
         Object paramsObj = shellMsg.getMetricParams();
         try {
             iShellMetric.updateMetricFromRPC(paramsObj);
@@ -134,7 +132,7 @@ public class ShellSpout implements ISpout {
             throw re;
         } catch (Exception e) {
             throw new RuntimeException(e);
-        }       
+        }
     }
 
     private void querySubprocess() {
@@ -187,24 +185,24 @@ public class ShellSpout implements ISpout {
         ShellMsg.ShellLogLevel logLevel = shellMsg.getLogLevel();
 
         switch (logLevel) {
-            case TRACE:
-                LOG.trace(msg);
-                break;
-            case DEBUG:
-                LOG.debug(msg);
-                break;
-            case INFO:
-                LOG.info(msg);
-                break;
-            case WARN:
-                LOG.warn(msg);
-                break;
-            case ERROR:
-                LOG.error(msg);
-                break;
-            default:
-                LOG.info(msg);
-                break;
+        case TRACE:
+            LOG.trace(msg);
+            break;
+        case DEBUG:
+            LOG.debug(msg);
+            break;
+        case INFO:
+            LOG.info(msg);
+            break;
+        case WARN:
+            LOG.warn(msg);
+            break;
+        case ERROR:
+            LOG.error(msg);
+            break;
+        default:
+            LOG.info(msg);
+            break;
         }
     }
 
@@ -254,8 +252,7 @@ public class ShellSpout implements ISpout {
             long currentTimeMillis = System.currentTimeMillis();
             long lastHeartbeat = getLastHeartbeat();
 
-            LOG.debug("current time : {}, last heartbeat : {}, worker timeout (ms) : {}",
-                    currentTimeMillis, lastHeartbeat, workerTimeoutMills);
+            LOG.debug("current time : {}, last heartbeat : {}, worker timeout (ms) : {}", currentTimeMillis, lastHeartbeat, workerTimeoutMills);
 
             if (currentTimeMillis - lastHeartbeat > workerTimeoutMills) {
                 spout.die(new RuntimeException("subprocess heartbeat timeout"));
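
handleMetrics above resolves the metric named by the subprocess via getRegisteredMetricByName, insists it implements IShellMetric, and forwards the RPC payload to updateMetricFromRPC. That lookup only succeeds for metrics registered on the TopologyContext up front; a sketch of such a metric (assuming IShellMetric extends IMetric and declares updateMetricFromRPC(Object), consistent with the calls above):

    import backtype.storm.metric.api.rpc.IShellMetric;

    // A counter the shell subprocess can bump over the multilang RPC channel.
    public class ShellCounter implements IShellMetric {
        private long count = 0;

        public void updateMetricFromRPC(Object value) {
            count += (value instanceof Number) ? ((Number) value).longValue() : 1;
        }

        public Object getValueAndReset() { // from IMetric: flushed every bucket
            long current = count;
            count = 0;
            return current;
        }
    }

    // in open(): context.registerMetric("shell-counter", new ShellCounter(), 60);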

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/spout/SleepSpoutWaitStrategy.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/spout/SleepSpoutWaitStrategy.java b/jstorm-core/src/main/java/backtype/storm/spout/SleepSpoutWaitStrategy.java
index 3ccf4e1..e01f668 100755
--- a/jstorm-core/src/main/java/backtype/storm/spout/SleepSpoutWaitStrategy.java
+++ b/jstorm-core/src/main/java/backtype/storm/spout/SleepSpoutWaitStrategy.java
@@ -20,11 +20,10 @@ package backtype.storm.spout;
 import backtype.storm.Config;
 import java.util.Map;
 
-
 public class SleepSpoutWaitStrategy implements ISpoutWaitStrategy {
 
     long sleepMillis;
-    
+
     @Override
     public void prepare(Map conf) {
         sleepMillis = ((Number) conf.get(Config.TOPOLOGY_SLEEP_SPOUT_WAIT_STRATEGY_TIME_MS)).longValue();

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/spout/SpoutOutputCollector.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/spout/SpoutOutputCollector.java b/jstorm-core/src/main/java/backtype/storm/spout/SpoutOutputCollector.java
index 7a33026..4b9dc51 100755
--- a/jstorm-core/src/main/java/backtype/storm/spout/SpoutOutputCollector.java
+++ b/jstorm-core/src/main/java/backtype/storm/spout/SpoutOutputCollector.java
@@ -22,11 +22,9 @@ import backtype.storm.utils.Utils;
 import java.util.List;
 
 /**
- * This output collector exposes the API for emitting tuples from an {@link backtype.storm.topology.IRichSpout}.
- * The main difference between this output collector and {@link OutputCollector}
- * for {@link backtype.storm.topology.IRichBolt} is that spouts can tag messages with ids so that they can be
- * acked or failed later on. This is the Spout portion of Storm's API to
- * guarantee that each message is fully processed at least once.
+ * This output collector exposes the API for emitting tuples from an {@link backtype.storm.topology.IRichSpout}. The main difference between this output
+ * collector and {@link OutputCollector} for {@link backtype.storm.topology.IRichBolt} is that spouts can tag messages with ids so that they can be acked or
+ * failed later on. This is the Spout portion of Storm's API to guarantee that each message is fully processed at least once.
  */
 public class SpoutOutputCollector implements ISpoutOutputCollector {
     ISpoutOutputCollector _delegate;
@@ -36,13 +34,10 @@ public class SpoutOutputCollector implements ISpoutOutputCollector {
     }
 
     /**
-     * Emits a new tuple to the specified output stream with the given message ID.
-     * When Storm detects that this tuple has been fully processed, or has failed
-     * to be fully processed, the spout will receive an ack or fail callback respectively
-     * with the messageId as long as the messageId was not null. If the messageId was null,
-     * Storm will not track the tuple and no callback will be received. The emitted values must be 
-     * immutable.
-     *
+     * Emits a new tuple to the specified output stream with the given message ID. When Storm detects that this tuple has been fully processed, or has failed to
+     * be fully processed, the spout will receive an ack or fail callback respectively with the messageId as long as the messageId was not null. If the
+     * messageId was null, Storm will not track the tuple and no callback will be received. The emitted values must be immutable.
+     * 
      * @return the list of task ids that this tuple was sent to
      */
     public List<Integer> emit(String streamId, List<Object> tuple, Object messageId) {
@@ -50,13 +45,10 @@ public class SpoutOutputCollector implements ISpoutOutputCollector {
     }
 
     /**
-     * Emits a new tuple to the default output stream with the given message ID.
-     * When Storm detects that this tuple has been fully processed, or has failed
-     * to be fully processed, the spout will receive an ack or fail callback respectively
-     * with the messageId as long as the messageId was not null. If the messageId was null,
-     * Storm will not track the tuple and no callback will be received. The emitted values must be 
-     * immutable.
-     *
+     * Emits a new tuple to the default output stream with the given message ID. When Storm detects that this tuple has been fully processed, or has failed to
+     * be fully processed, the spout will receive an ack or fail callback respectively with the messageId as long as the messageId was not null. If the
+     * messageId was null, Storm will not track the tuple and no callback will be received. The emitted values must be immutable.
+     * 
      * @return the list of task ids that this tuple was sent to
      */
     public List<Integer> emit(List<Object> tuple, Object messageId) {
@@ -64,64 +56,56 @@ public class SpoutOutputCollector implements ISpoutOutputCollector {
     }
 
     /**
-     * Emits a tuple to the default output stream with a null message id. Storm will
-     * not track this message so ack and fail will never be called for this tuple. The
-     * emitted values must be immutable.
+     * Emits a tuple to the default output stream with a null message id. Storm will not track this message so ack and fail will never be called for this tuple.
+     * The emitted values must be immutable.
      */
     public List<Integer> emit(List<Object> tuple) {
         return emit(tuple, null);
     }
 
     /**
-     * Emits a tuple to the specified output stream with a null message id. Storm will
-     * not track this message so ack and fail will never be called for this tuple. The
-     * emitted values must be immutable.
+     * Emits a tuple to the specified output stream with a null message id. Storm will not track this message so ack and fail will never be called for this
+     * tuple. The emitted values must be immutable.
      */
     public List<Integer> emit(String streamId, List<Object> tuple) {
         return emit(streamId, tuple, null);
     }
 
     /**
-     * Emits a tuple to the specified task on the specified output stream. This output
-     * stream must have been declared as a direct stream, and the specified task must
-     * use a direct grouping on this stream to receive the message. The emitted values must be 
-     * immutable.
+     * Emits a tuple to the specified task on the specified output stream. This output stream must have been declared as a direct stream, and the specified task
+     * must use a direct grouping on this stream to receive the message. The emitted values must be immutable.
      */
     public void emitDirect(int taskId, String streamId, List<Object> tuple, Object messageId) {
         _delegate.emitDirect(taskId, streamId, tuple, messageId);
     }
 
     /**
-     * Emits a tuple to the specified task on the default output stream. This output
-     * stream must have been declared as a direct stream, and the specified task must
-     * use a direct grouping on this stream to receive the message. The emitted values must be 
-     * immutable.
+     * Emits a tuple to the specified task on the default output stream. This output stream must have been declared as a direct stream, and the specified task
+     * must use a direct grouping on this stream to receive the message. The emitted values must be immutable.
      */
     public void emitDirect(int taskId, List<Object> tuple, Object messageId) {
         emitDirect(taskId, Utils.DEFAULT_STREAM_ID, tuple, messageId);
     }
-    
+
     /**
-     * Emits a tuple to the specified task on the specified output stream. This output
-     * stream must have been declared as a direct stream, and the specified task must
-     * use a direct grouping on this stream to receive the message. The emitted values must be 
-     * immutable.
-     *
-     * <p> Because no message id is specified, Storm will not track this message
-     * so ack and fail will never be called for this tuple.</p>
+     * Emits a tuple to the specified task on the specified output stream. This output stream must have been declared as a direct stream, and the specified task
+     * must use a direct grouping on this stream to receive the message. The emitted values must be immutable.
+     * 
+     * <p>
+     * Because no message id is specified, Storm will not track this message so ack and fail will never be called for this tuple.
+     * </p>
      */
     public void emitDirect(int taskId, String streamId, List<Object> tuple) {
         emitDirect(taskId, streamId, tuple, null);
     }
 
     /**
-     * Emits a tuple to the specified task on the default output stream. This output
-     * stream must have been declared as a direct stream, and the specified task must
-     * use a direct grouping on this stream to receive the message. The emitted values must be 
-     * immutable.
-     *
-     * <p> Because no message id is specified, Storm will not track this message
-     * so ack and fail will never be called for this tuple.</p>
+     * Emits a tuple to the specified task on the default output stream. This output stream must have been declared as a direct stream, and the specified task
+     * must use a direct grouping on this stream to receive the message. The emitted values must be immutable.
+     * 
+     * <p>
+     * Because no message id is specified, Storm will not track this message so ack and fail will never be called for this tuple.
+     * </p>
      */
     public void emitDirect(int taskId, List<Object> tuple) {
         emitDirect(taskId, tuple, null);
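
The overloads above vary along two axes: tracked versus untracked (message id present or null) and grouped versus direct (emit versus emitDirect). The two reliability modes in miniature (stream values and ids are illustrative):

    import java.util.UUID;

    import backtype.storm.spout.SpoutOutputCollector;
    import backtype.storm.utils.Utils;

    public class EmitModes {
        // Tracked: Storm will call ack(msgId) or fail(msgId) on the spout later.
        static void reliable(SpoutOutputCollector collector) {
            Object msgId = UUID.randomUUID();
            collector.emit(Utils.tuple("word", 1), msgId);
        }

        // Untracked: no message id, so no ack/fail callbacks, ever.
        static void fireAndForget(SpoutOutputCollector collector) {
            collector.emit(Utils.tuple("word", 1));
        }
    }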

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/state/IStateSpout.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/state/IStateSpout.java b/jstorm-core/src/main/java/backtype/storm/state/IStateSpout.java
index f4aa14f..7d6efd4 100755
--- a/jstorm-core/src/main/java/backtype/storm/state/IStateSpout.java
+++ b/jstorm-core/src/main/java/backtype/storm/state/IStateSpout.java
@@ -23,7 +23,10 @@ import java.util.Map;
 
 public interface IStateSpout extends Serializable {
     void open(Map conf, TopologyContext context);
+
     void close();
+
     void nextTuple(StateSpoutOutputCollector collector);
+
     void synchronize(SynchronizeOutputCollector collector);
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/state/ISubscribedState.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/state/ISubscribedState.java b/jstorm-core/src/main/java/backtype/storm/state/ISubscribedState.java
index 6eff72c..ab9b60c 100755
--- a/jstorm-core/src/main/java/backtype/storm/state/ISubscribedState.java
+++ b/jstorm-core/src/main/java/backtype/storm/state/ISubscribedState.java
@@ -21,5 +21,6 @@ import backtype.storm.tuple.Tuple;
 
 public interface ISubscribedState {
     void set(Object id, Tuple tuple);
+
     void remove(Object id);
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/state/ISynchronizeOutputCollector.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/state/ISynchronizeOutputCollector.java b/jstorm-core/src/main/java/backtype/storm/state/ISynchronizeOutputCollector.java
index 9c80a75..926ea38 100755
--- a/jstorm-core/src/main/java/backtype/storm/state/ISynchronizeOutputCollector.java
+++ b/jstorm-core/src/main/java/backtype/storm/state/ISynchronizeOutputCollector.java
@@ -20,5 +20,5 @@ package backtype.storm.state;
 import java.util.List;
 
 public interface ISynchronizeOutputCollector {
-    void add(int streamId, Object id, List<Object> tuple);    
+    void add(int streamId, Object id, List<Object> tuple);
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/state/StateSpoutOutputCollector.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/state/StateSpoutOutputCollector.java b/jstorm-core/src/main/java/backtype/storm/state/StateSpoutOutputCollector.java
index 4bb10e0..b683835 100755
--- a/jstorm-core/src/main/java/backtype/storm/state/StateSpoutOutputCollector.java
+++ b/jstorm-core/src/main/java/backtype/storm/state/StateSpoutOutputCollector.java
@@ -17,7 +17,6 @@
  */
 package backtype.storm.state;
 
-
 public class StateSpoutOutputCollector extends SynchronizeOutputCollector implements IStateSpoutOutputCollector {
 
     @Override

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/state/SynchronizeOutputCollector.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/state/SynchronizeOutputCollector.java b/jstorm-core/src/main/java/backtype/storm/state/SynchronizeOutputCollector.java
index 9fbba6e..6c9817f 100755
--- a/jstorm-core/src/main/java/backtype/storm/state/SynchronizeOutputCollector.java
+++ b/jstorm-core/src/main/java/backtype/storm/state/SynchronizeOutputCollector.java
@@ -19,7 +19,6 @@ package backtype.storm.state;
 
 import java.util.List;
 
-
 public class SynchronizeOutputCollector implements ISynchronizeOutputCollector {
 
     @Override

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/task/GeneralTopologyContext.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/task/GeneralTopologyContext.java b/jstorm-core/src/main/java/backtype/storm/task/GeneralTopologyContext.java
index 7540500..88127ae 100644
--- a/jstorm-core/src/main/java/backtype/storm/task/GeneralTopologyContext.java
+++ b/jstorm-core/src/main/java/backtype/storm/task/GeneralTopologyContext.java
@@ -17,14 +17,6 @@
  */
 package backtype.storm.task;
 
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.json.simple.JSONAware;
-
 import backtype.storm.Config;
 import backtype.storm.Constants;
 import backtype.storm.generated.ComponentCommon;
@@ -34,6 +26,9 @@ import backtype.storm.generated.StormTopology;
 import backtype.storm.tuple.Fields;
 import backtype.storm.utils.ThriftTopologyUtils;
 import backtype.storm.utils.Utils;
+import org.json.simple.JSONAware;
+
+import java.util.*;
 
 public class GeneralTopologyContext implements JSONAware {
     private StormTopology _topology;
@@ -42,11 +37,10 @@ public class GeneralTopologyContext implements JSONAware {
     private Map<String, Map<String, Fields>> _componentToStreamToFields;
     private String _topologyId;
     protected Map _stormConf;
-    
+
     // pass in componentToSortedTasks for the case of running tons of tasks in single executor
-    public GeneralTopologyContext(StormTopology topology, Map stormConf, 
-            Map<Integer, String> taskToComponent, Map<String, List<Integer>> componentToSortedTasks, 
-			Map<String, Map<String, Fields>> componentToStreamToFields, String topologyId) {
+    public GeneralTopologyContext(StormTopology topology, Map stormConf, Map<Integer, String> taskToComponent,
+            Map<String, List<Integer>> componentToSortedTasks, Map<String, Map<String, Fields>> componentToStreamToFields, String topologyId) {
         _topology = topology;
         _stormConf = stormConf;
         _taskToComponent = taskToComponent;
@@ -54,7 +48,7 @@ public class GeneralTopologyContext implements JSONAware {
         _componentToTasks = componentToSortedTasks;
         _componentToStreamToFields = componentToStreamToFields;
     }
-    
+
     /**
      * Gets the unique id assigned to this topology. The id is the storm name with a unique nonce appended to it.
      * 
@@ -63,7 +57,7 @@ public class GeneralTopologyContext implements JSONAware {
     public String getTopologyId() {
         return _topologyId;
     }
-    
+
     /**
     * Please use getTopologyId() instead.
      * 
@@ -73,7 +67,7 @@ public class GeneralTopologyContext implements JSONAware {
     public String getStormId() {
         return _topologyId;
     }
-    
+
     /**
      * Gets the Thrift object representing the topology.
      * 
@@ -82,7 +76,7 @@ public class GeneralTopologyContext implements JSONAware {
     public StormTopology getRawTopology() {
         return _topology;
     }
-    
+
     /**
      * Gets the component id for the specified task id. The component id maps to a component id specified for a Spout or Bolt in the topology definition.
      * 
@@ -96,14 +90,14 @@ public class GeneralTopologyContext implements JSONAware {
             return _taskToComponent.get(taskId);
         }
     }
-    
+
     /**
      * Gets the set of streams declared for the specified component.
      */
     public Set<String> getComponentStreams(String componentId) {
         return getComponentCommon(componentId).get_streams().keySet();
     }
-    
+
     /**
      * Gets the task ids allocated for the given component id. The task ids are always returned in ascending order.
      */
@@ -114,7 +108,7 @@ public class GeneralTopologyContext implements JSONAware {
         else
             return new ArrayList<Integer>(ret);
     }
-    
+
     /**
      * Gets the declared output fields for the specified component/stream.
      */
@@ -125,14 +119,14 @@ public class GeneralTopologyContext implements JSONAware {
         }
         return ret;
     }
-    
+
     /**
      * Gets the declared output fields for the specified global stream id.
      */
     public Fields getComponentOutputFields(GlobalStreamId id) {
         return getComponentOutputFields(id.get_componentId(), id.get_streamId());
     }
-    
+
     /**
      * Gets the declared inputs to the specified component.
      * 
@@ -141,7 +135,7 @@ public class GeneralTopologyContext implements JSONAware {
     public Map<GlobalStreamId, Grouping> getSources(String componentId) {
         return getComponentCommon(componentId).get_inputs();
     }
-    
+
     /**
      * Gets information about who is consuming the outputs of the specified component, and how.
      * 
@@ -163,7 +157,7 @@ public class GeneralTopologyContext implements JSONAware {
         }
         return ret;
     }
-    
+
     @Override
     public String toJSONString() {
         Map obj = new HashMap();
@@ -172,25 +166,25 @@ public class GeneralTopologyContext implements JSONAware {
         // at the minimum should send source info
         return Utils.to_json(obj);
     }
-    
+
     /**
      * Gets a map from task id to component id.
      */
     public Map<Integer, String> getTaskToComponent() {
         return _taskToComponent;
     }
-    
+
     /**
      * Gets a list of all component ids in this topology
      */
     public Set<String> getComponentIds() {
         return ThriftTopologyUtils.getComponentIds(getRawTopology());
     }
-    
+
     public ComponentCommon getComponentCommon(String componentId) {
         return ThriftTopologyUtils.getComponentCommon(getRawTopology(), componentId);
     }
-    
+
     public int maxTopologyMessageTimeout() {
         Integer max = Utils.getInt(_stormConf.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS));
         for (String spout : getRawTopology().get_spouts().keySet()) {
@@ -206,4 +200,8 @@ public class GeneralTopologyContext implements JSONAware {
         }
         return max;
     }
+
+    public Map getStormConf() {
+        return _stormConf;
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/task/IBolt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/task/IBolt.java b/jstorm-core/src/main/java/backtype/storm/task/IBolt.java
index 48acdda..45f8eef 100755
--- a/jstorm-core/src/main/java/backtype/storm/task/IBolt.java
+++ b/jstorm-core/src/main/java/backtype/storm/task/IBolt.java
@@ -22,63 +22,70 @@ import java.util.Map;
 import java.io.Serializable;
 
 /**
- * An IBolt represents a component that takes tuples as input and produces tuples
- * as output. An IBolt can do everything from filtering to joining to functions
- * to aggregations. It does not have to process a tuple immediately and may
- * hold onto tuples to process later.
- *
- * <p>A bolt's lifecycle is as follows:</p>
- *
- * <p>IBolt object created on client machine. The IBolt is serialized into the topology
- * (using Java serialization) and submitted to the master machine of the cluster (Nimbus).
- * Nimbus then launches workers which deserialize the object, call prepare on it, and then
- * start processing tuples.</p>
- *
- * <p>If you want to parameterize an IBolt, you should set the parameters through its
- * constructor and save the parameterization state as instance variables (which will
- * then get serialized and shipped to every task executing this bolt across the cluster).</p>
- *
- * <p>When defining bolts in Java, you should use the IRichBolt interface which adds
- * necessary methods for using the Java TopologyBuilder API.</p>
+ * An IBolt represents a component that takes tuples as input and produces tuples as output. An IBolt can do everything from filtering to joining to functions
+ * to aggregations. It does not have to process a tuple immediately and may hold onto tuples to process later.
+ * 
+ * <p>
+ * A bolt's lifecycle is as follows:
+ * </p>
+ * 
+ * <p>
+ * IBolt object created on client machine. The IBolt is serialized into the topology (using Java serialization) and submitted to the master machine of the
+ * cluster (Nimbus). Nimbus then launches workers which deserialize the object, call prepare on it, and then start processing tuples.
+ * </p>
+ * 
+ * <p>
+ * If you want to parameterize an IBolt, you should set the parameters through its constructor and save the parameterization state as instance variables (which
+ * will then get serialized and shipped to every task executing this bolt across the cluster).
+ * </p>
+ * 
+ * <p>
+ * When defining bolts in Java, you should use the IRichBolt interface which adds necessary methods for using the Java TopologyBuilder API.
+ * </p>
  */
 public interface IBolt extends Serializable {
     /**
-     * Called when a task for this component is initialized within a worker on the cluster.
-     * It provides the bolt with the environment in which the bolt executes.
-     *
-     * <p>This includes the:</p>
+     * Called when a task for this component is initialized within a worker on the cluster. It provides the bolt with the environment in which the bolt
+     * executes.
      * 
-     * @param stormConf The Storm configuration for this bolt. This is the configuration provided to the topology merged in with cluster configuration on this machine.
-     * @param context This object can be used to get information about this task's place within the topology, including the task id and component id of this task, input and output information, etc.
-     * @param collector The collector is used to emit tuples from this bolt. Tuples can be emitted at any time, including the prepare and cleanup methods. The collector is thread-safe and should be saved as an instance variable of this bolt object.
+     * <p>
+     * This includes the:
+     * </p>
+     * 
+     * @param stormConf The Storm configuration for this bolt. This is the configuration provided to the topology merged in with cluster configuration on this
+     *            machine.
+     * @param context This object can be used to get information about this task's place within the topology, including the task id and component id of this
+     *            task, input and output information, etc.
+     * @param collector The collector is used to emit tuples from this bolt. Tuples can be emitted at any time, including the prepare and cleanup methods. The
+     *            collector is thread-safe and should be saved as an instance variable of this bolt object.
      */
     void prepare(Map stormConf, TopologyContext context, OutputCollector collector);
 
     /**
-     * Process a single tuple of input. The Tuple object contains metadata on it
-     * about which component/stream/task it came from. The values of the Tuple can
-     * be accessed using Tuple#getValue. The IBolt does not have to process the Tuple
-     * immediately. It is perfectly fine to hang onto a tuple and process it later
+     * Process a single tuple of input. The Tuple object contains metadata on it about which component/stream/task it came from. The values of the Tuple can be
+     * accessed using Tuple#getValue. The IBolt does not have to process the Tuple immediately. It is perfectly fine to hang onto a tuple and process it later
      * (for instance, to do an aggregation or join).
-     *
-     * <p>Tuples should be emitted using the OutputCollector provided through the prepare method.
-     * It is required that all input tuples are acked or failed at some point using the OutputCollector.
-     * Otherwise, Storm will be unable to determine when tuples coming off the spouts
-     * have been completed.</p>
-     *
-     * <p>For the common case of acking an input tuple at the end of the execute method,
-     * see IBasicBolt which automates this.</p>
+     * 
+     * <p>
+     * Tuples should be emitted using the OutputCollector provided through the prepare method. It is required that all input tuples are acked or failed at some
+     * point using the OutputCollector. Otherwise, Storm will be unable to determine when tuples coming off the spouts have been completed.
+     * </p>
+     * 
+     * <p>
+     * For the common case of acking an input tuple at the end of the execute method, see IBasicBolt which automates this.
+     * </p>
      * 
      * @param input The input tuple to be processed.
      */
     void execute(Tuple input);
 
     /**
-     * Called when an IBolt is going to be shutdown. There is no guarentee that cleanup
-     * will be called, because the supervisor kill -9's worker processes on the cluster.
-     *
-     * <p>The one context where cleanup is guaranteed to be called is when a topology
-     * is killed when running Storm in local mode.</p>
+     * Called when an IBolt is going to be shut down. There is no guarantee that cleanup will be called, because the supervisor kill -9's worker processes on the
+     * cluster.
+     * 
+     * <p>
+     * The one context where cleanup is guaranteed to be called is when a topology is killed when running Storm in local mode.
+     * </p>
      */
     void cleanup();
 }
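
For reference, a minimal implementation of the contract described in the javadoc above might look like this (a sketch; the class and field names are illustrative, not part of the commit):

    import java.util.Map;
    import backtype.storm.task.IBolt;
    import backtype.storm.task.OutputCollector;
    import backtype.storm.task.TopologyContext;
    import backtype.storm.tuple.Tuple;

    // Sketch: parameters are passed through the constructor and stored as
    // serializable instance variables; the collector is saved in prepare();
    // every input tuple is eventually acked in execute().
    public class PrefixBolt implements IBolt {
        private final String prefix;       // set on the client, shipped via Java serialization
        private OutputCollector collector; // assigned in prepare(), safe to keep as a field

        public PrefixBolt(String prefix) {
            this.prefix = prefix;
        }

        public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
            this.collector = collector;
        }

        public void execute(Tuple input) {
            // process the tuple here, e.g. using prefix + input.getValue(0)
            collector.ack(input); // required: ack or fail every input tuple
        }

        public void cleanup() {
            // not guaranteed to run on a real cluster (see above)
        }
    }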

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/task/IMetricsContext.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/task/IMetricsContext.java b/jstorm-core/src/main/java/backtype/storm/task/IMetricsContext.java
index a1d8bc2..cb77ab5 100755
--- a/jstorm-core/src/main/java/backtype/storm/task/IMetricsContext.java
+++ b/jstorm-core/src/main/java/backtype/storm/task/IMetricsContext.java
@@ -23,9 +23,10 @@ import backtype.storm.metric.api.IMetric;
 import backtype.storm.metric.api.IReducer;
 import backtype.storm.metric.api.ReducedMetric;
 
-
 public interface IMetricsContext {
     <T extends IMetric> T registerMetric(String name, T metric, int timeBucketSizeInSecs);
+
     ReducedMetric registerMetric(String name, IReducer reducer, int timeBucketSizeInSecs);
-    CombinedMetric registerMetric(String name, ICombiner combiner, int timeBucketSizeInSecs);  
+
+    CombinedMetric registerMetric(String name, ICombiner combiner, int timeBucketSizeInSecs);
 }
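
These overloads are typically called from a bolt's prepare() or a spout's open(), since TopologyContext implements IMetricsContext. A hedged sketch using the stock CountMetric and MeanReducer (the class and metric names here are illustrative):

    import backtype.storm.metric.api.CountMetric;
    import backtype.storm.metric.api.MeanReducer;
    import backtype.storm.metric.api.ReducedMetric;
    import backtype.storm.task.IMetricsContext;

    public class MetricsExample {
        private CountMetric emitted;
        private ReducedMetric avgLatencyMs;

        // The 60 means each metric is published to the configured metrics
        // consumers in 60-second buckets.
        void registerAll(IMetricsContext context) {
            emitted = context.registerMetric("emitted-count", new CountMetric(), 60);
            avgLatencyMs = context.registerMetric("avg-latency-ms", new MeanReducer(), 60);
        }

        void onTupleProcessed(double latencyMs) {
            emitted.incr();                 // CountMetric: per-bucket counter
            avgLatencyMs.update(latencyMs); // MeanReducer: averages updates within a bucket
        }
    }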

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/task/IOutputCollector.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/task/IOutputCollector.java b/jstorm-core/src/main/java/backtype/storm/task/IOutputCollector.java
index a62563a..1759310 100755
--- a/jstorm-core/src/main/java/backtype/storm/task/IOutputCollector.java
+++ b/jstorm-core/src/main/java/backtype/storm/task/IOutputCollector.java
@@ -23,10 +23,13 @@ import java.util.List;
 
 public interface IOutputCollector extends IErrorReporter {
     /**
-     *  Returns the task ids that received the tuples.
+     * Returns the task ids that received the tuples.
      */
     List<Integer> emit(String streamId, Collection<Tuple> anchors, List<Object> tuple);
+
     void emitDirect(int taskId, String streamId, Collection<Tuple> anchors, List<Object> tuple);
+
     void ack(Tuple input);
+
     void fail(Tuple input);
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/task/OutputCollector.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/task/OutputCollector.java b/jstorm-core/src/main/java/backtype/storm/task/OutputCollector.java
index 620d33d..673a8b2 100755
--- a/jstorm-core/src/main/java/backtype/storm/task/OutputCollector.java
+++ b/jstorm-core/src/main/java/backtype/storm/task/OutputCollector.java
@@ -24,22 +24,19 @@ import java.util.Collection;
 import java.util.List;
 
 /**
- * This output collector exposes the API for emitting tuples from an IRichBolt.
- * This is the core API for emitting tuples. For a simpler API, and a more restricted
- * form of stream processing, see IBasicBolt and BasicOutputCollector.
+ * This output collector exposes the API for emitting tuples from an IRichBolt. This is the core API for emitting tuples. For a simpler API, and a more
+ * restricted form of stream processing, see IBasicBolt and BasicOutputCollector.
  */
 public class OutputCollector implements IOutputCollector {
     private IOutputCollector _delegate;
-    
-    
+
     public OutputCollector(IOutputCollector delegate) {
         _delegate = delegate;
     }
-    
+
     /**
-     * Emits a new tuple to a specific stream with a single anchor. The emitted values must be 
-     * immutable.
-     *
+     * Emits a new tuple to a specific stream with a single anchor. The emitted values must be immutable.
+     * 
      * @param streamId the stream to emit to
      * @param anchor the tuple to anchor to
      * @param tuple the new output tuple from this bolt
@@ -50,10 +47,8 @@ public class OutputCollector implements IOutputCollector {
     }
 
     /**
-     * Emits a new unanchored tuple to the specified stream. Because it's unanchored,
-     * if a failure happens downstream, this new tuple won't affect whether any
-     * spout tuples are considered failed or not. The emitted values must be 
-     * immutable.
+     * Emits a new unanchored tuple to the specified stream. Because it's unanchored, if a failure happens downstream, this new tuple won't affect whether any
+     * spout tuples are considered failed or not. The emitted values must be immutable.
      * 
      * @param streamId the stream to emit to
      * @param tuple the new output tuple from this bolt
@@ -64,8 +59,7 @@ public class OutputCollector implements IOutputCollector {
     }
 
     /**
-     * Emits a new tuple to the default stream anchored on a group of input tuples. The emitted
-     * values must be immutable.
+     * Emits a new tuple to the default stream anchored on a group of input tuples. The emitted values must be immutable.
      * 
      * @param anchors the tuples to anchor to
      * @param tuple the new output tuple from this bolt
@@ -75,10 +69,8 @@ public class OutputCollector implements IOutputCollector {
         return emit(Utils.DEFAULT_STREAM_ID, anchors, tuple);
     }
 
-
     /**
-     * Emits a new tuple to the default stream anchored on a single tuple. The emitted values must be 
-     * immutable.
+     * Emits a new tuple to the default stream anchored on a single tuple. The emitted values must be immutable.
      * 
      * @param anchor the tuple to anchor to
      * @param tuple the new output tuple from this bolt
@@ -89,11 +81,9 @@ public class OutputCollector implements IOutputCollector {
     }
 
     /**
-     * Emits a new unanchored tuple to the default stream. Beacuse it's unanchored,
-     * if a failure happens downstream, this new tuple won't affect whether any
-     * spout tuples are considered failed or not. The emitted values must be 
-     * immutable.
-     *
+     * Emits a new unanchored tuple to the default stream. Because it's unanchored, if a failure happens downstream, this new tuple won't affect whether any
+     * spout tuples are considered failed or not. The emitted values must be immutable.
+     * 
      * @param tuple the new output tuple from this bolt
      * @return the list of task ids that this new tuple was sent to
      */
@@ -102,13 +92,10 @@ public class OutputCollector implements IOutputCollector {
     }
 
     /**
-     * Emits a tuple directly to the specified task id on the specified stream.
-     * If the target bolt does not subscribe to this bolt using a direct grouping,
-     * the tuple will not be sent. If the specified output stream is not declared
-     * as direct, or the target bolt subscribes with a non-direct grouping,
-     * an error will occur at runtime. The emitted values must be 
-     * immutable.
-     *
+     * Emits a tuple directly to the specified task id on the specified stream. If the target bolt does not subscribe to this bolt using a direct grouping, the
+     * tuple will not be sent. If the specified output stream is not declared as direct, or the target bolt subscribes with a non-direct grouping, an error will
+     * occur at runtime. The emitted values must be immutable.
+     * 
      * @param taskId the taskId to send the new tuple to
      * @param streamId the stream to send the tuple on. It must be declared as a direct stream in the topology definition.
      * @param anchor the tuple to anchor to
@@ -119,14 +106,11 @@ public class OutputCollector implements IOutputCollector {
     }
 
     /**
-     * Emits a tuple directly to the specified task id on the specified stream.
-     * If the target bolt does not subscribe to this bolt using a direct grouping,
-     * the tuple will not be sent. If the specified output stream is not declared
-     * as direct, or the target bolt subscribes with a non-direct grouping,
-     * an error will occur at runtime. Note that this method does not use anchors,
-     * so downstream failures won't affect the failure status of any spout tuples.
-     * The emitted values must be immutable.
-     *
+     * Emits a tuple directly to the specified task id on the specified stream. If the target bolt does not subscribe to this bolt using a direct grouping, the
+     * tuple will not be sent. If the specified output stream is not declared as direct, or the target bolt subscribes with a non-direct grouping, an error will
+     * occur at runtime. Note that this method does not use anchors, so downstream failures won't affect the failure status of any spout tuples. The emitted
+     * values must be immutable.
+     * 
      * @param taskId the taskId to send the new tuple to
      * @param streamId the stream to send the tuple on. It must be declared as a direct stream in the topology definition.
      * @param tuple the new output tuple from this bolt
@@ -136,17 +120,15 @@ public class OutputCollector implements IOutputCollector {
     }
 
     /**
-     * Emits a tuple directly to the specified task id on the default stream.
-     * If the target bolt does not subscribe to this bolt using a direct grouping,
-     * the tuple will not be sent. If the specified output stream is not declared
-     * as direct, or the target bolt subscribes with a non-direct grouping,
-     * an error will occur at runtime. The emitted values must be 
-     * immutable.
-     *
-     * <p>The default stream must be declared as direct in the topology definition.
-     * See OutputDeclarer#declare for how this is done when defining topologies
-     * in Java.</p>
-     *
+     * Emits a tuple directly to the specified task id on the default stream. If the target bolt does not subscribe to this bolt using a direct grouping, the
+     * tuple will not be sent. If the specified output stream is not declared as direct, or the target bolt subscribes with a non-direct grouping, an error will
+     * occur at runtime. The emitted values must be immutable.
+     * 
+     * <p>
+     * The default stream must be declared as direct in the topology definition. See OutputDeclarer#declare for how this is done when defining topologies in
+     * Java.
+     * </p>
+     * 
      * @param taskId the taskId to send the new tuple to
      * @param anchors the tuples to anchor to
      * @param tuple the new output tuple from this bolt
@@ -156,17 +138,15 @@ public class OutputCollector implements IOutputCollector {
     }
 
     /**
-     * Emits a tuple directly to the specified task id on the default stream.
-     * If the target bolt does not subscribe to this bolt using a direct grouping,
-     * the tuple will not be sent. If the specified output stream is not declared
-     * as direct, or the target bolt subscribes with a non-direct grouping,
-     * an error will occur at runtime. The emitted values must be 
-     * immutable.
-     *
-     * <p>The default stream must be declared as direct in the topology definition.
-     * See OutputDeclarer#declare for how this is done when defining topologies
-     * in Java.</p>
-     *
+     * Emits a tuple directly to the specified task id on the default stream. If the target bolt does not subscribe to this bolt using a direct grouping, the
+     * tuple will not be sent. If the specified output stream is not declared as direct, or the target bolt subscribes with a non-direct grouping, an error will
+     * occur at runtime. The emitted values must be immutable.
+     * 
+     * <p>
+     * The default stream must be declared as direct in the topology definition. See OutputDeclarer#declare for how this is done when defining topologies in
+     * Java.
+     * </p>
+     * 
      * @param taskId the taskId to send the new tuple to
      * @param anchor the tuple to anchor to
      * @param tuple the new output tuple from this bolt
@@ -175,22 +155,20 @@ public class OutputCollector implements IOutputCollector {
         emitDirect(taskId, Utils.DEFAULT_STREAM_ID, anchor, tuple);
     }
 
-
     /**
-     * Emits a tuple directly to the specified task id on the default stream.
-     * If the target bolt does not subscribe to this bolt using a direct grouping,
-     * the tuple will not be sent. If the specified output stream is not declared
-     * as direct, or the target bolt subscribes with a non-direct grouping,
-     * an error will occur at runtime. The emitted values must be 
-     * immutable.
-     *
-     * <p>The default stream must be declared as direct in the topology definition.
-     * See OutputDeclarer#declare for how this is done when defining topologies
-     * in Java.</p>
-     *
-     * <p>Note that this method does not use anchors, so downstream failures won't
-     * affect the failure status of any spout tuples.</p>
-     *
+     * Emits a tuple directly to the specified task id on the default stream. If the target bolt does not subscribe to this bolt using a direct grouping, the
+     * tuple will not be sent. If the specified output stream is not declared as direct, or the target bolt subscribes with a non-direct grouping, an error will
+     * occur at runtime. The emitted values must be immutable.
+     * 
+     * <p>
+     * The default stream must be declared as direct in the topology definition. See OutputDeclarer#declare for how this is done when defining topologies in
+     * Java.
+     * </p>
+     * 
+     * <p>
+     * Note that this method does not use anchors, so downstream failures won't affect the failure status of any spout tuples.
+     * </p>
+     * 
      * @param taskId the taskId to send the new tuple to
      * @param tuple the new output tuple from this bolt
      */
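
Putting the anchoring rules above together, a bolt's execute() typically pairs an anchored emit with an ack of the input (a sketch; the class name and emitted values are illustrative):

    import java.util.Map;
    import backtype.storm.task.IBolt;
    import backtype.storm.task.OutputCollector;
    import backtype.storm.task.TopologyContext;
    import backtype.storm.tuple.Tuple;
    import backtype.storm.tuple.Values;

    // Sketch: the anchored emit ties the new tuple to the input, so a
    // downstream failure causes the originating spout tuple to be replayed;
    // the unanchored emit does not; the input itself must still be acked.
    public class SplitBolt implements IBolt {
        private OutputCollector collector;

        public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
            this.collector = collector;
        }

        public void execute(Tuple input) {
            collector.emit(input, new Values(input.getValue(0))); // anchored on input
            collector.emit(new Values("side-channel-stat"));      // unanchored
            collector.ack(input);                                 // complete the input tuple
        }

        public void cleanup() {
        }
    }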

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/task/ShellBolt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/task/ShellBolt.java b/jstorm-core/src/main/java/backtype/storm/task/ShellBolt.java
index d9c8f03..ae7c76f 100755
--- a/jstorm-core/src/main/java/backtype/storm/task/ShellBolt.java
+++ b/jstorm-core/src/main/java/backtype/storm/task/ShellBolt.java
@@ -40,29 +40,28 @@ import java.util.concurrent.atomic.AtomicLong;
 import static java.util.concurrent.TimeUnit.SECONDS;
 
 /**
- * A bolt that shells out to another process to process tuples. ShellBolt
- * communicates with that process over stdio using a special protocol. An ~100
- * line library is required to implement that protocol, and adapter libraries
- * currently exist for Ruby and Python.
- *
- * <p>To run a ShellBolt on a cluster, the scripts that are shelled out to must be
- * in the resources directory within the jar submitted to the master.
- * During development/testing on a local machine, that resources directory just
- * needs to be on the classpath.</p>
- *
- * <p>When creating topologies using the Java API, subclass this bolt and implement
- * the IRichBolt interface to create components for the topology that use other languages. For example:
+ * A bolt that shells out to another process to process tuples. ShellBolt communicates with that process over stdio using a special protocol. An ~100 line
+ * library is required to implement that protocol, and adapter libraries currently exist for Ruby and Python.
+ * 
+ * <p>
+ * To run a ShellBolt on a cluster, the scripts that are shelled out to must be in the resources directory within the jar submitted to the master. During
+ * development/testing on a local machine, that resources directory just needs to be on the classpath.
  * </p>
- *
+ * 
+ * <p>
+ * When creating topologies using the Java API, subclass this bolt and implement the IRichBolt interface to create components for the topology that use other
+ * languages. For example:
+ * </p>
+ * 
  * <pre>
  * public class MyBolt extends ShellBolt implements IRichBolt {
- *      public MyBolt() {
- *          super("python", "mybolt.py");
- *      }
- *
- *      public void declareOutputFields(OutputFieldsDeclarer declarer) {
- *          declarer.declare(new Fields("field1", "field2"));
- *      }
+ *     public MyBolt() {
+ *         super(&quot;python&quot;, &quot;mybolt.py&quot;);
+ *     }
+ * 
+ *     public void declareOutputFields(OutputFieldsDeclarer declarer) {
+ *         declarer.declare(new Fields(&quot;field1&quot;, &quot;field2&quot;));
+ *     }
  * }
  * </pre>
  */
@@ -82,7 +81,7 @@ public class ShellBolt implements IBolt {
 
     private Thread _readerThread;
     private Thread _writerThread;
-    
+
     private TopologyContext _context;
 
     private int workerTimeoutMills;
@@ -98,11 +97,10 @@ public class ShellBolt implements IBolt {
         _command = command;
     }
 
-    public void prepare(Map stormConf, TopologyContext context,
-                        final OutputCollector collector) {
+    public void prepare(Map stormConf, TopologyContext context, final OutputCollector collector) {
         Object maxPending = stormConf.get(Config.TOPOLOGY_SHELLBOLT_MAX_PENDING);
         if (maxPending != null) {
-           this._pendingWrites = new LinkedBlockingQueue(((Number)maxPending).intValue());
+            this._pendingWrites = new LinkedBlockingQueue(((Number) maxPending).intValue());
         }
         _rand = new Random();
         _collector = collector;
@@ -113,7 +111,7 @@ public class ShellBolt implements IBolt {
 
         _process = new ShellProcess(_command);
 
-        //subprocesses must send their pid first thing
+        // subprocesses must send their pid first thing
         Number subpid = _process.launch(stormConf, context);
         LOG.info("Launched subprocess with pid " + subpid);
 
@@ -136,14 +134,14 @@ public class ShellBolt implements IBolt {
             throw new RuntimeException(_exception);
         }
 
-        //just need an id
+        // just need an id
         String genId = Long.toString(_rand.nextLong());
         _inputs.put(genId, input);
         try {
             BoltMsg boltMsg = createBoltMessage(input, genId);
 
             _pendingWrites.put(boltMsg);
-        } catch(InterruptedException e) {
+        } catch (InterruptedException e) {
             String processInfo = _process.getProcessInfoString() + _process.getProcessTerminationInfoString();
             throw new RuntimeException("Error during multilang processing " + processInfo, e);
         }
@@ -170,7 +168,7 @@ public class ShellBolt implements IBolt {
 
     private void handleAck(Object id) {
         Tuple acked = _inputs.remove(id);
-        if(acked==null) {
+        if (acked == null) {
             throw new RuntimeException("Acked a non-existent or already acked/failed id: " + id);
         }
         _collector.ack(acked);
@@ -178,7 +176,7 @@ public class ShellBolt implements IBolt {
 
     private void handleFail(Object id) {
         Tuple failed = _inputs.remove(id);
-        if(failed==null) {
+        if (failed == null) {
             throw new RuntimeException("Failed a non-existent or already acked/failed id: " + id);
         }
         _collector.fail(failed);
@@ -201,14 +199,13 @@ public class ShellBolt implements IBolt {
             }
         }
 
-        if(shellMsg.getTask() == 0) {
+        if (shellMsg.getTask() == 0) {
             List<Integer> outtasks = _collector.emit(shellMsg.getStream(), anchors, shellMsg.getTuple());
             if (shellMsg.areTaskIdsNeeded()) {
                 _pendingWrites.put(outtasks);
             }
         } else {
-            _collector.emitDirect((int) shellMsg.getTask(),
-                    shellMsg.getStream(), anchors, shellMsg.getTuple());
+            _collector.emitDirect((int) shellMsg.getTask(), shellMsg.getStream(), anchors, shellMsg.getTuple());
         }
     }
 
@@ -218,46 +215,46 @@ public class ShellBolt implements IBolt {
         ShellMsg.ShellLogLevel logLevel = shellMsg.getLogLevel();
 
         switch (logLevel) {
-            case TRACE:
-                LOG.trace(msg);
-                break;
-            case DEBUG:
-                LOG.debug(msg);
-                break;
-            case INFO:
-                LOG.info(msg);
-                break;
-            case WARN:
-                LOG.warn(msg);
-                break;
-            case ERROR:
-                LOG.error(msg);
-                _collector.reportError(new ReportedFailedException(msg));
-                break;
-            default:
-                LOG.info(msg);
-                break;
+        case TRACE:
+            LOG.trace(msg);
+            break;
+        case DEBUG:
+            LOG.debug(msg);
+            break;
+        case INFO:
+            LOG.info(msg);
+            break;
+        case WARN:
+            LOG.warn(msg);
+            break;
+        case ERROR:
+            LOG.error(msg);
+            _collector.reportError(new ReportedFailedException(msg));
+            break;
+        default:
+            LOG.info(msg);
+            break;
         }
     }
 
     private void handleMetrics(ShellMsg shellMsg) {
-        //get metric name
+        // get metric name
         String name = shellMsg.getMetricName();
         if (name.isEmpty()) {
             throw new RuntimeException("Receive Metrics name is empty");
         }
-        
-        //get metric by name
+
+        // get metric by name
         IMetric iMetric = _context.getRegisteredMetricByName(name);
         if (iMetric == null) {
-            throw new RuntimeException("Could not find metric by name["+name+"] ");
+            throw new RuntimeException("Could not find metric by name[" + name + "] ");
         }
-        if ( !(iMetric instanceof IShellMetric)) {
-            throw new RuntimeException("Metric["+name+"] is not IShellMetric, can not call by RPC");
+        if (!(iMetric instanceof IShellMetric)) {
+            throw new RuntimeException("Metric[" + name + "] is not IShellMetric, can not call by RPC");
         }
-        IShellMetric iShellMetric = (IShellMetric)iMetric;
-        
-        //call updateMetricFromRPC with params
+        IShellMetric iShellMetric = (IShellMetric) iMetric;
+
+        // call updateMetricFromRPC with params
         Object paramsObj = shellMsg.getMetricParams();
         try {
             iShellMetric.updateMetricFromRPC(paramsObj);
@@ -265,7 +262,7 @@ public class ShellBolt implements IBolt {
             throw re;
         } catch (Exception e) {
             throw new RuntimeException(e);
-        }       
+        }
     }
 
     private void setHeartbeat() {
@@ -279,12 +276,10 @@ public class ShellBolt implements IBolt {
     private void die(Throwable exception) {
         String processInfo = _process.getProcessInfoString() + _process.getProcessTerminationInfoString();
         _exception = new RuntimeException(processInfo, exception);
-        String message = String.format("Halting process: ShellBolt died. Command: %s, ProcessInfo %s",
-                Arrays.toString(_command),
-                processInfo);
+        String message = String.format("Halting process: ShellBolt died. Command: %s, ProcessInfo %s", Arrays.toString(_command), processInfo);
         LOG.error(message, exception);
         _collector.reportError(exception);
-        if (_running || (exception instanceof Error)) { //don't exit if not running, unless it is an Error
+        if (_running || (exception instanceof Error)) { // don't exit if not running, unless it is an Error
             System.exit(11);
         }
     }
@@ -301,8 +296,7 @@ public class ShellBolt implements IBolt {
             long currentTimeMillis = System.currentTimeMillis();
             long lastHeartbeat = getLastHeartbeat();
 
-            LOG.debug("BOLT - current time : {}, last heartbeat : {}, worker timeout (ms) : {}",
-                    currentTimeMillis, lastHeartbeat, workerTimeoutMills);
+            LOG.debug("BOLT - current time : {}, last heartbeat : {}, worker timeout (ms) : {}", currentTimeMillis, lastHeartbeat, workerTimeoutMills);
 
             if (currentTimeMillis - lastHeartbeat > workerTimeoutMills) {
                 bolt.die(new RuntimeException("subprocess heartbeat timeout"));
@@ -311,7 +305,6 @@ public class ShellBolt implements IBolt {
             sendHeartbeatFlag.compareAndSet(false, true);
         }
 
-
     }
 
     private class BoltReaderRunnable implements Runnable {
@@ -326,7 +319,7 @@ public class ShellBolt implements IBolt {
                     }
                     if (command.equals("sync")) {
                         setHeartbeat();
-                    } else if(command.equals("ack")) {
+                    } else if (command.equals("ack")) {
                         handleAck(shellMsg.getId());
                     } else if (command.equals("fail")) {
                         handleFail(shellMsg.getId());
@@ -363,7 +356,7 @@ public class ShellBolt implements IBolt {
                     if (write instanceof BoltMsg) {
                         _process.writeBoltMsg((BoltMsg) write);
                     } else if (write instanceof List<?>) {
-                        _process.writeTaskIds((List<Integer>)write);
+                        _process.writeTaskIds((List<Integer>) write);
                     } else if (write != null) {
                         throw new RuntimeException("Unknown class type to write: " + write.getClass().getName());
                     }
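
Wiring a ShellBolt subclass like the MyBolt example above into a topology works the same as for any rich bolt. A sketch, assuming mybolt.py sits in the jar's resources/ directory and that the stock backtype.storm.testing.TestWordSpout is available on the classpath (component names and parallelism are illustrative):

    import java.util.Map;
    import backtype.storm.task.ShellBolt;
    import backtype.storm.testing.TestWordSpout;
    import backtype.storm.topology.IRichBolt;
    import backtype.storm.topology.OutputFieldsDeclarer;
    import backtype.storm.topology.TopologyBuilder;
    import backtype.storm.tuple.Fields;

    public class ShellTopology {
        // Same shape as the MyBolt example in the ShellBolt javadoc above.
        public static class PyBolt extends ShellBolt implements IRichBolt {
            public PyBolt() {
                super("python", "mybolt.py");
            }

            public void declareOutputFields(OutputFieldsDeclarer declarer) {
                declarer.declare(new Fields("word"));
            }

            public Map<String, Object> getComponentConfiguration() {
                return null;
            }
        }

        public static void main(String[] args) {
            TopologyBuilder builder = new TopologyBuilder();
            builder.setSpout("words", new TestWordSpout(), 1);
            builder.setBolt("pybolt", new PyBolt(), 4) // 4 python subprocesses
                   .shuffleGrouping("words");
            // submit builder.createTopology() with StormSubmitter as usual
        }
    }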


http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyClient.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyClient.java b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyClient.java
index 6b79fb4..45c353c 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyClient.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyClient.java
@@ -17,6 +17,26 @@
  */
 package com.alibaba.jstorm.message.netty;
 
+import backtype.storm.Config;
+import backtype.storm.messaging.IConnection;
+import backtype.storm.messaging.TaskMessage;
+import backtype.storm.utils.DisruptorQueue;
+import backtype.storm.utils.Utils;
+import com.alibaba.jstorm.client.ConfigExtension;
+import com.alibaba.jstorm.common.metric.*;
+import com.alibaba.jstorm.metric.*;
+import com.alibaba.jstorm.utils.JStormServerUtils;
+import com.alibaba.jstorm.utils.JStormUtils;
+import com.alibaba.jstorm.utils.NetWorkUtils;
+import com.codahale.metrics.health.HealthCheck;
+import org.jboss.netty.bootstrap.ClientBootstrap;
+import org.jboss.netty.channel.Channel;
+import org.jboss.netty.channel.ChannelFactory;
+import org.jboss.netty.channel.ChannelFuture;
+import org.jboss.netty.channel.ChannelFutureListener;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import java.net.InetSocketAddress;
 import java.net.SocketAddress;
 import java.util.HashSet;
@@ -29,35 +49,9 @@ import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
 
-import org.jboss.netty.bootstrap.ClientBootstrap;
-import org.jboss.netty.channel.Channel;
-import org.jboss.netty.channel.ChannelFactory;
-import org.jboss.netty.channel.ChannelFuture;
-import org.jboss.netty.channel.ChannelFutureListener;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import backtype.storm.Config;
-import backtype.storm.messaging.IConnection;
-import backtype.storm.messaging.TaskMessage;
-import backtype.storm.utils.DisruptorQueue;
-import backtype.storm.utils.Utils;
-
-import com.alibaba.jstorm.client.ConfigExtension;
-import com.alibaba.jstorm.common.metric.Histogram;
-import com.alibaba.jstorm.common.metric.Meter;
-import com.alibaba.jstorm.common.metric.QueueGauge;
-import com.alibaba.jstorm.metric.JStormHealthCheck;
-import com.alibaba.jstorm.metric.JStormMetrics;
-import com.alibaba.jstorm.metric.MetricDef;
-import com.alibaba.jstorm.utils.JStormServerUtils;
-import com.alibaba.jstorm.utils.JStormUtils;
-import com.alibaba.jstorm.utils.NetWorkUtils;
-import com.codahale.metrics.health.HealthCheck;
-
 class NettyClient implements IConnection {
-    private static final Logger LOG = LoggerFactory
-            .getLogger(NettyClient.class);
+    private static final Logger LOG = LoggerFactory.getLogger(NettyClient.class);
+
     protected String name;
 
     protected final int max_retries;
@@ -84,9 +78,11 @@ class NettyClient implements IConnection {
 
     protected String address;
     // doesn't use timer, due to competition
-    protected Histogram sendTimer;
-    protected Histogram batchSizeHistogram;
-    protected Meter     sendSpeed;
+    protected AsmHistogram sendTimer;
+    protected AsmHistogram batchSizeHistogram;
+    protected AsmMeter sendSpeed;
+    protected static AsmMeter totalSendSpeed = (AsmMeter) JStormMetrics.registerWorkerMetric(MetricUtils.workerMetricName(
+            MetricDef.NETTY_CLI_SEND_SPEED, MetricType.METER), new AsmMeter());
 
     protected ReconnectRunnable reconnector;
     protected ChannelFactory clientChannelFactory;
@@ -94,19 +90,19 @@ class NettyClient implements IConnection {
     protected Set<Channel> closingChannel;
 
     protected AtomicBoolean isConnecting = new AtomicBoolean(false);
-    
+
     protected NettyConnection nettyConnection;
-    
+
     protected Map stormConf;
-    
+
     protected boolean connectMyself;
 
     protected Object channelClosing = new Object();
 
+    protected boolean enableNettyMetrics;
+
     @SuppressWarnings("rawtypes")
-    NettyClient(Map storm_conf, ChannelFactory factory,
-            ScheduledExecutorService scheduler, String host, int port,
-            ReconnectRunnable reconnector) {
+    NettyClient(Map storm_conf, ChannelFactory factory, ScheduledExecutorService scheduler, String host, int port, ReconnectRunnable reconnector) {
         this.stormConf = storm_conf;
         this.factory = factory;
         this.scheduler = scheduler;
@@ -116,34 +112,21 @@ class NettyClient implements IConnection {
         channelRef = new AtomicReference<Channel>(null);
         being_closed = new AtomicBoolean(false);
         pendings = new AtomicLong(0);
-        
+
         nettyConnection = new NettyConnection();
-        nettyConnection.setClientPort(NetWorkUtils.ip(), 
-                ConfigExtension.getLocalWorkerPort(storm_conf));
+        nettyConnection.setClientPort(NetWorkUtils.ip(), ConfigExtension.getLocalWorkerPort(storm_conf));
         nettyConnection.setServerPort(host, port);
 
         // Configure
-        buffer_size =
-                Utils.getInt(storm_conf
-                        .get(Config.STORM_MESSAGING_NETTY_BUFFER_SIZE));
-        max_retries =
-                Math.min(30, Utils.getInt(storm_conf
-                        .get(Config.STORM_MESSAGING_NETTY_MAX_RETRIES)));
-        base_sleep_ms =
-                Utils.getInt(storm_conf
-                        .get(Config.STORM_MESSAGING_NETTY_MIN_SLEEP_MS));
-        max_sleep_ms =
-                Utils.getInt(storm_conf
-                        .get(Config.STORM_MESSAGING_NETTY_MAX_SLEEP_MS));
+        buffer_size = Utils.getInt(storm_conf.get(Config.STORM_MESSAGING_NETTY_BUFFER_SIZE));
+        max_retries = Math.min(30, Utils.getInt(storm_conf.get(Config.STORM_MESSAGING_NETTY_MAX_RETRIES)));
+        base_sleep_ms = Utils.getInt(storm_conf.get(Config.STORM_MESSAGING_NETTY_MIN_SLEEP_MS));
+        max_sleep_ms = Utils.getInt(storm_conf.get(Config.STORM_MESSAGING_NETTY_MAX_SLEEP_MS));
 
         timeoutMs = ConfigExtension.getNettyPendingBufferTimeout(storm_conf);
-        MAX_SEND_PENDING =
-                (int) ConfigExtension.getNettyMaxSendPending(storm_conf);
+        MAX_SEND_PENDING = (int) ConfigExtension.getNettyMaxSendPending(storm_conf);
 
-        this.messageBatchSize =
-                Utils.getInt(
-                        storm_conf.get(Config.STORM_NETTY_MESSAGE_BATCH_SIZE),
-                        262144);
+        this.messageBatchSize = Utils.getInt(storm_conf.get(Config.STORM_NETTY_MESSAGE_BATCH_SIZE), 262144);
         messageBatchRef = new AtomicReference<MessageBatch>();
 
         // Start the connection attempt.
@@ -152,56 +135,62 @@ class NettyClient implements IConnection {
         connectMyself = isConnectMyself(stormConf, host, port);
 
         address = JStormServerUtils.getName(host, port);
-        
-        if (connectMyself == false) {
+
+        this.enableNettyMetrics = MetricUtils.isEnableNettyMetrics(storm_conf);
+        LOG.info("** enable netty metrics: {}", this.enableNettyMetrics);
+        if (!connectMyself) {
             registerMetrics();
         }
         closingChannel = new HashSet<Channel>();
     }
-    
+
     public void registerMetrics() {
-        sendTimer =
-                JStormMetrics.registerWorkerHistogram(
-                        MetricDef.NETTY_CLI_SEND_TIME, nettyConnection.toString());
-        batchSizeHistogram =
-                JStormMetrics.registerWorkerHistogram(
-                        MetricDef.NETTY_CLI_BATCH_SIZE, nettyConnection.toString());
-        sendSpeed = JStormMetrics.registerWorkerMeter(MetricDef.NETTY_CLI_SEND_SPEED, 
-                nettyConnection.toString());
-
-        CacheGaugeHealthCheck cacheGauge =
-                new CacheGaugeHealthCheck(messageBatchRef,
-                        MetricDef.NETTY_CLI_CACHE_SIZE + ":" + nettyConnection.toString());
-        JStormMetrics.registerWorkerGauge(cacheGauge,
-                MetricDef.NETTY_CLI_CACHE_SIZE, nettyConnection.toString());
-        JStormHealthCheck.registerWorkerHealthCheck(
-                MetricDef.NETTY_CLI_CACHE_SIZE + ":" + nettyConnection.toString(), cacheGauge);
-
-        JStormMetrics.registerWorkerGauge(
-                new com.codahale.metrics.Gauge<Double>() {
+        if (this.enableNettyMetrics) {
+            sendTimer = (AsmHistogram) JStormMetrics.registerNettyMetric(
+                    MetricUtils.nettyMetricName(AsmMetric.mkName(MetricDef.NETTY_CLI_SEND_TIME, nettyConnection),
+                            MetricType.HISTOGRAM),
+                    new AsmHistogram());
+            batchSizeHistogram = (AsmHistogram) JStormMetrics.registerNettyMetric(
+                    MetricUtils.nettyMetricName(AsmMetric.mkName(MetricDef.NETTY_CLI_BATCH_SIZE, nettyConnection),
+                            MetricType.HISTOGRAM),
+                    new AsmHistogram());
+            sendSpeed = (AsmMeter) JStormMetrics.registerNettyMetric(MetricUtils.nettyMetricName(
+                    AsmMetric.mkName(MetricDef.NETTY_CLI_SEND_SPEED, nettyConnection), MetricType.METER), new AsmMeter());
+
+            CacheGaugeHealthCheck cacheGauge = new CacheGaugeHealthCheck(messageBatchRef,
+                    MetricDef.NETTY_CLI_CACHE_SIZE + ":" + nettyConnection.toString());
+            JStormMetrics.registerNettyMetric(MetricUtils
+                            .nettyMetricName(AsmMetric.mkName(MetricDef.NETTY_CLI_CACHE_SIZE, nettyConnection), MetricType.GAUGE),
+                    new AsmGauge(cacheGauge));
+
+            JStormMetrics.registerNettyMetric(MetricUtils
+                            .nettyMetricName(AsmMetric.mkName(MetricDef.NETTY_CLI_SEND_PENDING, nettyConnection), MetricType.GAUGE),
+                    new AsmGauge(new com.codahale.metrics.Gauge<Double>() {
+                        @Override
+                        public Double getValue() {
+                            return ((Long) pendings.get()).doubleValue();
+                        }
+                    }));
 
-                    @Override
-                    public Double getValue() {
-                        return ((Long) pendings.get()).doubleValue();
-                    }
-                }, MetricDef.NETTY_CLI_SEND_PENDING, nettyConnection.toString());
-        
-        JStormHealthCheck.registerWorkerHealthCheck(
-                MetricDef.NETTY_CLI_CONNECTION + ":" + nettyConnection.toString(), 
+            JStormHealthCheck.registerWorkerHealthCheck(MetricDef.NETTY_CLI_CACHE_SIZE + ":" + nettyConnection.toString(),
+                    cacheGauge);
+        }
+
+        JStormHealthCheck.registerWorkerHealthCheck(MetricDef.NETTY_CLI_CONNECTION + ":" + nettyConnection.toString(),
                 new HealthCheck() {
-                    HealthCheck.Result healthy = HealthCheck.Result.healthy();
-                    HealthCheck.Result unhealthy = HealthCheck.Result.unhealthy
-                            ("NettyConnection " + nettyConnection.toString() + " is broken.");
+                    Result healthy = Result.healthy();
+                    Result unhealthy = Result
+                            .unhealthy("NettyConnection " + nettyConnection.toString() + " is broken.");
+
                     @Override
                     protected Result check() throws Exception {
-                        // TODO Auto-generated method stub
                         if (isChannelReady() == null) {
                             return unhealthy;
-                        }else {
+                        } else {
                             return healthy;
                         }
                     }
-                    
+
                 });
     }
 
@@ -216,23 +205,28 @@ class NettyClient implements IConnection {
         bootstrap.setPipelineFactory(new StormClientPipelineFactory(this, stormConf));
         reconnect();
     }
-    
+
     public boolean isConnectMyself(Map conf, String host, int port) {
         String localIp = NetWorkUtils.ip();
         String remoteIp = NetWorkUtils.host2Ip(host);
         int localPort = ConfigExtension.getLocalWorkerPort(conf);
-        
-        if (localPort == port && 
-                localIp.equals(remoteIp)) {
+
+        if (localPort == port && localIp.equals(remoteIp)) {
             return true;
         }
-        
+
         return false;
     }
 
+    public void notifyInterestChanged(Channel channel) {
+        if (channel.isWritable()) {
+            MessageBatch messageBatch = messageBatchRef.getAndSet(null);
+            flushRequest(channel, messageBatch);
+        }
+    }
+
     /**
      * The function can't be synchronized, otherwise it will deadlock
-     * 
      */
     public void doReconnect() {
         if (channelRef.get() != null) {
@@ -255,12 +249,10 @@ class NettyClient implements IConnection {
         }
 
         long sleepMs = getSleepTimeMs();
-        LOG.info("Reconnect ... [{}], {}, sleep {}ms", retries.get(), name,
-                sleepMs);
+        LOG.info("Reconnect ... [{}], {}, sleep {}ms", retries.get(), name, sleepMs);
         ChannelFuture future = bootstrap.connect(remote_addr);
         future.addListener(new ChannelFutureListener() {
-            public void operationComplete(ChannelFuture future)
-                    throws Exception {
+            public void operationComplete(ChannelFuture future) throws Exception {
                 isConnecting.set(false);
                 Channel channel = future.getChannel();
                 if (future.isSuccess()) {
@@ -269,17 +261,12 @@ class NettyClient implements IConnection {
                     setChannel(channel);
                     // handleResponse();
                 } else {
-                    LOG.info(
-                            "Failed to reconnect ... [{}], {}, channel = {}, cause = {}",
-                            retries.get(), name, channel, future.getCause());
+                    LOG.info("Failed to reconnect ... [{}], {}, channel = {}, cause = {}", retries.get(), name, channel, future.getCause());
                     reconnect();
                 }
             }
         });
         JStormUtils.sleepMs(sleepMs);
-
-        return;
-
     }
 
     public void reconnect() {
@@ -290,7 +277,6 @@ class NettyClient implements IConnection {
      * # of milliseconds to wait per exponential back-off policy
      */
     private int getSleepTimeMs() {
-
         int sleepMs = base_sleep_ms * retries.incrementAndGet();
         if (sleepMs > 1000) {
             sleepMs = 1000;
@@ -310,7 +296,7 @@ class NettyClient implements IConnection {
     public void send(TaskMessage message) {
         LOG.warn("Should be overload");
     }
-    
+
     Channel isChannelReady() {
         Channel channel = channelRef.get();
         if (channel == null) {
@@ -325,26 +311,28 @@ class NettyClient implements IConnection {
         return channel;
     }
 
-    protected synchronized void flushRequest(Channel channel,
-            final MessageBatch requests) {
+    protected synchronized void flushRequest(Channel channel, final MessageBatch requests) {
         if (requests == null || requests.isEmpty())
             return;
 
-        Double batchSize = Double.valueOf(requests.getEncoded_length());
-        batchSizeHistogram.update(batchSize);
+        Long batchSize = (long) requests.getEncoded_length();
+        if (batchSizeHistogram != null) {
+            batchSizeHistogram.update(batchSize);
+        }
         pendings.incrementAndGet();
-        sendSpeed.update(batchSize);
+        if (sendSpeed != null) {
+            sendSpeed.update(batchSize);
+        }
+        totalSendSpeed.update(batchSize);
         ChannelFuture future = channel.write(requests);
         future.addListener(new ChannelFutureListener() {
-            public void operationComplete(ChannelFuture future)
-                    throws Exception {
+            public void operationComplete(ChannelFuture future) throws Exception {
 
                 pendings.decrementAndGet();
                 if (!future.isSuccess()) {
                     Channel channel = future.getChannel();
                     if (isClosed() == false) {
-                        LOG.info("Failed to send requests to " + name + ": "
-                                + channel.toString() + ":", future.getCause());
+                        LOG.info("Failed to send requests to " + name + ": " + channel.toString() + ":", future.getCause());
                     }
 
                     if (null != channel) {
@@ -357,32 +345,29 @@ class NettyClient implements IConnection {
             }
         });
     }
-    
+
     public void unregisterMetrics() {
-        JStormMetrics.unregisterWorkerMetric(MetricDef.NETTY_CLI_SEND_TIME,
-                nettyConnection.toString());
-        JStormMetrics.unregisterWorkerMetric(MetricDef.NETTY_CLI_BATCH_SIZE,
-                nettyConnection.toString());
-        JStormMetrics.unregisterWorkerMetric(MetricDef.NETTY_CLI_SEND_PENDING,
-                nettyConnection.toString());
-        JStormMetrics.unregisterWorkerMetric(MetricDef.NETTY_CLI_CACHE_SIZE,
-                nettyConnection.toString());
-        JStormMetrics.unregisterWorkerMetric(MetricDef.NETTY_CLI_SEND_SPEED, 
-                nettyConnection.toString());
-
-        JStormHealthCheck
-                .unregisterWorkerHealthCheck(MetricDef.NETTY_CLI_CACHE_SIZE
-                        + ":" + nettyConnection.toString());
-        
-        JStormHealthCheck.unregisterWorkerHealthCheck(
-                MetricDef.NETTY_CLI_CONNECTION + ":" + nettyConnection.toString()); 
+        if (this.enableNettyMetrics) {
+            JStormMetrics.unregisterNettyMetric(MetricUtils.nettyMetricName(
+                    AsmMetric.mkName(MetricDef.NETTY_CLI_SEND_TIME, nettyConnection), MetricType.HISTOGRAM));
+            JStormMetrics.unregisterNettyMetric(MetricUtils.nettyMetricName(
+                    AsmMetric.mkName(MetricDef.NETTY_CLI_BATCH_SIZE, nettyConnection), MetricType.HISTOGRAM));
+            JStormMetrics.unregisterNettyMetric(MetricUtils.nettyMetricName(
+                    AsmMetric.mkName(MetricDef.NETTY_CLI_SEND_PENDING, nettyConnection), MetricType.GAUGE));
+            JStormMetrics.unregisterNettyMetric(MetricUtils
+                    .nettyMetricName(AsmMetric.mkName(MetricDef.NETTY_CLI_CACHE_SIZE, nettyConnection), MetricType.GAUGE));
+            JStormMetrics.unregisterNettyMetric(MetricUtils
+                    .nettyMetricName(AsmMetric.mkName(MetricDef.NETTY_CLI_SEND_SPEED, nettyConnection), MetricType.METER));
+        }
+        JStormHealthCheck.unregisterWorkerHealthCheck(MetricDef.NETTY_CLI_CACHE_SIZE + ":" + nettyConnection.toString());
+
+        JStormHealthCheck.unregisterWorkerHealthCheck(MetricDef.NETTY_CLI_CONNECTION + ":" + nettyConnection.toString());
     }
 
     /**
      * gracefully close this client.
-     * 
-     * We will send all existing requests, and then invoke close_n_release()
-     * method
+     * <p/>
+     * We will send all existing requests, and then invoke close_n_release() method
      */
     public void close() {
         LOG.info("Close netty connection to {}", name());
@@ -391,7 +376,7 @@ class NettyClient implements IConnection {
             return;
         }
 
-        if (connectMyself == false) {
+        if (!connectMyself) {
             unregisterMetrics();
         }
 
@@ -410,17 +395,13 @@ class NettyClient implements IConnection {
         final long timeoutMilliSeconds = 10 * 1000;
         final long start = System.currentTimeMillis();
 
-        LOG.info("Waiting for pending batchs to be sent with " + name()
-                + "..., timeout: {}ms, pendings: {}", timeoutMilliSeconds,
-                pendings.get());
+        LOG.info("Waiting for pending batchs to be sent with " + name() + "..., timeout: {}ms, pendings: {}", timeoutMilliSeconds, pendings.get());
 
         while (pendings.get() != 0) {
             try {
                 long delta = System.currentTimeMillis() - start;
                 if (delta > timeoutMilliSeconds) {
-                    LOG.error(
-                            "Timeout when sending pending batchs with {}..., there are still {} pending batchs not sent",
-                            name(), pendings.get());
+                    LOG.error("Timeout when sending pending batchs with {}..., there are still {} pending batchs not sent", name(), pendings.get());
                     break;
                 }
                 Thread.sleep(1000); // sleep 1s
@@ -445,7 +426,7 @@ class NettyClient implements IConnection {
 
     /**
      * Avoid channel double close
-     * 
+     *
      * @param channel
      */
     void closeChannel(final Channel channel) {
@@ -461,8 +442,7 @@ class NettyClient implements IConnection {
         LOG.debug(channel.toString() + " begin to closed");
         ChannelFuture closeFuture = channel.close();
         closeFuture.addListener(new ChannelFutureListener() {
-            public void operationComplete(ChannelFuture future)
-                    throws Exception {
+            public void operationComplete(ChannelFuture future) throws Exception {
 
                 synchronized (channelClosing) {
                     closingChannel.remove(channel);
@@ -501,14 +481,9 @@ class NettyClient implements IConnection {
             retries.set(0);
         }
 
-        final String oldLocalAddres =
-                (oldChannel == null) ? "null" : oldChannel.getLocalAddress()
-                        .toString();
-        String newLocalAddress =
-                (newChannel == null) ? "null" : newChannel.getLocalAddress()
-                        .toString();
-        LOG.info("Use new channel {} replace old channel {}", newLocalAddress,
-                oldLocalAddres);
+        final String oldLocalAddress = (oldChannel == null) ? "null" : oldChannel.getLocalAddress().toString();
+        String newLocalAddress = (newChannel == null) ? "null" : newChannel.getLocalAddress().toString();
+        LOG.info("Using new channel {} to replace old channel {}", newLocalAddress, oldLocalAddress);
 
         // avoid one netty client use too much connection, close old one
         if (oldChannel != newChannel && oldChannel != null) {
@@ -555,60 +530,56 @@ class NettyClient implements IConnection {
 
     @Override
     public Object recv(Integer taskId, int flags) {
-        throw new UnsupportedOperationException(
-                "recvTask: Client connection should not receive any messages");
+        throw new UnsupportedOperationException("recvTask: Client connection should not receive any messages");
     }
 
     @Override
     public void registerQueue(Integer taskId, DisruptorQueue recvQueu) {
-        throw new UnsupportedOperationException(
-                "recvTask: Client connection should not receive any messages");
+        throw new UnsupportedOperationException("recvTask: Client connection should not receive any messages");
     }
 
     @Override
     public void enqueue(TaskMessage message) {
-        throw new UnsupportedOperationException(
-                "recvTask: Client connection should not receive any messages");
+        throw new UnsupportedOperationException("recvTask: Client connection should not receive any messages");
     }
 
-    public static class CacheGaugeHealthCheck extends HealthCheck implements
-            com.codahale.metrics.Gauge<Double> {
+    public static class CacheGaugeHealthCheck extends HealthCheck implements com.codahale.metrics.Gauge<Double> {
 
         AtomicReference<MessageBatch> messageBatchRef;
         String name;
         Result healthy;
 
-        public CacheGaugeHealthCheck(
-                AtomicReference<MessageBatch> messageBatchRef, String name) {
+        public CacheGaugeHealthCheck(AtomicReference<MessageBatch> messageBatchRef, String name) {
             this.messageBatchRef = messageBatchRef;
             this.name = name;
-            this.healthy = HealthCheck.Result.healthy();
+            this.healthy = Result.healthy();
         }
 
         @Override
         public Double getValue() {
-            // TODO Auto-generated method stub
             MessageBatch messageBatch = messageBatchRef.get();
             if (messageBatch == null) {
                 return 0.0;
             } else {
-                Double ret = (double) messageBatch.getEncoded_length();
-                return ret;
+                return (double) messageBatch.getEncoded_length();
             }
 
         }
 
         @Override
         protected Result check() throws Exception {
-            // TODO Auto-generated method stub
             Double size = getValue();
             if (size > 8 * JStormUtils.SIZE_1_M) {
-                return HealthCheck.Result.unhealthy(name
-                        + QueueGauge.QUEUE_IS_FULL);
+                return Result.unhealthy(name + QueueGauge.QUEUE_IS_FULL);
             } else {
                 return healthy;
             }
         }
 
     }
+
+    @Override
+    public boolean available() {
+        return (isChannelReady() != null);
+    }
 }
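
One detail worth noting in the reconnect path above: despite the "exponential back-off" comment, the sleep policy visible in this diff is linear in the retry count and capped. A standalone sketch of that policy (the class name is illustrative):

    import java.util.concurrent.atomic.AtomicInteger;

    // Mirrors getSleepTimeMs() above: base_sleep_ms * retries, capped at 1000 ms;
    // NettyClient resets the retry counter once a channel is established.
    public class RetrySleepPolicy {
        private final int baseSleepMs;
        private final AtomicInteger retries = new AtomicInteger(0);

        public RetrySleepPolicy(int baseSleepMs) {
            this.baseSleepMs = baseSleepMs;
        }

        public int nextSleepMs() {
            int sleepMs = baseSleepMs * retries.incrementAndGet();
            return Math.min(sleepMs, 1000);
        }

        public void reset() {
            retries.set(0);
        }
    }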

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyClientAsync.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyClientAsync.java b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyClientAsync.java
index 1d582ba..9b58063 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyClientAsync.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyClientAsync.java
@@ -17,31 +17,29 @@
  */
 package com.alibaba.jstorm.message.netty;
 
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.commons.lang.builder.ToStringBuilder;
-import org.apache.commons.lang.builder.ToStringStyle;
-import org.jboss.netty.channel.Channel;
-import org.jboss.netty.channel.ChannelFactory;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import backtype.storm.Config;
 import backtype.storm.messaging.TaskMessage;
 import backtype.storm.utils.Utils;
-
 import com.alibaba.jstorm.client.ConfigExtension;
 import com.alibaba.jstorm.utils.IntervalCheck;
 import com.alibaba.jstorm.utils.JStormServerUtils;
 import com.alibaba.jstorm.utils.JStormUtils;
+import com.alibaba.jstorm.utils.TimeUtils;
+import org.apache.commons.lang.builder.ToStringBuilder;
+import org.apache.commons.lang.builder.ToStringStyle;
+import org.jboss.netty.channel.Channel;
+import org.jboss.netty.channel.ChannelFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 class NettyClientAsync extends NettyClient {
-    private static final Logger LOG = LoggerFactory
-            .getLogger(NettyClientAsync.class);
+    private static final Logger LOG = LoggerFactory.getLogger(NettyClientAsync.class);
     public static final String PREFIX = "Netty-Client-";
 
     // when batch buffer size is more than BATCH_THREASHOLD_WARN
@@ -54,7 +52,6 @@ class NettyClientAsync extends NettyClient {
     protected final boolean blockSend;
 
     boolean isDirectSend(Map conf) {
-
         if (JStormServerUtils.isOnePending(conf) == true) {
             return true;
         }
@@ -71,22 +68,15 @@ class NettyClientAsync extends NettyClient {
     }
 
     @SuppressWarnings("rawtypes")
-    NettyClientAsync(Map storm_conf, ChannelFactory factory,
-            ScheduledExecutorService scheduler, String host, int port,
-            ReconnectRunnable reconnector) {
+    NettyClientAsync(Map storm_conf, ChannelFactory factory, ScheduledExecutorService scheduler, String host, int port, ReconnectRunnable reconnector) {
         super(storm_conf, factory, scheduler, host, port, reconnector);
 
-        BATCH_THREASHOLD_WARN =
-                ConfigExtension.getNettyBufferThresholdSize(storm_conf);
-
+        BATCH_THREASHOLD_WARN = ConfigExtension.getNettyBufferThresholdSize(storm_conf);
         blockSend = isBlockSend(storm_conf);
-
         directlySend = isDirectSend(storm_conf);
 
         flush_later = new AtomicBoolean(false);
-        flushCheckInterval =
-                Utils.getInt(storm_conf
-                        .get(Config.STORM_NETTY_FLUSH_CHECK_INTERVAL_MS), 10);
+        flushCheckInterval = Utils.getInt(storm_conf.get(Config.STORM_NETTY_FLUSH_CHECK_INTERVAL_MS), 10);
 
         Runnable flusher = new Runnable() {
             @Override
@@ -95,13 +85,11 @@ class NettyClientAsync extends NettyClient {
             }
         };
         long initialDelay = Math.min(1000, max_sleep_ms * max_retries);
-        scheduler.scheduleAtFixedRate(flusher, initialDelay,
-                flushCheckInterval, TimeUnit.MILLISECONDS);
+        scheduler.scheduleAtFixedRate(flusher, initialDelay, flushCheckInterval, TimeUnit.MILLISECONDS);
 
         clientChannelFactory = factory;
 
         start();
-
         LOG.info(this.toString());
     }
 
@@ -109,7 +97,7 @@ class NettyClientAsync extends NettyClient {
      * Enqueue a task message to be sent to server
      */
     @Override
-    synchronized public void send(List<TaskMessage> messages) {
+    public synchronized void send(List<TaskMessage> messages) {
         // throw exception if the client is being closed
         if (isClosed()) {
             LOG.warn("Client is being closed, and does not take requests any more");
@@ -123,13 +111,14 @@ class NettyClientAsync extends NettyClient {
             throw new RuntimeException(e);
         } finally {
             long end = System.nanoTime();
-            sendTimer.update((end - start)/1000000.0d);
-
+            if (sendTimer != null) {
+                sendTimer.update((end - start) / TimeUtils.NS_PER_US);
+            }
         }
     }
 
     @Override
-    synchronized public void send(TaskMessage message) {
+    public synchronized void send(TaskMessage message) {
         // throw exception if the client is being closed
         if (isClosed()) {
             LOG.warn("Client is being closed, and does not take requests any more");
@@ -143,7 +132,9 @@ class NettyClientAsync extends NettyClient {
             throw new RuntimeException(e);
         } finally {
             long end = System.nanoTime();
-            sendTimer.update((end - start)/1000000.0d);
+            if (sendTimer != null) {
+                sendTimer.update((end - start) / TimeUtils.NS_PER_US);
+            }
         }
     }
 
@@ -159,21 +150,17 @@ class NettyClientAsync extends NettyClient {
             long now = System.currentTimeMillis();
             long delt = now - begin;
             if (oneSecond.check() == true) {
-                LOG.warn(
-                        "Target server  {} is unavailable, pending {}, bufferSize {}, block sending {}ms",
-                        name, pendings.get(), cachedSize, delt);
+                LOG.warn("Target server  {} is unavailable, pending {}, bufferSize {}, block sending {}ms", name, pendings.get(), cachedSize, delt);
             }
 
             if (timeoutIntervalCheck.check() == true) {
                 if (messageBatchRef.get() != null) {
-                    LOG.warn(
-                            "Target server  {} is unavailable, wait too much time, throw timeout message",
-                            name);
+                    LOG.warn("Target server  {} is unavailable, wait too much time, throw timeout message", name);
                     messageBatchRef.set(null);
                 }
                 setChannel(null);
                 LOG.warn("Reset channel as null");
-                
+
                 if (blockSend == false) {
                     reconnect();
                     break;
@@ -184,12 +171,10 @@ class NettyClientAsync extends NettyClient {
             JStormUtils.sleepMs(sleepMs);
 
             if (delt > 2 * timeoutMs * 1000L && changeThreadhold == false) {
-                if (channelRef.get() != null
-                        && BATCH_THREASHOLD_WARN >= 2 * messageBatchSize) {
+                if (channelRef.get() != null && BATCH_THREASHOLD_WARN >= 2 * messageBatchSize) {
                     // it is just channel isn't writable;
                     BATCH_THREASHOLD_WARN = BATCH_THREASHOLD_WARN / 2;
-                    LOG.info("Reduce BATCH_THREASHOLD_WARN to {}",
-                            BATCH_THREASHOLD_WARN);
+                    LOG.info("Reduce BATCH_THREASHOLD_WARN to {}", BATCH_THREASHOLD_WARN);
 
                     changeThreadhold = true;
                 }
@@ -296,12 +281,9 @@ class NettyClientAsync extends NettyClient {
         } else {
             if (messageBatchRef.compareAndSet(null, messageBatch)) {
                 flush_later.set(true);
-            }
-            else
+            } else
                 LOG.error("MessageBatch will be lost. This should not happen.");
         }
-
-        return;
     }
 
     void flush() {
@@ -339,12 +321,10 @@ class NettyClientAsync extends NettyClient {
     @Override
     public void handleResponse() {
         // do nothing
-        return;
     }
 
     @Override
     public String toString() {
-        return ToStringBuilder.reflectionToString(this,
-                ToStringStyle.SHORT_PREFIX_STYLE);
+        return ToStringBuilder.reflectionToString(this, ToStringStyle.SHORT_PREFIX_STYLE);
     }
 }
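
The async client above accumulates messages into a batch and, rather than always sending inline, sets the flush_later flag so the scheduled flusher pushes the batch out on the next tick (every flushCheckInterval ms, 10 by default). A minimal, self-contained sketch of that pattern, using plain JDK types instead of JStorm's MessageBatch (FlushLaterSketch and its String payloads are stand-ins, not JStorm classes):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

public class FlushLaterSketch {
    private final AtomicBoolean flushLater = new AtomicBoolean(false);
    private List<String> pending = new ArrayList<String>();

    void start(ScheduledExecutorService scheduler, long flushCheckIntervalMs) {
        // Periodically push out batches that were parked because the channel was busy.
        Runnable flusher = new Runnable() {
            public void run() {
                if (flushLater.compareAndSet(true, false)) {
                    flushPending();
                }
            }
        };
        scheduler.scheduleAtFixedRate(flusher, 0, flushCheckIntervalMs, TimeUnit.MILLISECONDS);
    }

    synchronized void send(String message) {
        pending.add(message);
        // Mark the batch for the next flusher tick instead of sending immediately.
        flushLater.set(true);
    }

    synchronized void flushPending() {
        if (!pending.isEmpty()) {
            System.out.println("flushing " + pending.size() + " messages");
            pending = new ArrayList<String>();
        }
    }

    public static void main(String[] args) throws InterruptedException {
        FlushLaterSketch client = new FlushLaterSketch();
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        client.start(scheduler, 10); // 10 ms mirrors the STORM_NETTY_FLUSH_CHECK_INTERVAL_MS default above
        client.send("tuple-1");
        Thread.sleep(100);
        scheduler.shutdown();
    }
}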

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyClientSync.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyClientSync.java b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyClientSync.java
index c239dd1..2f08957 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyClientSync.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyClientSync.java
@@ -17,43 +17,35 @@
  */
 package com.alibaba.jstorm.message.netty;
 
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.commons.lang.builder.ToStringBuilder;
-import org.apache.commons.lang.builder.ToStringStyle;
-import org.jboss.netty.channel.Channel;
-import org.jboss.netty.channel.ChannelFactory;
-import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import backtype.storm.Config;
 import backtype.storm.messaging.TaskMessage;
 import backtype.storm.utils.DisruptorQueue;
 import backtype.storm.utils.Utils;
-
+import com.alibaba.jstorm.common.metric.AsmGauge;
 import com.alibaba.jstorm.common.metric.QueueGauge;
-import com.alibaba.jstorm.metric.JStormHealthCheck;
-import com.alibaba.jstorm.metric.JStormMetrics;
-import com.alibaba.jstorm.metric.MetricDef;
+import com.alibaba.jstorm.metric.*;
 import com.alibaba.jstorm.utils.JStormServerUtils;
 import com.alibaba.jstorm.utils.JStormUtils;
+import com.alibaba.jstorm.utils.TimeUtils;
 import com.codahale.metrics.Gauge;
 import com.lmax.disruptor.EventHandler;
 import com.lmax.disruptor.WaitStrategy;
 import com.lmax.disruptor.dsl.ProducerType;
+import org.apache.commons.lang.builder.ToStringBuilder;
+import org.apache.commons.lang.builder.ToStringStyle;
+import org.jboss.netty.channel.Channel;
+import org.jboss.netty.channel.ChannelFactory;
+import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicLong;
 
 class NettyClientSync extends NettyClient implements EventHandler {
-    private static final Logger LOG = LoggerFactory
-            .getLogger(NettyClientSync.class);
+    private static final Logger LOG = LoggerFactory.getLogger(NettyClientSync.class);
 
     private ConcurrentLinkedQueue<MessageBatch> batchQueue;
     private DisruptorQueue disruptorQueue;
@@ -63,20 +55,14 @@ class NettyClientSync extends NettyClient implements EventHandler {
     private AtomicLong emitTs = new AtomicLong(0);
 
     @SuppressWarnings("rawtypes")
-    NettyClientSync(Map storm_conf, ChannelFactory factory,
-            ScheduledExecutorService scheduler, String host, int port,
-            ReconnectRunnable reconnector) {
+    NettyClientSync(Map storm_conf, ChannelFactory factory, ScheduledExecutorService scheduler, String host, int port, ReconnectRunnable reconnector) {
         super(storm_conf, factory, scheduler, host, port, reconnector);
 
         batchQueue = new ConcurrentLinkedQueue<MessageBatch>();
 
-        WaitStrategy waitStrategy =
-                (WaitStrategy) JStormUtils
-                        .createDisruptorWaitStrategy(storm_conf);
+        WaitStrategy waitStrategy = (WaitStrategy) Utils.newInstance((String) storm_conf.get(Config.TOPOLOGY_DISRUPTOR_WAIT_STRATEGY));
 
-        disruptorQueue =
-                DisruptorQueue.mkInstance(name, ProducerType.MULTI,
-                        MAX_SEND_PENDING * 8, waitStrategy);
+        disruptorQueue = DisruptorQueue.mkInstance(name, ProducerType.MULTI, MAX_SEND_PENDING * 8, waitStrategy);
         disruptorQueue.consumerStarted();
 
         if (connectMyself == false) {
@@ -93,21 +79,14 @@ class NettyClientSync extends NettyClient implements EventHandler {
         scheduler.scheduleAtFixedRate(trigger, 10, 1, TimeUnit.SECONDS);
 
         /**
-         * In sync mode, it can't directly use common factory, it will occur
-         * problem when client close and restart
+         * In sync mode, the common channel factory can't be used directly; otherwise problems occur when the client closes and restarts
          */
-        ThreadFactory bossFactory =
-                new NettyRenameThreadFactory(MetricDef.NETTY_CLI
-                        + JStormServerUtils.getName(host, port) + "-boss");
+        ThreadFactory bossFactory = new NettyRenameThreadFactory(MetricDef.NETTY_CLI + JStormServerUtils.getName(host, port) + "-boss");
         bossExecutor = Executors.newCachedThreadPool(bossFactory);
-        ThreadFactory workerFactory =
-                new NettyRenameThreadFactory(MetricDef.NETTY_CLI
-                        + JStormServerUtils.getName(host, port) + "-worker");
+        ThreadFactory workerFactory = new NettyRenameThreadFactory(MetricDef.NETTY_CLI + JStormServerUtils.getName(host, port) + "-worker");
         workerExecutor = Executors.newCachedThreadPool(workerFactory);
 
-        clientChannelFactory =
-                new NioClientSocketChannelFactory(bossExecutor, workerExecutor,
-                        1);
+        clientChannelFactory = new NioClientSocketChannelFactory(bossExecutor, workerExecutor, 1);
 
         start();
 
@@ -115,24 +94,24 @@ class NettyClientSync extends NettyClient implements EventHandler {
     }
 
     public void registerSyncMetrics() {
-        JStormMetrics.registerWorkerGauge(new Gauge<Double>() {
-            @Override
-            public Double getValue() {
-                return Double.valueOf(batchQueue.size());
-            }
-        }, MetricDef.NETTY_CLI_SYNC_BATCH_QUEUE, nettyConnection.toString());
-
-        QueueGauge cacheQueueGauge =
-                new QueueGauge(MetricDef.NETTY_CLI_SYNC_DISR_QUEUE
-                        + nettyConnection.toString(), disruptorQueue);
-
-        JStormMetrics
-                .registerWorkerGauge(cacheQueueGauge,
-                        MetricDef.NETTY_CLI_SYNC_DISR_QUEUE,
-                        nettyConnection.toString());
-        JStormHealthCheck.registerWorkerHealthCheck(
-                MetricDef.NETTY_CLI_SYNC_DISR_QUEUE + ":"
-                        + nettyConnection.toString(), cacheQueueGauge);
+        if (enableNettyMetrics) {
+            JStormMetrics.registerNettyMetric(MetricUtils
+                            .nettyMetricName(MetricDef.NETTY_CLI_SYNC_BATCH_QUEUE + nettyConnection.toString(), MetricType.GAUGE),
+                    new AsmGauge(new Gauge<Double>() {
+                        @Override
+                        public Double getValue() {
+                            return (double) batchQueue.size();
+                        }
+                    }));
+
+            QueueGauge cacheQueueGauge = new QueueGauge(disruptorQueue, MetricDef.NETTY_CLI_SYNC_DISR_QUEUE, nettyConnection.toString());
+
+            JStormMetrics.registerNettyMetric(MetricUtils
+                            .nettyMetricName(MetricDef.NETTY_CLI_SYNC_DISR_QUEUE + nettyConnection.toString(), MetricType.GAUGE),
+                    new AsmGauge(cacheQueueGauge));
+            JStormHealthCheck.registerWorkerHealthCheck(
+                    MetricDef.NETTY_CLI_SYNC_DISR_QUEUE + ":" + nettyConnection.toString(), cacheQueueGauge);
+        }
     }
 
     /**
@@ -166,8 +145,6 @@ class NettyClientSync extends NettyClient implements EventHandler {
 
     /**
      * Don't take care of competition
-     * 
-     * @param blocked
      */
     public void sendData() {
         long start = System.nanoTime();
@@ -188,12 +165,13 @@ class NettyClientSync extends NettyClient implements EventHandler {
             JStormUtils.halt_process(-1, err);
         } finally {
             long end = System.nanoTime();
-            sendTimer.update((end - start) / 1000000.0d);
+            if (sendTimer != null) {
+                sendTimer.update((end - start) / TimeUtils.NS_PER_US);
+            }
         }
     }
 
     public void sendAllData() {
-
         long start = System.nanoTime();
         try {
             disruptorQueue.consumeBatch(this);
@@ -216,7 +194,9 @@ class NettyClientSync extends NettyClient implements EventHandler {
             JStormUtils.halt_process(-1, err);
         } finally {
             long end = System.nanoTime();
-            sendTimer.update((end - start) / 1000000.0d);
+            if (sendTimer != null) {
+                sendTimer.update((end - start) / TimeUtils.NS_PER_US);
+            }
         }
     }
 
@@ -227,8 +207,7 @@ class NettyClientSync extends NettyClient implements EventHandler {
     }
 
     @Override
-    public void onEvent(Object event, long sequence, boolean endOfBatch)
-            throws Exception {
+    public void onEvent(Object event, long sequence, boolean endOfBatch) throws Exception {
         if (event == null) {
             return;
         }
@@ -296,22 +275,19 @@ class NettyClientSync extends NettyClient implements EventHandler {
     }
 
     public void unregisterSyncMetrics() {
-        JStormMetrics.unregisterWorkerMetric(
-                MetricDef.NETTY_CLI_SYNC_BATCH_QUEUE,
-                nettyConnection.toString());
-        JStormMetrics
-                .unregisterWorkerMetric(MetricDef.NETTY_CLI_SYNC_DISR_QUEUE,
-                        nettyConnection.toString());
-        JStormHealthCheck
-                .unregisterWorkerHealthCheck(MetricDef.NETTY_CLI_SYNC_DISR_QUEUE
-                        + ":" + nettyConnection.toString());
+        if (enableNettyMetrics) {
+            JStormMetrics.unregisterNettyMetric(MetricUtils
+                    .nettyMetricName(MetricDef.NETTY_CLI_SYNC_BATCH_QUEUE + nettyConnection.toString(), MetricType.GAUGE));
+            JStormMetrics.unregisterNettyMetric(MetricUtils
+                    .nettyMetricName(MetricDef.NETTY_CLI_SYNC_DISR_QUEUE + nettyConnection.toString(), MetricType.GAUGE));
+            JStormHealthCheck
+                    .unregisterWorkerHealthCheck(MetricDef.NETTY_CLI_SYNC_DISR_QUEUE + ":" + nettyConnection.toString());
+        }
     }
 
     @Override
     public void close() {
-        LOG.info(
-                "Begin to close connection to {} and flush all data, batchQueue {}, disruptor {}",
-                name, batchQueue.size(), disruptorQueue.population());
+        LOG.info("Begin to close connection to {} and flush all data, batchQueue {}, disruptor {}", name, batchQueue.size(), disruptorQueue.population());
         sendAllData();
         disruptorQueue.haltWithInterrupt();
         if (connectMyself == false) {
@@ -326,7 +302,6 @@ class NettyClientSync extends NettyClient implements EventHandler {
 
     @Override
     public String toString() {
-        return ToStringBuilder.reflectionToString(this,
-                ToStringStyle.SHORT_PREFIX_STYLE);
+        return ToStringBuilder.reflectionToString(this, ToStringStyle.SHORT_PREFIX_STYLE);
     }
 }
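
registerSyncMetrics() above now registers the batch queue depth through JStorm's AsmGauge wrapper when enableNettyMetrics is set. The underlying idea is simply a Codahale gauge over the live queue size; a hedged sketch using the stock com.codahale.metrics API rather than the JStormMetrics wrappers (the metric name below is made up):

import com.codahale.metrics.Gauge;
import com.codahale.metrics.MetricRegistry;

import java.util.concurrent.ConcurrentLinkedQueue;

public class BatchQueueGaugeSketch {
    public static void main(String[] args) {
        final ConcurrentLinkedQueue<byte[]> batchQueue = new ConcurrentLinkedQueue<byte[]>();

        // Expose the current queue depth, analogous to NETTY_CLI_SYNC_BATCH_QUEUE above.
        MetricRegistry registry = new MetricRegistry();
        registry.register("netty.client.sync.batch.queue", new Gauge<Double>() {
            @Override
            public Double getValue() {
                return (double) batchQueue.size();
            }
        });

        batchQueue.offer(new byte[]{1, 2, 3});
        System.out.println(registry.getGauges().get("netty.client.sync.batch.queue").getValue()); // 1.0
    }
}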

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyConnection.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyConnection.java b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyConnection.java
index cd8c0fa..4f2358f 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyConnection.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyConnection.java
@@ -21,23 +21,23 @@ import java.io.Serializable;
 
 import com.alibaba.jstorm.utils.NetWorkUtils;
 
-public class NettyConnection implements Serializable{
+public class NettyConnection implements Serializable {
     protected String clientPort;
     protected String serverPort;
-    
+
     public String getClientPort() {
         return clientPort;
     }
-    
+
     public void setClientPort(String client, int port) {
         String ip = NetWorkUtils.host2Ip(client);
         clientPort = ip + ":" + port;
     }
-    
+
     public String getServerPort() {
         return serverPort;
     }
-    
+
     public void setServerPort(String server, int port) {
         String ip = NetWorkUtils.host2Ip(server);
         serverPort = ip + ":" + port;
@@ -47,12 +47,8 @@ public class NettyConnection implements Serializable{
     public int hashCode() {
         final int prime = 31;
         int result = 1;
-        result =
-                prime * result
-                        + ((clientPort == null) ? 0 : clientPort.hashCode());
-        result =
-                prime * result
-                        + ((serverPort == null) ? 0 : serverPort.hashCode());
+        result = prime * result + ((clientPort == null) ? 0 : clientPort.hashCode());
+        result = prime * result + ((serverPort == null) ? 0 : serverPort.hashCode());
         return result;
     }
 
@@ -77,15 +73,14 @@ public class NettyConnection implements Serializable{
             return false;
         return true;
     }
-    
+
     @Override
     public String toString() {
-        return clientPort  + "->" + serverPort;
+        return clientPort + "->" + serverPort;
     }
-    
-    public static String mkString(String client, int clientPort, 
-            String server, int serverPort) {
+
+    public static String mkString(String client, int clientPort, String server, int serverPort) {
         return client + ":" + clientPort + "->" + server + ":" + serverPort;
     }
-    
+
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyContext.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyContext.java b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyContext.java
index a6ddd9a..1a090f2 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyContext.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyContext.java
@@ -40,8 +40,7 @@ import com.alibaba.jstorm.metric.MetricDef;
 import com.alibaba.jstorm.utils.JStormUtils;
 
 public class NettyContext implements IContext {
-    private final static Logger LOG = LoggerFactory
-            .getLogger(NettyContext.class);
+    private final static Logger LOG = LoggerFactory.getLogger(NettyContext.class);
     @SuppressWarnings("rawtypes")
     private Map storm_conf;
 
@@ -65,36 +64,19 @@ public class NettyContext implements IContext {
     public void prepare(Map storm_conf) {
         this.storm_conf = storm_conf;
 
-        int maxWorkers =
-                Utils.getInt(storm_conf
-                        .get(Config.STORM_MESSAGING_NETTY_CLIENT_WORKER_THREADS));
-        ThreadFactory bossFactory =
-                new NettyRenameThreadFactory(MetricDef.NETTY_CLI + "boss");
-        ThreadFactory workerFactory =
-                new NettyRenameThreadFactory(MetricDef.NETTY_CLI + "worker");
+        int maxWorkers = Utils.getInt(storm_conf.get(Config.STORM_MESSAGING_NETTY_CLIENT_WORKER_THREADS));
+        ThreadFactory bossFactory = new NettyRenameThreadFactory(MetricDef.NETTY_CLI + "boss");
+        ThreadFactory workerFactory = new NettyRenameThreadFactory(MetricDef.NETTY_CLI + "worker");
 
         if (maxWorkers > 0) {
             clientChannelFactory =
-                    new NioClientSocketChannelFactory(
-                            Executors.newCachedThreadPool(bossFactory),
-                            Executors.newCachedThreadPool(workerFactory),
-                            maxWorkers);
+                    new NioClientSocketChannelFactory(Executors.newCachedThreadPool(bossFactory), Executors.newCachedThreadPool(workerFactory), maxWorkers);
         } else {
-            clientChannelFactory =
-                    new NioClientSocketChannelFactory(
-                            Executors.newCachedThreadPool(bossFactory),
-                            Executors.newCachedThreadPool(workerFactory));
+            clientChannelFactory = new NioClientSocketChannelFactory(Executors.newCachedThreadPool(bossFactory), Executors.newCachedThreadPool(workerFactory));
         }
-        int otherWorkers =
-                Utils.getInt(storm_conf.get(Config.TOPOLOGY_WORKERS), 1) - 1;
-        int poolSize =
-                Math.min(Math.max(1, otherWorkers),
-                        MAX_CLIENT_SCHEDULER_THREAD_POOL_SIZE);
-        clientScheduleService =
-                Executors
-                        .newScheduledThreadPool(poolSize,
-                                new NettyRenameThreadFactory(
-                                        "client-schedule-service"));
+        int otherWorkers = Utils.getInt(storm_conf.get(Config.TOPOLOGY_WORKERS), 1) - 1;
+        int poolSize = Math.min(Math.max(1, otherWorkers), MAX_CLIENT_SCHEDULER_THREAD_POOL_SIZE);
+        clientScheduleService = Executors.newScheduledThreadPool(poolSize, new NettyRenameThreadFactory("client-schedule-service"));
 
         reconnector = new ReconnectRunnable();
         new AsyncLoopThread(reconnector, true, Thread.MIN_PRIORITY, true);
@@ -119,11 +101,9 @@ public class NettyContext implements IContext {
     @Override
     public IConnection connect(String topology_id, String host, int port) {
         if (isSyncMode == true) {
-            return new NettyClientSync(storm_conf, clientChannelFactory,
-                    clientScheduleService, host, port, reconnector);
+            return new NettyClientSync(storm_conf, clientChannelFactory, clientScheduleService, host, port, reconnector);
         } else {
-            return new NettyClientAsync(storm_conf, clientChannelFactory,
-                    clientScheduleService, host, port, reconnector);
+            return new NettyClientAsync(storm_conf, clientChannelFactory, clientScheduleService, host, port, reconnector);
         }
     }
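
The scheduler pool above is sized from the topology's worker count and clamped: at least one thread, at most MAX_CLIENT_SCHEDULER_THREAD_POOL_SIZE. A worked sketch of that clamp (the constant's value is not visible in this hunk, so 10 below is an assumption):

public class PoolSizeSketch {
    // Assumed value; the real MAX_CLIENT_SCHEDULER_THREAD_POOL_SIZE is defined elsewhere.
    static final int MAX = 10;

    static int schedulerPoolSize(int topologyWorkers) {
        int otherWorkers = topologyWorkers - 1;          // peers this worker may connect to
        return Math.min(Math.max(1, otherWorkers), MAX); // at least 1, at most MAX
    }

    public static void main(String[] args) {
        System.out.println(schedulerPoolSize(1));  // 1  (no other workers, still one thread)
        System.out.println(schedulerPoolSize(4));  // 3
        System.out.println(schedulerPoolSize(50)); // 10 (clamped)
    }
}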
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyRenameThreadFactory.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyRenameThreadFactory.java b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyRenameThreadFactory.java
index 2e060c2..5d38fc5 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyRenameThreadFactory.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyRenameThreadFactory.java
@@ -27,8 +27,7 @@ public class NettyRenameThreadFactory implements ThreadFactory {
 
     static {
         // Rename Netty threads
-        ThreadRenamingRunnable
-                .setThreadNameDeterminer(ThreadNameDeterminer.CURRENT);
+        ThreadRenamingRunnable.setThreadNameDeterminer(ThreadNameDeterminer.CURRENT);
     }
 
     final ThreadGroup group;
@@ -37,15 +36,12 @@ public class NettyRenameThreadFactory implements ThreadFactory {
 
     NettyRenameThreadFactory(String name) {
         SecurityManager s = System.getSecurityManager();
-        group =
-                (s != null) ? s.getThreadGroup() : Thread.currentThread()
-                        .getThreadGroup();
+        group = (s != null) ? s.getThreadGroup() : Thread.currentThread().getThreadGroup();
         this.name = name;
     }
 
     public Thread newThread(Runnable r) {
-        Thread t =
-                new Thread(group, r, name + "-" + index.getAndIncrement(), 0);
+        Thread t = new Thread(group, r, name + "-" + index.getAndIncrement(), 0);
         if (t.isDaemon())
             t.setDaemon(false);
         if (t.getPriority() != Thread.NORM_PRIORITY)

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyServer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyServer.java b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyServer.java
index d00b24f..a5fa859 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyServer.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/NettyServer.java
@@ -44,21 +44,19 @@ import com.alibaba.jstorm.utils.JStormUtils;
 import com.alibaba.jstorm.client.ConfigExtension;
 
 class NettyServer implements IConnection {
-    private static final Logger LOG = LoggerFactory
-            .getLogger(NettyServer.class);
+    private static final Logger LOG = LoggerFactory.getLogger(NettyServer.class);
     @SuppressWarnings("rawtypes")
     Map storm_conf;
     int port;
 
     // private LinkedBlockingQueue message_queue;
-    volatile ChannelGroup allChannels =
-            new DefaultChannelGroup("jstorm-server");
+    volatile ChannelGroup allChannels = new DefaultChannelGroup("jstorm-server");
     final ChannelFactory factory;
     final ServerBootstrap bootstrap;
 
     // ayncBatch is only one solution, so directly set it as true
     private final boolean isSyncMode;
-    
+
     private ConcurrentHashMap<Integer, DisruptorQueue> deserializeQueues;
 
     @SuppressWarnings("rawtypes")
@@ -69,30 +67,17 @@ class NettyServer implements IConnection {
         this.deserializeQueues = deserializeQueues;
 
         // Configure the server.
-        int buffer_size =
-                Utils.getInt(storm_conf
-                        .get(Config.STORM_MESSAGING_NETTY_BUFFER_SIZE));
-        int maxWorkers =
-                Utils.getInt(storm_conf
-                        .get(Config.STORM_MESSAGING_NETTY_SERVER_WORKER_THREADS));
+        int buffer_size = Utils.getInt(storm_conf.get(Config.STORM_MESSAGING_NETTY_BUFFER_SIZE));
+        int maxWorkers = Utils.getInt(storm_conf.get(Config.STORM_MESSAGING_NETTY_SERVER_WORKER_THREADS));
 
         // asyncBatch = ConfigExtension.isNettyTransferAsyncBatch(storm_conf);
 
-        ThreadFactory bossFactory =
-                new NettyRenameThreadFactory("server" + "-boss");
-        ThreadFactory workerFactory =
-                new NettyRenameThreadFactory("server" + "-worker");
+        ThreadFactory bossFactory = new NettyRenameThreadFactory("server" + "-boss");
+        ThreadFactory workerFactory = new NettyRenameThreadFactory("server" + "-worker");
         if (maxWorkers > 0) {
-            factory =
-                    new NioServerSocketChannelFactory(
-                            Executors.newCachedThreadPool(bossFactory),
-                            Executors.newCachedThreadPool(workerFactory),
-                            maxWorkers);
+            factory = new NioServerSocketChannelFactory(Executors.newCachedThreadPool(bossFactory), Executors.newCachedThreadPool(workerFactory), maxWorkers);
         } else {
-            factory =
-                    new NioServerSocketChannelFactory(
-                            Executors.newCachedThreadPool(bossFactory),
-                            Executors.newCachedThreadPool(workerFactory));
+            factory = new NioServerSocketChannelFactory(Executors.newCachedThreadPool(bossFactory), Executors.newCachedThreadPool(workerFactory));
         }
 
         bootstrap = new ServerBootstrap(factory);
@@ -108,8 +93,7 @@ class NettyServer implements IConnection {
         Channel channel = bootstrap.bind(new InetSocketAddress(port));
         allChannels.add(channel);
 
-        LOG.info("Successfull bind {}, buffer_size:{}, maxWorkers:{}", port,
-                buffer_size, maxWorkers);
+        LOG.info("Successfull bind {}, buffer_size:{}, maxWorkers:{}", port, buffer_size, maxWorkers);
     }
 
     @Override
@@ -129,8 +113,7 @@ class NettyServer implements IConnection {
 
         DisruptorQueue queue = deserializeQueues.get(task);
         if (queue == null) {
-            LOG.debug("Received invalid message directed at port " + task
-                    + ". Dropping...");
+            LOG.debug("Received invalid message directed at port " + task + ". Dropping...");
             return;
         }
 
@@ -138,8 +121,7 @@ class NettyServer implements IConnection {
     }
 
     /**
-     * fetch a message from message queue synchronously (flags != 1) or
-     * asynchronously (flags==1)
+     * fetch a message from message queue synchronously (flags != 1) or asynchronously (flags==1)
      */
     public Object recv(Integer taskId, int flags) {
         try {
@@ -211,14 +193,12 @@ class NettyServer implements IConnection {
 
     @Override
     public void send(List<TaskMessage> messages) {
-        throw new UnsupportedOperationException(
-                "Server connection should not send any messages");
+        throw new UnsupportedOperationException("Server connection should not send any messages");
     }
 
     @Override
     public void send(TaskMessage message) {
-        throw new UnsupportedOperationException(
-                "Server connection should not send any messages");
+        throw new UnsupportedOperationException("Server connection should not send any messages");
     }
 
     @Override
@@ -231,4 +211,8 @@ class NettyServer implements IConnection {
         return isSyncMode;
     }
 
+    @Override
+    public boolean available() {
+        return true;
+    }
 }
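
The recv() javadoc above fixes the convention that flags == 1 means an asynchronous (non-blocking) fetch while any other value blocks. A minimal sketch of that contract, with a JDK BlockingQueue standing in for the DisruptorQueue:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class RecvFlagsSketch {
    private final BlockingQueue<byte[]> queue = new LinkedBlockingQueue<byte[]>();

    /** flags == 1: non-blocking poll; any other value: block until a message arrives. */
    byte[] recv(int flags) throws InterruptedException {
        if (flags == 1) {
            return queue.poll(); // may return null immediately
        }
        return queue.take();     // waits until a message is available
    }

    public static void main(String[] args) throws InterruptedException {
        RecvFlagsSketch server = new RecvFlagsSketch();
        System.out.println(server.recv(1));        // null: async mode returns at once
        server.queue.offer(new byte[]{42});
        System.out.println(server.recv(0).length); // 1: sync mode blocks until data arrives
    }
}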

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/ReconnectRunnable.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/ReconnectRunnable.java b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/ReconnectRunnable.java
index dcf2a5d..f5ec324 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/ReconnectRunnable.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/ReconnectRunnable.java
@@ -26,11 +26,9 @@ import org.slf4j.LoggerFactory;
 import com.alibaba.jstorm.callback.RunnableCallback;
 
 public class ReconnectRunnable extends RunnableCallback {
-    private static final Logger LOG = LoggerFactory
-            .getLogger(ReconnectRunnable.class);
+    private static final Logger LOG = LoggerFactory.getLogger(ReconnectRunnable.class);
 
-    private BlockingQueue<NettyClient> queue =
-            new LinkedBlockingDeque<NettyClient>();
+    private BlockingQueue<NettyClient> queue = new LinkedBlockingDeque<NettyClient>();
 
     public void pushEvent(NettyClient client) {
         queue.offer(client);

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/StormClientHandler.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/StormClientHandler.java b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/StormClientHandler.java
index f84c2f0..7b511e9 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/StormClientHandler.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/StormClientHandler.java
@@ -30,8 +30,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class StormClientHandler extends SimpleChannelUpstreamHandler {
-    private static final Logger LOG = LoggerFactory
-            .getLogger(StormClientHandler.class);
+    private static final Logger LOG = LoggerFactory.getLogger(StormClientHandler.class);
     private NettyClient client;
     private AtomicBoolean being_closed;
 
@@ -41,16 +40,25 @@ public class StormClientHandler extends SimpleChannelUpstreamHandler {
     }
 
     /**
-     * Sometime when connect one bad channel which isn't writable, it will call
-     * this function
+     * This method is intentionally commented out.
+     * 
+     * It must not be invoked from the low-level Netty layer: such a call would try to acquire the
+     * lock of the JStorm Netty layer and could lead to deadlock.
+     */
+//    @Override
+//    public void channelInterestChanged(ChannelHandlerContext ctx, ChannelStateEvent e) throws Exception {
+//        
+//    	client.notifyInterestChanged(e.getChannel());
+//    }
+
+    /**
+     * Sometimes, when connecting to a bad channel that isn't writable, this function is still called
      */
     @Override
-    public void channelConnected(ChannelHandlerContext ctx,
-            ChannelStateEvent event) {
+    public void channelConnected(ChannelHandlerContext ctx, ChannelStateEvent event) {
         // register the newly established channel
         Channel channel = event.getChannel();
-        LOG.info("connection established to :{}, local port:{}",
-                client.getRemoteAddr(), channel.getLocalAddress());
+        LOG.info("connection established to :{}, local port:{}", client.getRemoteAddr(), channel.getLocalAddress());
 
         client.handleResponse();
     }
@@ -63,8 +71,8 @@ public class StormClientHandler extends SimpleChannelUpstreamHandler {
 
     /**
      * 
-     * @see org.jboss.netty.channel.SimpleChannelUpstreamHandler#exceptionCaught(org.jboss.netty.channel.ChannelHandlerContext,
-     *      org.jboss.netty.channel.ExceptionEvent)
+     * @see SimpleChannelUpstreamHandler#exceptionCaught(ChannelHandlerContext,
+     *      ExceptionEvent)
      */
     @Override
     public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent event) {
@@ -82,14 +90,12 @@ public class StormClientHandler extends SimpleChannelUpstreamHandler {
     /**
      * Attention please,
      * 
-     * @see org.jboss.netty.channel.SimpleChannelUpstreamHandler#channelDisconnected(org.jboss.netty.channel.ChannelHandlerContext,
-     *      org.jboss.netty.channel.ChannelStateEvent)
+     * @see SimpleChannelUpstreamHandler#channelDisconnected(ChannelHandlerContext,
+     *      ChannelStateEvent)
      */
     @Override
-    public void channelDisconnected(ChannelHandlerContext ctx,
-            ChannelStateEvent e) throws Exception {
-        LOG.info("Receive channelDisconnected to {}, channel = {}",
-                client.getRemoteAddr(), e.getChannel());
+    public void channelDisconnected(ChannelHandlerContext ctx, ChannelStateEvent e) throws Exception {
+        LOG.info("Receive channelDisconnected to {}, channel = {}", client.getRemoteAddr(), e.getChannel());
         // ctx.sendUpstream(e);
         super.channelDisconnected(ctx, e);
 
@@ -97,10 +103,8 @@ public class StormClientHandler extends SimpleChannelUpstreamHandler {
     }
 
     @Override
-    public void channelClosed(ChannelHandlerContext ctx, ChannelStateEvent e)
-            throws Exception {
-        LOG.info("Connection to {} has been closed, channel = {}",
-                client.getRemoteAddr(), e.getChannel());
+    public void channelClosed(ChannelHandlerContext ctx, ChannelStateEvent e) throws Exception {
+        LOG.info("Connection to {} has been closed, channel = {}", client.getRemoteAddr(), e.getChannel());
         super.channelClosed(ctx, e);
     }
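
The commented-out channelInterestChanged above guards against a classic lock-ordering deadlock: a Netty I/O thread holding the low-level channel lock would call up into the synchronized client, while a client thread holding the client lock calls down into Netty. A minimal illustration of that inverted acquisition order (both lock names are hypothetical, and the program hangs by design to demonstrate the hazard):

public class LockOrderDeadlockSketch {
    static final Object nettyLayerLock = new Object();
    static final Object clientLock = new Object();

    public static void main(String[] args) {
        // Thread A: like a Netty callback calling up into the client.
        new Thread(new Runnable() {
            public void run() {
                synchronized (nettyLayerLock) {
                    sleep(100);
                    synchronized (clientLock) { // blocks forever: B holds clientLock
                        System.out.println("A done");
                    }
                }
            }
        }).start();

        // Thread B: like a synchronized send() calling down into Netty.
        new Thread(new Runnable() {
            public void run() {
                synchronized (clientLock) {
                    sleep(100);
                    synchronized (nettyLayerLock) { // blocks forever: A holds nettyLayerLock
                        System.out.println("B done");
                    }
                }
            }
        }).start();
    }

    static void sleep(long ms) {
        try { Thread.sleep(ms); } catch (InterruptedException ignored) { }
    }
}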
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/StormClientPipelineFactory.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/StormClientPipelineFactory.java b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/StormClientPipelineFactory.java
index 080f91c..8927809 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/StormClientPipelineFactory.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/StormClientPipelineFactory.java
@@ -27,12 +27,12 @@ import com.alibaba.jstorm.client.ConfigExtension;
 
 class StormClientPipelineFactory implements ChannelPipelineFactory {
     private NettyClient client;
-    private Map         conf;
+    private Map conf;
 
     StormClientPipelineFactory(NettyClient client, Map conf) {
         this.client = client;
         this.conf = conf;
-        
+
     }
 
     public ChannelPipeline getPipeline() throws Exception {

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/StormServerHandler.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/StormServerHandler.java b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/StormServerHandler.java
index 916ce93..c1b9cf2 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/StormServerHandler.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/StormServerHandler.java
@@ -33,8 +33,7 @@ import org.slf4j.LoggerFactory;
 import backtype.storm.messaging.TaskMessage;
 
 class StormServerHandler extends SimpleChannelUpstreamHandler {
-    private static final Logger LOG = LoggerFactory
-            .getLogger(StormServerHandler.class);
+    private static final Logger LOG = LoggerFactory.getLogger(StormServerHandler.class);
     private NettyServer server;
     private Map<Channel, Integer> failureCounters;
 
@@ -71,29 +70,28 @@ class StormServerHandler extends SimpleChannelUpstreamHandler {
         LOG.info("Connection established {}", e.getChannel().getRemoteAddress());
         server.addChannel(e.getChannel());
     }
-    
+
     @Override
-    public void childChannelClosed(
-            ChannelHandlerContext ctx, ChildChannelStateEvent e) throws Exception {
+    public void childChannelClosed(ChannelHandlerContext ctx, ChildChannelStateEvent e) throws Exception {
         super.childChannelClosed(ctx, e);
         LOG.info("Connection closed {}", e.getChildChannel().getRemoteAddress());
-        
+
         MessageDecoder.removeTransmitHistogram(e.getChildChannel());
     }
-    
+
     @Override
     public void channelDisconnected(ChannelHandlerContext ctx, ChannelStateEvent e) throws Exception {
         super.channelDisconnected(ctx, e);
         LOG.info("Connection channelDisconnected {}", e.getChannel().getRemoteAddress());
-        
+
         MessageDecoder.removeTransmitHistogram(e.getChannel());
     };
-    
+
     @Override
     public void channelClosed(ChannelHandlerContext ctx, ChannelStateEvent e) throws Exception {
         super.channelClosed(ctx, e);
         LOG.info("Connection channelClosed {}", e.getChannel().getRemoteAddress());
-        
+
         MessageDecoder.removeTransmitHistogram(e.getChannel());
     };
 
@@ -131,8 +129,7 @@ class StormServerHandler extends SimpleChannelUpstreamHandler {
     public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) {
         // removeFailureCounter(e.getChannel());
         if (e.getChannel() != null) {
-            LOG.info("Channel occur exception {}", e.getChannel()
-                    .getRemoteAddress());
+            LOG.info("Channel occur exception {}", e.getChannel().getRemoteAddress());
         }
 
         server.closeChannel(e.getChannel());

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/StormServerPipelineFactory.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/StormServerPipelineFactory.java b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/StormServerPipelineFactory.java
index 9dead91..6489d4f 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/StormServerPipelineFactory.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/StormServerPipelineFactory.java
@@ -26,7 +26,7 @@ import org.jboss.netty.channel.Channels;
 class StormServerPipelineFactory implements ChannelPipelineFactory {
     private NettyServer server;
     private Map conf;
-    
+
     StormServerPipelineFactory(NettyServer server, Map conf) {
         this.server = server;
         this.conf = conf;

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/metric/AlimonitorClient.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/AlimonitorClient.java b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/AlimonitorClient.java
deleted file mode 100755
index 760e538..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/AlimonitorClient.java
+++ /dev/null
@@ -1,267 +0,0 @@
-package com.alibaba.jstorm.metric;
-
-import java.net.URLEncoder;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.http.HttpEntity;
-import org.apache.http.NameValuePair;
-import org.apache.http.client.entity.UrlEncodedFormEntity;
-import org.apache.http.client.methods.CloseableHttpResponse;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.client.methods.HttpPost;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.HttpClientBuilder;
-import org.apache.http.message.BasicNameValuePair;
-import org.apache.http.util.EntityUtils;
-import org.apache.log4j.Logger;
-
-import backtype.storm.utils.Utils;
-
-public class AlimonitorClient extends MetricSendClient {
-
-    public static Logger LOG = Logger.getLogger(AlimonitorClient.class);
-
-    // Send to localhost:15776 by default
-    public static final String DEFAUT_ADDR = "127.0.0.1";
-    public static final String DEFAULT_PORT = "15776";
-    public static final int DEFAUTL_FLAG = 0;
-    public static final String DEFAULT_ERROR_INFO = "";
-
-    private final String COLLECTION_FLAG = "collection_flag";
-    private final String ERROR_INFO = "error_info";
-    private final String MSG = "MSG";
-
-    private String port;
-    private String requestIP;
-    private String monitorName;
-    private int collectionFlag;
-    private String errorInfo;
-
-    private boolean post;
-
-    public AlimonitorClient() {
-    }
-
-    public AlimonitorClient(String requestIP, String port, boolean post) {
-        this.requestIP = requestIP;
-        this.port = port;
-        this.post = post;
-        this.monitorName = null;
-        this.collectionFlag = 0;
-        this.errorInfo = null;
-    }
-
-    public void setIpAddr(String ipAddr) {
-        this.requestIP = ipAddr;
-    }
-
-    public void setPort(String port) {
-        this.port = port;
-    }
-
-    public void setMonitorName(String monitorName) {
-        this.monitorName = monitorName;
-    }
-
-    public void setCollectionFlag(int flag) {
-        this.collectionFlag = flag;
-    }
-
-    public void setErrorInfo(String msg) {
-        this.errorInfo = msg;
-    }
-
-    public void setPostFlag(boolean post) {
-        this.post = post;
-    }
-
-    public String buildURL() {
-        return "http://" + requestIP + ":" + port + "/passive";
-    }
-
-    public String buildRqstAddr() {
-        return "http://" + requestIP + ":" + port + "/passive?name="
-                + monitorName + "&msg=";
-    }
-
-    @Override
-    public boolean send(Map<String, Object> msg) {
-        try {
-            if (monitorName == null) {
-                LOG.warn("monitor name is null");
-                return false;
-            }
-            return sendRequest(collectionFlag, errorInfo, msg);
-        } catch (Exception e) {
-            LOG.error("Failed to sendRequest", e);
-            return false;
-        }
-    }
-
-    @Override
-    public boolean send(List<Map<String, Object>> msg) {
-        try {
-            if (monitorName == null) {
-                LOG.warn("monitor name is null");
-                return false;
-            }
-            return sendRequest(collectionFlag, errorInfo, msg);
-        } catch (Exception e) {
-            LOG.error("Failed to sendRequest", e);
-            return false;
-        }
-    }
-
-    public Map buildAliMonitorMsg(int collection_flag, String error_message) {
-        // Json format of the message sent to Alimonitor
-        // {
-        // "collection_flag":int,
-        // "error_info":string,
-        // "MSG": ojbect | array
-        // }
-        Map ret = new HashMap();
-        ret.put(COLLECTION_FLAG, collection_flag);
-        ret.put(ERROR_INFO, error_message);
-        ret.put(MSG, null);
-
-        return ret;
-    }
-
-    private void addMsgData(Map jsonObj, Map<String, Object> map) {
-        jsonObj.put(MSG, map);
-    }
-
-    private void addMsgData(Map jsonObj, List<Map<String, Object>> mapList) {
-        // JSONArray jsonArray = new JSONArray();
-        // for(Map<String, Object> map : mapList) {
-        // jsonArray.add(map);
-        // }
-
-        jsonObj.put(MSG, mapList);
-    }
-
-    private boolean sendRequest(int collection_flag, String error_message,
-            Map<String, Object> msg) throws Exception {
-        boolean ret = false;
-
-        if (msg.size() == 0)
-            return ret;
-
-        Map jsonObj = buildAliMonitorMsg(collection_flag, error_message);
-        addMsgData(jsonObj, msg);
-        String jsonMsg = jsonObj.toString();
-        LOG.info(jsonMsg);
-
-        if (post == true) {
-            String url = buildURL();
-            ret = httpPost(url, jsonMsg);
-        } else {
-            String request = buildRqstAddr();
-            StringBuilder postAddr = new StringBuilder();
-            postAddr.append(request);
-            postAddr.append(URLEncoder.encode(jsonMsg));
-
-            ret = httpGet(postAddr);
-        }
-
-        return ret;
-    }
-
-    private boolean sendRequest(int collection_flag, String error_message,
-            List<Map<String, Object>> msgList) throws Exception {
-        boolean ret = false;
-
-        if (msgList.size() == 0)
-            return ret;
-
-        Map jsonObj = buildAliMonitorMsg(collection_flag, error_message);
-        addMsgData(jsonObj, msgList);
-
-        String jsonMsg = Utils.to_json(jsonObj);
-        LOG.info(jsonMsg);
-
-        if (post == true) {
-            String url = buildURL();
-            ret = httpPost(url, jsonMsg);
-        } else {
-            String request = buildRqstAddr();
-            StringBuilder postAddr = new StringBuilder();
-            postAddr.append(request);
-            postAddr.append(URLEncoder.encode(jsonMsg));
-
-            ret = httpGet(postAddr);
-        }
-
-        return ret;
-    }
-
-    private boolean httpGet(StringBuilder postAddr) {
-        boolean ret = false;
-
-        CloseableHttpClient httpClient = HttpClientBuilder.create().build();
-        CloseableHttpResponse response = null;
-
-        try {
-            HttpGet request = new HttpGet(postAddr.toString());
-            response = httpClient.execute(request);
-            HttpEntity entity = response.getEntity();
-            if (entity != null) {
-                LOG.info(EntityUtils.toString(entity));
-            }
-            EntityUtils.consume(entity);
-            ret = true;
-        } catch (Exception e) {
-            LOG.error("Exception when sending http request to alimonitor", e);
-        } finally {
-            try {
-                if (response != null)
-                    response.close();
-                httpClient.close();
-            } catch (Exception e) {
-                LOG.error("Exception when closing httpclient", e);
-            }
-        }
-
-        return ret;
-    }
-
-    private boolean httpPost(String url, String msg) {
-        boolean ret = false;
-
-        CloseableHttpClient httpClient = HttpClientBuilder.create().build();
-        CloseableHttpResponse response = null;
-
-        try {
-            HttpPost request = new HttpPost(url);
-            List<NameValuePair> nvps = new ArrayList<NameValuePair>();
-            nvps.add(new BasicNameValuePair("name", monitorName));
-            nvps.add(new BasicNameValuePair("msg", msg));
-            request.setEntity(new UrlEncodedFormEntity(nvps));
-            response = httpClient.execute(request);
-            HttpEntity entity = response.getEntity();
-            if (entity != null) {
-                LOG.info(EntityUtils.toString(entity));
-            }
-            EntityUtils.consume(entity);
-            ret = true;
-        } catch (Exception e) {
-            LOG.error("Exception when sending http request to alimonitor", e);
-        } finally {
-            try {
-                if (response != null)
-                    response.close();
-                httpClient.close();
-            } catch (Exception e) {
-                LOG.error("Exception when closing httpclient", e);
-            }
-        }
-
-        return ret;
-    }
-
-    public void close() {
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/metric/AsmMetricFilter.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/AsmMetricFilter.java b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/AsmMetricFilter.java
new file mode 100644
index 0000000..4313b6f
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/AsmMetricFilter.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.metric;
+
+import com.alibaba.jstorm.common.metric.AsmMetric;
+
+import java.io.Serializable;
+
+/**
+ * @author Cody (weiyue.wy@alibaba-inc.com)
+ * @since 2.0.5
+ */
+public interface AsmMetricFilter extends Serializable {
+    /**
+     * Matches all metrics, regardless of type or name.
+     */
+    AsmMetricFilter ALL = new AsmMetricFilter() {
+        private static final long serialVersionUID = 7089987006352295530L;
+
+        @Override
+        public boolean matches(String name, AsmMetric metric) {
+            return true;
+        }
+    };
+
+    /**
+     * Returns {@code true} if the metric matches the filter; {@code false} otherwise.
+     * 
+     * @param name the metric node
+     * @param metric the metric
+     * @return {@code true} if the metric matches the filter
+     */
+    boolean matches(String name, AsmMetric metric);
+}
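
A filter is just a serializable predicate over a metric's full name and instance, so custom filters are one small class. A sketch, assuming only the AsmMetricFilter interface added above (PrefixMetricFilter itself is hypothetical):

import com.alibaba.jstorm.common.metric.AsmMetric;
import com.alibaba.jstorm.metric.AsmMetricFilter;

public class PrefixMetricFilter implements AsmMetricFilter {
    private static final long serialVersionUID = 1L;

    private final String prefix;

    public PrefixMetricFilter(String prefix) {
        this.prefix = prefix;
    }

    @Override
    public boolean matches(String name, AsmMetric metric) {
        // Keep only metrics whose full name starts with the given prefix.
        return name != null && name.startsWith(prefix);
    }
}

Such a filter would then be passed to the lookup methods of AsmMetricRegistry below, e.g. getGauges(new PrefixMetricFilter("Netty-Client-")).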

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/metric/AsmMetricRegistry.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/AsmMetricRegistry.java b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/AsmMetricRegistry.java
new file mode 100644
index 0000000..710da9d
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/AsmMetricRegistry.java
@@ -0,0 +1,205 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.metric;
+
+import com.alibaba.jstorm.common.metric.*;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.*;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+/**
+ * Metric registry. Generally, methods of this class should not be exposed directly; the wrapper methods in JStormMonitorMetrics should be called instead.
+ *
+ * @author Cody (weiyue.wy@alibaba-inc.com)
+ * @since 2.0.5
+ */
+public class AsmMetricRegistry implements AsmMetricSet {
+    private static final long serialVersionUID = 8184106900230111064L;
+    private static final Logger LOG = LoggerFactory.getLogger(AsmMetricRegistry.class);
+
+    protected final ConcurrentMap<String, AsmMetric> metrics = new ConcurrentHashMap<String, AsmMetric>();
+
+    public int size() {
+        return metrics.size();
+    }
+
+    /**
+     * Given a {@link com.alibaba.jstorm.common.metric.old.window.Metric}, registers it under the given name.
+     *
+     * @param name   the metric node
+     * @param metric the metric
+     * @param <T>    the type of the metric
+     * @return {@code metric}
+     * @throws IllegalArgumentException if the name is already registered
+     */
+    @SuppressWarnings("unchecked")
+    public <T extends AsmMetric> AsmMetric register(String name, T metric) throws IllegalArgumentException {
+        metric.setMetricName(name);
+        final AsmMetric existing = metrics.putIfAbsent(name, metric);
+        if (existing == null) {
+            LOG.info("Successfully register metric of {}", name);
+            return metric;
+        } else {
+            LOG.warn("duplicate metric: {}", name);
+            return existing;
+        }
+    }
+
+    /**
+     * Removes the metric with the given name.
+     *
+     * @param name the metric node
+     * @return whether or not the metric was removed
+     */
+    public boolean remove(String name) {
+        final AsmMetric metric = metrics.remove(name);
+        if (metric != null) {
+            LOG.info("Successfully unregister metric of {}", name);
+            return true;
+        }
+        return false;
+    }
+
+    public AsmMetric getMetric(String name) {
+        return metrics.get(name);
+    }
+
+    /**
+     * Returns a set of the names of all the metrics in the registry.
+     *
+     * @return the names of all the metrics
+     */
+    public SortedSet<String> getMetricNames() {
+        return Collections.unmodifiableSortedSet(new TreeSet<String>(metrics.keySet()));
+    }
+
+    /**
+     * Returns a map of all the gauges in the registry and their names.
+     *
+     * @return all the gauges in the registry
+     */
+    public SortedMap<String, AsmGauge> getGauges() {
+        return getGauges(AsmMetricFilter.ALL);
+    }
+
+    /**
+     * Returns a map of all the gauges in the registry and their names which match the given filter.
+     *
+     * @param filter the metric filter to match
+     * @return all the gauges in the registry
+     */
+    public SortedMap<String, AsmGauge> getGauges(AsmMetricFilter filter) {
+        return getMetrics(AsmGauge.class, filter);
+    }
+
+    /**
+     * Returns a map of all the counters in the registry and their names.
+     *
+     * @return all the counters in the registry
+     */
+    public SortedMap<String, AsmCounter> getCounters() {
+        return getCounters(AsmMetricFilter.ALL);
+    }
+
+    /**
+     * Returns a map of all the counters in the registry and their names which match the given filter.
+     *
+     * @param filter the metric filter to match
+     * @return all the counters in the registry
+     */
+    public SortedMap<String, AsmCounter> getCounters(AsmMetricFilter filter) {
+        return getMetrics(AsmCounter.class, filter);
+    }
+
+    /**
+     * Returns a map of all the histograms in the registry and their names.
+     *
+     * @return all the histograms in the registry
+     */
+    public SortedMap<String, AsmHistogram> getHistograms() {
+        return getHistograms(AsmMetricFilter.ALL);
+    }
+
+    /**
+     * Returns a map of all the histograms in the registry and their names which match the given filter.
+     *
+     * @param filter the metric filter to match
+     * @return all the histograms in the registry
+     */
+    public SortedMap<String, AsmHistogram> getHistograms(AsmMetricFilter filter) {
+        return getMetrics(AsmHistogram.class, filter);
+    }
+
+    /**
+     * Returns a map of all the meters in the registry and their names.
+     *
+     * @return all the meters in the registry
+     */
+    public SortedMap<String, AsmMeter> getMeters() {
+        return getMeters(AsmMetricFilter.ALL);
+    }
+
+    /**
+     * Returns a map of all the meters in the registry and their names which match the given filter.
+     *
+     * @param filter the metric filter to match
+     * @return all the meters in the registry
+     */
+    public SortedMap<String, AsmMeter> getMeters(AsmMetricFilter filter) {
+        return getMetrics(AsmMeter.class, filter);
+    }
+
+    /**
+     * Returns a map of all the timers in the registry and their names.
+     *
+     * @return all the timers in the registry
+     */
+    public SortedMap<String, AsmTimer> getTimers() {
+        return getTimers(AsmMetricFilter.ALL);
+    }
+
+    /**
+     * Returns a map of all the timers in the registry and their names which match the given filter.
+     *
+     * @param filter the metric filter to match
+     * @return all the timers in the registry
+     */
+    public SortedMap<String, AsmTimer> getTimers(AsmMetricFilter filter) {
+        return getMetrics(AsmTimer.class, filter);
+    }
+
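+    /** Returns all metrics of the given type that pass the given filter, keyed and sorted by metric name. */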
+    @SuppressWarnings("unchecked")
+    private <T extends AsmMetric> SortedMap<String, T> getMetrics(Class<T> klass, AsmMetricFilter filter) {
+        final TreeMap<String, T> result = new TreeMap<String, T>();
+        for (Map.Entry<String, AsmMetric> entry : metrics.entrySet()) {
+            if (klass.isInstance(entry.getValue()) && filter.matches(entry.getKey(), entry.getValue())) {
+                result.put(entry.getKey(), (T) entry.getValue());
+            }
+        }
+        return result;
+    }
+
+    @Override
+    public Map<String, AsmMetric> getMetrics() {
+        return metrics;
+    }
+
+}
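
A minimal usage sketch of the registry above; the metric name and the AsmCounter no-arg constructor are assumptions for illustration, while the calls themselves are the API shown in this diff:

    AsmMetricRegistry registry = new AsmMetricRegistry();
    AsmCounter counter = new AsmCounter();                           // assumed constructor
    registry.register("mygroup.tuple-count", counter);               // a duplicate name returns the existing metric
    AsmMetric same = registry.getMetric("mygroup.tuple-count");      // lookup by name
    SortedMap<String, AsmCounter> counters = registry.getCounters(); // sorted, type-filtered view
    boolean removed = registry.remove("mygroup.tuple-count");        // true if the metric was present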


[50/51] [partial] storm git commit: Update JStorm to latest release 2.1.0

Posted by pt...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/history_cn.md
----------------------------------------------------------------------
diff --git a/history_cn.md b/history_cn.md
index e57e9bb..bf0bf72 100644
--- a/history_cn.md
+++ b/history_cn.md
@@ -1,21 +1,96 @@
 [JStorm English introduction](http://42.121.19.155/jstorm/JStorm-introduce-en.pptx)
 [JStorm Chinese introduction](http://42.121.19.155/jstorm/JStorm-introduce.pptx)
-#Release 2.0.4-SNAPSHOT
+
+# Release 2.1.0
+
+## New features
+* Completely refactored the web ui
+	*	 Polished the interface extensively
+	*	 Greatly improved web ui rendering speed
+	*	 Added basic summaries of the last 30 minutes for topologies and the cluster
+	*	 Added a topology graph, plus interactive features to surface key topology information at a glance (e.g. emit, tuple lifecycle time, tps)
+* Refactored the metrics system: brand-new metrics engine and monitoring system
+	*	 The new metrics no longer store data in zk
+	*	 Updated the underlying metrics engine, supporting noise reduction and easier merge computation
+	*	 Support high availability of metrics
+	*	 Added tuple lifecycle, netty and disk space metrics; worker memory metrics are more accurate
+	*	 Support external database plugins for storing monitoring data
+* Implemented intelligent backpressure
+	*	 Flow control is applied automatically
+	*	 The flow control state can also be adjusted manually
+* Implemented the central control unit TopologyMaster
+	*	 Refactored the heartbeat mechanism to support 6000+ tasks
+	*	 Collects all metrics and performs merge computation
+	*	 Central control flow coordinator
+	*	 HA state storage
+* Redefined the zk data structures and usage so that a single zookeeper cluster can support 2000+ physical machines
+	*	 No dynamic data is stored in zk any more
+	*	 Reduced the number of zk accesses when nimbus fetches topology/supervisor/cluster info
+	*	 Merged many task-level znodes to reduce zk access
+	*	 Optimized task error znodes to reduce zk access
+	*	 Optimized zk cache operations
+* Optimized the application-level batch feature for better performance
+	*	 Added automatic batch size adjustment
+	*	 Fixed a memory copy issue
+	*	 Data in internal channels no longer needs batching
+	*	 Kryo serialization by default
+* Added dynamic binary update and dynamic configuration update
+* Optimized localShuffle for better performance: three-level shuffle (local worker, local node, other nodes) that dynamically probes queue load and network connection state
+* Enabled kryo by default for better performance
+* Optimized the nimbus HA mechanism: only the nimbus with the highest priority can be promoted to master, improving stability
+
+
+
+## Improvements
+* The supervisor automatically dumps worker jstack and jmap when a worker is in invalid state
+* The supervisor supports memory oversell settings
+* Added download retries for topology-related files in the supervisor
+* Added a configurable logdir setting
+* Added a configuration option so the nimbus machine does not automatically start a supervisor
+* Added supervisor/nimbus/drpc gc logs
+* Optimized jvm parameters: 1. set -Xmn to 1/2 of heap memory; 2. set PermSize to 1/32 and MaxPermSize to 1/16 of heap memory; 3. added a minimum memory setting -Xms "worker.memory.min.size" (e.g. with a 4g heap this gives -Xmn 2g, PermSize 128m, MaxPermSize 256m)
+* Redefined ZK errors; when a worker dies, an error is now reported in the web ui
+* Updated zktool to support cleaning up topologies left in an unclean state, and added a list function
+* Optimized how retry intervals are computed for netty client and zk connection retries
+* Changed the out task status update mechanism from reading heartbeats in zk to using the network connection state, reducing the dependency on zk
+* Added the configuration parameter topology.enable.metrics: true/false to enable or disable metrics
+* Log grouping: logs with the same topologyName are grouped into a corresponding directory
+
+
+## Bug fix
+* Fix: the supervisor repeatedly downloaded the topology jar when scheduling changed
+* Fix: after a failed download, the supervisor no longer tries to start a worker with a broken jar
+* Many threads used the wrong conf; they now use the worker's conf
+* When a topology is submitted, the server now validates the topology name first
+* Fix: fieldGrouping previously did not support the Object[] data structure
+* Fix: made drpc a singleton
+* Improved client-side topologyNameExists to use the thrift api directly
+* Fix: restart could fail because the periodic cleanup thread removed files during the restart
+* Fix: backpressure could be lost when the trigger bolt failed
+* Fix: DefaultMetricUploader did not delete data from rocksdb, so new metrics data could not be added
+
+
+## Maintenance and scripts
+* Optimized the cleandisk.sh script to avoid deleting worker logs by mistake
+
+# Release 2.0.4-SNAPSHOT
+
 ## New features
-1.Completely refactored the metrics system, with brand-new Rollingwindow and Metric computation, especially for netty metrics; metric sending and receiving no longer go through zk
-2.Completely refactored the web-ui
-3.Introduced rocketdb, adding a nimbus cache layer
-4.Reviewed all zk nodes and zk operations, removing useless zk operations
-5.Reviewed all thrift data structures and functions, removing useless rpc functions
-6.Merged jstorm-client/jstorm-client-extension/jstorm-core into jstorm-core
-7.Aligned dependencies with storm
-8.Synced the apache-storm-0.10.0-beta1 java code
-9.Switched the logging system to logback
-10.Upgraded thrift to apache thrift 0.9.2
+1. Completely refactored the metrics system, with brand-new Rollingwindow and Metric computation, especially for netty metrics; metric sending and receiving no longer go through zk
+2. Completely refactored the web-ui
+3. Introduced rocksdb, adding a nimbus cache layer
+4. Reviewed all zk nodes and zk operations, removing useless zk operations
+5. Reviewed all thrift data structures and functions, removing useless rpc functions
+6. Merged jstorm-client/jstorm-client-extension/jstorm-core into jstorm-core
+7. Aligned dependencies with storm
+8. Synced the apache-storm-0.10.0-beta1 java code
+9. Switched the logging system to logback
+10. Upgraded thrift to apache thrift 0.9.2
 11. Optimized for very large topologies with 600+ workers / more than 2000 tasks
 12. Require jdk7 or higher
 
-#Release 0.9.7.1
+# Release 0.9.7.1
+
 ## New features
 1. Added automatic tuple batching to improve TPS and reduce message processing latency (task.batch.tuple=true,task.msg.batch.size=4)
 2. When the local node cannot keep up, localFirst automatically scales out to external nodes
@@ -28,21 +103,25 @@
 9. When starting nimbus/supervisor, refuse to start if the resolved address is 127.0.0.0
 10. Added custom examples
 11. Merged the supervisor's zk sync thread syncSupervisor and the worker sync thread syncProcess
+
 ## Configuration changes
 1. Set the default heartbeat timeout to 4 minutes
 2. Changed the size of the netty thread pool clientScheduleService to 5
+
 ## Bug fix
 1. Optimized gc parameters: workers with less than 4g of memory default to 4 gc threads; above 4g, the gc thread count is set as memory size / 1g * 1.5
 2. Fixed a bug where task heartbeats might not be updated in time when a bolt processed slowly
 3. Fixed an abnormal wait bug during netty reconnection in some cases
 4. Avoid creating duplicate thrift clients when submitting a topology
 5. Fixed repeated binary downloads when starting a worker failed
-##Maintenance and scripts
+
+## Maintenance and scripts
 1. Optimized the cleandisk.sh script to avoid deleting the current directory and /tmp/hsperfdata_admin/
 2. Added execute permission to the scripts under example
 3. Added the parameter supervisor.host.start: true/false, so start.sh can control in batch whether supervisors are started; the default is to start the supervisor
 
-#Release 0.9.7
+# Release 0.9.7
+
 ## New features
 1. Implemented dynamic adjustment of topology parallelism. Workers, spouts, bolts or ackers can be scaled out or in without taking the topology offline. The rebalance command was extended to support dynamic scaling.
 2. When resource isolation is enabled, added an upper limit on a worker's cpu core usage
@@ -56,11 +135,13 @@
 10. Added supervisor heartbeat checks; tasks are no longer assigned to supervisors whose heartbeats have timed out
 11. Updated the data structure of user defined metrics sent to Alimonitor
 12. Added a client-side exclude-jar feature; when submitting a topology, jar conflicts can be resolved via exclude-jar and the classloader.
+
 ## Configuration changes
 1. Changed the supervisor-to-nimbus heartbeat timeout to 180 seconds
 2. To avoid out-of-memory, set the default value of storm.messaging.netty.max.pending to 4
 3. Set Nimbus memory to 4G
 4. Increased queue sizes: task queue size to 1024, total send and total receive queues to 2048
+
 ## Bug fix
 1. When a topology with many workers was restarted several times within a short period, the Supervisor could appear to hang due to an OOM in the Nimbus thrift thread
 2. When topologies were submitted at the same time, later submissions could fail
@@ -73,12 +154,14 @@
 9. Fixed zkTool failing to read the monitor znode
 10. Fixed an exception in local mode when the classloader is enabled
 11. Fixed duplicated logs in local mode when using custom logback
+
 ## Maintenance & scripts
 1. Add rpm build spec
 2. Add deploy files of jstorm for rpm package building
 3. Changed the cronjob to run hourly, and coredumps are now kept for one hour
 
-#Release 0.9.6.3
+# Release 0.9.6.3
+
 ## New features
 1. Implemented tick tuple
 2. Support logback
@@ -88,6 +171,7 @@
 6. Use ip everywhere at the bottom layer; custom scheduling supports mixing ip and hostname
 7. Local mode supports junit test
 8. Client commands (e.g. when submitting a jar) can specify the storm.yaml configuration file
+
 ## Bug fix
 1. Added an active action in the spout's prepare
 2. Multi-language support
@@ -116,7 +200,7 @@
 25. Set the local ephemeral port range in the rpm package
 26. Need a noarch rpm package
 
-#Release 0.9.6.2
+# Release 0.9.6.2
 1. Add option to switch between BlockingQueue and Disruptor
 2. Fix the bug which under sync netty mode, client failed to send message to server 
 3. Fix the bug let web UI can dispaly 0.9.6.1 cluster
@@ -126,7 +210,7 @@
 7. Add the validation of topology name, component name... Only A-Z, a-z, 0-9, '_', '-', '.' are valid now.
 8. Fix the bug close thrift client
 
-#Release 0.9.6.2-rc
+# Release 0.9.6.2-rc
 1. Improve user experience from Web UI
 1.1 Add jstack link
 1.2 Add worker log link in supervisor page
@@ -146,7 +230,7 @@
 11. Add tcp option "reuseAddress" in netty framework
 12. Fix the bug: When spout does not implement the ICommitterTrident interface, MasterCoordinatorSpout will stick on commit phase.
 
-#Release 0.9.6.2-rc
+# Release 0.9.6.2-rc
 1. Improve user experience from Web UI
 1.1 Add jstack link
 1.2 Add worker log link in supervisor page
@@ -166,7 +250,7 @@
 11. Add tcp option "reuseAddress" in netty framework
 12. Fix the bug: When spout does not implement the ICommitterTrident interface, MasterCoordinatorSpout will stick on commit phase.
 
-#Release 0.9.6.1
+# Release 0.9.6.1
 1. Add management of multiclusters to Web UI. Added management tools for multiclusters in WebUI.
 2. Merged Trident API from storm-0.9.3
 3. Replaced gson with fastjson
@@ -192,7 +276,7 @@
 23. Support assign topology to user-defined supervisors
 
 
-#Release 0.9.6
+# Release 0.9.6
 1. Update UI 
   - Display the metrics information of task and worker
   - Add warning flag when errors occur for a topology
@@ -206,7 +290,7 @@
 8. Add closing channel check in netty client to avoid double close
 9. Add connecting check in netty client to avoid connecting one server twice at one time 
 
-#Release 0.9.5.1
+# Release 0.9.5.1
 1. Add netty sync mode
 2. Add block operation in netty async mode
 3. Replace exception with Throwable in executor layer
@@ -214,16 +298,18 @@
 5. Add more netty junit test
 6. Add log when queue is full
 
-#Release 0.9.5
-##Big feature:
+# Release 0.9.5
+
+## Big feature:
 1. Redesign scheduler arithmetic, basing worker not task .
 
 ## Bug fix
 1. Fix disruptor use too much cpu
 2. Add target NettyServer log when f1ail to send data by netty
 
-#Release 0.9.4.1
-##Bug fix:
+# Release 0.9.4.1
+
+## Bug fix:
 1. Improve speed between tasks who is running in one worker
 2. Fix wrong timeout seconds
 3. Add checking port when worker initialize and begin to kill old worker
@@ -242,7 +328,7 @@
 
 
 
-#Release 0.9.4
+# Release 0.9.4
 
 ## Big features
 1. Add transaction programming mode
@@ -258,7 +344,7 @@
 
 
 
-##Bug fix:
+## Bug fix:
 1. Setting buffer size  when upload jar
 2. Add lock between ZK watch and timer thread when refresh connection
 3. Enable nimbus monitor thread only when topology is running in cluster mode
@@ -266,7 +352,7 @@
 5. classloader fix when both parent and current classloader load the same class
 6. Fix log view null pointer exception
 
-#Release 0.9.3.1
+# Release 0.9.3.1
 
 ## Enhancement
 1. switch apache thrift7 to storm thrift7
@@ -277,7 +363,8 @@
 6. Set gc dump dir as log's dir
 
 
-#Release 0.9.3
+# Release 0.9.3
+
 ## New feature
 1. Support Aliyun Apsara/Hadoop Yarn
 
@@ -309,7 +396,8 @@
  
 
 
-#Release 0.9.2
+# Release 0.9.2
+
 ## New feature
 1. Support LocalCluster/LocalDrpc mode, support debugging topology under local mode
 2. Support CGroups, assigning CPU in hardware level.
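
To make the configuration switches quoted in this changelog concrete, here is a minimal sketch; the keys are taken verbatim from the entries above, while the surrounding conf map is illustrative:

    Map<String, Object> conf = new HashMap<String, Object>();
    conf.put("topology.enable.metrics", true); // 2.1.0: enable or disable metrics
    conf.put("task.batch.tuple", true);        // 0.9.7.1: automatic tuple batching
    conf.put("task.msg.batch.size", 4);        // 0.9.7.1: tuple batch size
    conf.put("supervisor.host.start", true);   // 0.9.7.1: whether start.sh starts the supervisor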

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/pom.xml
----------------------------------------------------------------------
diff --git a/jstorm-core/pom.xml b/jstorm-core/pom.xml
index 489ff69..cf84d19 100755
--- a/jstorm-core/pom.xml
+++ b/jstorm-core/pom.xml
@@ -14,14 +14,13 @@
  See the License for the specific language governing permissions and
  limitations under the License.
 -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
 
 	
 	<parent>
 		<groupId>com.alibaba.jstorm</groupId>
 		<artifactId>jstorm-all</artifactId>
-		<version>2.0.4-SNAPSHOT</version>
+		<version>2.1.0</version>
 		<relativePath>..</relativePath>
 	</parent>
 	<!-- <parent> 
@@ -71,9 +70,8 @@
 	
 	<properties>
 		<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-		<powermock.version>1.4.11</powermock.version>
-        <metrics.version>3.1.2</metrics.version>
-    </properties>
+		<metrics.version>3.1.2</metrics.version>
+	</properties>
 	<dependencies>
 		<dependency>
 			<groupId>org.clojure</groupId>
@@ -121,11 +119,11 @@
                 </exclusion>
             </exclusions>
 		</dependency>
-		<dependency>
+		<!--<dependency>
 			<groupId>clj-time</groupId>
-            <artifactId>clj-time</artifactId>
+			<artifactId>clj-time</artifactId>
 			<version>0.8.0</version>
-		</dependency>
+		</dependency>-->
 		<dependency>
 			<groupId>org.apache.curator</groupId>
             <artifactId>curator-framework</artifactId>
@@ -191,21 +189,15 @@
 			<scope>test</scope>
 		</dependency>
 		<dependency>
-			<groupId>org.powermock</groupId>
-			<artifactId>powermock-module-junit4</artifactId>
-			<version>${powermock.version}</version>
-			<scope>test</scope>
-		</dependency>
-		<dependency>
 			<groupId>ch.qos.logback</groupId>
 			<artifactId>logback-classic</artifactId>
 			<version>1.0.13</version>
-        </dependency>
-		<dependency>
-            <groupId>org.slf4j</groupId>
-            <artifactId>log4j-over-slf4j</artifactId>
-            <version>1.6.6</version>
-        </dependency>
+		</dependency>
+		<dependency>
+			<groupId>org.slf4j</groupId>
+			<artifactId>log4j-over-slf4j</artifactId>
+			<version>1.6.6</version>
+		</dependency>
 		<!-- 
 		<dependency>
 		      <groupId>org.codehaus.plexus</groupId>


[16/51] [partial] storm git commit: Update JStorm to latest release 2.1.0

Posted by pt...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/ServiceHandler.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/ServiceHandler.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/ServiceHandler.java
index 513e83f..b60864a 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/ServiceHandler.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/ServiceHandler.java
@@ -18,95 +18,54 @@
  */
 package com.alibaba.jstorm.daemon.nimbus;
 
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.channels.Channels;
-import java.nio.channels.WritableByteChannel;
-import java.security.InvalidParameterException;
-import java.util.ArrayList;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.TreeMap;
-import java.util.UUID;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang.StringUtils;
-import org.apache.thrift.TException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import backtype.storm.Config;
 import backtype.storm.daemon.Shutdownable;
-import backtype.storm.generated.AlreadyAliveException;
-import backtype.storm.generated.ClusterSummary;
-import backtype.storm.generated.ComponentSummary;
-import backtype.storm.generated.Credentials;
-import backtype.storm.generated.ErrorInfo;
-import backtype.storm.generated.InvalidTopologyException;
-import backtype.storm.generated.KillOptions;
-import backtype.storm.generated.MetricInfo;
-import backtype.storm.generated.MonitorOptions;
-import backtype.storm.generated.NettyMetric;
+import backtype.storm.generated.*;
 import backtype.storm.generated.Nimbus.Iface;
-import backtype.storm.generated.NimbusSummary;
-import backtype.storm.generated.NotAliveException;
-import backtype.storm.generated.RebalanceOptions;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.generated.SubmitOptions;
-import backtype.storm.generated.SupervisorSummary;
-import backtype.storm.generated.SupervisorWorkers;
-import backtype.storm.generated.TaskComponent;
-import backtype.storm.generated.TaskSummary;
-import backtype.storm.generated.TopologyAssignException;
-import backtype.storm.generated.TopologyInfo;
-import backtype.storm.generated.TopologyInitialStatus;
-import backtype.storm.generated.TopologyMetric;
-import backtype.storm.generated.TopologySummary;
-import backtype.storm.generated.WorkerSummary;
-import backtype.storm.generated.WorkerUploadMetrics;
 import backtype.storm.utils.BufferFileInputStream;
 import backtype.storm.utils.TimeCacheMap;
 import backtype.storm.utils.Utils;
-
 import com.alibaba.jstorm.callback.impl.RemoveTransitionCallback;
-import com.alibaba.jstorm.cluster.Cluster;
-import com.alibaba.jstorm.cluster.Common;
-import com.alibaba.jstorm.cluster.DaemonCommon;
-import com.alibaba.jstorm.cluster.StormBase;
-import com.alibaba.jstorm.cluster.StormClusterState;
-import com.alibaba.jstorm.cluster.StormConfig;
+import com.alibaba.jstorm.client.ConfigExtension;
+import com.alibaba.jstorm.cluster.*;
 import com.alibaba.jstorm.daemon.supervisor.SupervisorInfo;
-import com.alibaba.jstorm.metric.MetricDef;
+import com.alibaba.jstorm.metric.MetaType;
+import com.alibaba.jstorm.metric.MetricUtils;
 import com.alibaba.jstorm.metric.SimpleJStormMetric;
 import com.alibaba.jstorm.schedule.Assignment;
 import com.alibaba.jstorm.schedule.default_assign.ResourceWorkerSlot;
 import com.alibaba.jstorm.task.TaskInfo;
 import com.alibaba.jstorm.task.error.TaskError;
-import com.alibaba.jstorm.task.heartbeat.TaskHeartbeat;
-import com.alibaba.jstorm.utils.FailedAssignTopologyException;
-import com.alibaba.jstorm.utils.JStormUtils;
-import com.alibaba.jstorm.utils.NetWorkUtils;
-import com.alibaba.jstorm.utils.PathUtils;
-import com.alibaba.jstorm.utils.Thrift;
-import com.alibaba.jstorm.utils.TimeUtils;
+import com.alibaba.jstorm.utils.*;
+import com.google.common.collect.Lists;
+import org.apache.commons.io.FileExistsException;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang.StringUtils;
+import org.apache.thrift.TException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.Channels;
+import java.nio.channels.WritableByteChannel;
+import java.security.InvalidParameterException;
+import java.util.*;
+import java.util.Map.Entry;
+import java.util.concurrent.ConcurrentHashMap;
+
+import static com.alibaba.jstorm.daemon.nimbus.TopologyMetricsRunnable.*;
 
 /**
  * Thrift callback, all commands handling entrance
- * 
+ *
  * @author version 1: lixin, version 2:Longda
- * 
  */
 public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
-    private final static Logger LOG = LoggerFactory
-            .getLogger(ServiceHandler.class);
+    private final static Logger LOG = LoggerFactory.getLogger(ServiceHandler.class);
 
     public final static int THREAD_NUM = 64;
 
@@ -138,24 +97,18 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
     }
 
     @Override
-    public void submitTopology(String name, String uploadedJarLocation,
-            String jsonConf, StormTopology topology)
-            throws AlreadyAliveException, InvalidTopologyException,
-            TopologyAssignException, TException {
+    public void submitTopology(String name, String uploadedJarLocation, String jsonConf, StormTopology topology) throws TException, AlreadyAliveException,
+            InvalidTopologyException, TopologyAssignException {
         SubmitOptions options = new SubmitOptions(TopologyInitialStatus.ACTIVE);
-
-        submitTopologyWithOpts(name, uploadedJarLocation, jsonConf, topology,
-                options);
+        submitTopologyWithOpts(name, uploadedJarLocation, jsonConf, topology, options);
     }
 
-    private void makeAssignment(String topologyName, String topologyId,
-            TopologyInitialStatus status) throws FailedAssignTopologyException {
+    private void makeAssignment(String topologyName, String topologyId, TopologyInitialStatus status) throws FailedAssignTopologyException {
         TopologyAssignEvent assignEvent = new TopologyAssignEvent();
         assignEvent.setTopologyId(topologyId);
         assignEvent.setScratch(false);
         assignEvent.setTopologyName(topologyName);
-        assignEvent.setOldStatus(Thrift
-                .topologyInitialStatusToStormStatus(status));
+        assignEvent.setOldStatus(Thrift.topologyInitialStatusToStormStatus(status));
 
         TopologyAssign.push(assignEvent);
 
@@ -169,86 +122,100 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
 
     /**
      * Submit one Topology
-     * 
-     * @param topologyname String: topology name
+     *
+     * @param topologyName        String: topology name
      * @param uploadedJarLocation String: already uploaded jar path
-     * @param jsonConf String: jsonConf serialize all toplogy configuration to
-     *            Json
-     * @param topology StormTopology: topology Object
+     * @param jsonConf            String: the topology configuration serialized to JSON
+     * @param topology            StormTopology: topology Object
      */
     @SuppressWarnings("unchecked")
     @Override
-    public void submitTopologyWithOpts(String topologyname,
-            String uploadedJarLocation, String jsonConf,
-            StormTopology topology, SubmitOptions options)
-            throws AlreadyAliveException, InvalidTopologyException,
-            TopologyAssignException, TException {
-        LOG.info("Receive " + topologyname + ", uploadedJarLocation:"
-                + uploadedJarLocation);
+    public void submitTopologyWithOpts(String topologyName, String uploadedJarLocation, String jsonConf, StormTopology topology, SubmitOptions options)
+            throws AlreadyAliveException, InvalidTopologyException, TopologyAssignException, TException {
+        LOG.info("Receive " + topologyName + ", uploadedJarLocation:" + uploadedJarLocation);
         long start = System.nanoTime();
+
+        // check that the topology name is valid
+        if (!Common.charValidate(topologyName)) {
+            throw new InvalidTopologyException(topologyName + " is not a valid topology name");
+        }
+
         try {
-            checkTopologyActive(data, topologyname, false);
+            checkTopologyActive(data, topologyName, false);
         } catch (AlreadyAliveException e) {
-            LOG.info(topologyname + " is already exist ");
+            LOG.info(topologyName + " already exists ");
             throw e;
         } catch (Throwable e) {
             LOG.info("Failed to check whether topology is alive or not", e);
             throw new TException(e);
         }
 
-        int counter = data.getSubmittedCount().incrementAndGet();
-        String topologyId = Common.topologyNameToId(topologyname, counter);
-        data.getPendingSubmitTopoloygs().put(topologyId, null);
-
+        String topologyId = null;
+        synchronized (data) {
+            // avoid the same topology being submitted more than once at the same time
+            Set<String> pendingTopologys = data.getPendingSubmitTopoloygs().keySet();
+            for (String cachedTopologyId : pendingTopologys) {
+                if (cachedTopologyId.contains(topologyName + "-"))
+                    throw new AlreadyAliveException(
+                            topologyName + " has already been submitted");
+            }
+            int counter = data.getSubmittedCount().incrementAndGet();
+            topologyId = Common.topologyNameToId(topologyName, counter);
+            data.getPendingSubmitTopoloygs().put(topologyId, null);
+        }
         try {
 
-            Map<Object, Object> serializedConf =
-                    (Map<Object, Object>) JStormUtils.from_json(jsonConf);
+            Map<Object, Object> serializedConf = (Map<Object, Object>) JStormUtils.from_json(jsonConf);
             if (serializedConf == null) {
                 LOG.warn("Failed to serialized Configuration");
-                throw new InvalidTopologyException(
-                        "Failed to serilaze topology configuration");
+                throw new InvalidTopologyException("Failed to serialize topology configuration");
             }
 
             serializedConf.put(Config.TOPOLOGY_ID, topologyId);
-            serializedConf.put(Config.TOPOLOGY_NAME, topologyname);
+            serializedConf.put(Config.TOPOLOGY_NAME, topologyName);
 
             Map<Object, Object> stormConf;
 
-            stormConf =
-                    NimbusUtils.normalizeConf(conf, serializedConf, topology);
+            stormConf = NimbusUtils.normalizeConf(conf, serializedConf, topology);
             LOG.info("Normalized configuration:" + stormConf);
-            data.getTopologyNettyMgr().setTopology(stormConf);
 
-            Map<Object, Object> totalStormConf =
-                    new HashMap<Object, Object>(conf);
+            Map<Object, Object> totalStormConf = new HashMap<Object, Object>(conf);
             totalStormConf.putAll(stormConf);
 
-            StormTopology normalizedTopology =
-                    NimbusUtils.normalizeTopology(stormConf, topology, true);
+            StormTopology normalizedTopology = NimbusUtils.normalizeTopology(stormConf, topology, true);
 
             // this validates the structure of the topology
-            Common.validate_basic(normalizedTopology, totalStormConf,
-                    topologyId);
-            
+            Common.validate_basic(normalizedTopology, totalStormConf, topologyId);
             // don't need generate real topology, so skip Common.system_topology
             // Common.system_topology(totalStormConf, topology);
 
             StormClusterState stormClusterState = data.getStormClusterState();
 
+            double metricsSampleRate = ConfigExtension.getMetricSampleRate(stormConf);
             // create /local-dir/nimbus/topologyId/xxxx files
-            setupStormCode(conf, topologyId, uploadedJarLocation, stormConf,
-                    normalizedTopology);
+            setupStormCode(conf, topologyId, uploadedJarLocation, stormConf, normalizedTopology);
 
             // generate TaskInfo for every bolt or spout in ZK
             // /ZK/tasks/topoologyId/xxx
             setupZkTaskInfo(conf, topologyId, stormClusterState);
 
             // make assignments for a topology
-            LOG.info("Submit for " + topologyname + " with conf "
-                    + serializedConf);
-            makeAssignment(topologyname, topologyId,
-                    options.get_initial_status());
+            LOG.info("Submit for " + topologyName + " with conf " + serializedConf);
+            makeAssignment(topologyName, topologyId, options.get_initial_status());
+
+            // once the assignment is made for the topology, remove its topologyId from
+            // pendingSubmitTopologys
+            data.getPendingSubmitTopoloygs().remove(topologyId);
+
+            // push start event after startup
+            StartTopologyEvent startEvent = new StartTopologyEvent();
+            startEvent.clusterName = this.data.getClusterName();
+            startEvent.topologyId = topologyId;
+            startEvent.timestamp = System.currentTimeMillis();
+            startEvent.sampleRate = metricsSampleRate;
+            this.data.getMetricRunnable().pushEvent(startEvent);
 
         } catch (FailedAssignTopologyException e) {
             StringBuilder sb = new StringBuilder();
@@ -291,28 +258,27 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
             LOG.error(sb.toString(), e);
             data.getPendingSubmitTopoloygs().remove(topologyId);
             throw new TopologyAssignException(sb.toString());
-        }finally {
-            double spend = (System.nanoTime() - start)/1000000.0d;
-            SimpleJStormMetric.updateHistorgram("submitTopologyWithOpts", spend);
-            LOG.info("submitTopologyWithOpts {} spend {}ms", topologyname, spend);
+        } finally {
+            double spend = (System.nanoTime() - start) / TimeUtils.NS_PER_US;
+            SimpleJStormMetric.updateNimbusHistogram("submitTopologyWithOpts", spend);
+            LOG.info("submitTopologyWithOpts {} costs {}ms", topologyName, spend);
         }
 
     }
 
     /**
      * kill topology
-     * 
-     * @param topologyname String topology name
+     *
+     * @param topologyName String topology name
      */
     @Override
-    public void killTopology(String name) throws NotAliveException, TException {
-        killTopologyWithOpts(name, new KillOptions());
+    public void killTopology(String topologyName) throws TException, NotAliveException {
+        killTopologyWithOpts(topologyName, new KillOptions());
 
     }
 
     @Override
-    public void killTopologyWithOpts(String topologyName, KillOptions options)
-            throws NotAliveException, TException {
+    public void killTopologyWithOpts(String topologyName, KillOptions options) throws TException, NotAliveException {
         try {
             checkTopologyActive(data, topologyName, true);
 
@@ -322,17 +288,13 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
             if (options.is_set_wait_secs()) {
                 wait_amt = options.get_wait_secs();
             }
-            NimbusUtils.transitionName(data, topologyName, true,
-                    StatusType.kill, wait_amt);
+            NimbusUtils.transitionName(data, topologyName, true, StatusType.kill, wait_amt);
 
-            TopologyMetricsRunnable.Remove event =
-                    new TopologyMetricsRunnable.Remove();
+            Remove event = new Remove();
             event.topologyId = topologyId;
-
             data.getMetricRunnable().pushEvent(event);
         } catch (NotAliveException e) {
-            String errMsg =
-                    "KillTopology Error, no this topology " + topologyName;
+            String errMsg = "KillTopology Error, no this topology " + topologyName;
             LOG.error(errMsg, e);
             throw new NotAliveException(errMsg);
         } catch (Exception e) {
@@ -345,16 +307,13 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
 
     /**
      * set topology status as active
-     * 
-     * @param topologyname
-     * 
+     *
+     * @param topologyName
      */
     @Override
-    public void activate(String topologyName) throws NotAliveException,
-            TException {
+    public void activate(String topologyName) throws TException, NotAliveException {
         try {
-            NimbusUtils.transitionName(data, topologyName, true,
-                    StatusType.activate);
+            NimbusUtils.transitionName(data, topologyName, true, StatusType.activate);
         } catch (NotAliveException e) {
             String errMsg = "Activate Error, no this topology " + topologyName;
             LOG.error(errMsg, e);
@@ -369,20 +328,15 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
 
     /**
     * set topology status as inactive
-     * 
-     * @param topologyname
-     * 
+     *
+     * @param topologyName
      */
     @Override
-    public void deactivate(String topologyName) throws NotAliveException,
-            TException {
-
+    public void deactivate(String topologyName) throws TException, NotAliveException {
         try {
-            NimbusUtils.transitionName(data, topologyName, true,
-                    StatusType.inactivate);
+            NimbusUtils.transitionName(data, topologyName, true, StatusType.inactivate);
         } catch (NotAliveException e) {
-            String errMsg =
-                    "Deactivate Error, no this topology " + topologyName;
+            String errMsg = "Deactivate Error, no this topology " + topologyName;
             LOG.error(errMsg, e);
             throw new NotAliveException(errMsg);
         } catch (Exception e) {
@@ -395,20 +349,16 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
 
     /**
      * rebalance one topology
-     * 
+     *
+     * @param topologyName topology name
+     * @param options      RebalanceOptions
     * @@@ rebalance options haven't been implemented yet
-     * 
-     *     It is used to let workers wait several seconds to finish jobs
-     * 
-     * @param topologyname String
-     * @param options RebalanceOptions
+     * <p/>
+     * It is used to let workers wait several seconds to finish jobs
      */
     @Override
-    public void rebalance(String topologyName, RebalanceOptions options)
-            throws NotAliveException, TException, InvalidTopologyException {
-
+    public void rebalance(String topologyName, RebalanceOptions options) throws TException, NotAliveException {
         try {
-
             checkTopologyActive(data, topologyName, true);
             Integer wait_amt = null;
             String jsonConf = null;
@@ -422,15 +372,11 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
                     jsonConf = options.get_conf();
             }
 
-            LOG.info("Begin to rebalance " + topologyName + "wait_time:"
-                    + wait_amt + ", reassign: " + reassign
-                    + ", new worker/bolt configuration:" + jsonConf);
+            LOG.info("Begin to rebalance " + topologyName + "wait_time:" + wait_amt + ", reassign: " + reassign + ", new worker/bolt configuration:" + jsonConf);
 
-            Map<Object, Object> conf =
-                    (Map<Object, Object>) JStormUtils.from_json(jsonConf);
+            Map<Object, Object> conf = (Map<Object, Object>) JStormUtils.from_json(jsonConf);
 
-            NimbusUtils.transitionName(data, topologyName, true,
-                    StatusType.rebalance, wait_amt, reassign, conf);
+            NimbusUtils.transitionName(data, topologyName, true, StatusType.rebalance, wait_amt, reassign, conf);
         } catch (NotAliveException e) {
             String errMsg = "Rebalance Error, no this topology " + topologyName;
             LOG.error(errMsg, e);
@@ -444,13 +390,12 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
     }
 
     @Override
-    public void restart(String name, String jsonConf) throws NotAliveException,
-            InvalidTopologyException, TopologyAssignException, TException {
+    public void restart(String name, String jsonConf) throws TException, NotAliveException, InvalidTopologyException, TopologyAssignException {
         LOG.info("Begin to restart " + name + ", new configuration:" + jsonConf);
 
         // 1. get topologyId
         StormClusterState stormClusterState = data.getStormClusterState();
-        String topologyId = null;
+        String topologyId;
         try {
             topologyId = Cluster.get_topology_id(stormClusterState, name);
         } catch (Exception e2) {
@@ -468,30 +413,26 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
         LOG.info("Deactivate " + name);
 
         // 3. backup old jar/configuration/topology
-        StormTopology topology = null;
-
-        Map topologyConf = null;
+        StormTopology topology;
+        Map topologyConf;
         String topologyCodeLocation = null;
         try {
             topology = StormConfig.read_nimbus_topology_code(conf, topologyId);
-
-            topologyConf =
-                    StormConfig.read_nimbus_topology_conf(conf, topologyId);
+            topologyConf = StormConfig.read_nimbus_topology_conf(conf, topologyId);
             if (jsonConf != null) {
-                Map<Object, Object> newConf =
-                        (Map<Object, Object>) JStormUtils.from_json(jsonConf);
+                Map<Object, Object> newConf = (Map<Object, Object>) JStormUtils.from_json(jsonConf);
                 topologyConf.putAll(newConf);
             }
 
             // Copy storm files back to stormdist dir from the tmp dir
-            String oldDistDir =
-                    StormConfig.masterStormdistRoot(conf, topologyId);
+            String oldDistDir = StormConfig.masterStormdistRoot(conf, topologyId);
             String parent = StormConfig.masterInbox(conf);
             topologyCodeLocation = parent + PathUtils.SEPERATOR + topologyId;
             FileUtils.forceMkdir(new File(topologyCodeLocation));
             FileUtils.cleanDirectory(new File(topologyCodeLocation));
-            FileUtils.copyDirectory(new File(oldDistDir), new File(
-                    topologyCodeLocation));
+            File stormDistDir = new File(oldDistDir);
+            stormDistDir.setLastModified(System.currentTimeMillis());
+            FileUtils.copyDirectory(stormDistDir, new File(topologyCodeLocation));
 
             LOG.info("Successfully read old jar/conf/topology " + name);
         } catch (Exception e) {
@@ -499,8 +440,7 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
             if (topologyCodeLocation != null) {
                 try {
                     PathUtils.rmr(topologyCodeLocation);
-                } catch (IOException e1) {
-
+                } catch (IOException ignored) {
                 }
             }
             throw new TException("Failed to read old jar/conf/topology ");
@@ -509,24 +449,31 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
 
         // 4. Kill
         // directly use remove command to kill, more stable than issue kill cmd
-        RemoveTransitionCallback killCb =
-                new RemoveTransitionCallback(data, topologyId);
+        RemoveTransitionCallback killCb = new RemoveTransitionCallback(data, topologyId);
         killCb.execute(new Object[0]);
         LOG.info("Successfully kill the topology " + name);
 
+        // send metric events
+        TopologyMetricsRunnable.KillTopologyEvent killEvent = new TopologyMetricsRunnable.KillTopologyEvent();
+        killEvent.clusterName = this.data.getClusterName();
+        killEvent.topologyId = topologyId;
+        killEvent.timestamp = System.currentTimeMillis();
+        this.data.getMetricRunnable().pushEvent(killEvent);
+
+        Remove removeEvent = new Remove();
+        removeEvent.topologyId = topologyId;
+        this.data.getMetricRunnable().pushEvent(removeEvent);
+
         // 5. submit
         try {
-            submitTopology(name, topologyCodeLocation,
-                    JStormUtils.to_json(topologyConf), topology);
-
+            submitTopology(name, topologyCodeLocation, JStormUtils.to_json(topologyConf), topology);
         } catch (AlreadyAliveException e) {
             LOG.info("Failed to kill the topology" + name);
             throw new TException("Failed to kill the topology" + name);
         } finally {
             try {
                 PathUtils.rmr(topologyCodeLocation);
-            } catch (IOException e1) {
-
+            } catch (IOException ignored) {
             }
         }
 
@@ -537,11 +484,9 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
         try {
             String parent = PathUtils.parent_path(libName);
             PathUtils.local_mkdirs(parent);
-            data.getUploaders().put(libName,
-                    Channels.newChannel(new FileOutputStream(libName)));
+            data.getUploaders().put(libName, Channels.newChannel(new FileOutputStream(libName)));
             LOG.info("Begin upload file from client to " + libName);
         } catch (Exception e) {
-            // TODO Auto-generated catch block
             LOG.error("Fail to upload jar " + libName, e);
             throw new TException(e);
         }
@@ -549,23 +494,20 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
 
     /**
      * prepare to uploading topology jar, return the file location
-     * 
-     * @throws
      */
     @Override
     public String beginFileUpload() throws TException {
 
         String fileLoc = null;
         try {
-            String path = null;
+            String path;
             String key = UUID.randomUUID().toString();
             path = StormConfig.masterInbox(conf) + "/" + key;
             FileUtils.forceMkdir(new File(path));
             FileUtils.cleanDirectory(new File(path));
             fileLoc = path + "/stormjar-" + key + ".jar";
 
-            data.getUploaders().put(fileLoc,
-                    Channels.newChannel(new FileOutputStream(fileLoc)));
+            data.getUploaders().put(fileLoc, Channels.newChannel(new FileOutputStream(fileLoc)));
             LOG.info("Begin upload file from client to " + fileLoc);
             return path;
         } catch (FileNotFoundException e) {
@@ -581,14 +523,11 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
      * uploading topology jar data
      */
     @Override
-    public void uploadChunk(String location, ByteBuffer chunk)
-            throws TException {
+    public void uploadChunk(String location, ByteBuffer chunk) throws TException {
         TimeCacheMap<Object, Object> uploaders = data.getUploaders();
         Object obj = uploaders.get(location);
         if (obj == null) {
-            throw new TException(
-                    "File for that location does not exist (or timed out) "
-                            + location);
+            throw new TException("File for that location does not exist (or timed out) " + location);
         }
         try {
             if (obj instanceof WritableByteChannel) {
@@ -596,13 +535,10 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
                 channel.write(chunk);
                 uploaders.put(location, channel);
             } else {
-                throw new TException("Object isn't WritableByteChannel for "
-                        + location);
+                throw new TException("Object isn't WritableByteChannel for " + location);
             }
         } catch (IOException e) {
-            String errMsg =
-                    " WritableByteChannel write filed when uploadChunk "
-                            + location;
+            String errMsg = "WritableByteChannel write failed when uploadChunk " + location;
             LOG.error(errMsg);
             throw new TException(e);
         }
@@ -611,12 +547,10 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
 
     @Override
     public void finishFileUpload(String location) throws TException {
-
         TimeCacheMap<Object, Object> uploaders = data.getUploaders();
         Object obj = uploaders.get(location);
         if (obj == null) {
-            throw new TException(
-                    "File for that location does not exist (or timed out)");
+            throw new TException("File for that location does not exist (or timed out)");
         }
         try {
             if (obj instanceof WritableByteChannel) {
@@ -625,25 +559,20 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
                 uploaders.remove(location);
                 LOG.info("Finished uploading file from client: " + location);
             } else {
-                throw new TException("Object isn't WritableByteChannel for "
-                        + location);
+                throw new TException("Object isn't WritableByteChannel for " + location);
             }
         } catch (IOException e) {
-            LOG.error(" WritableByteChannel close failed when finishFileUpload "
-                    + location);
+            LOG.error(" WritableByteChannel close failed when finishFileUpload " + location);
         }
 
     }
 
     @Override
     public String beginFileDownload(String file) throws TException {
-        BufferFileInputStream is = null;
-        String id = null;
+        BufferFileInputStream is;
+        String id;
         try {
-            int bufferSize =
-                    JStormUtils.parseInt(
-                            conf.get(Config.NIMBUS_THRIFT_MAX_BUFFER_SIZE),
-                            1024 * 1024) / 2;
+            int bufferSize = JStormUtils.parseInt(conf.get(Config.NIMBUS_THRIFT_MAX_BUFFER_SIZE), 1024 * 1024) / 2;
 
             is = new BufferFileInputStream(file, bufferSize);
             id = UUID.randomUUID().toString();
@@ -670,16 +599,14 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
                 BufferFileInputStream is = (BufferFileInputStream) obj;
                 byte[] ret = is.read();
                 if (ret != null) {
-                    downloaders.put(id, (BufferFileInputStream) is);
+                    downloaders.put(id, is);
                     return ByteBuffer.wrap(ret);
                 }
             } else {
-                throw new TException("Object isn't BufferFileInputStream for "
-                        + id);
+                throw new TException("Object isn't BufferFileInputStream for " + id);
             }
         } catch (IOException e) {
-            LOG.error("BufferFileInputStream read failed when downloadChunk ",
-                    e);
+            LOG.error("BufferFileInputStream read failed when downloadChunk ", e);
             throw new TException(e);
         }
         byte[] empty = {};
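
Taken together, beginFileDownload and downloadChunk implement a simple pull protocol: the server wraps the file in a BufferFileInputStream under a generated id, and the client polls for chunks until an empty buffer signals end of file. A minimal client-side sketch follows; the connected Nimbus.Iface handle `nimbus`, the file paths, and the local file handling are assumptions for illustration:

    // minimal sketch, assuming `nimbus` is a connected Nimbus.Iface client
    String id = nimbus.beginFileDownload("/remote/path/stormjar.jar");
    FileOutputStream out = new FileOutputStream("local-copy.jar");
    try {
        while (true) {
            ByteBuffer chunk = nimbus.downloadChunk(id);
            if (chunk.remaining() == 0) {
                break; // the server returns an empty buffer at end of file
            }
            out.write(chunk.array(), chunk.arrayOffset() + chunk.position(), chunk.remaining());
        }
    } finally {
        out.close();
    }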
@@ -692,72 +619,49 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
     }
 
     /**
-     * get cluster's summary, it will contain SupervisorSummary and
-     * TopologySummary
-     * 
+     * get the cluster summary; it contains SupervisorSummary and TopologySummary
+     *
      * @return ClusterSummary
      */
     @Override
     public ClusterSummary getClusterInfo() throws TException {
         long start = System.nanoTime();
         try {
-
             StormClusterState stormClusterState = data.getStormClusterState();
 
-            Map<String, Assignment> assignments =
-                    new HashMap<String, Assignment>();
+            Map<String, Assignment> assignments = new HashMap<String, Assignment>();
 
             // get TopologySummary
-            List<TopologySummary> topologySummaries =
-                    NimbusUtils.getTopologySummary(stormClusterState,
-                            assignments);
+            List<TopologySummary> topologySummaries = NimbusUtils.getTopologySummary(stormClusterState, assignments);
 
             // all supervisors
-            Map<String, SupervisorInfo> supervisorInfos =
-                    Cluster.get_all_SupervisorInfo(stormClusterState, null);
+            Map<String, SupervisorInfo> supervisorInfos = Cluster.get_all_SupervisorInfo(stormClusterState, null);
 
             // generate SupervisorSummaries
-            List<SupervisorSummary> supervisorSummaries =
-                    NimbusUtils.mkSupervisorSummaries(supervisorInfos,
-                            assignments);
-
-            NimbusSummary nimbusSummary =
-                    NimbusUtils.getNimbusSummary(stormClusterState,
-                            supervisorSummaries, data);
+            List<SupervisorSummary> supervisorSummaries = NimbusUtils.mkSupervisorSummaries(supervisorInfos, assignments);
 
-            ClusterSummary ret =
-                    new ClusterSummary(nimbusSummary, supervisorSummaries,
-                            topologySummaries);
-
-            return ret;
+            NimbusSummary nimbusSummary = NimbusUtils.getNimbusSummary(stormClusterState, supervisorSummaries, data);
 
+            return new ClusterSummary(nimbusSummary, supervisorSummaries, topologySummaries);
         } catch (TException e) {
             LOG.info("Failed to get ClusterSummary ", e);
             throw e;
         } catch (Exception e) {
             LOG.info("Failed to get ClusterSummary ", e);
             throw new TException(e);
-        }finally {
-            double spend = (System.nanoTime() - start)/1000000.0d;
-            SimpleJStormMetric.updateHistorgram("getClusterInfo", spend);
-            LOG.info("getClusterInfo spend {}ms", spend);
+        } finally {
+            long end = System.nanoTime();
+            SimpleJStormMetric.updateNimbusHistogram("getClusterInfo", (end - start) / TimeUtils.NS_PER_US);
         }
     }
 
     @Override
     public String getVersion() throws TException {
-    	try {
-            return Utils.getVersion();
-	    }catch(Exception e) {
-	            String errMsg = "!!! Binary has been changed, please restart Nimbus !!! ";
-	            LOG.error(errMsg, e);
-	        throw new TException(errMsg, e);
-	    }
+        return Utils.getVersion();
     }
 
     @Override
-    public SupervisorWorkers getSupervisorWorkers(String host)
-            throws NotAliveException, TException {
+    public SupervisorWorkers getSupervisorWorkers(String host) throws NotAliveException, TException {
         long start = System.nanoTime();
         try {
 
@@ -770,15 +674,12 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
             String hostName = NetWorkUtils.ip2Host(host);
 
             // all supervisors
-            Map<String, SupervisorInfo> supervisorInfos =
-                    Cluster.get_all_SupervisorInfo(stormClusterState, null);
+            Map<String, SupervisorInfo> supervisorInfos = Cluster.get_all_SupervisorInfo(stormClusterState, null);
 
-            for (Entry<String, SupervisorInfo> entry : supervisorInfos
-                    .entrySet()) {
+            for (Entry<String, SupervisorInfo> entry : supervisorInfos.entrySet()) {
 
                 SupervisorInfo info = entry.getValue();
-                if (info.getHostName().equals(hostName)
-                        || info.getHostName().equals(ip)) {
+                if (info.getHostName().equals(hostName) || info.getHostName().equals(ip)) {
                     supervisorId = entry.getKey();
                     supervisorInfo = info;
                     break;
@@ -789,29 +690,20 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
                 throw new TException("No supervisor of " + host);
             }
 
-            Map<String, Assignment> assignments =
-                    Cluster.get_all_assignment(stormClusterState, null);
-
-            Map<Integer, WorkerSummary> portWorkerSummarys =
-                    new TreeMap<Integer, WorkerSummary>();
+            Map<String, Assignment> assignments = Cluster.get_all_assignment(stormClusterState, null);
 
-            Map<String, MetricInfo> metricInfoMap =
-                    new HashMap<String, MetricInfo>();
+            Map<Integer, WorkerSummary> portWorkerSummarys = new TreeMap<Integer, WorkerSummary>();
 
             int usedSlotNumber = 0;
 
-            Map<String, Map<Integer, String>> topologyTaskToComponent =
-                    new HashMap<String, Map<Integer, String>>();
+            Map<String, Map<Integer, String>> topologyTaskToComponent = new HashMap<String, Map<Integer, String>>();
 
+            Map<String, MetricInfo> metricInfoMap = new HashMap<String, MetricInfo>();
             for (Entry<String, Assignment> entry : assignments.entrySet()) {
                 String topologyId = entry.getKey();
                 Assignment assignment = entry.getValue();
 
                 Set<ResourceWorkerSlot> workers = assignment.getWorkers();
-                
-                TopologyMetric topologyMetric = data.getMetricRunnable().getTopologyMetric(topologyId);
-                
-
                 for (ResourceWorkerSlot worker : workers) {
                     if (supervisorId.equals(worker.getNodeId()) == false) {
                         continue;
@@ -829,67 +721,52 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
                         portWorkerSummarys.put(port, workerSummary);
                     }
 
-                    Map<Integer, String> taskToComponent =
-                            topologyTaskToComponent.get(topologyId);
+                    Map<Integer, String> taskToComponent = topologyTaskToComponent.get(topologyId);
                     if (taskToComponent == null) {
                         taskToComponent = Cluster.get_all_task_component(stormClusterState, topologyId, null);
-                        topologyTaskToComponent
-                                .put(topologyId, taskToComponent);
+                        topologyTaskToComponent.put(topologyId, taskToComponent);
                     }
 
                     int earliest = TimeUtils.current_time_secs();
                     for (Integer taskId : worker.getTasks()) {
-
                         TaskComponent taskComponent = new TaskComponent();
-                        taskComponent
-                                .set_component(taskToComponent.get(taskId));
+                        taskComponent.set_component(taskToComponent.get(taskId));
                         taskComponent.set_taskId(taskId);
-                        Integer startTime =
-                                assignment.getTaskStartTimeSecs().get(taskId);
+                        Integer startTime = assignment.getTaskStartTimeSecs().get(taskId);
                         if (startTime != null && startTime < earliest) {
                             earliest = startTime;
                         }
 
                         workerSummary.add_to_tasks(taskComponent);
-
                     }
 
                     workerSummary.set_uptime(TimeUtils.time_delta(earliest));
 
-                    if (topologyMetric == null) {
-                        LOG.warn("Failed to get topologyMetric of " + topologyId);
-                        continue;
-                    }
-
-                    String workerSlotName =
-                            TopologyMetricsRunnable.getWorkerSlotName(
-                                    supervisorInfo.getHostName(), port);
-                    if (topologyMetric.get_workerMetric() != null) {
-                        MetricInfo workerMetricInfo =
-                                topologyMetric.get_workerMetric().get(
-                                        workerSlotName);
-
-                        if (workerMetricInfo != null) {
-                            metricInfoMap.put(workerSlotName, workerMetricInfo);
+                    String workerSlotName = getWorkerSlotName(supervisorInfo.getHostName(), port);
+                    List<MetricInfo> workerMetricInfoList = this.data.getMetricCache().getMetricData(topologyId, MetaType.WORKER);
+                    if (workerMetricInfoList.size() > 0) {
+                        MetricInfo workerMetricInfo = workerMetricInfoList.get(0);
+                        // remove metrics that don't belong to current worker
+                        for (Iterator<String> itr = workerMetricInfo.get_metrics().keySet().iterator();
+                             itr.hasNext(); ) {
+                            String metricName = itr.next();
+                            if (!metricName.contains(host)) {
+                                itr.remove();
+                            }
                         }
+                        metricInfoMap.put(workerSlotName, workerMetricInfo);
                     }
                 }
             }
 
-            List<WorkerSummary> wokersList = new ArrayList<WorkerSummary>();
-            wokersList.addAll(portWorkerSummarys.values());
+            List<WorkerSummary> workerList = new ArrayList<WorkerSummary>();
+            workerList.addAll(portWorkerSummarys.values());
 
-            Map<String, Integer> supervisorToUsedSlotNum =
-                    new HashMap<String, Integer>();
+            Map<String, Integer> supervisorToUsedSlotNum = new HashMap<String, Integer>();
             supervisorToUsedSlotNum.put(supervisorId, usedSlotNumber);
-            SupervisorSummary supervisorSummary =
-                    NimbusUtils.mkSupervisorSummary(supervisorInfo,
-                            supervisorId, supervisorToUsedSlotNum);
+            SupervisorSummary supervisorSummary = NimbusUtils.mkSupervisorSummary(supervisorInfo, supervisorId, supervisorToUsedSlotNum);
 
-            SupervisorWorkers ret =
-                    new SupervisorWorkers(supervisorSummary, wokersList,
-                            metricInfoMap);
-            return ret;
+            return new SupervisorWorkers(supervisorSummary, workerList, metricInfoMap);
 
         } catch (TException e) {
             LOG.info("Failed to get SupervisorWorkers ", e);
@@ -897,21 +774,19 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
         } catch (Exception e) {
             LOG.info("Failed to get SupervisorWorkers ", e);
             throw new TException(e);
-        }finally {
-            double spend = (System.nanoTime() - start)/1000000.0d;
-            SimpleJStormMetric.updateHistorgram("getSupervisorWorkers", spend);
-            LOG.info("getSupervisorWorkers, {} spend {} ms", host, spend);
+        } finally {
+            long end = System.nanoTime();
+            SimpleJStormMetric.updateNimbusHistogram("getSupervisorWorkers", (end - start) / TimeUtils.NS_PER_US);
         }
     }
 
     /**
      * Get TopologyInfo; it contains all data about the topology's running status
-     * 
+     *
      * @return TopologyInfo
      */
     @Override
-    public TopologyInfo getTopologyInfo(String topologyId)
-            throws NotAliveException, TException {
+    public TopologyInfo getTopologyInfo(String topologyId) throws NotAliveException, TException {
         long start = System.nanoTime();
         StormClusterState stormClusterState = data.getStormClusterState();
 
@@ -923,49 +798,40 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
                 throw new NotAliveException("No topology of " + topologyId);
             }
 
-            Assignment assignment =
-                    stormClusterState.assignment_info(topologyId, null);
+            Assignment assignment = stormClusterState.assignment_info(topologyId, null);
             if (assignment == null) {
                 throw new NotAliveException("No topology of " + topologyId);
             }
-            
 
-            Map<String, TaskHeartbeat> taskHBMap =
-                    Cluster.get_all_task_heartbeat(stormClusterState,
-                            topologyId);
+            TopologyTaskHbInfo topologyTaskHbInfo = data.getTasksHeartbeat().get(topologyId);
+            Map<Integer, TaskHeartbeat> taskHbMap = null;
+            if (topologyTaskHbInfo != null)
+                taskHbMap = topologyTaskHbInfo.get_taskHbs();
 
             Map<Integer, TaskInfo> taskInfoMap = Cluster.get_all_taskInfo(stormClusterState, topologyId);
             Map<Integer, String> taskToComponent = Cluster.get_all_task_component(stormClusterState, topologyId, taskInfoMap);
             Map<Integer, String> taskToType = Cluster.get_all_task_type(stormClusterState, topologyId, taskInfoMap);
-            
 
-            String errorString = null;
+
+            String errorString;
             if (Cluster.is_topology_exist_error(stormClusterState, topologyId)) {
                 errorString = "Y";
             } else {
                 errorString = "";
             }
-            
+
             TopologySummary topologySummary = new TopologySummary();
             topologySummary.set_id(topologyId);
             topologySummary.set_name(base.getStormName());
-            topologySummary.set_uptime_secs(TimeUtils.time_delta(base
-                    .getLanchTimeSecs()));
-            ;
+            topologySummary.set_uptimeSecs(TimeUtils.time_delta(base.getLanchTimeSecs()));
             topologySummary.set_status(base.getStatusString());
-            topologySummary.set_num_tasks(NimbusUtils
-                    .getTopologyTaskNum(assignment));
-            topologySummary.set_num_workers(assignment.getWorkers().size());
-            topologySummary.set_error_info(errorString);
-
-            Map<String, ComponentSummary> componentSummaryMap =
-                    new HashMap<String, ComponentSummary>();
+            topologySummary.set_numTasks(NimbusUtils.getTopologyTaskNum(assignment));
+            topologySummary.set_numWorkers(assignment.getWorkers().size());
+            topologySummary.set_errorInfo(errorString);
 
-            HashMap<String, List<Integer>> componentToTasks =
-                    JStormUtils.reverse_map(taskToComponent);
-
-            for (Entry<String, List<Integer>> entry : componentToTasks
-                    .entrySet()) {
+            Map<String, ComponentSummary> componentSummaryMap = new HashMap<String, ComponentSummary>();
+            HashMap<String, List<Integer>> componentToTasks = JStormUtils.reverse_map(taskToComponent);
+            for (Entry<String, List<Integer>> entry : componentToTasks.entrySet()) {
                 String name = entry.getKey();
                 List<Integer> taskIds = entry.getValue();
                 if (taskIds == null || taskIds.size() == 0) {
@@ -979,43 +845,46 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
                 componentSummary.set_name(name);
                 componentSummary.set_type(taskToType.get(taskIds.get(0)));
                 componentSummary.set_parallel(taskIds.size());
-                componentSummary.set_task_ids(taskIds);
+                componentSummary.set_taskIds(taskIds);
             }
 
-            Map<Integer, TaskSummary> taskSummaryMap =
-                    new TreeMap<Integer, TaskSummary>();
-            Map<Integer, List<TaskError> > taskErrors = Cluster.get_all_task_errors(
-            		stormClusterState, topologyId);
+            Map<Integer, TaskSummary> taskSummaryMap = new TreeMap<Integer, TaskSummary>();
+            Map<Integer, List<TaskError>> taskErrors = Cluster.get_all_task_errors(stormClusterState, topologyId);
 
             for (Integer taskId : taskInfoMap.keySet()) {
                 TaskSummary taskSummary = new TaskSummary();
                 taskSummaryMap.put(taskId, taskSummary);
 
-                taskSummary.set_task_id(taskId);
-                TaskHeartbeat hb = taskHBMap.get(String.valueOf(taskId));
-                if (hb == null) {
+                taskSummary.set_taskId(taskId);
+                if (taskHbMap == null) {
                     taskSummary.set_status("Starting");
                     taskSummary.set_uptime(0);
                 } else {
-                    taskSummary.set_status("ACTIVE");
-                    taskSummary.set_uptime(hb.getUptimeSecs());
+                    TaskHeartbeat hb = taskHbMap.get(taskId);
+                    if (hb == null) {
+                        taskSummary.set_status("Starting");
+                        taskSummary.set_uptime(0);
+                    } else {
+                        boolean isInactive = NimbusUtils.isTaskDead(data, topologyId, taskId);
+                        if (isInactive)
+                            taskSummary.set_status("INACTIVE");
+                        else
+                            taskSummary.set_status("ACTIVE");
+                        taskSummary.set_uptime(hb.get_uptime());
+                    }
                 }
 
                 if (StringUtils.isBlank(errorString)) {
-                	continue;
+                    continue;
                 }
+
                 List<TaskError> taskErrorList = taskErrors.get(taskId);
                 if (taskErrorList != null && taskErrorList.size() != 0) {
                     for (TaskError taskError : taskErrorList) {
-                        ErrorInfo errorInfo =
-                                new ErrorInfo(taskError.getError(),
-                                        taskError.getTimSecs());
-
+                        ErrorInfo errorInfo = new ErrorInfo(taskError.getError(), taskError.getTimSecs());
                         taskSummary.add_to_errors(errorInfo);
-
                         String component = taskToComponent.get(taskId);
-                        componentSummaryMap.get(component).add_to_errors(
-                                errorInfo);
+                        componentSummaryMap.get(component).add_to_errors(errorInfo);
                     }
                 }
             }
@@ -1033,14 +902,38 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
 
             TopologyInfo topologyInfo = new TopologyInfo();
             topologyInfo.set_topology(topologySummary);
-            topologyInfo.set_components(JStormUtils.mk_list(componentSummaryMap
-                    .values()));
-            topologyInfo
-                    .set_tasks(JStormUtils.mk_list(taskSummaryMap.values()));
-            topologyInfo.set_metrics(data.getMetricRunnable()
-                    .getTopologyMetric(topologyId));
-
-            
+            topologyInfo.set_components(JStormUtils.mk_list(componentSummaryMap.values()));
+            topologyInfo.set_tasks(JStormUtils.mk_list(taskSummaryMap.values()));
+
+            // return topology metric & component metric only
+            List<MetricInfo> tpMetricList = data.getMetricCache().getMetricData(topologyId, MetaType.TOPOLOGY);
+            List<MetricInfo> compMetricList = data.getMetricCache().getMetricData(topologyId, MetaType.COMPONENT);
+            List<MetricInfo> workerMetricList = data.getMetricCache().getMetricData(topologyId, MetaType.WORKER);
+            MetricInfo taskMetric = MetricUtils.mkMetricInfo();
+            MetricInfo streamMetric = MetricUtils.mkMetricInfo();
+            MetricInfo nettyMetric = MetricUtils.mkMetricInfo();
+            MetricInfo tpMetric, compMetric, workerMetric;
+
+            if (tpMetricList == null || tpMetricList.size() == 0) {
+                tpMetric = MetricUtils.mkMetricInfo();
+            } else {
+                // get the most recent (last-minute) topology metric
+                tpMetric = tpMetricList.get(tpMetricList.size() - 1);
+            }
+            if (compMetricList == null || compMetricList.size() == 0) {
+                compMetric = MetricUtils.mkMetricInfo();
+            } else {
+                compMetric = compMetricList.get(0);
+            }
+            if (workerMetricList == null || workerMetricList.size() == 0) {
+                workerMetric = MetricUtils.mkMetricInfo();
+            } else {
+                workerMetric = workerMetricList.get(0);
+            }
+            TopologyMetric topologyMetrics = new TopologyMetric(tpMetric, compMetric, workerMetric,
+                    taskMetric, streamMetric, nettyMetric);
+            topologyInfo.set_metrics(topologyMetrics);
+
             return topologyInfo;
         } catch (TException e) {
             LOG.info("Failed to get topologyInfo " + topologyId, e);
@@ -1048,19 +941,15 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
         } catch (Exception e) {
             LOG.info("Failed to get topologyInfo " + topologyId, e);
             throw new TException("Failed to get topologyInfo " + topologyId);
-        }finally {
+        } finally {
             long end = System.nanoTime();
-            double spend = (end - start)/1000000.0d;
-            SimpleJStormMetric.updateHistorgram("getTopologyInfo", spend);
-            LOG.info("Finish getTopologyInfo {}, spend {} ms", topologyId, spend);
+            SimpleJStormMetric.updateNimbusHistogram("getTopologyInfo", (end - start) / TimeUtils.NS_PER_US);
         }
 
     }
 
     @Override
-    public TopologyInfo getTopologyInfoByName(String topologyName)
-            throws NotAliveException, TException {
-
+    public TopologyInfo getTopologyInfoByName(String topologyName) throws NotAliveException, TException {
         String topologyId = getTopologyId(topologyName);
         return getTopologyInfo(topologyId);
 
@@ -1069,8 +958,7 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
     @Override
     public String getNimbusConf() throws TException {
         try {
-            String ret = JStormUtils.to_json(data.getConf());
-            return ret;
+            return JStormUtils.to_json(data.getConf());
         } catch (Exception e) {
             String err = "Failed to generate Nimbus configuration";
             LOG.error(err, e);
@@ -1080,20 +968,17 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
 
     /**
      * get topology configuration
-     * 
+     *
      * @param id String: topology id
      * @return String
      */
     @Override
-    public String getTopologyConf(String id) throws NotAliveException,
-            TException {
+    public String getTopologyConf(String id) throws NotAliveException, TException {
         String rtn;
         try {
-            Map<Object, Object> topologyConf =
-                    StormConfig.read_nimbus_topology_conf(conf, id);
+            Map<Object, Object> topologyConf = StormConfig.read_nimbus_topology_conf(conf, id);
             rtn = JStormUtils.to_json(topologyConf);
         } catch (IOException e) {
-            // TODO Auto-generated catch block
             LOG.info("Failed to get configuration of " + id, e);
             throw new TException(e);
         }
@@ -1101,15 +986,12 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
     }
 
     @Override
-    public String getTopologyId(String topologyName) throws NotAliveException,
-            TException {
-        // TODO Auto-generated method stub
+    public String getTopologyId(String topologyName) throws NotAliveException, TException {
         StormClusterState stormClusterState = data.getStormClusterState();
 
         try {
             // get all active topology's StormBase
-            String topologyId =
-                    Cluster.get_topology_id(stormClusterState, topologyName);
+            String topologyId = Cluster.get_topology_id(stormClusterState, topologyName);
             if (topologyId != null) {
                 return topologyId;
             }
@@ -1125,24 +1007,20 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
 
     /**
      * get StormTopology by deserializing local files
-     * 
+     *
      * @param id String: topology id
      * @return StormTopology
      */
     @Override
-    public StormTopology getTopology(String id) throws NotAliveException,
-            TException {
-        StormTopology topology = null;
+    public StormTopology getTopology(String id) throws NotAliveException, TException {
+        StormTopology topology;
         try {
-            StormTopology stormtopology =
-                    StormConfig.read_nimbus_topology_code(conf, id);
+            StormTopology stormtopology = StormConfig.read_nimbus_topology_code(conf, id);
             if (stormtopology == null) {
                 throw new NotAliveException("No topology of " + id);
             }
 
-            Map<Object, Object> topologyConf =
-                    (Map<Object, Object>) StormConfig
-                            .read_nimbus_topology_conf(conf, id);
+            Map<Object, Object> topologyConf = (Map<Object, Object>) StormConfig.read_nimbus_topology_conf(conf, id);
 
             topology = Common.system_topology(topologyConf, stormtopology);
         } catch (Exception e) {
@@ -1153,12 +1031,10 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
     }
 
     @Override
-    public StormTopology getUserTopology(String id) throws NotAliveException,
-            TException {
+    public StormTopology getUserTopology(String id) throws NotAliveException, TException {
         StormTopology topology = null;
         try {
-            StormTopology stormtopology =
-                    StormConfig.read_nimbus_topology_code(conf, id);
+            StormTopology stormtopology = StormConfig.read_nimbus_topology_code(conf, id);
             if (stormtopology == null) {
                 throw new NotAliveException("No topology of " + id);
             }
@@ -1173,35 +1049,32 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
 
     /**
      * check whether the topology's active state matches bActive
-     * 
+     *
      * @param nimbus
      * @param topologyName
      * @param bActive
      * @throws Exception
      */
-    public void checkTopologyActive(NimbusData nimbus, String topologyName,
-            boolean bActive) throws Exception {
+    public void checkTopologyActive(NimbusData nimbus, String topologyName, boolean bActive) throws Exception {
         if (isTopologyActive(nimbus.getStormClusterState(), topologyName) != bActive) {
             if (bActive) {
                 throw new NotAliveException(topologyName + " is not alive");
             } else {
-                throw new AlreadyAliveException(topologyName
-                        + " is already active");
+                throw new AlreadyAliveException(topologyName + " is already active");
             }
         }
     }
 
     /**
      * whether the topology is active by topology name
-     * 
+     *
      * @param stormClusterState see Cluster_clj
      * @param topologyName
      * @return true if the topology is active, otherwise return
-     *         false;
+     * false;
      * @throws Exception
      */
-    public boolean isTopologyActive(StormClusterState stormClusterState,
-            String topologyName) throws Exception {
+    public boolean isTopologyActive(StormClusterState stormClusterState, String topologyName) throws Exception {
         boolean rtn = false;
         if (Cluster.get_topology_id(stormClusterState, topologyName) != null) {
             rtn = true;
@@ -1213,7 +1086,7 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
      * create local topology files /local-dir/nimbus/topologyId/stormjar.jar
      * /local-dir/nimbus/topologyId/stormcode.ser
      * /local-dir/nimbus/topologyId/stormconf.ser
-     * 
+     *
      * @param conf
      * @param topologyId
      * @param tmpJarLocation
@@ -1221,9 +1094,8 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
      * @param topology
      * @throws IOException
      */
-    private void setupStormCode(Map<Object, Object> conf, String topologyId,
-            String tmpJarLocation, Map<Object, Object> stormConf,
-            StormTopology topology) throws IOException {
+    private void setupStormCode(Map<Object, Object> conf, String topologyId, String tmpJarLocation, Map<Object, Object> stormConf, StormTopology topology)
+            throws IOException {
         // local-dir/nimbus/stormdist/topologyId
         String stormroot = StormConfig.masterStormdistRoot(conf, topologyId);
 
@@ -1234,18 +1106,16 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
         setupJar(conf, tmpJarLocation, stormroot);
 
         // serialize to file /local-dir/nimbus/topologyId/stormcode.ser
-        FileUtils.writeByteArrayToFile(
-                new File(StormConfig.stormcode_path(stormroot)),
-                Utils.serialize(topology));
+        FileUtils.writeByteArrayToFile(new File(StormConfig.stormcode_path(stormroot)), Utils.serialize(topology));
 
         // serialize to file /local-dir/nimbus/topologyId/stormconf.ser
-        FileUtils.writeByteArrayToFile(
-                new File(StormConfig.stormconf_path(stormroot)),
-                Utils.serialize(stormConf));
+        FileUtils.writeByteArrayToFile(new File(StormConfig.stormconf_path(stormroot)), Utils.serialize(stormConf));
+
+        // Update downloadCode timeStamp
+        StormConfig.write_nimbus_topology_timestamp(data.getConf(), topologyId, System.currentTimeMillis());
     }
 
-    private boolean copyLibJars(String tmpJarLocation, String stormroot)
-            throws IOException {
+    private boolean copyLibJars(String tmpJarLocation, String stormroot) throws IOException {
         String srcLibPath = StormConfig.stormlib_path(tmpJarLocation);
         String destLibPath = StormConfig.stormlib_path(stormroot);
         LOG.info("Begin to copy from " + srcLibPath + " to " + destLibPath);
@@ -1265,14 +1135,13 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
 
     /**
      * Copy jar to /local-dir/nimbus/topologyId/stormjar.jar
-     * 
+     *
      * @param conf
      * @param tmpJarLocation
      * @param stormroot
      * @throws IOException
      */
-    private void setupJar(Map<Object, Object> conf, String tmpJarLocation,
-            String stormroot) throws IOException {
+    private void setupJar(Map<Object, Object> conf, String tmpJarLocation, String stormroot) throws IOException {
         if (!StormConfig.local_mode(conf)) {
             boolean existLibs = copyLibJars(tmpJarLocation, stormroot);
 
@@ -1287,8 +1156,7 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
 
             if (jarPath == null) {
                 if (existLibs == false) {
-                    throw new IllegalArgumentException("No jar under "
-                            + tmpJarLocation);
+                    throw new IllegalArgumentException("No jar under " + tmpJarLocation);
                 } else {
                     LOG.info("No submit jar");
                     return;
@@ -1297,86 +1165,71 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
 
             File srcFile = new File(jarPath);
             if (!srcFile.exists()) {
-                throw new IllegalArgumentException(jarPath + " to copy to "
-                        + stormroot + " does not exist!");
+                throw new IllegalArgumentException(jarPath + " to copy to " + stormroot + " does not exist!");
             }
 
             String path = StormConfig.stormjar_path(stormroot);
             File destFile = new File(path);
             FileUtils.copyFile(srcFile, destFile);
             srcFile.delete();
-
-            return;
         }
     }
 
     /**
      * generate TaskInfo for every bolt or spout in ZK /ZK/tasks/topologyId/xxx
-     * 
+     *
      * @param conf
      * @param topologyId
      * @param stormClusterState
      * @throws Exception
      */
-    public void setupZkTaskInfo(Map<Object, Object> conf, String topologyId,
-            StormClusterState stormClusterState) throws Exception {
+    public void setupZkTaskInfo(Map<Object, Object> conf, String topologyId, StormClusterState stormClusterState) throws Exception {
+        Map<Integer, TaskInfo> taskToTaskInfo = mkTaskComponentAssignments(conf, topologyId);
 
         // mkdir /ZK/taskbeats/topologyId
-        stormClusterState.setup_heartbeats(topologyId);
+        int masterId = NimbusUtils.getTopologyMasterId(taskToTaskInfo);
+        TopologyTaskHbInfo topoTaskHbinfo = new TopologyTaskHbInfo(topologyId, masterId);
+        data.getTasksHeartbeat().put(topologyId, topoTaskHbinfo);
+        stormClusterState.topology_heartbeat(topologyId, topoTaskHbinfo);
 
-        Map<Integer, TaskInfo> taskToTaskInfo =
-                mkTaskComponentAssignments(conf, topologyId);
         if (taskToTaskInfo == null || taskToTaskInfo.size() == 0) {
             throw new InvalidTopologyException("Failed to generate TaskIDs map");
         }
-            // key is taskid, value is taskinfo
+        // key is taskid, value is taskinfo
         stormClusterState.set_task(topologyId, taskToTaskInfo);
     }
 
     /**
      * generate a taskid(Integer) for every task
-     * 
+     *
      * @param conf
      * @param topologyid
      * @return Map<Integer, String>: from taskid to componentid
      * @throws IOException
      * @throws InvalidTopologyException
      */
-    public Map<Integer, TaskInfo> mkTaskComponentAssignments(
-            Map<Object, Object> conf, String topologyid) throws IOException,
-            InvalidTopologyException {
+    public Map<Integer, TaskInfo> mkTaskComponentAssignments(Map<Object, Object> conf, String topologyid) throws IOException, InvalidTopologyException {
 
         // @@@ a minor issue here:
         // we could pass stormConf directly in from the Submit method
-        Map<Object, Object> stormConf =
-                StormConfig.read_nimbus_topology_conf(conf, topologyid);
-
-        StormTopology stopology =
-                StormConfig.read_nimbus_topology_code(conf, topologyid);
-
+        Map<Object, Object> stormConf = StormConfig.read_nimbus_topology_conf(conf, topologyid);
+        StormTopology stopology = StormConfig.read_nimbus_topology_code(conf, topologyid);
         StormTopology topology = Common.system_topology(stormConf, stopology);
 
-
         return Common.mkTaskInfo(stormConf, topology, topologyid);
     }
 
-    
-
     @Override
-    public void metricMonitor(String topologyName, MonitorOptions options)
-            throws NotAliveException, TException {
+    public void metricMonitor(String topologyName, MonitorOptions options) throws TException {
         boolean isEnable = options.is_isEnable();
         StormClusterState clusterState = data.getStormClusterState();
 
         try {
-            String topologyId =
-                    Cluster.get_topology_id(clusterState, topologyName);
+            String topologyId = Cluster.get_topology_id(clusterState, topologyName);
             if (null != topologyId) {
                 clusterState.set_storm_monitor(topologyId, isEnable);
             } else {
-                throw new NotAliveException(
-                        "Failed to update metricsMonitor status as "
-                                + topologyName + " is not alive");
+                throw new NotAliveException("Failed to update metricsMonitor status as " + topologyName + " is not alive");
             }
         } catch (Exception e) {
             String errMsg = "Failed to update metricsMonitor " + topologyName;
@@ -1387,137 +1240,263 @@ public class ServiceHandler implements Iface, Shutdownable, DaemonCommon {
     }
 
     @Override
-    public TopologyMetric getTopologyMetric(String topologyId)
-            throws NotAliveException, TException {
-        LOG.debug("Nimbus service handler, getTopologyMetric, topology ID: "
-                + topologyId);
+    public TopologyMetric getTopologyMetrics(String topologyId) throws TException {
+        LOG.debug("Nimbus service handler, getTopologyMetrics, topology ID: " + topologyId);
         long start = System.nanoTime();
         try {
-            TopologyMetric metric =
-                    data.getMetricRunnable().getTopologyMetric(topologyId);
-    
-            return metric;
-        }finally {
-            double spend = ( System.nanoTime()- start)/1000000.0d;;
-            SimpleJStormMetric.updateHistorgram("getTopologyMetric", spend);
-            LOG.info("getTopologyMetric, {}:{}", topologyId, spend);
+            return data.getMetricRunnable().getTopologyMetric(topologyId);
+        } finally {
+            long end = System.nanoTime();
+            SimpleJStormMetric.updateNimbusHistogram("getTopologyMetric", (end - start) / TimeUtils.NS_PER_US);
         }
     }
 
     @Override
-    public void workerUploadMetric(WorkerUploadMetrics uploadMetrics)
-            throws TException {
-        // TODO Auto-generated method stub
-        LOG.debug("!!!!!!! workerUploadMetric:{}:{}:{} ", uploadMetrics.get_topology_id(),
-                uploadMetrics.get_supervisor_id(), uploadMetrics.get_port());
+    public void uploadTopologyMetrics(String topologyId, TopologyMetric uploadMetrics) throws TException {
+        LOG.info("Received topology metrics:{}", topologyId);
 
-        TopologyMetricsRunnable.Update event =
-                new TopologyMetricsRunnable.Update();
-        event.workerMetrics = uploadMetrics;
+        Update event = new Update();
+        event.timestamp = System.currentTimeMillis();
+        event.topologyMetrics = uploadMetrics;
+        event.topologyId = topologyId;
 
         data.getMetricRunnable().pushEvent(event);
     }
 
+    @Override
+    public Map<String, Long> registerMetrics(String topologyId, Set<String> metrics) throws TException {
+        try {
+            return data.getMetricRunnable().registerMetrics(topologyId, metrics);
+        } catch (Exception ex) {
+            return null;
+        }
+    }
+
     public void uploadNewCredentials(String topologyName, Credentials creds) {
-        // TODO Auto-generated method stub
-        
     }
 
     @Override
-    public NettyMetric getNettyMetric(String topologyName, int pos) throws TException {
-        // TODO Auto-generated method stub
-        long start = System.nanoTime();
-        try {
-            String topologyId = getTopologyId(topologyName);
-            
-            if (pos < 0) {
-                LOG.warn("Invalid pos {}, set it as 0", pos);
-                pos = 0;
-            }
-            SortedMap<String, MetricInfo> allConnections = data.getMetricRunnable().getNettyMetric(topologyId);
-            int mapSize = allConnections.size();
-            
-            NettyMetric ret = new NettyMetric();
-            
-            ret.set_connectionNum(mapSize);
-            
-            
-            Map<String, MetricInfo> selectConnections = new TreeMap<String, MetricInfo>();
-            ret.set_connections(selectConnections);
-            int i = 0;
-            int selectMapSize = 0;
-            for (Entry<String, MetricInfo> entry: allConnections.entrySet()) {
-                i++;
-                if (i <= pos) {
-                    continue;
+    public List<MetricInfo> getMetrics(String topologyId, int type) throws TException {
+        MetaType metaType = MetaType.parse(type);
+        return data.getMetricCache().getMetricData(topologyId, metaType);
+    }
+
+    @Override
+    public MetricInfo getNettyMetrics(String topologyId) throws TException {
+        List<MetricInfo> metricInfoList = data.getMetricCache().getMetricData(topologyId, MetaType.NETTY);
+        if (metricInfoList != null && metricInfoList.size() > 0) {
+            return metricInfoList.get(0);
+        }
+        return new MetricInfo();
+    }
+
+    @Override
+    public MetricInfo getNettyMetricsByHost(String topologyId, String host) throws TException {
+        MetricInfo ret = new MetricInfo();
+
+        List<MetricInfo> metricInfoList = data.getMetricCache().getMetricData(topologyId, MetaType.NETTY);
+        if (metricInfoList != null && metricInfoList.size() > 0) {
+            MetricInfo metricInfo = metricInfoList.get(0);
+            for (Entry<String, Map<Integer, MetricSnapshot>> metricEntry : metricInfo.get_metrics().entrySet()) {
+                String metricName = metricEntry.getKey();
+                Map<Integer, MetricSnapshot> data = metricEntry.getValue();
+                if (metricName.contains(host)) {
+                    ret.put_to_metrics(metricName, data);
                 }
-                
-                selectConnections.put(entry.getKey(), entry.getValue());
-                selectMapSize++;
-                if (selectMapSize >= MetricDef.NETTY_METRICS_PACKAGE_SIZE) {
-                    break;
+            }
+        }
+
+        LOG.info("getNettyMetricsByHost, total size:{}", ret.get_metrics_size());
+        return ret;
+    }
+
+    @Override
+    public int getNettyMetricSizeByHost(String topologyId, String host) throws TException {
+        return getNettyMetricsByHost(topologyId, host).get_metrics_size();
+    }
+
+    @Override
+    public MetricInfo getPagingNettyMetrics(String topologyId, String host, int page) throws TException {
+        MetricInfo ret = new MetricInfo();
+
+        int start = (page - 1) * MetricUtils.NETTY_METRIC_PAGE_SIZE;
+        int end = page * MetricUtils.NETTY_METRIC_PAGE_SIZE;
+        int cur = -1;
+        List<MetricInfo> metricInfoList = data.getMetricCache().getMetricData(topologyId, MetaType.NETTY);
+        if (metricInfoList != null && metricInfoList.size() > 0) {
+            MetricInfo metricInfo = metricInfoList.get(0);
+            for (Entry<String, Map<Integer, MetricSnapshot>> metricEntry : metricInfo.get_metrics().entrySet()) {
+                String metricName = metricEntry.getKey();
+                Map<Integer, MetricSnapshot> data = metricEntry.getValue();
+                if (metricName.contains(host)) {
+                    ++cur;
+                    if (cur >= start && cur < end) {
+                        ret.put_to_metrics(metricName, data);
+                    }
+                    if (cur >= end) {
+                        break;
+                    }
                 }
             }
-                       
-            return ret;
-        }finally {
-            double spend = (System.nanoTime() - start)/1000000.0d;
-            SimpleJStormMetric.updateHistorgram("getNettyMetric", spend );
-            LOG.info("getNettyMetric, {}:{} ms", topologyName, spend);
         }
+
+        LOG.info("getPagingNettyMetrics, total size:{}", ret.get_metrics_size());
+        return ret;
     }
 
+    @Override
+    public MetricInfo getTaskMetrics(String topologyId, String component) throws TException {
+        List<MetricInfo> taskMetricList = getMetrics(topologyId, MetaType.TASK.getT());
+        if (taskMetricList != null && taskMetricList.size() > 0) {
+            MetricInfo metricInfo = taskMetricList.get(0);
+            Map<String, Map<Integer, MetricSnapshot>> metrics = metricInfo.get_metrics();
+            for (Iterator<String> itr = metrics.keySet().iterator(); itr.hasNext(); ) {
+                String metricName = itr.next();
+                String[] parts = metricName.split(MetricUtils.DELIM);
+                if (parts.length < 7 || !parts[2].equals(component)) {
+                    itr.remove();
+                }
+            }
+            LOG.info("taskMetric, total size:{}", metricInfo.get_metrics_size());
+            return metricInfo;
+        }
+        return MetricUtils.mkMetricInfo();
+    }
 
     @Override
-    public NettyMetric getServerNettyMetric(String topologyName, String serverName) throws TException {
-        // TODO Auto-generated method stub
-        long start = System.nanoTime();
-        try {
-            String topologyId = getTopologyId(topologyName);
-            
-            SortedMap<String, MetricInfo> allConnections = data.getMetricRunnable().getNettyMetric(topologyId);
-            int mapSize = allConnections.size();
-            
-            NettyMetric ret = new NettyMetric();
-            
-            String serverIp = NetWorkUtils.host2Ip(serverName);    
-            Map<String, MetricInfo> selectConnections = new TreeMap<String, MetricInfo>();
-            for (Entry<String, MetricInfo> entry: allConnections.entrySet()) {
-                if (entry.getKey().contains(serverIp)) {
-                    selectConnections.put(entry.getKey(), entry.getValue());
+    public List<MetricInfo> getTaskAndStreamMetrics(String topologyId, int taskId) throws TException {
+        List<MetricInfo> taskMetricList = getMetrics(topologyId, MetaType.TASK.getT());
+        List<MetricInfo> streamMetricList = getMetrics(topologyId, MetaType.STREAM.getT());
+
+        String taskIdStr = taskId + "";
+        MetricInfo taskMetricInfo;
+        if (taskMetricList != null && taskMetricList.size() > 0) {
+            taskMetricInfo = taskMetricList.get(0);
+            Map<String, Map<Integer, MetricSnapshot>> metrics = taskMetricInfo.get_metrics();
+            for (Iterator<String> itr = metrics.keySet().iterator(); itr.hasNext(); ) {
+                String metricName = itr.next();
+                String[] parts = metricName.split(MetricUtils.DELIM);
+                if (parts.length < 7 || !parts[3].equals(taskIdStr)) {
+                    itr.remove();
                 }
-                
             }
-             ret.set_connectionNum(selectConnections.size());
-             ret.set_connections(selectConnections);
-            return ret;
-        }finally {
-            double spend = (System.nanoTime()- start)/1000000.0d;
-            SimpleJStormMetric.updateHistorgram("getNettyMetric", spend);
-            LOG.info("getServerNettyMetric, {} : {}ms", topologyName, spend);
+        } else {
+            taskMetricInfo = MetricUtils.mkMetricInfo();
         }
+
+        MetricInfo streamMetricInfo;
+        if (streamMetricList != null && streamMetricList.size() > 0) {
+            streamMetricInfo = streamMetricList.get(0);
+            Map<String, Map<Integer, MetricSnapshot>> metrics = streamMetricInfo.get_metrics();
+            for (Iterator<String> itr = metrics.keySet().iterator(); itr.hasNext(); ) {
+                String metricName = itr.next();
+                String[] parts = metricName.split(MetricUtils.DELIM);
+                if (parts.length < 7 || !parts[3].equals(taskIdStr)) {
+                    itr.remove();
+                }
+            }
+        } else {
+            streamMetricInfo = MetricUtils.mkMetricInfo();
+        }
+        return Lists.newArrayList(taskMetricInfo, streamMetricInfo);
     }
 
     @Override
-    public void updateConf(String name, String conf) throws NotAliveException,
-            InvalidTopologyException, TException {
+    public List<MetricInfo> getSummarizedTopologyMetrics(String topologyId) throws TException {
+        return data.getMetricCache().getMetricData(topologyId, MetaType.TOPOLOGY);
+    }
+
+    @Override
+    public void updateTopology(String name, String uploadedLocation, String updateConf)
+            throws NotAliveException, InvalidTopologyException, TException {
         try {
             checkTopologyActive(data, name, true);
 
+            String topologyId = null;
+            StormClusterState stormClusterState = data.getStormClusterState();
+            topologyId = Cluster.get_topology_id(stormClusterState, name);
+            if (topologyId == null) {
+                throw new NotAliveException(name);
+            }
+            if (uploadedLocation != null) {
+                String stormroot = StormConfig.masterStormdistRoot(conf, topologyId);
+
+                int lastIndexOf = uploadedLocation.lastIndexOf("/");
+                // /local-dir/nimbus/inbox/xxxx/
+                String tmpDir = uploadedLocation.substring(0, lastIndexOf);
+
+                // /local-dir/nimbus/inbox/xxxx/stormjar.jar
+                String stormJarPath = StormConfig.stormjar_path(tmpDir);
+
+                File file = new File(uploadedLocation);
+                if (file.exists()) {
+                    file.renameTo(new File(stormJarPath));
+                } else {
+                    throw new FileNotFoundException("Source '" + uploadedLocation + "' does not exist");
+                }
+                // move fileDir to /local-dir/nimbus/topologyid/
+                File srcDir = new File(tmpDir);
+                File destDir = new File(stormroot);
+                try {
+                    FileUtils.moveDirectory(srcDir, destDir);
+                } catch (FileExistsException e) {
+                    FileUtils.copyDirectory(srcDir, destDir);
+                    FileUtils.deleteQuietly(srcDir);
+                }
+                // Update downloadCode timeStamp
+                StormConfig.write_nimbus_topology_timestamp(data.getConf(), topologyId, System.currentTimeMillis());
+                LOG.info("update jar of " + name + " successfully");
+            }
+
+            Map topoConf = StormConfig.read_nimbus_topology_conf(data.getConf(), topologyId);
             Map<Object, Object> config =
-                    (Map<Object, Object>) JStormUtils.from_json(conf);
+                    (Map<Object, Object>) JStormUtils.from_json(updateConf);
+            topoConf.putAll(config);
+            StormConfig.write_nimbus_topology_conf(data.getConf(), topologyId, topoConf);
 
+            LOG.info("update topology " + name + " successfully");
             NimbusUtils.transitionName(data, name, true,
-                    StatusType.update_conf, config);
+                    StatusType.update_topology, config);
+
         } catch (NotAliveException e) {
-            String errMsg = "Rebalance Error, no this topology " + name;
+            String errMsg = "Error, no such topology " + name;
             LOG.error(errMsg, e);
             throw new NotAliveException(errMsg);
         } catch (Exception e) {
-            String errMsg = "Failed to rebalance topology " + name;
+            String errMsg = "Failed to update topology " + name;
             LOG.error(errMsg, e);
             throw new TException(errMsg);
         }
+
+    }
+
+    @Override
+    public void updateTaskHeartbeat(TopologyTaskHbInfo taskHbs) throws TException {
+        String topologyId = taskHbs.get_topologyId();
+        Integer topologyMasterId = taskHbs.get_topologyMasterId();
+        TopologyTaskHbInfo nimbusTaskHbs = data.getTasksHeartbeat().get(topologyId);
+
+        if (nimbusTaskHbs == null) {
+            nimbusTaskHbs = new TopologyTaskHbInfo(topologyId, topologyMasterId);
+            data.getTasksHeartbeat().put(topologyId, nimbusTaskHbs);
+        }
+
+        Map<Integer, TaskHeartbeat> nimbusTaskHbMap = nimbusTaskHbs.get_taskHbs();
+        if (nimbusTaskHbMap == null) {
+            nimbusTaskHbMap = new ConcurrentHashMap<Integer, TaskHeartbeat>();
+            nimbusTaskHbs.set_taskHbs(nimbusTaskHbMap);
+        }
         
+        Map<Integer, TaskHeartbeat> taskHbMap = taskHbs.get_taskHbs();
+        if (taskHbMap != null) {
+            for (Entry<Integer, TaskHeartbeat> entry : taskHbMap.entrySet()) {
+                nimbusTaskHbMap.put(entry.getKey(), entry.getValue());
+            }
+        }
     }
 }
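
For reference, the new getPagingNettyMetrics API above slices a host's netty
metrics into fixed-size pages on the server side. A minimal client-side
sketch; the Thrift client variable name and the last-page heuristic are
assumptions, not part of this commit:

    // page numbering starts at 1, matching (page - 1) * NETTY_METRIC_PAGE_SIZE above
    int page = 1;
    while (true) {
        MetricInfo batch = client.getPagingNettyMetrics(topologyId, host, page++);
        // consume batch.get_metrics() here
        if (batch.get_metrics_size() < MetricUtils.NETTY_METRIC_PAGE_SIZE) {
            break; // a short (or empty) page means there are no further pages
        }
    }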


http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/metric/LoggingMetricsConsumer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/metric/LoggingMetricsConsumer.java b/jstorm-core/src/main/java/backtype/storm/metric/LoggingMetricsConsumer.java
index c1c7c0a..840a45d 100755
--- a/jstorm-core/src/main/java/backtype/storm/metric/LoggingMetricsConsumer.java
+++ b/jstorm-core/src/main/java/backtype/storm/metric/LoggingMetricsConsumer.java
@@ -45,28 +45,26 @@ public class LoggingMetricsConsumer implements IMetricsConsumer {
     public static final Logger LOG = LoggerFactory.getLogger(LoggingMetricsConsumer.class);
 
     @Override
-    public void prepare(Map stormConf, Object registrationArgument, TopologyContext context, IErrorReporter errorReporter) { }
+    public void prepare(Map stormConf, Object registrationArgument, TopologyContext context, IErrorReporter errorReporter) {
+    }
 
     static private String padding = "                       ";
 
     @Override
     public void handleDataPoints(TaskInfo taskInfo, Collection<DataPoint> dataPoints) {
         StringBuilder sb = new StringBuilder();
-        String header = String.format("%d\t%15s:%-4d\t%3d:%-11s\t",
-            taskInfo.timestamp,
-            taskInfo.srcWorkerHost, taskInfo.srcWorkerPort,
-            taskInfo.srcTaskId,
-            taskInfo.srcComponentId);
+        String header =
+                String.format("%d\t%15s:%-4d\t%3d:%-11s\t", taskInfo.timestamp, taskInfo.srcWorkerHost, taskInfo.srcWorkerPort, taskInfo.srcTaskId,
+                        taskInfo.srcComponentId);
         sb.append(header);
         for (DataPoint p : dataPoints) {
             sb.delete(header.length(), sb.length());
-            sb.append(p.name)
-                .append(padding).delete(header.length()+23,sb.length()).append("\t")
-                .append(p.value);
+            sb.append(p.name).append(padding).delete(header.length() + 23, sb.length()).append("\t").append(p.value);
             LOG.info(sb.toString());
         }
     }
 
     @Override
-    public void cleanup() { }
+    public void cleanup() {
+    }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/metric/MetricsConsumerBolt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/metric/MetricsConsumerBolt.java b/jstorm-core/src/main/java/backtype/storm/metric/MetricsConsumerBolt.java
index d8eb3bf..afbc7da 100755
--- a/jstorm-core/src/main/java/backtype/storm/metric/MetricsConsumerBolt.java
+++ b/jstorm-core/src/main/java/backtype/storm/metric/MetricsConsumerBolt.java
@@ -41,18 +41,18 @@ public class MetricsConsumerBolt implements IBolt {
     @Override
     public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
         try {
-            _metricsConsumer = (IMetricsConsumer)Class.forName(_consumerClassName).newInstance();
+            _metricsConsumer = (IMetricsConsumer) Class.forName(_consumerClassName).newInstance();
         } catch (Exception e) {
-            throw new RuntimeException("Could not instantiate a class listed in config under section " +
-                Config.TOPOLOGY_METRICS_CONSUMER_REGISTER + " with fully qualified name " + _consumerClassName, e);
+            throw new RuntimeException("Could not instantiate a class listed in config under section " + Config.TOPOLOGY_METRICS_CONSUMER_REGISTER
+                    + " with fully qualified name " + _consumerClassName, e);
         }
-        _metricsConsumer.prepare(stormConf, _registrationArgument, context, (IErrorReporter)collector);
+        _metricsConsumer.prepare(stormConf, _registrationArgument, context, (IErrorReporter) collector);
         _collector = collector;
     }
-    
+
     @Override
     public void execute(Tuple input) {
-        _metricsConsumer.handleDataPoints((IMetricsConsumer.TaskInfo)input.getValue(0), (Collection)input.getValue(1));
+        _metricsConsumer.handleDataPoints((IMetricsConsumer.TaskInfo) input.getValue(0), (Collection) input.getValue(1));
         _collector.ack(input);
     }
 
@@ -60,5 +60,5 @@ public class MetricsConsumerBolt implements IBolt {
     public void cleanup() {
         _metricsConsumer.cleanup();
     }
-    
+
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/metric/SystemBolt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/metric/SystemBolt.java b/jstorm-core/src/main/java/backtype/storm/metric/SystemBolt.java
index 492bc2d..43551ad 100755
--- a/jstorm-core/src/main/java/backtype/storm/metric/SystemBolt.java
+++ b/jstorm-core/src/main/java/backtype/storm/metric/SystemBolt.java
@@ -35,7 +35,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-
 // There is one task inside one executor for each worker of the topology.
 // TaskID is always -1, therefore you can only send-unanchored tuples to co-located SystemBolt.
 // This bolt was conceived to export worker stats via metrics api.
@@ -45,12 +44,14 @@ public class SystemBolt implements IBolt {
 
     private static class MemoryUsageMetric implements IMetric {
         IFn _getUsage;
+
         public MemoryUsageMetric(IFn getUsage) {
             _getUsage = getUsage;
         }
+
         @Override
         public Object getValueAndReset() {
-            MemoryUsage memUsage = (MemoryUsage)_getUsage.invoke();
+            MemoryUsage memUsage = (MemoryUsage) _getUsage.invoke();
             HashMap m = new HashMap();
             m.put("maxBytes", memUsage.getMax());
             m.put("committedBytes", memUsage.getCommitted());
@@ -68,16 +69,18 @@ public class SystemBolt implements IBolt {
         GarbageCollectorMXBean _gcBean;
         Long _collectionCount;
         Long _collectionTime;
+
         public GarbageCollectorMetric(GarbageCollectorMXBean gcBean) {
             _gcBean = gcBean;
         }
+
         @Override
         public Object getValueAndReset() {
             Long collectionCountP = _gcBean.getCollectionCount();
             Long collectionTimeP = _gcBean.getCollectionTime();
 
             Map ret = null;
-            if(_collectionCount!=null && _collectionTime!=null) {
+            if (_collectionCount != null && _collectionTime != null) {
                 ret = new HashMap();
                 ret.put("count", collectionCountP - _collectionCount);
                 ret.put("timeMs", collectionTimeP - _collectionTime);
@@ -91,7 +94,7 @@ public class SystemBolt implements IBolt {
 
     @Override
     public void prepare(final Map stormConf, TopologyContext context, OutputCollector collector) {
-        if(_prepareWasCalled && !"local".equals(stormConf.get(Config.STORM_CLUSTER_MODE))) {
+        if (_prepareWasCalled && !"local".equals(stormConf.get(Config.STORM_CLUSTER_MODE))) {
             throw new RuntimeException("A single worker should have 1 SystemBolt instance.");
         }
         _prepareWasCalled = true;
@@ -103,14 +106,14 @@ public class SystemBolt implements IBolt {
         context.registerMetric("uptimeSecs", new IMetric() {
             @Override
             public Object getValueAndReset() {
-                return jvmRT.getUptime()/1000.0;
+                return jvmRT.getUptime() / 1000.0;
             }
         }, bucketSize);
 
         context.registerMetric("startTimeSecs", new IMetric() {
             @Override
             public Object getValueAndReset() {
-                return jvmRT.getStartTime()/1000.0;
+                return jvmRT.getStartTime() / 1000.0;
             }
         }, bucketSize);
 
@@ -122,7 +125,8 @@ public class SystemBolt implements IBolt {
                 if (doEvent) {
                     doEvent = false;
                     return 1;
-                } else return 0;
+                } else
+                    return 0;
             }
         }, bucketSize);
 
@@ -139,7 +143,7 @@ public class SystemBolt implements IBolt {
             }
         }), bucketSize);
 
-        for(GarbageCollectorMXBean b : ManagementFactory.getGarbageCollectorMXBeans()) {
+        for (GarbageCollectorMXBean b : ManagementFactory.getGarbageCollectorMXBeans()) {
             context.registerMetric("GC/" + b.getName().replaceAll("\\W", ""), new GarbageCollectorMetric(b), bucketSize);
         }
     }
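
SystemBolt's prepare() above shows the registration pattern; user bolts can
register their own gauges the same way. A sketch, assuming a bolt field named
pending and the same imports as SystemBolt (backtype.storm.metric.api.IMetric):

    context.registerMetric("pendingCount", new IMetric() {
        @Override
        public Object getValueAndReset() {
            // gauge-style reading: report the current size once per bucket,
            // nothing to reset
            return pending.size();
        }
    }, bucketSize);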

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/metric/api/CombinedMetric.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/metric/api/CombinedMetric.java b/jstorm-core/src/main/java/backtype/storm/metric/api/CombinedMetric.java
index 5764a25..a840851 100755
--- a/jstorm-core/src/main/java/backtype/storm/metric/api/CombinedMetric.java
+++ b/jstorm-core/src/main/java/backtype/storm/metric/api/CombinedMetric.java
@@ -25,7 +25,7 @@ public class CombinedMetric implements IMetric {
         _combiner = combiner;
         _value = _combiner.identity();
     }
-    
+
     public void update(Object value) {
         _value = _combiner.combine(_value, value);
     }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/metric/api/CountMetric.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/metric/api/CountMetric.java b/jstorm-core/src/main/java/backtype/storm/metric/api/CountMetric.java
index dd048b8..82dc529 100755
--- a/jstorm-core/src/main/java/backtype/storm/metric/api/CountMetric.java
+++ b/jstorm-core/src/main/java/backtype/storm/metric/api/CountMetric.java
@@ -24,7 +24,7 @@ public class CountMetric implements IMetric {
 
     public CountMetric() {
     }
-    
+
     public void incr() {
         _value++;
     }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/metric/api/ICombiner.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/metric/api/ICombiner.java b/jstorm-core/src/main/java/backtype/storm/metric/api/ICombiner.java
index 04b3156..4dcb0dd 100755
--- a/jstorm-core/src/main/java/backtype/storm/metric/api/ICombiner.java
+++ b/jstorm-core/src/main/java/backtype/storm/metric/api/ICombiner.java
@@ -19,5 +19,6 @@ package backtype.storm.metric.api;
 
 public interface ICombiner<T> {
     public T identity();
+
     public T combine(T a, T b);
 }
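
A combiner only needs an identity element and an associative combine step; a
maximum-tracking sketch (the class name MaxCombiner is hypothetical) for use
with the CombinedMetric above:

    public class MaxCombiner implements ICombiner<Long> {
        public Long identity() {
            return Long.MIN_VALUE; // neutral element for max
        }

        public Long combine(Long a, Long b) {
            return Math.max(a, b);
        }
    }
    // usage: CombinedMetric maxLatency = new CombinedMetric(new MaxCombiner());
    //        maxLatency.update(42L);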

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/metric/api/IMetricsConsumer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/metric/api/IMetricsConsumer.java b/jstorm-core/src/main/java/backtype/storm/metric/api/IMetricsConsumer.java
index 14f1bf6..840bc6b 100755
--- a/jstorm-core/src/main/java/backtype/storm/metric/api/IMetricsConsumer.java
+++ b/jstorm-core/src/main/java/backtype/storm/metric/api/IMetricsConsumer.java
@@ -24,37 +24,47 @@ import java.util.Map;
 
 public interface IMetricsConsumer {
     public static class TaskInfo {
-        public TaskInfo() {}
+        public TaskInfo() {
+        }
+
         public TaskInfo(String srcWorkerHost, int srcWorkerPort, String srcComponentId, int srcTaskId, long timestamp, int updateIntervalSecs) {
             this.srcWorkerHost = srcWorkerHost;
             this.srcWorkerPort = srcWorkerPort;
-            this.srcComponentId = srcComponentId; 
-            this.srcTaskId = srcTaskId; 
+            this.srcComponentId = srcComponentId;
+            this.srcTaskId = srcTaskId;
             this.timestamp = timestamp;
-            this.updateIntervalSecs = updateIntervalSecs; 
+            this.updateIntervalSecs = updateIntervalSecs;
         }
+
         public String srcWorkerHost;
         public int srcWorkerPort;
-        public String srcComponentId; 
-        public int srcTaskId; 
+        public String srcComponentId;
+        public int srcTaskId;
         public long timestamp;
-        public int updateIntervalSecs; 
+        public int updateIntervalSecs;
     }
+
     public static class DataPoint {
-        public DataPoint() {}
+        public DataPoint() {
+        }
+
         public DataPoint(String name, Object value) {
             this.name = name;
             this.value = value;
         }
+
         @Override
         public String toString() {
             return "[" + name + " = " + value + "]";
         }
-        public String name; 
+
+        public String name;
         public Object value;
     }
 
     void prepare(Map stormConf, Object registrationArgument, TopologyContext context, IErrorReporter errorReporter);
+
     void handleDataPoints(TaskInfo taskInfo, Collection<DataPoint> dataPoints);
+
     void cleanup();
 }
\ No newline at end of file
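
A consumer implementation only has to fill in the three callbacks above; a
self-contained sketch (the class name StdoutMetricsConsumer is hypothetical):

    import java.util.Collection;
    import java.util.Map;

    import backtype.storm.metric.api.IMetricsConsumer;
    import backtype.storm.task.IErrorReporter;
    import backtype.storm.task.TopologyContext;

    public class StdoutMetricsConsumer implements IMetricsConsumer {
        @Override
        public void prepare(Map stormConf, Object registrationArgument, TopologyContext context, IErrorReporter errorReporter) {
        }

        @Override
        public void handleDataPoints(TaskInfo taskInfo, Collection<DataPoint> dataPoints) {
            for (DataPoint p : dataPoints) {
                // DataPoint.toString() renders as "[name = value]"
                System.out.println(taskInfo.srcComponentId + ":" + taskInfo.srcTaskId + " " + p);
            }
        }

        @Override
        public void cleanup() {
        }
    }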

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/metric/api/IReducer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/metric/api/IReducer.java b/jstorm-core/src/main/java/backtype/storm/metric/api/IReducer.java
index a58df3b..403fe89 100755
--- a/jstorm-core/src/main/java/backtype/storm/metric/api/IReducer.java
+++ b/jstorm-core/src/main/java/backtype/storm/metric/api/IReducer.java
@@ -19,6 +19,8 @@ package backtype.storm.metric.api;
 
 public interface IReducer<T> {
     T init();
+
     T reduce(T accumulator, Object input);
+
     Object extractResult(T accumulator);
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/metric/api/MeanReducer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/metric/api/MeanReducer.java b/jstorm-core/src/main/java/backtype/storm/metric/api/MeanReducer.java
index e25e26d..4138cab 100755
--- a/jstorm-core/src/main/java/backtype/storm/metric/api/MeanReducer.java
+++ b/jstorm-core/src/main/java/backtype/storm/metric/api/MeanReducer.java
@@ -31,23 +31,22 @@ public class MeanReducer implements IReducer<MeanReducerState> {
 
     public MeanReducerState reduce(MeanReducerState acc, Object input) {
         acc.count++;
-        if(input instanceof Double) {
-            acc.sum += (Double)input;
-        } else if(input instanceof Long) {
-            acc.sum += ((Long)input).doubleValue();
-        } else if(input instanceof Integer) {
-            acc.sum += ((Integer)input).doubleValue();
+        if (input instanceof Double) {
+            acc.sum += (Double) input;
+        } else if (input instanceof Long) {
+            acc.sum += ((Long) input).doubleValue();
+        } else if (input instanceof Integer) {
+            acc.sum += ((Integer) input).doubleValue();
         } else {
-            throw new RuntimeException(
-                "MeanReducer::reduce called with unsupported input type `" + input.getClass()
-                + "`. Supported types are Double, Long, Integer.");
+            throw new RuntimeException("MeanReducer::reduce called with unsupported input type `" + input.getClass()
+                    + "`. Supported types are Double, Long, Integer.");
         }
         return acc;
     }
 
     public Object extractResult(MeanReducerState acc) {
-        if(acc.count > 0) {
-            return new Double(acc.sum / (double)acc.count);
+        if (acc.count > 0) {
+            return new Double(acc.sum / (double) acc.count);
         } else {
             return null;
         }
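
A usage sketch of the reducer above, assuming init() returns a zeroed MeanReducerState as the IReducer contract implies (the demo class sits in the metric package so the state class resolves regardless of its visibility; the class name is illustrative):

package backtype.storm.metric.api;

public class MeanReducerDemo {
    public static void main(String[] args) {
        MeanReducer reducer = new MeanReducer();
        MeanReducerState acc = reducer.init();          // zeroed count/sum
        reducer.reduce(acc, 1.0);                       // Double
        reducer.reduce(acc, 2L);                        // Long
        reducer.reduce(acc, 3);                         // Integer
        System.out.println(reducer.extractResult(acc)); // prints 2.0 (sum 6.0 / count 3)
        // Any other input type hits the RuntimeException branch shown above.
    }
}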

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/metric/api/MultiCountMetric.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/metric/api/MultiCountMetric.java b/jstorm-core/src/main/java/backtype/storm/metric/api/MultiCountMetric.java
index c420a16..eae7de3 100755
--- a/jstorm-core/src/main/java/backtype/storm/metric/api/MultiCountMetric.java
+++ b/jstorm-core/src/main/java/backtype/storm/metric/api/MultiCountMetric.java
@@ -26,10 +26,10 @@ public class MultiCountMetric implements IMetric {
 
     public MultiCountMetric() {
     }
-    
+
     public CountMetric scope(String key) {
         CountMetric val = _value.get(key);
-        if(val == null) {
+        if (val == null) {
             _value.put(key, val = new CountMetric());
         }
         return val;
@@ -37,7 +37,7 @@ public class MultiCountMetric implements IMetric {
 
     public Object getValueAndReset() {
         Map ret = new HashMap();
-        for(Map.Entry<String, CountMetric> e : _value.entrySet()) {
+        for (Map.Entry<String, CountMetric> e : _value.entrySet()) {
             ret.put(e.getKey(), e.getValue().getValueAndReset());
         }
         return ret;
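
The scope() idiom above lazily creates one CountMetric per key, so a single registered metric can carry per-key counts. An illustrative sketch (class name and keys are made up):

import backtype.storm.metric.api.MultiCountMetric;

public class MultiCountDemo {
    public static void main(String[] args) {
        MultiCountMetric counts = new MultiCountMetric();
        counts.scope("acked").incr();          // first use creates the "acked" counter
        counts.scope("failed").incrBy(3);
        // Snapshot is {acked=1, failed=3}; both counters reset to zero.
        System.out.println(counts.getValueAndReset());
    }
}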

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/metric/api/MultiReducedMetric.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/metric/api/MultiReducedMetric.java b/jstorm-core/src/main/java/backtype/storm/metric/api/MultiReducedMetric.java
index 530b168..09b26a7 100755
--- a/jstorm-core/src/main/java/backtype/storm/metric/api/MultiReducedMetric.java
+++ b/jstorm-core/src/main/java/backtype/storm/metric/api/MultiReducedMetric.java
@@ -28,10 +28,10 @@ public class MultiReducedMetric implements IMetric {
     public MultiReducedMetric(IReducer reducer) {
         _reducer = reducer;
     }
-    
+
     public ReducedMetric scope(String key) {
         ReducedMetric val = _value.get(key);
-        if(val == null) {
+        if (val == null) {
             _value.put(key, val = new ReducedMetric(_reducer));
         }
         return val;
@@ -39,9 +39,9 @@ public class MultiReducedMetric implements IMetric {
 
     public Object getValueAndReset() {
         Map ret = new HashMap();
-        for(Map.Entry<String, ReducedMetric> e : _value.entrySet()) {
+        for (Map.Entry<String, ReducedMetric> e : _value.entrySet()) {
             Object val = e.getValue().getValueAndReset();
-            if(val != null) {
+            if (val != null) {
                 ret.put(e.getKey(), val);
             }
         }
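
MultiReducedMetric applies the same scope() idiom with a shared reducer; note that getValueAndReset() drops keys whose reduced value is null, so untouched scopes vanish from the snapshot. A sketch, assuming ReducedMetric exposes the stock update(Object) method that feeds the reducer:

import backtype.storm.metric.api.MeanReducer;
import backtype.storm.metric.api.MultiReducedMetric;

public class MultiReducedDemo {
    public static void main(String[] args) {
        MultiReducedMetric latency = new MultiReducedMetric(new MeanReducer());
        latency.scope("db").update(12);   // feeds the MeanReducer behind key "db"
        latency.scope("db").update(18L);
        // Snapshot is {db=15.0}; a scope with count == 0 reduces to null and is omitted.
        System.out.println(latency.getValueAndReset());
    }
}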

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/metric/api/rpc/CountShellMetric.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/metric/api/rpc/CountShellMetric.java b/jstorm-core/src/main/java/backtype/storm/metric/api/rpc/CountShellMetric.java
index def74c2..68d6a5c 100755
--- a/jstorm-core/src/main/java/backtype/storm/metric/api/rpc/CountShellMetric.java
+++ b/jstorm-core/src/main/java/backtype/storm/metric/api/rpc/CountShellMetric.java
@@ -21,16 +21,13 @@ import backtype.storm.metric.api.CountMetric;
 
 public class CountShellMetric extends CountMetric implements IShellMetric {
     /***
-     * @param
-     *  params should be null or long
-     *  if value is null, it will call incr()
-     *  if value is long, it will call incrBy((long)params)
+     * @param value should be null or a Long: if it is null, this calls incr(); if it is a Long, this calls incrBy((long) value)
      * */
     public void updateMetricFromRPC(Object value) {
         if (value == null) {
             incr();
         } else if (value instanceof Long) {
-            incrBy((Long)value);
+            incrBy((Long) value);
         } else {
             throw new RuntimeException("CountShellMetric updateMetricFromRPC params should be null or Long");
         }
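
The dispatch above lets a multilang component drive the counter with either an empty payload or an explicit increment. An illustrative sketch of both paths:

import backtype.storm.metric.api.rpc.CountShellMetric;

public class CountShellMetricDemo {
    public static void main(String[] args) {
        CountShellMetric metric = new CountShellMetric();
        metric.updateMetricFromRPC(null);              // null payload -> incr(), count = 1
        metric.updateMetricFromRPC(5L);                // Long payload -> incrBy(5), count = 6
        System.out.println(metric.getValueAndReset()); // 6
        // Any other payload type triggers the RuntimeException above.
    }
}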

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/metric/api/rpc/IShellMetric.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/metric/api/rpc/IShellMetric.java b/jstorm-core/src/main/java/backtype/storm/metric/api/rpc/IShellMetric.java
index d53baea..4b2d97a 100755
--- a/jstorm-core/src/main/java/backtype/storm/metric/api/rpc/IShellMetric.java
+++ b/jstorm-core/src/main/java/backtype/storm/metric/api/rpc/IShellMetric.java
@@ -21,11 +21,9 @@ import backtype.storm.metric.api.IMetric;
 
 public interface IShellMetric extends IMetric {
     /***
-     * @function
-     *     This interface is used by ShellBolt and ShellSpout through RPC call to update Metric 
-     * @param
-     *     value used to update metric, its's meaning change according implementation
-     *     Object can be any json support types: String, Long, Double, Boolean, Null, List, Map
+     * @function This interface is used by ShellBolt and ShellSpout to update a metric through an RPC call
+     * @param value the value used to update the metric; its meaning changes according to the implementation. The Object can be any JSON-supported type: String,
+     *            Long, Double, Boolean, Null, List, Map
      * */
     public void updateMetricFromRPC(Object value);
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/multilang/BoltMsg.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/multilang/BoltMsg.java b/jstorm-core/src/main/java/backtype/storm/multilang/BoltMsg.java
index 446bdc4..d1eee4e 100755
--- a/jstorm-core/src/main/java/backtype/storm/multilang/BoltMsg.java
+++ b/jstorm-core/src/main/java/backtype/storm/multilang/BoltMsg.java
@@ -20,15 +20,12 @@ package backtype.storm.multilang;
 import java.util.List;
 
 /**
- * BoltMsg is an object that represents the data sent from a shell component to
- * a bolt process that implements a multi-language protocol. It is the union of
- * all data types that a bolt can receive from Storm.
- *
+ * BoltMsg is an object that represents the data sent from a shell component to a bolt process that implements a multi-language protocol. It is the union of all
+ * data types that a bolt can receive from Storm.
+ * 
  * <p>
- * BoltMsgs are objects sent to the ISerializer interface, for serialization
- * according to the wire protocol implemented by the serializer. The BoltMsg
- * class allows for a decoupling between the serialized representation of the
- * data and the data itself.
+ * BoltMsgs are objects sent to the ISerializer interface, for serialization according to the wire protocol implemented by the serializer. The BoltMsg class
+ * allows for a decoupling between the serialized representation of the data and the data itself.
  * </p>
  */
 public class BoltMsg {

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/multilang/ISerializer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/multilang/ISerializer.java b/jstorm-core/src/main/java/backtype/storm/multilang/ISerializer.java
index c9c7ad4..6729d89 100755
--- a/jstorm-core/src/main/java/backtype/storm/multilang/ISerializer.java
+++ b/jstorm-core/src/main/java/backtype/storm/multilang/ISerializer.java
@@ -27,55 +27,52 @@ import java.util.Map;
 import backtype.storm.task.TopologyContext;
 
 /**
- * The ISerializer interface describes the methods that an object should
- * implement to provide serialization and de-serialization capabilities to
- * non-JVM language components.
+ * The ISerializer interface describes the methods that an object should implement to provide serialization and de-serialization capabilities to non-JVM
+ * language components.
  */
 public interface ISerializer extends Serializable {
 
     /**
      * This method sets the input and output streams of the serializer
-     *
+     * 
      * @param processIn output stream to non-JVM component
      * @param processOut input stream from non-JVM component
      */
     void initialize(OutputStream processIn, InputStream processOut);
 
     /**
-     * This method transmits the Storm config to the non-JVM process and
-     * receives its pid.
-     *
+     * This method transmits the Storm config to the non-JVM process and receives its pid.
+     * 
      * @param conf storm configuration
      * @param context topology context
      * @return process pid
      */
-    Number connect(Map conf, TopologyContext context) throws IOException,
-            NoOutputException;
+    Number connect(Map conf, TopologyContext context) throws IOException, NoOutputException;
 
     /**
      * This method receives a shell message from the non-JVM process
-     *
+     * 
      * @return shell message
      */
     ShellMsg readShellMsg() throws IOException, NoOutputException;
 
     /**
      * This method sends a bolt message to a non-JVM bolt process
-     *
+     * 
      * @param msg bolt message
      */
     void writeBoltMsg(BoltMsg msg) throws IOException;
 
     /**
      * This method sends a spout message to a non-JVM spout process
-     *
+     * 
      * @param msg spout message
      */
     void writeSpoutMsg(SpoutMsg msg) throws IOException;
 
     /**
      * This method sends a list of task IDs to a non-JVM bolt process
-     *
+     * 
      * @param taskIds list of task IDs
      */
     void writeTaskIds(List<Integer> taskIds) throws IOException;
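
Taken together, these methods define the JVM side of the multilang wire protocol: connect() runs the setup handshake, the write* methods push work to the subprocess, and readShellMsg() pulls replies back. A hedged sketch of the call order a driver might follow (class and method names are illustrative; subprocess spawning and error handling are elided):

import java.util.Map;

import backtype.storm.multilang.BoltMsg;
import backtype.storm.multilang.ISerializer;
import backtype.storm.multilang.ShellMsg;
import backtype.storm.task.TopologyContext;

public class SerializerDriverSketch {
    // One handshake plus one tuple round-trip against an already-spawned subprocess.
    public ShellMsg roundTrip(ISerializer serializer, Process subprocess,
                              Map conf, TopologyContext context) throws Exception {
        serializer.initialize(subprocess.getOutputStream(), // "processIn": bytes to the subprocess
                              subprocess.getInputStream()); // "processOut": bytes from the subprocess
        Number pid = serializer.connect(conf, context);     // setup handshake; subprocess reports its pid
        BoltMsg msg = new BoltMsg();                        // id/stream/task/tuple to be set by the caller
        serializer.writeBoltMsg(msg);                       // push the tuple down the wire
        return serializer.readShellMsg();                   // emit/ack/fail/log/metric reply comes back
    }
}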

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/multilang/JsonSerializer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/multilang/JsonSerializer.java b/jstorm-core/src/main/java/backtype/storm/multilang/JsonSerializer.java
index 0e2e156..bce9a1a 100755
--- a/jstorm-core/src/main/java/backtype/storm/multilang/JsonSerializer.java
+++ b/jstorm-core/src/main/java/backtype/storm/multilang/JsonSerializer.java
@@ -52,8 +52,7 @@ public class JsonSerializer implements ISerializer {
         }
     }
 
-    public Number connect(Map conf, TopologyContext context)
-            throws IOException, NoOutputException {
+    public Number connect(Map conf, TopologyContext context) throws IOException, NoOutputException {
         JSONObject setupInfo = new JSONObject();
         setupInfo.put("pidDir", context.getPIDDir());
         setupInfo.put("conf", conf);
@@ -140,22 +139,22 @@ public class JsonSerializer implements ISerializer {
                 shellMsg.addAnchor((String) o);
             }
         }
-       
-        Object nameObj = msg.get("name"); 
+
+        Object nameObj = msg.get("name");
         String metricName = null;
         if (nameObj != null && nameObj instanceof String) {
             metricName = (String) nameObj;
         }
         shellMsg.setMetricName(metricName);
-        
+
         Object paramsObj = msg.get("params");
         shellMsg.setMetricParams(paramsObj);
 
         if (command.equals("log")) {
             Object logLevelObj = msg.get("level");
             if (logLevelObj != null && logLevelObj instanceof Long) {
-                long logLevel = (Long)logLevelObj;
-                shellMsg.setLogLevel((int)logLevel);
+                long logLevel = (Long) logLevelObj;
+                shellMsg.setLogLevel((int) logLevel);
             }
         }
 
@@ -183,8 +182,7 @@ public class JsonSerializer implements ISerializer {
                 if (line.length() == 0) {
                     errorMessage.append(" No output read.\n");
                 } else {
-                    errorMessage.append(" Currently read output: "
-                            + line.toString() + "\n");
+                    errorMessage.append(" Currently read output: " + line.toString() + "\n");
                 }
                 errorMessage.append("Serializer Exception:\n");
                 throw new NoOutputException(errorMessage.toString());

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/multilang/NoOutputException.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/multilang/NoOutputException.java b/jstorm-core/src/main/java/backtype/storm/multilang/NoOutputException.java
index 1ce75d3..58b0a6e 100755
--- a/jstorm-core/src/main/java/backtype/storm/multilang/NoOutputException.java
+++ b/jstorm-core/src/main/java/backtype/storm/multilang/NoOutputException.java
@@ -18,8 +18,7 @@
 package backtype.storm.multilang;
 
 /**
- * A NoOutputException states that no data has been received from the connected
- * non-JVM process.
+ * A NoOutputException states that no data has been received from the connected non-JVM process.
  */
 public class NoOutputException extends Exception {
     public NoOutputException() {

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/multilang/ShellMsg.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/multilang/ShellMsg.java b/jstorm-core/src/main/java/backtype/storm/multilang/ShellMsg.java
index 9eafb1a..01434e0 100755
--- a/jstorm-core/src/main/java/backtype/storm/multilang/ShellMsg.java
+++ b/jstorm-core/src/main/java/backtype/storm/multilang/ShellMsg.java
@@ -21,15 +21,12 @@ import java.util.ArrayList;
 import java.util.List;
 
 /**
- * ShellMsg is an object that represents the data sent to a shell component from
- * a process that implements a multi-language protocol. It is the union of all
+ * ShellMsg is an object that represents the data sent to a shell component from a process that implements a multi-language protocol. It is the union of all
  * data types that a component can send to Storm.
- *
+ * 
  * <p>
- * ShellMsgs are objects received from the ISerializer interface, after the
- * serializer has deserialized the data from the underlying wire protocol. The
- * ShellMsg class allows for a decoupling between the serialized representation
- * of the data and the data itself.
+ * ShellMsgs are objects received from the ISerializer interface, after the serializer has deserialized the data from the underlying wire protocol. The ShellMsg
+ * class allows for a decoupling between the serialized representation of the data and the data itself.
  * </p>
  */
 public class ShellMsg {
@@ -42,22 +39,28 @@ public class ShellMsg {
     private List<Object> tuple;
     private boolean needTaskIds;
 
-    //metrics rpc 
+    // metrics rpc
     private String metricName;
     private Object metricParams;
 
-    //logLevel
+    // logLevel
     public enum ShellLogLevel {
         TRACE, DEBUG, INFO, WARN, ERROR;
 
         public static ShellLogLevel fromInt(int i) {
             switch (i) {
-                case 0: return TRACE;
-                case 1: return DEBUG;
-                case 2: return INFO;
-                case 3: return WARN;
-                case 4: return ERROR;
-                default: return INFO;
+            case 0:
+                return TRACE;
+            case 1:
+                return DEBUG;
+            case 2:
+                return INFO;
+            case 3:
+                return WARN;
+            case 4:
+                return ERROR;
+            default:
+                return INFO;
             }
         }
     }
@@ -168,18 +171,8 @@ public class ShellMsg {
 
     @Override
     public String toString() {
-        return "ShellMsg{" +
-                "command='" + command + '\'' +
-                ", id=" + id +
-                ", anchors=" + anchors +
-                ", stream='" + stream + '\'' +
-                ", task=" + task +
-                ", msg='" + msg + '\'' +
-                ", tuple=" + tuple +
-                ", needTaskIds=" + needTaskIds +
-                ", metricName='" + metricName + '\'' +
-                ", metricParams=" + metricParams +
-                ", logLevel=" + logLevel +
-                '}';
+        return "ShellMsg{" + "command='" + command + '\'' + ", id=" + id + ", anchors=" + anchors + ", stream='" + stream + '\'' + ", task=" + task + ", msg='"
+                + msg + '\'' + ", tuple=" + tuple + ", needTaskIds=" + needTaskIds + ", metricName='" + metricName + '\'' + ", metricParams=" + metricParams
+                + ", logLevel=" + logLevel + '}';
     }
 }
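
fromInt() above deliberately falls back to INFO for unknown codes, so a misbehaving subprocess cannot break the log path with a bad level. For example:

import backtype.storm.multilang.ShellMsg.ShellLogLevel;

public class LogLevelDemo {
    public static void main(String[] args) {
        System.out.println(ShellLogLevel.fromInt(0));  // TRACE
        System.out.println(ShellLogLevel.fromInt(4));  // ERROR
        System.out.println(ShellLogLevel.fromInt(99)); // INFO, the defensive default
    }
}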

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/multilang/SpoutMsg.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/multilang/SpoutMsg.java b/jstorm-core/src/main/java/backtype/storm/multilang/SpoutMsg.java
index cb1b108..c08646c 100755
--- a/jstorm-core/src/main/java/backtype/storm/multilang/SpoutMsg.java
+++ b/jstorm-core/src/main/java/backtype/storm/multilang/SpoutMsg.java
@@ -18,15 +18,12 @@
 package backtype.storm.multilang;
 
 /**
- * SpoutMsg is an object that represents the data sent from a shell spout to a
- * process that implements a multi-language spout. The SpoutMsg is used to send
- * a "next", "ack" or "fail" message to a spout.
- *
+ * SpoutMsg is an object that represents the data sent from a shell spout to a process that implements a multi-language spout. The SpoutMsg is used to send a
+ * "next", "ack" or "fail" message to a spout.
+ * 
  * <p>
- * Spout messages are objects sent to the ISerializer interface, for
- * serialization according to the wire protocol implemented by the serializer.
- * The SpoutMsg class allows for a decoupling between the serialized
- * representation of the data and the data itself.
+ * Spout messages are objects sent to the ISerializer interface, for serialization according to the wire protocol implemented by the serializer. The SpoutMsg
+ * class allows for a decoupling between the serialized representation of the data and the data itself.
  * </p>
  */
 public class SpoutMsg {

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/nimbus/DefaultTopologyValidator.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/nimbus/DefaultTopologyValidator.java b/jstorm-core/src/main/java/backtype/storm/nimbus/DefaultTopologyValidator.java
index a687215..14c5723 100755
--- a/jstorm-core/src/main/java/backtype/storm/nimbus/DefaultTopologyValidator.java
+++ b/jstorm-core/src/main/java/backtype/storm/nimbus/DefaultTopologyValidator.java
@@ -23,9 +23,10 @@ import java.util.Map;
 
 public class DefaultTopologyValidator implements ITopologyValidator {
     @Override
-    public void prepare(Map StormConf){
+    public void prepare(Map StormConf) {
     }
+
     @Override
-    public void validate(String topologyName, Map topologyConf, StormTopology topology) throws InvalidTopologyException {        
-    }    
+    public void validate(String topologyName, Map topologyConf, StormTopology topology) throws InvalidTopologyException {
+    }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/nimbus/ITopologyValidator.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/nimbus/ITopologyValidator.java b/jstorm-core/src/main/java/backtype/storm/nimbus/ITopologyValidator.java
index 99bd07b..36e2c18 100755
--- a/jstorm-core/src/main/java/backtype/storm/nimbus/ITopologyValidator.java
+++ b/jstorm-core/src/main/java/backtype/storm/nimbus/ITopologyValidator.java
@@ -23,6 +23,6 @@ import java.util.Map;
 
 public interface ITopologyValidator {
     void prepare(Map StormConf);
-    void validate(String topologyName, Map topologyConf, StormTopology topology)
-            throws InvalidTopologyException;
+
+    void validate(String topologyName, Map topologyConf, StormTopology topology) throws InvalidTopologyException;
 }
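
A custom validator plugs into Nimbus through this interface. The sketch below rejects topologies above a hypothetical worker cap; the class name and the cap are illustrative assumptions, not part of this commit:

import java.util.Map;

import backtype.storm.Config;
import backtype.storm.generated.InvalidTopologyException;
import backtype.storm.generated.StormTopology;
import backtype.storm.nimbus.ITopologyValidator;

public class MaxWorkersValidator implements ITopologyValidator {
    private static final int MAX_WORKERS = 50; // illustrative cap

    @Override
    public void prepare(Map StormConf) {
    }

    @Override
    public void validate(String topologyName, Map topologyConf, StormTopology topology)
            throws InvalidTopologyException {
        Object numWorkers = topologyConf.get(Config.TOPOLOGY_WORKERS);
        if (numWorkers instanceof Number && ((Number) numWorkers).intValue() > MAX_WORKERS) {
            throw new InvalidTopologyException(topologyName + " requests too many workers");
        }
    }
}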

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/planner/CompoundSpout.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/planner/CompoundSpout.java b/jstorm-core/src/main/java/backtype/storm/planner/CompoundSpout.java
index 141b24b..6665d82 100755
--- a/jstorm-core/src/main/java/backtype/storm/planner/CompoundSpout.java
+++ b/jstorm-core/src/main/java/backtype/storm/planner/CompoundSpout.java
@@ -17,9 +17,8 @@
  */
 package backtype.storm.planner;
 
-
 public class CompoundSpout
-        //implements ISpout
+// implements ISpout
 {
 
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/planner/CompoundTask.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/planner/CompoundTask.java b/jstorm-core/src/main/java/backtype/storm/planner/CompoundTask.java
index 40a7f37..2bd56ee 100755
--- a/jstorm-core/src/main/java/backtype/storm/planner/CompoundTask.java
+++ b/jstorm-core/src/main/java/backtype/storm/planner/CompoundTask.java
@@ -17,9 +17,8 @@
  */
 package backtype.storm.planner;
 
-
 public class CompoundTask
-//        implements IBolt
+// implements IBolt
 {
 
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/planner/TaskBundle.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/planner/TaskBundle.java b/jstorm-core/src/main/java/backtype/storm/planner/TaskBundle.java
index 81c6209..4542aad 100755
--- a/jstorm-core/src/main/java/backtype/storm/planner/TaskBundle.java
+++ b/jstorm-core/src/main/java/backtype/storm/planner/TaskBundle.java
@@ -20,14 +20,13 @@ package backtype.storm.planner;
 import backtype.storm.task.IBolt;
 import java.io.Serializable;
 
-
 public class TaskBundle implements Serializable {
     public IBolt task;
     public int componentId;
-    
+
     public TaskBundle(IBolt task, int componentId) {
         this.task = task;
         this.componentId = componentId;
     }
-    
+
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/scheduler/Cluster.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/scheduler/Cluster.java b/jstorm-core/src/main/java/backtype/storm/scheduler/Cluster.java
index e0c7cc7..14d9ede 100755
--- a/jstorm-core/src/main/java/backtype/storm/scheduler/Cluster.java
+++ b/jstorm-core/src/main/java/backtype/storm/scheduler/Cluster.java
@@ -30,25 +30,25 @@ public class Cluster {
     /**
      * key: supervisor id, value: supervisor details
      */
-    private Map<String, SupervisorDetails>   supervisors;
+    private Map<String, SupervisorDetails> supervisors;
     /**
      * key: topologyId, value: topology's current assignments.
      */
     private Map<String, SchedulerAssignmentImpl> assignments;
     /**
     * key: topologyId, value: scheduler's status.
-     */  
+     */
     private Map<String, String> status;
 
     /**
      * a map from hostname to supervisor id.
      */
-    private Map<String, List<String>>        hostToId;
-    
+    private Map<String, List<String>> hostToId;
+
     private Set<String> blackListedHosts = new HashSet<String>();
     private INimbus inimbus;
 
-    public Cluster(INimbus nimbus, Map<String, SupervisorDetails> supervisors, Map<String, SchedulerAssignmentImpl> assignments){
+    public Cluster(INimbus nimbus, Map<String, SupervisorDetails> supervisors, Map<String, SchedulerAssignmentImpl> assignments) {
         this.inimbus = nimbus;
         this.supervisors = new HashMap<String, SupervisorDetails>(supervisors.size());
         this.supervisors.putAll(supervisors);
@@ -65,35 +65,36 @@ public class Cluster {
             this.hostToId.get(host).add(nodeId);
         }
     }
-    
+
     public void setBlacklistedHosts(Set<String> hosts) {
         blackListedHosts = hosts;
     }
-    
+
     public Set<String> getBlacklistedHosts() {
         return blackListedHosts;
     }
-    
+
     public void blacklistHost(String host) {
         // this is so it plays well with setting blackListedHosts to an immutable list
-        if(blackListedHosts==null) blackListedHosts = new HashSet<String>();
-        if(!(blackListedHosts instanceof HashSet))
+        if (blackListedHosts == null)
+            blackListedHosts = new HashSet<String>();
+        if (!(blackListedHosts instanceof HashSet))
             blackListedHosts = new HashSet<String>(blackListedHosts);
         blackListedHosts.add(host);
     }
-    
+
     public boolean isBlackListed(String supervisorId) {
-        return blackListedHosts != null && blackListedHosts.contains(getHost(supervisorId));        
+        return blackListedHosts != null && blackListedHosts.contains(getHost(supervisorId));
     }
 
     public boolean isBlacklistedHost(String host) {
-        return blackListedHosts != null && blackListedHosts.contains(host);  
+        return blackListedHosts != null && blackListedHosts.contains(host);
     }
-    
+
     public String getHost(String supervisorId) {
         return inimbus.getHostName(supervisors, supervisorId);
     }
-    
+
     /**
     * Gets all the topologies which need scheduling.
      * 
@@ -116,8 +117,8 @@ public class Cluster {
      * 
      * A topology needs scheduling if one of the following conditions holds:
      * <ul>
-     *   <li>Although the topology is assigned slots, but is squeezed. i.e. the topology is assigned less slots than desired.</li>
-     *   <li>There are unassigned executors in this topology</li>
+     * <li>The topology is assigned slots but is squeezed, i.e. it is assigned fewer slots than desired.</li>
+     * <li>There are unassigned executors in this topology.</li>
      * </ul>
      */
     public boolean needsScheduling(TopologyDetails topology) {
@@ -139,7 +140,7 @@ public class Cluster {
      */
     public Map<ExecutorDetails, String> getNeedsSchedulingExecutorToComponents(TopologyDetails topology) {
         Collection<ExecutorDetails> allExecutors = new HashSet(topology.getExecutors());
-        
+
         SchedulerAssignment assignment = this.assignments.get(topology.getId());
         if (assignment != null) {
             Collection<ExecutorDetails> assignedExecutors = assignment.getExecutors();
@@ -148,7 +149,7 @@ public class Cluster {
 
         return topology.selectExecutorToComponent(allExecutors);
     }
-    
+
     /**
     * Gets a component-id -> executors map of the executors which need scheduling in this topology.
      * 
@@ -163,14 +164,13 @@ public class Cluster {
             if (!componentToExecutors.containsKey(component)) {
                 componentToExecutors.put(component, new ArrayList<ExecutorDetails>());
             }
-            
+
             componentToExecutors.get(component).add(executor);
         }
-        
+
         return componentToExecutors;
     }
 
-
     /**
      * Get all the used ports of this supervisor.
      * 
@@ -207,9 +207,10 @@ public class Cluster {
 
         return ret;
     }
-    
+
     public Set<Integer> getAssignablePorts(SupervisorDetails supervisor) {
-        if(isBlackListed(supervisor.id)) return new HashSet();
+        if (isBlackListed(supervisor.id))
+            return new HashSet();
         return supervisor.allPorts;
     }
 
@@ -229,7 +230,7 @@ public class Cluster {
 
         return slots;
     }
-    
+
     public List<WorkerSlot> getAssignableSlots(SupervisorDetails supervisor) {
         Set<Integer> ports = this.getAssignablePorts(supervisor);
         List<WorkerSlot> slots = new ArrayList<WorkerSlot>(ports.size());
@@ -238,9 +239,9 @@ public class Cluster {
             slots.add(new WorkerSlot(supervisor.getId(), port));
         }
 
-        return slots;        
+        return slots;
     }
-    
+
     /**
      * get the unassigned executors of the topology.
      */
@@ -250,13 +251,13 @@ public class Cluster {
         }
 
         Collection<ExecutorDetails> ret = new HashSet(topology.getExecutors());
-        
+
         SchedulerAssignment assignment = this.getAssignmentById(topology.getId());
         if (assignment != null) {
             Set<ExecutorDetails> assignedExecutors = assignment.getExecutors();
             ret.removeAll(assignedExecutors);
         }
-        
+
         return ret;
     }
 
@@ -287,16 +288,16 @@ public class Cluster {
         if (this.isSlotOccupied(slot)) {
             throw new RuntimeException("slot: [" + slot.getNodeId() + ", " + slot.getPort() + "] is already occupied.");
         }
-        
-        SchedulerAssignmentImpl assignment = (SchedulerAssignmentImpl)this.getAssignmentById(topologyId);
+
+        SchedulerAssignmentImpl assignment = (SchedulerAssignmentImpl) this.getAssignmentById(topologyId);
         if (assignment == null) {
             assignment = new SchedulerAssignmentImpl(topologyId, new HashMap<ExecutorDetails, WorkerSlot>());
             this.assignments.put(topologyId, assignment);
         } else {
             for (ExecutorDetails executor : executors) {
-                 if (assignment.isExecutorAssigned(executor)) {
-                     throw new RuntimeException("the executor is already assigned, you should unassign it before assign it to another slot.");
-                 }
+                if (assignment.isExecutorAssigned(executor)) {
+                    throw new RuntimeException("the executor is already assigned; you should unassign it before assigning it to another slot.");
+                }
             }
         }
 
@@ -316,7 +317,7 @@ public class Cluster {
 
         return slots;
     }
-    
+
     public List<WorkerSlot> getAssignableSlots() {
         List<WorkerSlot> slots = new ArrayList<WorkerSlot>();
         for (SupervisorDetails supervisor : this.supervisors.values()) {
@@ -339,14 +340,14 @@ public class Cluster {
             }
         }
     }
-    
+
     /**
      * free the slots.
      * 
      * @param slots
      */
     public void freeSlots(Collection<WorkerSlot> slots) {
-        if(slots!=null) {
+        if (slots != null) {
             for (WorkerSlot slot : slots) {
                 this.freeSlot(slot);
             }
@@ -365,10 +366,10 @@ public class Cluster {
                 return true;
             }
         }
-        
+
         return false;
     }
-    
+
     /**
      * get the current assignment for the topology.
      */
@@ -390,10 +391,10 @@ public class Cluster {
 
         return null;
     }
-    
+
     public Collection<WorkerSlot> getUsedSlots() {
         Set<WorkerSlot> ret = new HashSet();
-        for(SchedulerAssignmentImpl s: assignments.values()) {
+        for (SchedulerAssignmentImpl s : assignments.values()) {
             ret.addAll(s.getExecutorToSlot().values());
         }
         return ret;
@@ -423,11 +424,11 @@ public class Cluster {
      */
     public Map<String, SchedulerAssignment> getAssignments() {
         Map<String, SchedulerAssignment> ret = new HashMap<String, SchedulerAssignment>(this.assignments.size());
-        
+
         for (String topologyId : this.assignments.keySet()) {
             ret.put(topologyId, this.assignments.get(topologyId));
         }
-        
+
         return ret;
     }
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/scheduler/ExecutorDetails.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/scheduler/ExecutorDetails.java b/jstorm-core/src/main/java/backtype/storm/scheduler/ExecutorDetails.java
index bcf4aca..5934d33 100755
--- a/jstorm-core/src/main/java/backtype/storm/scheduler/ExecutorDetails.java
+++ b/jstorm-core/src/main/java/backtype/storm/scheduler/ExecutorDetails.java
@@ -21,7 +21,7 @@ public class ExecutorDetails {
     int startTask;
     int endTask;
 
-    public ExecutorDetails(int startTask, int endTask){
+    public ExecutorDetails(int startTask, int endTask) {
         this.startTask = startTask;
         this.endTask = endTask;
     }
@@ -38,17 +38,17 @@ public class ExecutorDetails {
         if (other == null || !(other instanceof ExecutorDetails)) {
             return false;
         }
-        
-        ExecutorDetails executor = (ExecutorDetails)other;
+
+        ExecutorDetails executor = (ExecutorDetails) other;
         return (this.startTask == executor.startTask) && (this.endTask == executor.endTask);
     }
-    
+
     public int hashCode() {
         return this.startTask + 13 * this.endTask;
     }
-    
+
     @Override
     public String toString() {
-    	return "[" + this.startTask + ", " + this.endTask + "]";
+        return "[" + this.startTask + ", " + this.endTask + "]";
     }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/scheduler/INimbus.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/scheduler/INimbus.java b/jstorm-core/src/main/java/backtype/storm/scheduler/INimbus.java
index a0fb417..b13beb3 100755
--- a/jstorm-core/src/main/java/backtype/storm/scheduler/INimbus.java
+++ b/jstorm-core/src/main/java/backtype/storm/scheduler/INimbus.java
@@ -23,17 +23,19 @@ import java.util.Set;
 
 public interface INimbus {
     void prepare(Map stormConf, String schedulerLocalDir);
+
     /**
-     * Returns all slots that are available for the next round of scheduling. A slot is available for scheduling
-     * if it is free and can be assigned to, or if it is used and can be reassigned.
+     * Returns all slots that are available for the next round of scheduling. A slot is available for scheduling if it is free and can be assigned to, or if it
+     * is used and can be reassigned.
      */
-    Collection<WorkerSlot> allSlotsAvailableForScheduling(Collection<SupervisorDetails> existingSupervisors, Topologies topologies, Set<String> topologiesMissingAssignments);
+    Collection<WorkerSlot> allSlotsAvailableForScheduling(Collection<SupervisorDetails> existingSupervisors, Topologies topologies,
+                                                          Set<String> topologiesMissingAssignments);
 
     // this is called after the assignment is changed in ZK
     void assignSlots(Topologies topologies, Map<String, Collection<WorkerSlot>> newSlotsByTopologyId);
-    
+
     // map from node id to supervisor details
     String getHostName(Map<String, SupervisorDetails> existingSupervisors, String nodeId);
-    
-    IScheduler getForcedScheduler(); 
+
+    IScheduler getForcedScheduler();
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/scheduler/IScheduler.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/scheduler/IScheduler.java b/jstorm-core/src/main/java/backtype/storm/scheduler/IScheduler.java
index 5395882..93096f8 100755
--- a/jstorm-core/src/main/java/backtype/storm/scheduler/IScheduler.java
+++ b/jstorm-core/src/main/java/backtype/storm/scheduler/IScheduler.java
@@ -19,22 +19,18 @@ package backtype.storm.scheduler;
 
 import java.util.Map;
 
-
 public interface IScheduler {
-    
+
     void prepare(Map conf);
-    
+
     /**
-     * Set assignments for the topologies which needs scheduling. The new assignments is available 
-     * through <code>cluster.getAssignments()</code>
-     *
-     *@param topologies all the topologies in the cluster, some of them need schedule. Topologies object here 
-     *       only contain static information about topologies. Information like assignments, slots are all in
-     *       the <code>cluster</code>object.
-     *@param cluster the cluster these topologies are running in. <code>cluster</code> contains everything user
-     *       need to develop a new scheduling logic. e.g. supervisors information, available slots, current 
-     *       assignments for all the topologies etc. User can set the new assignment for topologies using
-     *       <code>cluster.setAssignmentById</code>
+     * Set assignments for the topologies which need scheduling. The new assignments are available through <code>cluster.getAssignments()</code>
+     * 
+     * @param topologies all the topologies in the cluster; some of them need scheduling. The Topologies object here only contains static information about
+     *            topologies. Information like assignments and slots is all in the <code>cluster</code> object.
+     * @param cluster the cluster these topologies are running in. <code>cluster</code> contains everything a user needs to develop a new scheduling logic, e.g.
+     *            supervisor information, available slots, current assignments for all the topologies, etc. Users can set the new assignment for topologies using
+     *            <code>cluster.setAssignmentById</code>
      */
     void schedule(Topologies topologies, Cluster cluster);
 }
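
Wired into IScheduler, the same idea becomes a complete, if naive, pluggable scheduler. A sketch reusing the hypothetical NaiveAssigner above, not the stock implementation:

import java.util.Map;

import backtype.storm.scheduler.Cluster;
import backtype.storm.scheduler.IScheduler;
import backtype.storm.scheduler.Topologies;

public class NaiveScheduler implements IScheduler {
    private final NaiveAssigner assigner = new NaiveAssigner(); // from the Cluster sketch above

    @Override
    public void prepare(Map conf) {
        // nothing to configure in this sketch
    }

    @Override
    public void schedule(Topologies topologies, Cluster cluster) {
        assigner.assignAll(topologies, cluster); // results become visible via cluster.getAssignments()
    }
}

Such a class would be plugged in through the storm.scheduler configuration option.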

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/scheduler/ISupervisor.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/scheduler/ISupervisor.java b/jstorm-core/src/main/java/backtype/storm/scheduler/ISupervisor.java
index 64e1595..d64f851 100755
--- a/jstorm-core/src/main/java/backtype/storm/scheduler/ISupervisor.java
+++ b/jstorm-core/src/main/java/backtype/storm/scheduler/ISupervisor.java
@@ -20,26 +20,29 @@ package backtype.storm.scheduler;
 import java.util.Map;
 import java.util.Collection;
 
-
 public interface ISupervisor {
     void prepare(Map stormConf, String schedulerLocalDir);
+
     // for mesos, this is {hostname}-{topologyid}
     /**
      * The id used for writing metadata into ZK.
      */
     String getSupervisorId();
+
     /**
-     * The id used in assignments. This combined with confirmAssigned decides what
-     * this supervisor is responsible for. The combination of this and getSupervisorId
-     * allows Nimbus to assign to a single machine and have multiple supervisors
-     * on that machine execute the assignment. This is important for achieving resource isolation.
+     * The id used in assignments. This combined with confirmAssigned decides what this supervisor is responsible for. The combination of this and
+     * getSupervisorId allows Nimbus to assign to a single machine and have multiple supervisors on that machine execute the assignment. This is important for
+     * achieving resource isolation.
      */
     String getAssignmentId();
+
     Object getMetadata();
-    
+
     boolean confirmAssigned(int port);
+
     // calls this before actually killing the worker locally...
     // sends a "task finished" update
     void killedWorker(int port);
+
     void assigned(Collection<Integer> ports);
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/scheduler/SchedulerAssignment.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/scheduler/SchedulerAssignment.java b/jstorm-core/src/main/java/backtype/storm/scheduler/SchedulerAssignment.java
index 0212e48..7451dcc 100755
--- a/jstorm-core/src/main/java/backtype/storm/scheduler/SchedulerAssignment.java
+++ b/jstorm-core/src/main/java/backtype/storm/scheduler/SchedulerAssignment.java
@@ -23,6 +23,7 @@ import java.util.Set;
 public interface SchedulerAssignment {
     /**
     * Is this slot occupied by this assignment?
+     * 
      * @param slot
      * @return
      */
@@ -35,24 +36,27 @@ public interface SchedulerAssignment {
      * @return
      */
     public boolean isExecutorAssigned(ExecutorDetails executor);
-    
+
     /**
      * get the topology-id this assignment is for.
+     * 
      * @return
      */
     public String getTopologyId();
 
     /**
      * get the executor -> slot map.
+     * 
      * @return
      */
     public Map<ExecutorDetails, WorkerSlot> getExecutorToSlot();
 
     /**
     * Return the executors covered by this assignment
+     * 
      * @return
      */
     public Set<ExecutorDetails> getExecutors();
-    
+
     public Set<WorkerSlot> getSlots();
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/scheduler/SchedulerAssignmentImpl.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/scheduler/SchedulerAssignmentImpl.java b/jstorm-core/src/main/java/backtype/storm/scheduler/SchedulerAssignmentImpl.java
index 08af4b7..7a6947f 100755
--- a/jstorm-core/src/main/java/backtype/storm/scheduler/SchedulerAssignmentImpl.java
+++ b/jstorm-core/src/main/java/backtype/storm/scheduler/SchedulerAssignmentImpl.java
@@ -35,7 +35,7 @@ public class SchedulerAssignmentImpl implements SchedulerAssignment {
      * assignment detail, a mapping from executor to <code>WorkerSlot</code>
      */
     Map<ExecutorDetails, WorkerSlot> executorToSlot;
-    
+
     public SchedulerAssignmentImpl(String topologyId, Map<ExecutorDetails, WorkerSlot> executorToSlots) {
         this.topologyId = topologyId;
         this.executorToSlot = new HashMap<ExecutorDetails, WorkerSlot>(0);
@@ -47,10 +47,11 @@ public class SchedulerAssignmentImpl implements SchedulerAssignment {
     @Override
     public Set<WorkerSlot> getSlots() {
         return new HashSet(executorToSlot.values());
-    }    
-    
+    }
+
     /**
      * Assign the slot to executors.
+     * 
      * @param slot
      * @param executors
      */
@@ -59,9 +60,10 @@ public class SchedulerAssignmentImpl implements SchedulerAssignment {
             this.executorToSlot.put(executor, slot);
         }
     }
-    
+
     /**
      * Release the slot occupied by this assignment.
+     * 
      * @param slot
      */
     public void unassignBySlot(WorkerSlot slot) {
@@ -72,7 +74,7 @@ public class SchedulerAssignmentImpl implements SchedulerAssignment {
                 executors.add(executor);
             }
         }
-        
+
         // remove
         for (ExecutorDetails executor : executors) {
             this.executorToSlot.remove(executor);
@@ -81,6 +83,7 @@ public class SchedulerAssignmentImpl implements SchedulerAssignment {
 
     /**
     * Is this slot occupied by this assignment?
+     * 
      * @param slot
      * @return
      */
@@ -91,7 +94,7 @@ public class SchedulerAssignmentImpl implements SchedulerAssignment {
     public boolean isExecutorAssigned(ExecutorDetails executor) {
         return this.executorToSlot.containsKey(executor);
     }
-    
+
     public String getTopologyId() {
         return this.topologyId;
     }
@@ -102,6 +105,7 @@ public class SchedulerAssignmentImpl implements SchedulerAssignment {
 
     /**
     * Return the executors covered by this assignment
+     * 
      * @return
      */
     public Set<ExecutorDetails> getExecutors() {

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/scheduler/SupervisorDetails.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/scheduler/SupervisorDetails.java b/jstorm-core/src/main/java/backtype/storm/scheduler/SupervisorDetails.java
index 7497f26..2b8a400 100755
--- a/jstorm-core/src/main/java/backtype/storm/scheduler/SupervisorDetails.java
+++ b/jstorm-core/src/main/java/backtype/storm/scheduler/SupervisorDetails.java
@@ -38,19 +38,19 @@ public class SupervisorDetails {
      */
     Set<Integer> allPorts;
 
-    public SupervisorDetails(String id, Object meta){
+    public SupervisorDetails(String id, Object meta) {
         this.id = id;
         this.meta = meta;
         allPorts = new HashSet();
     }
-    
-    public SupervisorDetails(String id, Object meta, Collection<Number> allPorts){
+
+    public SupervisorDetails(String id, Object meta, Collection<Number> allPorts) {
         this.id = id;
         this.meta = meta;
         setAllPorts(allPorts);
     }
 
-    public SupervisorDetails(String id, String host, Object schedulerMeta, Collection<Number> allPorts){
+    public SupervisorDetails(String id, String host, Object schedulerMeta, Collection<Number> allPorts) {
         this.id = id;
         this.host = host;
         this.schedulerMeta = schedulerMeta;
@@ -60,8 +60,8 @@ public class SupervisorDetails {
 
     private void setAllPorts(Collection<Number> allPorts) {
         this.allPorts = new HashSet<Integer>();
-        if(allPorts!=null) {
-            for(Number n: allPorts) {
+        if (allPorts != null) {
+            for (Number n : allPorts) {
                 this.allPorts.add(n.intValue());
             }
         }
@@ -78,7 +78,7 @@ public class SupervisorDetails {
     public Object getMeta() {
         return meta;
     }
-    
+
     public Set<Integer> getAllPorts() {
         return allPorts;
     }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/scheduler/Topologies.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/scheduler/Topologies.java b/jstorm-core/src/main/java/backtype/storm/scheduler/Topologies.java
index 70af1b4..771fcf2 100755
--- a/jstorm-core/src/main/java/backtype/storm/scheduler/Topologies.java
+++ b/jstorm-core/src/main/java/backtype/storm/scheduler/Topologies.java
@@ -24,33 +24,34 @@ import java.util.Map;
 public class Topologies {
     Map<String, TopologyDetails> topologies;
     Map<String, String> nameToId;
-    
+
     public Topologies(Map<String, TopologyDetails> topologies) {
-        if(topologies==null) topologies = new HashMap();
+        if (topologies == null)
+            topologies = new HashMap();
         this.topologies = new HashMap<String, TopologyDetails>(topologies.size());
         this.topologies.putAll(topologies);
         this.nameToId = new HashMap<String, String>(topologies.size());
-        
+
         for (String topologyId : topologies.keySet()) {
             TopologyDetails topology = topologies.get(topologyId);
             this.nameToId.put(topology.getName(), topologyId);
         }
     }
-    
+
     public TopologyDetails getById(String topologyId) {
         return this.topologies.get(topologyId);
     }
-    
+
     public TopologyDetails getByName(String topologyName) {
         String topologyId = this.nameToId.get(topologyName);
-        
+
         if (topologyId == null) {
             return null;
         } else {
             return this.getById(topologyId);
         }
     }
-    
+
     public Collection<TopologyDetails> getTopologies() {
         return this.topologies.values();
     }
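
Topologies is a read-only index over TopologyDetails with a name-to-id shortcut built in the constructor. An illustrative sketch (the id and name strings are made up; "topology.name" is the key behind Config.TOPOLOGY_NAME):

import java.util.HashMap;
import java.util.Map;

import backtype.storm.scheduler.Topologies;
import backtype.storm.scheduler.TopologyDetails;

public class TopologiesDemo {
    public static void main(String[] args) {
        Map conf = new HashMap();
        conf.put("topology.name", "word-count");
        Map<String, TopologyDetails> byId = new HashMap<String, TopologyDetails>();
        byId.put("word-count-1-1449000000",
                 new TopologyDetails("word-count-1-1449000000", conf, null, 2));
        Topologies topologies = new Topologies(byId);
        // Resolved through the nameToId map built in the constructor above.
        System.out.println(topologies.getByName("word-count").getId());
    }
}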

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/scheduler/TopologyDetails.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/scheduler/TopologyDetails.java b/jstorm-core/src/main/java/backtype/storm/scheduler/TopologyDetails.java
index 6daf4ed..84b3966 100755
--- a/jstorm-core/src/main/java/backtype/storm/scheduler/TopologyDetails.java
+++ b/jstorm-core/src/main/java/backtype/storm/scheduler/TopologyDetails.java
@@ -24,21 +24,20 @@ import java.util.Map;
 import backtype.storm.Config;
 import backtype.storm.generated.StormTopology;
 
-
 public class TopologyDetails {
     String topologyId;
     Map topologyConf;
     StormTopology topology;
     Map<ExecutorDetails, String> executorToComponent;
     int numWorkers;
- 
+
     public TopologyDetails(String topologyId, Map topologyConf, StormTopology topology, int numWorkers) {
         this.topologyId = topologyId;
         this.topologyConf = topologyConf;
         this.topology = topology;
         this.numWorkers = numWorkers;
     }
-    
+
     public TopologyDetails(String topologyId, Map topologyConf, StormTopology topology, int numWorkers, Map<ExecutorDetails, String> executorToComponents) {
         this(topologyId, topologyConf, topology, numWorkers);
         this.executorToComponent = new HashMap<ExecutorDetails, String>(0);
@@ -46,23 +45,23 @@ public class TopologyDetails {
             this.executorToComponent.putAll(executorToComponents);
         }
     }
-    
+
     public String getId() {
         return topologyId;
     }
-    
+
     public String getName() {
-        return (String)this.topologyConf.get(Config.TOPOLOGY_NAME);
+        return (String) this.topologyConf.get(Config.TOPOLOGY_NAME);
     }
-    
+
     public Map getConf() {
         return topologyConf;
     }
-    
+
     public int getNumWorkers() {
         return numWorkers;
     }
-    
+
     public StormTopology getTopology() {
         return topology;
     }
@@ -79,10 +78,10 @@ public class TopologyDetails {
                 ret.put(executor, compId);
             }
         }
-        
+
         return ret;
     }
-    
+
     public Collection<ExecutorDetails> getExecutors() {
         return this.executorToComponent.keySet();
     }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/scheduler/WorkerSlot.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/scheduler/WorkerSlot.java b/jstorm-core/src/main/java/backtype/storm/scheduler/WorkerSlot.java
index 8331ad8..baeb233 100755
--- a/jstorm-core/src/main/java/backtype/storm/scheduler/WorkerSlot.java
+++ b/jstorm-core/src/main/java/backtype/storm/scheduler/WorkerSlot.java
@@ -20,36 +20,36 @@ package backtype.storm.scheduler;
 import java.io.Serializable;
 
 public class WorkerSlot implements Comparable<WorkerSlot>, Serializable {
-    
+
     private static final long serialVersionUID = -4451854497340313268L;
     String nodeId;
     int port;
-    
+
     public WorkerSlot(String nodeId, Number port) {
         this.nodeId = nodeId;
         this.port = port.intValue();
     }
-    
+
     public WorkerSlot() {
-        
+
     }
-    
+
     public String getNodeId() {
         return nodeId;
     }
-    
+
     public int getPort() {
         return port;
     }
-    
+
     public void setNodeId(String nodeId) {
         this.nodeId = nodeId;
     }
-    
+
     public void setPort(int port) {
         this.port = port;
     }
-    
+
     @Override
     public int hashCode() {
         final int prime = 31;
@@ -58,7 +58,7 @@ public class WorkerSlot implements Comparable<WorkerSlot>, Serializable {
         result = prime * result + port;
         return result;
     }
-    
+
     @Override
     public boolean equals(Object obj) {
         if (this == obj)
@@ -77,12 +77,12 @@ public class WorkerSlot implements Comparable<WorkerSlot>, Serializable {
             return false;
         return true;
     }
-    
+
     @Override
     public String toString() {
         return this.nodeId + ":" + this.port;
     }
-    
+
     @Override
     public int compareTo(WorkerSlot o) {
         String otherNode = o.getNodeId();
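
Because equals() and hashCode() are defined over (nodeId, port), WorkerSlot behaves as a value object and can safely key the executor-to-slot maps seen earlier. For example:

import backtype.storm.scheduler.WorkerSlot;

public class WorkerSlotDemo {
    public static void main(String[] args) {
        WorkerSlot a = new WorkerSlot("node-1", 6700);
        WorkerSlot b = new WorkerSlot("node-1", 6700);
        System.out.println(a.equals(b));                  // true: value equality on (nodeId, port)
        System.out.println(a.hashCode() == b.hashCode()); // true: consistent with equals
        System.out.println(a);                            // node-1:6700
    }
}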

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/scheduler/multitenant/DefaultPool.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/scheduler/multitenant/DefaultPool.java b/jstorm-core/src/main/java/backtype/storm/scheduler/multitenant/DefaultPool.java
index 3053b5b..8064a0d 100755
--- a/jstorm-core/src/main/java/backtype/storm/scheduler/multitenant/DefaultPool.java
+++ b/jstorm-core/src/main/java/backtype/storm/scheduler/multitenant/DefaultPool.java
@@ -36,184 +36,183 @@ import backtype.storm.scheduler.WorkerSlot;
  * A pool of machines that anyone can use, but topologies are not isolated
  */
 public class DefaultPool extends NodePool {
-  private static final Logger LOG = LoggerFactory.getLogger(DefaultPool.class);
-  private Set<Node> _nodes = new HashSet<Node>();
-  private HashMap<String, TopologyDetails> _tds = new HashMap<String, TopologyDetails>();
-  
-  @Override
-  public void addTopology(TopologyDetails td) {
-    String topId = td.getId();
-    LOG.debug("Adding in Topology {}", topId);
-    _tds.put(topId, td);
-    SchedulerAssignment assignment = _cluster.getAssignmentById(topId);
-    if (assignment != null) {
-      for (WorkerSlot ws: assignment.getSlots()) {
-        Node n = _nodeIdToNode.get(ws.getNodeId());
-        _nodes.add(n);
-      }
-    }
-  }
-
-  @Override
-  public boolean canAdd(TopologyDetails td) {
-    return true;
-  }
-
-  @Override
-  public Collection<Node> takeNodes(int nodesNeeded) {
-    HashSet<Node> ret = new HashSet<Node>();
-    LinkedList<Node> sortedNodes = new LinkedList<Node>(_nodes);
-    Collections.sort(sortedNodes, Node.FREE_NODE_COMPARATOR_DEC);
-    for (Node n: sortedNodes) {
-      if (nodesNeeded <= ret.size()) {
-        break;
-      }
-      if (n.isAlive()) {
-        n.freeAllSlots(_cluster);
-        _nodes.remove(n);
-        ret.add(n);
-      }
-    }
-    return ret;
-  }
-  
-  @Override
-  public int nodesAvailable() {
-    int total = 0;
-    for (Node n: _nodes) {
-      if (n.isAlive()) total++;
-    }
-    return total;
-  }
-  
-  @Override
-  public int slotsAvailable() {
-    return Node.countTotalSlotsAlive(_nodes);
-  }
-
-  @Override
-  public NodeAndSlotCounts getNodeAndSlotCountIfSlotsWereTaken(int slotsNeeded) {
-    int nodesFound = 0;
-    int slotsFound = 0;
-    LinkedList<Node> sortedNodes = new LinkedList<Node>(_nodes);
-    Collections.sort(sortedNodes, Node.FREE_NODE_COMPARATOR_DEC);
-    for (Node n: sortedNodes) {
-      if (slotsNeeded <= 0) {
-        break;
-      }
-      if (n.isAlive()) {
-        nodesFound++;
-        int totalSlotsFree = n.totalSlots();
-        slotsFound += totalSlotsFree;
-        slotsNeeded -= totalSlotsFree;
-      }
+    private static final Logger LOG = LoggerFactory.getLogger(DefaultPool.class);
+    private Set<Node> _nodes = new HashSet<Node>();
+    private HashMap<String, TopologyDetails> _tds = new HashMap<String, TopologyDetails>();
+
+    @Override
+    public void addTopology(TopologyDetails td) {
+        String topId = td.getId();
+        LOG.debug("Adding in Topology {}", topId);
+        _tds.put(topId, td);
+        SchedulerAssignment assignment = _cluster.getAssignmentById(topId);
+        if (assignment != null) {
+            for (WorkerSlot ws : assignment.getSlots()) {
+                Node n = _nodeIdToNode.get(ws.getNodeId());
+                _nodes.add(n);
+            }
+        }
     }
-    return new NodeAndSlotCounts(nodesFound, slotsFound);
-  }
-  
-  @Override
-  public Collection<Node> takeNodesBySlots(int slotsNeeded) {
-    HashSet<Node> ret = new HashSet<Node>();
-    LinkedList<Node> sortedNodes = new LinkedList<Node>(_nodes);
-    Collections.sort(sortedNodes, Node.FREE_NODE_COMPARATOR_DEC);
-    for (Node n: sortedNodes) {
-      if (slotsNeeded <= 0) {
-        break;
-      }
-      if (n.isAlive()) {
-        n.freeAllSlots(_cluster);
-        _nodes.remove(n);
-        ret.add(n);
-        slotsNeeded -= n.totalSlotsFree();
-      }
+
+    @Override
+    public boolean canAdd(TopologyDetails td) {
+        return true;
     }
-    return ret;
-  }
-
-  @Override
-  public void scheduleAsNeeded(NodePool... lesserPools) {
-    for (TopologyDetails td : _tds.values()) {
-      String topId = td.getId();
-      if (_cluster.needsScheduling(td)) {
-        LOG.debug("Scheduling topology {}",topId);
-        int totalTasks = td.getExecutors().size();
-        int origRequest = td.getNumWorkers();
-        int slotsRequested = Math.min(totalTasks, origRequest);
-        int slotsUsed = Node.countSlotsUsed(topId, _nodes);
-        int slotsFree = Node.countFreeSlotsAlive(_nodes);
-        //Check to see if we have enough slots before trying to get them
-        int slotsAvailable = 0;
-        if (slotsRequested > slotsFree) {
-          slotsAvailable = NodePool.slotsAvailable(lesserPools);
-        }
-        int slotsToUse = Math.min(slotsRequested - slotsUsed, slotsFree + slotsAvailable);
-        int executorsNotRunning = _cluster.getUnassignedExecutors(td).size();
-        LOG.debug("Slots... requested {} used {} free {} available {} to be used {}, executors not running {}", 
-            new Object[] {slotsRequested, slotsUsed, slotsFree, slotsAvailable, slotsToUse, executorsNotRunning}); 
-        if (slotsToUse <= 0) {
-          if (executorsNotRunning > 0) {
-            _cluster.setStatus(topId,"Not fully scheduled (No free slots in default pool) "+executorsNotRunning+" executors not scheduled");
-          } else {
-            if (slotsUsed < slotsRequested) {
-              _cluster.setStatus(topId,"Running with fewer slots than requested ("+slotsUsed+"/"+origRequest+")");
-            } else { //slotsUsed < origRequest
-              _cluster.setStatus(topId,"Fully Scheduled (requested "+origRequest+" slots, but could only use "+slotsUsed+")");
+
+    @Override
+    public Collection<Node> takeNodes(int nodesNeeded) {
+        HashSet<Node> ret = new HashSet<Node>();
+        LinkedList<Node> sortedNodes = new LinkedList<Node>(_nodes);
+        Collections.sort(sortedNodes, Node.FREE_NODE_COMPARATOR_DEC);
+        for (Node n : sortedNodes) {
+            if (nodesNeeded <= ret.size()) {
+                break;
+            }
+            if (n.isAlive()) {
+                n.freeAllSlots(_cluster);
+                _nodes.remove(n);
+                ret.add(n);
             }
-          }
-          continue;
         }
+        return ret;
+    }
 
-        int slotsNeeded = slotsToUse - slotsFree;
-        if (slotsNeeded > 0) {
-          _nodes.addAll(NodePool.takeNodesBySlot(slotsNeeded, lesserPools));
+    @Override
+    public int nodesAvailable() {
+        int total = 0;
+        for (Node n : _nodes) {
+            if (n.isAlive())
+                total++;
         }
+        return total;
+    }
+
+    @Override
+    public int slotsAvailable() {
+        return Node.countTotalSlotsAlive(_nodes);
+    }
 
-        if (executorsNotRunning <= 0) {
-          //There are free slots that we can take advantage of now.
-          for (Node n: _nodes) {
-            n.freeTopology(topId, _cluster); 
-          }
-          slotsFree = Node.countFreeSlotsAlive(_nodes);
-          slotsToUse = Math.min(slotsRequested, slotsFree);
+    @Override
+    public NodeAndSlotCounts getNodeAndSlotCountIfSlotsWereTaken(int slotsNeeded) {
+        int nodesFound = 0;
+        int slotsFound = 0;
+        LinkedList<Node> sortedNodes = new LinkedList<Node>(_nodes);
+        Collections.sort(sortedNodes, Node.FREE_NODE_COMPARATOR_DEC);
+        for (Node n : sortedNodes) {
+            if (slotsNeeded <= 0) {
+                break;
+            }
+            if (n.isAlive()) {
+                nodesFound++;
+                int totalSlotsFree = n.totalSlots();
+                slotsFound += totalSlotsFree;
+                slotsNeeded -= totalSlotsFree;
+            }
         }
-        
-        RoundRobinSlotScheduler slotSched = 
-          new RoundRobinSlotScheduler(td, slotsToUse, _cluster);
-        
-        LinkedList<Node> nodes = new LinkedList<Node>(_nodes);
-        while (true) {
-          Node n = null;
-          do {
-            if (nodes.isEmpty()) {
-              throw new IllegalStateException("This should not happen, we" +
-              " messed up and did not get enough slots");
+        return new NodeAndSlotCounts(nodesFound, slotsFound);
+    }
+
+    @Override
+    public Collection<Node> takeNodesBySlots(int slotsNeeded) {
+        HashSet<Node> ret = new HashSet<Node>();
+        LinkedList<Node> sortedNodes = new LinkedList<Node>(_nodes);
+        Collections.sort(sortedNodes, Node.FREE_NODE_COMPARATOR_DEC);
+        for (Node n : sortedNodes) {
+            if (slotsNeeded <= 0) {
+                break;
             }
-            n = nodes.peekFirst();
-            if (n.totalSlotsFree() == 0) {
-              nodes.remove();
-              n = null;
+            if (n.isAlive()) {
+                n.freeAllSlots(_cluster);
+                _nodes.remove(n);
+                ret.add(n);
+                slotsNeeded -= n.totalSlotsFree();
             }
-          } while (n == null);
-          if (!slotSched.assignSlotTo(n)) {
-            break;
-          }
         }
-        int afterSchedSlotsUsed = Node.countSlotsUsed(topId, _nodes);
-        if (afterSchedSlotsUsed < slotsRequested) {
-          _cluster.setStatus(topId,"Running with fewer slots than requested ("+afterSchedSlotsUsed+"/"+origRequest+")");
-        } else if (afterSchedSlotsUsed < origRequest) {
-          _cluster.setStatus(topId,"Fully Scheduled (requested "+origRequest+" slots, but could only use "+afterSchedSlotsUsed+")");
-        } else {
-          _cluster.setStatus(topId,"Fully Scheduled");
+        return ret;
+    }
+
+    @Override
+    public void scheduleAsNeeded(NodePool... lesserPools) {
+        for (TopologyDetails td : _tds.values()) {
+            String topId = td.getId();
+            if (_cluster.needsScheduling(td)) {
+                LOG.debug("Scheduling topology {}", topId);
+                int totalTasks = td.getExecutors().size();
+                int origRequest = td.getNumWorkers();
+                int slotsRequested = Math.min(totalTasks, origRequest);
+                int slotsUsed = Node.countSlotsUsed(topId, _nodes);
+                int slotsFree = Node.countFreeSlotsAlive(_nodes);
+                // Check to see if we have enough slots before trying to get them
+                int slotsAvailable = 0;
+                if (slotsRequested > slotsFree) {
+                    slotsAvailable = NodePool.slotsAvailable(lesserPools);
+                }
+                int slotsToUse = Math.min(slotsRequested - slotsUsed, slotsFree + slotsAvailable);
+                int executorsNotRunning = _cluster.getUnassignedExecutors(td).size();
+                LOG.debug("Slots... requested {} used {} free {} available {} to be used {}, executors not running {}", new Object[] { slotsRequested,
+                        slotsUsed, slotsFree, slotsAvailable, slotsToUse, executorsNotRunning });
+                if (slotsToUse <= 0) {
+                    if (executorsNotRunning > 0) {
+                        _cluster.setStatus(topId, "Not fully scheduled (No free slots in default pool) " + executorsNotRunning + " executors not scheduled");
+                    } else {
+                        if (slotsUsed < slotsRequested) {
+                            _cluster.setStatus(topId, "Running with fewer slots than requested (" + slotsUsed + "/" + origRequest + ")");
+                        } else { // slotsUsed < origRequest
+                            _cluster.setStatus(topId, "Fully Scheduled (requested " + origRequest + " slots, but could only use " + slotsUsed + ")");
+                        }
+                    }
+                    continue;
+                }
+
+                int slotsNeeded = slotsToUse - slotsFree;
+                if (slotsNeeded > 0) {
+                    _nodes.addAll(NodePool.takeNodesBySlot(slotsNeeded, lesserPools));
+                }
+
+                if (executorsNotRunning <= 0) {
+                    // There are free slots that we can take advantage of now.
+                    for (Node n : _nodes) {
+                        n.freeTopology(topId, _cluster);
+                    }
+                    slotsFree = Node.countFreeSlotsAlive(_nodes);
+                    slotsToUse = Math.min(slotsRequested, slotsFree);
+                }
+
+                RoundRobinSlotScheduler slotSched = new RoundRobinSlotScheduler(td, slotsToUse, _cluster);
+
+                LinkedList<Node> nodes = new LinkedList<Node>(_nodes);
+                while (true) {
+                    Node n = null;
+                    do {
+                        if (nodes.isEmpty()) {
+                            throw new IllegalStateException("This should not happen, we messed up and did not get enough slots");
+                        }
+                        n = nodes.peekFirst();
+                        if (n.totalSlotsFree() == 0) {
+                            nodes.remove();
+                            n = null;
+                        }
+                    } while (n == null);
+                    if (!slotSched.assignSlotTo(n)) {
+                        break;
+                    }
+                }
+                int afterSchedSlotsUsed = Node.countSlotsUsed(topId, _nodes);
+                if (afterSchedSlotsUsed < slotsRequested) {
+                    _cluster.setStatus(topId, "Running with fewer slots than requested (" + afterSchedSlotsUsed + "/" + origRequest + ")");
+                } else if (afterSchedSlotsUsed < origRequest) {
+                    _cluster.setStatus(topId, "Fully Scheduled (requested " + origRequest + " slots, but could only use " + afterSchedSlotsUsed + ")");
+                } else {
+                    _cluster.setStatus(topId, "Fully Scheduled");
+                }
+            } else {
+                _cluster.setStatus(topId, "Fully Scheduled");
+            }
         }
-      } else {
-        _cluster.setStatus(topId,"Fully Scheduled");
-      }
     }
-  }
-  
-  @Override
-  public String toString() {
-    return "DefaultPool  " + _nodes.size() + " nodes " + _tds.size() + " topologies";
-  }
+
+    @Override
+    public String toString() {
+        return "DefaultPool  " + _nodes.size() + " nodes " + _tds.size() + " topologies";
+    }
 }
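
To make the slot arithmetic in scheduleAsNeeded concrete, here is a small worked example with made-up numbers (an illustration only, not code from the commit):

    // Hypothetical topology: 10 executors, user asked for 4 workers.
    int totalTasks = 10, origRequest = 4;
    int slotsRequested = Math.min(totalTasks, origRequest);        // 4
    int slotsUsed = 1;        // slots the topology already holds
    int slotsFree = 2;        // free live slots in this pool
    int slotsAvailable = 5;   // slots obtainable from lesser pools
    int slotsToUse = Math.min(slotsRequested - slotsUsed,
                              slotsFree + slotsAvailable);         // min(3, 7) = 3
    int slotsNeeded = slotsToUse - slotsFree;                      // 1, so one node is
                                                                   // taken from lesser pools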

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/scheduler/multitenant/FreePool.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/scheduler/multitenant/FreePool.java b/jstorm-core/src/main/java/backtype/storm/scheduler/multitenant/FreePool.java
index c625895..239e529 100755
--- a/jstorm-core/src/main/java/backtype/storm/scheduler/multitenant/FreePool.java
+++ b/jstorm-core/src/main/java/backtype/storm/scheduler/multitenant/FreePool.java
@@ -34,92 +34,92 @@ import backtype.storm.scheduler.TopologyDetails;
  * All of the machines that currently have nothing assigned to them
  */
 public class FreePool extends NodePool {
-  private static final Logger LOG = LoggerFactory.getLogger(FreePool.class);
-  private Set<Node> _nodes = new HashSet<Node>();
-  private int _totalSlots = 0;
+    private static final Logger LOG = LoggerFactory.getLogger(FreePool.class);
+    private Set<Node> _nodes = new HashSet<Node>();
+    private int _totalSlots = 0;
 
-  @Override
-  public void init(Cluster cluster, Map<String, Node> nodeIdToNode) {
-    super.init(cluster, nodeIdToNode);
-    for (Node n: nodeIdToNode.values()) {
-      if(n.isTotallyFree() && n.isAlive()) {
-        _nodes.add(n);
-        _totalSlots += n.totalSlotsFree();
-      }
+    @Override
+    public void init(Cluster cluster, Map<String, Node> nodeIdToNode) {
+        super.init(cluster, nodeIdToNode);
+        for (Node n : nodeIdToNode.values()) {
+            if (n.isTotallyFree() && n.isAlive()) {
+                _nodes.add(n);
+                _totalSlots += n.totalSlotsFree();
+            }
+        }
+        LOG.debug("Found {} nodes with {} slots", _nodes.size(), _totalSlots);
     }
-    LOG.debug("Found {} nodes with {} slots", _nodes.size(), _totalSlots);
-  }
-  
-  @Override
-  public void addTopology(TopologyDetails td) {
-    throw new IllegalArgumentException("The free pool cannot run any topologies");
-  }
 
-  @Override
-  public boolean canAdd(TopologyDetails td) {
-    // The free pool never has anything running
-    return false;
-  }
-  
-  @Override
-  public Collection<Node> takeNodes(int nodesNeeded) {
-    HashSet<Node> ret = new HashSet<Node>();
-    Iterator<Node> it = _nodes.iterator();
-    while (it.hasNext() && nodesNeeded > ret.size()) {
-      Node n = it.next();
-      ret.add(n);
-      _totalSlots -= n.totalSlotsFree();
-      it.remove();
+    @Override
+    public void addTopology(TopologyDetails td) {
+        throw new IllegalArgumentException("The free pool cannot run any topologies");
     }
-    return ret;
-  }
-  
-  @Override
-  public int nodesAvailable() {
-    return _nodes.size();
-  }
 
-  @Override
-  public int slotsAvailable() {
-    return _totalSlots;
-  }
+    @Override
+    public boolean canAdd(TopologyDetails td) {
+        // The free pool never has anything running
+        return false;
+    }
+
+    @Override
+    public Collection<Node> takeNodes(int nodesNeeded) {
+        HashSet<Node> ret = new HashSet<Node>();
+        Iterator<Node> it = _nodes.iterator();
+        while (it.hasNext() && nodesNeeded > ret.size()) {
+            Node n = it.next();
+            ret.add(n);
+            _totalSlots -= n.totalSlotsFree();
+            it.remove();
+        }
+        return ret;
+    }
 
-  @Override
-  public Collection<Node> takeNodesBySlots(int slotsNeeded) {
-    HashSet<Node> ret = new HashSet<Node>();
-    Iterator<Node> it = _nodes.iterator();
-    while (it.hasNext() && slotsNeeded > 0) {
-      Node n = it.next();
-      ret.add(n);
-      _totalSlots -= n.totalSlotsFree();
-      slotsNeeded -= n.totalSlotsFree();
-      it.remove();
+    @Override
+    public int nodesAvailable() {
+        return _nodes.size();
     }
-    return ret;
-  }
-  
-  @Override
-  public NodeAndSlotCounts getNodeAndSlotCountIfSlotsWereTaken(int slotsNeeded) {
-    int slotsFound = 0;
-    int nodesFound = 0;
-    Iterator<Node> it = _nodes.iterator();
-    while (it.hasNext() && slotsNeeded > 0) {
-      Node n = it.next();
-      nodesFound++;
-      int totalSlots = n.totalSlots();
-      slotsFound += totalSlots;
-      slotsNeeded -= totalSlots;
+
+    @Override
+    public int slotsAvailable() {
+        return _totalSlots;
+    }
+
+    @Override
+    public Collection<Node> takeNodesBySlots(int slotsNeeded) {
+        HashSet<Node> ret = new HashSet<Node>();
+        Iterator<Node> it = _nodes.iterator();
+        while (it.hasNext() && slotsNeeded > 0) {
+            Node n = it.next();
+            ret.add(n);
+            _totalSlots -= n.totalSlotsFree();
+            slotsNeeded -= n.totalSlotsFree();
+            it.remove();
+        }
+        return ret;
     }
-    return new NodeAndSlotCounts(nodesFound, slotsFound);
-  }
 
-  @Override
-  public void scheduleAsNeeded(NodePool... lesserPools) {
-    //No topologies running so NOOP
-  }
-  
-  @Override
-  public String toString() {
-    return "FreePool of "+_nodes.size()+" nodes with "+_totalSlots+" slots";
-  }
+    @Override
+    public NodeAndSlotCounts getNodeAndSlotCountIfSlotsWereTaken(int slotsNeeded) {
+        int slotsFound = 0;
+        int nodesFound = 0;
+        Iterator<Node> it = _nodes.iterator();
+        while (it.hasNext() && slotsNeeded > 0) {
+            Node n = it.next();
+            nodesFound++;
+            int totalSlots = n.totalSlots();
+            slotsFound += totalSlots;
+            slotsNeeded -= totalSlots;
+        }
+        return new NodeAndSlotCounts(nodesFound, slotsFound);
+    }
+
+    @Override
+    public void scheduleAsNeeded(NodePool... lesserPools) {
+        // No topologies running so NOOP
+    }
+
+    @Override
+    public String toString() {
+        return "FreePool of " + _nodes.size() + " nodes with " + _totalSlots + " slots";
+    }
 }
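
Note that FreePool.takeNodesBySlots hands out whole nodes, so a caller can receive more slots than it asked for and slotsNeeded may go negative on the last iteration. A minimal sketch of that accounting (node sizes are hypothetical):

    import java.util.ArrayList;
    import java.util.List;

    public class FreePoolTakeExample {
        public static void main(String[] args) {
            int[] freeSlotsPerNode = {4, 4, 4}; // three idle nodes, 4 slots each
            int slotsNeeded = 6;
            List<Integer> taken = new ArrayList<Integer>();
            for (int slots : freeSlotsPerNode) {
                if (slotsNeeded <= 0) break;
                taken.add(slots);      // the whole node leaves the pool
                slotsNeeded -= slots;  // 6 -> 2 -> -2
            }
            System.out.println(taken.size() + " nodes taken"); // 2 nodes, 8 slots
        }
    }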


[03/51] [partial] storm git commit: Update JStorm to latest release 2.1.0

Posted by pt...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/SpoutExecutors.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/SpoutExecutors.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/SpoutExecutors.java
index 05f745c..4bfbf2d 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/SpoutExecutors.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/SpoutExecutors.java
@@ -17,53 +17,56 @@
  */
 package com.alibaba.jstorm.task.execute.spout;
 
+import java.util.List;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import backtype.storm.Config;
-import backtype.storm.Constants;
-import backtype.storm.spout.ISpoutOutputCollector;
-import backtype.storm.spout.SpoutOutputCollector;
-import backtype.storm.task.TopologyContext;
-import backtype.storm.tuple.BatchTuple;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.utils.DisruptorQueue;
-import backtype.storm.utils.WorkerClassLoader;
-
 import com.alibaba.jstorm.callback.AsyncLoopThread;
 import com.alibaba.jstorm.client.ConfigExtension;
-import com.alibaba.jstorm.common.metric.Histogram;
+import com.alibaba.jstorm.cluster.Common;
+import com.alibaba.jstorm.common.metric.AsmGauge;
+import com.alibaba.jstorm.common.metric.AsmHistogram;
 import com.alibaba.jstorm.common.metric.TimerRatio;
 import com.alibaba.jstorm.daemon.worker.timer.TaskBatchFlushTrigger;
 import com.alibaba.jstorm.daemon.worker.timer.TimerConstants;
 import com.alibaba.jstorm.daemon.worker.timer.TimerTrigger;
 import com.alibaba.jstorm.metric.JStormMetrics;
+import com.alibaba.jstorm.metric.JStormMetricsReporter;
 import com.alibaba.jstorm.metric.MetricDef;
+import com.alibaba.jstorm.metric.MetricType;
+import com.alibaba.jstorm.metric.MetricUtils;
 import com.alibaba.jstorm.task.Task;
-import com.alibaba.jstorm.task.TaskBaseMetric;
-import com.alibaba.jstorm.task.TaskStatus;
-import com.alibaba.jstorm.task.TaskTransfer;
 import com.alibaba.jstorm.task.TaskBatchTransfer;
+import com.alibaba.jstorm.task.TaskTransfer;
 import com.alibaba.jstorm.task.acker.Acker;
 import com.alibaba.jstorm.task.comm.TaskSendTargets;
 import com.alibaba.jstorm.task.comm.TupleInfo;
-import com.alibaba.jstorm.task.error.ITaskReportErr;
 import com.alibaba.jstorm.task.execute.BaseExecutors;
-import com.alibaba.jstorm.task.heartbeat.TaskHeartbeatRunable;
+import com.alibaba.jstorm.task.master.TopoMasterCtrlEvent;
 import com.alibaba.jstorm.utils.JStormUtils;
 import com.alibaba.jstorm.utils.RotatingMap;
+import com.alibaba.jstorm.utils.TimeUtils;
+import com.codahale.metrics.Gauge;
 import com.lmax.disruptor.EventHandler;
 
+import backtype.storm.Config;
+import backtype.storm.Constants;
+import backtype.storm.spout.ISpout;
+import backtype.storm.spout.SpoutOutputCollector;
+import backtype.storm.task.TopologyContext;
+import backtype.storm.tuple.BatchTuple;
+import backtype.storm.tuple.Tuple;
+import backtype.storm.utils.WorkerClassLoader;
+
 /**
  * spout executor
- * 
+ * <p/>
  * All spout actions will be done here
- * 
+ *
  * @author yannian/Longda
- * 
  */
 public class SpoutExecutors extends BaseExecutors implements EventHandler {
     private static Logger LOG = LoggerFactory.getLogger(SpoutExecutors.class);
@@ -73,123 +76,107 @@ public class SpoutExecutors extends BaseExecutors implements EventHandler {
     protected backtype.storm.spout.ISpout spout;
     protected RotatingMap<Long, TupleInfo> pending;
 
-    protected ISpoutOutputCollector output_collector;
+    protected SpoutOutputCollector outputCollector;
 
-    protected boolean firstTime = true;
+    protected AsmHistogram nextTupleTimer;
+    protected AsmHistogram ackerTimer;
+    protected TimerRatio emptyCpuGauge;
 
-    protected Histogram nextTupleTimer;
-    protected Histogram ackerTimer;
-    protected TimerRatio emptyCpuCounter;
+    private String topologyId;
+    private String componentId;
+    private int taskId;
 
     protected AsyncLoopThread ackerRunnableThread;
 
     protected boolean isSpoutFullSleep;
 
-    public SpoutExecutors(Task task, backtype.storm.spout.ISpout _spout,
-            TaskTransfer _transfer_fn,
-            Map<Integer, DisruptorQueue> innerTaskTransfer, Map _storm_conf,
-            TaskSendTargets sendTargets, TaskStatus taskStatus,
-            TopologyContext topology_context, TopologyContext _user_context,
-            TaskBaseMetric _task_stats, ITaskReportErr _report_error) {
-        super(task, _transfer_fn, _storm_conf, innerTaskTransfer,
-                topology_context, _user_context, _task_stats, taskStatus,
-                _report_error);
+    //, backtype.storm.spout.ISpout _spout, TaskTransfer _transfer_fn, Map<Integer, DisruptorQueue> innerTaskTransfer,
+    //Map _storm_conf, TaskSendTargets sendTargets, TaskStatus taskStatus, TopologyContext topology_context, TopologyContext _user_context,
+    //TaskBaseMetric _task_stats, ITaskReportErr _report_error, JStormMetricsReporter metricReporter
+    public SpoutExecutors(Task task) {
+        super(task);
+
+        this.spout = (ISpout)task.getTaskObj();
 
-        this.spout = _spout;
+        this.max_spout_pending = JStormUtils.parseInt(storm_conf.get(Config.TOPOLOGY_MAX_SPOUT_PENDING));
 
-        this.max_spout_pending =
-                JStormUtils.parseInt(storm_conf
-                        .get(Config.TOPOLOGY_MAX_SPOUT_PENDING));
+        this.topologyId = sysTopologyCtx.getTopologyId();
+        this.componentId = sysTopologyCtx.getThisComponentId();
+        this.taskId = task.getTaskId();
 
         this.nextTupleTimer =
-                JStormMetrics.registerTaskHistogram(taskId,
-                        MetricDef.EXECUTE_TIME);
+                (AsmHistogram) JStormMetrics.registerTaskMetric(
+                        MetricUtils.taskMetricName(topologyId, componentId, taskId, MetricDef.EXECUTE_TIME, MetricType.HISTOGRAM), new AsmHistogram());
 
         this.ackerTimer =
-                JStormMetrics.registerTaskHistogram(taskId,
-                        MetricDef.ACKER_TIME);
+                (AsmHistogram) JStormMetrics.registerTaskMetric(
+                        MetricUtils.taskMetricName(topologyId, componentId, taskId, MetricDef.ACKER_TIME, MetricType.HISTOGRAM), new AsmHistogram());
 
-        this.emptyCpuCounter = new TimerRatio();
-        JStormMetrics.registerTaskGauge(emptyCpuCounter, taskId,
-                MetricDef.EMPTY_CPU_RATIO);
+        this.emptyCpuGauge = new TimerRatio();
+        JStormMetrics.registerTaskMetric(MetricUtils.taskMetricName(topologyId, componentId, taskId, MetricDef.EMPTY_CPU_RATIO, MetricType.GAUGE),
+                new AsmGauge(emptyCpuGauge));
 
         isSpoutFullSleep = ConfigExtension.isSpoutPendFullSleep(storm_conf);
 
-        if (ConfigExtension.isTaskBatchTuple(storm_conf)) {
-            TaskBatchFlushTrigger batchFlushTrigger =
-                    new TaskBatchFlushTrigger(5, idStr
-                            + Constants.SYSTEM_COMPONENT_ID,
-                            (TaskBatchTransfer) _transfer_fn);
-            batchFlushTrigger.register(TimeUnit.MILLISECONDS);
-        }
-
         LOG.info("isSpoutFullSleep:" + isSpoutFullSleep);
-
-    }
-
-    public void prepare(TaskSendTargets sendTargets, TaskTransfer transferFn,
-            TopologyContext topologyContext) {
-
-        JStormMetrics.registerTaskGauge(
-                new com.codahale.metrics.Gauge<Double>() {
-
+        
+        mkPending();
+        
+        JStormMetrics.registerTaskMetric(
+        		MetricUtils.taskMetricName(topologyId, componentId, taskId, MetricDef.PENDING_MAP, MetricType.GAUGE), new AsmGauge(
+                new Gauge<Double>() {
                     @Override
                     public Double getValue() {
                         return (double) pending.size();
                     }
-
-                }, taskId, MetricDef.PENDING_MAP);
+                }));
 
         // collector; in fact it calls send_spout_msg
-        this.output_collector =
-                new SpoutCollector(taskId, spout, task_stats, sendTargets,
-                        storm_conf, transferFn, pending, topologyContext,
-                        exeQueue, report_error);
-
-        try {
-            WorkerClassLoader.switchThreadContext();
-            this.spout.open(storm_conf, userTopologyCtx,
-                    new SpoutOutputCollector(output_collector));
-        } catch (Throwable e) {
-            error = e;
-            LOG.error("spout open error ", e);
-            report_error.report(e);
-        } finally {
-            WorkerClassLoader.restoreThreadContext();
-        }
+        SpoutCollector collector = new SpoutCollector(task, pending, exeQueue);
+        this.outputCollector = new SpoutOutputCollector(collector);
+        taskTransfer.getBackpressureController().setOutputCollector(outputCollector);
+        taskHbTrigger.setSpoutOutputCollector(outputCollector);
 
         LOG.info("Successfully create SpoutExecutors " + idStr);
-
+    }
+    
+    public void mkPending() {
+    	// this function will be overridden by subclasses
+        throw new RuntimeException("Should override this function");
     }
 
-    public void nextTuple() {
-        if (firstTime == true) {
+    @Override
+    public void init() throws Exception {
+        
+        this.spout.open(storm_conf, userTopologyCtx, outputCollector);
 
-            int delayRun = ConfigExtension.getSpoutDelayRunSeconds(storm_conf);
+        LOG.info("Successfully open SpoutExecutors " + idStr);
+        
+		int delayRun = ConfigExtension.getSpoutDelayRunSeconds(storm_conf);
 
-            // wait other bolt is ready
-            JStormUtils.sleepMs(delayRun * 1000);
+		// wait until other bolts are ready
+		JStormUtils.sleepMs(delayRun * 1000);
 
-            emptyCpuCounter.init();
+		if (taskStatus.isRun()) {
+			spout.activate();
+		} else {
+			spout.deactivate();
+		}
 
-            if (taskStatus.isRun() == true) {
-                spout.activate();
-            } else {
-                spout.deactivate();
-            }
+		LOG.info(idStr + " is ready ");
 
-            firstTime = false;
-            LOG.info(idStr + " is ready ");
-        }
+    }
 
-        if (taskStatus.isRun() == false) {
+    public void nextTuple() {
+        
+        if (!taskStatus.isRun()) {
             JStormUtils.sleepMs(1);
             return;
         }
 
         // if acking is disabled, the pending map will always be empty
         if (max_spout_pending == null || pending.size() < max_spout_pending) {
-            emptyCpuCounter.stop();
+            emptyCpuGauge.stop();
 
             long start = System.nanoTime();
             try {
@@ -200,15 +187,13 @@ public class SpoutExecutors extends BaseExecutors implements EventHandler {
                 report_error.report(e);
             } finally {
                 long end = System.nanoTime();
-                nextTupleTimer.update((end - start) / 1000000.0d);
+                nextTupleTimer.update((end - start) / TimeUtils.NS_PER_US);
             }
-
-            return;
         } else {
             if (isSpoutFullSleep) {
                 JStormUtils.sleepMs(1);
             }
-            emptyCpuCounter.start();
+            emptyCpuGauge.start();
             // just return, no sleep
         }
     }
@@ -221,25 +206,23 @@ public class SpoutExecutors extends BaseExecutors implements EventHandler {
 
     /**
      * Handle acker message
-     * 
-     * @see com.lmax.disruptor.EventHandler#onEvent(java.lang.Object, long,
-     *      boolean)
+     *
+     * @see EventHandler#onEvent(Object, long, boolean)
      */
     @Override
-    public void onEvent(Object event, long sequence, boolean endOfBatch)
-            throws Exception {
+    public void onEvent(Object event, long sequence, boolean endOfBatch) throws Exception {
         long start = System.nanoTime();
         try {
-
             if (event == null) {
                 return;
             }
-
             Runnable runnable = null;
             if (event instanceof Tuple) {
+                processControlEvent();
                 runnable = processTupleEvent((Tuple) event);
             } else if (event instanceof BatchTuple) {
                 for (Tuple tuple : ((BatchTuple) event).getTuples()) {
+                    processControlEvent();
                     runnable = processTupleEvent(tuple);
                     if (runnable != null) {
                         runnable.run();
@@ -257,8 +240,7 @@ public class SpoutExecutors extends BaseExecutors implements EventHandler {
                 runnable = (Runnable) event;
             } else {
 
-                LOG.warn("Receive one unknow event-" + event.toString() + " "
-                        + idStr);
+                LOG.warn("Received one unknown event-" + event.toString() + " " + idStr);
                 return;
             }
 
@@ -272,42 +254,43 @@ public class SpoutExecutors extends BaseExecutors implements EventHandler {
             }
         } finally {
             long end = System.nanoTime();
-            ackerTimer.update((end - start) / 1000000.0d);
+            ackerTimer.update((end - start) / TimeUtils.NS_PER_US);
         }
     }
 
     private Runnable processTupleEvent(Tuple event) {
-        Runnable runnable;
+        Runnable runnable = null;
         Tuple tuple = (Tuple) event;
-        Object id = tuple.getValue(0);
-        Object obj = pending.remove((Long) id);
-
-        if (obj == null) {
-            if (isDebug) {
-                LOG.info("Pending map no entry:" + id);
-            }
-            runnable = null;
+        if (event.getSourceStreamId().equals(Common.TOPOLOGY_MASTER_CONTROL_STREAM_ID)) {
+            TopoMasterCtrlEvent ctrlEvent = (TopoMasterCtrlEvent) tuple.getValueByField("ctrlEvent");
+            taskTransfer.getBackpressureController().control(ctrlEvent);
         } else {
-            TupleInfo tupleInfo = (TupleInfo) obj;
+            Object id = tuple.getValue(0);
+            Object obj = pending.remove((Long) id);
 
-            String stream_id = tuple.getSourceStreamId();
+            if (obj == null) {
+                if (isDebug) {
+                    LOG.info("Pending map has no entry: " + id);
+                }
+                runnable = null;
+            } else {
+                TupleInfo tupleInfo = (TupleInfo) obj;
 
-            if (stream_id.equals(Acker.ACKER_ACK_STREAM_ID)) {
+                String stream_id = tuple.getSourceStreamId();
 
-                runnable =
-                        new AckSpoutMsg(spout, tupleInfo, task_stats, isDebug);
-            } else if (stream_id.equals(Acker.ACKER_FAIL_STREAM_ID)) {
-                runnable =
-                        new FailSpoutMsg(id, spout, tupleInfo, task_stats,
-                                isDebug);
-            } else {
-                LOG.warn("Receive one unknow source Tuple " + idStr);
-                runnable = null;
+                if (stream_id.equals(Acker.ACKER_ACK_STREAM_ID)) {
+
+                    runnable = new AckSpoutMsg(spout, tuple, tupleInfo, task_stats, isDebug);
+                } else if (stream_id.equals(Acker.ACKER_FAIL_STREAM_ID)) {
+                    runnable = new FailSpoutMsg(id, spout, tupleInfo, task_stats, isDebug);
+                } else {
+                    LOG.warn("Received one unknown source Tuple " + idStr);
+                    runnable = null;
+                }
             }
-        }
 
-        task_stats.recv_tuple(tuple.getSourceComponent(),
-                tuple.getSourceStreamId());
+            task_stats.recv_tuple(tuple.getSourceComponent(), tuple.getSourceStreamId());
+        }
         return runnable;
     }
 
@@ -317,28 +300,23 @@ public class SpoutExecutors extends BaseExecutors implements EventHandler {
 
     private void processTimerEvent(TimerTrigger.TimerEvent event) {
         switch (event.getOpCode()) {
-        case TimerConstants.ROTATING_MAP: {
-            Map<Long, TupleInfo> timeoutMap = pending.rotate();
-            for (java.util.Map.Entry<Long, TupleInfo> entry : timeoutMap
-                    .entrySet()) {
-                TupleInfo tupleInfo = entry.getValue();
-                FailSpoutMsg fail =
-                        new FailSpoutMsg(entry.getKey(), spout,
-                                (TupleInfo) tupleInfo, task_stats, isDebug);
-                fail.run();
+            case TimerConstants.ROTATING_MAP: {
+                Map<Long, TupleInfo> timeoutMap = pending.rotate();
+                for (Map.Entry<Long, TupleInfo> entry : timeoutMap.entrySet()) {
+                    TupleInfo tupleInfo = entry.getValue();
+                    FailSpoutMsg fail = new FailSpoutMsg(entry.getKey(), spout, (TupleInfo) tupleInfo, task_stats, isDebug);
+                    fail.run();
+                }
+                break;
+            }
+            case TimerConstants.TASK_HEARTBEAT: {
+                taskHbTrigger.setExeThreadHbTime(TimeUtils.current_time_secs());
+                break;
+            }
+            default: {
+                LOG.warn("Received unsupported timer event, opcode=" + event.getOpCode());
+                break;
             }
-            break;
-        }
-        case TimerConstants.TASK_HEARTBEAT: {
-            Integer taskId = (Integer) event.getMsg();
-            TaskHeartbeatRunable.updateTaskHbStats(taskId, task);
-            break;
-        }
-        default: {
-            LOG.warn("Receive unsupported timer event, opcode="
-                    + event.getOpCode());
-            break;
-        }
         }
     }
 
@@ -349,9 +327,12 @@ public class SpoutExecutors extends BaseExecutors implements EventHandler {
             if (event instanceof TimerTrigger.TimerEvent) {
                 processTimerEvent((TimerTrigger.TimerEvent) event);
             } else {
-                LOG.warn("Received unknown control event, "
-                        + event.getClass().getName());
+                LOG.warn("Received unknown control event, " + event.getClass().getName());
             }
         }
     }
+    
+    public Object getOutputCollector() {
+    	return outputCollector;
+    }
 }
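
The metric code above moves from per-task helper methods to name-based registration. A minimal sketch of the same pattern, reusing only calls that appear in this hunk; note that if TimeUtils.NS_PER_US is 1000, as the name suggests, the recorded unit becomes microseconds, whereas the replaced 1000000.0d divisor produced milliseconds:

    // Sketch: register a task histogram and time a call, as in the diff above.
    AsmHistogram executeTime = (AsmHistogram) JStormMetrics.registerTaskMetric(
            MetricUtils.taskMetricName(topologyId, componentId, taskId,
                    MetricDef.EXECUTE_TIME, MetricType.HISTOGRAM),
            new AsmHistogram());

    long start = System.nanoTime();
    try {
        spout.nextTuple();
    } finally {
        executeTime.update((System.nanoTime() - start) / TimeUtils.NS_PER_US);
    }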

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/SpoutTimeoutCallBack.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/SpoutTimeoutCallBack.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/SpoutTimeoutCallBack.java
index 968831b..b64bc30 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/SpoutTimeoutCallBack.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/execute/spout/SpoutTimeoutCallBack.java
@@ -31,8 +31,7 @@ import com.alibaba.jstorm.utils.ExpiredCallback;
 import com.alibaba.jstorm.utils.JStormUtils;
 
 public class SpoutTimeoutCallBack<K, V> implements ExpiredCallback<K, V> {
-    private static Logger LOG = LoggerFactory
-            .getLogger(SpoutTimeoutCallBack.class);
+    private static Logger LOG = LoggerFactory.getLogger(SpoutTimeoutCallBack.class);
 
     private DisruptorQueue disruptorEventQueue;
     private backtype.storm.spout.ISpout spout;
@@ -40,16 +39,12 @@ public class SpoutTimeoutCallBack<K, V> implements ExpiredCallback<K, V> {
     private TaskBaseMetric task_stats;
     private boolean isDebug;
 
-    public SpoutTimeoutCallBack(DisruptorQueue disruptorEventQueue,
-            backtype.storm.spout.ISpout _spout, Map _storm_conf,
-            TaskBaseMetric stat) {
+    public SpoutTimeoutCallBack(DisruptorQueue disruptorEventQueue, backtype.storm.spout.ISpout _spout, Map _storm_conf, TaskBaseMetric stat) {
         this.storm_conf = _storm_conf;
         this.disruptorEventQueue = disruptorEventQueue;
         this.spout = _spout;
         this.task_stats = stat;
-        this.isDebug =
-                JStormUtils.parseBoolean(storm_conf.get(Config.TOPOLOGY_DEBUG),
-                        false);
+        this.isDebug = JStormUtils.parseBoolean(storm_conf.get(Config.TOPOLOGY_DEBUG), false);
     }
 
     /**
@@ -62,9 +57,7 @@ public class SpoutTimeoutCallBack<K, V> implements ExpiredCallback<K, V> {
         }
         try {
             TupleInfo tupleInfo = (TupleInfo) val;
-            FailSpoutMsg fail =
-                    new FailSpoutMsg(key, spout, (TupleInfo) tupleInfo,
-                            task_stats, isDebug);
+            FailSpoutMsg fail = new FailSpoutMsg(key, spout, (TupleInfo) tupleInfo, task_stats, isDebug);
 
             disruptorEventQueue.publish(fail);
         } catch (Exception e) {

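SpoutTimeoutCallBack is the expiry hook for the spout's pending RotatingMap: instead of failing the tuple inline, it publishes a FailSpoutMsg to the executor's event queue so the fail runs on the executor thread. A wiring sketch, assuming a RotatingMap constructor that accepts an ExpiredCallback and a hypothetical bucket count:

    // Sketch only; the bucket count and constructor shape are assumptions.
    SpoutTimeoutCallBack<Long, TupleInfo> timeoutCallback =
            new SpoutTimeoutCallBack<Long, TupleInfo>(exeQueue, spout, storm_conf, task_stats);
    RotatingMap<Long, TupleInfo> pending =
            new RotatingMap<Long, TupleInfo>(3 /* buckets, hypothetical */, timeoutCallback);
    // Each ROTATING_MAP timer event then calls pending.rotate(), expiring old entries.
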
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/group/MkCustomGrouper.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/group/MkCustomGrouper.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/group/MkCustomGrouper.java
index bb6ad9c..46eefe8 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/group/MkCustomGrouper.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/group/MkCustomGrouper.java
@@ -34,9 +34,7 @@ public class MkCustomGrouper {
 
     private int myTaskId;
 
-    public MkCustomGrouper(TopologyContext context,
-            CustomStreamGrouping _grouping, GlobalStreamId stream,
-            List<Integer> targetTask, int myTaskId) {
+    public MkCustomGrouper(TopologyContext context, CustomStreamGrouping _grouping, GlobalStreamId stream, List<Integer> targetTask, int myTaskId) {
         this.myTaskId = myTaskId;
         this.grouping = _grouping;
         this.grouping.prepare(context, stream, targetTask);

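MkCustomGrouper only delegates to the user-supplied CustomStreamGrouping. For reference, a minimal implementation of that interface (the prepare signature is inferred from the call above, chooseTasks follows the stock backtype.storm interface, and the modulo routing is purely illustrative):

    import java.io.Serializable;
    import java.util.List;

    import backtype.storm.generated.GlobalStreamId;
    import backtype.storm.grouping.CustomStreamGrouping;
    import backtype.storm.task.TopologyContext;

    import com.alibaba.jstorm.utils.JStormUtils;

    public class ModHashGrouping implements CustomStreamGrouping, Serializable {
        private List<Integer> targetTasks;

        public void prepare(TopologyContext context, GlobalStreamId stream, List<Integer> targetTasks) {
            this.targetTasks = targetTasks;
        }

        public List<Integer> chooseTasks(int fromTaskId, List<Object> values) {
            // Route on the first value; the same key always hits the same task.
            int idx = Math.abs(values.get(0).hashCode() % targetTasks.size());
            return JStormUtils.mk_list(targetTasks.get(idx));
        }
    }
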
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/group/MkFieldsGrouper.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/group/MkFieldsGrouper.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/group/MkFieldsGrouper.java
index 3bf6518..66f2567 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/group/MkFieldsGrouper.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/group/MkFieldsGrouper.java
@@ -26,17 +26,16 @@ import com.alibaba.jstorm.utils.JStormUtils;
 
 /**
  * field grouping
- * 
+ *
  * @author yannian
- * 
+ *
  */
 public class MkFieldsGrouper {
     private Fields out_fields;
     private Fields group_fields;
     private List<Integer> out_tasks;
 
-    public MkFieldsGrouper(Fields _out_fields, Fields _group_fields,
-            List<Integer> _out_tasks) {
+    public MkFieldsGrouper(Fields _out_fields, Fields _group_fields, List<Integer> _out_tasks) {
 
         for (Iterator<String> it = _group_fields.iterator(); it.hasNext();) {
             String groupField = it.next();
@@ -52,8 +51,7 @@ public class MkFieldsGrouper {
     }
 
     public List<Integer> grouper(List<Object> values) {
-        int hashcode =
-                this.out_fields.select(this.group_fields, values).hashCode();
+        int hashcode = this.out_fields.select(this.group_fields, values).hashCode();
         int group = Math.abs(hashcode % this.out_tasks.size());
         return JStormUtils.mk_list(out_tasks.get(group));
     }
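
A worked illustration of the grouper above (field values and task ids are hypothetical): the grouping fields are selected out of the tuple, the hashCode of the resulting list is taken, and the result is mapped onto the target task list by modulo, so equal keys always reach the same task.

    import java.util.Arrays;
    import java.util.List;

    public class FieldsGroupingExample {
        public static void main(String[] args) {
            List<Integer> outTasks = Arrays.asList(7, 8, 9);           // target tasks
            List<Object> groupValues = Arrays.<Object>asList("alice"); // selected grouping fields
            int hashcode = groupValues.hashCode();
            int group = Math.abs(hashcode % outTasks.size());
            System.out.println("route to task " + outTasks.get(group));
        }
    }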

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/group/MkGrouper.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/group/MkGrouper.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/group/MkGrouper.java
index 5408afd..30d641c 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/group/MkGrouper.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/group/MkGrouper.java
@@ -17,14 +17,6 @@
  */
 package com.alibaba.jstorm.task.group;
 
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Random;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import backtype.storm.generated.GlobalStreamId;
 import backtype.storm.generated.Grouping;
 import backtype.storm.generated.JavaObject;
@@ -32,11 +24,17 @@ import backtype.storm.grouping.CustomStreamGrouping;
 import backtype.storm.task.TopologyContext;
 import backtype.storm.tuple.Fields;
 import backtype.storm.utils.Utils;
-
 import com.alibaba.jstorm.daemon.worker.WorkerData;
 import com.alibaba.jstorm.utils.JStormUtils;
 import com.alibaba.jstorm.utils.RandomRange;
 import com.alibaba.jstorm.utils.Thrift;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Random;
 
 /**
 * Grouper: decides which task(s) a tuple should be sent to
@@ -66,9 +64,8 @@ public class MkGrouper {
     private MkLocalShuffer local_shuffer_grouper;
     private MkLocalFirst localFirst;
 
-    public MkGrouper(TopologyContext _topology_context, Fields _out_fields,
-            Grouping _thrift_grouping, List<Integer> _outTasks,
-            String streamId, WorkerData workerData) {
+    public MkGrouper(TopologyContext _topology_context, Fields _out_fields, Grouping _thrift_grouping, List<Integer> _outTasks, String streamId,
+            WorkerData workerData) {
         this.topology_context = _topology_context;
         this.out_fields = _out_fields;
         this.thrift_grouping = _thrift_grouping;
@@ -83,8 +80,7 @@ public class MkGrouper {
         this.grouptype = this.parseGroupType(workerData);
 
         String id = _topology_context.getThisTaskId() + ":" + streamId;
-        LOG.info(id + " grouptype is " + grouptype + ", out_tasks is "
-                + out_tasks + ", local_tasks" + local_tasks);
+        LOG.info(id + " grouptype is " + grouptype + ", out_tasks is " + out_tasks + ", local_tasks" + local_tasks);
 
     }
 
@@ -104,12 +100,10 @@ public class MkGrouper {
                 grouperType = GrouperType.global;
             } else {
 
-                List<String> fields_group =
-                        Thrift.fieldGrouping(thrift_grouping);
+                List<String> fields_group = Thrift.fieldGrouping(thrift_grouping);
                 Fields fields = new Fields(fields_group);
 
-                fields_grouper =
-                        new MkFieldsGrouper(out_fields, fields, out_tasks);
+                fields_grouper = new MkFieldsGrouper(out_fields, fields, out_tasks);
 
                 // hashcode by fields
                 grouperType = GrouperType.fields;
@@ -132,29 +126,23 @@ public class MkGrouper {
             int myTaskId = topology_context.getThisTaskId();
             String componentId = topology_context.getComponentId(myTaskId);
             GlobalStreamId stream = new GlobalStreamId(componentId, streamId);
-            custom_grouper =
-                    new MkCustomGrouper(topology_context, g, stream, out_tasks,
-                            myTaskId);
+            custom_grouper = new MkCustomGrouper(topology_context, g, stream, out_tasks, myTaskId);
             grouperType = GrouperType.custom_obj;
         } else if (Grouping._Fields.CUSTOM_SERIALIZED.equals(fields)) {
             // user custom group by serialized Object
             byte[] obj = thrift_grouping.get_custom_serialized();
-            CustomStreamGrouping g =
-                    (CustomStreamGrouping) Utils.javaDeserialize(obj);
+            CustomStreamGrouping g = (CustomStreamGrouping) Utils.javaDeserialize(obj);
             int myTaskId = topology_context.getThisTaskId();
             String componentId = topology_context.getComponentId(myTaskId);
             GlobalStreamId stream = new GlobalStreamId(componentId, streamId);
-            custom_grouper =
-                    new MkCustomGrouper(topology_context, g, stream, out_tasks,
-                            myTaskId);
+            custom_grouper = new MkCustomGrouper(topology_context, g, stream, out_tasks, myTaskId);
             grouperType = GrouperType.custom_serialized;
         } else if (Grouping._Fields.DIRECT.equals(fields)) {
             // directly send to a special task
             grouperType = GrouperType.direct;
         } else if (Grouping._Fields.LOCAL_OR_SHUFFLE.equals(fields)) {
             grouperType = GrouperType.local_or_shuffle;
-            local_shuffer_grouper =
-                    new MkLocalShuffer(local_tasks, out_tasks, workerData);
+            local_shuffer_grouper = new MkLocalShuffer(local_tasks, out_tasks, workerData);
         } else if (Grouping._Fields.LOCAL_FIRST.equals(fields)) {
             grouperType = GrouperType.localFirst;
             localFirst = new MkLocalFirst(local_tasks, out_tasks, workerData);

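Each branch of parseGroupType corresponds to a grouping declared when the topology was built. For orientation, these are the standard TopologyBuilder calls that produce the matching thrift Grouping values (MySpout and MyBolt are hypothetical placeholders; the snippet is a sketch, not part of this commit):

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new MySpout(), 2);
    builder.setBolt("bolt", new MyBolt(), 4)
           .fieldsGrouping("spout", new Fields("user-id"));     // -> GrouperType.fields
    // .globalGrouping("spout")                 -> GrouperType.global
    // .shuffleGrouping("spout")                -> shuffle
    // .localOrShuffleGrouping("spout")         -> GrouperType.local_or_shuffle
    // .directGrouping("spout")                 -> GrouperType.direct
    // .customGrouping("spout", new ModHashGrouping()) -> custom_serialized
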
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/group/MkLocalFirst.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/group/MkLocalFirst.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/group/MkLocalFirst.java
index 56f9175..92fa18b 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/group/MkLocalFirst.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/group/MkLocalFirst.java
@@ -40,8 +40,7 @@ import com.alibaba.jstorm.utils.RandomRange;
  * @version
  */
 public class MkLocalFirst extends Shuffer {
-    private static final Logger LOG = LoggerFactory
-            .getLogger(MkLocalFirst.class);
+    private static final Logger LOG = LoggerFactory.getLogger(MkLocalFirst.class);
 
     private List<Integer> allOutTasks = new ArrayList<Integer>();
     private List<Integer> localOutTasks = new ArrayList<Integer>();
@@ -52,8 +51,7 @@ public class MkLocalFirst extends Shuffer {
     private WorkerData workerData;
     private IntervalCheck intervalCheck;
 
-    public MkLocalFirst(List<Integer> workerTasks, List<Integer> allOutTasks,
-            WorkerData workerData) {
+    public MkLocalFirst(List<Integer> workerTasks, List<Integer> allOutTasks, WorkerData workerData) {
         super(workerData);
 
         intervalCheck = new IntervalCheck();
@@ -74,7 +72,6 @@ public class MkLocalFirst extends Shuffer {
         if (localWorkerOutTasks.size() != 0) {
             isLocalWorkerAvail = true;
             localOutTasks.addAll(localWorkerOutTasks);
-            remoteOutTasks.removeAll(localWorkerOutTasks);
         } else {
             isLocalWorkerAvail = false;
         }
@@ -93,8 +90,7 @@ public class MkLocalFirst extends Shuffer {
         for (i = 0; i < size; i++) {
             Integer taskId = outTasks.get(index);
             boolean taskStatus = workerData.isOutboundTaskActive(taskId);
-            DisruptorQueue exeQueue =
-                    (workerData.getInnerTaskTransfer().get(taskId));
+            DisruptorQueue exeQueue = (workerData.getInnerTaskTransfer().get(taskId));
             float queueLoadRatio = exeQueue != null ? exeQueue.pctFull() : 0;
             if (taskStatus && queueLoadRatio < 1.0)
                 break;
@@ -123,7 +119,6 @@ public class MkLocalFirst extends Shuffer {
         }
         return JStormUtils.mk_list(remoteOutTasks.get(index));
     }
-    
 
     public List<Integer> grouper(List<Object> values) {
         List<Integer> ret;

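The selection loop above prefers tasks whose inbound queue still has headroom. A condensed, self-contained sketch of that policy; the isActive/loadRatio stand-ins replace workerData.isOutboundTaskActive and DisruptorQueue.pctFull, and the round-robin retry approximates the real code's re-randomization:

    import java.util.Arrays;
    import java.util.List;
    import java.util.Random;

    public class LocalFirstSketch {
        static boolean isActive(int task)  { return task != 8; }              // stand-in
        static float   loadRatio(int task) { return task == 7 ? 1.0f : 0.2f; } // stand-in

        public static void main(String[] args) {
            List<Integer> localTasks = Arrays.asList(7, 8, 9);
            int index = new Random().nextInt(localTasks.size());
            for (int i = 0; i < localTasks.size(); i++) {
                int task = localTasks.get(index);
                if (isActive(task) && loadRatio(task) < 1.0f) {
                    System.out.println("route to local task " + task);
                    return;
                }
                index = (index + 1) % localTasks.size();
            }
            System.out.println("all local tasks busy; fall back to remote tasks");
        }
    }
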
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/group/MkLocalShuffer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/group/MkLocalShuffer.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/group/MkLocalShuffer.java
index 324e1e6..c57d380 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/group/MkLocalShuffer.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/group/MkLocalShuffer.java
@@ -1,37 +1,28 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
 package com.alibaba.jstorm.task.group;
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Set;
+
+import org.apache.log4j.Logger;
 
 import com.alibaba.jstorm.daemon.worker.WorkerData;
+import com.alibaba.jstorm.utils.IntervalCheck;
 import com.alibaba.jstorm.utils.JStormUtils;
 import com.alibaba.jstorm.utils.RandomRange;
 
 public class MkLocalShuffer extends Shuffer {
+    private static final Logger LOG = Logger.getLogger(MkLocalShuffer.class);
 
     private List<Integer> outTasks;
     private RandomRange randomrange;
+    private Set<Integer> lastLocalNodeTasks;
+    private IntervalCheck intervalCheck;
+    private WorkerData workerData;
     private boolean isLocal;
 
     public MkLocalShuffer(List<Integer> workerTasks, List<Integer> allOutTasks,
-            WorkerData workerData) {
+                          WorkerData workerData) {
         super(workerData);
         List<Integer> localOutTasks = new ArrayList<Integer>();
 
@@ -40,6 +31,9 @@ public class MkLocalShuffer extends Shuffer {
                 localOutTasks.add(outTask);
             }
         }
+        this.workerData = workerData;
+        intervalCheck = new IntervalCheck();
+        intervalCheck.setInterval(60);
 
         if (localOutTasks.size() != 0) {
             this.outTasks = localOutTasks;
@@ -47,13 +41,43 @@ public class MkLocalShuffer extends Shuffer {
         } else {
             this.outTasks = new ArrayList<Integer>();
             this.outTasks.addAll(allOutTasks);
+            refreshLocalNodeTasks();
             isLocal = false;
         }
+        randomrange = new RandomRange(outTasks.size());
+    }
+
+    /**
+     * No need to guard against concurrent access: one task runs in one thread
+     */
+    private void refreshLocalNodeTasks() {
+        Set<Integer> localNodeTasks = workerData.getLocalNodeTasks();
+
+        if (localNodeTasks == null || localNodeTasks.equals(lastLocalNodeTasks)) {
+            return;
+        }
+        LOG.info("Old localNodeTasks:" + lastLocalNodeTasks + ", new:"
+                + localNodeTasks);
+        lastLocalNodeTasks = localNodeTasks;
+
+        List<Integer> localNodeOutTasks = new ArrayList<Integer>();
 
+        for (Integer outTask : outTasks) {
+            if (localNodeTasks.contains(outTask)) {
+                localNodeOutTasks.add(outTask);
+            }
+        }
+
+        if (!localNodeOutTasks.isEmpty()) {
+            this.outTasks = localNodeOutTasks;
+        }
         randomrange = new RandomRange(outTasks.size());
     }
 
     public List<Integer> grouper(List<Object> values) {
+        if (!isLocal && intervalCheck.check()) {
+            refreshLocalNodeTasks();
+        }
         int index = getActiveTask(randomrange, outTasks);
         // If no active task was found, still send the message to a task
         if (index == -1)

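New in this version of MkLocalShuffer is the periodic refresh of the candidate task set: rather than a background thread, the hot path carries a cheap time-based guard. The pattern, using the IntervalCheck calls from the constructor above (refreshLocalNodeTasks is the method defined in this hunk):

    IntervalCheck intervalCheck = new IntervalCheck();
    intervalCheck.setInterval(60);       // at most one refresh per 60 seconds

    // inside grouper(), on every tuple:
    if (!isLocal && intervalCheck.check()) {
        refreshLocalNodeTasks();         // re-reads workerData.getLocalNodeTasks()
    }
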
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/group/Shuffer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/group/Shuffer.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/group/Shuffer.java
index acad674..3d272bf 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/group/Shuffer.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/group/Shuffer.java
@@ -38,8 +38,7 @@ public abstract class Shuffer {
         int i = 0;
 
         for (i = 0; i < size; i++) {
-            if (workerData.isOutboundTaskActive(Integer.valueOf(outTasks
-                    .get(index))))
+            if (workerData.isOutboundTaskActive(Integer.valueOf(outTasks.get(index))))
                 break;
             else
                 index = randomrange.nextInt();

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/heartbeat/TaskHeartbeat.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/heartbeat/TaskHeartbeat.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/heartbeat/TaskHeartbeat.java
deleted file mode 100755
index 532f553..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/heartbeat/TaskHeartbeat.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.task.heartbeat;
-
-import java.io.Serializable;
-
-import org.apache.commons.lang.builder.ToStringBuilder;
-import org.apache.commons.lang.builder.ToStringStyle;
-
-/**
- * Task heartbeat, this Object will be updated to ZK timely
- * 
- * @author yannian
- * 
- */
-public class TaskHeartbeat implements Serializable {
-
-    private static final long serialVersionUID = -6369195955255963810L;
-    private Integer timeSecs;
-    private Integer uptimeSecs;
-
-    public TaskHeartbeat(int timeSecs, int uptimeSecs) {
-        this.timeSecs = timeSecs;
-        this.uptimeSecs = uptimeSecs;
-    }
-
-    public int getTimeSecs() {
-        return timeSecs;
-    }
-
-    @Override
-    public String toString() {
-        return ToStringBuilder.reflectionToString(this,
-                ToStringStyle.SHORT_PREFIX_STYLE);
-    }
-
-    public void setTimeSecs(int timeSecs) {
-        this.timeSecs = timeSecs;
-    }
-
-    public int getUptimeSecs() {
-        return uptimeSecs;
-    }
-
-    public void setUptimeSecs(int uptimeSecs) {
-        this.uptimeSecs = uptimeSecs;
-    }
-
-    @Override
-    public int hashCode() {
-        final int prime = 31;
-        int result = 1;
-        result =
-                prime * result + ((timeSecs == null) ? 0 : timeSecs.hashCode());
-        result =
-                prime * result
-                        + ((uptimeSecs == null) ? 0 : uptimeSecs.hashCode());
-        return result;
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-        if (this == obj)
-            return true;
-        if (obj == null)
-            return false;
-        if (getClass() != obj.getClass())
-            return false;
-        TaskHeartbeat other = (TaskHeartbeat) obj;
-        if (timeSecs == null) {
-            if (other.timeSecs != null)
-                return false;
-        } else if (!timeSecs.equals(other.timeSecs))
-            return false;
-        if (uptimeSecs == null) {
-            if (other.uptimeSecs != null)
-                return false;
-        } else if (!uptimeSecs.equals(other.uptimeSecs))
-            return false;
-        return true;
-    }
-
-}
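
A note on the deletion above: this class was a plain Serializable POJO that JStorm pushed to ZooKeeper via zkCluster.task_heartbeat(...); in this release its role passes to the thrift-generated backtype.storm.generated.TaskHeartbeat used by the new TaskHeartbeatUpdater below. As a hedged sketch (the byte encoding JStorm actually uses is not shown in this diff), a Serializable heartbeat would be turned into the byte[] a ZK client stores roughly like this:

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.ObjectOutputStream;
    import java.io.Serializable;

    public class HeartbeatSerializeSketch {
        // Hypothetical helper; JStorm's real codec lives in its utils classes.
        static byte[] serialize(Serializable obj) throws IOException {
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            try (ObjectOutputStream oos = new ObjectOutputStream(bos)) {
                oos.writeObject(obj);
            }
            return bos.toByteArray();
        }

        public static void main(String[] args) throws IOException {
            // {timeSecs, uptimeSecs}, standing in for the deleted POJO's fields
            byte[] payload = serialize(new int[] {1448928000, 120});
            System.out.println("heartbeat payload: " + payload.length + " bytes");
        }
    }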

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/heartbeat/TaskHeartbeatRunable.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/heartbeat/TaskHeartbeatRunable.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/heartbeat/TaskHeartbeatRunable.java
deleted file mode 100644
index be66911..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/task/heartbeat/TaskHeartbeatRunable.java
+++ /dev/null
@@ -1,191 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.task.heartbeat;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.LinkedBlockingDeque;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import backtype.storm.Config;
-
-import com.alibaba.jstorm.callback.RunnableCallback;
-import com.alibaba.jstorm.cluster.StormClusterState;
-import com.alibaba.jstorm.daemon.worker.WorkerData;
-import com.alibaba.jstorm.schedule.Assignment.AssignmentType;
-import com.alibaba.jstorm.task.Task;
-import com.alibaba.jstorm.task.UptimeComputer;
-import com.alibaba.jstorm.utils.JStormUtils;
-import com.alibaba.jstorm.utils.TimeUtils;
-
-/**
- * Task heartbeat
- * 
- * @author yannian
- * 
- */
-public class TaskHeartbeatRunable extends RunnableCallback {
-    private static final Logger LOG = LoggerFactory
-            .getLogger(TaskHeartbeatRunable.class);
-
-    private StormClusterState zkCluster;
-    private String topology_id;
-    private UptimeComputer uptime;
-    private Map storm_conf;
-    private Integer frequence;
-    private Map<Integer, Long> taskAssignTsMap = new HashMap<Integer, Long>();
-
-    private static Map<Integer, TaskStats> taskStatsMap =
-            new HashMap<Integer, TaskStats>();
-    private static LinkedBlockingDeque<Event> eventQueue =
-            new LinkedBlockingDeque<TaskHeartbeatRunable.Event>();
-
-    public static void registerTaskStats(int taskId, TaskStats taskStats) {
-        Event event = new Event(Event.REGISTER_TYPE, taskId, taskStats);
-        eventQueue.offer(event);
-    }
-
-    public static void unregisterTaskStats(int taskId) {
-        Event event = new Event(Event.UNREGISTER_TYPE, taskId, null);
-        eventQueue.offer(event);
-    }
-
-    public static void updateTaskHbStats(int taskId, Task taskData) {
-        Event event = new Event(Event.TASK_HEARTBEAT_TYPE, taskId, taskData);
-        eventQueue.offer(event);
-    }
-
-    public TaskHeartbeatRunable(WorkerData workerData) {
-
-        this.zkCluster = workerData.getZkCluster();
-        this.topology_id = workerData.getTopologyId();
-        this.uptime = new UptimeComputer();
-        this.storm_conf = workerData.getStormConf();
-
-        String key = Config.TASK_HEARTBEAT_FREQUENCY_SECS;
-        Object time = storm_conf.get(key);
-        frequence = JStormUtils.parseInt(time, 10);
-
-    }
-
-    public void handle() throws InterruptedException {
-        Event event = eventQueue.take();
-        while (event != null) {
-            switch (event.getType()) {
-            case Event.TASK_HEARTBEAT_TYPE: {
-                updateTaskHbStats(event);
-                break;
-            }
-            case Event.REGISTER_TYPE: {
-                Event<TaskStats> regEvent = event;
-                taskStatsMap.put(event.getTaskId(), regEvent.getEventValue());
-                taskAssignTsMap.put(event.getTaskId(),
-                        System.currentTimeMillis());
-                break;
-            }
-            case Event.UNREGISTER_TYPE: {
-                taskStatsMap.remove(event.getTaskId());
-                taskAssignTsMap.remove(event.getTaskId());
-                break;
-            }
-            default: {
-                LOG.warn("Unknown event type received:" + event.getType());
-                break;
-            }
-            }
-
-            event = eventQueue.take();
-        }
-    }
-
-    @Override
-    public void run() {
-        try {
-            handle();
-        } catch (InterruptedException e) {
-            LOG.info(e.getMessage());
-        }
-    }
-
-    @Override
-    public Object getResult() {
-        return frequence;
-    }
-
-    public void updateTaskHbStats(Event event) {
-        Integer currtime = TimeUtils.current_time_secs();
-        Event<Task> taskHbEvent = event;
-        int taskId = taskHbEvent.getTaskId();
-        String idStr = " " + topology_id + ":" + taskId + " ";
-
-        try {
-
-            TaskHeartbeat hb = new TaskHeartbeat(currtime, uptime.uptime());
-            zkCluster.task_heartbeat(topology_id, taskId, hb);
-
-            LOG.info("update task hearbeat ts " + currtime + " for" + idStr);
-
-            // Check if assignment is changed. e.g scale-out
-            Task task = taskHbEvent.getEventValue();
-            Long timeStamp = taskAssignTsMap.get(taskId);
-            if (timeStamp != null) {
-                if (timeStamp < task.getWorkerAssignmentTs() && 
-                        task.getWorkerAssignmentType().equals(AssignmentType.Assign)) {
-                    LOG.info("Start to update the task data for task-" + taskId);
-                    task.updateTaskData();
-                    taskAssignTsMap.put(taskId, task.getWorkerAssignmentTs());
-                }
-            }
-        } catch (Exception e) {
-            // TODO Auto-generated catch block
-            String errMsg = "Failed to update heartbeat to ZK " + idStr + "\n";
-            LOG.error(errMsg, e);
-        }
-    }
-
-    private static class Event<T> {
-        public static final int REGISTER_TYPE = 0;
-        public static final int UNREGISTER_TYPE = 1;
-        public static final int TASK_HEARTBEAT_TYPE = 2;
-        private final int type;
-        private final int taskId;
-        private final T value;
-
-        public Event(int type, int taskId, T value) {
-            this.type = type;
-            this.taskId = taskId;
-            this.value = value;
-        }
-
-        public int getType() {
-            return type;
-        }
-
-        public int getTaskId() {
-            return taskId;
-        }
-
-        public T getEventValue() {
-            return value;
-        }
-
-    }
-
-}
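
The deleted runnable above funnels register/unregister/heartbeat calls from many task threads through one static blocking deque, so a single consumer can mutate its maps without locks. A minimal, self-contained sketch of that pattern (names are illustrative, not JStorm APIs):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.LinkedBlockingDeque;

    public class EventQueueSketch {
        static final int REGISTER = 0, UNREGISTER = 1, HEARTBEAT = 2;

        static class Event {
            final int type;
            final int taskId;
            Event(int type, int taskId) { this.type = type; this.taskId = taskId; }
        }

        static final LinkedBlockingDeque<Event> queue = new LinkedBlockingDeque<Event>();
        static final Map<Integer, Long> lastHb = new HashMap<Integer, Long>();

        public static void main(String[] args) throws InterruptedException {
            // producers: any task thread may offer events, as the static
            // registerTaskStats/updateTaskHbStats methods did above
            queue.offer(new Event(REGISTER, 1));
            queue.offer(new Event(HEARTBEAT, 1));
            queue.offer(new Event(UNREGISTER, 1));

            // a single consumer applies them in order, so the map needs no locking
            for (int i = 0; i < 3; i++) {
                Event e = queue.take();
                switch (e.type) {
                case REGISTER:
                case HEARTBEAT:
                    lastHb.put(e.taskId, System.currentTimeMillis());
                    break;
                case UNREGISTER:
                    lastHb.remove(e.taskId);
                    break;
                }
            }
            System.out.println("tracked tasks after drain: " + lastHb.size());
        }
    }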

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/heartbeat/TaskHeartbeatUpdater.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/heartbeat/TaskHeartbeatUpdater.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/heartbeat/TaskHeartbeatUpdater.java
new file mode 100644
index 0000000..86d72f4
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/heartbeat/TaskHeartbeatUpdater.java
@@ -0,0 +1,156 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.task.heartbeat;
+
+import backtype.storm.generated.TaskHeartbeat;
+import backtype.storm.generated.TopologyTaskHbInfo;
+import backtype.storm.tuple.Tuple;
+import backtype.storm.utils.NimbusClient;
+
+import com.alibaba.jstorm.client.ConfigExtension;
+import com.alibaba.jstorm.cluster.StormClusterState;
+import com.alibaba.jstorm.task.UptimeComputer;
+import com.alibaba.jstorm.utils.TimeUtils;
+
+import org.apache.thrift.TException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * Update the task heartbeat information of a topology to Nimbus
+ * 
+ * @author Basti Liu
+ * 
+ */
+public class TaskHeartbeatUpdater {
+    private static final Logger LOG = LoggerFactory
+            .getLogger(TaskHeartbeatUpdater.class);
+
+    private int MAX_NUM_TASK_HB_SEND;
+
+    private String topologyId;
+    private int taskId;
+    
+    private Map conf;
+    private NimbusClient client;
+
+    private Map<Integer, TaskHeartbeat> taskHbMap;
+    private TopologyTaskHbInfo taskHbs;
+
+    private StormClusterState zkCluster;
+    
+    public TaskHeartbeatUpdater(Map conf, String topologyId, int taskId, StormClusterState zkCluster) {
+        this.topologyId = topologyId;
+        this.taskId = taskId;
+
+        this.conf = conf;
+        this.client = NimbusClient.getConfiguredClient(conf);
+
+        this.zkCluster = zkCluster;
+        
+        try {
+            TopologyTaskHbInfo taskHbInfo = zkCluster.topology_heartbeat(topologyId);
+            if (taskHbInfo != null) {
+                LOG.info("Found task heartbeat info left in zk for " + topologyId + ": " + taskHbInfo.toString());
+                this.taskHbs = taskHbInfo;
+                this.taskHbMap = taskHbInfo.get_taskHbs();
+                if (this.taskHbMap == null) {
+                    this.taskHbMap = new ConcurrentHashMap<Integer, TaskHeartbeat>();
+                    taskHbs.set_taskHbs(this.taskHbMap);
+                }
+                this.taskHbs.set_topologyId(topologyId);
+                this.taskHbs.set_topologyMasterId(this.taskId);
+            } else {
+                LOG.info("There is not any previous task heartbeat info left in zk for " + topologyId);
+                this.taskHbMap = new ConcurrentHashMap<Integer, TaskHeartbeat>();
+                this.taskHbs = new TopologyTaskHbInfo(this.topologyId, this.taskId);
+                this.taskHbs.set_taskHbs(taskHbMap);
+            }
+        } catch (Exception e) {
+            LOG.warn("Failed to get topology heartbeat from zk", e);
+        }
+        this.MAX_NUM_TASK_HB_SEND = ConfigExtension.getTopologyTaskHbSendNumber(conf);
+    }
+
+    public void process(Tuple input) {
+        int sourceTask = input.getSourceTask();
+        int uptime = (Integer) input.getValue(0);
+        
+        // Update the heartbeat for source task
+        TaskHeartbeat taskHb = taskHbMap.get(sourceTask);
+        if (taskHb == null) {
+            taskHb = new TaskHeartbeat(TimeUtils.current_time_secs(), uptime);
+            taskHbMap.put(sourceTask, taskHb);
+        } else {
+            taskHb.set_time(TimeUtils.current_time_secs());
+            taskHb.set_uptime(uptime);
+        }
+
+        // Send heartbeat info of all tasks to nimbus
+        if (sourceTask == taskId) {
+            // Send heartbeat info of MAX_NUM_TASK_HB_SEND tasks each time
+            TopologyTaskHbInfo tmpTaskHbInfo = new TopologyTaskHbInfo(topologyId, taskId);
+            Map<Integer, TaskHeartbeat> tmpTaskHbMap = new ConcurrentHashMap<Integer, TaskHeartbeat>();
+            tmpTaskHbInfo.set_taskHbs(tmpTaskHbMap);
+
+            int sendCount = 0;
+            for (Entry<Integer, TaskHeartbeat> entry : taskHbMap.entrySet()) {
+                tmpTaskHbMap.put(entry.getKey(), entry.getValue());
+                sendCount++;
+
+                if (sendCount >= MAX_NUM_TASK_HB_SEND) {
+                    setTaskHeartbeat(tmpTaskHbInfo);
+                    tmpTaskHbMap.clear();
+                    sendCount = 0;
+                }
+            }
+            if (tmpTaskHbMap.size() > 0) {
+                setTaskHeartbeat(tmpTaskHbInfo);
+            }
+        }
+    }
+    
+    private void setTaskHeartbeat(TopologyTaskHbInfo topologyTaskHbInfo) {
+        try {
+            if (topologyTaskHbInfo == null) {
+                return;
+            }
+            if (topologyTaskHbInfo.get_taskHbs() == null) {
+                return;
+            }
+
+            client.getClient().updateTaskHeartbeat(topologyTaskHbInfo);
+
+            String info = "";
+            for (Entry<Integer, TaskHeartbeat> entry : topologyTaskHbInfo.get_taskHbs().entrySet()) {
+                info += " " + entry.getKey() + "-" + entry.getValue().get_time(); 
+            }
+            LOG.info("Update task heartbeat:" + info);
+        } catch (TException e) {
+            LOG.error("Failed to update task heartbeat info", e);
+            if (client != null) {
+                client.close();
+                client = NimbusClient.getConfiguredClient(conf);
+            }
+        }
+    }
+}
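
The chunking above caps each nimbus RPC at MAX_NUM_TASK_HB_SEND entries (configured via ConfigExtension.getTopologyTaskHbSendNumber). The same loop, isolated on a plain map, with flush() standing in for the nimbus call:

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class BatchSendSketch {
        static final int MAX_NUM_TASK_HB_SEND = 3; // illustrative value

        static void flush(Map<Integer, Integer> part) {
            System.out.println("send " + part.size() + " heartbeats: " + part);
        }

        public static void main(String[] args) {
            Map<Integer, Integer> taskHbMap = new LinkedHashMap<Integer, Integer>();
            for (int task = 1; task <= 7; task++) taskHbMap.put(task, task * 10);

            Map<Integer, Integer> part = new LinkedHashMap<Integer, Integer>();
            int sendCount = 0;
            for (Map.Entry<Integer, Integer> e : taskHbMap.entrySet()) {
                part.put(e.getKey(), e.getValue());
                if (++sendCount >= MAX_NUM_TASK_HB_SEND) {
                    flush(part);              // full batch of 3 entries
                    part.clear();
                    sendCount = 0;
                }
            }
            if (!part.isEmpty()) flush(part); // trailing partial batch of 1 entry
        }
    }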

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/master/TopoMasterCtrlEvent.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/master/TopoMasterCtrlEvent.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/master/TopoMasterCtrlEvent.java
new file mode 100644
index 0000000..adc8dc0
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/master/TopoMasterCtrlEvent.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.task.master;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Definition of the control event used for control purposes within a
+ * topology, e.g. backpressure
+ * 
+ * @author Basti Liu 
+ */
+
+public class TopoMasterCtrlEvent implements Serializable {
+
+    private static final long serialVersionUID = 5929540385279089750L;
+
+    public enum EventType {
+        startBackpressure, stopBackpressure, syncBackpressureState, updateBackpressureConfig, defaultType
+    }
+
+    private EventType eventType;
+    private List<Object> eventValue;
+
+    public TopoMasterCtrlEvent() {
+        eventType = EventType.defaultType;
+        eventValue = null;
+    }
+
+    public TopoMasterCtrlEvent(EventType type, List<Object> value) {
+        this.eventType = type;
+        this.eventValue = value;
+    }
+
+    public EventType getEventType() {
+        return eventType;
+    }
+
+    public void setEventType(EventType type) {
+        this.eventType = type;
+    }
+
+    public List<Object> getEventValue() {
+        return eventValue;
+    }
+
+    public void setEventValue(List<Object> value) {
+        this.eventValue = value;
+    }
+
+    public void addEventValue(Object value) {
+        if (eventValue == null) {
+            eventValue = new ArrayList<Object>();
+        }
+
+        eventValue.add(value);
+    }
+}
\ No newline at end of file
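
A short usage sketch for the event class above; the attached value is made up for illustration (the diff does not show what payload a startBackpressure event actually carries):

    import com.alibaba.jstorm.task.master.TopoMasterCtrlEvent;
    import com.alibaba.jstorm.task.master.TopoMasterCtrlEvent.EventType;

    public class CtrlEventSketch {
        public static void main(String[] args) {
            TopoMasterCtrlEvent event = new TopoMasterCtrlEvent();
            event.setEventType(EventType.startBackpressure);
            event.addEventValue(0.8); // illustrative payload, e.g. a flow-control ratio
            System.out.println(event.getEventType() + " with " + event.getEventValue());
        }
    }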

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/task/master/TopologyMaster.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/task/master/TopologyMaster.java b/jstorm-core/src/main/java/com/alibaba/jstorm/task/master/TopologyMaster.java
new file mode 100644
index 0000000..b5fb22c
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/task/master/TopologyMaster.java
@@ -0,0 +1,359 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.task.master;
+
+import backtype.storm.generated.*;
+import backtype.storm.task.IBolt;
+import backtype.storm.task.OutputCollector;
+import backtype.storm.task.TopologyContext;
+import backtype.storm.topology.IDynamicComponent;
+import backtype.storm.tuple.Tuple;
+import backtype.storm.utils.NimbusClient;
+import com.alibaba.jstorm.cluster.Common;
+import com.alibaba.jstorm.cluster.StormClusterState;
+import com.alibaba.jstorm.cluster.StormConfig;
+import com.alibaba.jstorm.metric.MetaType;
+import com.alibaba.jstorm.metric.MetricUtils;
+import com.alibaba.jstorm.metric.TopologyMetricContext;
+import com.alibaba.jstorm.schedule.Assignment;
+import com.alibaba.jstorm.schedule.default_assign.ResourceWorkerSlot;
+import com.alibaba.jstorm.task.backpressure.BackpressureCoordinator;
+import com.alibaba.jstorm.task.heartbeat.TaskHeartbeatUpdater;
+import com.alibaba.jstorm.utils.IntervalCheck;
+import com.alibaba.jstorm.utils.JStormUtils;
+import com.alibaba.jstorm.utils.TimeUtils;
+import com.google.common.collect.Maps;
+import org.slf4j.Logger;
+
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.Executors;
+import java.util.concurrent.LinkedBlockingDeque;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import static org.slf4j.LoggerFactory.getLogger;
+
+/**
+ * The topology master is responsible for processing general topology
+ * information, e.g. task heartbeat updates and metrics data updates.
+ *
+ * @author Basti Liu
+ */
+public class TopologyMaster implements IBolt, IDynamicComponent {
+
+    private static final long serialVersionUID = 4690656768333833626L;
+
+    private static final Logger LOG = getLogger(TopologyMaster.class);
+    private final Logger metricLogger = getLogger(TopologyMetricContext.class);
+
+    public static final int MAX_BATCH_SIZE = 10000;
+    private final MetricInfo dummy = MetricUtils.mkMetricInfo();
+
+    public static final String FIELD_METRIC_WORKER = "worker";
+    public static final String FIELD_METRIC_METRICS = "metrics";
+    public static final String FILED_HEARBEAT_EVENT = "hbEvent";
+    public static final String FILED_CTRL_EVENT = "ctrlEvent";
+
+    private Map conf;
+    private StormClusterState zkCluster;
+    private OutputCollector collector;
+
+    private int taskId;
+    private String topologyId;
+    private volatile Set<ResourceWorkerSlot> workerSet;
+    private IntervalCheck intervalCheck;
+
+    private TaskHeartbeatUpdater taskHeartbeatUpdater;
+
+    private BackpressureCoordinator backpressureCoordinator;
+
+    private TopologyMetricContext topologyMetricContext;
+
+    private ScheduledExecutorService uploadMetricsExecutor;
+
+    private Thread updateThread;
+    private BlockingQueue<Tuple> queue = new LinkedBlockingDeque<Tuple>();
+    private IntervalCheck threadAliveCheck;
+
+    private volatile boolean isActive = true;
+
+    private class TopologyMasterRunnable implements Runnable {
+        @Override
+        public void run() {
+            while (isActive) {
+                try {
+                    Tuple event = queue.take();
+                    if (event != null) {
+                        eventHandle(event);
+                    }
+                } catch (Throwable e) {
+                    LOG.error("Failed to process event", e);
+                }
+            }
+        }
+        
+    }
+    @Override
+    public void prepare(Map stormConf, TopologyContext context,
+                        OutputCollector collector) {
+        this.conf = context.getStormConf();
+        this.collector = collector;
+        this.taskId = context.getThisTaskId();
+        this.topologyId = context.getTopologyId();
+        this.zkCluster = context.getZkCluster();
+
+        try {
+            Assignment assignment = zkCluster.assignment_info(topologyId, null);
+            this.workerSet = assignment.getWorkers();
+            intervalCheck = new IntervalCheck();
+            intervalCheck.setInterval(10);
+            intervalCheck.start();
+        } catch (Exception e) {
+            LOG.error("Failed to get assignment for " + topologyId);
+        }
+
+        this.taskHeartbeatUpdater = new TaskHeartbeatUpdater(this.conf, topologyId, taskId, zkCluster);
+
+        this.backpressureCoordinator = new BackpressureCoordinator(collector, context, taskId);
+
+        this.topologyMetricContext = new TopologyMetricContext(topologyId, this.workerSet, this.conf);
+
+        this.uploadMetricsExecutor = Executors.newSingleThreadScheduledExecutor();
+        this.uploadMetricsExecutor.scheduleAtFixedRate(new Runnable() {
+            @Override
+            public void run() {
+                int secOffset = TimeUtils.secOffset();
+                int offset = 35;
+                if (secOffset < offset) {
+                    JStormUtils.sleepMs((offset - secOffset) * 1000);
+                } else if (secOffset == offset) {
+                    // do nothing
+                } else {
+                    JStormUtils.sleepMs((60 - secOffset + offset) * 1000);
+                }
+                if (topologyMetricContext.getUploadedWorkerNum() > 0) {
+                    metricLogger.info("force upload metrics.");
+                    mergeAndUpload();
+                }
+            }
+        }, 5, 60, TimeUnit.SECONDS);
+
+        updateThread = new Thread(new TopologyMasterRunnable());
+        updateThread.start();
+
+        threadAliveCheck = new IntervalCheck();
+        threadAliveCheck.setInterval(30);
+        threadAliveCheck.start();
+    }
+
+    @Override
+    public void execute(Tuple input) {
+        if (input != null) {
+            
+            try {
+                queue.put(input);
+            } catch (InterruptedException e) {
+                LOG.error("Failed to put event to taskHb updater's queue", e);
+            }
+            
+            if (threadAliveCheck.check()) {
+                if (updateThread == null || !updateThread.isAlive()) {
+                    updateThread = new Thread(new TopologyMasterRunnable());
+                    updateThread.start();
+                }
+            }
+
+            collector.ack(input);
+        } else {
+            LOG.error("Received null tuple!");
+        }
+    }
+
+    @Override
+    public void cleanup() {
+        isActive = false;
+        LOG.info("Successfully cleaned up");
+    }
+
+    private void updateTopologyWorkerSet() {
+        if (intervalCheck.check()) {
+            Assignment assignment;
+            try {
+                assignment = zkCluster.assignment_info(topologyId, null);
+                this.workerSet = assignment.getWorkers();
+            } catch (Exception e) {
+                LOG.error("Failed to get assignment for " + topologyId);
+            }
+
+        }
+    }
+
+    private void eventHandle(Tuple input) {
+        updateTopologyWorkerSet();
+
+        String stream = input.getSourceStreamId();
+
+        try {
+            if (stream.equals(Common.TOPOLOGY_MASTER_HB_STREAM_ID)) {
+                taskHeartbeatUpdater.process(input);
+            } else if (stream.equals(Common.TOPOLOGY_MASTER_METRICS_STREAM_ID)) {
+                updateMetrics(input);
+            } else if (stream.equals(Common.TOPOLOGY_MASTER_CONTROL_STREAM_ID)) {
+                backpressureCoordinator.process(input);
+            }
+        } catch (Exception e) {
+            LOG.error("Failed to handle event: " + input.toString(), e);
+        }
+    }
+
+    @Override
+    public void update(Map conf) {
+        LOG.info("Topology master received new conf:" + conf);
+
+        if (backpressureCoordinator.isBackpressureConfigChange(conf)) {
+            backpressureCoordinator.updateBackpressureConfig(conf);
+        }
+    }
+
+    private void updateMetrics(Tuple input) {
+        String workerSlot = (String) input.getValueByField(FIELD_METRIC_WORKER);
+        WorkerUploadMetrics metrics = (WorkerUploadMetrics) input.getValueByField(FIELD_METRIC_METRICS);
+        topologyMetricContext.addToMemCache(workerSlot, metrics.get_allMetrics());
+        metricLogger.info("received metrics from:{}, size:{}", workerSlot, metrics.get_allMetrics().get_metrics_size());
+
+        if (topologyMetricContext.readyToUpload()) {
+            metricLogger.info("all {} worker slots have updated metrics, start merging & uploading...",
+                    topologyMetricContext.getWorkerNum());
+            uploadMetricsExecutor.submit(new Runnable() {
+                @Override
+                public void run() {
+                    mergeAndUpload();
+                }
+            });
+        }
+    }
+
+    private void mergeAndUpload() {
+        // double check
+        if (topologyMetricContext.getUploadedWorkerNum() > 0) {
+            TopologyMetric tpMetric = topologyMetricContext.mergeMetrics();
+            if (tpMetric != null) {
+                uploadMetrics(tpMetric);
+            }
+
+            topologyMetricContext.resetUploadedMetrics();
+            //MetricUtils.logMetrics(tpMetric.get_componentMetric());
+        }
+    }
+
+    /**
+     * upload metrics sequentially due to thrift frame size limit (15MB)
+     */
+    private void uploadMetrics(TopologyMetric tpMetric) {
+        long start = System.currentTimeMillis();
+        if (StormConfig.local_mode(conf)) {
+            return;
+        } else {
+            NimbusClient client = null;
+            try {
+                client = NimbusClient.getConfiguredClient(conf);
+                Nimbus.Client client1 = client.getClient();
+
+                MetricInfo topologyMetrics = tpMetric.get_topologyMetric();
+                MetricInfo componentMetrics = tpMetric.get_componentMetric();
+                MetricInfo taskMetrics = tpMetric.get_taskMetric();
+                MetricInfo streamMetrics = tpMetric.get_streamMetric();
+                MetricInfo workerMetrics = tpMetric.get_workerMetric();
+                MetricInfo nettyMetrics = tpMetric.get_nettyMetric();
+
+                int totalSize = topologyMetrics.get_metrics_size() + componentMetrics.get_metrics_size() +
+                        taskMetrics.get_metrics_size() + streamMetrics.get_metrics_size() +
+                        workerMetrics.get_metrics_size() + nettyMetrics.get_metrics_size();
+
+                // for small topologies, send all metrics together to ease the pressure of nimbus
+                if (totalSize < MAX_BATCH_SIZE) {
+                    client1.uploadTopologyMetrics(topologyId,
+                            new TopologyMetric(topologyMetrics, componentMetrics, workerMetrics, taskMetrics,
+                                    streamMetrics, nettyMetrics));
+                } else {
+                    client1.uploadTopologyMetrics(topologyId,
+                            new TopologyMetric(topologyMetrics, componentMetrics, dummy, dummy, dummy, dummy));
+                    batchUploadMetrics(client1, topologyId, workerMetrics, MetaType.WORKER);
+                    batchUploadMetrics(client1, topologyId, taskMetrics, MetaType.TASK);
+                    batchUploadMetrics(client1, topologyId, streamMetrics, MetaType.STREAM);
+                    batchUploadMetrics(client1, topologyId, nettyMetrics, MetaType.NETTY);
+                }
+            } catch (Exception e) {
+                LOG.error("Failed to upload worker metrics", e);
+            } finally {
+                if (client != null) {
+                    client.close();
+                }
+            }
+        }
+        metricLogger.info("upload metrics, cost:{}", System.currentTimeMillis() - start);
+    }
+
+    private void batchUploadMetrics(Nimbus.Client client, String topologyId, MetricInfo metricInfo, MetaType metaType) {
+        if (metricInfo.get_metrics_size() > MAX_BATCH_SIZE) {
+            Map<String, Map<Integer, MetricSnapshot>> data = metricInfo.get_metrics();
+
+            Map<String, Map<Integer, MetricSnapshot>> part = Maps.newHashMapWithExpectedSize(MAX_BATCH_SIZE);
+            MetricInfo uploadPart = new MetricInfo();
+            int i = 0;
+            for (Map.Entry<String, Map<Integer, MetricSnapshot>> entry : data.entrySet()) {
+                part.put(entry.getKey(), entry.getValue());
+                if (++i >= MAX_BATCH_SIZE) {
+                    uploadPart.set_metrics(part);
+                    doUpload(client, topologyId, uploadPart, metaType);
+
+                    i = 0;
+                    part.clear();
+                }
+            }
+            if (part.size() > 0) {
+                uploadPart.set_metrics(part);
+                doUpload(client, topologyId, uploadPart, metaType);
+            }
+        } else {
+            doUpload(client, topologyId, metricInfo, metaType);
+        }
+    }
+
+    private void doUpload(Nimbus.Client client, String topologyId, MetricInfo part, MetaType metaType) {
+        try {
+            if (metaType == MetaType.TASK) {
+                client.uploadTopologyMetrics(topologyId,
+                        new TopologyMetric(dummy, dummy, dummy, part, dummy, dummy));
+            } else if (metaType == MetaType.STREAM) {
+                client.uploadTopologyMetrics(topologyId,
+                        new TopologyMetric(dummy, dummy, dummy, dummy, part, dummy));
+            } else if (metaType == MetaType.WORKER) {
+                client.uploadTopologyMetrics(topologyId,
+                        new TopologyMetric(dummy, dummy, part, dummy, dummy, dummy));
+            } else if (metaType == MetaType.NETTY) {
+                client.uploadTopologyMetrics(topologyId,
+                        new TopologyMetric(dummy, dummy, dummy, dummy, dummy, part));
+            }
+        } catch (Exception ex) {
+            LOG.error("Error", ex);
+        }
+    }
+}
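
The scheduled uploader in prepare() above aligns the forced merge to roughly 35 seconds past each minute; the branch arithmetic is easy to misread, so here it is isolated (secOffset is assumed to be the current second within the minute, per TimeUtils.secOffset() in the code above):

    public class MinuteAlignSketch {
        // sleep so that work starts ~offset seconds past the minute boundary
        static long sleepMsUntilOffset(int secOffset, int offset) {
            if (secOffset < offset) {
                return (offset - secOffset) * 1000L;      // wait within this minute
            } else if (secOffset == offset) {
                return 0L;                                // already aligned
            } else {
                return (60 - secOffset + offset) * 1000L; // wait into the next minute
            }
        }

        public static void main(String[] args) {
            System.out.println(sleepMsUntilOffset(10, 35)); // 25000: 25s to go
            System.out.println(sleepMsUntilOffset(35, 35)); // 0: aligned
            System.out.println(sleepMsUntilOffset(50, 35)); // 45000: rest of minute + 35s
        }
    }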

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/utils/DisruptorQueue.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/DisruptorQueue.java b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/DisruptorQueue.java
index 4a4a72b..8f0138a 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/DisruptorQueue.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/DisruptorQueue.java
@@ -1,4 +1,4 @@
-/**
+package com.alibaba.jstorm.utils; /**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/utils/DisruptorRunable.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/DisruptorRunable.java b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/DisruptorRunable.java
index 17b7885..161156b 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/DisruptorRunable.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/DisruptorRunable.java
@@ -17,67 +17,59 @@
  */
 package com.alibaba.jstorm.utils;
 
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import backtype.storm.utils.DisruptorQueue;
-
 import com.alibaba.jstorm.callback.AsyncLoopRunnable;
 import com.alibaba.jstorm.callback.RunnableCallback;
-import com.alibaba.jstorm.common.metric.QueueGauge;
-import com.alibaba.jstorm.common.metric.Timer;
-import com.alibaba.jstorm.metric.JStormHealthCheck;
+import com.alibaba.jstorm.common.metric.*;
 import com.alibaba.jstorm.metric.JStormMetrics;
+import com.alibaba.jstorm.metric.MetricType;
+import com.alibaba.jstorm.metric.MetricUtils;
+import com.alibaba.jstorm.metric.JStormHealthCheck;
 import com.alibaba.jstorm.metric.MetricDef;
 import com.lmax.disruptor.EventHandler;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.atomic.AtomicBoolean;
 
 //import com.alibaba.jstorm.message.zeroMq.ISendConnection;
 
 /**
- * 
  * Disruptor Consumer thread
  * 
  * @author yannian
- * 
  */
-public abstract class DisruptorRunable extends RunnableCallback implements
-        EventHandler {
-    private final static Logger LOG = LoggerFactory
-            .getLogger(DisruptorRunable.class);
+public abstract class DisruptorRunable extends RunnableCallback implements EventHandler {
+    private final static Logger LOG = LoggerFactory.getLogger(DisruptorRunable.class);
 
     protected DisruptorQueue queue;
     protected String idStr;
-    protected Timer timer;
+    protected AsmHistogram timer;
     protected AtomicBoolean shutdown = AsyncLoopRunnable.getShutdown();
 
     public DisruptorRunable(DisruptorQueue queue, String idStr) {
         this.queue = queue;
-        this.timer =
-                JStormMetrics.registerWorkerTimer(idStr + MetricDef.TIME_TYPE);
         this.idStr = idStr;
 
-        QueueGauge queueGauge =
-                new QueueGauge(idStr + MetricDef.QUEUE_TYPE, queue);
-        JStormMetrics.registerWorkerGauge(queueGauge, idStr
-                + MetricDef.QUEUE_TYPE);
+        this.timer =
+                (AsmHistogram) JStormMetrics.registerWorkerMetric(MetricUtils.workerMetricName(idStr + MetricDef.TIME_TYPE, MetricType.HISTOGRAM),
+                        new AsmHistogram());
+
+        QueueGauge queueGauge = new QueueGauge(queue, idStr, MetricDef.QUEUE_TYPE);
+        JStormMetrics.registerWorkerMetric(MetricUtils.workerMetricName(idStr + MetricDef.QUEUE_TYPE, MetricType.GAUGE), new AsmGauge(queueGauge));
 
         JStormHealthCheck.registerWorkerHealthCheck(idStr, queueGauge);
     }
 
-    public abstract void handleEvent(Object event, boolean endOfBatch)
-            throws Exception;
+    public abstract void handleEvent(Object event, boolean endOfBatch) throws Exception;
 
     /**
      * This function need to be implements
      * 
-     * @see com.lmax.disruptor.EventHandler#onEvent(java.lang.Object, long,
-     *      boolean)
+     * @see EventHandler#onEvent(Object, long, boolean)
      */
     @Override
-    public void onEvent(Object event, long sequence, boolean endOfBatch)
-            throws Exception {
+    public void onEvent(Object event, long sequence, boolean endOfBatch) throws Exception {
         if (event == null) {
             return;
         }
@@ -87,7 +79,7 @@ public abstract class DisruptorRunable extends RunnableCallback implements
             handleEvent(event, endOfBatch);
         } finally {
             long end = System.nanoTime();
-            timer.update((end - start)/1000000.0d);
+            timer.update((end - start) / TimeUtils.NS_PER_US);
         }
     }
 
@@ -96,17 +88,15 @@ public abstract class DisruptorRunable extends RunnableCallback implements
         LOG.info("Successfully start thread " + idStr);
         queue.consumerStarted();
 
-        while (shutdown.get() == false) {
+        while (!shutdown.get()) {
             queue.consumeBatchWhenAvailable(this);
-
         }
-
         LOG.info("Successfully exit thread " + idStr);
     }
 
     @Override
     public void shutdown() {
-        JStormMetrics.unregisterWorkerMetric(idStr + MetricDef.QUEUE_TYPE);
+        JStormMetrics.unregisterWorkerMetric(MetricUtils.workerMetricName(idStr + MetricDef.QUEUE_TYPE, MetricType.GAUGE));
         JStormHealthCheck.unregisterWorkerHealthCheck(idStr);
     }
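
The onEvent change above switches the recorded latency from milliseconds ((end - start)/1000000.0d) to microseconds via TimeUtils.NS_PER_US. A stripped-down version of that timing wrapper, assuming NS_PER_US is 1000 and with a println standing in for the AsmHistogram update:

    public class HandlerTimingSketch {
        static final double NS_PER_US = 1000.0d; // assumed value of TimeUtils.NS_PER_US

        public static void main(String[] args) {
            long start = System.nanoTime();
            try {
                Thread.sleep(5); // stands in for handleEvent(event, endOfBatch)
            } catch (InterruptedException ignored) {
            } finally {
                long end = System.nanoTime();
                double elapsedUs = (end - start) / NS_PER_US; // microseconds, as in the new code
                System.out.println("handled in " + elapsedUs + " us");
            }
        }
    }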
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/utils/EPlatform.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/EPlatform.java b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/EPlatform.java
index 8c62d6f..2773963 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/EPlatform.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/EPlatform.java
@@ -17,34 +17,18 @@
  */
 package com.alibaba.jstorm.utils;
 
-public enum EPlatform {  
-    Any("any"),  
-    Linux("Linux"),  
-    Mac_OS("Mac OS"),  
-    Mac_OS_X("Mac OS X"),  
-    Windows("Windows"),  
-    OS2("OS/2"),  
-    Solaris("Solaris"),  
-    SunOS("SunOS"),  
-    MPEiX("MPE/iX"),  
-    HP_UX("HP-UX"),  
-    AIX("AIX"),  
-    OS390("OS/390"),  
-    FreeBSD("FreeBSD"),  
-    Irix("Irix"),  
-    Digital_Unix("Digital Unix"),  
-    NetWare_411("NetWare"),  
-    OSF1("OSF1"),  
-    OpenVMS("OpenVMS"),  
-    Others("Others");  
-      
-    private EPlatform(String desc){  
-        this.description = desc;  
-    }  
-      
-    public String toString(){  
-        return description;  
-    }  
-      
-    private String description;  
-} 
+public enum EPlatform {
+    Any("any"), Linux("Linux"), Mac_OS("Mac OS"), Mac_OS_X("Mac OS X"), Windows("Windows"), OS2("OS/2"), Solaris("Solaris"), SunOS("SunOS"), MPEiX("MPE/iX"), HP_UX(
+            "HP-UX"), AIX("AIX"), OS390("OS/390"), FreeBSD("FreeBSD"), Irix("Irix"), Digital_Unix("Digital Unix"), NetWare_411("NetWare"), OSF1("OSF1"), OpenVMS(
+            "OpenVMS"), Others("Others");
+
+    private EPlatform(String desc) {
+        this.description = desc;
+    }
+
+    public String toString() {
+        return description;
+    }
+
+    private String description;
+}
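
A plausible use of the reformatted enum above: resolving the current platform from os.name. The prefix-match rule is an assumption (JStorm's own detection helper is not shown here):

    import com.alibaba.jstorm.utils.EPlatform;

    public class PlatformSketch {
        public static void main(String[] args) {
            String osName = System.getProperty("os.name");
            // toString() returns the description, so "Windows 10" prefix-matches "Windows".
            // Caveat: "Mac OS X" also prefix-matches Mac_OS first; a real detector
            // would need a longest-match rule.
            for (EPlatform p : EPlatform.values()) {
                if (p != EPlatform.Any && p != EPlatform.Others && osName.startsWith(p.toString())) {
                    System.out.println("platform: " + p.name());
                    return;
                }
            }
            System.out.println("platform: " + EPlatform.Others.name());
        }
    }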

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/utils/FileAttribute.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/FileAttribute.java b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/FileAttribute.java
index e33167a..7099169 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/FileAttribute.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/FileAttribute.java
@@ -77,8 +77,7 @@ public class FileAttribute implements Serializable, JSONAware {
 
     @Override
     public String toString() {
-        return ToStringBuilder.reflectionToString(this,
-                ToStringStyle.SHORT_PREFIX_STYLE);
+        return ToStringBuilder.reflectionToString(this, ToStringStyle.SHORT_PREFIX_STYLE);
     }
 
     @Override
@@ -122,8 +121,7 @@ public class FileAttribute implements Serializable, JSONAware {
 
         String jsonString = JStormUtils.to_json(map);
 
-        Map<String, Map> map2 =
-                (Map<String, Map>) JStormUtils.from_json(jsonString);
+        Map<String, Map> map2 = (Map<String, Map>) JStormUtils.from_json(jsonString);
 
         Map jObject = map2.get("test");
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/utils/HttpserverUtils.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/HttpserverUtils.java b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/HttpserverUtils.java
index 20c1f7a..378ee26 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/HttpserverUtils.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/HttpserverUtils.java
@@ -29,8 +29,7 @@ public class HttpserverUtils {
 
     public static final String HTTPSERVER_LOGVIEW_PARAM_CMD_JSTACK = "jstack";
 
-    public static final String HTTPSERVER_LOGVIEW_PARAM_CMD_SHOW_CONF =
-            "showConf";
+    public static final String HTTPSERVER_LOGVIEW_PARAM_CMD_SHOW_CONF = "showConf";
 
     public static final String HTTPSERVER_LOGVIEW_PARAM_LOGFILE = "log";
 
@@ -38,8 +37,7 @@ public class HttpserverUtils {
 
     public static final String HTTPSERVER_LOGVIEW_PARAM_DIR = "dir";
 
-    public static final String HTTPSERVER_LOGVIEW_PARAM_WORKER_PORT =
-            "workerPort";
+    public static final String HTTPSERVER_LOGVIEW_PARAM_WORKER_PORT = "workerPort";
 
     public static final String HTTPSERVER_LOGVIEW_PARAM_SIZE_FORMAT = "%016d\n";
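
The constants above are query-parameter names and values for the worker logview HTTP endpoint. A hypothetical request assembled from them; the host, port, path, and the "cmd" parameter name are placeholders assumed for illustration, not taken from this diff:

    public class LogviewUrlSketch {
        public static void main(String[] args) {
            String host = "worker-host";   // placeholder
            int logviewPort = 8622;        // placeholder
            String url = "http://" + host + ":" + logviewPort + "/logview"
                    + "?cmd=showConf"      // value of HTTPSERVER_LOGVIEW_PARAM_CMD_SHOW_CONF
                    + "&workerPort=6800";  // key from HTTPSERVER_LOGVIEW_PARAM_WORKER_PORT
            System.out.println(url);
        }
    }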
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/utils/IntervalCheck.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/IntervalCheck.java b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/IntervalCheck.java
index 992659c..de7b504 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/IntervalCheck.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/IntervalCheck.java
@@ -1,83 +1,74 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.utils;
-
-import java.io.Serializable;
-
-public class IntervalCheck implements Serializable {
-
-    /**
-	 * 
-	 */
-    private static final long serialVersionUID = 8952971673547362883L;
-
-    long lastCheck = System.currentTimeMillis();
-
-    // default interval is 1 second
-    long interval = 1000;
-
-    /*
-     * if last check time is before interval seconds, return true, otherwise
-     * return false
-     */
-    public boolean check() {
-        return checkAndGet() != null;
-    }
-
-    /**
-     * 
-     * @return
-     */
-    public Double checkAndGet() {
-        long now = System.currentTimeMillis();
-
-        synchronized (this) {
-            if (now >= interval + lastCheck) {
-                double pastSecond = ((double) (now - lastCheck)) / 1000;
-                lastCheck = now;
-                return pastSecond;
-            }
-        }
-
-        return null;
-    }
-
-    public long getInterval() {
-        return interval/1000;
-    }
-    
-    public long getIntervalMs() {
-        return interval;
-    }
-
-    public void setInterval(long interval) {
-        this.interval = interval * 1000;
-    }
-    
-    public void setIntervalMs(long interval) {
-        this.interval = interval;
-    }
-
-    public void adjust(long addTimeMillis) {
-        lastCheck += addTimeMillis;
-    }
-
-    public void start() {
-        lastCheck = System.currentTimeMillis();
-    }
-}
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.utils;
+
+import java.io.Serializable;
+
+public class IntervalCheck implements Serializable {
+    private static final long serialVersionUID = 8952971673547362883L;
+
+    long lastCheck = System.currentTimeMillis();
+
+    // default interval is 1 second
+    long interval = 1000;
+
+    /*
+     * returns true if at least the configured interval has elapsed since the last check, otherwise false
+     */
+    public boolean check() {
+        return checkAndGet() != null;
+    }
+
+    public Double checkAndGet() {
+        long now = System.currentTimeMillis();
+
+        synchronized (this) {
+            if (now >= interval + lastCheck) {
+                double pastSecond = ((double) (now - lastCheck)) / 1000;
+                lastCheck = now;
+                return pastSecond;
+            }
+        }
+
+        return null;
+    }
+
+    public long getInterval() {
+        return interval / 1000;
+    }
+
+    public long getIntervalMs() {
+        return interval;
+    }
+
+    public void setInterval(long interval) {
+        this.interval = interval * 1000;
+    }
+
+    public void setIntervalMs(long interval) {
+        this.interval = interval;
+    }
+
+    public void adjust(long addTimeMillis) {
+        lastCheck += addTimeMillis;
+    }
+
+    public void start() {
+        lastCheck = System.currentTimeMillis();
+    }
+}
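
A usage sketch for IntervalCheck as rewritten above: set the interval in seconds, call start() to reset the baseline, then poll check()/checkAndGet() from a loop; checkAndGet() also reports how many seconds actually elapsed:

    import com.alibaba.jstorm.utils.IntervalCheck;

    public class IntervalCheckSketch {
        public static void main(String[] args) throws InterruptedException {
            IntervalCheck intervalCheck = new IntervalCheck();
            intervalCheck.setInterval(1); // seconds; stored internally as 1000 ms
            intervalCheck.start();        // reset lastCheck to "now"

            for (int i = 0; i < 3; i++) {
                Thread.sleep(600);
                Double past = intervalCheck.checkAndGet();
                // fires once ~1.2s in; the other polls return null
                if (past != null) {
                    System.out.println("elapsed " + past + "s since last hit");
                }
            }
        }
    }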


http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/MetricSnapshot.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/MetricSnapshot.java b/jstorm-core/src/main/java/backtype/storm/generated/MetricSnapshot.java
new file mode 100644
index 0000000..bfcbfe0
--- /dev/null
+++ b/jstorm-core/src/main/java/backtype/storm/generated/MetricSnapshot.java
@@ -0,0 +1,2221 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.2)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package backtype.storm.generated;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
+public class MetricSnapshot implements org.apache.thrift.TBase<MetricSnapshot, MetricSnapshot._Fields>, java.io.Serializable, Cloneable, Comparable<MetricSnapshot> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("MetricSnapshot");
+
+  private static final org.apache.thrift.protocol.TField METRIC_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("metricId", org.apache.thrift.protocol.TType.I64, (short)1);
+  private static final org.apache.thrift.protocol.TField TS_FIELD_DESC = new org.apache.thrift.protocol.TField("ts", org.apache.thrift.protocol.TType.I64, (short)2);
+  private static final org.apache.thrift.protocol.TField METRIC_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("metricType", org.apache.thrift.protocol.TType.I32, (short)3);
+  private static final org.apache.thrift.protocol.TField LONG_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("longValue", org.apache.thrift.protocol.TType.I64, (short)4);
+  private static final org.apache.thrift.protocol.TField DOUBLE_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("doubleValue", org.apache.thrift.protocol.TType.DOUBLE, (short)5);
+  private static final org.apache.thrift.protocol.TField M1_FIELD_DESC = new org.apache.thrift.protocol.TField("m1", org.apache.thrift.protocol.TType.DOUBLE, (short)6);
+  private static final org.apache.thrift.protocol.TField M5_FIELD_DESC = new org.apache.thrift.protocol.TField("m5", org.apache.thrift.protocol.TType.DOUBLE, (short)7);
+  private static final org.apache.thrift.protocol.TField M15_FIELD_DESC = new org.apache.thrift.protocol.TField("m15", org.apache.thrift.protocol.TType.DOUBLE, (short)8);
+  private static final org.apache.thrift.protocol.TField MEAN_FIELD_DESC = new org.apache.thrift.protocol.TField("mean", org.apache.thrift.protocol.TType.DOUBLE, (short)9);
+  private static final org.apache.thrift.protocol.TField MIN_FIELD_DESC = new org.apache.thrift.protocol.TField("min", org.apache.thrift.protocol.TType.I64, (short)10);
+  private static final org.apache.thrift.protocol.TField MAX_FIELD_DESC = new org.apache.thrift.protocol.TField("max", org.apache.thrift.protocol.TType.I64, (short)11);
+  private static final org.apache.thrift.protocol.TField P50_FIELD_DESC = new org.apache.thrift.protocol.TField("p50", org.apache.thrift.protocol.TType.DOUBLE, (short)12);
+  private static final org.apache.thrift.protocol.TField P75_FIELD_DESC = new org.apache.thrift.protocol.TField("p75", org.apache.thrift.protocol.TType.DOUBLE, (short)13);
+  private static final org.apache.thrift.protocol.TField P95_FIELD_DESC = new org.apache.thrift.protocol.TField("p95", org.apache.thrift.protocol.TType.DOUBLE, (short)14);
+  private static final org.apache.thrift.protocol.TField P98_FIELD_DESC = new org.apache.thrift.protocol.TField("p98", org.apache.thrift.protocol.TType.DOUBLE, (short)15);
+  private static final org.apache.thrift.protocol.TField P99_FIELD_DESC = new org.apache.thrift.protocol.TField("p99", org.apache.thrift.protocol.TType.DOUBLE, (short)16);
+  private static final org.apache.thrift.protocol.TField P999_FIELD_DESC = new org.apache.thrift.protocol.TField("p999", org.apache.thrift.protocol.TType.DOUBLE, (short)17);
+  private static final org.apache.thrift.protocol.TField STDDEV_FIELD_DESC = new org.apache.thrift.protocol.TField("stddev", org.apache.thrift.protocol.TType.DOUBLE, (short)18);
+  private static final org.apache.thrift.protocol.TField POINTS_FIELD_DESC = new org.apache.thrift.protocol.TField("points", org.apache.thrift.protocol.TType.LIST, (short)19);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new MetricSnapshotStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new MetricSnapshotTupleSchemeFactory());
+  }
+
+  private long metricId; // required
+  private long ts; // required
+  private int metricType; // required
+  private long longValue; // optional
+  private double doubleValue; // optional
+  private double m1; // optional
+  private double m5; // optional
+  private double m15; // optional
+  private double mean; // optional
+  private long min; // optional
+  private long max; // optional
+  private double p50; // optional
+  private double p75; // optional
+  private double p95; // optional
+  private double p98; // optional
+  private double p99; // optional
+  private double p999; // optional
+  private double stddev; // optional
+  private List<Long> points; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    METRIC_ID((short)1, "metricId"),
+    TS((short)2, "ts"),
+    METRIC_TYPE((short)3, "metricType"),
+    LONG_VALUE((short)4, "longValue"),
+    DOUBLE_VALUE((short)5, "doubleValue"),
+    M1((short)6, "m1"),
+    M5((short)7, "m5"),
+    M15((short)8, "m15"),
+    MEAN((short)9, "mean"),
+    MIN((short)10, "min"),
+    MAX((short)11, "max"),
+    P50((short)12, "p50"),
+    P75((short)13, "p75"),
+    P95((short)14, "p95"),
+    P98((short)15, "p98"),
+    P99((short)16, "p99"),
+    P999((short)17, "p999"),
+    STDDEV((short)18, "stddev"),
+    POINTS((short)19, "points");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // METRIC_ID
+          return METRIC_ID;
+        case 2: // TS
+          return TS;
+        case 3: // METRIC_TYPE
+          return METRIC_TYPE;
+        case 4: // LONG_VALUE
+          return LONG_VALUE;
+        case 5: // DOUBLE_VALUE
+          return DOUBLE_VALUE;
+        case 6: // M1
+          return M1;
+        case 7: // M5
+          return M5;
+        case 8: // M15
+          return M15;
+        case 9: // MEAN
+          return MEAN;
+        case 10: // MIN
+          return MIN;
+        case 11: // MAX
+          return MAX;
+        case 12: // P50
+          return P50;
+        case 13: // P75
+          return P75;
+        case 14: // P95
+          return P95;
+        case 15: // P98
+          return P98;
+        case 16: // P99
+          return P99;
+        case 17: // P999
+          return P999;
+        case 18: // STDDEV
+          return STDDEV;
+        case 19: // POINTS
+          return POINTS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __METRICID_ISSET_ID = 0;
+  private static final int __TS_ISSET_ID = 1;
+  private static final int __METRICTYPE_ISSET_ID = 2;
+  private static final int __LONGVALUE_ISSET_ID = 3;
+  private static final int __DOUBLEVALUE_ISSET_ID = 4;
+  private static final int __M1_ISSET_ID = 5;
+  private static final int __M5_ISSET_ID = 6;
+  private static final int __M15_ISSET_ID = 7;
+  private static final int __MEAN_ISSET_ID = 8;
+  private static final int __MIN_ISSET_ID = 9;
+  private static final int __MAX_ISSET_ID = 10;
+  private static final int __P50_ISSET_ID = 11;
+  private static final int __P75_ISSET_ID = 12;
+  private static final int __P95_ISSET_ID = 13;
+  private static final int __P98_ISSET_ID = 14;
+  private static final int __P99_ISSET_ID = 15;
+  private static final int __P999_ISSET_ID = 16;
+  private static final int __STDDEV_ISSET_ID = 17;
+  private int __isset_bitfield = 0;
+  private static final _Fields[] optionals = {
+      _Fields.LONG_VALUE, _Fields.DOUBLE_VALUE, _Fields.M1, _Fields.M5, _Fields.M15,
+      _Fields.MEAN, _Fields.MIN, _Fields.MAX, _Fields.P50, _Fields.P75, _Fields.P95,
+      _Fields.P98, _Fields.P99, _Fields.P999, _Fields.STDDEV, _Fields.POINTS};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.METRIC_ID, new org.apache.thrift.meta_data.FieldMetaData("metricId", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.TS, new org.apache.thrift.meta_data.FieldMetaData("ts", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.METRIC_TYPE, new org.apache.thrift.meta_data.FieldMetaData("metricType", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    tmpMap.put(_Fields.LONG_VALUE, new org.apache.thrift.meta_data.FieldMetaData("longValue", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.DOUBLE_VALUE, new org.apache.thrift.meta_data.FieldMetaData("doubleValue", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
+    tmpMap.put(_Fields.M1, new org.apache.thrift.meta_data.FieldMetaData("m1", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
+    tmpMap.put(_Fields.M5, new org.apache.thrift.meta_data.FieldMetaData("m5", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
+    tmpMap.put(_Fields.M15, new org.apache.thrift.meta_data.FieldMetaData("m15", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
+    tmpMap.put(_Fields.MEAN, new org.apache.thrift.meta_data.FieldMetaData("mean", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
+    tmpMap.put(_Fields.MIN, new org.apache.thrift.meta_data.FieldMetaData("min", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.MAX, new org.apache.thrift.meta_data.FieldMetaData("max", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.P50, new org.apache.thrift.meta_data.FieldMetaData("p50", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
+    tmpMap.put(_Fields.P75, new org.apache.thrift.meta_data.FieldMetaData("p75", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
+    tmpMap.put(_Fields.P95, new org.apache.thrift.meta_data.FieldMetaData("p95", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
+    tmpMap.put(_Fields.P98, new org.apache.thrift.meta_data.FieldMetaData("p98", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
+    tmpMap.put(_Fields.P99, new org.apache.thrift.meta_data.FieldMetaData("p99", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
+    tmpMap.put(_Fields.P999, new org.apache.thrift.meta_data.FieldMetaData("p999", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
+    tmpMap.put(_Fields.STDDEV, new org.apache.thrift.meta_data.FieldMetaData("stddev", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
+    tmpMap.put(_Fields.POINTS, new org.apache.thrift.meta_data.FieldMetaData("points", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(MetricSnapshot.class, metaDataMap);
+  }
+
+  public MetricSnapshot() {
+  }
+
+  public MetricSnapshot(
+    long metricId,
+    long ts,
+    int metricType)
+  {
+    this();
+    this.metricId = metricId;
+    set_metricId_isSet(true);
+    this.ts = ts;
+    set_ts_isSet(true);
+    this.metricType = metricType;
+    set_metricType_isSet(true);
+  }
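+
+  /*
+   * Illustrative usage (a sketch, not generated code): required fields are
+   * supplied through this constructor, while optional primitives are recorded
+   * in __isset_bitfield only when their setter runs. All values below are
+   * made up for illustration.
+   *
+   *   MetricSnapshot snap = new MetricSnapshot(42L, System.currentTimeMillis(), 1);
+   *   snap.set_mean(3.5);    // flips the __MEAN_ISSET_ID bit
+   *   snap.is_set_p99();     // false: never assigned
+   *   snap.unset_mean();     // clears the bit again
+   */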
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public MetricSnapshot(MetricSnapshot other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.metricId = other.metricId;
+    this.ts = other.ts;
+    this.metricType = other.metricType;
+    this.longValue = other.longValue;
+    this.doubleValue = other.doubleValue;
+    this.m1 = other.m1;
+    this.m5 = other.m5;
+    this.m15 = other.m15;
+    this.mean = other.mean;
+    this.min = other.min;
+    this.max = other.max;
+    this.p50 = other.p50;
+    this.p75 = other.p75;
+    this.p95 = other.p95;
+    this.p98 = other.p98;
+    this.p99 = other.p99;
+    this.p999 = other.p999;
+    this.stddev = other.stddev;
+    if (other.is_set_points()) {
+      List<Long> __this__points = new ArrayList<Long>(other.points);
+      this.points = __this__points;
+    }
+  }
+
+  public MetricSnapshot deepCopy() {
+    return new MetricSnapshot(this);
+  }
+
+  @Override
+  public void clear() {
+    set_metricId_isSet(false);
+    this.metricId = 0;
+    set_ts_isSet(false);
+    this.ts = 0;
+    set_metricType_isSet(false);
+    this.metricType = 0;
+    set_longValue_isSet(false);
+    this.longValue = 0;
+    set_doubleValue_isSet(false);
+    this.doubleValue = 0.0;
+    set_m1_isSet(false);
+    this.m1 = 0.0;
+    set_m5_isSet(false);
+    this.m5 = 0.0;
+    set_m15_isSet(false);
+    this.m15 = 0.0;
+    set_mean_isSet(false);
+    this.mean = 0.0;
+    set_min_isSet(false);
+    this.min = 0;
+    set_max_isSet(false);
+    this.max = 0;
+    set_p50_isSet(false);
+    this.p50 = 0.0;
+    set_p75_isSet(false);
+    this.p75 = 0.0;
+    set_p95_isSet(false);
+    this.p95 = 0.0;
+    set_p98_isSet(false);
+    this.p98 = 0.0;
+    set_p99_isSet(false);
+    this.p99 = 0.0;
+    set_p999_isSet(false);
+    this.p999 = 0.0;
+    set_stddev_isSet(false);
+    this.stddev = 0.0;
+    this.points = null;
+  }
+
+  public long get_metricId() {
+    return this.metricId;
+  }
+
+  public void set_metricId(long metricId) {
+    this.metricId = metricId;
+    set_metricId_isSet(true);
+  }
+
+  public void unset_metricId() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __METRICID_ISSET_ID);
+  }
+
+  /** Returns true if field metricId is set (has been assigned a value) and false otherwise */
+  public boolean is_set_metricId() {
+    return EncodingUtils.testBit(__isset_bitfield, __METRICID_ISSET_ID);
+  }
+
+  public void set_metricId_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __METRICID_ISSET_ID, value);
+  }
+
+  public long get_ts() {
+    return this.ts;
+  }
+
+  public void set_ts(long ts) {
+    this.ts = ts;
+    set_ts_isSet(true);
+  }
+
+  public void unset_ts() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TS_ISSET_ID);
+  }
+
+  /** Returns true if field ts is set (has been assigned a value) and false otherwise */
+  public boolean is_set_ts() {
+    return EncodingUtils.testBit(__isset_bitfield, __TS_ISSET_ID);
+  }
+
+  public void set_ts_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TS_ISSET_ID, value);
+  }
+
+  public int get_metricType() {
+    return this.metricType;
+  }
+
+  public void set_metricType(int metricType) {
+    this.metricType = metricType;
+    set_metricType_isSet(true);
+  }
+
+  public void unset_metricType() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __METRICTYPE_ISSET_ID);
+  }
+
+  /** Returns true if field metricType is set (has been assigned a value) and false otherwise */
+  public boolean is_set_metricType() {
+    return EncodingUtils.testBit(__isset_bitfield, __METRICTYPE_ISSET_ID);
+  }
+
+  public void set_metricType_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __METRICTYPE_ISSET_ID, value);
+  }
+
+  public long get_longValue() {
+    return this.longValue;
+  }
+
+  public void set_longValue(long longValue) {
+    this.longValue = longValue;
+    set_longValue_isSet(true);
+  }
+
+  public void unset_longValue() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __LONGVALUE_ISSET_ID);
+  }
+
+  /** Returns true if field longValue is set (has been assigned a value) and false otherwise */
+  public boolean is_set_longValue() {
+    return EncodingUtils.testBit(__isset_bitfield, __LONGVALUE_ISSET_ID);
+  }
+
+  public void set_longValue_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __LONGVALUE_ISSET_ID, value);
+  }
+
+  public double get_doubleValue() {
+    return this.doubleValue;
+  }
+
+  public void set_doubleValue(double doubleValue) {
+    this.doubleValue = doubleValue;
+    set_doubleValue_isSet(true);
+  }
+
+  public void unset_doubleValue() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __DOUBLEVALUE_ISSET_ID);
+  }
+
+  /** Returns true if field doubleValue is set (has been assigned a value) and false otherwise */
+  public boolean is_set_doubleValue() {
+    return EncodingUtils.testBit(__isset_bitfield, __DOUBLEVALUE_ISSET_ID);
+  }
+
+  public void set_doubleValue_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __DOUBLEVALUE_ISSET_ID, value);
+  }
+
+  public double get_m1() {
+    return this.m1;
+  }
+
+  public void set_m1(double m1) {
+    this.m1 = m1;
+    set_m1_isSet(true);
+  }
+
+  public void unset_m1() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __M1_ISSET_ID);
+  }
+
+  /** Returns true if field m1 is set (has been assigned a value) and false otherwise */
+  public boolean is_set_m1() {
+    return EncodingUtils.testBit(__isset_bitfield, __M1_ISSET_ID);
+  }
+
+  public void set_m1_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __M1_ISSET_ID, value);
+  }
+
+  public double get_m5() {
+    return this.m5;
+  }
+
+  public void set_m5(double m5) {
+    this.m5 = m5;
+    set_m5_isSet(true);
+  }
+
+  public void unset_m5() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __M5_ISSET_ID);
+  }
+
+  /** Returns true if field m5 is set (has been assigned a value) and false otherwise */
+  public boolean is_set_m5() {
+    return EncodingUtils.testBit(__isset_bitfield, __M5_ISSET_ID);
+  }
+
+  public void set_m5_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __M5_ISSET_ID, value);
+  }
+
+  public double get_m15() {
+    return this.m15;
+  }
+
+  public void set_m15(double m15) {
+    this.m15 = m15;
+    set_m15_isSet(true);
+  }
+
+  public void unset_m15() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __M15_ISSET_ID);
+  }
+
+  /** Returns true if field m15 is set (has been assigned a value) and false otherwise */
+  public boolean is_set_m15() {
+    return EncodingUtils.testBit(__isset_bitfield, __M15_ISSET_ID);
+  }
+
+  public void set_m15_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __M15_ISSET_ID, value);
+  }
+
+  public double get_mean() {
+    return this.mean;
+  }
+
+  public void set_mean(double mean) {
+    this.mean = mean;
+    set_mean_isSet(true);
+  }
+
+  public void unset_mean() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MEAN_ISSET_ID);
+  }
+
+  /** Returns true if field mean is set (has been assigned a value) and false otherwise */
+  public boolean is_set_mean() {
+    return EncodingUtils.testBit(__isset_bitfield, __MEAN_ISSET_ID);
+  }
+
+  public void set_mean_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MEAN_ISSET_ID, value);
+  }
+
+  public long get_min() {
+    return this.min;
+  }
+
+  public void set_min(long min) {
+    this.min = min;
+    set_min_isSet(true);
+  }
+
+  public void unset_min() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MIN_ISSET_ID);
+  }
+
+  /** Returns true if field min is set (has been assigned a value) and false otherwise */
+  public boolean is_set_min() {
+    return EncodingUtils.testBit(__isset_bitfield, __MIN_ISSET_ID);
+  }
+
+  public void set_min_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MIN_ISSET_ID, value);
+  }
+
+  public long get_max() {
+    return this.max;
+  }
+
+  public void set_max(long max) {
+    this.max = max;
+    set_max_isSet(true);
+  }
+
+  public void unset_max() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAX_ISSET_ID);
+  }
+
+  /** Returns true if field max is set (has been assigned a value) and false otherwise */
+  public boolean is_set_max() {
+    return EncodingUtils.testBit(__isset_bitfield, __MAX_ISSET_ID);
+  }
+
+  public void set_max_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAX_ISSET_ID, value);
+  }
+
+  public double get_p50() {
+    return this.p50;
+  }
+
+  public void set_p50(double p50) {
+    this.p50 = p50;
+    set_p50_isSet(true);
+  }
+
+  public void unset_p50() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __P50_ISSET_ID);
+  }
+
+  /** Returns true if field p50 is set (has been assigned a value) and false otherwise */
+  public boolean is_set_p50() {
+    return EncodingUtils.testBit(__isset_bitfield, __P50_ISSET_ID);
+  }
+
+  public void set_p50_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __P50_ISSET_ID, value);
+  }
+
+  public double get_p75() {
+    return this.p75;
+  }
+
+  public void set_p75(double p75) {
+    this.p75 = p75;
+    set_p75_isSet(true);
+  }
+
+  public void unset_p75() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __P75_ISSET_ID);
+  }
+
+  /** Returns true if field p75 is set (has been assigned a value) and false otherwise */
+  public boolean is_set_p75() {
+    return EncodingUtils.testBit(__isset_bitfield, __P75_ISSET_ID);
+  }
+
+  public void set_p75_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __P75_ISSET_ID, value);
+  }
+
+  public double get_p95() {
+    return this.p95;
+  }
+
+  public void set_p95(double p95) {
+    this.p95 = p95;
+    set_p95_isSet(true);
+  }
+
+  public void unset_p95() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __P95_ISSET_ID);
+  }
+
+  /** Returns true if field p95 is set (has been assigned a value) and false otherwise */
+  public boolean is_set_p95() {
+    return EncodingUtils.testBit(__isset_bitfield, __P95_ISSET_ID);
+  }
+
+  public void set_p95_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __P95_ISSET_ID, value);
+  }
+
+  public double get_p98() {
+    return this.p98;
+  }
+
+  public void set_p98(double p98) {
+    this.p98 = p98;
+    set_p98_isSet(true);
+  }
+
+  public void unset_p98() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __P98_ISSET_ID);
+  }
+
+  /** Returns true if field p98 is set (has been assigned a value) and false otherwise */
+  public boolean is_set_p98() {
+    return EncodingUtils.testBit(__isset_bitfield, __P98_ISSET_ID);
+  }
+
+  public void set_p98_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __P98_ISSET_ID, value);
+  }
+
+  public double get_p99() {
+    return this.p99;
+  }
+
+  public void set_p99(double p99) {
+    this.p99 = p99;
+    set_p99_isSet(true);
+  }
+
+  public void unset_p99() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __P99_ISSET_ID);
+  }
+
+  /** Returns true if field p99 is set (has been assigned a value) and false otherwise */
+  public boolean is_set_p99() {
+    return EncodingUtils.testBit(__isset_bitfield, __P99_ISSET_ID);
+  }
+
+  public void set_p99_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __P99_ISSET_ID, value);
+  }
+
+  public double get_p999() {
+    return this.p999;
+  }
+
+  public void set_p999(double p999) {
+    this.p999 = p999;
+    set_p999_isSet(true);
+  }
+
+  public void unset_p999() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __P999_ISSET_ID);
+  }
+
+  /** Returns true if field p999 is set (has been assigned a value) and false otherwise */
+  public boolean is_set_p999() {
+    return EncodingUtils.testBit(__isset_bitfield, __P999_ISSET_ID);
+  }
+
+  public void set_p999_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __P999_ISSET_ID, value);
+  }
+
+  public double get_stddev() {
+    return this.stddev;
+  }
+
+  public void set_stddev(double stddev) {
+    this.stddev = stddev;
+    set_stddev_isSet(true);
+  }
+
+  public void unset_stddev() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __STDDEV_ISSET_ID);
+  }
+
+  /** Returns true if field stddev is set (has been assigned a value) and false otherwise */
+  public boolean is_set_stddev() {
+    return EncodingUtils.testBit(__isset_bitfield, __STDDEV_ISSET_ID);
+  }
+
+  public void set_stddev_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __STDDEV_ISSET_ID, value);
+  }
+
+  public int get_points_size() {
+    return (this.points == null) ? 0 : this.points.size();
+  }
+
+  public java.util.Iterator<Long> get_points_iterator() {
+    return (this.points == null) ? null : this.points.iterator();
+  }
+
+  public void add_to_points(long elem) {
+    if (this.points == null) {
+      this.points = new ArrayList<Long>();
+    }
+    this.points.add(elem);
+  }
+
+  public List<Long> get_points() {
+    return this.points;
+  }
+
+  public void set_points(List<Long> points) {
+    this.points = points;
+  }
+
+  public void unset_points() {
+    this.points = null;
+  }
+
+  /** Returns true if field points is set (has been assigned a value) and false otherwise */
+  public boolean is_set_points() {
+    return this.points != null;
+  }
+
+  public void set_points_isSet(boolean value) {
+    if (!value) {
+      this.points = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case METRIC_ID:
+      if (value == null) {
+        unset_metricId();
+      } else {
+        set_metricId((Long)value);
+      }
+      break;
+
+    case TS:
+      if (value == null) {
+        unset_ts();
+      } else {
+        set_ts((Long)value);
+      }
+      break;
+
+    case METRIC_TYPE:
+      if (value == null) {
+        unset_metricType();
+      } else {
+        set_metricType((Integer)value);
+      }
+      break;
+
+    case LONG_VALUE:
+      if (value == null) {
+        unset_longValue();
+      } else {
+        set_longValue((Long)value);
+      }
+      break;
+
+    case DOUBLE_VALUE:
+      if (value == null) {
+        unset_doubleValue();
+      } else {
+        set_doubleValue((Double)value);
+      }
+      break;
+
+    case M1:
+      if (value == null) {
+        unset_m1();
+      } else {
+        set_m1((Double)value);
+      }
+      break;
+
+    case M5:
+      if (value == null) {
+        unset_m5();
+      } else {
+        set_m5((Double)value);
+      }
+      break;
+
+    case M15:
+      if (value == null) {
+        unset_m15();
+      } else {
+        set_m15((Double)value);
+      }
+      break;
+
+    case MEAN:
+      if (value == null) {
+        unset_mean();
+      } else {
+        set_mean((Double)value);
+      }
+      break;
+
+    case MIN:
+      if (value == null) {
+        unset_min();
+      } else {
+        set_min((Long)value);
+      }
+      break;
+
+    case MAX:
+      if (value == null) {
+        unset_max();
+      } else {
+        set_max((Long)value);
+      }
+      break;
+
+    case P50:
+      if (value == null) {
+        unset_p50();
+      } else {
+        set_p50((Double)value);
+      }
+      break;
+
+    case P75:
+      if (value == null) {
+        unset_p75();
+      } else {
+        set_p75((Double)value);
+      }
+      break;
+
+    case P95:
+      if (value == null) {
+        unset_p95();
+      } else {
+        set_p95((Double)value);
+      }
+      break;
+
+    case P98:
+      if (value == null) {
+        unset_p98();
+      } else {
+        set_p98((Double)value);
+      }
+      break;
+
+    case P99:
+      if (value == null) {
+        unset_p99();
+      } else {
+        set_p99((Double)value);
+      }
+      break;
+
+    case P999:
+      if (value == null) {
+        unset_p999();
+      } else {
+        set_p999((Double)value);
+      }
+      break;
+
+    case STDDEV:
+      if (value == null) {
+        unset_stddev();
+      } else {
+        set_stddev((Double)value);
+      }
+      break;
+
+    case POINTS:
+      if (value == null) {
+        unset_points();
+      } else {
+        set_points((List<Long>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case METRIC_ID:
+      return Long.valueOf(get_metricId());
+
+    case TS:
+      return Long.valueOf(get_ts());
+
+    case METRIC_TYPE:
+      return Integer.valueOf(get_metricType());
+
+    case LONG_VALUE:
+      return Long.valueOf(get_longValue());
+
+    case DOUBLE_VALUE:
+      return Double.valueOf(get_doubleValue());
+
+    case M1:
+      return Double.valueOf(get_m1());
+
+    case M5:
+      return Double.valueOf(get_m5());
+
+    case M15:
+      return Double.valueOf(get_m15());
+
+    case MEAN:
+      return Double.valueOf(get_mean());
+
+    case MIN:
+      return Long.valueOf(get_min());
+
+    case MAX:
+      return Long.valueOf(get_max());
+
+    case P50:
+      return Double.valueOf(get_p50());
+
+    case P75:
+      return Double.valueOf(get_p75());
+
+    case P95:
+      return Double.valueOf(get_p95());
+
+    case P98:
+      return Double.valueOf(get_p98());
+
+    case P99:
+      return Double.valueOf(get_p99());
+
+    case P999:
+      return Double.valueOf(get_p999());
+
+    case STDDEV:
+      return Double.valueOf(get_stddev());
+
+    case POINTS:
+      return get_points();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case METRIC_ID:
+      return is_set_metricId();
+    case TS:
+      return is_set_ts();
+    case METRIC_TYPE:
+      return is_set_metricType();
+    case LONG_VALUE:
+      return is_set_longValue();
+    case DOUBLE_VALUE:
+      return is_set_doubleValue();
+    case M1:
+      return is_set_m1();
+    case M5:
+      return is_set_m5();
+    case M15:
+      return is_set_m15();
+    case MEAN:
+      return is_set_mean();
+    case MIN:
+      return is_set_min();
+    case MAX:
+      return is_set_max();
+    case P50:
+      return is_set_p50();
+    case P75:
+      return is_set_p75();
+    case P95:
+      return is_set_p95();
+    case P98:
+      return is_set_p98();
+    case P99:
+      return is_set_p99();
+    case P999:
+      return is_set_p999();
+    case STDDEV:
+      return is_set_stddev();
+    case POINTS:
+      return is_set_points();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof MetricSnapshot)
+      return this.equals((MetricSnapshot)that);
+    return false;
+  }
+
+  public boolean equals(MetricSnapshot that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_metricId = true;
+    boolean that_present_metricId = true;
+    if (this_present_metricId || that_present_metricId) {
+      if (!(this_present_metricId && that_present_metricId))
+        return false;
+      if (this.metricId != that.metricId)
+        return false;
+    }
+
+    boolean this_present_ts = true;
+    boolean that_present_ts = true;
+    if (this_present_ts || that_present_ts) {
+      if (!(this_present_ts && that_present_ts))
+        return false;
+      if (this.ts != that.ts)
+        return false;
+    }
+
+    boolean this_present_metricType = true;
+    boolean that_present_metricType = true;
+    if (this_present_metricType || that_present_metricType) {
+      if (!(this_present_metricType && that_present_metricType))
+        return false;
+      if (this.metricType != that.metricType)
+        return false;
+    }
+
+    boolean this_present_longValue = true && this.is_set_longValue();
+    boolean that_present_longValue = true && that.is_set_longValue();
+    if (this_present_longValue || that_present_longValue) {
+      if (!(this_present_longValue && that_present_longValue))
+        return false;
+      if (this.longValue != that.longValue)
+        return false;
+    }
+
+    boolean this_present_doubleValue = true && this.is_set_doubleValue();
+    boolean that_present_doubleValue = true && that.is_set_doubleValue();
+    if (this_present_doubleValue || that_present_doubleValue) {
+      if (!(this_present_doubleValue && that_present_doubleValue))
+        return false;
+      if (this.doubleValue != that.doubleValue)
+        return false;
+    }
+
+    boolean this_present_m1 = true && this.is_set_m1();
+    boolean that_present_m1 = true && that.is_set_m1();
+    if (this_present_m1 || that_present_m1) {
+      if (!(this_present_m1 && that_present_m1))
+        return false;
+      if (this.m1 != that.m1)
+        return false;
+    }
+
+    boolean this_present_m5 = true && this.is_set_m5();
+    boolean that_present_m5 = true && that.is_set_m5();
+    if (this_present_m5 || that_present_m5) {
+      if (!(this_present_m5 && that_present_m5))
+        return false;
+      if (this.m5 != that.m5)
+        return false;
+    }
+
+    boolean this_present_m15 = true && this.is_set_m15();
+    boolean that_present_m15 = true && that.is_set_m15();
+    if (this_present_m15 || that_present_m15) {
+      if (!(this_present_m15 && that_present_m15))
+        return false;
+      if (this.m15 != that.m15)
+        return false;
+    }
+
+    boolean this_present_mean = true && this.is_set_mean();
+    boolean that_present_mean = true && that.is_set_mean();
+    if (this_present_mean || that_present_mean) {
+      if (!(this_present_mean && that_present_mean))
+        return false;
+      if (this.mean != that.mean)
+        return false;
+    }
+
+    boolean this_present_min = true && this.is_set_min();
+    boolean that_present_min = true && that.is_set_min();
+    if (this_present_min || that_present_min) {
+      if (!(this_present_min && that_present_min))
+        return false;
+      if (this.min != that.min)
+        return false;
+    }
+
+    boolean this_present_max = true && this.is_set_max();
+    boolean that_present_max = true && that.is_set_max();
+    if (this_present_max || that_present_max) {
+      if (!(this_present_max && that_present_max))
+        return false;
+      if (this.max != that.max)
+        return false;
+    }
+
+    boolean this_present_p50 = true && this.is_set_p50();
+    boolean that_present_p50 = true && that.is_set_p50();
+    if (this_present_p50 || that_present_p50) {
+      if (!(this_present_p50 && that_present_p50))
+        return false;
+      if (this.p50 != that.p50)
+        return false;
+    }
+
+    boolean this_present_p75 = true && this.is_set_p75();
+    boolean that_present_p75 = true && that.is_set_p75();
+    if (this_present_p75 || that_present_p75) {
+      if (!(this_present_p75 && that_present_p75))
+        return false;
+      if (this.p75 != that.p75)
+        return false;
+    }
+
+    boolean this_present_p95 = true && this.is_set_p95();
+    boolean that_present_p95 = true && that.is_set_p95();
+    if (this_present_p95 || that_present_p95) {
+      if (!(this_present_p95 && that_present_p95))
+        return false;
+      if (this.p95 != that.p95)
+        return false;
+    }
+
+    boolean this_present_p98 = true && this.is_set_p98();
+    boolean that_present_p98 = true && that.is_set_p98();
+    if (this_present_p98 || that_present_p98) {
+      if (!(this_present_p98 && that_present_p98))
+        return false;
+      if (this.p98 != that.p98)
+        return false;
+    }
+
+    boolean this_present_p99 = true && this.is_set_p99();
+    boolean that_present_p99 = true && that.is_set_p99();
+    if (this_present_p99 || that_present_p99) {
+      if (!(this_present_p99 && that_present_p99))
+        return false;
+      if (this.p99 != that.p99)
+        return false;
+    }
+
+    boolean this_present_p999 = true && this.is_set_p999();
+    boolean that_present_p999 = true && that.is_set_p999();
+    if (this_present_p999 || that_present_p999) {
+      if (!(this_present_p999 && that_present_p999))
+        return false;
+      if (this.p999 != that.p999)
+        return false;
+    }
+
+    boolean this_present_stddev = true && this.is_set_stddev();
+    boolean that_present_stddev = true && that.is_set_stddev();
+    if (this_present_stddev || that_present_stddev) {
+      if (!(this_present_stddev && that_present_stddev))
+        return false;
+      if (this.stddev != that.stddev)
+        return false;
+    }
+
+    boolean this_present_points = true && this.is_set_points();
+    boolean that_present_points = true && that.is_set_points();
+    if (this_present_points || that_present_points) {
+      if (!(this_present_points && that_present_points))
+        return false;
+      if (!this.points.equals(that.points))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_metricId = true;
+    list.add(present_metricId);
+    if (present_metricId)
+      list.add(metricId);
+
+    boolean present_ts = true;
+    list.add(present_ts);
+    if (present_ts)
+      list.add(ts);
+
+    boolean present_metricType = true;
+    list.add(present_metricType);
+    if (present_metricType)
+      list.add(metricType);
+
+    boolean present_longValue = true && (is_set_longValue());
+    list.add(present_longValue);
+    if (present_longValue)
+      list.add(longValue);
+
+    boolean present_doubleValue = true && (is_set_doubleValue());
+    list.add(present_doubleValue);
+    if (present_doubleValue)
+      list.add(doubleValue);
+
+    boolean present_m1 = true && (is_set_m1());
+    list.add(present_m1);
+    if (present_m1)
+      list.add(m1);
+
+    boolean present_m5 = true && (is_set_m5());
+    list.add(present_m5);
+    if (present_m5)
+      list.add(m5);
+
+    boolean present_m15 = true && (is_set_m15());
+    list.add(present_m15);
+    if (present_m15)
+      list.add(m15);
+
+    boolean present_mean = true && (is_set_mean());
+    list.add(present_mean);
+    if (present_mean)
+      list.add(mean);
+
+    boolean present_min = true && (is_set_min());
+    list.add(present_min);
+    if (present_min)
+      list.add(min);
+
+    boolean present_max = true && (is_set_max());
+    list.add(present_max);
+    if (present_max)
+      list.add(max);
+
+    boolean present_p50 = true && (is_set_p50());
+    list.add(present_p50);
+    if (present_p50)
+      list.add(p50);
+
+    boolean present_p75 = true && (is_set_p75());
+    list.add(present_p75);
+    if (present_p75)
+      list.add(p75);
+
+    boolean present_p95 = true && (is_set_p95());
+    list.add(present_p95);
+    if (present_p95)
+      list.add(p95);
+
+    boolean present_p98 = true && (is_set_p98());
+    list.add(present_p98);
+    if (present_p98)
+      list.add(p98);
+
+    boolean present_p99 = true && (is_set_p99());
+    list.add(present_p99);
+    if (present_p99)
+      list.add(p99);
+
+    boolean present_p999 = true && (is_set_p999());
+    list.add(present_p999);
+    if (present_p999)
+      list.add(p999);
+
+    boolean present_stddev = true && (is_set_stddev());
+    list.add(present_stddev);
+    if (present_stddev)
+      list.add(stddev);
+
+    boolean present_points = true && (is_set_points());
+    list.add(present_points);
+    if (present_points)
+      list.add(points);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(MetricSnapshot other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(is_set_metricId()).compareTo(other.is_set_metricId());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_metricId()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.metricId, other.metricId);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_ts()).compareTo(other.is_set_ts());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_ts()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ts, other.ts);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_metricType()).compareTo(other.is_set_metricType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_metricType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.metricType, other.metricType);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_longValue()).compareTo(other.is_set_longValue());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_longValue()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.longValue, other.longValue);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_doubleValue()).compareTo(other.is_set_doubleValue());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_doubleValue()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.doubleValue, other.doubleValue);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_m1()).compareTo(other.is_set_m1());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_m1()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.m1, other.m1);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_m5()).compareTo(other.is_set_m5());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_m5()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.m5, other.m5);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_m15()).compareTo(other.is_set_m15());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_m15()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.m15, other.m15);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_mean()).compareTo(other.is_set_mean());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_mean()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.mean, other.mean);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_min()).compareTo(other.is_set_min());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_min()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.min, other.min);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_max()).compareTo(other.is_set_max());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_max()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.max, other.max);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_p50()).compareTo(other.is_set_p50());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_p50()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.p50, other.p50);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_p75()).compareTo(other.is_set_p75());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_p75()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.p75, other.p75);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_p95()).compareTo(other.is_set_p95());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_p95()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.p95, other.p95);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_p98()).compareTo(other.is_set_p98());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_p98()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.p98, other.p98);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_p99()).compareTo(other.is_set_p99());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_p99()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.p99, other.p99);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_p999()).compareTo(other.is_set_p999());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_p999()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.p999, other.p999);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_stddev()).compareTo(other.is_set_stddev());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_stddev()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.stddev, other.stddev);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_points()).compareTo(other.is_set_points());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_points()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.points, other.points);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
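+
+  /*
+   * Illustrative only: round-tripping a snapshot through libthrift's
+   * TSerializer/TDeserializer helpers, which drive the read/write methods
+   * above. A sketch assuming the standard binary protocol; "snap" is the
+   * example instance from the constructor sketch.
+   *
+   *   org.apache.thrift.TSerializer ser =
+   *       new org.apache.thrift.TSerializer(new org.apache.thrift.protocol.TBinaryProtocol.Factory());
+   *   byte[] bytes = ser.serialize(snap);
+   *   MetricSnapshot copy = new MetricSnapshot();
+   *   new org.apache.thrift.TDeserializer(new org.apache.thrift.protocol.TBinaryProtocol.Factory())
+   *       .deserialize(copy, bytes);
+   */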
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("MetricSnapshot(");
+    boolean first = true;
+
+    sb.append("metricId:");
+    sb.append(this.metricId);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("ts:");
+    sb.append(this.ts);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("metricType:");
+    sb.append(this.metricType);
+    first = false;
+    if (is_set_longValue()) {
+      if (!first) sb.append(", ");
+      sb.append("longValue:");
+      sb.append(this.longValue);
+      first = false;
+    }
+    if (is_set_doubleValue()) {
+      if (!first) sb.append(", ");
+      sb.append("doubleValue:");
+      sb.append(this.doubleValue);
+      first = false;
+    }
+    if (is_set_m1()) {
+      if (!first) sb.append(", ");
+      sb.append("m1:");
+      sb.append(this.m1);
+      first = false;
+    }
+    if (is_set_m5()) {
+      if (!first) sb.append(", ");
+      sb.append("m5:");
+      sb.append(this.m5);
+      first = false;
+    }
+    if (is_set_m15()) {
+      if (!first) sb.append(", ");
+      sb.append("m15:");
+      sb.append(this.m15);
+      first = false;
+    }
+    if (is_set_mean()) {
+      if (!first) sb.append(", ");
+      sb.append("mean:");
+      sb.append(this.mean);
+      first = false;
+    }
+    if (is_set_min()) {
+      if (!first) sb.append(", ");
+      sb.append("min:");
+      sb.append(this.min);
+      first = false;
+    }
+    if (is_set_max()) {
+      if (!first) sb.append(", ");
+      sb.append("max:");
+      sb.append(this.max);
+      first = false;
+    }
+    if (is_set_p50()) {
+      if (!first) sb.append(", ");
+      sb.append("p50:");
+      sb.append(this.p50);
+      first = false;
+    }
+    if (is_set_p75()) {
+      if (!first) sb.append(", ");
+      sb.append("p75:");
+      sb.append(this.p75);
+      first = false;
+    }
+    if (is_set_p95()) {
+      if (!first) sb.append(", ");
+      sb.append("p95:");
+      sb.append(this.p95);
+      first = false;
+    }
+    if (is_set_p98()) {
+      if (!first) sb.append(", ");
+      sb.append("p98:");
+      sb.append(this.p98);
+      first = false;
+    }
+    if (is_set_p99()) {
+      if (!first) sb.append(", ");
+      sb.append("p99:");
+      sb.append(this.p99);
+      first = false;
+    }
+    if (is_set_p999()) {
+      if (!first) sb.append(", ");
+      sb.append("p999:");
+      sb.append(this.p999);
+      first = false;
+    }
+    if (is_set_stddev()) {
+      if (!first) sb.append(", ");
+      sb.append("stddev:");
+      sb.append(this.stddev);
+      first = false;
+    }
+    if (is_set_points()) {
+      if (!first) sb.append(", ");
+      sb.append("points:");
+      if (this.points == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.points);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws TException {
+    // check for required fields
+    if (!is_set_metricId()) {
+      throw new TProtocolException("Required field 'metricId' is unset! Struct:" + toString());
+    }
+
+    if (!is_set_ts()) {
+      throw new TProtocolException("Required field 'ts' is unset! Struct:" + toString());
+    }
+
+    if (!is_set_metricType()) {
+      throw new TProtocolException("Required field 'metricType' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
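+
+  /*
+   * Illustrative only: validate() is what enforces the three REQUIRED fields
+   * at serialization time. A minimal sketch:
+   *
+   *   try {
+   *     new MetricSnapshot().validate();  // no required fields set
+   *   } catch (TProtocolException e) {
+   *     // "Required field 'metricId' is unset! Struct:..."
+   *   }
+   */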
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
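+
+  /*
+   * Illustrative only: because writeObject/readObject above delegate to
+   * TCompactProtocol, plain java.io serialization of this struct also works.
+   * A sketch, reusing the example "snap" instance:
+   *
+   *   java.io.ByteArrayOutputStream bos = new java.io.ByteArrayOutputStream();
+   *   new java.io.ObjectOutputStream(bos).writeObject(snap);
+   *   MetricSnapshot back = (MetricSnapshot)
+   *       new java.io.ObjectInputStream(
+   *           new java.io.ByteArrayInputStream(bos.toByteArray())).readObject();
+   */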
+
+  private static class MetricSnapshotStandardSchemeFactory implements SchemeFactory {
+    public MetricSnapshotStandardScheme getScheme() {
+      return new MetricSnapshotStandardScheme();
+    }
+  }
+
+  private static class MetricSnapshotStandardScheme extends StandardScheme<MetricSnapshot> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, MetricSnapshot struct) throws TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // METRIC_ID
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.metricId = iprot.readI64();
+              struct.set_metricId_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // TS
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.ts = iprot.readI64();
+              struct.set_ts_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // METRIC_TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.metricType = iprot.readI32();
+              struct.set_metricType_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // LONG_VALUE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.longValue = iprot.readI64();
+              struct.set_longValue_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // DOUBLE_VALUE
+            if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
+              struct.doubleValue = iprot.readDouble();
+              struct.set_doubleValue_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // M1
+            if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
+              struct.m1 = iprot.readDouble();
+              struct.set_m1_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 7: // M5
+            if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
+              struct.m5 = iprot.readDouble();
+              struct.set_m5_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 8: // M15
+            if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
+              struct.m15 = iprot.readDouble();
+              struct.set_m15_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 9: // MEAN
+            if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
+              struct.mean = iprot.readDouble();
+              struct.set_mean_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 10: // MIN
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.min = iprot.readI64();
+              struct.set_min_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 11: // MAX
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.max = iprot.readI64();
+              struct.set_max_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 12: // P50
+            if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
+              struct.p50 = iprot.readDouble();
+              struct.set_p50_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 13: // P75
+            if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
+              struct.p75 = iprot.readDouble();
+              struct.set_p75_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 14: // P95
+            if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
+              struct.p95 = iprot.readDouble();
+              struct.set_p95_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 15: // P98
+            if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
+              struct.p98 = iprot.readDouble();
+              struct.set_p98_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 16: // P99
+            if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
+              struct.p99 = iprot.readDouble();
+              struct.set_p99_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 17: // P999
+            if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
+              struct.p999 = iprot.readDouble();
+              struct.set_p999_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 18: // STDDEV
+            if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
+              struct.stddev = iprot.readDouble();
+              struct.set_stddev_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 19: // POINTS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list116 = iprot.readListBegin();
+                struct.points = new ArrayList<Long>(_list116.size);
+                long _elem117;
+                for (int _i118 = 0; _i118 < _list116.size; ++_i118)
+                {
+                  _elem117 = iprot.readI64();
+                  struct.points.add(_elem117);
+                }
+                iprot.readListEnd();
+              }
+              struct.set_points_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, MetricSnapshot struct) throws TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(METRIC_ID_FIELD_DESC);
+      oprot.writeI64(struct.metricId);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(TS_FIELD_DESC);
+      oprot.writeI64(struct.ts);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(METRIC_TYPE_FIELD_DESC);
+      oprot.writeI32(struct.metricType);
+      oprot.writeFieldEnd();
+      if (struct.is_set_longValue()) {
+        oprot.writeFieldBegin(LONG_VALUE_FIELD_DESC);
+        oprot.writeI64(struct.longValue);
+        oprot.writeFieldEnd();
+      }
+      if (struct.is_set_doubleValue()) {
+        oprot.writeFieldBegin(DOUBLE_VALUE_FIELD_DESC);
+        oprot.writeDouble(struct.doubleValue);
+        oprot.writeFieldEnd();
+      }
+      if (struct.is_set_m1()) {
+        oprot.writeFieldBegin(M1_FIELD_DESC);
+        oprot.writeDouble(struct.m1);
+        oprot.writeFieldEnd();
+      }
+      if (struct.is_set_m5()) {
+        oprot.writeFieldBegin(M5_FIELD_DESC);
+        oprot.writeDouble(struct.m5);
+        oprot.writeFieldEnd();
+      }
+      if (struct.is_set_m15()) {
+        oprot.writeFieldBegin(M15_FIELD_DESC);
+        oprot.writeDouble(struct.m15);
+        oprot.writeFieldEnd();
+      }
+      if (struct.is_set_mean()) {
+        oprot.writeFieldBegin(MEAN_FIELD_DESC);
+        oprot.writeDouble(struct.mean);
+        oprot.writeFieldEnd();
+      }
+      if (struct.is_set_min()) {
+        oprot.writeFieldBegin(MIN_FIELD_DESC);
+        oprot.writeI64(struct.min);
+        oprot.writeFieldEnd();
+      }
+      if (struct.is_set_max()) {
+        oprot.writeFieldBegin(MAX_FIELD_DESC);
+        oprot.writeI64(struct.max);
+        oprot.writeFieldEnd();
+      }
+      if (struct.is_set_p50()) {
+        oprot.writeFieldBegin(P50_FIELD_DESC);
+        oprot.writeDouble(struct.p50);
+        oprot.writeFieldEnd();
+      }
+      if (struct.is_set_p75()) {
+        oprot.writeFieldBegin(P75_FIELD_DESC);
+        oprot.writeDouble(struct.p75);
+        oprot.writeFieldEnd();
+      }
+      if (struct.is_set_p95()) {
+        oprot.writeFieldBegin(P95_FIELD_DESC);
+        oprot.writeDouble(struct.p95);
+        oprot.writeFieldEnd();
+      }
+      if (struct.is_set_p98()) {
+        oprot.writeFieldBegin(P98_FIELD_DESC);
+        oprot.writeDouble(struct.p98);
+        oprot.writeFieldEnd();
+      }
+      if (struct.is_set_p99()) {
+        oprot.writeFieldBegin(P99_FIELD_DESC);
+        oprot.writeDouble(struct.p99);
+        oprot.writeFieldEnd();
+      }
+      if (struct.is_set_p999()) {
+        oprot.writeFieldBegin(P999_FIELD_DESC);
+        oprot.writeDouble(struct.p999);
+        oprot.writeFieldEnd();
+      }
+      if (struct.is_set_stddev()) {
+        oprot.writeFieldBegin(STDDEV_FIELD_DESC);
+        oprot.writeDouble(struct.stddev);
+        oprot.writeFieldEnd();
+      }
+      if (struct.points != null) {
+        if (struct.is_set_points()) {
+          oprot.writeFieldBegin(POINTS_FIELD_DESC);
+          {
+            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.points.size()));
+            for (long _iter119 : struct.points)
+            {
+              oprot.writeI64(_iter119);
+            }
+            oprot.writeListEnd();
+          }
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class MetricSnapshotTupleSchemeFactory implements SchemeFactory {
+    public MetricSnapshotTupleScheme getScheme() {
+      return new MetricSnapshotTupleScheme();
+    }
+  }
+
+  private static class MetricSnapshotTupleScheme extends TupleScheme<MetricSnapshot> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, MetricSnapshot struct) throws TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeI64(struct.metricId);
+      oprot.writeI64(struct.ts);
+      oprot.writeI32(struct.metricType);
+      BitSet optionals = new BitSet();
+      if (struct.is_set_longValue()) {
+        optionals.set(0);
+      }
+      if (struct.is_set_doubleValue()) {
+        optionals.set(1);
+      }
+      if (struct.is_set_m1()) {
+        optionals.set(2);
+      }
+      if (struct.is_set_m5()) {
+        optionals.set(3);
+      }
+      if (struct.is_set_m15()) {
+        optionals.set(4);
+      }
+      if (struct.is_set_mean()) {
+        optionals.set(5);
+      }
+      if (struct.is_set_min()) {
+        optionals.set(6);
+      }
+      if (struct.is_set_max()) {
+        optionals.set(7);
+      }
+      if (struct.is_set_p50()) {
+        optionals.set(8);
+      }
+      if (struct.is_set_p75()) {
+        optionals.set(9);
+      }
+      if (struct.is_set_p95()) {
+        optionals.set(10);
+      }
+      if (struct.is_set_p98()) {
+        optionals.set(11);
+      }
+      if (struct.is_set_p99()) {
+        optionals.set(12);
+      }
+      if (struct.is_set_p999()) {
+        optionals.set(13);
+      }
+      if (struct.is_set_stddev()) {
+        optionals.set(14);
+      }
+      if (struct.is_set_points()) {
+        optionals.set(15);
+      }
+      oprot.writeBitSet(optionals, 16);
+      if (struct.is_set_longValue()) {
+        oprot.writeI64(struct.longValue);
+      }
+      if (struct.is_set_doubleValue()) {
+        oprot.writeDouble(struct.doubleValue);
+      }
+      if (struct.is_set_m1()) {
+        oprot.writeDouble(struct.m1);
+      }
+      if (struct.is_set_m5()) {
+        oprot.writeDouble(struct.m5);
+      }
+      if (struct.is_set_m15()) {
+        oprot.writeDouble(struct.m15);
+      }
+      if (struct.is_set_mean()) {
+        oprot.writeDouble(struct.mean);
+      }
+      if (struct.is_set_min()) {
+        oprot.writeI64(struct.min);
+      }
+      if (struct.is_set_max()) {
+        oprot.writeI64(struct.max);
+      }
+      if (struct.is_set_p50()) {
+        oprot.writeDouble(struct.p50);
+      }
+      if (struct.is_set_p75()) {
+        oprot.writeDouble(struct.p75);
+      }
+      if (struct.is_set_p95()) {
+        oprot.writeDouble(struct.p95);
+      }
+      if (struct.is_set_p98()) {
+        oprot.writeDouble(struct.p98);
+      }
+      if (struct.is_set_p99()) {
+        oprot.writeDouble(struct.p99);
+      }
+      if (struct.is_set_p999()) {
+        oprot.writeDouble(struct.p999);
+      }
+      if (struct.is_set_stddev()) {
+        oprot.writeDouble(struct.stddev);
+      }
+      if (struct.is_set_points()) {
+        {
+          oprot.writeI32(struct.points.size());
+          for (long _iter120 : struct.points)
+          {
+            oprot.writeI64(_iter120);
+          }
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, MetricSnapshot struct) throws TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.metricId = iprot.readI64();
+      struct.set_metricId_isSet(true);
+      struct.ts = iprot.readI64();
+      struct.set_ts_isSet(true);
+      struct.metricType = iprot.readI32();
+      struct.set_metricType_isSet(true);
+      BitSet incoming = iprot.readBitSet(16);
+      if (incoming.get(0)) {
+        struct.longValue = iprot.readI64();
+        struct.set_longValue_isSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.doubleValue = iprot.readDouble();
+        struct.set_doubleValue_isSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.m1 = iprot.readDouble();
+        struct.set_m1_isSet(true);
+      }
+      if (incoming.get(3)) {
+        struct.m5 = iprot.readDouble();
+        struct.set_m5_isSet(true);
+      }
+      if (incoming.get(4)) {
+        struct.m15 = iprot.readDouble();
+        struct.set_m15_isSet(true);
+      }
+      if (incoming.get(5)) {
+        struct.mean = iprot.readDouble();
+        struct.set_mean_isSet(true);
+      }
+      if (incoming.get(6)) {
+        struct.min = iprot.readI64();
+        struct.set_min_isSet(true);
+      }
+      if (incoming.get(7)) {
+        struct.max = iprot.readI64();
+        struct.set_max_isSet(true);
+      }
+      if (incoming.get(8)) {
+        struct.p50 = iprot.readDouble();
+        struct.set_p50_isSet(true);
+      }
+      if (incoming.get(9)) {
+        struct.p75 = iprot.readDouble();
+        struct.set_p75_isSet(true);
+      }
+      if (incoming.get(10)) {
+        struct.p95 = iprot.readDouble();
+        struct.set_p95_isSet(true);
+      }
+      if (incoming.get(11)) {
+        struct.p98 = iprot.readDouble();
+        struct.set_p98_isSet(true);
+      }
+      if (incoming.get(12)) {
+        struct.p99 = iprot.readDouble();
+        struct.set_p99_isSet(true);
+      }
+      if (incoming.get(13)) {
+        struct.p999 = iprot.readDouble();
+        struct.set_p999_isSet(true);
+      }
+      if (incoming.get(14)) {
+        struct.stddev = iprot.readDouble();
+        struct.set_stddev_isSet(true);
+      }
+      if (incoming.get(15)) {
+        {
+          org.apache.thrift.protocol.TList _list121 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
+          struct.points = new ArrayList<Long>(_list121.size);
+          long _elem122;
+          for (int _i123 = 0; _i123 < _list121.size; ++_i123)
+          {
+            _elem122 = iprot.readI64();
+            struct.points.add(_elem122);
+          }
+        }
+        struct.set_points_isSet(true);
+      }
+    }
+  }
+
+}
+
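
The tuple scheme above is the compact variant of the struct's wire format: the three required fields (metricId, ts, metricType) are written unconditionally, then a 16-bit BitSet records which optional fields follow, and only those values are serialized. A minimal round-trip sketch, not part of this patch, assuming the generated accessors shown above (set_m1/is_set_m1 etc.) and a Thrift 0.9.x runtime on the classpath:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;

import org.apache.thrift.protocol.TTupleProtocol;
import org.apache.thrift.transport.TIOStreamTransport;

import backtype.storm.generated.MetricSnapshot;

public class MetricSnapshotRoundTrip {
    public static void main(String[] args) throws Exception {
        MetricSnapshot snap = new MetricSnapshot();
        snap.set_metricId(42L);                   // required: written unconditionally
        snap.set_ts(System.currentTimeMillis());  // required
        snap.set_metricType(1);                   // required
        snap.set_m1(0.5);                         // optional: flips bit 2 of the BitSet

        // write() dispatches to the tuple scheme because the protocol is a TTupleProtocol
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        snap.write(new TTupleProtocol(new TIOStreamTransport(bos)));

        MetricSnapshot copy = new MetricSnapshot();
        copy.read(new TTupleProtocol(new TIOStreamTransport(new ByteArrayInputStream(bos.toByteArray()))));

        // Only optionals whose bits were set survive the round trip; m5 was never set.
        System.out.println(copy.is_set_m1() + " " + copy.is_set_m5()); // prints: true false
    }
}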

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/MetricWindow.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/MetricWindow.java b/jstorm-core/src/main/java/backtype/storm/generated/MetricWindow.java
index 73faddd..b117a21 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/MetricWindow.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/MetricWindow.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class MetricWindow implements org.apache.thrift.TBase<MetricWindow, MetricWindow._Fields>, java.io.Serializable, Cloneable, Comparable<MetricWindow> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("MetricWindow");
 
@@ -278,11 +278,11 @@ public class MetricWindow implements org.apache.thrift.TBase<MetricWindow, Metri
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -302,10 +302,10 @@ public class MetricWindow implements org.apache.thrift.TBase<MetricWindow, Metri
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_metricWindow()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'metricWindow' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'metricWindow' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -314,7 +314,7 @@ public class MetricWindow implements org.apache.thrift.TBase<MetricWindow, Metri
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -322,7 +322,7 @@ public class MetricWindow implements org.apache.thrift.TBase<MetricWindow, Metri
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -335,7 +335,7 @@ public class MetricWindow implements org.apache.thrift.TBase<MetricWindow, Metri
 
   private static class MetricWindowStandardScheme extends StandardScheme<MetricWindow> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, MetricWindow struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, MetricWindow struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -374,7 +374,7 @@ public class MetricWindow implements org.apache.thrift.TBase<MetricWindow, Metri
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, MetricWindow struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, MetricWindow struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -406,7 +406,7 @@ public class MetricWindow implements org.apache.thrift.TBase<MetricWindow, Metri
   private static class MetricWindowTupleScheme extends TupleScheme<MetricWindow> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, MetricWindow struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, MetricWindow struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       {
         oprot.writeI32(struct.metricWindow.size());
@@ -419,7 +419,7 @@ public class MetricWindow implements org.apache.thrift.TBase<MetricWindow, Metri
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, MetricWindow struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, MetricWindow struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       {
         org.apache.thrift.protocol.TMap _map112 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I32, org.apache.thrift.protocol.TType.DOUBLE, iprot.readI32());

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/MonitorOptions.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/MonitorOptions.java b/jstorm-core/src/main/java/backtype/storm/generated/MonitorOptions.java
index 9b80b78..ec4f6b7 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/MonitorOptions.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/MonitorOptions.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class MonitorOptions implements org.apache.thrift.TBase<MonitorOptions, MonitorOptions._Fields>, java.io.Serializable, Cloneable, Comparable<MonitorOptions> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("MonitorOptions");
 
@@ -259,11 +259,11 @@ public class MonitorOptions implements org.apache.thrift.TBase<MonitorOptions, M
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -281,7 +281,7 @@ public class MonitorOptions implements org.apache.thrift.TBase<MonitorOptions, M
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     // check for sub-struct validity
   }
@@ -289,7 +289,7 @@ public class MonitorOptions implements org.apache.thrift.TBase<MonitorOptions, M
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -299,7 +299,7 @@ public class MonitorOptions implements org.apache.thrift.TBase<MonitorOptions, M
       // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
       __isset_bitfield = 0;
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -312,7 +312,7 @@ public class MonitorOptions implements org.apache.thrift.TBase<MonitorOptions, M
 
   private static class MonitorOptionsStandardScheme extends StandardScheme<MonitorOptions> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, MonitorOptions struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, MonitorOptions struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -339,7 +339,7 @@ public class MonitorOptions implements org.apache.thrift.TBase<MonitorOptions, M
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, MonitorOptions struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, MonitorOptions struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -363,7 +363,7 @@ public class MonitorOptions implements org.apache.thrift.TBase<MonitorOptions, M
   private static class MonitorOptionsTupleScheme extends TupleScheme<MonitorOptions> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, MonitorOptions struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, MonitorOptions struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       BitSet optionals = new BitSet();
       if (struct.is_set_isEnable()) {
@@ -376,7 +376,7 @@ public class MonitorOptions implements org.apache.thrift.TBase<MonitorOptions, M
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, MonitorOptions struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, MonitorOptions struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       BitSet incoming = iprot.readBitSet(1);
       if (incoming.get(0)) {


http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/scheduler/multitenant/IsolatedPool.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/scheduler/multitenant/IsolatedPool.java b/jstorm-core/src/main/java/backtype/storm/scheduler/multitenant/IsolatedPool.java
index dc7eded..3a50a3f 100755
--- a/jstorm-core/src/main/java/backtype/storm/scheduler/multitenant/IsolatedPool.java
+++ b/jstorm-core/src/main/java/backtype/storm/scheduler/multitenant/IsolatedPool.java
@@ -40,307 +40,297 @@ import backtype.storm.scheduler.WorkerSlot;
  * A pool of machines that can be used to run isolated topologies
  */
 public class IsolatedPool extends NodePool {
-  private static final Logger LOG = LoggerFactory.getLogger(IsolatedPool.class);
-  private Map<String, Set<Node>> _topologyIdToNodes = new HashMap<String, Set<Node>>();
-  private HashMap<String, TopologyDetails> _tds = new HashMap<String, TopologyDetails>();
-  private HashSet<String> _isolated = new HashSet<String>();
-  private int _maxNodes;
-  private int _usedNodes;
+    private static final Logger LOG = LoggerFactory.getLogger(IsolatedPool.class);
+    private Map<String, Set<Node>> _topologyIdToNodes = new HashMap<String, Set<Node>>();
+    private HashMap<String, TopologyDetails> _tds = new HashMap<String, TopologyDetails>();
+    private HashSet<String> _isolated = new HashSet<String>();
+    private int _maxNodes;
+    private int _usedNodes;
 
-  public IsolatedPool(int maxNodes) {
-    _maxNodes = maxNodes;
-    _usedNodes = 0;
-  }
-
-  @Override
-  public void addTopology(TopologyDetails td) {
-    String topId = td.getId();
-    LOG.debug("Adding in Topology {}", topId);
-    SchedulerAssignment assignment = _cluster.getAssignmentById(topId);
-    Set<Node> assignedNodes = new HashSet<Node>();
-    if (assignment != null) {
-      for (WorkerSlot ws: assignment.getSlots()) {
-        Node n = _nodeIdToNode.get(ws.getNodeId());
-        assignedNodes.add(n);
-      }
-    }
-    _usedNodes += assignedNodes.size();
-    _topologyIdToNodes.put(topId, assignedNodes);
-    _tds.put(topId, td);
-    if (td.getConf().get(Config.TOPOLOGY_ISOLATED_MACHINES) != null) {
-      _isolated.add(topId);
+    public IsolatedPool(int maxNodes) {
+        _maxNodes = maxNodes;
+        _usedNodes = 0;
     }
-  }
 
-  @Override
-  public boolean canAdd(TopologyDetails td) {
-    //Only add topologies that are not sharing nodes with other topologies
-    String topId = td.getId();
-    SchedulerAssignment assignment = _cluster.getAssignmentById(topId);
-    if (assignment != null) {
-      for (WorkerSlot ws: assignment.getSlots()) {
-        Node n = _nodeIdToNode.get(ws.getNodeId());
-        if (n.getRunningTopologies().size() > 1) {
-          return false;
-        }
-      }
-    }
-    return true;
-  }
-  
-  @Override
-  public void scheduleAsNeeded(NodePool ... lesserPools) {
-    for (String topId : _topologyIdToNodes.keySet()) {
-      TopologyDetails td = _tds.get(topId);
-      if (_cluster.needsScheduling(td)) {
-        LOG.debug("Scheduling topology {}",topId);
-        Set<Node> allNodes = _topologyIdToNodes.get(topId);
-        Number nodesRequested = (Number) td.getConf().get(Config.TOPOLOGY_ISOLATED_MACHINES);
-        int slotsToUse = 0;
-        if (nodesRequested == null) {
-          slotsToUse = getNodesForNotIsolatedTop(td, allNodes, lesserPools);
-        } else {
-          slotsToUse = getNodesForIsolatedTop(td, allNodes, lesserPools, 
-              nodesRequested.intValue());
+    @Override
+    public void addTopology(TopologyDetails td) {
+        String topId = td.getId();
+        LOG.debug("Adding in Topology {}", topId);
+        SchedulerAssignment assignment = _cluster.getAssignmentById(topId);
+        Set<Node> assignedNodes = new HashSet<Node>();
+        if (assignment != null) {
+            for (WorkerSlot ws : assignment.getSlots()) {
+                Node n = _nodeIdToNode.get(ws.getNodeId());
+                assignedNodes.add(n);
+            }
         }
-        //No slots to schedule for some reason, so skip it.
-        if (slotsToUse <= 0) {
-          continue;
+        _usedNodes += assignedNodes.size();
+        _topologyIdToNodes.put(topId, assignedNodes);
+        _tds.put(topId, td);
+        if (td.getConf().get(Config.TOPOLOGY_ISOLATED_MACHINES) != null) {
+            _isolated.add(topId);
         }
-        
-        RoundRobinSlotScheduler slotSched = 
-          new RoundRobinSlotScheduler(td, slotsToUse, _cluster);
-        
-        LinkedList<Node> sortedNodes = new LinkedList<Node>(allNodes);
-        Collections.sort(sortedNodes, Node.FREE_NODE_COMPARATOR_DEC);
+    }
 
-        LOG.debug("Nodes sorted by free space {}", sortedNodes);
-        while (true) {
-          Node n = sortedNodes.remove();
-          if (!slotSched.assignSlotTo(n)) {
-            break;
-          }
-          int freeSlots = n.totalSlotsFree();
-          for (int i = 0; i < sortedNodes.size(); i++) {
-            if (freeSlots >= sortedNodes.get(i).totalSlotsFree()) {
-              sortedNodes.add(i, n);
-              n = null;
-              break;
+    @Override
+    public boolean canAdd(TopologyDetails td) {
+        // Only add topologies that are not sharing nodes with other topologies
+        String topId = td.getId();
+        SchedulerAssignment assignment = _cluster.getAssignmentById(topId);
+        if (assignment != null) {
+            for (WorkerSlot ws : assignment.getSlots()) {
+                Node n = _nodeIdToNode.get(ws.getNodeId());
+                if (n.getRunningTopologies().size() > 1) {
+                    return false;
+                }
             }
-          }
-          if (n != null) {
-            sortedNodes.add(n);
-          }
         }
-      }
-      Set<Node> found = _topologyIdToNodes.get(topId);
-      int nc = found == null ? 0 : found.size();
-      _cluster.setStatus(topId,"Scheduled Isolated on "+nc+" Nodes");
+        return true;
     }
-  }
-  
-  /**
-   * Get the nodes needed to schedule an isolated topology.
-   * @param td the topology to be scheduled
-   * @param allNodes the nodes already scheduled for this topology.
-   * This will be updated to include new nodes if needed. 
-   * @param lesserPools node pools we can steal nodes from
-   * @return the number of additional slots that should be used for scheduling.
-   */
-  private int getNodesForIsolatedTop(TopologyDetails td, Set<Node> allNodes,
-      NodePool[] lesserPools, int nodesRequested) {
-    String topId = td.getId();
-    LOG.debug("Topology {} is isolated", topId);
-    int nodesFromUsAvailable = nodesAvailable();
-    int nodesFromOthersAvailable = NodePool.nodesAvailable(lesserPools);
 
-    int nodesUsed = _topologyIdToNodes.get(topId).size();
-    int nodesNeeded = nodesRequested - nodesUsed;
-    LOG.debug("Nodes... requested {} used {} available from us {} " +
-        "avail from other {} needed {}", new Object[] {nodesRequested, 
-        nodesUsed, nodesFromUsAvailable, nodesFromOthersAvailable,
-        nodesNeeded});
-    if ((nodesNeeded - nodesFromUsAvailable) > (_maxNodes - _usedNodes)) {
-      _cluster.setStatus(topId,"Max Nodes("+_maxNodes+") for this user would be exceeded. "
-        + ((nodesNeeded - nodesFromUsAvailable) - (_maxNodes - _usedNodes)) 
-        + " more nodes needed to run topology.");
-      return 0;
-    }
+    @Override
+    public void scheduleAsNeeded(NodePool... lesserPools) {
+        for (String topId : _topologyIdToNodes.keySet()) {
+            TopologyDetails td = _tds.get(topId);
+            if (_cluster.needsScheduling(td)) {
+                LOG.debug("Scheduling topology {}", topId);
+                Set<Node> allNodes = _topologyIdToNodes.get(topId);
+                Number nodesRequested = (Number) td.getConf().get(Config.TOPOLOGY_ISOLATED_MACHINES);
+                int slotsToUse = 0;
+                if (nodesRequested == null) {
+                    slotsToUse = getNodesForNotIsolatedTop(td, allNodes, lesserPools);
+                } else {
+                    slotsToUse = getNodesForIsolatedTop(td, allNodes, lesserPools, nodesRequested.intValue());
+                }
+                // No slots to schedule for some reason, so skip it.
+                if (slotsToUse <= 0) {
+                    continue;
+                }
 
-    //In order to avoid going over _maxNodes I may need to steal from
-    // myself even though other pools have free nodes. so figure out how
-    // much each group should provide
-    int nodesNeededFromOthers = Math.min(Math.min(_maxNodes - _usedNodes, 
-        nodesFromOthersAvailable), nodesNeeded);
-    int nodesNeededFromUs = nodesNeeded - nodesNeededFromOthers; 
-    LOG.debug("Nodes... needed from us {} needed from others {}", 
-        nodesNeededFromUs, nodesNeededFromOthers);
+                RoundRobinSlotScheduler slotSched = new RoundRobinSlotScheduler(td, slotsToUse, _cluster);
 
-    if (nodesNeededFromUs > nodesFromUsAvailable) {
-      _cluster.setStatus(topId, "Not Enough Nodes Available to Schedule Topology");
-      return 0;
+                LinkedList<Node> sortedNodes = new LinkedList<Node>(allNodes);
+                Collections.sort(sortedNodes, Node.FREE_NODE_COMPARATOR_DEC);
+
+                LOG.debug("Nodes sorted by free space {}", sortedNodes);
+                while (true) {
+                    Node n = sortedNodes.remove();
+                    if (!slotSched.assignSlotTo(n)) {
+                        break;
+                    }
+                    int freeSlots = n.totalSlotsFree();
+                    for (int i = 0; i < sortedNodes.size(); i++) {
+                        if (freeSlots >= sortedNodes.get(i).totalSlotsFree()) {
+                            sortedNodes.add(i, n);
+                            n = null;
+                            break;
+                        }
+                    }
+                    if (n != null) {
+                        sortedNodes.add(n);
+                    }
+                }
+            }
+            Set<Node> found = _topologyIdToNodes.get(topId);
+            int nc = found == null ? 0 : found.size();
+            _cluster.setStatus(topId, "Scheduled Isolated on " + nc + " Nodes");
+        }
     }
 
-    //Get the nodes
-    Collection<Node> found = NodePool.takeNodes(nodesNeededFromOthers, lesserPools);
-    _usedNodes += found.size();
-    allNodes.addAll(found);
-    Collection<Node> foundMore = takeNodes(nodesNeededFromUs);
-    _usedNodes += foundMore.size();
-    allNodes.addAll(foundMore);
+    /**
+     * Get the nodes needed to schedule an isolated topology.
+     * 
+     * @param td the topology to be scheduled
+     * @param allNodes the nodes already scheduled for this topology. This will be updated to include new nodes if needed.
+     * @param lesserPools node pools we can steal nodes from
+     * @return the number of additional slots that should be used for scheduling.
+     */
+    private int getNodesForIsolatedTop(TopologyDetails td, Set<Node> allNodes, NodePool[] lesserPools, int nodesRequested) {
+        String topId = td.getId();
+        LOG.debug("Topology {} is isolated", topId);
+        int nodesFromUsAvailable = nodesAvailable();
+        int nodesFromOthersAvailable = NodePool.nodesAvailable(lesserPools);
 
-    int totalTasks = td.getExecutors().size();
-    int origRequest = td.getNumWorkers();
-    int slotsRequested = Math.min(totalTasks, origRequest);
-    int slotsUsed = Node.countSlotsUsed(allNodes);
-    int slotsFree = Node.countFreeSlotsAlive(allNodes);
-    int slotsToUse = Math.min(slotsRequested - slotsUsed, slotsFree);
-    if (slotsToUse <= 0) {
-      _cluster.setStatus(topId, "Node has partially crashed, if this situation persists rebalance the topology.");
-    }
-    return slotsToUse;
-  }
-  
-  /**
-   * Get the nodes needed to schedule a non-isolated topology.
-   * @param td the topology to be scheduled
-   * @param allNodes the nodes already scheduled for this topology.
-   * This will be updated to include new nodes if needed. 
-   * @param lesserPools node pools we can steal nodes from
-   * @return the number of additional slots that should be used for scheduling.
-   */
-  private int getNodesForNotIsolatedTop(TopologyDetails td, Set<Node> allNodes,
-      NodePool[] lesserPools) {
-    String topId = td.getId();
-    LOG.debug("Topology {} is not isolated",topId);
-    int totalTasks = td.getExecutors().size();
-    int origRequest = td.getNumWorkers();
-    int slotsRequested = Math.min(totalTasks, origRequest);
-    int slotsUsed = Node.countSlotsUsed(topId, allNodes);
-    int slotsFree = Node.countFreeSlotsAlive(allNodes);
-    //Check to see if we have enough slots before trying to get them
-    int slotsAvailable = 0;
-    if (slotsRequested > slotsFree) {
-      slotsAvailable = NodePool.slotsAvailable(lesserPools);
-    }
-    int slotsToUse = Math.min(slotsRequested - slotsUsed, slotsFree + slotsAvailable);
-    LOG.debug("Slots... requested {} used {} free {} available {} to be used {}", 
-        new Object[] {slotsRequested, slotsUsed, slotsFree, slotsAvailable, slotsToUse});
-    if (slotsToUse <= 0) {
-      _cluster.setStatus(topId, "Not Enough Slots Available to Schedule Topology");
-      return 0;
+        int nodesUsed = _topologyIdToNodes.get(topId).size();
+        int nodesNeeded = nodesRequested - nodesUsed;
+        LOG.debug("Nodes... requested {} used {} available from us {} " + "avail from other {} needed {}", new Object[] { nodesRequested, nodesUsed,
+                nodesFromUsAvailable, nodesFromOthersAvailable, nodesNeeded });
+        if ((nodesNeeded - nodesFromUsAvailable) > (_maxNodes - _usedNodes)) {
+            _cluster.setStatus(topId, "Max Nodes(" + _maxNodes + ") for this user would be exceeded. "
+                    + ((nodesNeeded - nodesFromUsAvailable) - (_maxNodes - _usedNodes)) + " more nodes needed to run topology.");
+            return 0;
+        }
+
+        // In order to avoid going over _maxNodes I may need to steal from
+        // myself even though other pools have free nodes. so figure out how
+        // much each group should provide
+        int nodesNeededFromOthers = Math.min(Math.min(_maxNodes - _usedNodes, nodesFromOthersAvailable), nodesNeeded);
+        int nodesNeededFromUs = nodesNeeded - nodesNeededFromOthers;
+        LOG.debug("Nodes... needed from us {} needed from others {}", nodesNeededFromUs, nodesNeededFromOthers);
+
+        if (nodesNeededFromUs > nodesFromUsAvailable) {
+            _cluster.setStatus(topId, "Not Enough Nodes Available to Schedule Topology");
+            return 0;
+        }
+
+        // Get the nodes
+        Collection<Node> found = NodePool.takeNodes(nodesNeededFromOthers, lesserPools);
+        _usedNodes += found.size();
+        allNodes.addAll(found);
+        Collection<Node> foundMore = takeNodes(nodesNeededFromUs);
+        _usedNodes += foundMore.size();
+        allNodes.addAll(foundMore);
+
+        int totalTasks = td.getExecutors().size();
+        int origRequest = td.getNumWorkers();
+        int slotsRequested = Math.min(totalTasks, origRequest);
+        int slotsUsed = Node.countSlotsUsed(allNodes);
+        int slotsFree = Node.countFreeSlotsAlive(allNodes);
+        int slotsToUse = Math.min(slotsRequested - slotsUsed, slotsFree);
+        if (slotsToUse <= 0) {
+            _cluster.setStatus(topId, "Node has partially crashed, if this situation persists rebalance the topology.");
+        }
+        return slotsToUse;
     }
-    int slotsNeeded = slotsToUse - slotsFree;
-    int numNewNodes = NodePool.getNodeCountIfSlotsWereTaken(slotsNeeded, lesserPools);
-    LOG.debug("Nodes... new {} used {} max {}",
-        new Object[]{numNewNodes, _usedNodes, _maxNodes});
-    if ((numNewNodes + _usedNodes) > _maxNodes) {
-      _cluster.setStatus(topId,"Max Nodes("+_maxNodes+") for this user would be exceeded. " +
-      (numNewNodes - (_maxNodes - _usedNodes)) + " more nodes needed to run topology.");
-      return 0;
+
+    /**
+     * Get the nodes needed to schedule a non-isolated topology.
+     * 
+     * @param td the topology to be scheduled
+     * @param allNodes the nodes already scheduled for this topology. This will be updated to include new nodes if needed.
+     * @param lesserPools node pools we can steal nodes from
+     * @return the number of additional slots that should be used for scheduling.
+     */
+    private int getNodesForNotIsolatedTop(TopologyDetails td, Set<Node> allNodes, NodePool[] lesserPools) {
+        String topId = td.getId();
+        LOG.debug("Topology {} is not isolated", topId);
+        int totalTasks = td.getExecutors().size();
+        int origRequest = td.getNumWorkers();
+        int slotsRequested = Math.min(totalTasks, origRequest);
+        int slotsUsed = Node.countSlotsUsed(topId, allNodes);
+        int slotsFree = Node.countFreeSlotsAlive(allNodes);
+        // Check to see if we have enough slots before trying to get them
+        int slotsAvailable = 0;
+        if (slotsRequested > slotsFree) {
+            slotsAvailable = NodePool.slotsAvailable(lesserPools);
+        }
+        int slotsToUse = Math.min(slotsRequested - slotsUsed, slotsFree + slotsAvailable);
+        LOG.debug("Slots... requested {} used {} free {} available {} to be used {}", new Object[] { slotsRequested, slotsUsed, slotsFree, slotsAvailable,
+                slotsToUse });
+        if (slotsToUse <= 0) {
+            _cluster.setStatus(topId, "Not Enough Slots Available to Schedule Topology");
+            return 0;
+        }
+        int slotsNeeded = slotsToUse - slotsFree;
+        int numNewNodes = NodePool.getNodeCountIfSlotsWereTaken(slotsNeeded, lesserPools);
+        LOG.debug("Nodes... new {} used {} max {}", new Object[] { numNewNodes, _usedNodes, _maxNodes });
+        if ((numNewNodes + _usedNodes) > _maxNodes) {
+            _cluster.setStatus(topId, "Max Nodes(" + _maxNodes + ") for this user would be exceeded. " + (numNewNodes - (_maxNodes - _usedNodes))
+                    + " more nodes needed to run topology.");
+            return 0;
+        }
+
+        Collection<Node> found = NodePool.takeNodesBySlot(slotsNeeded, lesserPools);
+        _usedNodes += found.size();
+        allNodes.addAll(found);
+        return slotsToUse;
     }
-    
-    Collection<Node> found = NodePool.takeNodesBySlot(slotsNeeded, lesserPools);
-    _usedNodes += found.size();
-    allNodes.addAll(found);
-    return slotsToUse;
-  }
 
-  @Override
-  public Collection<Node> takeNodes(int nodesNeeded) {
-    LOG.debug("Taking {} from {}", nodesNeeded, this);
-    HashSet<Node> ret = new HashSet<Node>();
-    for (Entry<String, Set<Node>> entry: _topologyIdToNodes.entrySet()) {
-      if (!_isolated.contains(entry.getKey())) {
-        Iterator<Node> it = entry.getValue().iterator();
-        while (it.hasNext()) {
-          if (nodesNeeded <= 0) {
-            return ret;
-          }
-          Node n = it.next();
-          it.remove();
-          n.freeAllSlots(_cluster);
-          ret.add(n);
-          nodesNeeded--;
-          _usedNodes--;
+    @Override
+    public Collection<Node> takeNodes(int nodesNeeded) {
+        LOG.debug("Taking {} from {}", nodesNeeded, this);
+        HashSet<Node> ret = new HashSet<Node>();
+        for (Entry<String, Set<Node>> entry : _topologyIdToNodes.entrySet()) {
+            if (!_isolated.contains(entry.getKey())) {
+                Iterator<Node> it = entry.getValue().iterator();
+                while (it.hasNext()) {
+                    if (nodesNeeded <= 0) {
+                        return ret;
+                    }
+                    Node n = it.next();
+                    it.remove();
+                    n.freeAllSlots(_cluster);
+                    ret.add(n);
+                    nodesNeeded--;
+                    _usedNodes--;
+                }
+            }
         }
-      }
+        return ret;
     }
-    return ret;
-  }
-  
-  @Override
-  public int nodesAvailable() {
-    int total = 0;
-    for (Entry<String, Set<Node>> entry: _topologyIdToNodes.entrySet()) {
-      if (!_isolated.contains(entry.getKey())) {
-        total += entry.getValue().size();
-      }
+
+    @Override
+    public int nodesAvailable() {
+        int total = 0;
+        for (Entry<String, Set<Node>> entry : _topologyIdToNodes.entrySet()) {
+            if (!_isolated.contains(entry.getKey())) {
+                total += entry.getValue().size();
+            }
+        }
+        return total;
     }
-    return total;
-  }
-  
-  @Override
-  public int slotsAvailable() {
-    int total = 0;
-    for (Entry<String, Set<Node>> entry: _topologyIdToNodes.entrySet()) {
-      if (!_isolated.contains(entry.getKey())) {
-        total += Node.countTotalSlotsAlive(entry.getValue());
-      }
+
+    @Override
+    public int slotsAvailable() {
+        int total = 0;
+        for (Entry<String, Set<Node>> entry : _topologyIdToNodes.entrySet()) {
+            if (!_isolated.contains(entry.getKey())) {
+                total += Node.countTotalSlotsAlive(entry.getValue());
+            }
+        }
+        return total;
     }
-    return total;
-  }
 
-  @Override
-  public Collection<Node> takeNodesBySlots(int slotsNeeded) {
-    HashSet<Node> ret = new HashSet<Node>();
-    for (Entry<String, Set<Node>> entry: _topologyIdToNodes.entrySet()) {
-      if (!_isolated.contains(entry.getKey())) {
-        Iterator<Node> it = entry.getValue().iterator();
-        while (it.hasNext()) {
-          Node n = it.next();
-          if (n.isAlive()) {
-            it.remove();
-            _usedNodes--;
-            n.freeAllSlots(_cluster);
-            ret.add(n);
-            slotsNeeded -= n.totalSlots();
-            if (slotsNeeded <= 0) {
-              return ret;
+    @Override
+    public Collection<Node> takeNodesBySlots(int slotsNeeded) {
+        HashSet<Node> ret = new HashSet<Node>();
+        for (Entry<String, Set<Node>> entry : _topologyIdToNodes.entrySet()) {
+            if (!_isolated.contains(entry.getKey())) {
+                Iterator<Node> it = entry.getValue().iterator();
+                while (it.hasNext()) {
+                    Node n = it.next();
+                    if (n.isAlive()) {
+                        it.remove();
+                        _usedNodes--;
+                        n.freeAllSlots(_cluster);
+                        ret.add(n);
+                        slotsNeeded -= n.totalSlots();
+                        if (slotsNeeded <= 0) {
+                            return ret;
+                        }
+                    }
+                }
             }
-          }
         }
-      }
+        return ret;
     }
-    return ret;
-  }
-  
-  @Override
-  public NodeAndSlotCounts getNodeAndSlotCountIfSlotsWereTaken(int slotsNeeded) {
-    int nodesFound = 0;
-    int slotsFound = 0;
-    for (Entry<String, Set<Node>> entry: _topologyIdToNodes.entrySet()) {
-      if (!_isolated.contains(entry.getKey())) {
-        Iterator<Node> it = entry.getValue().iterator();
-        while (it.hasNext()) {
-          Node n = it.next();
-          if (n.isAlive()) {
-            nodesFound++;
-            int totalSlotsFree = n.totalSlots();
-            slotsFound += totalSlotsFree;
-            slotsNeeded -= totalSlotsFree;
-            if (slotsNeeded <= 0) {
-              return new NodeAndSlotCounts(nodesFound, slotsFound);
+
+    @Override
+    public NodeAndSlotCounts getNodeAndSlotCountIfSlotsWereTaken(int slotsNeeded) {
+        int nodesFound = 0;
+        int slotsFound = 0;
+        for (Entry<String, Set<Node>> entry : _topologyIdToNodes.entrySet()) {
+            if (!_isolated.contains(entry.getKey())) {
+                Iterator<Node> it = entry.getValue().iterator();
+                while (it.hasNext()) {
+                    Node n = it.next();
+                    if (n.isAlive()) {
+                        nodesFound++;
+                        int totalSlotsFree = n.totalSlots();
+                        slotsFound += totalSlotsFree;
+                        slotsNeeded -= totalSlotsFree;
+                        if (slotsNeeded <= 0) {
+                            return new NodeAndSlotCounts(nodesFound, slotsFound);
+                        }
+                    }
+                }
             }
-          }
         }
-      }
+        return new NodeAndSlotCounts(nodesFound, slotsFound);
+    }
+
+    @Override
+    public String toString() {
+        return "IsolatedPool... ";
     }
-    return new NodeAndSlotCounts(nodesFound, slotsFound);
-  }
-  
-  @Override
-  public String toString() {
-    return "IsolatedPool... ";
-  }
 }
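
The node arithmetic in getNodesForIsolatedTop is easiest to see with concrete numbers. Below is a self-contained sketch, not part of the patch (all values invented), that mirrors its two min() steps: steal from the lesser pools first, but never past the per-user _maxNodes cap, and cover the remainder from this pool's own free nodes.

public class IsolatedPoolMath {
    public static void main(String[] args) {
        int maxNodes = 10, usedNodes = 6;       // per-user cap and nodes already in use
        int nodesRequested = 5, nodesUsed = 2;  // topology wants 5 nodes, already holds 2
        int fromUsAvailable = 1;                // free nodes inside this pool
        int fromOthersAvailable = 4;            // free nodes in the lesser pools

        int nodesNeeded = nodesRequested - nodesUsed;  // 3
        // Take from lesser pools first, capped by the head room under maxNodes.
        int fromOthers = Math.min(Math.min(maxNodes - usedNodes, fromOthersAvailable), nodesNeeded); // min(4, 4, 3) = 3
        int fromUs = nodesNeeded - fromOthers;         // 0: nothing stolen from ourselves
        System.out.println("from others = " + fromOthers + ", from us = " + fromUs);
    }
}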

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/scheduler/multitenant/MultitenantScheduler.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/scheduler/multitenant/MultitenantScheduler.java b/jstorm-core/src/main/java/backtype/storm/scheduler/multitenant/MultitenantScheduler.java
index 320b388..27475d9 100755
--- a/jstorm-core/src/main/java/backtype/storm/scheduler/multitenant/MultitenantScheduler.java
+++ b/jstorm-core/src/main/java/backtype/storm/scheduler/multitenant/MultitenantScheduler.java
@@ -32,67 +32,66 @@ import backtype.storm.scheduler.TopologyDetails;
 import backtype.storm.utils.Utils;
 
 public class MultitenantScheduler implements IScheduler {
-  private static final Logger LOG = LoggerFactory.getLogger(MultitenantScheduler.class);
-  @SuppressWarnings("rawtypes")
-  private Map _conf;
-  
-  @Override
-  public void prepare(@SuppressWarnings("rawtypes") Map conf) {
-    _conf = conf;
-  }
- 
-  private Map<String, Number> getUserConf() {
-    Map<String, Number> ret = (Map<String, Number>)_conf.get(Config.MULTITENANT_SCHEDULER_USER_POOLS);
-    if (ret == null) {
-      ret = new HashMap<String, Number>();
-    } else {
-      ret = new HashMap<String, Number>(ret); 
-    }
+    private static final Logger LOG = LoggerFactory.getLogger(MultitenantScheduler.class);
+    @SuppressWarnings("rawtypes")
+    private Map _conf;
 
-    Map fromFile = Utils.findAndReadConfigFile("multitenant-scheduler.yaml", false);
-    Map<String, Number> tmp = (Map<String, Number>)fromFile.get(Config.MULTITENANT_SCHEDULER_USER_POOLS);
-    if (tmp != null) {
-      ret.putAll(tmp);
+    @Override
+    public void prepare(@SuppressWarnings("rawtypes") Map conf) {
+        _conf = conf;
     }
-    return ret;
-  }
 
- 
-  @Override
-  public void schedule(Topologies topologies, Cluster cluster) {
-    LOG.debug("Rerunning scheduling...");
-    Map<String, Node> nodeIdToNode = Node.getAllNodesFrom(cluster);
-    
-    Map<String, Number> userConf = getUserConf();
-    
-    Map<String, IsolatedPool> userPools = new HashMap<String, IsolatedPool>();
-    for (Map.Entry<String, Number> entry : userConf.entrySet()) {
-      userPools.put(entry.getKey(), new IsolatedPool(entry.getValue().intValue()));
-    }
-    DefaultPool defaultPool = new DefaultPool();
-    FreePool freePool = new FreePool();
-    
-    freePool.init(cluster, nodeIdToNode);
-    for (IsolatedPool pool : userPools.values()) {
-      pool.init(cluster, nodeIdToNode);
-    }
-    defaultPool.init(cluster, nodeIdToNode);
-    
-    for (TopologyDetails td: topologies.getTopologies()) {
-      String user = (String)td.getConf().get(Config.TOPOLOGY_SUBMITTER_USER);
-      LOG.debug("Found top {} run by user {}",td.getId(), user);
-      NodePool pool = userPools.get(user);
-      if (pool == null || !pool.canAdd(td)) {
-        pool = defaultPool;
-      }
-      pool.addTopology(td);
+    private Map<String, Number> getUserConf() {
+        Map<String, Number> ret = (Map<String, Number>) _conf.get(Config.MULTITENANT_SCHEDULER_USER_POOLS);
+        if (ret == null) {
+            ret = new HashMap<String, Number>();
+        } else {
+            ret = new HashMap<String, Number>(ret);
+        }
+
+        Map fromFile = Utils.findAndReadConfigFile("multitenant-scheduler.yaml", false);
+        Map<String, Number> tmp = (Map<String, Number>) fromFile.get(Config.MULTITENANT_SCHEDULER_USER_POOLS);
+        if (tmp != null) {
+            ret.putAll(tmp);
+        }
+        return ret;
     }
-    
-    //Now schedule all of the topologies that need to be scheduled
-    for (IsolatedPool pool : userPools.values()) {
-      pool.scheduleAsNeeded(freePool, defaultPool);
+
+    @Override
+    public void schedule(Topologies topologies, Cluster cluster) {
+        LOG.debug("Rerunning scheduling...");
+        Map<String, Node> nodeIdToNode = Node.getAllNodesFrom(cluster);
+
+        Map<String, Number> userConf = getUserConf();
+
+        Map<String, IsolatedPool> userPools = new HashMap<String, IsolatedPool>();
+        for (Map.Entry<String, Number> entry : userConf.entrySet()) {
+            userPools.put(entry.getKey(), new IsolatedPool(entry.getValue().intValue()));
+        }
+        DefaultPool defaultPool = new DefaultPool();
+        FreePool freePool = new FreePool();
+
+        freePool.init(cluster, nodeIdToNode);
+        for (IsolatedPool pool : userPools.values()) {
+            pool.init(cluster, nodeIdToNode);
+        }
+        defaultPool.init(cluster, nodeIdToNode);
+
+        for (TopologyDetails td : topologies.getTopologies()) {
+            String user = (String) td.getConf().get(Config.TOPOLOGY_SUBMITTER_USER);
+            LOG.debug("Found top {} run by user {}", td.getId(), user);
+            NodePool pool = userPools.get(user);
+            if (pool == null || !pool.canAdd(td)) {
+                pool = defaultPool;
+            }
+            pool.addTopology(td);
+        }
+
+        // Now schedule all of the topologies that need to be scheduled
+        for (IsolatedPool pool : userPools.values()) {
+            pool.scheduleAsNeeded(freePool, defaultPool);
+        }
+        defaultPool.scheduleAsNeeded(freePool);
+        LOG.debug("Scheduling done...");
     }
-    defaultPool.scheduleAsNeeded(freePool);
-    LOG.debug("Scheduling done...");
-  }
 }
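
getUserConf() reads the per-user node caps from Config.MULTITENANT_SCHEDULER_USER_POOLS, first from the cluster conf and then, overriding it, from multitenant-scheduler.yaml. A minimal wiring sketch, not part of the patch (user names and counts are invented):

import java.util.HashMap;
import java.util.Map;

import backtype.storm.Config;
import backtype.storm.scheduler.multitenant.MultitenantScheduler;

public class MultitenantSetup {
    public static void main(String[] args) {
        // Each entry caps how many nodes that user's IsolatedPool may claim.
        Map<String, Number> pools = new HashMap<String, Number>();
        pools.put("alice", 4);
        pools.put("bob", 2);

        Map<String, Object> conf = new HashMap<String, Object>();
        conf.put(Config.MULTITENANT_SCHEDULER_USER_POOLS, pools);

        MultitenantScheduler scheduler = new MultitenantScheduler();
        scheduler.prepare(conf);
        // Nimbus then invokes scheduler.schedule(topologies, cluster) on every scheduling round.
    }
}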

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/scheduler/multitenant/Node.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/scheduler/multitenant/Node.java b/jstorm-core/src/main/java/backtype/storm/scheduler/multitenant/Node.java
index 883c65f..2cc49a8 100755
--- a/jstorm-core/src/main/java/backtype/storm/scheduler/multitenant/Node.java
+++ b/jstorm-core/src/main/java/backtype/storm/scheduler/multitenant/Node.java
@@ -39,305 +39,299 @@ import backtype.storm.scheduler.WorkerSlot;
  * Represents a single node in the cluster.
  */
 public class Node {
-  private static final Logger LOG = LoggerFactory.getLogger(Node.class);
-  private Map<String, Set<WorkerSlot>> _topIdToUsedSlots = new HashMap<String,Set<WorkerSlot>>();
-  private Set<WorkerSlot> _freeSlots = new HashSet<WorkerSlot>();
-  private final String _nodeId;
-  private boolean _isAlive;
-  
-  public Node(String nodeId, Set<Integer> allPorts, boolean isAlive) {
-    _nodeId = nodeId;
-    _isAlive = isAlive;
-    if (_isAlive && allPorts != null) {
-      for (int port: allPorts) {
-        _freeSlots.add(new WorkerSlot(_nodeId, port));
-      }
-    }
-  }
-
-  public String getId() {
-    return _nodeId;
-  }
-  
-  public boolean isAlive() {
-    return _isAlive;
-  }
-  
-  /**
-   * @return a collection of the topology ids currently running on this node
-   */
-  public Collection<String> getRunningTopologies() {
-    return _topIdToUsedSlots.keySet();
-  }
-  
-  public boolean isTotallyFree() {
-    return _topIdToUsedSlots.isEmpty();
-  }
-  
-  public int totalSlotsFree() {
-    return _freeSlots.size();
-  }
-  
-  public int totalSlotsUsed() {
-    int total = 0;
-    for (Set<WorkerSlot> slots: _topIdToUsedSlots.values()) {
-      total += slots.size();
+    private static final Logger LOG = LoggerFactory.getLogger(Node.class);
+    private Map<String, Set<WorkerSlot>> _topIdToUsedSlots = new HashMap<String, Set<WorkerSlot>>();
+    private Set<WorkerSlot> _freeSlots = new HashSet<WorkerSlot>();
+    private final String _nodeId;
+    private boolean _isAlive;
+
+    public Node(String nodeId, Set<Integer> allPorts, boolean isAlive) {
+        _nodeId = nodeId;
+        _isAlive = isAlive;
+        if (_isAlive && allPorts != null) {
+            for (int port : allPorts) {
+                _freeSlots.add(new WorkerSlot(_nodeId, port));
+            }
+        }
     }
-    return total;
-  }
-  
-  public int totalSlots() {
-    return totalSlotsFree() + totalSlotsUsed();
-  }
-  
-  public int totalSlotsUsed(String topId) {
-    int total = 0;
-    Set<WorkerSlot> slots = _topIdToUsedSlots.get(topId);
-    if (slots != null) {
-      total = slots.size();
+
+    public String getId() {
+        return _nodeId;
     }
-    return total;
-  }
-
-  private void validateSlot(WorkerSlot ws) {
-    if (!_nodeId.equals(ws.getNodeId())) {
-      throw new IllegalArgumentException(
-          "Trying to add a slot to the wrong node " + ws + 
-          " is not a part of " + _nodeId);
+
+    public boolean isAlive() {
+        return _isAlive;
     }
-  }
- 
-  private void addOrphanedSlot(WorkerSlot ws) {
-    if (_isAlive) {
-      throw new IllegalArgumentException("Orphaned Slots " +
-        "only are allowed on dead nodes.");
+
+    /**
+     * @return a collection of the topology ids currently running on this node
+     */
+    public Collection<String> getRunningTopologies() {
+        return _topIdToUsedSlots.keySet();
     }
-    validateSlot(ws);
-    if (_freeSlots.contains(ws)) {
-      return;
+
+    public boolean isTotallyFree() {
+        return _topIdToUsedSlots.isEmpty();
     }
-    for (Set<WorkerSlot> used: _topIdToUsedSlots.values()) {
-      if (used.contains(ws)) {
-        return;
-      }
+
+    public int totalSlotsFree() {
+        return _freeSlots.size();
     }
-    _freeSlots.add(ws);
-  }
- 
-  boolean assignInternal(WorkerSlot ws, String topId, boolean dontThrow) {
-    validateSlot(ws);
-    if (!_freeSlots.remove(ws)) {
-      for (Entry<String, Set<WorkerSlot>> topologySetEntry : _topIdToUsedSlots.entrySet()) {
-        if (topologySetEntry.getValue().contains(ws)) {
-          if (dontThrow) {
-            LOG.warn("Worker slot [" + ws + "] can't be assigned to " + topId +
-                    ". Its already assigned to " + topologySetEntry.getKey() + ".");
-            return true;
-          }
-          throw new IllegalStateException("Worker slot [" + ws + "] can't be assigned to "
-                  + topId + ". Its already assigned to " + topologySetEntry.getKey() + ".");
+
+    public int totalSlotsUsed() {
+        int total = 0;
+        for (Set<WorkerSlot> slots : _topIdToUsedSlots.values()) {
+            total += slots.size();
         }
-      }
-      LOG.warn("Adding Worker slot [" + ws + "] that was not reported in the supervisor heartbeats," +
-              " but the worker is already running for topology " + topId + ".");
-    }
-    Set<WorkerSlot> usedSlots = _topIdToUsedSlots.get(topId);
-    if (usedSlots == null) {
-      usedSlots = new HashSet<WorkerSlot>();
-      _topIdToUsedSlots.put(topId, usedSlots);
+        return total;
     }
-    usedSlots.add(ws);
-    return false;
-  }
-  
-  /**
-   * Free all slots on this node.  This will update the Cluster too.
-   * @param cluster the cluster to be updated
-   */
-  public void freeAllSlots(Cluster cluster) {
-    if (!_isAlive) {
-      LOG.warn("Freeing all slots on a dead node {} ",_nodeId);
-    } 
-    for (Entry<String, Set<WorkerSlot>> entry : _topIdToUsedSlots.entrySet()) {
-      cluster.freeSlots(entry.getValue());
-      if (_isAlive) {
-        _freeSlots.addAll(entry.getValue());
-      }
+
+    public int totalSlots() {
+        return totalSlotsFree() + totalSlotsUsed();
     }
-    _topIdToUsedSlots = new HashMap<String,Set<WorkerSlot>>();
-  }
-  
-  /**
-   * Frees a single slot in this node
-   * @param ws the slot to free
-   * @param cluster the cluster to update
-   */
-  public void free(WorkerSlot ws, Cluster cluster, boolean forceFree) {
-    if (_freeSlots.contains(ws)) return;
-    boolean wasFound = false;
-    for (Entry<String, Set<WorkerSlot>> entry : _topIdToUsedSlots.entrySet()) {
-      Set<WorkerSlot> slots = entry.getValue();
-      if (slots.remove(ws)) {
-        cluster.freeSlot(ws);
-        if (_isAlive) {
-          _freeSlots.add(ws);
+
+    public int totalSlotsUsed(String topId) {
+        int total = 0;
+        Set<WorkerSlot> slots = _topIdToUsedSlots.get(topId);
+        if (slots != null) {
+            total = slots.size();
         }
-        wasFound = true;
-      }
+        return total;
     }
-    if(!wasFound)
-    {
-      if(forceFree)
-      {
-        LOG.info("Forcefully freeing the " + ws);
-        cluster.freeSlot(ws);
-        _freeSlots.add(ws);
-      } else {
-        throw new IllegalArgumentException("Tried to free a slot that was not" +
-              " part of this node " + _nodeId);
-      }
+
+    private void validateSlot(WorkerSlot ws) {
+        if (!_nodeId.equals(ws.getNodeId())) {
+            throw new IllegalArgumentException("Trying to add a slot to the wrong node " + ws + " is not a part of " + _nodeId);
+        }
     }
-  }
-   
-  /**
-   * Frees all the slots for a topology.
-   * @param topId the topology to free slots for
-   * @param cluster the cluster to update
-   */
-  public void freeTopology(String topId, Cluster cluster) {
-    Set<WorkerSlot> slots = _topIdToUsedSlots.get(topId);
-    if (slots == null || slots.isEmpty()) return;
-    for (WorkerSlot ws : slots) {
-      cluster.freeSlot(ws);
-      if (_isAlive) {
+
+    private void addOrphanedSlot(WorkerSlot ws) {
+        if (_isAlive) {
+            throw new IllegalArgumentException("Orphaned Slots " + "only are allowed on dead nodes.");
+        }
+        validateSlot(ws);
+        if (_freeSlots.contains(ws)) {
+            return;
+        }
+        for (Set<WorkerSlot> used : _topIdToUsedSlots.values()) {
+            if (used.contains(ws)) {
+                return;
+            }
+        }
         _freeSlots.add(ws);
-      }
     }
-    _topIdToUsedSlots.remove(topId);
-  }
- 
-  /**
-   * Assign a free slot on the node to the following topology and executors.
-   * This will update the cluster too.
-   * @param topId the topology to assign a free slot to.
-   * @param executors the executors to run in that slot.
-   * @param cluster the cluster to be updated
-   */
-  public void assign(String topId, Collection<ExecutorDetails> executors, 
-      Cluster cluster) {
-    if (!_isAlive) {
-      throw new IllegalStateException("Trying to adding to a dead node " + _nodeId);
+
+    boolean assignInternal(WorkerSlot ws, String topId, boolean dontThrow) {
+        validateSlot(ws);
+        if (!_freeSlots.remove(ws)) {
+            for (Entry<String, Set<WorkerSlot>> topologySetEntry : _topIdToUsedSlots.entrySet()) {
+                if (topologySetEntry.getValue().contains(ws)) {
+                    if (dontThrow) {
+                        LOG.warn("Worker slot [" + ws + "] can't be assigned to " + topId + ". Its already assigned to " + topologySetEntry.getKey() + ".");
+                        return true;
+                    }
+                    throw new IllegalStateException("Worker slot [" + ws + "] can't be assigned to " + topId + ". Its already assigned to "
+                            + topologySetEntry.getKey() + ".");
+                }
+            }
+            LOG.warn("Adding Worker slot [" + ws + "] that was not reported in the supervisor heartbeats," + " but the worker is already running for topology "
+                    + topId + ".");
+        }
+        Set<WorkerSlot> usedSlots = _topIdToUsedSlots.get(topId);
+        if (usedSlots == null) {
+            usedSlots = new HashSet<WorkerSlot>();
+            _topIdToUsedSlots.put(topId, usedSlots);
+        }
+        usedSlots.add(ws);
+        return false;
     }
-    if (_freeSlots.isEmpty()) {
-      throw new IllegalStateException("Trying to assign to a full node " + _nodeId);
+
+    /**
+     * Free all slots on this node. This will update the Cluster too.
+     * 
+     * @param cluster the cluster to be updated
+     */
+    public void freeAllSlots(Cluster cluster) {
+        if (!_isAlive) {
+            LOG.warn("Freeing all slots on a dead node {} ", _nodeId);
+        }
+        for (Entry<String, Set<WorkerSlot>> entry : _topIdToUsedSlots.entrySet()) {
+            cluster.freeSlots(entry.getValue());
+            if (_isAlive) {
+                _freeSlots.addAll(entry.getValue());
+            }
+        }
+        _topIdToUsedSlots = new HashMap<String, Set<WorkerSlot>>();
     }
-    if (executors.size() == 0) {
-      LOG.warn("Trying to assign nothing from " + topId + " to " + _nodeId + " (Ignored)");
-    } else {
-      WorkerSlot slot = _freeSlots.iterator().next();
-      cluster.assign(slot, topId, executors);
-      assignInternal(slot, topId, false);
+
+    /**
+     * Frees a single slot in this node
+     * 
+     * @param ws the slot to free
+     * @param cluster the cluster to update
+     */
+    public void free(WorkerSlot ws, Cluster cluster, boolean forceFree) {
+        if (_freeSlots.contains(ws))
+            return;
+        boolean wasFound = false;
+        for (Entry<String, Set<WorkerSlot>> entry : _topIdToUsedSlots.entrySet()) {
+            Set<WorkerSlot> slots = entry.getValue();
+            if (slots.remove(ws)) {
+                cluster.freeSlot(ws);
+                if (_isAlive) {
+                    _freeSlots.add(ws);
+                }
+                wasFound = true;
+            }
+        }
+        if (!wasFound) {
+            if (forceFree) {
+                LOG.info("Forcefully freeing the " + ws);
+                cluster.freeSlot(ws);
+                _freeSlots.add(ws);
+            } else {
+                throw new IllegalArgumentException("Tried to free a slot that was not" + " part of this node " + _nodeId);
+            }
+        }
     }
-  }
-  
-  @Override
-  public boolean equals(Object other) {
-    if (other instanceof Node) {
-      return _nodeId.equals(((Node)other)._nodeId);
+
+    /**
+     * Frees all the slots for a topology.
+     * 
+     * @param topId the topology to free slots for
+     * @param cluster the cluster to update
+     */
+    public void freeTopology(String topId, Cluster cluster) {
+        Set<WorkerSlot> slots = _topIdToUsedSlots.get(topId);
+        if (slots == null || slots.isEmpty())
+            return;
+        for (WorkerSlot ws : slots) {
+            cluster.freeSlot(ws);
+            if (_isAlive) {
+                _freeSlots.add(ws);
+            }
+        }
+        _topIdToUsedSlots.remove(topId);
     }
-    return false;
-  }
-  
-  @Override
-  public int hashCode() {
-    return _nodeId.hashCode();
-  }
-  
-  @Override
-  public String toString() {
-    return "Node: " + _nodeId;
-  }
-
-  public static int countSlotsUsed(String topId, Collection<Node> nodes) {
-    int total = 0;
-    for (Node n: nodes) {
-      total += n.totalSlotsUsed(topId);
+
+    /**
+     * Assign a free slot on the node to the following topology and executors. This will update the cluster too.
+     * 
+     * @param topId the topology to assign a free slot to.
+     * @param executors the executors to run in that slot.
+     * @param cluster the cluster to be updated
+     */
+    public void assign(String topId, Collection<ExecutorDetails> executors, Cluster cluster) {
+        if (!_isAlive) {
+            throw new IllegalStateException("Trying to adding to a dead node " + _nodeId);
+        }
+        if (_freeSlots.isEmpty()) {
+            throw new IllegalStateException("Trying to assign to a full node " + _nodeId);
+        }
+        if (executors.size() == 0) {
+            LOG.warn("Trying to assign nothing from " + topId + " to " + _nodeId + " (Ignored)");
+        } else {
+            WorkerSlot slot = _freeSlots.iterator().next();
+            cluster.assign(slot, topId, executors);
+            assignInternal(slot, topId, false);
+        }
     }
-    return total;
-  }
-  
-  public static int countSlotsUsed(Collection<Node> nodes) {
-    int total = 0;
-    for (Node n: nodes) {
-      total += n.totalSlotsUsed();
+
+    @Override
+    public boolean equals(Object other) {
+        if (other instanceof Node) {
+            return _nodeId.equals(((Node) other)._nodeId);
+        }
+        return false;
     }
-    return total;
-  }
-  
-  public static int countFreeSlotsAlive(Collection<Node> nodes) {
-    int total = 0;
-    for (Node n: nodes) {
-      if (n.isAlive()) {
-        total += n.totalSlotsFree();
-      }
+
+    @Override
+    public int hashCode() {
+        return _nodeId.hashCode();
     }
-    return total;
-  }
-  
-  public static int countTotalSlotsAlive(Collection<Node> nodes) {
-    int total = 0;
-    for (Node n: nodes) {
-      if (n.isAlive()) {
-        total += n.totalSlots();
-      }
+
+    @Override
+    public String toString() {
+        return "Node: " + _nodeId;
     }
-    return total;
-  }
-  
-  public static Map<String, Node> getAllNodesFrom(Cluster cluster) {
-    Map<String, Node> nodeIdToNode = new HashMap<String, Node>();
-    for (SupervisorDetails sup : cluster.getSupervisors().values()) {
-      //Node ID and supervisor ID are the same.
-      String id = sup.getId();
-      boolean isAlive = !cluster.isBlackListed(id);
-      LOG.debug("Found a {} Node {} {}",
-          new Object[] {isAlive? "living":"dead", id, sup.getAllPorts()});
-      nodeIdToNode.put(id, new Node(id, sup.getAllPorts(), isAlive));
+
+    public static int countSlotsUsed(String topId, Collection<Node> nodes) {
+        int total = 0;
+        for (Node n : nodes) {
+            total += n.totalSlotsUsed(topId);
+        }
+        return total;
     }
-    
-    for (Entry<String, SchedulerAssignment> entry : cluster.getAssignments().entrySet()) {
-      String topId = entry.getValue().getTopologyId();
-      for (WorkerSlot ws: entry.getValue().getSlots()) {
-        String id = ws.getNodeId();
-        Node node = nodeIdToNode.get(id);
-        if (node == null) {
-          LOG.debug("Found an assigned slot on a dead supervisor {}", ws);
-          node = new Node(id, null, false);
-          nodeIdToNode.put(id, node);
+
+    public static int countSlotsUsed(Collection<Node> nodes) {
+        int total = 0;
+        for (Node n : nodes) {
+            total += n.totalSlotsUsed();
         }
-        if (!node.isAlive()) {
-          //The supervisor on the node down so add an orphaned slot to hold the unsupervised worker 
-          node.addOrphanedSlot(ws);
+        return total;
+    }
+
+    public static int countFreeSlotsAlive(Collection<Node> nodes) {
+        int total = 0;
+        for (Node n : nodes) {
+            if (n.isAlive()) {
+                total += n.totalSlotsFree();
+            }
         }
-        if (node.assignInternal(ws, topId, true)) {
-          LOG.warn("Bad scheduling state for topology [" + topId+ "], the slot " +
-                  ws + " assigned to multiple workers, un-assigning everything...");
-          node.free(ws, cluster, true);
+        return total;
+    }
+
+    public static int countTotalSlotsAlive(Collection<Node> nodes) {
+        int total = 0;
+        for (Node n : nodes) {
+            if (n.isAlive()) {
+                total += n.totalSlots();
+            }
         }
-      }
+        return total;
     }
-    
-    return nodeIdToNode;
-  }
-  
-  /**
-   * Used to sort a list of nodes so the node with the most free slots comes
-   * first.
-   */
-  public static final Comparator<Node> FREE_NODE_COMPARATOR_DEC = new Comparator<Node>() {
-    @Override
-    public int compare(Node o1, Node o2) {
-      return o2.totalSlotsFree() - o1.totalSlotsFree();
+
+    public static Map<String, Node> getAllNodesFrom(Cluster cluster) {
+        Map<String, Node> nodeIdToNode = new HashMap<String, Node>();
+        for (SupervisorDetails sup : cluster.getSupervisors().values()) {
+            // Node ID and supervisor ID are the same.
+            String id = sup.getId();
+            boolean isAlive = !cluster.isBlackListed(id);
+            LOG.debug("Found a {} Node {} {}", new Object[] { isAlive ? "living" : "dead", id, sup.getAllPorts() });
+            nodeIdToNode.put(id, new Node(id, sup.getAllPorts(), isAlive));
+        }
+
+        for (Entry<String, SchedulerAssignment> entry : cluster.getAssignments().entrySet()) {
+            String topId = entry.getValue().getTopologyId();
+            for (WorkerSlot ws : entry.getValue().getSlots()) {
+                String id = ws.getNodeId();
+                Node node = nodeIdToNode.get(id);
+                if (node == null) {
+                    LOG.debug("Found an assigned slot on a dead supervisor {}", ws);
+                    node = new Node(id, null, false);
+                    nodeIdToNode.put(id, node);
+                }
+                if (!node.isAlive()) {
+                    // The supervisor on the node is down, so add an orphaned slot to hold the unsupervised worker
+                    node.addOrphanedSlot(ws);
+                }
+                if (node.assignInternal(ws, topId, true)) {
+                    LOG.warn("Bad scheduling state for topology [" + topId + "], the slot " + ws + " assigned to multiple workers, un-assigning everything...");
+                    node.free(ws, cluster, true);
+                }
+            }
+        }
+
+        return nodeIdToNode;
     }
-  };
+
+    /**
+     * Used to sort a list of nodes so the node with the most free slots comes first.
+     */
+    public static final Comparator<Node> FREE_NODE_COMPARATOR_DEC = new Comparator<Node>() {
+        @Override
+        public int compare(Node o1, Node o2) {
+            return o2.totalSlotsFree() - o1.totalSlotsFree();
+        }
+    };
 }
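
For reference, a minimal sketch of how a custom scheduler might consume the reworked Node API above. The helper class and method names are illustrative assumptions; every Node call used (getAllNodesFrom, isAlive, totalSlotsFree, FREE_NODE_COMPARATOR_DEC) appears in this diff, and the package is assumed to match NodePool's below.

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import java.util.Map;

    import backtype.storm.scheduler.Cluster;
    import backtype.storm.scheduler.multitenant.Node;

    public class NodeUsageSketch {
        // Hypothetical helper: return the live node with the most free slots, or null.
        public static Node freestNode(Cluster cluster) {
            Map<String, Node> nodes = Node.getAllNodesFrom(cluster);
            List<Node> candidates = new ArrayList<Node>();
            for (Node n : nodes.values()) {
                if (n.isAlive() && n.totalSlotsFree() > 0) {
                    candidates.add(n);
                }
            }
            if (candidates.isEmpty()) {
                return null;
            }
            // FREE_NODE_COMPARATOR_DEC sorts nodes so the most free slots come first.
            Collections.sort(candidates, Node.FREE_NODE_COMPARATOR_DEC);
            return candidates.get(0);
        }
    }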

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/scheduler/multitenant/NodePool.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/scheduler/multitenant/NodePool.java b/jstorm-core/src/main/java/backtype/storm/scheduler/multitenant/NodePool.java
index 21d1577..9537fa8 100755
--- a/jstorm-core/src/main/java/backtype/storm/scheduler/multitenant/NodePool.java
+++ b/jstorm-core/src/main/java/backtype/storm/scheduler/multitenant/NodePool.java
@@ -42,255 +42,259 @@ import backtype.storm.scheduler.WorkerSlot;
  * A pool of nodes that can be used to run topologies.
  */
 public abstract class NodePool {
-  protected Cluster _cluster;
-  protected Map<String, Node> _nodeIdToNode;
-  
-  public static class NodeAndSlotCounts {
-    public final int _nodes;
-    public final int _slots;
-    
-    public NodeAndSlotCounts(int nodes, int slots) {
-      _nodes = nodes;
-      _slots = slots;
+    protected Cluster _cluster;
+    protected Map<String, Node> _nodeIdToNode;
+
+    public static class NodeAndSlotCounts {
+        public final int _nodes;
+        public final int _slots;
+
+        public NodeAndSlotCounts(int nodes, int slots) {
+            _nodes = nodes;
+            _slots = slots;
+        }
     }
-  }
 
-  /**
-   * Place executors into slots in a round robin way, taking into account
-   * component spreading among different hosts.
-   */
-  public static class RoundRobinSlotScheduler {
-    private Map<String,Set<String>> _nodeToComps;
-    private HashMap<String, List<ExecutorDetails>> _spreadToSchedule;
-    private LinkedList<Set<ExecutorDetails>> _slots;
-    private Set<ExecutorDetails> _lastSlot;
-    private Cluster _cluster;
-    private String _topId;
-    
     /**
-     * Create a new scheduler for a given topology
-     * @param td the topology to schedule
-     * @param slotsToUse the number of slots to use for the executors left to 
-     * schedule.
-     * @param cluster the cluster to schedule this on. 
+     * Place executors into slots in a round robin way, taking into account component spreading among different hosts.
      */
-    public RoundRobinSlotScheduler(TopologyDetails td, int slotsToUse, 
-        Cluster cluster) {
-      _topId = td.getId();
-      _cluster = cluster;
-      
-      Map<ExecutorDetails, String> execToComp = td.getExecutorToComponent();
-      SchedulerAssignment assignment = _cluster.getAssignmentById(_topId);
-      _nodeToComps = new HashMap<String, Set<String>>();
+    public static class RoundRobinSlotScheduler {
+        private Map<String, Set<String>> _nodeToComps;
+        private HashMap<String, List<ExecutorDetails>> _spreadToSchedule;
+        private LinkedList<Set<ExecutorDetails>> _slots;
+        private Set<ExecutorDetails> _lastSlot;
+        private Cluster _cluster;
+        private String _topId;
 
-      if (assignment != null) {
-        Map<ExecutorDetails, WorkerSlot> execToSlot = assignment.getExecutorToSlot();
-        
-        for (Entry<ExecutorDetails, WorkerSlot> entry: execToSlot.entrySet()) {
-          String nodeId = entry.getValue().getNodeId();
-          Set<String> comps = _nodeToComps.get(nodeId);
-          if (comps == null) {
-            comps = new HashSet<String>();
-            _nodeToComps.put(nodeId, comps);
-          }
-          comps.add(execToComp.get(entry.getKey()));
-        }
-      }
-      
-      _spreadToSchedule = new HashMap<String, List<ExecutorDetails>>();
-      List<String> spreadComps = (List<String>)td.getConf().get(Config.TOPOLOGY_SPREAD_COMPONENTS);
-      if (spreadComps != null) {
-        for (String comp: spreadComps) {
-          _spreadToSchedule.put(comp, new ArrayList<ExecutorDetails>());
+        /**
+         * Create a new scheduler for a given topology
+         * 
+         * @param td the topology to schedule
+         * @param slotsToUse the number of slots to use for the executors left to schedule.
+         * @param cluster the cluster to schedule this on.
+         */
+        public RoundRobinSlotScheduler(TopologyDetails td, int slotsToUse, Cluster cluster) {
+            _topId = td.getId();
+            _cluster = cluster;
+
+            Map<ExecutorDetails, String> execToComp = td.getExecutorToComponent();
+            SchedulerAssignment assignment = _cluster.getAssignmentById(_topId);
+            _nodeToComps = new HashMap<String, Set<String>>();
+
+            if (assignment != null) {
+                Map<ExecutorDetails, WorkerSlot> execToSlot = assignment.getExecutorToSlot();
+
+                for (Entry<ExecutorDetails, WorkerSlot> entry : execToSlot.entrySet()) {
+                    String nodeId = entry.getValue().getNodeId();
+                    Set<String> comps = _nodeToComps.get(nodeId);
+                    if (comps == null) {
+                        comps = new HashSet<String>();
+                        _nodeToComps.put(nodeId, comps);
+                    }
+                    comps.add(execToComp.get(entry.getKey()));
+                }
+            }
+
+            _spreadToSchedule = new HashMap<String, List<ExecutorDetails>>();
+            List<String> spreadComps = (List<String>) td.getConf().get(Config.TOPOLOGY_SPREAD_COMPONENTS);
+            if (spreadComps != null) {
+                for (String comp : spreadComps) {
+                    _spreadToSchedule.put(comp, new ArrayList<ExecutorDetails>());
+                }
+            }
+
+            _slots = new LinkedList<Set<ExecutorDetails>>();
+            for (int i = 0; i < slotsToUse; i++) {
+                _slots.add(new HashSet<ExecutorDetails>());
+            }
+
+            int at = 0;
+            for (Entry<String, List<ExecutorDetails>> entry : _cluster.getNeedsSchedulingComponentToExecutors(td).entrySet()) {
+                LOG.debug("Scheduling for {}", entry.getKey());
+                if (_spreadToSchedule.containsKey(entry.getKey())) {
+                    LOG.debug("Saving {} for spread...", entry.getKey());
+                    _spreadToSchedule.get(entry.getKey()).addAll(entry.getValue());
+                } else {
+                    for (ExecutorDetails ed : entry.getValue()) {
+                        LOG.debug("Assigning {} {} to slot {}", new Object[] { entry.getKey(), ed, at });
+                        _slots.get(at).add(ed);
+                        at++;
+                        if (at >= _slots.size()) {
+                            at = 0;
+                        }
+                    }
+                }
+            }
+            _lastSlot = _slots.get(_slots.size() - 1);
         }
-      }
-      
-      _slots = new LinkedList<Set<ExecutorDetails>>();
-      for (int i = 0; i < slotsToUse; i++) {
-        _slots.add(new HashSet<ExecutorDetails>());
-      }
 
-      int at = 0;
-      for (Entry<String, List<ExecutorDetails>> entry: _cluster.getNeedsSchedulingComponentToExecutors(td).entrySet()) {
-        LOG.debug("Scheduling for {}", entry.getKey());
-        if (_spreadToSchedule.containsKey(entry.getKey())) {
-          LOG.debug("Saving {} for spread...",entry.getKey());
-          _spreadToSchedule.get(entry.getKey()).addAll(entry.getValue());
-        } else {
-          for (ExecutorDetails ed: entry.getValue()) {
-            LOG.debug("Assigning {} {} to slot {}", new Object[]{entry.getKey(), ed, at});
-            _slots.get(at).add(ed);
-            at++;
-            if (at >= _slots.size()) {
-              at = 0;
+        /**
+         * Assign a slot to the given node.
+         * 
+         * @param n the node to assign a slot to.
+         * @return true if there are more slots to assign else false.
+         */
+        public boolean assignSlotTo(Node n) {
+            if (_slots.isEmpty()) {
+                return false;
             }
-          }
+            Set<ExecutorDetails> slot = _slots.pop();
+            if (slot == _lastSlot) {
+                // This is the last slot, so fill it up with the remaining spread executors
+                for (Entry<String, List<ExecutorDetails>> entry : _spreadToSchedule.entrySet()) {
+                    if (entry.getValue().size() > 0) {
+                        slot.addAll(entry.getValue());
+                    }
+                }
+            } else {
+                String nodeId = n.getId();
+                Set<String> nodeComps = _nodeToComps.get(nodeId);
+                if (nodeComps == null) {
+                    nodeComps = new HashSet<String>();
+                    _nodeToComps.put(nodeId, nodeComps);
+                }
+                for (Entry<String, List<ExecutorDetails>> entry : _spreadToSchedule.entrySet()) {
+                    if (entry.getValue().size() > 0) {
+                        String comp = entry.getKey();
+                        if (!nodeComps.contains(comp)) {
+                            nodeComps.add(comp);
+                            slot.add(entry.getValue().remove(0));
+                        }
+                    }
+                }
+            }
+            n.assign(_topId, slot, _cluster);
+            return !_slots.isEmpty();
         }
-      }
-      _lastSlot = _slots.get(_slots.size() - 1);
     }
-    
+
+    private static final Logger LOG = LoggerFactory.getLogger(NodePool.class);
+
+    /**
+     * Initialize the pool.
+     * 
+     * @param cluster the cluster
+     * @param nodeIdToNode the mapping of node id to nodes
+     */
+    public void init(Cluster cluster, Map<String, Node> nodeIdToNode) {
+        _cluster = cluster;
+        _nodeIdToNode = nodeIdToNode;
+    }
+
     /**
-     * Assign a slot to the given node.
-     * @param n the node to assign a slot to.
-     * @return true if there are more slots to assign else false.
+     * Add a topology to the pool
+     * 
+     * @param td the topology to add.
      */
-    public boolean assignSlotTo(Node n) {
-      if (_slots.isEmpty()) {
-        return false;
-      }
-      Set<ExecutorDetails> slot = _slots.pop();
-      if (slot == _lastSlot) {
-        //The last slot fill it up
-        for (Entry<String, List<ExecutorDetails>> entry: _spreadToSchedule.entrySet()) {
-          if (entry.getValue().size() > 0) {
-            slot.addAll(entry.getValue());
-          }
+    public abstract void addTopology(TopologyDetails td);
+
+    /**
+     * Check if this topology can be added to this pool
+     * 
+     * @param td the topology
+     * @return true if it can else false
+     */
+    public abstract boolean canAdd(TopologyDetails td);
+
+    /**
+     * @return the number of nodes that are available to be taken
+     */
+    public abstract int slotsAvailable();
+
+    /**
+     * Take nodes from this pool that can fulfill up to slotsNeeded slots
+     * 
+     * @param slotsNeeded the number of slots that are needed.
+     * @return a Collection of nodes with the removed nodes in it. This may be empty, but should not be null.
+     */
+    public abstract Collection<Node> takeNodesBySlots(int slotsNeeded);
+
+    /**
+     * Get the number of nodes and slots this would provide to get the slots needed
+     * 
+     * @param slots the number of slots needed
+     * @return the number of nodes and slots that would be returned.
+     */
+    public abstract NodeAndSlotCounts getNodeAndSlotCountIfSlotsWereTaken(int slots);
+
+    /**
+     * @return the number of nodes that are available to be taken
+     */
+    public abstract int nodesAvailable();
+
+    /**
+     * Take up to nodesNeeded from this pool
+     * 
+     * @param nodesNeeded the number of nodes that are needed.
+     * @return a Collection of nodes with the removed nodes in it. This may be empty, but should not be null.
+     */
+    public abstract Collection<Node> takeNodes(int nodesNeeded);
+
+    /**
+     * Reschedule any topologies as needed.
+     * 
+     * @param lesserPools pools that may be used to steal nodes from.
+     */
+    public abstract void scheduleAsNeeded(NodePool... lesserPools);
+
+    public static int slotsAvailable(NodePool[] pools) {
+        int slotsAvailable = 0;
+        for (NodePool pool : pools) {
+            slotsAvailable += pool.slotsAvailable();
         }
-      } else {
-        String nodeId = n.getId();
-        Set<String> nodeComps = _nodeToComps.get(nodeId);
-        if (nodeComps == null) {
-          nodeComps = new HashSet<String>();
-          _nodeToComps.put(nodeId, nodeComps);
+        return slotsAvailable;
+    }
+
+    public static int nodesAvailable(NodePool[] pools) {
+        int nodesAvailable = 0;
+        for (NodePool pool : pools) {
+            nodesAvailable += pool.nodesAvailable();
         }
-        for (Entry<String, List<ExecutorDetails>> entry: _spreadToSchedule.entrySet()) {
-          if (entry.getValue().size() > 0) {
-            String comp = entry.getKey();
-            if (!nodeComps.contains(comp)) {
-              nodeComps.add(comp);
-              slot.add(entry.getValue().remove(0));
+        return nodesAvailable;
+    }
+
+    public static Collection<Node> takeNodesBySlot(int slotsNeeded, NodePool[] pools) {
+        LOG.debug("Trying to grab {} free slots from {}", slotsNeeded, pools);
+        HashSet<Node> ret = new HashSet<Node>();
+        for (NodePool pool : pools) {
+            Collection<Node> got = pool.takeNodesBySlots(slotsNeeded);
+            ret.addAll(got);
+            slotsNeeded -= Node.countFreeSlotsAlive(got);
+            LOG.debug("Got {} nodes so far need {} more slots", ret.size(), slotsNeeded);
+            if (slotsNeeded <= 0) {
+                break;
             }
-          }
         }
-      }
-      n.assign(_topId, slot, _cluster);
-      return !_slots.isEmpty();
+        return ret;
     }
-  }
-  
-  private static final Logger LOG = LoggerFactory.getLogger(NodePool.class);
-  /**
-   * Initialize the pool.
-   * @param cluster the cluster
-   * @param nodeIdToNode the mapping of node id to nodes
-   */
-  public void init(Cluster cluster, Map<String, Node> nodeIdToNode) {
-    _cluster = cluster;
-    _nodeIdToNode = nodeIdToNode;
-  }
-  
-  /**
-   * Add a topology to the pool
-   * @param td the topology to add.
-   */
-  public abstract void addTopology(TopologyDetails td);
-  
-  /**
-   * Check if this topology can be added to this pool
-   * @param td the topology
-   * @return true if it can else false
-   */
-  public abstract boolean canAdd(TopologyDetails td);
-  
-  /**
-   * @return the number of nodes that are available to be taken
-   */
-  public abstract int slotsAvailable();
-  
-  /**
-   * Take nodes from this pool that can fulfill possibly up to the
-   * slotsNeeded
-   * @param slotsNeeded the number of slots that are needed.
-   * @return a Collection of nodes with the removed nodes in it.  
-   * This may be empty, but should not be null.
-   */
-  public abstract Collection<Node> takeNodesBySlots(int slotsNeeded);
 
-  /**
-   * Get the number of nodes and slots this would provide to get the slots needed
-   * @param slots the number of slots needed
-   * @return the number of nodes and slots that would be returned.
-   */
-  public abstract NodeAndSlotCounts getNodeAndSlotCountIfSlotsWereTaken(int slots);
-  
-  /**
-   * @return the number of nodes that are available to be taken
-   */
-  public abstract int nodesAvailable();
-  
-  /**
-   * Take up to nodesNeeded from this pool
-   * @param nodesNeeded the number of nodes that are needed.
-   * @return a Collection of nodes with the removed nodes in it.  
-   * This may be empty, but should not be null.
-   */
-  public abstract Collection<Node> takeNodes(int nodesNeeded);
-  
-  /**
-   * Reschedule any topologies as needed.
-   * @param lesserPools pools that may be used to steal nodes from.
-   */
-  public abstract void scheduleAsNeeded(NodePool ... lesserPools);
-  
-  public static int slotsAvailable(NodePool[] pools) {
-    int slotsAvailable = 0;
-    for (NodePool pool: pools) {
-      slotsAvailable += pool.slotsAvailable();
-    }
-    return slotsAvailable;
-  }
-  
-  public static int nodesAvailable(NodePool[] pools) {
-    int nodesAvailable = 0;
-    for (NodePool pool: pools) {
-      nodesAvailable += pool.nodesAvailable();
-    }
-    return nodesAvailable;
-  }
-  
-  public static Collection<Node> takeNodesBySlot(int slotsNeeded,NodePool[] pools) {
-    LOG.debug("Trying to grab {} free slots from {}",slotsNeeded, pools);
-    HashSet<Node> ret = new HashSet<Node>();
-    for (NodePool pool: pools) {
-      Collection<Node> got = pool.takeNodesBySlots(slotsNeeded);
-      ret.addAll(got);
-      slotsNeeded -= Node.countFreeSlotsAlive(got);
-      LOG.debug("Got {} nodes so far need {} more slots",ret.size(),slotsNeeded);
-      if (slotsNeeded <= 0) {
-        break;
-      }
-    }
-    return ret;
-  }
-  
-  public static Collection<Node> takeNodes(int nodesNeeded,NodePool[] pools) {
-    LOG.debug("Trying to grab {} free nodes from {}",nodesNeeded, pools);
-    HashSet<Node> ret = new HashSet<Node>();
-    for (NodePool pool: pools) {
-      Collection<Node> got = pool.takeNodes(nodesNeeded);
-      ret.addAll(got);
-      nodesNeeded -= got.size();
-      LOG.debug("Got {} nodes so far need {} more nodes", ret.size(), nodesNeeded);
-      if (nodesNeeded <= 0) {
-        break;
-      }
+    public static Collection<Node> takeNodes(int nodesNeeded, NodePool[] pools) {
+        LOG.debug("Trying to grab {} free nodes from {}", nodesNeeded, pools);
+        HashSet<Node> ret = new HashSet<Node>();
+        for (NodePool pool : pools) {
+            Collection<Node> got = pool.takeNodes(nodesNeeded);
+            ret.addAll(got);
+            nodesNeeded -= got.size();
+            LOG.debug("Got {} nodes so far need {} more nodes", ret.size(), nodesNeeded);
+            if (nodesNeeded <= 0) {
+                break;
+            }
+        }
+        return ret;
     }
-    return ret;
-  }
 
-  public static int getNodeCountIfSlotsWereTaken(int slots,NodePool[] pools) {
-    LOG.debug("How many nodes to get {} slots from {}",slots, pools);
-    int total = 0;
-    for (NodePool pool: pools) {
-      NodeAndSlotCounts ns = pool.getNodeAndSlotCountIfSlotsWereTaken(slots);
-      total += ns._nodes;
-      slots -= ns._slots;
-      LOG.debug("Found {} nodes so far {} more slots needed", total, slots);
-      if (slots <= 0) {
-        break;
-      }
-    }    
-    return total;
-  }
+    public static int getNodeCountIfSlotsWereTaken(int slots, NodePool[] pools) {
+        LOG.debug("How many nodes to get {} slots from {}", slots, pools);
+        int total = 0;
+        for (NodePool pool : pools) {
+            NodeAndSlotCounts ns = pool.getNodeAndSlotCountIfSlotsWereTaken(slots);
+            total += ns._nodes;
+            slots -= ns._slots;
+            LOG.debug("Found {} nodes so far {} more slots needed", total, slots);
+            if (slots <= 0) {
+                break;
+            }
+        }
+        return total;
+    }
 }
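
And a hedged sketch of how the static pool helpers and RoundRobinSlotScheduler compose. The scheduleOn wrapper and its inputs are assumptions about a caller, not code from this patch; the NodePool and Node calls it makes are all shown above.

    import java.util.Collection;

    import backtype.storm.scheduler.Cluster;
    import backtype.storm.scheduler.TopologyDetails;
    import backtype.storm.scheduler.multitenant.Node;
    import backtype.storm.scheduler.multitenant.NodePool;

    public class PoolUsageSketch {
        // Hypothetical caller: cover slotsNeeded slots by taking nodes from
        // lower-priority pools, then hand slots out round robin.
        static void scheduleOn(TopologyDetails td, int slotsNeeded, Cluster cluster, NodePool[] lesserPools) {
            Collection<Node> taken = NodePool.takeNodesBySlot(slotsNeeded, lesserPools);
            NodePool.RoundRobinSlotScheduler sched = new NodePool.RoundRobinSlotScheduler(td, slotsNeeded, cluster);
            for (Node n : taken) {
                boolean more = true;
                // each assignSlotTo call places one worker's executors on n
                while (more && n.totalSlotsFree() > 0) {
                    more = sched.assignSlotTo(n);
                }
                if (!more) {
                    return; // every slot has been assigned
                }
            }
        }
    }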

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/INimbusCredentialPlugin.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/INimbusCredentialPlugin.java b/jstorm-core/src/main/java/backtype/storm/security/INimbusCredentialPlugin.java
index 9670045..761eac0 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/INimbusCredentialPlugin.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/INimbusCredentialPlugin.java
@@ -22,23 +22,23 @@ import backtype.storm.daemon.Shutdownable;
 import java.util.Map;
 
 /**
- * Nimbus auto credential plugin that will be called on nimbus host
- * during submit topology option. User can specify a list of implementation using config key
+ * Nimbus auto credential plugin that will be called on the nimbus host during topology submission. Users can specify a list of implementations using the config key
  * nimbus.autocredential.plugins.classes.
  */
 public interface INimbusCredentialPlugin extends Shutdownable {
 
     /**
      * this method will be called when nimbus initializes.
+     * 
      * @param conf
      */
     void prepare(Map conf);
 
     /**
-     * Method that will be called on nimbus as part of submit topology. This plugin will be called
-     * at least once during the submit Topology action. It will be not be called during activate instead
-     * the credentials return by this method will be merged with the other credentials in the topology
-     * and stored in zookeeper.
+     * Method that will be called on nimbus as part of submit topology. This plugin will be called at least once during the submit topology action. It will
+     * not be called during activate; instead, the credentials returned by this method will be merged with the other credentials in the topology and stored in
+     * zookeeper.
+     * 
      * @param credentials credentials map where more credentials will be added.
      * @param conf topology configuration
      * @return
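
To make the contract concrete, a skeleton implementation might look like the sketch below. The populateCredentials name and signature are inferred from the javadoc above (the hunk is truncated here), and the class name and credential key are invented for illustration.

    import java.util.Map;

    import backtype.storm.security.INimbusCredentialPlugin;

    public class ExampleCredentialPlugin implements INimbusCredentialPlugin {
        @Override
        public void prepare(Map conf) {
            // nothing to set up in this sketch
        }

        // Assumed signature: called at least once during topology submission;
        // whatever is added here is merged into the topology credentials in zookeeper.
        public void populateCredentials(Map<String, String> credentials, Map conf) {
            credentials.put("example.credential", "placeholder-token"); // hypothetical key
        }

        @Override
        public void shutdown() {
            // nothing to release
        }
    }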

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/AuthUtils.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/AuthUtils.java b/jstorm-core/src/main/java/backtype/storm/security/auth/AuthUtils.java
index ac3fb53..60653a1 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/AuthUtils.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/AuthUtils.java
@@ -45,19 +45,19 @@ public class AuthUtils {
 
     /**
      * Construct a JAAS configuration object per storm configuration file
+     * 
      * @param storm_conf Storm configuration
      * @return JAAS configuration object
      */
     public static Configuration GetConfiguration(Map storm_conf) {
         Configuration login_conf = null;
 
-        //find login file configuration from Storm configuration
-        String loginConfigurationFile = (String)storm_conf.get("java.security.auth.login.config");
-        if ((loginConfigurationFile != null) && (loginConfigurationFile.length()>0)) {
+        // find login file configuration from Storm configuration
+        String loginConfigurationFile = (String) storm_conf.get("java.security.auth.login.config");
+        if ((loginConfigurationFile != null) && (loginConfigurationFile.length() > 0)) {
             File config_file = new File(loginConfigurationFile);
-            if (! config_file.canRead()) {
-                throw new RuntimeException("File " + loginConfigurationFile +
-                        " cannot be read.");
+            if (!config_file.canRead()) {
+                throw new RuntimeException("File " + loginConfigurationFile + " cannot be read.");
             }
             try {
                 URI config_uri = config_file.toURI();
@@ -72,24 +72,26 @@ public class AuthUtils {
 
     /**
      * Construct a principal to local plugin
+     * 
      * @param conf storm configuration
      * @return the plugin
      */
     public static IPrincipalToLocal GetPrincipalToLocalPlugin(Map storm_conf) {
         IPrincipalToLocal ptol = null;
         try {
-          String ptol_klassName = (String) storm_conf.get(Config.STORM_PRINCIPAL_TO_LOCAL_PLUGIN);
-          Class klass = Class.forName(ptol_klassName);
-          ptol = (IPrincipalToLocal)klass.newInstance();
-          ptol.prepare(storm_conf);
+            String ptol_klassName = (String) storm_conf.get(Config.STORM_PRINCIPAL_TO_LOCAL_PLUGIN);
+            Class klass = Class.forName(ptol_klassName);
+            ptol = (IPrincipalToLocal) klass.newInstance();
+            ptol.prepare(storm_conf);
         } catch (Exception e) {
-          throw new RuntimeException(e);
+            throw new RuntimeException(e);
         }
         return ptol;
     }
 
     /**
      * Construct a group mapping service provider plugin
+     * 
      * @param conf storm configuration
      * @return the plugin
      */
@@ -98,26 +100,27 @@ public class AuthUtils {
         try {
             String gmsp_klassName = (String) storm_conf.get(Config.STORM_GROUP_MAPPING_SERVICE_PROVIDER_PLUGIN);
             Class klass = Class.forName(gmsp_klassName);
-            gmsp = (IGroupMappingServiceProvider)klass.newInstance();
+            gmsp = (IGroupMappingServiceProvider) klass.newInstance();
             gmsp.prepare(storm_conf);
         } catch (Exception e) {
-          throw new RuntimeException(e);
+            throw new RuntimeException(e);
         }
         return gmsp;
     }
 
     /**
      * Get all of the configured Credential Renewer Plugins.
+     * 
      * @param storm_conf the storm configuration to use.
      * @return the configured credential renewers.
      */
     public static Collection<ICredentialsRenewer> GetCredentialRenewers(Map conf) {
         try {
             Set<ICredentialsRenewer> ret = new HashSet<ICredentialsRenewer>();
-            Collection<String> clazzes = (Collection<String>)conf.get(Config.NIMBUS_CREDENTIAL_RENEWERS);
+            Collection<String> clazzes = (Collection<String>) conf.get(Config.NIMBUS_CREDENTIAL_RENEWERS);
             if (clazzes != null) {
                 for (String clazz : clazzes) {
-                    ICredentialsRenewer inst = (ICredentialsRenewer)Class.forName(clazz).newInstance();
+                    ICredentialsRenewer inst = (ICredentialsRenewer) Class.forName(clazz).newInstance();
                     inst.prepare(conf);
                     ret.add(inst);
                 }
@@ -130,16 +133,17 @@ public class AuthUtils {
 
     /**
      * Get all the Nimbus Auto cred plugins.
+     * 
      * @param conf nimbus configuration to use.
      * @return nimbus auto credential plugins.
      */
     public static Collection<INimbusCredentialPlugin> getNimbusAutoCredPlugins(Map conf) {
         try {
             Set<INimbusCredentialPlugin> ret = new HashSet<INimbusCredentialPlugin>();
-            Collection<String> clazzes = (Collection<String>)conf.get(Config.NIMBUS_AUTO_CRED_PLUGINS);
+            Collection<String> clazzes = (Collection<String>) conf.get(Config.NIMBUS_AUTO_CRED_PLUGINS);
             if (clazzes != null) {
                 for (String clazz : clazzes) {
-                    INimbusCredentialPlugin inst = (INimbusCredentialPlugin)Class.forName(clazz).newInstance();
+                    INimbusCredentialPlugin inst = (INimbusCredentialPlugin) Class.forName(clazz).newInstance();
                     inst.prepare(conf);
                     ret.add(inst);
                 }
@@ -152,21 +156,22 @@ public class AuthUtils {
 
     /**
      * Get all of the configured AutoCredential Plugins.
+     * 
      * @param storm_conf the storm configuration to use.
      * @return the configured auto credentials.
      */
     public static Collection<IAutoCredentials> GetAutoCredentials(Map storm_conf) {
         try {
             Set<IAutoCredentials> autos = new HashSet<IAutoCredentials>();
-            Collection<String> clazzes = (Collection<String>)storm_conf.get(Config.TOPOLOGY_AUTO_CREDENTIALS);
+            Collection<String> clazzes = (Collection<String>) storm_conf.get(Config.TOPOLOGY_AUTO_CREDENTIALS);
             if (clazzes != null) {
                 for (String clazz : clazzes) {
-                    IAutoCredentials a = (IAutoCredentials)Class.forName(clazz).newInstance();
+                    IAutoCredentials a = (IAutoCredentials) Class.forName(clazz).newInstance();
                     a.prepare(storm_conf);
                     autos.add(a);
                 }
             }
-            LOG.info("Got AutoCreds "+autos);
+            LOG.info("Got AutoCreds " + autos);
             return autos;
         } catch (Exception e) {
             throw new RuntimeException(e);
@@ -175,12 +180,13 @@ public class AuthUtils {
 
     /**
      * Populate a subject from credentials using the IAutoCredentials.
+     * 
      * @param subject the subject to populate or null if a new Subject should be created.
      * @param autos the IAutoCredentials to call to populate the subject.
      * @param credentials the credentials to pull from
      * @return the populated subject.
      */
-    public static Subject populateSubject(Subject subject, Collection<IAutoCredentials> autos, Map<String,String> credentials) {
+    public static Subject populateSubject(Subject subject, Collection<IAutoCredentials> autos, Map<String, String> credentials) {
         try {
             if (subject == null) {
                 subject = new Subject();
@@ -196,11 +202,12 @@ public class AuthUtils {
 
     /**
      * Update a subject from credentials using the IAutoCredentials.
+     * 
      * @param subject the subject to update
      * @param autos the IAutoCredentials to call to update the subject.
      * @param credentials the credentials to pull from
      */
-    public static void updateSubject(Subject subject, Collection<IAutoCredentials> autos, Map<String,String> credentials) {
+    public static void updateSubject(Subject subject, Collection<IAutoCredentials> autos, Map<String, String> credentials) {
         if (subject == null) {
             throw new RuntimeException("The subject cannot be null when updating a subject with credentials");
         }
@@ -216,68 +223,68 @@ public class AuthUtils {
 
     /**
      * Construct a transport plugin per storm configuration
+     * 
      * @param conf storm configuration
      * @return
      */
     public static ITransportPlugin GetTransportPlugin(ThriftConnectionType type, Map storm_conf, Configuration login_conf) {
-        ITransportPlugin  transportPlugin = null;
+        ITransportPlugin transportPlugin = null;
         try {
             String transport_plugin_klassName = type.getTransportPlugin(storm_conf);
             Class klass = Class.forName(transport_plugin_klassName);
-            transportPlugin = (ITransportPlugin)klass.newInstance();
+            transportPlugin = (ITransportPlugin) klass.newInstance();
             transportPlugin.prepare(type, storm_conf, login_conf);
-        } catch(Exception e) {
+        } catch (Exception e) {
             throw new RuntimeException(e);
         }
         return transportPlugin;
     }
 
-    private static IHttpCredentialsPlugin GetHttpCredentialsPlugin(Map conf,
-            String klassName) {
+    private static IHttpCredentialsPlugin GetHttpCredentialsPlugin(Map conf, String klassName) {
         IHttpCredentialsPlugin plugin = null;
         try {
             Class klass = Class.forName(klassName);
-            plugin = (IHttpCredentialsPlugin)klass.newInstance();
+            plugin = (IHttpCredentialsPlugin) klass.newInstance();
             plugin.prepare(conf);
-        } catch(Exception e) {
+        } catch (Exception e) {
             throw new RuntimeException(e);
         }
         return plugin;
     }
 
     /**
-     * Construct an HttpServletRequest credential plugin specified by the UI
-     * storm configuration
+     * Construct an HttpServletRequest credential plugin specified by the UI storm configuration
+     * 
      * @param conf storm configuration
      * @return the plugin
      */
     public static IHttpCredentialsPlugin GetUiHttpCredentialsPlugin(Map conf) {
-        String klassName = (String)conf.get(Config.UI_HTTP_CREDS_PLUGIN);
+        String klassName = (String) conf.get(Config.UI_HTTP_CREDS_PLUGIN);
         return AuthUtils.GetHttpCredentialsPlugin(conf, klassName);
     }
 
     /**
-     * Construct an HttpServletRequest credential plugin specified by the DRPC
-     * storm configuration
+     * Construct an HttpServletRequest credential plugin specified by the DRPC storm configuration
+     * 
      * @param conf storm configuration
      * @return the plugin
      */
     public static IHttpCredentialsPlugin GetDrpcHttpCredentialsPlugin(Map conf) {
-        String klassName = (String)conf.get(Config.DRPC_HTTP_CREDS_PLUGIN);
+        String klassName = (String) conf.get(Config.DRPC_HTTP_CREDS_PLUGIN);
         return AuthUtils.GetHttpCredentialsPlugin(conf, klassName);
     }
 
     public static String get(Configuration configuration, String section, String key) throws IOException {
         AppConfigurationEntry configurationEntries[] = configuration.getAppConfigurationEntry(section);
         if (configurationEntries == null) {
-            String errorMessage = "Could not find a '"+ section + "' entry in this configuration.";
+            String errorMessage = "Could not find a '" + section + "' entry in this configuration.";
             throw new IOException(errorMessage);
         }
 
-        for(AppConfigurationEntry entry: configurationEntries) {
+        for (AppConfigurationEntry entry : configurationEntries) {
             Object val = entry.getOptions().get(key);
             if (val != null)
-                return (String)val;
+                return (String) val;
         }
         return null;
     }
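
For orientation, a small sketch of how these helpers chain together when a daemon builds a Subject from stored credentials. The wrapper method is an assumption; the two AuthUtils calls are from this file.

    import java.util.Collection;
    import java.util.Map;

    import javax.security.auth.Subject;

    import backtype.storm.security.auth.AuthUtils;
    import backtype.storm.security.auth.IAutoCredentials;

    public class AuthUsageSketch {
        // Hypothetical flow: instantiate the configured IAutoCredentials plugins,
        // then populate a fresh Subject (null means create one) from a credentials map.
        static Subject buildSubject(Map stormConf, Map<String, String> credentials) {
            Collection<IAutoCredentials> autos = AuthUtils.GetAutoCredentials(stormConf);
            return AuthUtils.populateSubject(null, autos, credentials);
        }
    }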

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/DefaultHttpCredentialsPlugin.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/DefaultHttpCredentialsPlugin.java b/jstorm-core/src/main/java/backtype/storm/security/auth/DefaultHttpCredentialsPlugin.java
index e2469e5..6386992 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/DefaultHttpCredentialsPlugin.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/DefaultHttpCredentialsPlugin.java
@@ -31,11 +31,11 @@ import org.slf4j.LoggerFactory;
 import backtype.storm.security.auth.ReqContext;
 
 public class DefaultHttpCredentialsPlugin implements IHttpCredentialsPlugin {
-    private static final Logger LOG =
-            LoggerFactory.getLogger(DefaultHttpCredentialsPlugin.class);
+    private static final Logger LOG = LoggerFactory.getLogger(DefaultHttpCredentialsPlugin.class);
 
     /**
      * No-op
+     * 
      * @param storm_conf Storm configuration
      */
     @Override
@@ -45,6 +45,7 @@ public class DefaultHttpCredentialsPlugin implements IHttpCredentialsPlugin {
 
     /**
      * Gets the user name from the request principal.
+     * 
      * @param req the servlet request
      * @return the authenticated user, or null if none is authenticated
      */
@@ -54,7 +55,7 @@ public class DefaultHttpCredentialsPlugin implements IHttpCredentialsPlugin {
         if (req != null && (princ = req.getUserPrincipal()) != null) {
             String userName = princ.getName();
             if (userName != null && !userName.isEmpty()) {
-                LOG.debug("HTTP request had user ("+userName+")");
+                LOG.debug("HTTP request had user (" + userName + ")");
                 return userName;
             }
         }
@@ -62,29 +63,28 @@ public class DefaultHttpCredentialsPlugin implements IHttpCredentialsPlugin {
     }
 
     /**
-     * Populates a given context with a new Subject derived from the
-     * credentials in a servlet request.
+     * Populates a given context with a new Subject derived from the credentials in a servlet request.
+     * 
      * @param context the context to be populated
      * @param req the servlet request
      * @return the context
      */
     @Override
-    public ReqContext populateContext(ReqContext context,
-            HttpServletRequest req) {
+    public ReqContext populateContext(ReqContext context, HttpServletRequest req) {
         String userName = getUserName(req);
 
         String doAsUser = req.getHeader("doAsUser");
-        if(doAsUser == null) {
+        if (doAsUser == null) {
             doAsUser = req.getParameter("doAsUser");
         }
 
-        if(doAsUser != null) {
+        if (doAsUser != null) {
             context.setRealPrincipal(new SingleUserPrincipal(userName));
             userName = doAsUser;
         }
 
         Set<Principal> principals = new HashSet<Principal>();
-        if(userName != null) {
+        if (userName != null) {
             Principal p = new SingleUserPrincipal(userName);
             principals.add(p);
         }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/DefaultPrincipalToLocal.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/DefaultPrincipalToLocal.java b/jstorm-core/src/main/java/backtype/storm/security/auth/DefaultPrincipalToLocal.java
index 729d744..47e23b0 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/DefaultPrincipalToLocal.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/DefaultPrincipalToLocal.java
@@ -22,22 +22,24 @@ import java.util.Map;
 import java.security.Principal;
 
 /**
- * Storm can be configured to launch worker processed as a given user.
- * Some transports need to map the Principal to a local user name.
+ * Storm can be configured to launch worker processes as a given user. Some transports need to map the Principal to a local user name.
  */
 public class DefaultPrincipalToLocal implements IPrincipalToLocal {
     /**
      * Invoked once immediately after construction
-     * @param conf Storm configuration 
+     * 
+     * @param conf Storm configuration
      */
-    public void prepare(Map storm_conf) {}
-    
+    public void prepare(Map storm_conf) {
+    }
+
     /**
      * Convert a Principal to a local user name.
+     * 
      * @param principal the principal to convert
      * @return The local user name.
      */
     public String toLocal(Principal principal) {
-      return principal == null ? null : principal.getName();
+        return principal == null ? null : principal.getName();
     }
 }
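
The behavior is easiest to see at a call site; a trivial, assumed usage:

    import java.security.Principal;

    import backtype.storm.security.auth.DefaultPrincipalToLocal;

    public class PtolSketch {
        static String localUser(Principal principal) {
            // echoes the principal name, or null for a null principal
            return new DefaultPrincipalToLocal().toLocal(principal);
        }
    }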


http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/metric/SimpleJStormMetric.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/SimpleJStormMetric.java b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/SimpleJStormMetric.java
index 1d77089..7db0ed4 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/SimpleJStormMetric.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/SimpleJStormMetric.java
@@ -17,80 +17,99 @@
  */
 package com.alibaba.jstorm.metric;
 
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.alibaba.jstorm.common.metric.Histogram;
-import com.alibaba.jstorm.common.metric.MetricRegistry;
-import com.alibaba.jstorm.common.metric.window.Metric;
-
-public class SimpleJStormMetric extends JStormMetrics implements Runnable{
-    private static final Logger LOG = LoggerFactory.getLogger(SimpleJStormMetric.class);
-    
-    protected static MetricRegistry metrics = JStormMetrics.workerMetrics;
-    static {
-        Metric.setEnable(true);
+import com.alibaba.jstorm.common.metric.*;
+import com.codahale.metrics.Gauge;
+
+/**
+ * Simplified metrics, only for worker metrics. All metrics are logged locally without reporting to the TM or nimbus.
+ *
+ * @author Cody (weiyue.wy@alibaba-inc.com)
+ * @since 2.0.5
+ */
+public class SimpleJStormMetric extends JStormMetrics {
+    private static final long serialVersionUID = 7468005641982249536L;
+
+    protected static final AsmMetricRegistry metrics = JStormMetrics.getWorkerMetrics();
+
+    public static void updateNimbusHistogram(String name, Number obj) {
+        updateHistogram(NIMBUS_METRIC_KEY, name, obj);
     }
-    
-    protected static SimpleJStormMetric instance = null;
-    
-    
-    public static SimpleJStormMetric mkInstance() {
-        synchronized (SimpleJStormMetric.class) {
-            if (instance == null) {
-                instance = new SimpleJStormMetric();
-            }
-            
-            return instance;
-        }
+
+    public static void updateSupervisorHistogram(String name, Number obj) {
+        updateHistogram(SUPERVISOR_METRIC_KEY, name, obj);
     }
-    
-    protected SimpleJStormMetric() {
-        
+
+    public static void updateNimbusMeter(String name, Number obj) {
+        updateMeter(NIMBUS_METRIC_KEY, name, obj);
+    }
+
+    public static void updateSupervisorMeter(String name, Number obj) {
+        updateMeter(SUPERVISOR_METRIC_KEY, name, obj);
+    }
+
+    public static void updateNimbusCounter(String name, Number obj) {
+        updateCounter(NIMBUS_METRIC_KEY, name, obj);
     }
-    
-    public static Histogram registerHistorgram(String name) {
-        return JStormMetrics.registerWorkerHistogram(name);
+
+    public static void updateSupervisorCounter(String name, Number obj) {
+        updateCounter(SUPERVISOR_METRIC_KEY, name, obj);
     }
-    
-    public static void updateHistorgram(String name, Number obj) {
-        LOG.debug(name  + ":" + obj.doubleValue());
-        Histogram histogram =  (Histogram)metrics.getMetric(name);
+
+    public static void updateHistogram(String key, String name, Number obj) {
+        String formalName = MetricUtils.workerMetricName(key, host, 0, name, MetricType.HISTOGRAM);
+        AsmHistogram histogram = (AsmHistogram) metrics.getMetric(formalName);
         if (histogram == null) {
-        	try {
-        		histogram = registerHistorgram(name);
-        	}catch(Exception e) {
-        		LOG.info("{} has been register", name);
-        		return;
-        	}
+            histogram = registerHistogram(name);
         }
-        
+
         histogram.update(obj);
-        
     }
 
-    @Override
-    public void run() {
-        // TODO Auto-generated method stub
-        Map<String, Metric> map = metrics.getMetrics();
-        
-        for (Entry<String, Metric> entry : map.entrySet()) {
-            String key = entry.getKey();
-            Metric metric = entry.getValue();
-            
-            LOG.info(key + ":" +  metric.getSnapshot());
+    public static void updateMeter(String key, String name, Number obj) {
+        String formalName = MetricUtils.workerMetricName(key, host, 0, name, MetricType.METER);
+        AsmMeter meter = (AsmMeter) metrics.getMetric(formalName);
+        if (meter == null) {
+            meter = registerMeter(name);
         }
+
+        meter.update(obj);
+    }
+
+    public static void updateCounter(String key, String name, Number obj) {
+        String formalName = MetricUtils.workerMetricName(key, host, 0, name, MetricType.COUNTER);
+        AsmCounter counter = (AsmCounter) metrics.getMetric(formalName);
+        if (counter == null) {
+            counter = registerCounter(name);
+        }
+
+        counter.update(obj);
+    }
+
+    private static AsmGauge registerGauge(Gauge<Double> gauge, String name) {
+        AsmGauge gauge1 = new AsmGauge(gauge);
+        gauge1.setOp(AsmMetric.MetricOp.LOG);
+
+        return registerWorkerGauge(topologyId, name, gauge1);
     }
-    
-    
-    public static void main(String[] args) {
-        updateHistorgram("test", 11100.0);
-        
-        SimpleJStormMetric instance = new SimpleJStormMetric();
-        
-        instance.run();
+
+    private static AsmHistogram registerHistogram(String name) {
+        AsmHistogram histogram = new AsmHistogram();
+        histogram.setOp(AsmMetric.MetricOp.LOG);
+
+        return registerWorkerHistogram(NIMBUS_METRIC_KEY, name, histogram);
+    }
+
+    public static AsmMeter registerMeter(String name) {
+        AsmMeter meter = new AsmMeter();
+        meter.setOp(AsmMetric.MetricOp.LOG);
+
+        return registerWorkerMeter(NIMBUS_METRIC_KEY, name, meter);
+    }
+
+    public static AsmCounter registerCounter(String name) {
+        AsmCounter counter = new AsmCounter();
+        counter.setOp(AsmMetric.MetricOp.LOG);
+
+        return registerWorkerCounter(NIMBUS_METRIC_KEY, name, counter);
     }
 }
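
A hedged call-site sketch for the new static update helpers; the metric name is made up for the example.

    import com.alibaba.jstorm.metric.SimpleJStormMetric;

    public class MetricSketch {
        static void recordScheduleLatency(long elapsedMs) {
            // the histogram is registered lazily on first use and, per the class
            // javadoc, logged locally rather than reported upstream
            SimpleJStormMetric.updateNimbusHistogram("ScheduleTimeMs", elapsedMs);
        }
    }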

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/metric/TimeTicker.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/TimeTicker.java b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/TimeTicker.java
new file mode 100644
index 0000000..e44e7b5
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/TimeTicker.java
@@ -0,0 +1,52 @@
+package com.alibaba.jstorm.metric;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * A simple util class to measure elapsed run time.
+ *
+ * @author Cody (weiyue.wy@alibaba-inc.com)
+ * @since 2.0.5
+ */
+public class TimeTicker {
+    private TimeUnit unit;
+    private long start;
+    private long end;
+
+    public TimeTicker(TimeUnit unit) {
+        if (unit != TimeUnit.NANOSECONDS && unit != TimeUnit.MILLISECONDS) {
+            throw new IllegalArgumentException("invalid unit!");
+        }
+        this.unit = unit;
+    }
+
+    public TimeTicker(TimeUnit unit, boolean start) {
+        this(unit);
+        if (start) {
+            start();
+        }
+    }
+
+    public void start() {
+        if (unit == TimeUnit.MILLISECONDS) {
+            this.start = System.currentTimeMillis();
+        } else if (unit == TimeUnit.NANOSECONDS) {
+            this.start = System.nanoTime();
+        }
+    }
+
+    public long stop() {
+        if (unit == TimeUnit.MILLISECONDS) {
+            this.end = System.currentTimeMillis();
+        } else if (unit == TimeUnit.NANOSECONDS) {
+            this.end = System.nanoTime();
+        }
+        return end - start;
+    }
+
+    public long stopAndRestart() {
+        long elapsed = stop();
+        start();
+        return elapsed;
+    }
+}

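TimeTicker wraps System.currentTimeMillis()/System.nanoTime() behind a common
start/stop API. A small self-contained sketch of how it might be used, assuming
com.alibaba.jstorm.metric.TimeTicker from the diff above is on the classpath:

    import java.util.concurrent.TimeUnit;

    import com.alibaba.jstorm.metric.TimeTicker;

    public class TimeTickerExample {
        public static void main(String[] args) throws InterruptedException {
            // the (unit, start) constructor starts measuring immediately
            TimeTicker ticker = new TimeTicker(TimeUnit.MILLISECONDS, true);

            Thread.sleep(100);                    // simulated work
            long first = ticker.stopAndRestart(); // ~100 ms, timer restarts

            Thread.sleep(50);                     // more simulated work
            long second = ticker.stop();          // ~50 ms

            System.out.println(first + " ms, " + second + " ms");
        }
    }
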
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/metric/TopologyMetricContext.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/TopologyMetricContext.java b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/TopologyMetricContext.java
new file mode 100644
index 0000000..8dae281
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/TopologyMetricContext.java
@@ -0,0 +1,528 @@
+package com.alibaba.jstorm.metric;
+
+import backtype.storm.generated.MetricInfo;
+import backtype.storm.generated.MetricSnapshot;
+import backtype.storm.generated.TopologyMetric;
+import com.alibaba.jstorm.client.ConfigExtension;
+import com.alibaba.jstorm.schedule.default_assign.ResourceWorkerSlot;
+import com.codahale.metrics.Histogram;
+import com.codahale.metrics.Snapshot;
+import com.codahale.metrics.Timer;
+import org.apache.commons.lang.ArrayUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.*;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.ReentrantLock;
+
+/**
+ * A topology metric context contains all in-memory metric data of a topology.
+ * This class resides in TopologyMaster.
+ *
+ * @author Cody (weiyue.wy@alibaba-inc.com)
+ * @since 2.0.5
+ */
+public class TopologyMetricContext {
+    private final Logger LOG = LoggerFactory.getLogger(getClass());
+
+    private final ReentrantLock lock = new ReentrantLock();
+    private Set<ResourceWorkerSlot> workerSet;
+    private int taskNum = 1;
+    private ConcurrentMap<String, MetricInfo> memCache = new ConcurrentHashMap<>();
+    private final ConcurrentMap<String, Long> memMeta = new ConcurrentHashMap<>();
+    private final AtomicBoolean isMerging = new AtomicBoolean(false);
+    private String topologyId;
+    private volatile int flushedMetaNum = 0;
+
+    /**
+     * sync meta from metric cache on startup
+     */
+    private volatile boolean syncMeta = false;
+
+    private Map conf;
+
+    public TopologyMetricContext() {
+    }
+
+    public TopologyMetricContext(Set<ResourceWorkerSlot> workerSet) {
+        this.workerSet = workerSet;
+    }
+
+    public TopologyMetricContext(String topologyId, Set<ResourceWorkerSlot> workerSet, Map conf) {
+        this(workerSet);
+        this.topologyId = topologyId;
+        this.conf = conf;
+    }
+
+    public ConcurrentMap<String, Long> getMemMeta() {
+        return memMeta;
+    }
+
+    public String getTopologyId() {
+        return topologyId;
+    }
+
+    public void setTopologyId(String topologyId) {
+        this.topologyId = topologyId;
+    }
+
+    public boolean syncMeta() {
+        return syncMeta;
+    }
+
+    public void setSyncMeta(boolean syncMeta) {
+        this.syncMeta = syncMeta;
+    }
+
+    public int getTaskNum() {
+        return taskNum;
+    }
+
+    public void setTaskNum(int taskNum) {
+        this.taskNum = taskNum;
+    }
+
+    public int getFlushedMetaNum() {
+        return flushedMetaNum;
+    }
+
+    public void setFlushedMetaNum(int flushedMetaNum) {
+        this.flushedMetaNum = flushedMetaNum;
+    }
+
+    public ReentrantLock getLock() {
+        return lock;
+    }
+
+    public int getWorkerNum() {
+        return workerSet.size();
+    }
+
+    public void setWorkerSet(Set<ResourceWorkerSlot> workerSet) {
+        this.workerSet = workerSet;
+    }
+
+    public void resetUploadedMetrics() {
+        this.memCache.clear();
+    }
+
+    public final ConcurrentMap<String, MetricInfo> getMemCache() {
+        return memCache;
+    }
+
+    public void addToMemCache(String workerSlot, MetricInfo metricInfo) {
+        memCache.put(workerSlot, metricInfo);
+        LOG.info("update mem cache, worker:{}, total uploaded:{}", workerSlot, memCache.size());
+    }
+
+    public boolean readyToUpload() {
+        return memCache.size() >= workerSet.size();
+    }
+
+    public boolean isMerging() {
+        return isMerging.get();
+    }
+
+    public void setMerging(boolean isMerging) {
+        this.isMerging.set(isMerging);
+    }
+
+    public int getUploadedWorkerNum() {
+        return memCache.size();
+    }
+
+    public TopologyMetric mergeMetrics() {
+        long start = System.currentTimeMillis();
+
+        if (getMemCache().size() == 0) {
+            //LOG.info("topology:{}, metric size is 0, skip...", topologyId);
+            return null;
+        }
+        if (isMerging()) {
+            LOG.info("topology {} is already merging, skip...", topologyId);
+            return null;
+        }
+
+        setMerging(true);
+
+        try {
+            Map<String, MetricInfo> workerMetricMap = this.memCache;
+            // reset mem cache
+            this.memCache = new ConcurrentHashMap<>();
+
+            MetricInfo topologyMetrics = MetricUtils.mkMetricInfo();
+            MetricInfo componentMetrics = MetricUtils.mkMetricInfo();
+            MetricInfo taskMetrics = MetricUtils.mkMetricInfo();
+            MetricInfo streamMetrics = MetricUtils.mkMetricInfo();
+            MetricInfo workerMetrics = MetricUtils.mkMetricInfo();
+            MetricInfo nettyMetrics = MetricUtils.mkMetricInfo();
+            TopologyMetric tpMetric =
+                    new TopologyMetric(topologyMetrics, componentMetrics, workerMetrics, taskMetrics, streamMetrics, nettyMetrics);
+
+
+            // metric name => worker count
+            Map<String, Integer> metricNameCounters = new HashMap<>();
+
+            // special case for histograms & timers: we merge the raw points to compute a new snapshot.
+            Map<String, Map<Integer, Histogram>> histograms = new HashMap<>();
+            Map<String, Map<Integer, Timer>> timers = new HashMap<>();
+
+            // iterate metrics of all workers within the same topology
+            for (ConcurrentMap.Entry<String, MetricInfo> metricEntry : workerMetricMap.entrySet()) {
+                MetricInfo metricInfo = metricEntry.getValue();
+
+                // merge counters: add old and new values. Note that we only fold incoming metrics
+                // into the already-merged data; the same holds for all metric types below.
+                Map<String, Map<Integer, MetricSnapshot>> metrics = metricInfo.get_metrics();
+                for (Map.Entry<String, Map<Integer, MetricSnapshot>> metric : metrics.entrySet()) {
+                    String metricName = metric.getKey();
+                    Map<Integer, MetricSnapshot> data = metric.getValue();
+                    MetaType metaType = MetricUtils.metaType(metricName);
+
+                    MetricType metricType = MetricUtils.metricType(metricName);
+                    if (metricType == MetricType.COUNTER) {
+                        mergeCounters(tpMetric, metaType, metricName, data);
+                    } else if (metricType == MetricType.GAUGE) {
+                        mergeGauges(tpMetric, metaType, metricName, data);
+                    } else if (metricType == MetricType.METER) {
+                        mergeMeters(getMetricInfoByType(tpMetric, metaType), metricName, data, metricNameCounters);
+                    } else if (metricType == MetricType.HISTOGRAM) {
+                        mergeHistograms(getMetricInfoByType(tpMetric, metaType),
+                                metricName, data, metricNameCounters, histograms);
+                    } else if (metricType == MetricType.TIMER) {
+                        mergeTimers(getMetricInfoByType(tpMetric, metaType),
+                                metricName, data, metricNameCounters, timers);
+                    }
+                }
+            }
+            adjustHistogramTimerMetrics(tpMetric, metricNameCounters, histograms, timers);
+            // for counters, we only report delta data every time, need to sum with old data
+            //adjustCounterMetrics(tpMetric, oldTpMetric);
+
+            LOG.info("merge topology metrics:{}, cost:{}", topologyId, System.currentTimeMillis() - start);
+            // debug logs
+            //MetricUtils.printMetricWinSize(componentMetrics);
+
+            return tpMetric;
+        } finally {
+            setMerging(false);
+        }
+    }
+
+
+    protected MetricInfo getMetricInfoByType(TopologyMetric topologyMetric, MetaType type) {
+        if (type == MetaType.TASK) {
+            return topologyMetric.get_taskMetric();
+        } else if (type == MetaType.WORKER) {
+            return topologyMetric.get_workerMetric();
+        } else if (type == MetaType.COMPONENT) {
+            return topologyMetric.get_componentMetric();
+        } else if (type == MetaType.STREAM) {
+            return topologyMetric.get_streamMetric();
+        } else if (type == MetaType.NETTY) {
+            return topologyMetric.get_nettyMetric();
+        } else if (type == MetaType.TOPOLOGY) {
+            return topologyMetric.get_topologyMetric();
+        }
+        return null;
+    }
+
+    public void mergeCounters(TopologyMetric tpMetric, MetaType metaType, String meta,
+                              Map<Integer, MetricSnapshot> data) {
+        MetricInfo metricInfo = getMetricInfoByType(tpMetric, metaType);
+        Map<Integer, MetricSnapshot> existing = metricInfo.get_metrics().get(meta);
+        if (existing == null) {
+            metricInfo.put_to_metrics(meta, data);
+        } else {
+            for (Map.Entry<Integer, MetricSnapshot> dataEntry : data.entrySet()) {
+                Integer win = dataEntry.getKey();
+                MetricSnapshot snapshot = dataEntry.getValue();
+                MetricSnapshot old = existing.get(win);
+                if (old == null) {
+                    existing.put(win, snapshot);
+                } else {
+                    old.set_ts(snapshot.get_ts());
+                    old.set_longValue(old.get_longValue() + snapshot.get_longValue());
+                }
+            }
+        }
+    }
+
+    public void mergeGauges(TopologyMetric tpMetric, MetaType metaType, String meta,
+                            Map<Integer, MetricSnapshot> data) {
+        MetricInfo metricInfo = getMetricInfoByType(tpMetric, metaType);
+        Map<Integer, MetricSnapshot> existing = metricInfo.get_metrics().get(meta);
+        if (existing == null) {
+            metricInfo.put_to_metrics(meta, data);
+        } else {
+            for (Map.Entry<Integer, MetricSnapshot> dataEntry : data.entrySet()) {
+                Integer win = dataEntry.getKey();
+                MetricSnapshot snapshot = dataEntry.getValue();
+                MetricSnapshot old = existing.get(win);
+                if (old == null) {
+                    existing.put(win, snapshot);
+                } else {
+                    if (snapshot.get_ts() >= old.get_ts()) {
+                        old.set_ts(snapshot.get_ts());
+                        if (metaType != MetaType.TOPOLOGY) {
+                            old.set_doubleValue(snapshot.get_doubleValue());
+                        } else { // for topology metric, gauge might be add-able, e.g., cpu, memory, etc.
+                            old.set_doubleValue(old.get_doubleValue() + snapshot.get_doubleValue());
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    /**
+     * meters are not sampled.
+     */
+    public void mergeMeters(MetricInfo metricInfo, String meta, Map<Integer, MetricSnapshot> data,
+                            Map<String, Integer> metaCounters) {
+        Map<Integer, MetricSnapshot> existing = metricInfo.get_metrics().get(meta);
+        if (existing == null) {
+            metricInfo.put_to_metrics(meta, data);
+        } else {
+            for (Map.Entry<Integer, MetricSnapshot> dataEntry : data.entrySet()) {
+                Integer win = dataEntry.getKey();
+                MetricSnapshot snapshot = dataEntry.getValue();
+                MetricSnapshot old = existing.get(win);
+                if (old == null) {
+                    existing.put(win, snapshot);
+                } else {
+                    if (snapshot.get_ts() >= old.get_ts()) {
+                        old.set_ts(snapshot.get_ts());
+                        old.set_mean(old.get_mean() + snapshot.get_mean());
+                        old.set_m1(old.get_m1() + snapshot.get_m1());
+                        old.set_m5(old.get_m5() + snapshot.get_m5());
+                        old.set_m15(old.get_m15() + snapshot.get_m15());
+                    }
+                }
+            }
+        }
+        updateMetricCounters(meta, metaCounters);
+    }
+
+    /**
+     * histograms are sampled, but we just update points
+     */
+    public void mergeHistograms(MetricInfo metricInfo, String meta, Map<Integer, MetricSnapshot> data,
+                                Map<String, Integer> metaCounters, Map<String, Map<Integer, Histogram>> histograms) {
+        Map<Integer, MetricSnapshot> existing = metricInfo.get_metrics().get(meta);
+        if (existing == null) {
+            metricInfo.put_to_metrics(meta, data);
+            Map<Integer, Histogram> histogramMap = new HashMap<>();
+            for (Map.Entry<Integer, MetricSnapshot> dataEntry : data.entrySet()) {
+                Histogram histogram = MetricUtils.metricSnapshot2Histogram(dataEntry.getValue());
+                histogramMap.put(dataEntry.getKey(), histogram);
+            }
+            histograms.put(meta, histogramMap);
+        } else {
+            for (Map.Entry<Integer, MetricSnapshot> dataEntry : data.entrySet()) {
+                Integer win = dataEntry.getKey();
+                MetricSnapshot snapshot = dataEntry.getValue();
+                MetricSnapshot old = existing.get(win);
+                if (old == null) {
+                    existing.put(win, snapshot);
+                    histograms.get(meta).put(win, MetricUtils.metricSnapshot2Histogram(snapshot));
+                } else {
+                    if (snapshot.get_ts() >= old.get_ts()) {
+                        old.set_ts(snapshot.get_ts());
+                        // update points
+                        MetricUtils.updateHistogramPoints(histograms.get(meta).get(win), snapshot.get_points());
+                    }
+                }
+            }
+        }
+        updateMetricCounters(meta, metaCounters);
+    }
+
+    /**
+     * timers are sampled, we just update points
+     */
+    public void mergeTimers(MetricInfo metricInfo, String meta, Map<Integer, MetricSnapshot> data,
+                            Map<String, Integer> metaCounters, Map<String, Map<Integer, Timer>> timers) {
+        Map<Integer, MetricSnapshot> existing = metricInfo.get_metrics().get(meta);
+        if (existing == null) {
+            metricInfo.put_to_metrics(meta, data);
+            Map<Integer, Timer> timerMap = new HashMap<>();
+            for (Map.Entry<Integer, MetricSnapshot> dataEntry : data.entrySet()) {
+                Timer timer = MetricUtils.metricSnapshot2Timer(dataEntry.getValue());
+                timerMap.put(dataEntry.getKey(), timer);
+            }
+            timers.put(meta, timerMap);
+        } else {
+            for (Map.Entry<Integer, MetricSnapshot> dataEntry : data.entrySet()) {
+                Integer win = dataEntry.getKey();
+                MetricSnapshot snapshot = dataEntry.getValue();
+                MetricSnapshot old = existing.get(win);
+                if (old == null) {
+                    existing.put(win, snapshot);
+                    timers.get(meta).put(win, MetricUtils.metricSnapshot2Timer(snapshot));
+                } else {
+                    if (snapshot.get_ts() >= old.get_ts()) {
+                        old.set_ts(snapshot.get_ts());
+                        old.set_m1(old.get_m1() + snapshot.get_m1());
+                        old.set_m5(old.get_m5() + snapshot.get_m5());
+                        old.set_m15(old.get_m15() + snapshot.get_m15());
+
+                        // update points
+                        MetricUtils.updateTimerPoints(timers.get(meta).get(win), snapshot.get_points());
+                    }
+                }
+            }
+        }
+        updateMetricCounters(meta, metaCounters);
+    }
+
+    /**
+     * computes occurrences of specified metric name
+     */
+    protected void updateMetricCounters(String metricName, Map<String, Integer> metricNameCounters) {
+        if (metricNameCounters.containsKey(metricName)) {
+            metricNameCounters.put(metricName, metricNameCounters.get(metricName) + 1);
+        } else {
+            metricNameCounters.put(metricName, 1);
+        }
+    }
+
+    protected void adjustHistogramTimerMetrics(TopologyMetric tpMetric, Map<String, Integer> metaCounters,
+                                               Map<String, Map<Integer, Histogram>> histograms,
+                                               Map<String, Map<Integer, Timer>> timers) {
+        resetPoints(tpMetric.get_taskMetric().get_metrics());
+        resetPoints(tpMetric.get_streamMetric().get_metrics());
+        resetPoints(tpMetric.get_nettyMetric().get_metrics());
+        resetPoints(tpMetric.get_workerMetric().get_metrics());
+
+        Map<String, Map<Integer, MetricSnapshot>> compMetrics =
+                tpMetric.get_componentMetric().get_metrics();
+        Map<String, Map<Integer, MetricSnapshot>> topologyMetrics =
+                tpMetric.get_topologyMetric().get_metrics();
+
+        adjustMetrics(compMetrics, metaCounters, histograms, timers);
+        adjustMetrics(topologyMetrics, metaCounters, histograms, timers);
+    }
+
+    private void adjustMetrics(Map<String, Map<Integer, MetricSnapshot>> metrics, Map<String, Integer> metaCounters,
+                               Map<String, Map<Integer, Histogram>> histograms, Map<String, Map<Integer, Timer>> timers) {
+        for (Map.Entry<String, Map<Integer, MetricSnapshot>> metricEntry : metrics.entrySet()) {
+            String meta = metricEntry.getKey();
+            MetricType metricType = MetricUtils.metricType(meta);
+            MetaType metaType = MetricUtils.metaType(meta);
+            Map<Integer, MetricSnapshot> winData = metricEntry.getValue();
+
+            if (metricType == MetricType.HISTOGRAM) {
+                for (Map.Entry<Integer, MetricSnapshot> dataEntry : winData.entrySet()) {
+                    MetricSnapshot snapshot = dataEntry.getValue();
+                    Integer cnt = metaCounters.get(meta);
+                    Histogram histogram = histograms.get(meta).get(dataEntry.getKey());
+                    if (cnt != null && cnt > 1) {
+
+                        Snapshot snapshot1 = histogram.getSnapshot();
+                        snapshot.set_mean(snapshot1.getMean());
+                        snapshot.set_p50(snapshot1.getMedian());
+                        snapshot.set_p75(snapshot1.get75thPercentile());
+                        snapshot.set_p95(snapshot1.get95thPercentile());
+                        snapshot.set_p98(snapshot1.get98thPercentile());
+                        snapshot.set_p99(snapshot1.get99thPercentile());
+                        snapshot.set_p999(snapshot1.get999thPercentile());
+                        snapshot.set_stddev(snapshot1.getStdDev());
+                        snapshot.set_min(snapshot1.getMin());
+                        snapshot.set_max(snapshot1.getMax());
+
+                        if (metaType == MetaType.TOPOLOGY) {
+                            snapshot.set_points(Arrays.asList(ArrayUtils.toObject(snapshot1.getValues())));
+                        }
+                    }
+                    if (metaType != MetaType.TOPOLOGY) {
+                        snapshot.set_points(new ArrayList<Long>(0));
+                    }
+                }
+
+            } else if (metricType == MetricType.TIMER) {
+                for (Map.Entry<Integer, MetricSnapshot> dataEntry : winData.entrySet()) {
+                    MetricSnapshot snapshot = dataEntry.getValue();
+                    Integer cnt = metaCounters.get(meta);
+                    if (cnt != null && cnt > 1) {
+                        Timer timer = timers.get(meta).get(dataEntry.getKey());
+                        Snapshot snapshot1 = timer.getSnapshot();
+                        snapshot.set_p50(snapshot1.getMedian());
+                        snapshot.set_p75(snapshot1.get75thPercentile());
+                        snapshot.set_p95(snapshot1.get95thPercentile());
+                        snapshot.set_p98(snapshot1.get98thPercentile());
+                        snapshot.set_p99(snapshot1.get99thPercentile());
+                        snapshot.set_p999(snapshot1.get999thPercentile());
+                        snapshot.set_stddev(snapshot1.getStdDev());
+                        snapshot.set_min(snapshot1.getMin());
+                        snapshot.set_max(snapshot1.getMax());
+                    }
+                    snapshot.set_points(new ArrayList<Long>(0));
+                }
+            }
+        }
+    }
+
+    private void resetPoints(Map<String, Map<Integer, MetricSnapshot>> metrics) {
+        for (Map.Entry<String, Map<Integer, MetricSnapshot>> metricEntry : metrics.entrySet()) {
+            String meta = metricEntry.getKey();
+            MetricType metricType = MetricUtils.metricType(meta);
+            Map<Integer, MetricSnapshot> winData = metricEntry.getValue();
+
+            if (metricType == MetricType.HISTOGRAM || metricType == MetricType.TIMER) {
+                for (MetricSnapshot snapshot : winData.values()) {
+                    snapshot.set_points(new ArrayList<Long>(0));
+                }
+            }
+        }
+    }
+
+    protected void adjustCounterMetrics(TopologyMetric tpMetric, TopologyMetric oldMetric) {
+        if (oldMetric != null) {
+            mergeCounters(tpMetric.get_streamMetric().get_metrics(),
+                    oldMetric.get_streamMetric().get_metrics());
+
+            mergeCounters(tpMetric.get_taskMetric().get_metrics(),
+                    oldMetric.get_taskMetric().get_metrics());
+
+            mergeCounters(tpMetric.get_componentMetric().get_metrics(),
+                    oldMetric.get_componentMetric().get_metrics());
+
+            mergeCounters(tpMetric.get_workerMetric().get_metrics(),
+                    oldMetric.get_workerMetric().get_metrics());
+
+            mergeCounters(tpMetric.get_nettyMetric().get_metrics(),
+                    oldMetric.get_nettyMetric().get_metrics());
+        }
+    }
+
+    /**
+     * Sums old and new counter snapshots; the sums are stored in the new snapshots.
+     */
+    private void mergeCounters(Map<String, Map<Integer, MetricSnapshot>> newCounters,
+                               Map<String, Map<Integer, MetricSnapshot>> oldCounters) {
+        for (Map.Entry<String, Map<Integer, MetricSnapshot>> entry : newCounters.entrySet()) {
+            String metricName = entry.getKey();
+            Map<Integer, MetricSnapshot> snapshots = entry.getValue();
+            Map<Integer, MetricSnapshot> oldSnapshots = oldCounters.get(metricName);
+            if (oldSnapshots != null && oldSnapshots.size() > 0) {
+                for (Map.Entry<Integer, MetricSnapshot> snapshotEntry : snapshots.entrySet()) {
+                    Integer win = snapshotEntry.getKey();
+                    MetricSnapshot snapshot = snapshotEntry.getValue();
+                    MetricSnapshot oldSnapshot = oldSnapshots.get(win);
+                    if (oldSnapshot != null) {
+                        snapshot.set_longValue(snapshot.get_longValue() + oldSnapshot.get_longValue());
+                    }
+                }
+            }
+        }
+    }
+
+    private double getSampleRate() {
+        return ConfigExtension.getMetricSampleRate(conf);
+    }
+
+}

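The merge rules above differ per metric type: counters sum their long values across
workers; gauges keep the newest value, except topology-level gauges, which are summed
because cpu/memory are additive across workers; meters add their rates; histograms and
timers merge the raw points and recompute percentiles afterwards. A stripped-down sketch
of the counter-vs-gauge rules, using plain maps instead of the thrift MetricSnapshot type:

    import java.util.HashMap;
    import java.util.Map;

    public class MergeRuleSketch {
        // counters: values from different workers are summed per window
        static void mergeCounter(Map<Integer, Long> existing, Map<Integer, Long> incoming) {
            for (Map.Entry<Integer, Long> e : incoming.entrySet()) {
                Long old = existing.get(e.getKey());
                existing.put(e.getKey(), old == null ? e.getValue() : old + e.getValue());
            }
        }

        // non-topology gauges: the newer value simply wins per window
        static void mergeGauge(Map<Integer, Double> existing, Map<Integer, Double> incoming) {
            existing.putAll(incoming);
        }

        public static void main(String[] args) {
            Map<Integer, Long> fromWorkerA = new HashMap<Integer, Long>();
            fromWorkerA.put(60, 10L);                // 60s window
            Map<Integer, Long> fromWorkerB = new HashMap<Integer, Long>();
            fromWorkerB.put(60, 5L);

            mergeCounter(fromWorkerA, fromWorkerB);
            System.out.println(fromWorkerA.get(60)); // prints 15
        }
    }
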
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/queue/disruptor/JstormEvent.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/queue/disruptor/JstormEvent.java b/jstorm-core/src/main/java/com/alibaba/jstorm/queue/disruptor/JstormEvent.java
index eabcd44..71e7cac 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/queue/disruptor/JstormEvent.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/queue/disruptor/JstormEvent.java
@@ -31,11 +31,10 @@ public class JstormEvent {
         this.msgId = msgId;
     }
 
-    public final static EventFactory<JstormEvent> EVENT_FACTORY =
-            new EventFactory<JstormEvent>() {
-                public JstormEvent newInstance() {
-                    return new JstormEvent();
-                }
-            };
+    public final static EventFactory<JstormEvent> EVENT_FACTORY = new EventFactory<JstormEvent>() {
+        public JstormEvent newInstance() {
+            return new JstormEvent();
+        }
+    };
 
 }

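EVENT_FACTORY exists so a Disruptor ring buffer can pre-allocate one JstormEvent per
slot. A minimal sketch of the publish side; it assumes the LMAX Disruptor 3.x API and
the setMsgId setter implied by the diff above:

    import com.alibaba.jstorm.queue.disruptor.JstormEvent;
    import com.lmax.disruptor.RingBuffer;

    public class JstormEventPublishExample {
        public static void main(String[] args) {
            // the factory fills every slot of the ring up front
            RingBuffer<JstormEvent> ring =
                    RingBuffer.createSingleProducer(JstormEvent.EVENT_FACTORY, 1024);

            long seq = ring.next();          // claim the next slot
            try {
                ring.get(seq).setMsgId(String.valueOf(seq)); // mutate the slot in place
            } finally {
                ring.publish(seq);           // make the event visible to consumers
            }
        }
    }
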
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/queue/disruptor/JstormEventHandler.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/queue/disruptor/JstormEventHandler.java b/jstorm-core/src/main/java/com/alibaba/jstorm/queue/disruptor/JstormEventHandler.java
index dfc43d1..107441d 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/queue/disruptor/JstormEventHandler.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/queue/disruptor/JstormEventHandler.java
@@ -33,8 +33,7 @@ public class JstormEventHandler implements EventHandler {
     }
 
     @Override
-    public void onEvent(Object event, long sequence, boolean endOfBatch)
-            throws Exception {
+    public void onEvent(Object event, long sequence, boolean endOfBatch) throws Exception {
         long msgId = Long.parseLong(((JstormEvent) event).getMsgId());
         // if (msgId % size ==0) {
         // logger.warn("consumer msgId=" + msgId + ", seq=" + sequence);

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/Assignment.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/Assignment.java b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/Assignment.java
index 69620bd..8a05662 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/Assignment.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/Assignment.java
@@ -29,31 +29,28 @@ import org.apache.commons.lang.builder.ToStringStyle;
 import com.alibaba.jstorm.schedule.default_assign.ResourceWorkerSlot;
 
 /**
- * Assignment of one Toplogy, stored in /ZK-DIR/assignments/{topologyid}
- * nodeHost {supervisorid: hostname} -- assigned supervisor Map
- * taskStartTimeSecs: {taskid, taskStartSeconds} masterCodeDir: topology source
- * code's dir in Nimbus taskToResource: {taskid, ResourceAssignment}
+ * Assignment of one Topology, stored in /ZK-DIR/assignments/{topologyid}. nodeHost {supervisorid: hostname} -- assigned supervisor map; taskStartTimeSecs:
+ * {taskid, taskStartSeconds}; masterCodeDir: topology source code's dir in Nimbus; taskToResource: {taskid, ResourceAssignment}
  * 
  * @author Lixin/Longda
  */
 public class Assignment implements Serializable {
     public enum AssignmentType {
-        Assign, Config
+        Assign, UpdateTopology, ScaleTopology
     }
 
     private static final long serialVersionUID = 6087667851333314069L;
 
     private final String masterCodeDir;
     /**
-     * @@@ nodeHost store <supervisorId, hostname>, this will waste some zk
-     *     storage
+     * @@@ nodeHost stores <supervisorId, hostname>, this will waste some zk storage
      */
     private final Map<String, String> nodeHost;
     private final Map<Integer, Integer> taskStartTimeSecs;
     private final Set<ResourceWorkerSlot> workers;
 
     private long timeStamp;
-    
+
     private AssignmentType type;
 
     public Assignment() {
@@ -64,10 +61,8 @@ public class Assignment implements Serializable {
         this.timeStamp = System.currentTimeMillis();
         this.type = AssignmentType.Assign;
     }
-    
-    public Assignment(String masterCodeDir, Set<ResourceWorkerSlot> workers,
-            Map<String, String> nodeHost,
-            Map<Integer, Integer> taskStartTimeSecs) {
+
+    public Assignment(String masterCodeDir, Set<ResourceWorkerSlot> workers, Map<String, String> nodeHost, Map<Integer, Integer> taskStartTimeSecs) {
         this.workers = workers;
         this.nodeHost = nodeHost;
         this.taskStartTimeSecs = taskStartTimeSecs;
@@ -79,11 +74,11 @@ public class Assignment implements Serializable {
     public void setAssignmentType(AssignmentType type) {
         this.type = type;
     }
-    
+
     public AssignmentType getAssignmentType() {
         return type;
     }
-    
+
     public Map<String, String> getNodeHost() {
         return nodeHost;
     }
@@ -106,11 +101,9 @@ public class Assignment implements Serializable {
      * @param supervisorId
      * @return Map<Integer, WorkerSlot>
      */
-    public Map<Integer, ResourceWorkerSlot> getTaskToNodePortbyNode(
-            String supervisorId) {
+    public Map<Integer, ResourceWorkerSlot> getTaskToNodePortbyNode(String supervisorId) {
 
-        Map<Integer, ResourceWorkerSlot> result =
-                new HashMap<Integer, ResourceWorkerSlot>();
+        Map<Integer, ResourceWorkerSlot> result = new HashMap<Integer, ResourceWorkerSlot>();
         for (ResourceWorkerSlot worker : workers) {
             if (worker.getNodeId().equals(supervisorId)) {
                 result.put(worker.getPort(), worker);
@@ -144,8 +137,7 @@ public class Assignment implements Serializable {
     public Set<Integer> getCurrentWorkerTasks(String supervisorId, int port) {
 
         for (ResourceWorkerSlot worker : workers) {
-            if (worker.getNodeId().equals(supervisorId)
-                    && worker.getPort() == port)
+            if (worker.getNodeId().equals(supervisorId) && worker.getPort() == port)
                 return worker.getTasks();
         }
 
@@ -164,26 +156,24 @@ public class Assignment implements Serializable {
         return this.timeStamp;
     }
 
+    public boolean isTopologyChange(long oldTimeStamp) {
+        boolean isChange = false;
+        if (timeStamp > oldTimeStamp && (type.equals(AssignmentType.UpdateTopology) || type.equals(AssignmentType.ScaleTopology)))
+            isChange = true;
+        return isChange;
+    }
+
     public void updateTimeStamp() {
         timeStamp = System.currentTimeMillis();
     }
-    
+
     @Override
     public int hashCode() {
         final int prime = 31;
         int result = 1;
-        result =
-                prime
-                        * result
-                        + ((masterCodeDir == null) ? 0 : masterCodeDir
-                                .hashCode());
-        result =
-                prime * result + ((nodeHost == null) ? 0 : nodeHost.hashCode());
-        result =
-                prime
-                        * result
-                        + ((taskStartTimeSecs == null) ? 0 : taskStartTimeSecs
-                                .hashCode());
+        result = prime * result + ((masterCodeDir == null) ? 0 : masterCodeDir.hashCode());
+        result = prime * result + ((nodeHost == null) ? 0 : nodeHost.hashCode());
+        result = prime * result + ((taskStartTimeSecs == null) ? 0 : taskStartTimeSecs.hashCode());
         result = prime * result + ((workers == null) ? 0 : workers.hashCode());
         result = prime * result + (int) (timeStamp & 0xFFFFFFFF);
         return result;
@@ -225,8 +215,7 @@ public class Assignment implements Serializable {
 
     @Override
     public String toString() {
-        return ToStringBuilder.reflectionToString(this,
-                ToStringStyle.SHORT_PREFIX_STYLE);
+        return ToStringBuilder.reflectionToString(this, ToStringStyle.SHORT_PREFIX_STYLE);
     }
 
 }

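The new isTopologyChange method lets a follower decide whether its locally downloaded
topology code is stale: the assignment must be newer than the local download timestamp
and of type UpdateTopology or ScaleTopology. A hedged sketch using only the constructors
and setters visible in the diff above; the timestamps are made up:

    import com.alibaba.jstorm.schedule.Assignment;

    public class AssignmentChangeCheck {
        public static void main(String[] args) {
            Assignment assignment = new Assignment();
            assignment.setAssignmentType(Assignment.AssignmentType.ScaleTopology);
            assignment.updateTimeStamp();   // stamp the assignment with "now"

            // pretend the follower last downloaded the code one minute ago
            long localCodeDownloadTs = System.currentTimeMillis() - 60000L;

            // newer timestamp + Update/Scale type => follower must re-download
            System.out.println(assignment.isTopologyChange(localCodeDownloadTs)); // true
        }
    }
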
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/AssignmentBak.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/AssignmentBak.java b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/AssignmentBak.java
index 3e7a770..4b9bbb3 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/AssignmentBak.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/AssignmentBak.java
@@ -32,8 +32,7 @@ public class AssignmentBak implements Serializable {
     private final Map<String, List<Integer>> componentTasks;
     private final Assignment assignment;
 
-    public AssignmentBak(Map<String, List<Integer>> componentTasks,
-            Assignment assignment) {
+    public AssignmentBak(Map<String, List<Integer>> componentTasks, Assignment assignment) {
         super();
         this.componentTasks = componentTasks;
         this.assignment = assignment;
@@ -49,7 +48,6 @@ public class AssignmentBak implements Serializable {
 
     @Override
     public String toString() {
-        return ToStringBuilder.reflectionToString(this,
-                ToStringStyle.SHORT_PREFIX_STYLE);
+        return ToStringBuilder.reflectionToString(this, ToStringStyle.SHORT_PREFIX_STYLE);
     }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/CleanRunnable.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/CleanRunnable.java b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/CleanRunnable.java
index d73adfd..77bb883 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/CleanRunnable.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/CleanRunnable.java
@@ -62,8 +62,7 @@ public class CleanRunnable implements Runnable {
                 try {
                     f.delete();
                 } catch (Exception e) {
-                    log.error("Cleaning inbox ... error deleting:"
-                            + f.getName() + "," + e);
+                    log.error("Cleaning inbox ... error deleting:" + f.getName() + "," + e);
                 }
             } else {
                 clean(f);
@@ -72,8 +71,7 @@ public class CleanRunnable implements Runnable {
                     try {
                         f.delete();
                     } catch (Exception e) {
-                        log.error("Cleaning inbox ... error deleting:"
-                                + f.getName() + "," + e);
+                        log.error("Cleaning inbox ... error deleting:" + f.getName() + "," + e);
                     }
                 }
             }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/DelayEventRunnable.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/DelayEventRunnable.java b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/DelayEventRunnable.java
index 14d38d8..a9683fd 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/DelayEventRunnable.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/DelayEventRunnable.java
@@ -28,8 +28,7 @@ public class DelayEventRunnable implements Runnable {
     private StatusType status;
     private Object[] args;
 
-    public DelayEventRunnable(NimbusData data, String topologyid,
-            StatusType status, Object[] args) {
+    public DelayEventRunnable(NimbusData data, String topologyid, StatusType status, Object[] args) {
         this.data = data;
         this.topologyid = topologyid;
         this.status = status;

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/FollowerRunnable.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/FollowerRunnable.java b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/FollowerRunnable.java
index e62c61b..d87b65e 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/FollowerRunnable.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/FollowerRunnable.java
@@ -18,21 +18,21 @@
 package com.alibaba.jstorm.schedule;
 
 import java.io.File;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.UUID;
 
+import org.apache.commons.io.FileExistsException;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.thrift.TException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import backtype.storm.Config;
-import backtype.storm.utils.Utils;
-
 import com.alibaba.jstorm.callback.RunnableCallback;
 import com.alibaba.jstorm.client.ConfigExtension;
 import com.alibaba.jstorm.cluster.Cluster;
@@ -44,10 +44,12 @@ import com.alibaba.jstorm.utils.JStormUtils;
 import com.alibaba.jstorm.utils.NetWorkUtils;
 import com.alibaba.jstorm.utils.PathUtils;
 
+import backtype.storm.Config;
+import backtype.storm.utils.Utils;
+
 public class FollowerRunnable implements Runnable {
 
-    private static final Logger LOG = LoggerFactory
-            .getLogger(FollowerRunnable.class);
+    private static final Logger LOG = LoggerFactory.getLogger(FollowerRunnable.class);
 
     private NimbusData data;
 
@@ -59,50 +61,62 @@ public class FollowerRunnable implements Runnable {
 
     private final String hostPort;
 
+    public static final String NIMBUS_DIFFER_COUNT_ZK = "nimbus.differ.count.zk";
+
+    public static final Integer SLAVE_NIMBUS_WAIT_TIME = 120;
+
     @SuppressWarnings("unchecked")
     public FollowerRunnable(final NimbusData data, int sleepTime) {
         this.data = data;
         this.sleepTime = sleepTime;
+
         if (!ConfigExtension.isNimbusUseIp(data.getConf())) {
-            this.hostPort =
-                    NetWorkUtils.hostname()
-                            + ":"
-                            + String.valueOf(Utils.getInt(data.getConf().get(
-                                    Config.NIMBUS_THRIFT_PORT)));
+            this.hostPort = NetWorkUtils.hostname() + ":" + String.valueOf(Utils.getInt(data.getConf().get(Config.NIMBUS_THRIFT_PORT)));
         } else {
-            this.hostPort =
-                    NetWorkUtils.ip()
-                            + ":"
-                            + String.valueOf(Utils.getInt(data.getConf().get(
-                                    Config.NIMBUS_THRIFT_PORT)));
+            this.hostPort = NetWorkUtils.ip() + ":" + String.valueOf(Utils.getInt(data.getConf().get(Config.NIMBUS_THRIFT_PORT)));
         }
         try {
+
             String[] hostfigs = this.hostPort.split(":");
             boolean isLocaliP = false;
-            if(hostfigs.length > 0){
+            if (hostfigs.length > 0) {
                 isLocaliP = hostfigs[0].equals("127.0.0.1");
             }
-            if(isLocaliP){
+            if (isLocaliP) {
                 throw new Exception("the hostname which Nimbus get is localhost");
             }
-        }catch(Exception e1){
-            LOG.error("get nimbus host error!", e1);
-            throw new RuntimeException(e1);
-        }
-        try {
-            this.tryToBeLeader(data.getConf());
         } catch (Exception e1) {
-            // TODO Auto-generated catch block
-            LOG.error("try to be leader error.", e1);
+            LOG.error("get nimbus host error!", e1);
             throw new RuntimeException(e1);
         }
+
         try {
-            data.getStormClusterState().update_nimbus_slave(hostPort,
-                    data.uptime());
+            data.getStormClusterState().update_nimbus_slave(hostPort, data.uptime());
+            data.getStormClusterState().update_nimbus_detail(hostPort, null);
         } catch (Exception e) {
             LOG.error("register nimbus host fail!", e);
             throw new RuntimeException();
         }
+        try {
+            update_nimbus_detail();
+        } catch (Exception e) {
+            LOG.error("register detail of nimbus fail!", e);
+            throw new RuntimeException();
+        }
+        try {
+            this.tryToBeLeader(data.getConf());
+        } catch (Exception e1) {
+            try {
+                data.getStormClusterState().unregister_nimbus_host(hostPort);
+                data.getStormClusterState().unregister_nimbus_detail(hostPort);
+            } catch (Exception e2) {
+                LOG.info("due to task errors, removing registered nimbus information");
+            } finally {
+                // TODO Auto-generated catch block
+                LOG.error("try to be leader error.", e1);
+                throw new RuntimeException(e1);
+            }
+        }
         callback = new RunnableCallback() {
             @Override
             public void run() {
@@ -121,6 +135,8 @@ public class FollowerRunnable implements Runnable {
             return true;
         }
 
+        // Two nimbus instances running on the same node aren't allowed,
+        // so checking the ip alone is enough here
         String[] part = zkMaster.split(":");
         return NetWorkUtils.equals(part[0], NetWorkUtils.ip());
     }
@@ -143,20 +159,21 @@ public class FollowerRunnable implements Runnable {
                 if (data.isLeader() == true) {
                     if (isZkLeader == false) {
                         LOG.info("New ZK master is " + master);
-                        JStormUtils.halt_process(1,
-                                "Lose ZK master node, halt process");
+                        JStormUtils.halt_process(1, "Lose ZK master node, halt process");
                         return;
                     }
                 }
 
                 if (isZkLeader == true) {
                     zkClusterState.unregister_nimbus_host(hostPort);
+                    zkClusterState.unregister_nimbus_detail(hostPort);
                     data.setLeader(true);
                     continue;
                 }
 
                 check();
                 zkClusterState.update_nimbus_slave(hostPort, data.uptime());
+                update_nimbus_detail();
             } catch (InterruptedException e) {
                 // TODO Auto-generated catch block
                 continue;
@@ -178,14 +195,29 @@ public class FollowerRunnable implements Runnable {
         StormClusterState clusterState = data.getStormClusterState();
 
         try {
-            String master_stormdist_root =
-                    StormConfig.masterStormdistRoot(data.getConf());
+            String master_stormdist_root = StormConfig.masterStormdistRoot(data.getConf());
 
-            List<String> code_ids =
-                    PathUtils.read_dir_contents(master_stormdist_root);
+            List<String> code_ids = PathUtils.read_dir_contents(master_stormdist_root);
 
             List<String> assignments_ids = clusterState.assignments(callback);
 
+            Map<String, Assignment> assignmentMap = new HashMap<String, Assignment>();
+            List<String> update_ids = new ArrayList<String>();
+            for (String id : assignments_ids) {
+                Assignment assignment = clusterState.assignment_info(id, null);
+                Long localCodeDownTS;
+                try {
+                    Long tmp = StormConfig.read_nimbus_topology_timestamp(data.getConf(), id);
+                    localCodeDownTS = (tmp == null ? 0L : tmp);
+                } catch (FileNotFoundException e) {
+                    localCodeDownTS = 0L;
+                }
+                if (assignment != null && assignment.isTopologyChange(localCodeDownTS.longValue())) {
+                    update_ids.add(id);
+                }
+                assignmentMap.put(id, assignment);
+            }
+
             List<String> done_ids = new ArrayList<String>();
 
             for (String id : code_ids) {
@@ -199,13 +231,15 @@ public class FollowerRunnable implements Runnable {
                 code_ids.remove(id);
             }
 
+            // re-download topology ids which have been updated
+            assignments_ids.addAll(update_ids);
+
             for (String topologyId : code_ids) {
                 deleteLocalTopology(topologyId);
             }
 
             for (String id : assignments_ids) {
-                Assignment assignment = clusterState.assignment_info(id, null);
-                downloadCodeFromMaster(assignment, id);
+                downloadCodeFromMaster(assignmentMap.get(id), id);
             }
         } catch (IOException e) {
             // TODO Auto-generated catch block
@@ -219,8 +253,7 @@ public class FollowerRunnable implements Runnable {
     }
 
     private void deleteLocalTopology(String topologyId) throws IOException {
-        String dir_to_delete =
-                StormConfig.masterStormdistRoot(data.getConf(), topologyId);
+        String dir_to_delete = StormConfig.masterStormdistRoot(data.getConf(), topologyId);
         try {
             PathUtils.rmr(dir_to_delete);
             LOG.info("delete:" + dir_to_delete + "successfully!");
@@ -230,47 +263,113 @@ public class FollowerRunnable implements Runnable {
         }
     }
 
-    private void downloadCodeFromMaster(Assignment assignment, String topologyId)
-            throws IOException, TException {
+    private void downloadCodeFromMaster(Assignment assignment, String topologyId) throws IOException, TException {
         try {
-            String localRoot =
-                    StormConfig.masterStormdistRoot(data.getConf(), topologyId);
-            String tmpDir =
-                    StormConfig.masterInbox(data.getConf()) + "/"
-                            + UUID.randomUUID().toString();
+            String localRoot = StormConfig.masterStormdistRoot(data.getConf(), topologyId);
+            String tmpDir = StormConfig.masterInbox(data.getConf()) + "/" + UUID.randomUUID().toString();
             String masterCodeDir = assignment.getMasterCodeDir();
-            JStormServerUtils.downloadCodeFromMaster(data.getConf(), tmpDir,
-                    masterCodeDir, topologyId, false);
+            JStormServerUtils.downloadCodeFromMaster(data.getConf(), tmpDir, masterCodeDir, topologyId, false);
 
-            FileUtils.moveDirectory(new File(tmpDir), new File(localRoot));
+            File srcDir = new File(tmpDir);
+            File destDir = new File(localRoot);
+            try {
+                FileUtils.moveDirectory(srcDir, destDir);
+            } catch (FileExistsException e) {
+                FileUtils.copyDirectory(srcDir, destDir);
+                FileUtils.deleteQuietly(srcDir);
+            }
+            // Update downloadCode timeStamp
+            StormConfig.write_nimbus_topology_timestamp(data.getConf(), topologyId, System.currentTimeMillis());
         } catch (TException e) {
             // TODO Auto-generated catch block
-            LOG.error(e + " downloadStormCode failed " + "topologyId:"
-                    + topologyId + "masterCodeDir:"
-                    + assignment.getMasterCodeDir());
+            LOG.error(e + " downloadStormCode failed " + "topologyId:" + topologyId + "masterCodeDir:" + assignment.getMasterCodeDir());
             throw e;
         }
-        LOG.info("Finished downloading code for topology id " + topologyId
-                + " from " + assignment.getMasterCodeDir());
+        LOG.info("Finished downloading code for topology id " + topologyId + " from " + assignment.getMasterCodeDir());
     }
 
     private void tryToBeLeader(final Map conf) throws Exception {
-        RunnableCallback masterCallback = new RunnableCallback() {
-            @Override
-            public void run() {
-                try {
-                    tryToBeLeader(conf);
-                } catch (Exception e) {
-                    LOG.error("To be master error", e);
-                    JStormUtils.halt_process(30,
-                            "Cant't to be master" + e.getMessage());
+        boolean allowed = check_nimbus_priority();
+        
+        if (allowed) {
+            RunnableCallback masterCallback = new RunnableCallback() {
+                @Override
+                public void run() {
+                    try {
+                        tryToBeLeader(conf);
+                    } catch (Exception e) {
+                        LOG.error("To be master error", e);
+                        JStormUtils.halt_process(30, "Cant't to be master" + e.getMessage());
+                    }
                 }
-            }
-        };
-        data.getStormClusterState().try_to_be_leader(Cluster.MASTER_SUBTREE,
-                hostPort, masterCallback);
+            };
+            LOG.info("This nimbus can be  leader");
+            data.getStormClusterState().try_to_be_leader(Cluster.MASTER_SUBTREE, hostPort, masterCallback);
+        } else {
+            LOG.info("This nimbus can't be leader");
+        }
     }
+    /**
+     * Compares this nimbus with the other nimbus nodes to get its priority
+     *
+     * @throws Exception
+     */
+    private boolean check_nimbus_priority() throws Exception {
+
+        int gap = update_nimbus_detail();
+        if (gap == 0) {
+            return true;
+        }
+
+        int left = SLAVE_NIMBUS_WAIT_TIME;
+        while (left > 0) {
+            LOG.info("After " + left + " seconds, nimbus will try to be leader!");
+            Thread.sleep(10 * 1000);
+            left -= 10;
+        }
+
+        StormClusterState zkClusterState = data.getStormClusterState();
+
+        List<String> followers = zkClusterState.list_dirs(Cluster.NIMBUS_SLAVE_DETAIL_SUBTREE, false);
+        if (followers == null || followers.size() == 0) {
+            return false;
+        }
 
+        for (String follower : followers) {
+            if (follower != null && !follower.equals(hostPort)) {
+                Map bMap = zkClusterState.get_nimbus_detail(follower, false);
+                if (bMap != null) {
+                    Object object = bMap.get(NIMBUS_DIFFER_COUNT_ZK);
+                    if (object != null && JStormUtils.parseInt(object) < gap) {
+                        LOG.info("Current node can't be leader because {} has higher priority", follower);
+                        return false;
+                    }
+                }
+            }
+        }
+
+        return true;
+    }
+
+    private int update_nimbus_detail() throws Exception {
+
+        // differ count = topologies assigned in zk minus topologies whose binaries exist locally
+        StormClusterState zkClusterState = data.getStormClusterState();
+        String master_stormdist_root = StormConfig.masterStormdistRoot(data.getConf());
+        List<String> code_ids = PathUtils.read_dir_contents(master_stormdist_root);
+        List<String> assignments_ids = data.getStormClusterState().assignments(callback);
+        assignments_ids.removeAll(code_ids);
+
+        Map mtmp = zkClusterState.get_nimbus_detail(hostPort, false);
+        if (mtmp == null) {
+            mtmp = new HashMap();
+        }
+        mtmp.put(NIMBUS_DIFFER_COUNT_ZK, assignments_ids.size());
+        zkClusterState.update_nimbus_detail(hostPort, mtmp);
+        LOG.debug("update nimbus's detail " + mtmp);
+        return assignments_ids.size();
+    }
     /**
      * Check whether current node is master or not
      * 
@@ -291,13 +390,11 @@ public class FollowerRunnable implements Runnable {
                 // current process own master
                 return;
             }
-            LOG.warn("Current Nimbus has start thrift, but fail to own zk master :"
-                    + zkHost);
+            LOG.warn("Current Nimbus has start thrift, but fail to own zk master :" + zkHost);
         }
 
         // current process doesn't own master
-        String err =
-                "Current Nimubs fail to own nimbus_master, should halt process";
+        String err = "Current Nimubs fail to own nimbus_master, should halt process";
         LOG.error(err);
         JStormUtils.halt_process(0, err);
 

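The leader-election priority in check_nimbus_priority boils down to "the nimbus missing
the fewest topology binaries wins": each follower publishes its differ count (assignments
in zk without local code) under the nimbus detail subtree, and a follower backs off if any
peer reports a strictly smaller count. A stripped-down sketch of just that comparison,
independent of zk; the hostnames and counts are made up:

    import java.util.HashMap;
    import java.util.Map;

    public class NimbusPrioritySketch {
        static boolean canBeLeader(String self, Map<String, Integer> differCounts) {
            int myGap = differCounts.get(self);
            if (myGap == 0) {
                return true;                 // nothing missing locally, highest priority
            }
            for (Map.Entry<String, Integer> e : differCounts.entrySet()) {
                if (!e.getKey().equals(self) && e.getValue() < myGap) {
                    return false;            // a peer is more up to date
                }
            }
            return true;
        }

        public static void main(String[] args) {
            Map<String, Integer> counts = new HashMap<String, Integer>();
            counts.put("10.0.0.1:7627", 0);
            counts.put("10.0.0.2:7627", 2);
            System.out.println(canBeLeader("10.0.0.2:7627", counts)); // false
        }
    }
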
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/IToplogyScheduler.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/IToplogyScheduler.java b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/IToplogyScheduler.java
index a9d9b92..a6e6093 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/IToplogyScheduler.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/IToplogyScheduler.java
@@ -26,6 +26,5 @@ import com.alibaba.jstorm.utils.FailedAssignTopologyException;
 public interface IToplogyScheduler {
     void prepare(Map conf);
 
-    Set<ResourceWorkerSlot> assignTasks(TopologyAssignContext contex)
-            throws FailedAssignTopologyException;
+    Set<ResourceWorkerSlot> assignTasks(TopologyAssignContext contex) throws FailedAssignTopologyException;
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/MonitorRunnable.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/MonitorRunnable.java b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/MonitorRunnable.java
index 8342b79..b7c8e89 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/MonitorRunnable.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/MonitorRunnable.java
@@ -17,27 +17,28 @@
  */
 package com.alibaba.jstorm.schedule;
 
-import java.util.Date;
-import java.util.List;
-import java.util.Set;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import backtype.storm.Config;
+import backtype.storm.generated.TaskHeartbeat;
+import backtype.storm.generated.TopologyTaskHbInfo;
 
 import com.alibaba.jstorm.cluster.StormClusterState;
 import com.alibaba.jstorm.daemon.nimbus.NimbusData;
 import com.alibaba.jstorm.daemon.nimbus.NimbusUtils;
 import com.alibaba.jstorm.daemon.nimbus.StatusType;
 import com.alibaba.jstorm.schedule.default_assign.ResourceWorkerSlot;
+import com.alibaba.jstorm.utils.JStormUtils;
 import com.alibaba.jstorm.utils.TimeFormat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static com.alibaba.jstorm.daemon.nimbus.TopologyMetricsRunnable.TaskDeadEvent;
+
+import java.util.*;
 
 /**
- * 
- * Scan all task's heartbeat, if task isn't alive, DO
- * NimbusUtils.transition(monitor)
+ * Scans all tasks' heartbeats; if a task isn't alive, do NimbusUtils.transition(monitor)
  * 
  * @author Longda
- * 
  */
 public class MonitorRunnable implements Runnable {
     private static Logger LOG = LoggerFactory.getLogger(MonitorRunnable.class);
@@ -49,9 +50,7 @@ public class MonitorRunnable implements Runnable {
     }
 
     /**
-     * @@@ Todo when one topology is being reassigned, the topology should be
-     *     skip check
-     * @param data
+     * @@@ Todo when one topology is being reassigned, the topology should skip the check
      */
     @Override
     public void run() {
@@ -80,44 +79,84 @@ public class MonitorRunnable implements Runnable {
                     LOG.info("Failed to get task ids of " + topologyid);
                     continue;
                 }
+                Assignment assignment = clusterState.assignment_info(topologyid, null);
 
+                Set<Integer> deadTasks = new HashSet<Integer>();
                 boolean needReassign = false;
                 for (Integer task : taskIds) {
-                    boolean isTaskDead =
-                            NimbusUtils.isTaskDead(data, topologyid, task);
+                    boolean isTaskDead = NimbusUtils.isTaskDead(data, topologyid, task);
                     if (isTaskDead == true) {
-                        LOG.info("Found " + topologyid + ",taskid:" + task
-                                + " is dead");
-
-                        ResourceWorkerSlot resource = null;
-                        Assignment assignment =
-                                clusterState.assignment_info(topologyid, null);
-                        if (assignment != null)
-                            resource = assignment.getWorkerByTaskId(task);
-                        if (resource != null) {
-                            Date now = new Date();
-                            String nowStr = TimeFormat.getSecond(now);
-                            String errorInfo =
-                                    "Task-" + task + " is dead on "
-                                            + resource.getHostname() + ":"
-                                            + resource.getPort() + ", "
-                                            + nowStr;
-                            LOG.info(errorInfo);
-                            clusterState.report_task_error(topologyid, task,
-                                    errorInfo);
-                        }
+                        deadTasks.add(task);
                         needReassign = true;
-                        break;
                     }
                 }
+
+
+                TopologyTaskHbInfo topologyHbInfo = data.getTasksHeartbeat().get(topologyid);
                 if (needReassign == true) {
-                    NimbusUtils.transition(data, topologyid, false,
-                            StatusType.monitor);
+                    if (topologyHbInfo != null) {
+                        int topologyMasterId = topologyHbInfo.get_topologyMasterId();
+                        if (deadTasks.contains(topologyMasterId)) {
+                            deadTasks.clear();
+                            if (assignment != null) {
+                                ResourceWorkerSlot resource = assignment.getWorkerByTaskId(topologyMasterId);
+                                if (resource != null)
+                                    deadTasks.addAll(resource.getTasks());
+                                else
+                                    deadTasks.add(topologyMasterId);
+                            }
+                        } else {
+                            Map<Integer, TaskHeartbeat> taskHbs = topologyHbInfo.get_taskHbs();
+                            int launchTime = JStormUtils.parseInt(data.getConf().get(Config.NIMBUS_TASK_LAUNCH_SECS));
+                            if (taskHbs == null || taskHbs.get(topologyMasterId) == null || taskHbs.get(topologyMasterId).get_uptime() < launchTime) {
+                                /*try {
+                                    clusterState.topology_heartbeat(topologyid, topologyHbInfo);
+                                } catch (Exception e) {
+                                    LOG.error("Failed to update task heartbeat info to ZK for " + topologyid, e);
+                                }*/
+                                return;
+                            }
+                        }
+                        Map<Integer, ResourceWorkerSlot> deadTaskWorkers = new HashMap<>();
+                        for (Integer task : deadTasks) {
+                            LOG.info("Found " + topologyid + ",taskid:" + task + " is dead");
+
+                            ResourceWorkerSlot resource = null;
+                            if (assignment != null)
+                                resource = assignment.getWorkerByTaskId(task);
+                            if (resource != null) {
+                                deadTaskWorkers.put(task, resource);
+                                Date now = new Date();
+                                String nowStr = TimeFormat.getSecond(now);
+                                String errorInfo = "Task-" + task + " is dead on " + resource.getHostname() + ":" + resource.getPort() + ", " + nowStr;
+                                LOG.info(errorInfo);
+                                clusterState.report_task_error(topologyid, task, errorInfo, null);
+                            }
+                        }
+
+                        if (deadTaskWorkers.size() > 0) {
+                            // notify jstorm monitor
+                            TaskDeadEvent event = new TaskDeadEvent();
+                            event.clusterName = data.getClusterName();
+                            event.topologyId = topologyid;
+                            event.deadTasks = deadTaskWorkers;
+                            event.timestamp = System.currentTimeMillis();
+                            data.getMetricRunnable().pushEvent(event);
+                        }
+                    }
+                    NimbusUtils.transition(data, topologyid, false, StatusType.monitor);
+                }
+                
+                if (topologyHbInfo != null) {
+                    try {
+                        clusterState.topology_heartbeat(topologyid, topologyHbInfo);
+                    } catch (Exception e) {
+                        LOG.error("Failed to update task heartbeat info to ZK for " + topologyid, e);
+                    }
                 }
             }
 
         } catch (Exception e) {
-            // TODO Auto-generated catch block
             LOG.error(e.getMessage(), e);
         }
     }
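
The rewritten loop above changes the monitor's behavior in two ways: it now
collects every dead task instead of stopping at the first one, and it
special-cases the topology master. When the master task itself is dead, the
dead set is widened to all tasks on the master's worker; when the master's
heartbeat is merely missing or younger than nimbus.task.launch.secs, the whole
scan round is skipped. A self-contained sketch of the expansion rule, with
simplified stand-in types (workerTasks maps a task id to all task ids sharing
its worker; names are illustrative):

    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    // Standalone illustration of the dead-set expansion rule in the patch.
    public final class DeadSetExpansion {
        static Set<Integer> expand(Set<Integer> deadTasks, int topologyMasterId,
                                   Map<Integer, Set<Integer>> workerTasks) {
            if (!deadTasks.contains(topologyMasterId)) {
                return deadTasks; // master alive: reassign only the dead tasks
            }
            // Master dead: treat every task in the master's worker as dead,
            // mirroring deadTasks.addAll(resource.getTasks()) above.
            Set<Integer> expanded = new HashSet<Integer>();
            Set<Integer> tasksOnMasterWorker = workerTasks.get(topologyMasterId);
            if (tasksOnMasterWorker != null) {
                expanded.addAll(tasksOnMasterWorker);
            } else {
                expanded.add(topologyMasterId); // no assignment info: fall back
            }
            return expanded;
        }
    }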

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/TopologyAssignContext.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/TopologyAssignContext.java b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/TopologyAssignContext.java
index 12cdad0..9a3a879 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/TopologyAssignContext.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/TopologyAssignContext.java
@@ -38,6 +38,8 @@ public class TopologyAssignContext {
     public static final int ASSIGN_TYPE_MONITOR = 2; // monitor a topology, some
                                                      // tasks are dead
 
+    protected String topologyId;
+
     protected int assignType;
 
     protected StormTopology rawTopology;
@@ -51,6 +53,9 @@ public class TopologyAssignContext {
 
     protected Map<String, SupervisorInfo> cluster;
 
+    protected int topoMasterTaskId;
+    protected boolean assignSingleWorkerForTM = false;
+
     protected Map<Integer, String> taskToComponent;
 
     protected Set<Integer> allTaskIds; // all tasks
@@ -76,6 +81,17 @@ public class TopologyAssignContext {
         this.deadTaskIds = copy.getDeadTaskIds();
         this.unstoppedTaskIds = copy.getUnstoppedTaskIds();
         this.isReassign = copy.isReassign();
+        this.topologyId = copy.getTopologyId();
+        this.topoMasterTaskId = copy.getTopologyMasterTaskId();
+        this.assignSingleWorkerForTM = copy.getAssignSingleWorkerForTM();
+    }
+
+    public String getTopologyId() {
+        return topologyId;
+    }
+
+    public void setTopologyId(String topologyId) {
+        this.topologyId = topologyId;
     }
 
     public int getAssignType() {
@@ -151,8 +167,7 @@ public class TopologyAssignContext {
     }
 
     public static boolean isAssignTypeValid(int type) {
-        return (type == ASSIGN_TYPE_NEW) || (type == ASSIGN_TYPE_REBALANCE)
-                || (type == ASSIGN_TYPE_MONITOR);
+        return (type == ASSIGN_TYPE_NEW) || (type == ASSIGN_TYPE_REBALANCE) || (type == ASSIGN_TYPE_MONITOR);
     }
 
     public Set<ResourceWorkerSlot> getUnstoppedWorkers() {
@@ -171,9 +186,24 @@ public class TopologyAssignContext {
         this.isReassign = isReassign;
     }
 
+    public int getTopologyMasterTaskId() {
+        return topoMasterTaskId;
+    }
+
+    public void setTopologyMasterTaskId(int taskId) {
+        this.topoMasterTaskId = taskId;
+    }
+
+    public boolean getAssignSingleWorkerForTM() {
+        return assignSingleWorkerForTM;
+    }
+
+    public void setAssignSingleWorkerForTM(boolean isAssign) {
+        this.assignSingleWorkerForTM = isAssign;
+    }
+
     @Override
     public String toString() {
-        return ToStringBuilder.reflectionToString(this,
-                ToStringStyle.SHORT_PREFIX_STYLE);
+        return ToStringBuilder.reflectionToString(this, ToStringStyle.SHORT_PREFIX_STYLE);
     }
 }
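
The new fields travel with the context through the copy constructor, so
downstream schedulers can read them without extra plumbing. Populating them
might look like this (values are illustrative, and a default constructor is
assumed alongside the copy constructor shown):

    TopologyAssignContext ctx = new TopologyAssignContext();
    ctx.setTopologyId("SequenceTest-1-1445328000");  // illustrative topology id
    ctx.setTopologyMasterTaskId(1);                  // task id of the topology master
    ctx.setAssignSingleWorkerForTM(true);            // reserve a dedicated worker for it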

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/DefaultTopologyAssignContext.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/DefaultTopologyAssignContext.java b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/DefaultTopologyAssignContext.java
index 9eb2775..78649aa 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/DefaultTopologyAssignContext.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/DefaultTopologyAssignContext.java
@@ -17,30 +17,20 @@
  */
 package com.alibaba.jstorm.schedule.default_assign;
 
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-
-import org.apache.commons.lang.builder.ToStringBuilder;
-import org.apache.commons.lang.builder.ToStringStyle;
-
 import backtype.storm.Config;
-import backtype.storm.generated.Bolt;
-import backtype.storm.generated.ComponentCommon;
-import backtype.storm.generated.SpoutSpec;
-import backtype.storm.generated.StateSpoutSpec;
-import backtype.storm.generated.StormTopology;
+import backtype.storm.generated.*;
 import backtype.storm.utils.ThriftTopologyUtils;
-
+import com.alibaba.jstorm.client.ConfigExtension;
 import com.alibaba.jstorm.cluster.Common;
 import com.alibaba.jstorm.daemon.supervisor.SupervisorInfo;
 import com.alibaba.jstorm.schedule.TopologyAssignContext;
 import com.alibaba.jstorm.utils.FailedAssignTopologyException;
 import com.alibaba.jstorm.utils.JStormUtils;
+import org.apache.commons.lang.builder.ToStringBuilder;
+import org.apache.commons.lang.builder.ToStringStyle;
+
+import java.util.*;
+import java.util.Map.Entry;
 
 public class DefaultTopologyAssignContext extends TopologyAssignContext {
 
@@ -49,19 +39,16 @@ public class DefaultTopologyAssignContext extends TopologyAssignContext {
     private final Map<String, List<String>> hostToSid;
     private final Set<ResourceWorkerSlot> oldWorkers;
     private final Map<String, List<Integer>> componentTasks;
-    private final Set<ResourceWorkerSlot> unstoppedWorkers =
-            new HashSet<ResourceWorkerSlot>();
+    private final Set<ResourceWorkerSlot> unstoppedWorkers = new HashSet<ResourceWorkerSlot>();
     private final int totalWorkerNum;
     private final int unstoppedWorkerNum;
 
     private int computeWorkerNum() {
-        Integer settingNum =
-                JStormUtils.parseInt(stormConf.get(Config.TOPOLOGY_WORKERS));
+        Integer settingNum = JStormUtils.parseInt(stormConf.get(Config.TOPOLOGY_WORKERS));
 
-        int hintSum = 0;
+        int ret = 0, hintSum = 0, tmCount = 0;
 
-        Map<String, Object> components =
-                ThriftTopologyUtils.getComponents(sysTopology);
+        Map<String, Object> components = ThriftTopologyUtils.getComponents(sysTopology);
         for (Entry<String, Object> entry : components.entrySet()) {
             String componentName = entry.getKey();
             Object component = entry.getValue();
@@ -78,14 +65,35 @@ public class DefaultTopologyAssignContext extends TopologyAssignContext {
             }
 
             int hint = common.get_parallelism_hint();
+            if (componentName.equals(Common.TOPOLOGY_MASTER_COMPONENT_ID)) {
+                tmCount += hint;
+                continue;
+            }
             hintSum += hint;
         }
 
         if (settingNum == null) {
-            return hintSum;
+            ret = hintSum;
         } else {
-            return Math.min(settingNum, hintSum);
+            ret = Math.min(settingNum, hintSum);
         }
+
+        Boolean isTmSingleWorker = ConfigExtension.getTopologyMasterSingleWorker(stormConf);
+        if (isTmSingleWorker != null) {
+            if (isTmSingleWorker == true) {
+                // Assign a single worker for topology master
+                ret += tmCount;
+                setAssignSingleWorkerForTM(true);
+            }
+        } else {
+            // If not configured, judge this config by worker number
+            if (ret >= 10) {
+                ret += tmCount;
+                setAssignSingleWorkerForTM(true);
+            }
+        }
+
+        return ret;
     }
 
     public int computeUnstoppedAssignments() {
@@ -149,8 +157,7 @@ public class DefaultTopologyAssignContext extends TopologyAssignContext {
         try {
             sysTopology = Common.system_topology(stormConf, rawTopology);
         } catch (Exception e) {
-            throw new FailedAssignTopologyException(
-                    "Failed to generate system topology");
+            throw new FailedAssignTopologyException("Failed to generate system topology");
         }
 
         sidToHostname = generateSidToHost();
@@ -215,7 +222,6 @@ public class DefaultTopologyAssignContext extends TopologyAssignContext {
     }
 
     public String toDetailString() {
-        return ToStringBuilder.reflectionToString(this,
-                ToStringStyle.SHORT_PREFIX_STYLE);
+        return ToStringBuilder.reflectionToString(this, ToStringStyle.SHORT_PREFIX_STYLE);
     }
 }
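
The net effect of the new computeWorkerNum: the topology master's parallelism
is excluded from the hint sum, and its worker count is added back only when
the master gets a dedicated worker, either because the config option is
explicitly true or, when unset, because the topology is large enough (10 or
more workers). Condensed into a standalone form (illustrative names):

    // Condensed, standalone form of the worker-count rule above.
    public final class WorkerCount {
        static int compute(Integer settingNum, int hintSum, int tmCount,
                           Boolean isTmSingleWorker) {
            // Base count: configured worker number capped by the parallelism
            // sum of all components except the topology master.
            int ret = (settingNum == null) ? hintSum : Math.min(settingNum, hintSum);

            // Dedicated worker(s) for the topology master: explicit config
            // wins; otherwise implied once the topology has >= 10 workers.
            boolean singleWorkerForTM =
                    (isTmSingleWorker != null) ? isTmSingleWorker : (ret >= 10);
            if (singleWorkerForTM) {
                ret += tmCount;
            }
            return ret;
        }
    }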

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/DefaultTopologyScheduler.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/DefaultTopologyScheduler.java b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/DefaultTopologyScheduler.java
index 5df7de4..99ba9da 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/DefaultTopologyScheduler.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/DefaultTopologyScheduler.java
@@ -32,8 +32,7 @@ import com.alibaba.jstorm.schedule.TopologyAssignContext;
 import com.alibaba.jstorm.utils.FailedAssignTopologyException;
 
 public class DefaultTopologyScheduler implements IToplogyScheduler {
-    private static final Logger LOG = LoggerFactory
-            .getLogger(DefaultTopologyScheduler.class);
+    private static final Logger LOG = LoggerFactory.getLogger(DefaultTopologyScheduler.class);
 
     private Map nimbusConf;
 
@@ -57,8 +56,7 @@ public class DefaultTopologyScheduler implements IToplogyScheduler {
         for (Integer task : canFree) {
             ResourceWorkerSlot worker = oldAssigns.getWorkerByTaskId(task);
             if (worker == null) {
-                LOG.warn("When free rebalance resource, no ResourceAssignment of task "
-                        + task);
+                LOG.warn("When free rebalance resource, no ResourceAssignment of task " + task);
                 continue;
             }
 
@@ -79,25 +77,22 @@ public class DefaultTopologyScheduler implements IToplogyScheduler {
         } else if (assignType == TopologyAssignContext.ASSIGN_TYPE_REBALANCE) {
             needAssign.addAll(context.getAllTaskIds());
             needAssign.removeAll(context.getUnstoppedTaskIds());
-        } else {
-            // monitor
-            needAssign.addAll(context.getDeadTaskIds());
+        } else { // ASSIGN_TYPE_MONITOR
+            Set<Integer> deadTasks = context.getDeadTaskIds();
+            needAssign.addAll(deadTasks);
         }
 
         return needAssign;
     }
 
     /**
-     * Get the task Map which the task is alive and will be kept Only when type
-     * is ASSIGN_TYPE_MONITOR, it is valid
+     * Get the worker set whose tasks are alive and will be kept. It is only valid when type is ASSIGN_TYPE_MONITOR
      * 
      * @param defaultContext
      * @param needAssigns
      * @return
      */
-    public Set<ResourceWorkerSlot> getKeepAssign(
-            DefaultTopologyAssignContext defaultContext,
-            Set<Integer> needAssigns) {
+    public Set<ResourceWorkerSlot> getKeepAssign(DefaultTopologyAssignContext defaultContext, Set<Integer> needAssigns) {
 
         Set<Integer> keepAssignIds = new HashSet<Integer>();
         keepAssignIds.addAll(defaultContext.getAllTaskIds());
@@ -125,21 +120,17 @@ public class DefaultTopologyScheduler implements IToplogyScheduler {
     }
 
     @Override
-    public Set<ResourceWorkerSlot> assignTasks(TopologyAssignContext context)
-            throws FailedAssignTopologyException {
+    public Set<ResourceWorkerSlot> assignTasks(TopologyAssignContext context) throws FailedAssignTopologyException {
 
         int assignType = context.getAssignType();
         if (TopologyAssignContext.isAssignTypeValid(assignType) == false) {
-            throw new FailedAssignTopologyException("Invalide Assign Type "
-                    + assignType);
+            throw new FailedAssignTopologyException("Invalid Assign Type " + assignType);
         }
 
-        DefaultTopologyAssignContext defaultContext =
-                new DefaultTopologyAssignContext(context);
+        DefaultTopologyAssignContext defaultContext = new DefaultTopologyAssignContext(context);
         if (assignType == TopologyAssignContext.ASSIGN_TYPE_REBALANCE) {
             /**
-             * Mark all current assigned worker as available. Current assignment
-             * will be restored in task scheduler.
+             * Mark all currently assigned workers as available. The current assignment will be restored in the task scheduler.
              */
             freeUsed(defaultContext);
         }
@@ -148,36 +139,24 @@ public class DefaultTopologyScheduler implements IToplogyScheduler {
 
         Set<Integer> needAssignTasks = getNeedAssignTasks(defaultContext);
 
-        Set<ResourceWorkerSlot> keepAssigns =
-                getKeepAssign(defaultContext, needAssignTasks);
+        Set<ResourceWorkerSlot> keepAssigns = getKeepAssign(defaultContext, needAssignTasks);
 
         // please use tree map to make task sequence
         Set<ResourceWorkerSlot> ret = new HashSet<ResourceWorkerSlot>();
         ret.addAll(keepAssigns);
         ret.addAll(defaultContext.getUnstoppedWorkers());
 
-        int allocWorkerNum =
-                defaultContext.getTotalWorkerNum()
-                        - defaultContext.getUnstoppedWorkerNum()
-                        - keepAssigns.size();
-        LOG.info("allocWorkerNum=" + allocWorkerNum + ", totalWorkerNum="
-                + defaultContext.getTotalWorkerNum());
+        int allocWorkerNum = defaultContext.getTotalWorkerNum() - defaultContext.getUnstoppedWorkerNum() - keepAssigns.size();
+        LOG.info("allocWorkerNum=" + allocWorkerNum + ", totalWorkerNum=" + defaultContext.getTotalWorkerNum() + ", keepWorkerNum=" + keepAssigns.size());
 
         if (allocWorkerNum <= 0) {
-            LOG.warn("Don't need assign workers, all workers are fine "
-                    + defaultContext.toDetailString());
-            throw new FailedAssignTopologyException(
-                    "Don't need assign worker, all workers are fine ");
+            LOG.warn("Don't need to assign workers, all workers are fine " + defaultContext.toDetailString());
+            throw new FailedAssignTopologyException("Don't need to assign workers, all workers are fine ");
         }
 
-        List<ResourceWorkerSlot> availableWorkers =
-                WorkerScheduler.getInstance().getAvailableWorkers(
-                        defaultContext, needAssignTasks, allocWorkerNum);
-        TaskScheduler taskScheduler =
-                new TaskScheduler(defaultContext, needAssignTasks,
-                        availableWorkers);
-        Set<ResourceWorkerSlot> assignment =
-                new HashSet<ResourceWorkerSlot>(taskScheduler.assign());
+        List<ResourceWorkerSlot> availableWorkers = WorkerScheduler.getInstance().getAvailableWorkers(defaultContext, needAssignTasks, allocWorkerNum);
+        TaskScheduler taskScheduler = new TaskScheduler(defaultContext, needAssignTasks, availableWorkers);
+        Set<ResourceWorkerSlot> assignment = new HashSet<ResourceWorkerSlot>(taskScheduler.assign());
         ret.addAll(assignment);
 
         LOG.info("Keep Alive slots:" + keepAssigns);
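
For reference, the task set that actually needs (re)assignment depends on the
assign type, and the worker budget is whatever is left after unstopped and
kept workers are subtracted. A condensed sketch of both rules (simplified
signatures, illustrative class name):

    import java.util.HashSet;
    import java.util.Set;

    // Condensed restatement of getNeedAssignTasks plus the worker budget.
    public final class AssignMath {
        static Set<Integer> needAssignTasks(int assignType, TopologyAssignContext ctx) {
            Set<Integer> needAssign = new HashSet<Integer>();
            if (assignType == TopologyAssignContext.ASSIGN_TYPE_NEW) {
                needAssign.addAll(ctx.getAllTaskIds());          // new submit: everything
            } else if (assignType == TopologyAssignContext.ASSIGN_TYPE_REBALANCE) {
                needAssign.addAll(ctx.getAllTaskIds());
                needAssign.removeAll(ctx.getUnstoppedTaskIds()); // keep still-running tasks
            } else {                                             // ASSIGN_TYPE_MONITOR
                needAssign.addAll(ctx.getDeadTaskIds());         // only the dead tasks
            }
            return needAssign;
        }

        // Worker budget for the new assignment, as computed above:
        // allocWorkerNum = totalWorkerNum - unstoppedWorkerNum - keepAssigns.size()
    }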

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/ResourceWorkerSlot.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/ResourceWorkerSlot.java b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/ResourceWorkerSlot.java
index c218f52..df8eba5 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/ResourceWorkerSlot.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/ResourceWorkerSlot.java
@@ -39,8 +39,7 @@ import com.alibaba.jstorm.utils.NetWorkUtils;
 //one worker 's assignment
 public class ResourceWorkerSlot extends WorkerSlot implements Serializable {
 
-    public static Logger LOG = LoggerFactory
-            .getLogger(ResourceWorkerSlot.class);
+    public static Logger LOG = LoggerFactory.getLogger(ResourceWorkerSlot.class);
     private static final long serialVersionUID = 9138386287559932411L;
 
     private String hostname;
@@ -58,16 +57,14 @@ public class ResourceWorkerSlot extends WorkerSlot implements Serializable {
         super(supervisorId, port);
     }
 
-    public ResourceWorkerSlot(WorkerAssignment worker,
-            Map<String, List<Integer>> componentToTask) {
+    public ResourceWorkerSlot(WorkerAssignment worker, Map<String, List<Integer>> componentToTask) {
         super(worker.getNodeId(), worker.getPort());
         this.hostname = worker.getHostName();
         this.tasks = new HashSet<Integer>();
         this.cpu = worker.getCpu();
         this.memSize = worker.getMem();
         this.jvm = worker.getJvm();
-        for (Entry<String, Integer> entry : worker.getComponentToNum()
-                .entrySet()) {
+        for (Entry<String, Integer> entry : worker.getComponentToNum().entrySet()) {
             List<Integer> tasks = componentToTask.get(entry.getKey());
             if (tasks == null || tasks.size() == 0)
                 continue;
@@ -121,12 +118,10 @@ public class ResourceWorkerSlot extends WorkerSlot implements Serializable {
 
     @Override
     public String toString() {
-        return ToStringBuilder.reflectionToString(this,
-                ToStringStyle.SHORT_PREFIX_STYLE);
+        return ToStringBuilder.reflectionToString(this, ToStringStyle.SHORT_PREFIX_STYLE);
     }
 
-    public boolean compareToUserDefineWorker(WorkerAssignment worker,
-            Map<Integer, String> taskToComponent) {
+    public boolean compareToUserDefineWorker(WorkerAssignment worker, Map<Integer, String> taskToComponent) {
         int cpu = worker.getCpu();
         if (cpu != 0 && this.cpu != cpu)
             return false;

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/Selector/AbstractSelector.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/Selector/AbstractSelector.java b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/Selector/AbstractSelector.java
index eed0e39..1130b1a 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/Selector/AbstractSelector.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/schedule/default_assign/Selector/AbstractSelector.java
@@ -36,8 +36,7 @@ public abstract class AbstractSelector implements Selector {
         this.context = context;
     }
 
-    protected List<ResourceWorkerSlot> selectWorker(
-            List<ResourceWorkerSlot> list, Comparator<ResourceWorkerSlot> c) {
+    protected List<ResourceWorkerSlot> selectWorker(List<ResourceWorkerSlot> list, Comparator<ResourceWorkerSlot> c) {
         List<ResourceWorkerSlot> result = new ArrayList<ResourceWorkerSlot>();
         ResourceWorkerSlot best = null;
         for (ResourceWorkerSlot worker : list) {
@@ -58,8 +57,7 @@ public abstract class AbstractSelector implements Selector {
     }
 
     @Override
-    public List<ResourceWorkerSlot> select(List<ResourceWorkerSlot> result,
-            String name) {
+    public List<ResourceWorkerSlot> select(List<ResourceWorkerSlot> result, String name) {
         if (result.size() == 1)
             return result;
         result = this.selectWorker(result, workerComparator.get(name));
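
The selection loop keeps every worker tied for best under the given
comparator rather than a single winner, so the next selector in the chain can
break the remaining ties. The same pattern in generic form (a sketch, not the
patched code):

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;

    public final class BestSelector {
        // Keep all elements tied for "best" under c (smaller compares first).
        static <T> List<T> selectBest(List<T> candidates, Comparator<T> c) {
            List<T> result = new ArrayList<T>();
            T best = null;
            for (T item : candidates) {
                if (best == null || c.compare(item, best) < 0) {
                    best = item;        // strictly better: restart the tie set
                    result.clear();
                    result.add(item);
                } else if (c.compare(item, best) == 0) {
                    result.add(item);   // tied with the current best
                }
            }
            return result;
        }
    }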


http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/TaskHeartbeat.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/TaskHeartbeat.java b/jstorm-core/src/main/java/backtype/storm/generated/TaskHeartbeat.java
new file mode 100644
index 0000000..6afb70a
--- /dev/null
+++ b/jstorm-core/src/main/java/backtype/storm/generated/TaskHeartbeat.java
@@ -0,0 +1,482 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.2)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package backtype.storm.generated;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
+public class TaskHeartbeat implements org.apache.thrift.TBase<TaskHeartbeat, TaskHeartbeat._Fields>, java.io.Serializable, Cloneable, Comparable<TaskHeartbeat> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TaskHeartbeat");
+
+  private static final org.apache.thrift.protocol.TField TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("time", org.apache.thrift.protocol.TType.I32, (short)1);
+  private static final org.apache.thrift.protocol.TField UPTIME_FIELD_DESC = new org.apache.thrift.protocol.TField("uptime", org.apache.thrift.protocol.TType.I32, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new TaskHeartbeatStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new TaskHeartbeatTupleSchemeFactory());
+  }
+
+  private int time; // required
+  private int uptime; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    TIME((short)1, "time"),
+    UPTIME((short)2, "uptime");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // TIME
+          return TIME;
+        case 2: // UPTIME
+          return UPTIME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __TIME_ISSET_ID = 0;
+  private static final int __UPTIME_ISSET_ID = 1;
+  private byte __isset_bitfield = 0;
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.TIME, new org.apache.thrift.meta_data.FieldMetaData("time", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    tmpMap.put(_Fields.UPTIME, new org.apache.thrift.meta_data.FieldMetaData("uptime", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TaskHeartbeat.class, metaDataMap);
+  }
+
+  public TaskHeartbeat() {
+  }
+
+  public TaskHeartbeat(
+    int time,
+    int uptime)
+  {
+    this();
+    this.time = time;
+    set_time_isSet(true);
+    this.uptime = uptime;
+    set_uptime_isSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public TaskHeartbeat(TaskHeartbeat other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.time = other.time;
+    this.uptime = other.uptime;
+  }
+
+  public TaskHeartbeat deepCopy() {
+    return new TaskHeartbeat(this);
+  }
+
+  @Override
+  public void clear() {
+    set_time_isSet(false);
+    this.time = 0;
+    set_uptime_isSet(false);
+    this.uptime = 0;
+  }
+
+  public int get_time() {
+    return this.time;
+  }
+
+  public void set_time(int time) {
+    this.time = time;
+    set_time_isSet(true);
+  }
+
+  public void unset_time() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TIME_ISSET_ID);
+  }
+
+  /** Returns true if field time is set (has been assigned a value) and false otherwise */
+  public boolean is_set_time() {
+    return EncodingUtils.testBit(__isset_bitfield, __TIME_ISSET_ID);
+  }
+
+  public void set_time_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TIME_ISSET_ID, value);
+  }
+
+  public int get_uptime() {
+    return this.uptime;
+  }
+
+  public void set_uptime(int uptime) {
+    this.uptime = uptime;
+    set_uptime_isSet(true);
+  }
+
+  public void unset_uptime() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __UPTIME_ISSET_ID);
+  }
+
+  /** Returns true if field uptime is set (has been assigned a value) and false otherwise */
+  public boolean is_set_uptime() {
+    return EncodingUtils.testBit(__isset_bitfield, __UPTIME_ISSET_ID);
+  }
+
+  public void set_uptime_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __UPTIME_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case TIME:
+      if (value == null) {
+        unset_time();
+      } else {
+        set_time((Integer)value);
+      }
+      break;
+
+    case UPTIME:
+      if (value == null) {
+        unset_uptime();
+      } else {
+        set_uptime((Integer)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case TIME:
+      return Integer.valueOf(get_time());
+
+    case UPTIME:
+      return Integer.valueOf(get_uptime());
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case TIME:
+      return is_set_time();
+    case UPTIME:
+      return is_set_uptime();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof TaskHeartbeat)
+      return this.equals((TaskHeartbeat)that);
+    return false;
+  }
+
+  public boolean equals(TaskHeartbeat that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_time = true;
+    boolean that_present_time = true;
+    if (this_present_time || that_present_time) {
+      if (!(this_present_time && that_present_time))
+        return false;
+      if (this.time != that.time)
+        return false;
+    }
+
+    boolean this_present_uptime = true;
+    boolean that_present_uptime = true;
+    if (this_present_uptime || that_present_uptime) {
+      if (!(this_present_uptime && that_present_uptime))
+        return false;
+      if (this.uptime != that.uptime)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_time = true;
+    list.add(present_time);
+    if (present_time)
+      list.add(time);
+
+    boolean present_uptime = true;
+    list.add(present_uptime);
+    if (present_uptime)
+      list.add(uptime);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(TaskHeartbeat other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(is_set_time()).compareTo(other.is_set_time());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_time()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.time, other.time);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_uptime()).compareTo(other.is_set_uptime());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_uptime()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.uptime, other.uptime);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("TaskHeartbeat(");
+    boolean first = true;
+
+    sb.append("time:");
+    sb.append(this.time);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("uptime:");
+    sb.append(this.uptime);
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws TException {
+    // check for required fields
+    if (!is_set_time()) {
+      throw new TProtocolException("Required field 'time' is unset! Struct:" + toString());
+    }
+
+    if (!is_set_uptime()) {
+      throw new TProtocolException("Required field 'uptime' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class TaskHeartbeatStandardSchemeFactory implements SchemeFactory {
+    public TaskHeartbeatStandardScheme getScheme() {
+      return new TaskHeartbeatStandardScheme();
+    }
+  }
+
+  private static class TaskHeartbeatStandardScheme extends StandardScheme<TaskHeartbeat> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, TaskHeartbeat struct) throws TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // TIME
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.time = iprot.readI32();
+              struct.set_time_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // UPTIME
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.uptime = iprot.readI32();
+              struct.set_uptime_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, TaskHeartbeat struct) throws TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(TIME_FIELD_DESC);
+      oprot.writeI32(struct.time);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(UPTIME_FIELD_DESC);
+      oprot.writeI32(struct.uptime);
+      oprot.writeFieldEnd();
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class TaskHeartbeatTupleSchemeFactory implements SchemeFactory {
+    public TaskHeartbeatTupleScheme getScheme() {
+      return new TaskHeartbeatTupleScheme();
+    }
+  }
+
+  private static class TaskHeartbeatTupleScheme extends TupleScheme<TaskHeartbeat> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, TaskHeartbeat struct) throws TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeI32(struct.time);
+      oprot.writeI32(struct.uptime);
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, TaskHeartbeat struct) throws TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.time = iprot.readI32();
+      struct.set_time_isSet(true);
+      struct.uptime = iprot.readI32();
+      struct.set_uptime_isSet(true);
+    }
+  }
+
+}
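
As generated, the struct round-trips through Thrift's compact protocol; a
small usage sketch (class name and values are illustrative):

    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.TSerializer;
    import org.apache.thrift.protocol.TCompactProtocol;

    public class TaskHeartbeatDemo {
        public static void main(String[] args) throws Exception {
            TaskHeartbeat hb = new TaskHeartbeat(1445328000, 120); // time, uptime (secs)
            byte[] bytes = new TSerializer(new TCompactProtocol.Factory()).serialize(hb);

            TaskHeartbeat copy = new TaskHeartbeat();
            new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, bytes);
            System.out.println(copy); // TaskHeartbeat(time:1445328000, uptime:120)
        }
    }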
+

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/TaskSummary.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/TaskSummary.java b/jstorm-core/src/main/java/backtype/storm/generated/TaskSummary.java
index 9abf197..fa2765c 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/TaskSummary.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/TaskSummary.java
@@ -34,11 +34,11 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class TaskSummary implements org.apache.thrift.TBase<TaskSummary, TaskSummary._Fields>, java.io.Serializable, Cloneable, Comparable<TaskSummary> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TaskSummary");
 
-  private static final org.apache.thrift.protocol.TField TASK_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("task_id", org.apache.thrift.protocol.TType.I32, (short)1);
+  private static final org.apache.thrift.protocol.TField TASK_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("taskId", org.apache.thrift.protocol.TType.I32, (short)1);
   private static final org.apache.thrift.protocol.TField UPTIME_FIELD_DESC = new org.apache.thrift.protocol.TField("uptime", org.apache.thrift.protocol.TType.I32, (short)2);
   private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRING, (short)3);
   private static final org.apache.thrift.protocol.TField HOST_FIELD_DESC = new org.apache.thrift.protocol.TField("host", org.apache.thrift.protocol.TType.STRING, (short)4);
@@ -51,7 +51,7 @@ public class TaskSummary implements org.apache.thrift.TBase<TaskSummary, TaskSum
     schemes.put(TupleScheme.class, new TaskSummaryTupleSchemeFactory());
   }
 
-  private int task_id; // required
+  private int taskId; // required
   private int uptime; // required
   private String status; // required
   private String host; // required
@@ -60,7 +60,7 @@ public class TaskSummary implements org.apache.thrift.TBase<TaskSummary, TaskSum
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    TASK_ID((short)1, "task_id"),
+    TASK_ID((short)1, "taskId"),
     UPTIME((short)2, "uptime"),
     STATUS((short)3, "status"),
     HOST((short)4, "host"),
@@ -132,7 +132,7 @@ public class TaskSummary implements org.apache.thrift.TBase<TaskSummary, TaskSum
   }
 
   // isset id assignments
-  private static final int __TASK_ID_ISSET_ID = 0;
+  private static final int __TASKID_ISSET_ID = 0;
   private static final int __UPTIME_ISSET_ID = 1;
   private static final int __PORT_ISSET_ID = 2;
   private byte __isset_bitfield = 0;
@@ -140,7 +140,7 @@ public class TaskSummary implements org.apache.thrift.TBase<TaskSummary, TaskSum
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.TASK_ID, new org.apache.thrift.meta_data.FieldMetaData("task_id", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+    tmpMap.put(_Fields.TASK_ID, new org.apache.thrift.meta_data.FieldMetaData("taskId", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
     tmpMap.put(_Fields.UPTIME, new org.apache.thrift.meta_data.FieldMetaData("uptime", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
@@ -161,15 +161,15 @@ public class TaskSummary implements org.apache.thrift.TBase<TaskSummary, TaskSum
   }
 
   public TaskSummary(
-    int task_id,
+    int taskId,
     int uptime,
     String status,
     String host,
     int port)
   {
     this();
-    this.task_id = task_id;
-    set_task_id_isSet(true);
+    this.taskId = taskId;
+    set_taskId_isSet(true);
     this.uptime = uptime;
     set_uptime_isSet(true);
     this.status = status;
@@ -183,7 +183,7 @@ public class TaskSummary implements org.apache.thrift.TBase<TaskSummary, TaskSum
    */
   public TaskSummary(TaskSummary other) {
     __isset_bitfield = other.__isset_bitfield;
-    this.task_id = other.task_id;
+    this.taskId = other.taskId;
     this.uptime = other.uptime;
     if (other.is_set_status()) {
       this.status = other.status;
@@ -207,8 +207,8 @@ public class TaskSummary implements org.apache.thrift.TBase<TaskSummary, TaskSum
 
   @Override
   public void clear() {
-    set_task_id_isSet(false);
-    this.task_id = 0;
+    set_taskId_isSet(false);
+    this.taskId = 0;
     set_uptime_isSet(false);
     this.uptime = 0;
     this.status = null;
@@ -218,26 +218,26 @@ public class TaskSummary implements org.apache.thrift.TBase<TaskSummary, TaskSum
     this.errors = null;
   }
 
-  public int get_task_id() {
-    return this.task_id;
+  public int get_taskId() {
+    return this.taskId;
   }
 
-  public void set_task_id(int task_id) {
-    this.task_id = task_id;
-    set_task_id_isSet(true);
+  public void set_taskId(int taskId) {
+    this.taskId = taskId;
+    set_taskId_isSet(true);
   }
 
-  public void unset_task_id() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TASK_ID_ISSET_ID);
+  public void unset_taskId() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TASKID_ISSET_ID);
   }
 
-  /** Returns true if field task_id is set (has been assigned a value) and false otherwise */
-  public boolean is_set_task_id() {
-    return EncodingUtils.testBit(__isset_bitfield, __TASK_ID_ISSET_ID);
+  /** Returns true if field taskId is set (has been assigned a value) and false otherwise */
+  public boolean is_set_taskId() {
+    return EncodingUtils.testBit(__isset_bitfield, __TASKID_ISSET_ID);
   }
 
-  public void set_task_id_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TASK_ID_ISSET_ID, value);
+  public void set_taskId_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TASKID_ISSET_ID, value);
   }
 
   public int get_uptime() {
@@ -372,9 +372,9 @@ public class TaskSummary implements org.apache.thrift.TBase<TaskSummary, TaskSum
     switch (field) {
     case TASK_ID:
       if (value == null) {
-        unset_task_id();
+        unset_taskId();
       } else {
-        set_task_id((Integer)value);
+        set_taskId((Integer)value);
       }
       break;
 
@@ -424,7 +424,7 @@ public class TaskSummary implements org.apache.thrift.TBase<TaskSummary, TaskSum
   public Object getFieldValue(_Fields field) {
     switch (field) {
     case TASK_ID:
-      return Integer.valueOf(get_task_id());
+      return Integer.valueOf(get_taskId());
 
     case UPTIME:
       return Integer.valueOf(get_uptime());
@@ -453,7 +453,7 @@ public class TaskSummary implements org.apache.thrift.TBase<TaskSummary, TaskSum
 
     switch (field) {
     case TASK_ID:
-      return is_set_task_id();
+      return is_set_taskId();
     case UPTIME:
       return is_set_uptime();
     case STATUS:
@@ -481,12 +481,12 @@ public class TaskSummary implements org.apache.thrift.TBase<TaskSummary, TaskSum
     if (that == null)
       return false;
 
-    boolean this_present_task_id = true;
-    boolean that_present_task_id = true;
-    if (this_present_task_id || that_present_task_id) {
-      if (!(this_present_task_id && that_present_task_id))
+    boolean this_present_taskId = true;
+    boolean that_present_taskId = true;
+    if (this_present_taskId || that_present_taskId) {
+      if (!(this_present_taskId && that_present_taskId))
         return false;
-      if (this.task_id != that.task_id)
+      if (this.taskId != that.taskId)
         return false;
     }
 
@@ -542,10 +542,10 @@ public class TaskSummary implements org.apache.thrift.TBase<TaskSummary, TaskSum
   public int hashCode() {
     List<Object> list = new ArrayList<Object>();
 
-    boolean present_task_id = true;
-    list.add(present_task_id);
-    if (present_task_id)
-      list.add(task_id);
+    boolean present_taskId = true;
+    list.add(present_taskId);
+    if (present_taskId)
+      list.add(taskId);
 
     boolean present_uptime = true;
     list.add(present_uptime);
@@ -583,12 +583,12 @@ public class TaskSummary implements org.apache.thrift.TBase<TaskSummary, TaskSum
 
     int lastComparison = 0;
 
-    lastComparison = Boolean.valueOf(is_set_task_id()).compareTo(other.is_set_task_id());
+    lastComparison = Boolean.valueOf(is_set_taskId()).compareTo(other.is_set_taskId());
     if (lastComparison != 0) {
       return lastComparison;
     }
-    if (is_set_task_id()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.task_id, other.task_id);
+    if (is_set_taskId()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.taskId, other.taskId);
       if (lastComparison != 0) {
         return lastComparison;
       }
@@ -650,11 +650,11 @@ public class TaskSummary implements org.apache.thrift.TBase<TaskSummary, TaskSum
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -663,8 +663,8 @@ public class TaskSummary implements org.apache.thrift.TBase<TaskSummary, TaskSum
     StringBuilder sb = new StringBuilder("TaskSummary(");
     boolean first = true;
 
-    sb.append("task_id:");
-    sb.append(this.task_id);
+    sb.append("taskId:");
+    sb.append(this.taskId);
     first = false;
     if (!first) sb.append(", ");
     sb.append("uptime:");
@@ -704,26 +704,26 @@ public class TaskSummary implements org.apache.thrift.TBase<TaskSummary, TaskSum
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
-    if (!is_set_task_id()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'task_id' is unset! Struct:" + toString());
+    if (!is_set_taskId()) {
+      throw new TProtocolException("Required field 'taskId' is unset! Struct:" + toString());
     }
 
     if (!is_set_uptime()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'uptime' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'uptime' is unset! Struct:" + toString());
     }
 
     if (!is_set_status()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'status' is unset! Struct:" + toString());
     }
 
     if (!is_set_host()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'host' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'host' is unset! Struct:" + toString());
     }
 
     if (!is_set_port()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'port' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'port' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -732,7 +732,7 @@ public class TaskSummary implements org.apache.thrift.TBase<TaskSummary, TaskSum
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -742,7 +742,7 @@ public class TaskSummary implements org.apache.thrift.TBase<TaskSummary, TaskSum
       // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
       __isset_bitfield = 0;
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -755,7 +755,7 @@ public class TaskSummary implements org.apache.thrift.TBase<TaskSummary, TaskSum
 
   private static class TaskSummaryStandardScheme extends StandardScheme<TaskSummary> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, TaskSummary struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, TaskSummary struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -767,8 +767,8 @@ public class TaskSummary implements org.apache.thrift.TBase<TaskSummary, TaskSum
         switch (schemeField.id) {
           case 1: // TASK_ID
             if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
-              struct.task_id = iprot.readI32();
-              struct.set_task_id_isSet(true);
+              struct.taskId = iprot.readI32();
+              struct.set_taskId_isSet(true);
             } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
@@ -808,14 +808,14 @@ public class TaskSummary implements org.apache.thrift.TBase<TaskSummary, TaskSum
           case 6: // ERRORS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list230 = iprot.readListBegin();
-                struct.errors = new ArrayList<ErrorInfo>(_list230.size);
-                ErrorInfo _elem231;
-                for (int _i232 = 0; _i232 < _list230.size; ++_i232)
+                org.apache.thrift.protocol.TList _list178 = iprot.readListBegin();
+                struct.errors = new ArrayList<ErrorInfo>(_list178.size);
+                ErrorInfo _elem179;
+                for (int _i180 = 0; _i180 < _list178.size; ++_i180)
                 {
-                  _elem231 = new ErrorInfo();
-                  _elem231.read(iprot);
-                  struct.errors.add(_elem231);
+                  _elem179 = new ErrorInfo();
+                  _elem179.read(iprot);
+                  struct.errors.add(_elem179);
                 }
                 iprot.readListEnd();
               }
@@ -833,12 +833,12 @@ public class TaskSummary implements org.apache.thrift.TBase<TaskSummary, TaskSum
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, TaskSummary struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, TaskSummary struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
       oprot.writeFieldBegin(TASK_ID_FIELD_DESC);
-      oprot.writeI32(struct.task_id);
+      oprot.writeI32(struct.taskId);
       oprot.writeFieldEnd();
       oprot.writeFieldBegin(UPTIME_FIELD_DESC);
       oprot.writeI32(struct.uptime);
@@ -861,9 +861,9 @@ public class TaskSummary implements org.apache.thrift.TBase<TaskSummary, TaskSum
           oprot.writeFieldBegin(ERRORS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.errors.size()));
-            for (ErrorInfo _iter233 : struct.errors)
+            for (ErrorInfo _iter181 : struct.errors)
             {
-              _iter233.write(oprot);
+              _iter181.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -885,9 +885,9 @@ public class TaskSummary implements org.apache.thrift.TBase<TaskSummary, TaskSum
   private static class TaskSummaryTupleScheme extends TupleScheme<TaskSummary> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, TaskSummary struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, TaskSummary struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
-      oprot.writeI32(struct.task_id);
+      oprot.writeI32(struct.taskId);
       oprot.writeI32(struct.uptime);
       oprot.writeString(struct.status);
       oprot.writeString(struct.host);
@@ -900,19 +900,19 @@ public class TaskSummary implements org.apache.thrift.TBase<TaskSummary, TaskSum
       if (struct.is_set_errors()) {
         {
           oprot.writeI32(struct.errors.size());
-          for (ErrorInfo _iter234 : struct.errors)
+          for (ErrorInfo _iter182 : struct.errors)
           {
-            _iter234.write(oprot);
+            _iter182.write(oprot);
           }
         }
       }
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, TaskSummary struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, TaskSummary struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
-      struct.task_id = iprot.readI32();
-      struct.set_task_id_isSet(true);
+      struct.taskId = iprot.readI32();
+      struct.set_taskId_isSet(true);
       struct.uptime = iprot.readI32();
       struct.set_uptime_isSet(true);
       struct.status = iprot.readString();
@@ -924,14 +924,14 @@ public class TaskSummary implements org.apache.thrift.TBase<TaskSummary, TaskSum
       BitSet incoming = iprot.readBitSet(1);
       if (incoming.get(0)) {
         {
-          org.apache.thrift.protocol.TList _list235 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.errors = new ArrayList<ErrorInfo>(_list235.size);
-          ErrorInfo _elem236;
-          for (int _i237 = 0; _i237 < _list235.size; ++_i237)
+          org.apache.thrift.protocol.TList _list183 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.errors = new ArrayList<ErrorInfo>(_list183.size);
+          ErrorInfo _elem184;
+          for (int _i185 = 0; _i185 < _list183.size; ++_i185)
           {
-            _elem236 = new ErrorInfo();
-            _elem236.read(iprot);
-            struct.errors.add(_elem236);
+            _elem184 = new ErrorInfo();
+            _elem184.read(iprot);
+            struct.errors.add(_elem184);
           }
         }
         struct.set_errors_isSet(true);

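Worth noting in the TaskSummary tuple scheme above: required fields are written in a fixed order with no per-field headers at all, and a small bitset then announces which optional fields (here only 'errors') follow. A minimal standalone sketch of that convention, using plain java.util.BitSet and DataOutputStream rather than the real TTupleProtocol; all names here are illustrative, not JStorm code:

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.BitSet;

    class TupleStyleEncoding {
        // Write required fields in fixed order, then a one-byte presence
        // bitset, then only the optional fields whose bits are set.
        static byte[] write(int taskId, int uptime, Integer errorsSize) throws IOException {
            ByteArrayOutputStream buf = new ByteArrayOutputStream();
            DataOutputStream out = new DataOutputStream(buf);
            out.writeInt(taskId);   // required, always present
            out.writeInt(uptime);   // required, always present
            BitSet present = new BitSet(1);
            if (errorsSize != null) {
                present.set(0);     // bit 0 <-> the optional 'errors' field
            }
            out.writeByte(present.isEmpty() ? 0 : present.toByteArray()[0]);
            if (errorsSize != null) {
                out.writeInt(errorsSize); // list length precedes the elements
            }
            return buf.toByteArray();
        }
    }

The read side mirrors this exactly, as in the hunk above: readBitSet(1) first, then incoming.get(0) guards deserialization of the optional list.
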
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/ThriftSerializedObject.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/ThriftSerializedObject.java b/jstorm-core/src/main/java/backtype/storm/generated/ThriftSerializedObject.java
index a753d7f..f3156e7 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/ThriftSerializedObject.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/ThriftSerializedObject.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class ThriftSerializedObject implements org.apache.thrift.TBase<ThriftSerializedObject, ThriftSerializedObject._Fields>, java.io.Serializable, Cloneable, Comparable<ThriftSerializedObject> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ThriftSerializedObject");
 
@@ -346,11 +346,11 @@ public class ThriftSerializedObject implements org.apache.thrift.TBase<ThriftSer
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -378,14 +378,14 @@ public class ThriftSerializedObject implements org.apache.thrift.TBase<ThriftSer
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_name()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'name' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'name' is unset! Struct:" + toString());
     }
 
     if (!is_set_bits()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'bits' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'bits' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -394,7 +394,7 @@ public class ThriftSerializedObject implements org.apache.thrift.TBase<ThriftSer
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -402,7 +402,7 @@ public class ThriftSerializedObject implements org.apache.thrift.TBase<ThriftSer
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -415,7 +415,7 @@ public class ThriftSerializedObject implements org.apache.thrift.TBase<ThriftSer
 
   private static class ThriftSerializedObjectStandardScheme extends StandardScheme<ThriftSerializedObject> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, ThriftSerializedObject struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, ThriftSerializedObject struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -450,7 +450,7 @@ public class ThriftSerializedObject implements org.apache.thrift.TBase<ThriftSer
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, ThriftSerializedObject struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, ThriftSerializedObject struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -479,14 +479,14 @@ public class ThriftSerializedObject implements org.apache.thrift.TBase<ThriftSer
   private static class ThriftSerializedObjectTupleScheme extends TupleScheme<ThriftSerializedObject> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, ThriftSerializedObject struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, ThriftSerializedObject struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       oprot.writeString(struct.name);
       oprot.writeBinary(struct.bits);
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, ThriftSerializedObject struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, ThriftSerializedObject struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       struct.name = iprot.readString();
       struct.set_name_isSet(true);

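Every generated struct reuses the same Java-serialization bridge seen in the writeObject/readObject hunks above: serialization delegates to the struct's own Thrift encoding via TCompactProtocol over a TIOStreamTransport, and the checked TException is wrapped in an IOException to satisfy the java.io contract. A hypothetical generic helper capturing that shape (ThriftJavaSerialization is not a JStorm class, just a sketch built from the calls shown in the diff):

    import java.io.IOException;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;
    import org.apache.thrift.TBase;
    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TCompactProtocol;
    import org.apache.thrift.transport.TIOStreamTransport;

    final class ThriftJavaSerialization {
        // Encode any generated struct into a Java object stream.
        static void writeTo(TBase<?, ?> struct, ObjectOutputStream out) throws IOException {
            try {
                struct.write(new TCompactProtocol(new TIOStreamTransport(out)));
            } catch (TException te) {
                throw new IOException(te); // adapt Thrift's checked exception
            }
        }

        // Decode the struct back from a Java object stream.
        static void readInto(TBase<?, ?> struct, ObjectInputStream in) throws IOException {
            try {
                struct.read(new TCompactProtocol(new TIOStreamTransport(in)));
            } catch (TException te) {
                throw new IOException(te);
            }
        }
    }

This is why the writeObject/readObject bodies differ only in struct type from file to file: the protocol and transport wiring is identical everywhere.
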
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/TopologyAssignException.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/TopologyAssignException.java b/jstorm-core/src/main/java/backtype/storm/generated/TopologyAssignException.java
index 8c95876..13887cc 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/TopologyAssignException.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/TopologyAssignException.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class TopologyAssignException extends TException implements org.apache.thrift.TBase<TopologyAssignException, TopologyAssignException._Fields>, java.io.Serializable, Cloneable, Comparable<TopologyAssignException> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TopologyAssignException");
 
@@ -264,11 +264,11 @@ public class TopologyAssignException extends TException implements org.apache.th
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -288,10 +288,10 @@ public class TopologyAssignException extends TException implements org.apache.th
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_msg()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'msg' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'msg' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -300,7 +300,7 @@ public class TopologyAssignException extends TException implements org.apache.th
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -308,7 +308,7 @@ public class TopologyAssignException extends TException implements org.apache.th
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -321,7 +321,7 @@ public class TopologyAssignException extends TException implements org.apache.th
 
   private static class TopologyAssignExceptionStandardScheme extends StandardScheme<TopologyAssignException> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, TopologyAssignException struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, TopologyAssignException struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -348,7 +348,7 @@ public class TopologyAssignException extends TException implements org.apache.th
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, TopologyAssignException struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, TopologyAssignException struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -372,13 +372,13 @@ public class TopologyAssignException extends TException implements org.apache.th
   private static class TopologyAssignExceptionTupleScheme extends TupleScheme<TopologyAssignException> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, TopologyAssignException struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, TopologyAssignException struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       oprot.writeString(struct.msg);
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, TopologyAssignException struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, TopologyAssignException struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       struct.msg = iprot.readString();
       struct.set_msg_isSet(true);

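The validate() hunks in these files all share one shape: required fields are checked before any write and after any read, and a TProtocolException (a TException subclass, which is why the simplified throws clause still compiles) reports the first unset field. A sketch of that pattern with an illustrative field name:

    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TProtocolException;

    class RequiredFieldCheck {
        private String msg;
        private boolean msgIsSet;

        // Called by the schemes before writing and after reading a struct.
        void validate() throws TException {
            if (!msgIsSet) {
                throw new TProtocolException("Required field 'msg' is unset! Struct:" + this);
            }
        }
    }
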
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/TopologyInfo.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/TopologyInfo.java b/jstorm-core/src/main/java/backtype/storm/generated/TopologyInfo.java
index 1da8e98..4b4793d 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/TopologyInfo.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/TopologyInfo.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class TopologyInfo implements org.apache.thrift.TBase<TopologyInfo, TopologyInfo._Fields>, java.io.Serializable, Cloneable, Comparable<TopologyInfo> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TopologyInfo");
 
@@ -523,11 +523,11 @@ public class TopologyInfo implements org.apache.thrift.TBase<TopologyInfo, Topol
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -571,22 +571,22 @@ public class TopologyInfo implements org.apache.thrift.TBase<TopologyInfo, Topol
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_topology()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'topology' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'topology' is unset! Struct:" + toString());
     }
 
     if (!is_set_components()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'components' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'components' is unset! Struct:" + toString());
     }
 
     if (!is_set_tasks()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tasks' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'tasks' is unset! Struct:" + toString());
     }
 
     if (!is_set_metrics()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'metrics' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'metrics' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -601,7 +601,7 @@ public class TopologyInfo implements org.apache.thrift.TBase<TopologyInfo, Topol
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -609,7 +609,7 @@ public class TopologyInfo implements org.apache.thrift.TBase<TopologyInfo, Topol
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -622,7 +622,7 @@ public class TopologyInfo implements org.apache.thrift.TBase<TopologyInfo, Topol
 
   private static class TopologyInfoStandardScheme extends StandardScheme<TopologyInfo> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, TopologyInfo struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, TopologyInfo struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -644,14 +644,14 @@ public class TopologyInfo implements org.apache.thrift.TBase<TopologyInfo, Topol
           case 2: // COMPONENTS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list238 = iprot.readListBegin();
-                struct.components = new ArrayList<ComponentSummary>(_list238.size);
-                ComponentSummary _elem239;
-                for (int _i240 = 0; _i240 < _list238.size; ++_i240)
+                org.apache.thrift.protocol.TList _list186 = iprot.readListBegin();
+                struct.components = new ArrayList<ComponentSummary>(_list186.size);
+                ComponentSummary _elem187;
+                for (int _i188 = 0; _i188 < _list186.size; ++_i188)
                 {
-                  _elem239 = new ComponentSummary();
-                  _elem239.read(iprot);
-                  struct.components.add(_elem239);
+                  _elem187 = new ComponentSummary();
+                  _elem187.read(iprot);
+                  struct.components.add(_elem187);
                 }
                 iprot.readListEnd();
               }
@@ -663,14 +663,14 @@ public class TopologyInfo implements org.apache.thrift.TBase<TopologyInfo, Topol
           case 3: // TASKS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list241 = iprot.readListBegin();
-                struct.tasks = new ArrayList<TaskSummary>(_list241.size);
-                TaskSummary _elem242;
-                for (int _i243 = 0; _i243 < _list241.size; ++_i243)
+                org.apache.thrift.protocol.TList _list189 = iprot.readListBegin();
+                struct.tasks = new ArrayList<TaskSummary>(_list189.size);
+                TaskSummary _elem190;
+                for (int _i191 = 0; _i191 < _list189.size; ++_i191)
                 {
-                  _elem242 = new TaskSummary();
-                  _elem242.read(iprot);
-                  struct.tasks.add(_elem242);
+                  _elem190 = new TaskSummary();
+                  _elem190.read(iprot);
+                  struct.tasks.add(_elem190);
                 }
                 iprot.readListEnd();
               }
@@ -697,7 +697,7 @@ public class TopologyInfo implements org.apache.thrift.TBase<TopologyInfo, Topol
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, TopologyInfo struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, TopologyInfo struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -710,9 +710,9 @@ public class TopologyInfo implements org.apache.thrift.TBase<TopologyInfo, Topol
         oprot.writeFieldBegin(COMPONENTS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.components.size()));
-          for (ComponentSummary _iter244 : struct.components)
+          for (ComponentSummary _iter192 : struct.components)
           {
-            _iter244.write(oprot);
+            _iter192.write(oprot);
           }
           oprot.writeListEnd();
         }
@@ -722,9 +722,9 @@ public class TopologyInfo implements org.apache.thrift.TBase<TopologyInfo, Topol
         oprot.writeFieldBegin(TASKS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tasks.size()));
-          for (TaskSummary _iter245 : struct.tasks)
+          for (TaskSummary _iter193 : struct.tasks)
           {
-            _iter245.write(oprot);
+            _iter193.write(oprot);
           }
           oprot.writeListEnd();
         }
@@ -750,53 +750,53 @@ public class TopologyInfo implements org.apache.thrift.TBase<TopologyInfo, Topol
   private static class TopologyInfoTupleScheme extends TupleScheme<TopologyInfo> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, TopologyInfo struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, TopologyInfo struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       struct.topology.write(oprot);
       {
         oprot.writeI32(struct.components.size());
-        for (ComponentSummary _iter246 : struct.components)
+        for (ComponentSummary _iter194 : struct.components)
         {
-          _iter246.write(oprot);
+          _iter194.write(oprot);
         }
       }
       {
         oprot.writeI32(struct.tasks.size());
-        for (TaskSummary _iter247 : struct.tasks)
+        for (TaskSummary _iter195 : struct.tasks)
         {
-          _iter247.write(oprot);
+          _iter195.write(oprot);
         }
       }
       struct.metrics.write(oprot);
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, TopologyInfo struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, TopologyInfo struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       struct.topology = new TopologySummary();
       struct.topology.read(iprot);
       struct.set_topology_isSet(true);
       {
-        org.apache.thrift.protocol.TList _list248 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.components = new ArrayList<ComponentSummary>(_list248.size);
-        ComponentSummary _elem249;
-        for (int _i250 = 0; _i250 < _list248.size; ++_i250)
+        org.apache.thrift.protocol.TList _list196 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.components = new ArrayList<ComponentSummary>(_list196.size);
+        ComponentSummary _elem197;
+        for (int _i198 = 0; _i198 < _list196.size; ++_i198)
         {
-          _elem249 = new ComponentSummary();
-          _elem249.read(iprot);
-          struct.components.add(_elem249);
+          _elem197 = new ComponentSummary();
+          _elem197.read(iprot);
+          struct.components.add(_elem197);
         }
       }
       struct.set_components_isSet(true);
       {
-        org.apache.thrift.protocol.TList _list251 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.tasks = new ArrayList<TaskSummary>(_list251.size);
-        TaskSummary _elem252;
-        for (int _i253 = 0; _i253 < _list251.size; ++_i253)
+        org.apache.thrift.protocol.TList _list199 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.tasks = new ArrayList<TaskSummary>(_list199.size);
+        TaskSummary _elem200;
+        for (int _i201 = 0; _i201 < _list199.size; ++_i201)
         {
-          _elem252 = new TaskSummary();
-          _elem252.read(iprot);
-          struct.tasks.add(_elem252);
+          _elem200 = new TaskSummary();
+          _elem200.read(iprot);
+          struct.tasks.add(_elem200);
         }
       }
       struct.set_tasks_isSet(true);

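The list-reading hunks in TopologyInfo (and TaskSummary earlier) are all instances of one generated pattern: read the list header, preallocate the ArrayList to the announced size, let each element struct deserialize itself from the same protocol, then close the list. A generic sketch of that loop; the Supplier parameter is an assumption for illustration, since generated code inlines the concrete constructor instead:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.Supplier;
    import org.apache.thrift.TBase;
    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TList;
    import org.apache.thrift.protocol.TProtocol;

    final class ListReading {
        static <T extends TBase<?, ?>> List<T> readStructList(TProtocol iprot, Supplier<T> ctor)
                throws TException {
            TList header = iprot.readListBegin();       // carries element type and count
            List<T> out = new ArrayList<T>(header.size); // preallocate, no resizing
            for (int i = 0; i < header.size; ++i) {
                T elem = ctor.get();
                elem.read(iprot);                        // element reads its own fields
                out.add(elem);
            }
            iprot.readListEnd();
            return out;
        }
    }

Preallocating to header.size is why the generated code reads the TList header before constructing the ArrayList; the renamed _listNNN/_elemNNN/_iNNN locals in the diff are just the compiler's fresh counter values after regeneration.
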
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/TopologyInitialStatus.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/TopologyInitialStatus.java b/jstorm-core/src/main/java/backtype/storm/generated/TopologyInitialStatus.java
index 0d87ee1..8930060 100755
--- a/jstorm-core/src/main/java/backtype/storm/generated/TopologyInitialStatus.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/TopologyInitialStatus.java
@@ -11,7 +11,7 @@ import java.util.Map;
 import java.util.HashMap;
 import org.apache.thrift.TEnum;
 
-public enum TopologyInitialStatus implements org.apache.thrift.TEnum {
+public enum TopologyInitialStatus implements TEnum {
   ACTIVE(1),
   INACTIVE(2);
 

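TopologyInitialStatus now refers to TEnum by its simple name; the interface contract is a single getValue(), and generated enums also carry a findByValue for mapping a decoded wire integer back to a constant (returning null for unknown values). An illustrative sketch of that contract (ExampleStatus is not part of the codebase):

    import org.apache.thrift.TEnum;

    enum ExampleStatus implements TEnum {
        ACTIVE(1), INACTIVE(2);

        private final int value;

        ExampleStatus(int value) { this.value = value; }

        @Override
        public int getValue() { return value; } // the value sent on the wire

        static ExampleStatus findByValue(int value) {
            switch (value) {
                case 1:  return ACTIVE;
                case 2:  return INACTIVE;
                default: return null; // unknown value from a newer peer
            }
        }
    }
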

[15/51] [partial] storm git commit: Update JStorm to latest release 2.1.0

Posted by pt...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/StatusTransition.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/StatusTransition.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/StatusTransition.java
index e984455..fab05ea 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/StatusTransition.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/StatusTransition.java
@@ -17,51 +17,36 @@
  */
 package com.alibaba.jstorm.daemon.nimbus;
 
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import com.alibaba.jstorm.callback.Callback;
-import com.alibaba.jstorm.callback.impl.ActiveTransitionCallback;
-import com.alibaba.jstorm.callback.impl.DoRebalanceTransitionCallback;
-import com.alibaba.jstorm.callback.impl.DoneRebalanceTransitionCallback;
-import com.alibaba.jstorm.callback.impl.InactiveTransitionCallback;
-import com.alibaba.jstorm.callback.impl.KillTransitionCallback;
-import com.alibaba.jstorm.callback.impl.ReassignTransitionCallback;
-import com.alibaba.jstorm.callback.impl.RebalanceTransitionCallback;
-import com.alibaba.jstorm.callback.impl.RemoveTransitionCallback;
-import com.alibaba.jstorm.callback.impl.UpdateConfTransitionCallback;
+import com.alibaba.jstorm.callback.impl.*;
 import com.alibaba.jstorm.cluster.StormBase;
 import com.alibaba.jstorm.cluster.StormStatus;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
 
 /**
  * Status changing
  * 
  * @author version1: lixin version2: Longda
- * 
- * 
- * 
  */
 public class StatusTransition {
 
-    private final static Logger LOG = LoggerFactory
-            .getLogger(StatusTransition.class);
+    private final static Logger LOG = LoggerFactory.getLogger(StatusTransition.class);
 
     private NimbusData data;
 
-    private Map<String, Object> topologyLocks =
-            new ConcurrentHashMap<String, Object>();
+    private Map<String, Object> topologyLocks = new ConcurrentHashMap<String, Object>();
 
     public StatusTransition(NimbusData data) {
         this.data = data;
 
     }
 
-    public <T> void transition(String topologyid, boolean errorOnNoTransition,
-            StatusType changeStatus, T... args) throws Exception {
+    public <T> void transition(String topologyid, boolean errorOnNoTransition, StatusType changeStatus, T... args) throws Exception {
         // lock outside
         Object lock = topologyLocks.get(topologyid);
         if (lock == null) {
@@ -70,8 +55,7 @@ public class StatusTransition {
         }
 
         if (data.getIsShutdown().get() == true) {
-            LOG.info("Nimbus is in shutdown, skip this event " + topologyid
-                    + ":" + changeStatus);
+            LOG.info("Nimbus is in shutdown, skip this event " + topologyid + ":" + changeStatus);
             return;
         }
 
@@ -86,50 +70,34 @@ public class StatusTransition {
     /**
      * Changing status
      * 
-     * @param topologyId
-     * @param errorOnNTransition if it is true, failure will throw exception
      * @param args -- will be used in the status changing callback
-     * 
      */
-    public <T> void transitionLock(String topologyid,
-            boolean errorOnNoTransition, StatusType changeStatus, T... args)
-            throws Exception {
+    public <T> void transitionLock(String topologyid, boolean errorOnNoTransition, StatusType changeStatus, T... args) throws Exception {
 
         // get ZK's topology node's data, which is StormBase
-        StormBase stormbase =
-                data.getStormClusterState().storm_base(topologyid, null);
+        StormBase stormbase = data.getStormClusterState().storm_base(topologyid, null);
         if (stormbase == null) {
 
-            LOG.error("Cannot apply event changing status "
-                    + changeStatus.getStatus() + " to " + topologyid
-                    + " because failed to get StormBase from ZK");
+            LOG.error("Cannot apply event changing status " + changeStatus.getStatus() + " to " + topologyid + " because failed to get StormBase from ZK");
             return;
         }
 
         StormStatus currentStatus = stormbase.getStatus();
         if (currentStatus == null) {
-            LOG.error("Cannot apply event changing status "
-                    + changeStatus.getStatus() + " to " + topologyid
-                    + " because topologyStatus is null in ZK");
+            LOG.error("Cannot apply event changing status " + changeStatus.getStatus() + " to " + topologyid + " because topologyStatus is null in ZK");
             return;
         }
 
         // <currentStatus, Map<changingStatus, callback>>
-        Map<StatusType, Map<StatusType, Callback>> callbackMap =
-                stateTransitions(topologyid, currentStatus);
+        Map<StatusType, Map<StatusType, Callback>> callbackMap = stateTransitions(topologyid, currentStatus);
 
         // get current changingCallbacks
-        Map<StatusType, Callback> changingCallbacks =
-                callbackMap.get(currentStatus.getStatusType());
+        Map<StatusType, Callback> changingCallbacks = callbackMap.get(currentStatus.getStatusType());
 
-        if (changingCallbacks == null
-                || changingCallbacks.containsKey(changeStatus) == false
-                || changingCallbacks.get(changeStatus) == null) {
+        if (changingCallbacks == null || changingCallbacks.containsKey(changeStatus) == false || changingCallbacks.get(changeStatus) == null) {
             String msg =
-                    "No transition for event: changing status:"
-                            + changeStatus.getStatus() + ", current status: "
-                            + currentStatus.getStatusType() + " topology-id: "
-                            + topologyid;
+                    "No transition for event: changing status:" + changeStatus.getStatus() + ", current status: " + currentStatus.getStatusType()
+                            + " topology-id: " + topologyid;
             LOG.info(msg);
             if (errorOnNoTransition) {
                 throw new RuntimeException(msg);
@@ -144,12 +112,10 @@ public class StatusTransition {
             StormStatus newStatus = (StormStatus) obj;
             // update status to ZK
             data.getStormClusterState().update_storm(topologyid, newStatus);
-            LOG.info("Successfully updated " + topologyid + " as status "
-                    + newStatus);
+            LOG.info("Successfully updated " + topologyid + " as status " + newStatus);
         }
 
-        LOG.info("Successfully apply event changing status "
-                + changeStatus.getStatus() + " to " + topologyid);
+        LOG.info("Successfully apply event changing status " + changeStatus.getStatus() + " to " + topologyid);
         return;
 
     }
@@ -157,104 +123,74 @@ public class StatusTransition {
     /**
      * generate status changing map
      * 
-     * 
-     * 
      * @param topologyid
-     * @param status
-     * @return
-     * 
-     *         Map<StatusType, Map<StatusType, Callback>> means
-     *         Map<currentStatus, Map<changingStatus, Callback>>
+     * @return Map<StatusType, Map<StatusType, Callback>> means Map<currentStatus, Map<changingStatus, Callback>>
      */
 
-    private Map<StatusType, Map<StatusType, Callback>> stateTransitions(
-            String topologyid, StormStatus currentStatus) {
+    private Map<StatusType, Map<StatusType, Callback>> stateTransitions(String topologyid, StormStatus currentStatus) {
 
         /**
          * 
-         * 1. Status: this status will be stored in ZK
-         * killed/inactive/active/rebalancing 2. action:
+         * 1. Status: this status will be stored in ZK killed/inactive/active/rebalancing 2. action:
          * 
-         * monitor -- every Config.NIMBUS_MONITOR_FREQ_SECS seconds will trigger
-         * this only valid when current status is active inactivate -- client
-         * will trigger this action, only valid when current status is active
-         * activate -- client will trigger this action only valid when current
-         * status is inactive startup -- when nimbus startup, it will trigger
-         * this action only valid when current status is killed/rebalancing kill
-         * -- client kill topology will trigger this action, only valid when
-         * current status is active/inactive/killed remove -- 30 seconds after
-         * client submit kill command, it will do this action, only valid when
-         * current status is killed rebalance -- client submit rebalance
-         * command, only valid when current status is active/deactive
-         * do_rebalance -- 30 seconds after client submit rebalance command, it
-         * will do this action, only valid when current status is rebalance
+         * monitor -- every Config.NIMBUS_MONITOR_FREQ_SECS seconds will trigger this only valid when current status is active inactivate -- client will trigger
+         * this action, only valid when current status is active activate -- client will trigger this action only valid when current status is inactive startup
+         * -- when nimbus startup, it will trigger this action only valid when current status is killed/rebalancing kill -- client kill topology will trigger
+         * this action, only valid when current status is active/inactive/killed remove -- 30 seconds after client submit kill command, it will do this action,
+         * only valid when current status is killed rebalance -- client submit rebalance command, only valid when current status is active/deactive do_rebalance
+         * -- 30 seconds after client submit rebalance command, it will do this action, only valid when current status is rebalance
          */
 
-        Map<StatusType, Map<StatusType, Callback>> rtn =
-                new HashMap<StatusType, Map<StatusType, Callback>>();
+        Map<StatusType, Map<StatusType, Callback>> rtn = new HashMap<StatusType, Map<StatusType, Callback>>();
 
         // current status is active
-        Map<StatusType, Callback> activeMap =
-                new HashMap<StatusType, Callback>();
-        activeMap.put(StatusType.monitor, new ReassignTransitionCallback(data,
-                topologyid));
+        Map<StatusType, Callback> activeMap = new HashMap<StatusType, Callback>();
+        activeMap.put(StatusType.monitor, new ReassignTransitionCallback(data, topologyid));
         activeMap.put(StatusType.inactivate, new InactiveTransitionCallback());
         activeMap.put(StatusType.startup, null);
         activeMap.put(StatusType.activate, null);
-        activeMap.put(StatusType.kill, new KillTransitionCallback(data,
-                topologyid));
+        activeMap.put(StatusType.kill, new KillTransitionCallback(data, topologyid));
         activeMap.put(StatusType.remove, null);
-        activeMap.put(StatusType.rebalance, new RebalanceTransitionCallback(
-                data, topologyid, currentStatus));
+        activeMap.put(StatusType.rebalance, new RebalanceTransitionCallback(data, topologyid, currentStatus));
         activeMap.put(StatusType.do_rebalance, null);
         activeMap.put(StatusType.done_rebalance, null);
-        activeMap.put(StatusType.update_conf, new UpdateConfTransitionCallback(
-                data, topologyid, currentStatus));
+        activeMap.put(StatusType.update_topology, new UpdateTopologyTransitionCallback(data, topologyid, currentStatus));
 
         rtn.put(StatusType.active, activeMap);
 
         // current status is inactive
-        Map<StatusType, Callback> inactiveMap =
-                new HashMap<StatusType, Callback>();
+        Map<StatusType, Callback> inactiveMap = new HashMap<StatusType, Callback>();
 
-        inactiveMap.put(StatusType.monitor, new ReassignTransitionCallback(
-                data, topologyid, new StormStatus(StatusType.inactive)));
+        inactiveMap.put(StatusType.monitor, new ReassignTransitionCallback(data, topologyid, new StormStatus(StatusType.inactive)));
         inactiveMap.put(StatusType.inactivate, null);
         inactiveMap.put(StatusType.startup, null);
         inactiveMap.put(StatusType.activate, new ActiveTransitionCallback());
-        inactiveMap.put(StatusType.kill, new KillTransitionCallback(data,
-                topologyid));
+        inactiveMap.put(StatusType.kill, new KillTransitionCallback(data, topologyid));
         inactiveMap.put(StatusType.remove, null);
-        inactiveMap.put(StatusType.rebalance, new RebalanceTransitionCallback(
-                data, topologyid, currentStatus));
+        inactiveMap.put(StatusType.rebalance, new RebalanceTransitionCallback(data, topologyid, currentStatus));
         inactiveMap.put(StatusType.do_rebalance, null);
         inactiveMap.put(StatusType.done_rebalance, null);
-        inactiveMap.put(StatusType.update_conf, null);
+        inactiveMap.put(StatusType.update_topology, null);
 
         rtn.put(StatusType.inactive, inactiveMap);
 
         // current status is killed
-        Map<StatusType, Callback> killedMap =
-                new HashMap<StatusType, Callback>();
+        Map<StatusType, Callback> killedMap = new HashMap<StatusType, Callback>();
 
         killedMap.put(StatusType.monitor, null);
         killedMap.put(StatusType.inactivate, null);
-        killedMap.put(StatusType.startup, new KillTransitionCallback(data,
-                topologyid));
+        killedMap.put(StatusType.startup, new KillTransitionCallback(data, topologyid));
         killedMap.put(StatusType.activate, null);
-        killedMap.put(StatusType.kill, new KillTransitionCallback(data,
-                topologyid));
-        killedMap.put(StatusType.remove, new RemoveTransitionCallback(data,
-                topologyid));
+        killedMap.put(StatusType.kill, new KillTransitionCallback(data, topologyid));
+        killedMap.put(StatusType.remove, new RemoveTransitionCallback(data, topologyid));
         killedMap.put(StatusType.rebalance, null);
         killedMap.put(StatusType.do_rebalance, null);
         killedMap.put(StatusType.done_rebalance, null);
-        killedMap.put(StatusType.update_conf, null);
+        killedMap.put(StatusType.update_topology, null);
         rtn.put(StatusType.killed, killedMap);
 
         // current status is under rebalancing
-        Map<StatusType, Callback> rebalancingMap =
-                new HashMap<StatusType, Callback>();
+        Map<StatusType, Callback> rebalancingMap = new HashMap<StatusType, Callback>();
 
         StatusType rebalanceOldStatus = StatusType.active;
         if (currentStatus.getOldStatus() != null) {
@@ -267,20 +203,14 @@ public class StatusTransition {
 
         rebalancingMap.put(StatusType.monitor, null);
         rebalancingMap.put(StatusType.inactivate, null);
-        rebalancingMap.put(StatusType.startup, new RebalanceTransitionCallback(
-                data, topologyid, new StormStatus(rebalanceOldStatus)));
+        rebalancingMap.put(StatusType.startup, new RebalanceTransitionCallback(data, topologyid, new StormStatus(rebalanceOldStatus)));
         rebalancingMap.put(StatusType.activate, null);
         rebalancingMap.put(StatusType.kill, null);
         rebalancingMap.put(StatusType.remove, null);
-        rebalancingMap
-                .put(StatusType.rebalance, new RebalanceTransitionCallback(
-                        data, topologyid, currentStatus));
-        rebalancingMap.put(StatusType.do_rebalance,
-                new DoRebalanceTransitionCallback(data, topologyid,
-                        new StormStatus(rebalanceOldStatus)));
-        rebalancingMap.put(StatusType.done_rebalance,
-                new DoneRebalanceTransitionCallback(data, topologyid));
-        rebalancingMap.put(StatusType.update_conf, null);
+        rebalancingMap.put(StatusType.rebalance, new RebalanceTransitionCallback(data, topologyid, currentStatus));
+        rebalancingMap.put(StatusType.do_rebalance, new DoRebalanceTransitionCallback(data, topologyid, new StormStatus(rebalanceOldStatus)));
+        rebalancingMap.put(StatusType.done_rebalance, new DoneRebalanceTransitionCallback(data, topologyid));
+        rebalancingMap.put(StatusType.update_topology, null);
         rtn.put(StatusType.rebalancing, rebalancingMap);
 
         /**
@@ -288,7 +218,6 @@ public class StatusTransition {
          */
 
         return rtn;
-
     }
 
 }

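In the StatusTransition diff above, stateTransitions builds a two-level table keyed first by the topology's current status and then by the requested action; transitionLock looks up the callback and treats a missing or null entry as "no transition", logging or throwing depending on errorOnNoTransition. A minimal sketch of that dispatch, with illustrative enums and Java 8 lambdas standing in for the JStorm Callback classes:

    import java.util.HashMap;
    import java.util.Map;

    class TransitionTable {
        enum Status { ACTIVE, INACTIVE }
        enum Action { KILL, ACTIVATE }

        private final Map<Status, Map<Action, Runnable>> table = new HashMap<>();

        TransitionTable() {
            Map<Action, Runnable> active = new HashMap<>();
            active.put(Action.KILL, () -> System.out.println("kill topology"));
            active.put(Action.ACTIVATE, null); // already active: no valid transition
            table.put(Status.ACTIVE, active);
        }

        void transition(Status current, Action action, boolean errorOnNoTransition) {
            Map<Action, Runnable> row = table.get(current);
            Runnable cb = (row == null) ? null : row.get(action);
            if (cb == null) {
                String msg = "No transition for event: " + action + ", current status: " + current;
                if (errorOnNoTransition) {
                    throw new RuntimeException(msg);
                }
                System.out.println(msg);
                return;
            }
            cb.run();
        }
    }

In StatusTransition the callback may additionally return a new StormStatus, which transitionLock then persists to ZK; the sketch omits that to keep the table lookup itself visible.
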
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/StatusType.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/StatusType.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/StatusType.java
index cf785b7..d0f68ff 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/StatusType.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/StatusType.java
@@ -20,21 +20,14 @@ package com.alibaba.jstorm.daemon.nimbus;
 /**
  * topology status:
  * 
- * 1. Status: this status will be stored in ZK
- * killed/inactive/active/rebalancing 2. action:
+ * 1. Status: this status will be stored in ZK killed/inactive/active/rebalancing 2. action:
  * 
- * monitor -- every Config.NIMBUS_MONITOR_FREQ_SECS seconds will trigger this
- * only valid when current status is active inactivate -- client will trigger
- * this action, only valid when current status is active activate -- client will
- * trigger this action only valid when current status is inactive startup --
- * when nimbus startup, it will trigger this action only valid when current
- * status is killed/rebalancing kill -- client kill topology will trigger this
- * action, only valid when current status is active/inactive/killed remove -- 30
- * seconds after client submit kill command, it will do this action, only valid
- * when current status is killed rebalance -- client submit rebalance command,
- * only valid when current status is active/deactive do_rebalance -- 30 seconds
- * after client submit rebalance command, it will do this action, only valid
- * when current status is rebalance
+ * monitor -- every Config.NIMBUS_MONITOR_FREQ_SECS seconds will trigger this only valid when current status is active inactivate -- client will trigger this
+ * action, only valid when current status is active activate -- client will trigger this action only valid when current status is inactive startup -- when
+ * nimbus startup, it will trigger this action only valid when current status is killed/rebalancing kill -- client kill topology will trigger this action, only
+ * valid when current status is active/inactive/killed remove -- 30 seconds after client submit kill command, it will do this action, only valid when current
+ * status is killed rebalance -- client submit rebalance command, only valid when current status is active/deactive do_rebalance -- 30 seconds after client
+ * submit rebalance command, it will do this action, only valid when current status is rebalance
  * 
  * 
  * 
@@ -43,13 +36,11 @@ package com.alibaba.jstorm.daemon.nimbus;
 public enum StatusType {
 
     // status
-    active("active"), inactive("inactive"), rebalancing("rebalancing"), killed(
-            "killed"),
+    active("active"), inactive("inactive"), rebalancing("rebalancing"), killed("killed"),
 
     // actions
-    activate("activate"), inactivate("inactivate"), monitor("monitor"), startup(
-            "startup"), kill("kill"), remove("remove"), rebalance("rebalance"), do_rebalance(
-            "do-rebalance"), done_rebalance("done-rebalance"), update_conf("update-config");
+    activate("activate"), inactivate("inactivate"), monitor("monitor"), startup("startup"), kill("kill"), remove("remove"), rebalance("rebalance"), do_rebalance(
+            "do-rebalance"), done_rebalance("done-rebalance"), update_topology("update-topology");
 
     private String status;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/TopologyAssign.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/TopologyAssign.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/TopologyAssign.java
index fd6f461..51da198 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/TopologyAssign.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/TopologyAssign.java
@@ -17,33 +17,12 @@
  */
 package com.alibaba.jstorm.daemon.nimbus;
 
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.concurrent.LinkedBlockingQueue;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import backtype.storm.Config;
 import backtype.storm.generated.StormTopology;
 import backtype.storm.scheduler.WorkerSlot;
-
 import com.alibaba.jstorm.client.ConfigExtension;
-import com.alibaba.jstorm.cluster.Cluster;
-import com.alibaba.jstorm.cluster.StormBase;
-import com.alibaba.jstorm.cluster.StormClusterState;
-import com.alibaba.jstorm.cluster.StormConfig;
-import com.alibaba.jstorm.cluster.StormStatus;
+import com.alibaba.jstorm.cluster.*;
+import com.alibaba.jstorm.daemon.nimbus.TopologyMetricsRunnable.TaskStartEvent;
 import com.alibaba.jstorm.daemon.supervisor.SupervisorInfo;
 import com.alibaba.jstorm.schedule.Assignment;
 import com.alibaba.jstorm.schedule.AssignmentBak;
@@ -51,15 +30,23 @@ import com.alibaba.jstorm.schedule.IToplogyScheduler;
 import com.alibaba.jstorm.schedule.TopologyAssignContext;
 import com.alibaba.jstorm.schedule.default_assign.DefaultTopologyScheduler;
 import com.alibaba.jstorm.schedule.default_assign.ResourceWorkerSlot;
+import com.alibaba.jstorm.task.TaskInfo;
 import com.alibaba.jstorm.utils.FailedAssignTopologyException;
 import com.alibaba.jstorm.utils.JStormUtils;
 import com.alibaba.jstorm.utils.PathUtils;
 import com.alibaba.jstorm.utils.TimeUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.*;
+import java.util.Map.Entry;
+import java.util.concurrent.LinkedBlockingQueue;
 
 public class TopologyAssign implements Runnable {
 
-    private final static Logger LOG = LoggerFactory
-            .getLogger(TopologyAssign.class);
+    private final static Logger LOG = LoggerFactory.getLogger(TopologyAssign.class);
 
     /**
      * private constructor function to avoid multiple instance
@@ -93,7 +80,7 @@ public class TopologyAssign implements Runnable {
     public void init(NimbusData nimbusData) {
         this.nimbusData = nimbusData;
 
-        //this.cleanupTimeoutSec = 60;
+        // this.cleanupTimeoutSec = 60;
 
         this.schedulers = new HashMap<String, IToplogyScheduler>();
 
@@ -113,8 +100,7 @@ public class TopologyAssign implements Runnable {
         thread.interrupt();
     }
 
-    protected static LinkedBlockingQueue<TopologyAssignEvent> queue =
-            new LinkedBlockingQueue<TopologyAssignEvent>();
+    protected static LinkedBlockingQueue<TopologyAssignEvent> queue = new LinkedBlockingQueue<TopologyAssignEvent>();
 
     public static void push(TopologyAssignEvent event) {
         queue.offer(event);
@@ -159,12 +145,37 @@ public class TopologyAssign implements Runnable {
      * @return
      */
     protected boolean doTopologyAssignment(TopologyAssignEvent event) {
-        Assignment assignment = null;
+        Assignment assignment;
         try {
+            Assignment oldAssignment = null;
+            boolean isReassign = event.isScratch();
+            if (isReassign) {
+                oldAssignment = nimbusData.getStormClusterState().assignment_info(event.getTopologyId(), null);
+            }
             assignment = mkAssignment(event);
 
-            if (!(event.isScratch()))
+            // notify jstorm monitor on task assign/reassign/rebalance
+            TaskStartEvent taskEvent = new TaskStartEvent();
+            taskEvent.oldAssignment = oldAssignment;
+            taskEvent.newAssignment = assignment;
+            taskEvent.topologyId = event.getTopologyId();
+            taskEvent.clusterName = nimbusData.getClusterName();
+            taskEvent.timestamp = System.currentTimeMillis();
+
+            Map<Integer, String> task2Component;
+            // get from nimbus cache first
+            Map<Integer, TaskInfo> taskInfoMap = Cluster.get_all_taskInfo(nimbusData.getStormClusterState(), event.getTopologyId());
+            if (taskInfoMap != null) {
+                task2Component = Common.getTaskToComponent(taskInfoMap);
+            } else {
+                task2Component = Common.getTaskToComponent(Cluster.get_all_taskInfo(nimbusData.getStormClusterState(), event.getTopologyId()));
+            }
+            taskEvent.task2Component = task2Component;
+            nimbusData.getMetricRunnable().pushEvent(taskEvent);
+
+            if (!isReassign) {
                 setTopologyStatus(event);
+            }
         } catch (Throwable e) {
             LOG.error("Failed to assign topology " + event.getTopologyId(), e);
             event.fail(e.getMessage());
@@ -180,8 +191,6 @@ public class TopologyAssign implements Runnable {
     /**
      * cleanup the topologies which are not in ZK /topology, but in other place
      * 
-     * @param nimbusData
-     * @param active_topologys
      * @throws Exception
      */
     public void cleanupDisappearedTopology() throws Exception {
@@ -192,8 +201,7 @@ public class TopologyAssign implements Runnable {
             return;
         }
 
-        Set<String> cleanupIds =
-                get_cleanup_ids(clusterState, active_topologys);
+        Set<String> cleanupIds = get_cleanup_ids(clusterState, active_topologys);
 
         for (String topologyId : cleanupIds) {
 
@@ -202,13 +210,12 @@ public class TopologyAssign implements Runnable {
             clusterState.try_remove_storm(topologyId);
             //
             nimbusData.getTaskHeartbeatsCache().remove(topologyId);
+            nimbusData.getTasksHeartbeat().remove(topologyId);
 
             NimbusUtils.removeTopologyTaskTimeout(nimbusData, topologyId);
 
             // get /nimbus/stormdist/topologyId
-            String master_stormdist_root =
-                    StormConfig.masterStormdistRoot(nimbusData.getConf(),
-                            topologyId);
+            String master_stormdist_root = StormConfig.masterStormdistRoot(nimbusData.getConf(), topologyId);
             try {
                 // delete topologyId local dir
                 PathUtils.rmr(master_stormdist_root);
@@ -218,14 +225,12 @@ public class TopologyAssign implements Runnable {
         }
     }
 
-    private void get_code_ids(List<String> code_ids,
-            HashSet<String> latest_code_ids) throws IOException {
+    private void get_code_ids(List<String> code_ids, HashSet<String> latest_code_ids) throws IOException {
         Map conf = nimbusData.getConf();
 
         String master_stormdist_root = StormConfig.masterStormdistRoot(conf);
         // listdir /local-dir/nimbus/stormdist
-        List<String> all_code_ids =
-                PathUtils.read_dir_contents(master_stormdist_root);
+        List<String> all_code_ids = PathUtils.read_dir_contents(master_stormdist_root);
         code_ids.addAll(all_code_ids);
 
         long now = System.currentTimeMillis();
@@ -238,9 +243,6 @@ public class TopologyAssign implements Runnable {
 
                 long modify = file.lastModified();
 
-                if (now - modify < cleanupTimeoutSec * 1000) {
-                    latest_code_ids.add(dir);
-                }
             } catch (Exception exception) {
                 LOG.error("Failed to get modify time of " + dir, exception);
             }
@@ -256,14 +258,14 @@ public class TopologyAssign implements Runnable {
      * @return
      * @throws Exception
      */
-    private Set<String> get_cleanup_ids(StormClusterState clusterState,
-            List<String> active_topologys) throws Exception {
+    private Set<String> get_cleanup_ids(StormClusterState clusterState, List<String> active_topologys) throws Exception {
 
         List<String> task_ids = clusterState.task_storms();
         List<String> heartbeat_ids = clusterState.heartbeat_storms();
         List<String> error_ids = clusterState.task_error_storms();
         List<String> assignment_ids = clusterState.assignments(null);
         List<String> metric_ids = clusterState.get_metrics();
+        List<String> backpressure_ids = clusterState.backpressureInfos();
 
         List<String> code_ids = new ArrayList<String>();
         HashSet<String> latest_code_ids = new HashSet<String>();
@@ -272,8 +274,7 @@ public class TopologyAssign implements Runnable {
         // Set<String> assigned_ids =
         // JStormUtils.listToSet(clusterState.active_storms());
         Set<String> to_cleanup_ids = new HashSet<String>();
-        Set<String> pendingTopologys =
-                nimbusData.getPendingSubmitTopoloygs().keySet();
+        Set<String> pendingTopologys = nimbusData.getPendingSubmitTopoloygs().keySet();
 
         if (task_ids != null) {
             to_cleanup_ids.addAll(task_ids);
@@ -294,9 +295,13 @@ public class TopologyAssign implements Runnable {
         if (code_ids != null) {
             to_cleanup_ids.addAll(code_ids);
         }
-        
+
         if (metric_ids != null) {
-        	to_cleanup_ids.addAll(metric_ids);
+            to_cleanup_ids.addAll(metric_ids);
+        }
+
+        if (backpressure_ids != null) {
+            to_cleanup_ids.addAll(backpressure_ids);
         }
 
         if (active_topologys != null) {
@@ -309,8 +314,7 @@ public class TopologyAssign implements Runnable {
         }
 
         /**
-         * Why need to remove latest code. Due to competition between
-         * Thrift.threads and TopologyAssign thread
+         * Why we need to remove the latest code: due to competition between Thrift threads and the TopologyAssign thread
          * 
          */
         to_cleanup_ids.removeAll(latest_code_ids);
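
In short, get_cleanup_ids builds the union of every per-topology id list found in ZK and on disk, then subtracts the active, pending, and freshly-submitted ("latest code") topologies so that a submission racing in from a Thrift thread is never cleaned up. A toy version of that set algebra, with made-up ids rather than the JStorm types:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public class CleanupIdsSketch {
        public static void main(String[] args) {
            Set<String> toCleanup = new HashSet<String>();
            toCleanup.addAll(Arrays.asList("t1", "t2", "t3")); // task/heartbeat/assignment/metric ids from ZK
            toCleanup.addAll(Arrays.asList("t3", "t4"));       // code dirs found on local disk

            Set<String> active = new HashSet<String>(Arrays.asList("t1"));     // still running
            Set<String> latestCode = new HashSet<String>(Arrays.asList("t4")); // just submitted

            toCleanup.removeAll(active);
            toCleanup.removeAll(latestCode);
            System.out.println(toCleanup); // prints [t2, t3]
        }
    }
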
@@ -321,11 +325,6 @@ public class TopologyAssign implements Runnable {
 
     /**
      * start a topology: set active status of the topology
-     * 
-     * @param topologyName
-     * @param stormClusterState
-     * @param topologyId
-     * @throws Exception
      */
     public void setTopologyStatus(TopologyAssignEvent event) throws Exception {
         StormClusterState stormClusterState = nimbusData.getStormClusterState();
@@ -339,15 +338,11 @@ public class TopologyAssign implements Runnable {
             status = event.getOldStatus();
         }
 
-        boolean isEnable =
-                ConfigExtension
-                        .isEnablePerformanceMetrics(nimbusData.getConf());
+        boolean isEnable = ConfigExtension.isEnablePerformanceMetrics(nimbusData.getConf());
 
         StormBase stormBase = stormClusterState.storm_base(topologyId, null);
         if (stormBase == null) {
-            stormBase =
-                    new StormBase(topologyName, TimeUtils.current_time_secs(),
-                            status, group);
+            stormBase = new StormBase(topologyName, TimeUtils.current_time_secs(), status, group);
             stormBase.setEnableMonitor(isEnable);
             stormClusterState.activate_storm(topologyId, stormBase);
 
@@ -367,18 +362,20 @@ public class TopologyAssign implements Runnable {
 
     }
 
-    protected TopologyAssignContext prepareTopologyAssign(
-            TopologyAssignEvent event) throws Exception {
+    protected TopologyAssignContext prepareTopologyAssign(TopologyAssignEvent event) throws Exception {
         TopologyAssignContext ret = new TopologyAssignContext();
 
         String topologyId = event.getTopologyId();
+        ret.setTopologyId(topologyId);
+
+        int topoMasterId = nimbusData.getTasksHeartbeat().get(topologyId).get_topologyMasterId();
+        ret.setTopologyMasterTaskId(topoMasterId);
+        LOG.info("prepareTopologyAssign, topoMasterId={}", topoMasterId);
 
         Map<Object, Object> nimbusConf = nimbusData.getConf();
-        Map<Object, Object> topologyConf =
-                StormConfig.read_nimbus_topology_conf(nimbusConf, topologyId);
+        Map<Object, Object> topologyConf = StormConfig.read_nimbus_topology_conf(nimbusConf, topologyId);
 
-        StormTopology rawTopology =
-                StormConfig.read_nimbus_topology_code(nimbusConf, topologyId);
+        StormTopology rawTopology = StormConfig.read_nimbus_topology_code(nimbusConf, topologyId);
         ret.setRawTopology(rawTopology);
 
         Map stormConf = new HashMap();
@@ -389,8 +386,7 @@ public class TopologyAssign implements Runnable {
         StormClusterState stormClusterState = nimbusData.getStormClusterState();
 
         // get all running supervisor, don't need callback to watch supervisor
-        Map<String, SupervisorInfo> supInfos =
-                Cluster.get_all_SupervisorInfo(stormClusterState, null);
+        Map<String, SupervisorInfo> supInfos = Cluster.get_all_SupervisorInfo(stormClusterState, null);
         // init all AvailableWorkerPorts
         for (Entry<String, SupervisorInfo> supInfo : supInfos.entrySet()) {
             SupervisorInfo supervisor = supInfo.getValue();
@@ -400,21 +396,16 @@ public class TopologyAssign implements Runnable {
 
         getAliveSupervsByHb(supInfos, nimbusConf);
         if (supInfos.size() == 0) {
-            throw new FailedAssignTopologyException(
-                    "Failed to make assignment " + topologyId
-                            + ", due to no alive supervisor");
+            throw new FailedAssignTopologyException("Failed to make assignment " + topologyId + ", due to no alive supervisor");
         }
 
-        Map<Integer, String> taskToComponent =
-                Cluster.get_all_task_component(stormClusterState, topologyId, null);
+        Map<Integer, String> taskToComponent = Cluster.get_all_task_component(stormClusterState, topologyId, null);
         ret.setTaskToComponent(taskToComponent);
 
         // get taskids /ZK/tasks/topologyId
         Set<Integer> allTaskIds = taskToComponent.keySet();
         if (allTaskIds == null || allTaskIds.size() == 0) {
-            String errMsg =
-                    "Failed to get all task ID list from /ZK-dir/tasks/"
-                            + topologyId;
+            String errMsg = "Failed to get all task ID list from /ZK-dir/tasks/" + topologyId;
             LOG.warn(errMsg);
             throw new IOException(errMsg);
         }
@@ -425,18 +416,31 @@ public class TopologyAssign implements Runnable {
         // machine
         Set<Integer> unstoppedTasks = new HashSet<Integer>();
         Set<Integer> deadTasks = new HashSet<Integer>();
-        Set<ResourceWorkerSlot> unstoppedWorkers =
-                new HashSet<ResourceWorkerSlot>();
+        Set<ResourceWorkerSlot> unstoppedWorkers = new HashSet<ResourceWorkerSlot>();
 
-        Assignment existingAssignment =
-                stormClusterState.assignment_info(topologyId, null);
+        Assignment existingAssignment = stormClusterState.assignment_info(topologyId, null);
         if (existingAssignment != null) {
             aliveTasks = getAliveTasks(topologyId, allTaskIds);
-            unstoppedTasks =
-                    getUnstoppedSlots(aliveTasks, supInfos, existingAssignment);
 
-            deadTasks.addAll(allTaskIds);
-            deadTasks.removeAll(aliveTasks);
+            /*
+             * Check whether the topology master task is alive first, since all task
+             * heartbeat info is reported through the topology master.
+             * If the master is dead, reassign the topology master first.
+             */
+            if (aliveTasks.contains(topoMasterId) == false) {
+                ResourceWorkerSlot worker = existingAssignment.getWorkerByTaskId(topoMasterId);
+                deadTasks.addAll(worker.getTasks());
+
+                Set<Integer> tempSet = new HashSet<Integer>(allTaskIds);
+                tempSet.removeAll(deadTasks);
+                aliveTasks.addAll(tempSet);
+                aliveTasks.removeAll(deadTasks);
+            } else {
+                deadTasks.addAll(allTaskIds);
+                deadTasks.removeAll(aliveTasks);
+            }
+
+            unstoppedTasks = getUnstoppedSlots(aliveTasks, supInfos, existingAssignment);
         }
 
         ret.setDeadTaskIds(deadTasks);
@@ -451,9 +455,7 @@ public class TopologyAssign implements Runnable {
             ret.setAssignType(TopologyAssignContext.ASSIGN_TYPE_NEW);
 
             try {
-                AssignmentBak lastAssignment =
-                        stormClusterState.assignment_bak(event
-                                .getTopologyName());
+                AssignmentBak lastAssignment = stormClusterState.assignment_bak(event.getTopologyName());
                 if (lastAssignment != null) {
                     ret.setOldAssignment(lastAssignment.getAssignment());
                 }
@@ -465,13 +467,11 @@ public class TopologyAssign implements Runnable {
             if (event.isScratch()) {
                 ret.setAssignType(TopologyAssignContext.ASSIGN_TYPE_REBALANCE);
                 ret.setIsReassign(event.isReassign());
-                unstoppedWorkers =
-                        getUnstoppedWorkers(unstoppedTasks, existingAssignment);
+                unstoppedWorkers = getUnstoppedWorkers(unstoppedTasks, existingAssignment);
                 ret.setUnstoppedWorkers(unstoppedWorkers);
             } else {
                 ret.setAssignType(TopologyAssignContext.ASSIGN_TYPE_MONITOR);
-                unstoppedWorkers =
-                        getUnstoppedWorkers(aliveTasks, existingAssignment);
+                unstoppedWorkers = getUnstoppedWorkers(aliveTasks, existingAssignment);
                 ret.setUnstoppedWorkers(unstoppedWorkers);
             }
         }
@@ -480,13 +480,8 @@ public class TopologyAssign implements Runnable {
     }
 
     /**
-     * make assignments for a topology The nimbus core function, this function
-     * has been totally rewrite
+     * make assignments for a topology. This is the nimbus core function; it has been totally rewritten
      * 
-     * @param nimbusData NimbusData
-     * @param topologyId String
-     * @param isScratch Boolean: isScratch is false unless rebalancing the
-     *            topology
      * @throws Exception
      */
     public Assignment mkAssignment(TopologyAssignEvent event) throws Exception {
@@ -500,8 +495,7 @@ public class TopologyAssign implements Runnable {
 
         if (!StormConfig.local_mode(nimbusData.getConf())) {
 
-            IToplogyScheduler scheduler =
-                    schedulers.get(DEFAULT_SCHEDULER_NAME);
+            IToplogyScheduler scheduler = schedulers.get(DEFAULT_SCHEDULER_NAME);
 
             assignments = scheduler.assignTasks(context);
 
@@ -511,29 +505,24 @@ public class TopologyAssign implements Runnable {
 
         Assignment assignment = null;
         if (assignments != null && assignments.size() > 0) {
-            Map<String, String> nodeHost =
-                    getTopologyNodeHost(context.getCluster(),
-                            context.getOldAssignment(), assignments);
+            Map<String, String> nodeHost = getTopologyNodeHost(context.getCluster(), context.getOldAssignment(), assignments);
 
-            Map<Integer, Integer> startTimes =
-                    getTaskStartTimes(context, nimbusData, topologyId,
-                            context.getOldAssignment(), assignments);
+            Map<Integer, Integer> startTimes = getTaskStartTimes(context, nimbusData, topologyId, context.getOldAssignment(), assignments);
 
-            String codeDir =
-                    StormConfig.masterStormdistRoot(nimbusData.getConf(),
-                            topologyId);
+            String codeDir = StormConfig.masterStormdistRoot(nimbusData.getConf(), topologyId);
 
-            assignment =
-                    new Assignment(codeDir, assignments, nodeHost, startTimes);
+            assignment = new Assignment(codeDir, assignments, nodeHost, startTimes);
 
-            StormClusterState stormClusterState =
-                    nimbusData.getStormClusterState();
+            // the topology binary has changed
+            if (event.isScaleTopology()) {
+                assignment.setAssignmentType(Assignment.AssignmentType.ScaleTopology);
+            }
+            StormClusterState stormClusterState = nimbusData.getStormClusterState();
 
             stormClusterState.set_assignment(topologyId, assignment);
 
             // update task heartbeat's start time
-            NimbusUtils.updateTaskHbStartTime(nimbusData, assignment,
-                    topologyId);
+            NimbusUtils.updateTaskHbStartTime(nimbusData, assignment, topologyId);
 
             // @@@ TODO
 
@@ -547,14 +536,13 @@ public class TopologyAssign implements Runnable {
 
             NimbusUtils.updateTopologyTaskTimeout(nimbusData, topologyId);
 
-            LOG.info("Successfully make assignment for topology id "
-                    + topologyId + ": " + assignment);
+            LOG.info("Successfully make assignment for topology id " + topologyId + ": " + assignment);
         }
         return assignment;
     }
 
     private static Set<ResourceWorkerSlot> mkLocalAssignment(
-            TopologyAssignContext context) {
+            TopologyAssignContext context) throws Exception {
         Set<ResourceWorkerSlot> result = new HashSet<ResourceWorkerSlot>();
         Map<String, SupervisorInfo> cluster = context.getCluster();
         if (cluster.size() != 1)
@@ -565,7 +553,15 @@ public class TopologyAssign implements Runnable {
             supervisorId = entry.getKey();
             localSupervisor = entry.getValue();
         }
-        int port = localSupervisor.getAvailableWorkerPorts().iterator().next();
+        int port = -1;
+        if (localSupervisor.getAvailableWorkerPorts().iterator().hasNext()) {
+            port = localSupervisor.getAvailableWorkerPorts().iterator().next();
+        } else {
+            LOG.info(" amount of worker's ports is not enough");
+            throw new FailedAssignTopologyException(
+                    "Failed to make assignment " + ", due to no enough ports");
+        }
+
         ResourceWorkerSlot worker = new ResourceWorkerSlot(supervisorId, port);
         worker.setTasks(new HashSet<Integer>(context.getAllTaskIds()));
         worker.setHostname(localSupervisor.getHostName());
@@ -573,16 +569,8 @@ public class TopologyAssign implements Runnable {
         return result;
     }
 
-    /**
-     * @param existingAssignment
-     * @param taskWorkerSlot
-     * @return
-     * @throws Exception
-     */
-    public static Map<Integer, Integer> getTaskStartTimes(
-            TopologyAssignContext context, NimbusData nimbusData,
-            String topologyId, Assignment existingAssignment,
-            Set<ResourceWorkerSlot> workers) throws Exception {
+    public static Map<Integer, Integer> getTaskStartTimes(TopologyAssignContext context, NimbusData nimbusData, String topologyId,
+            Assignment existingAssignment, Set<ResourceWorkerSlot> workers) throws Exception {
 
         Map<Integer, Integer> startTimes = new TreeMap<Integer, Integer>();
 
@@ -600,8 +588,7 @@ public class TopologyAssign implements Runnable {
         Set<ResourceWorkerSlot> oldWorkers = new HashSet<ResourceWorkerSlot>();
 
         if (existingAssignment != null) {
-            Map<Integer, Integer> taskStartTimeSecs =
-                    existingAssignment.getTaskStartTimeSecs();
+            Map<Integer, Integer> taskStartTimeSecs = existingAssignment.getTaskStartTimeSecs();
             if (taskStartTimeSecs != null) {
                 startTimes.putAll(taskStartTimeSecs);
             }
@@ -616,23 +603,21 @@ public class TopologyAssign implements Runnable {
         int nowSecs = TimeUtils.current_time_secs();
         for (Integer changedTaskId : changedTaskIds) {
             startTimes.put(changedTaskId, nowSecs);
-            zkClusterState.remove_task_heartbeat(topologyId, changedTaskId);
+            NimbusUtils.removeTopologyTaskHb(nimbusData, topologyId, changedTaskId);
         }
 
         Set<Integer> removedTaskIds = getRemovedTaskIds(oldWorkers, workers);
         for (Integer removedTaskId : removedTaskIds) {
             startTimes.remove(removedTaskId);
-            zkClusterState.remove_task_heartbeat(topologyId, removedTaskId);
+            NimbusUtils.removeTopologyTaskHb(nimbusData, topologyId, removedTaskId);
         }
 
-        LOG.info("Task assignment has been changed: " + changedTaskIds
-                + ", removed tasks " + removedTaskIds);
+        LOG.info("Task assignment has been changed: " + changedTaskIds + ", removed tasks " + removedTaskIds);
         return startTimes;
     }
 
-    public static Map<String, String> getTopologyNodeHost(
-            Map<String, SupervisorInfo> supervisorMap,
-            Assignment existingAssignment, Set<ResourceWorkerSlot> workers) {
+    public static Map<String, String> getTopologyNodeHost(Map<String, SupervisorInfo> supervisorMap, Assignment existingAssignment,
+            Set<ResourceWorkerSlot> workers) {
 
        // the following removes unused nodes from allNodeHost
         Set<String> usedNodes = new HashSet<String>();
@@ -649,8 +634,7 @@ public class TopologyAssign implements Runnable {
         }
 
         // get alive supervisorMap Map<supervisorId, hostname>
-        Map<String, String> nodeHost =
-                SupervisorInfo.getNodeHost(supervisorMap);
+        Map<String, String> nodeHost = SupervisorInfo.getNodeHost(supervisorMap);
         if (nodeHost != null) {
             allNodeHost.putAll(nodeHost);
         }
@@ -661,8 +645,7 @@ public class TopologyAssign implements Runnable {
             if (allNodeHost.containsKey(supervisorId)) {
                 ret.put(supervisorId, allNodeHost.get(supervisorId));
             } else {
-                LOG.warn("Node " + supervisorId
-                        + " doesn't in the supervisor list");
+                LOG.warn("Node " + supervisorId + " doesn't in the supervisor list");
             }
         }
 
@@ -672,16 +655,12 @@ public class TopologyAssign implements Runnable {
     /**
      * get all taskids which are assigned newly or reassigned
      * 
-     * @param taskToWorkerSlot
-     * @param newtaskToWorkerSlot
      * @return Set<Integer> taskid which is assigned newly or reassigned
      */
-    public static Set<Integer> getNewOrChangedTaskIds(
-            Set<ResourceWorkerSlot> oldWorkers, Set<ResourceWorkerSlot> workers) {
+    public static Set<Integer> getNewOrChangedTaskIds(Set<ResourceWorkerSlot> oldWorkers, Set<ResourceWorkerSlot> workers) {
 
         Set<Integer> rtn = new HashSet<Integer>();
-        HashMap<String, ResourceWorkerSlot> workerPortMap =
-                HostPortToWorkerMap(oldWorkers);
+        HashMap<String, ResourceWorkerSlot> workerPortMap = HostPortToWorkerMap(oldWorkers);
         for (ResourceWorkerSlot worker : workers) {
             ResourceWorkerSlot oldWorker = workerPortMap.get(worker.getHostPort());
             if (oldWorker != null) {
@@ -691,14 +670,15 @@ public class TopologyAssign implements Runnable {
                         rtn.add(task);
                 }
             } else {
-                rtn.addAll(worker.getTasks());
+                if (worker.getTasks() != null) {
+                    rtn.addAll(worker.getTasks());
+                }
             }
         }
         return rtn;
     }
 
-    public static Set<Integer> getRemovedTaskIds(
-            Set<ResourceWorkerSlot> oldWorkers, Set<ResourceWorkerSlot> workers) {
+    public static Set<Integer> getRemovedTaskIds(Set<ResourceWorkerSlot> oldWorkers, Set<ResourceWorkerSlot> workers) {
 
         Set<Integer> rtn = new HashSet<Integer>();
         Set<Integer> oldTasks = getTaskSetFromWorkerSet(oldWorkers);
@@ -711,8 +691,7 @@ public class TopologyAssign implements Runnable {
         return rtn;
     }
 
-    private static Set<Integer> getTaskSetFromWorkerSet(
-            Set<ResourceWorkerSlot> workers) {
+    private static Set<Integer> getTaskSetFromWorkerSet(Set<ResourceWorkerSlot> workers) {
         Set<Integer> rtn = new HashSet<Integer>();
         for (ResourceWorkerSlot worker : workers) {
             rtn.addAll(worker.getTasks());
@@ -720,10 +699,8 @@ public class TopologyAssign implements Runnable {
         return rtn;
     }
 
-    private static HashMap<String, ResourceWorkerSlot> HostPortToWorkerMap(
-            Set<ResourceWorkerSlot> workers) {
-        HashMap<String, ResourceWorkerSlot> rtn =
-                new HashMap<String, ResourceWorkerSlot>();
+    private static HashMap<String, ResourceWorkerSlot> HostPortToWorkerMap(Set<ResourceWorkerSlot> workers) {
+        HashMap<String, ResourceWorkerSlot> rtn = new HashMap<String, ResourceWorkerSlot>();
         for (ResourceWorkerSlot worker : workers) {
             rtn.put(worker.getHostPort(), worker);
         }
@@ -731,17 +708,14 @@ public class TopologyAssign implements Runnable {
     }
 
     /**
-     * sort slots, the purpose is to ensure that the tasks are assigned in
-     * balancing
+     * sort slots; the purpose is to ensure that tasks are assigned in a balanced way
      * 
      * @param allSlots
      * @return List<WorkerSlot>
      */
-    public static List<WorkerSlot> sortSlots(Set<WorkerSlot> allSlots,
-            int needSlotNum) {
+    public static List<WorkerSlot> sortSlots(Set<WorkerSlot> allSlots, int needSlotNum) {
 
-        Map<String, List<WorkerSlot>> nodeMap =
-                new HashMap<String, List<WorkerSlot>>();
+        Map<String, List<WorkerSlot>> nodeMap = new HashMap<String, List<WorkerSlot>>();
 
         // group by first
         for (WorkerSlot np : allSlots) {
@@ -778,8 +752,7 @@ public class TopologyAssign implements Runnable {
         }
 
         // interleave
-        List<List<WorkerSlot>> splitup =
-                new ArrayList<List<WorkerSlot>>(nodeMap.values());
+        List<List<WorkerSlot>> splitup = new ArrayList<List<WorkerSlot>>(nodeMap.values());
 
         Collections.sort(splitup, new Comparator<List<WorkerSlot>>() {
 
@@ -801,13 +774,8 @@ public class TopologyAssign implements Runnable {
 
     /**
      * Get unstopped slots from alive task list
-     * 
-     * @param aliveAssigned
-     * @param supInfos
-     * @return
      */
-    public Set<Integer> getUnstoppedSlots(Set<Integer> aliveTasks,
-            Map<String, SupervisorInfo> supInfos, Assignment existAssignment) {
+    public Set<Integer> getUnstoppedSlots(Set<Integer> aliveTasks, Map<String, SupervisorInfo> supInfos, Assignment existAssignment) {
         Set<Integer> ret = new HashSet<Integer>();
 
         Set<ResourceWorkerSlot> oldWorkers = existAssignment.getWorkers();
@@ -835,8 +803,7 @@ public class TopologyAssign implements Runnable {
 
     }
 
-    private Set<ResourceWorkerSlot> getUnstoppedWorkers(
-            Set<Integer> aliveTasks, Assignment existAssignment) {
+    private Set<ResourceWorkerSlot> getUnstoppedWorkers(Set<Integer> aliveTasks, Assignment existAssignment) {
         Set<ResourceWorkerSlot> ret = new HashSet<ResourceWorkerSlot>();
         for (ResourceWorkerSlot worker : existAssignment.getWorkers()) {
             boolean alive = true;
@@ -860,12 +827,9 @@ public class TopologyAssign implements Runnable {
      * @param stormClusterState
      * @throws Exception
      */
-    public static void getFreeSlots(
-            Map<String, SupervisorInfo> supervisorInfos,
-            StormClusterState stormClusterState) throws Exception {
+    public static void getFreeSlots(Map<String, SupervisorInfo> supervisorInfos, StormClusterState stormClusterState) throws Exception {
 
-        Map<String, Assignment> assignments =
-                Cluster.get_all_assignment(stormClusterState, null);
+        Map<String, Assignment> assignments = Cluster.get_all_assignment(stormClusterState, null);
 
         for (Entry<String, Assignment> entry : assignments.entrySet()) {
             String topologyId = entry.getKey();
@@ -875,8 +839,7 @@ public class TopologyAssign implements Runnable {
 
             for (ResourceWorkerSlot worker : workers) {
 
-                SupervisorInfo supervisorInfo =
-                        supervisorInfos.get(worker.getNodeId());
+                SupervisorInfo supervisorInfo = supervisorInfos.get(worker.getNodeId());
                 if (supervisorInfo == null) {
                     // the supervisor is dead
                     continue;
@@ -888,31 +851,20 @@ public class TopologyAssign implements Runnable {
     }
 
     /**
-     * find all alived taskid Does not assume that clocks are synchronized. Task
-     * heartbeat is only used so that nimbus knows when it's received a new
-     * heartbeat. All timing is done by nimbus and tracked through
-     * task-heartbeat-cache
+     * find all alive task ids. Does not assume that clocks are synchronized. Task heartbeats are only used so that nimbus knows when it has received a new
+     * heartbeat. All timing is done by nimbus and tracked through the task-heartbeat-cache
      * 
-     * @param conf
-     * @param topologyId
-     * @param stormClusterState
-     * @param taskIds
-     * @param taskStartTimes
-     * @param taskHeartbeatsCache --Map<topologyId, Map<taskid,
-     *            Map<tkHbCacheTime, time>>>
      * @return Set<Integer> : taskid
      * @throws Exception
      */
-    public Set<Integer> getAliveTasks(String topologyId, Set<Integer> taskIds)
-            throws Exception {
+    public Set<Integer> getAliveTasks(String topologyId, Set<Integer> taskIds) throws Exception {
 
         Set<Integer> aliveTasks = new HashSet<Integer>();
 
         // taskIds is the list from ZK /ZK-DIR/tasks/topologyId
         for (int taskId : taskIds) {
 
-            boolean isDead =
-                    NimbusUtils.isTaskDead(nimbusData, topologyId, taskId);
+            boolean isDead = NimbusUtils.isTaskDead(nimbusData, topologyId, taskId);
             if (isDead == false) {
                 aliveTasks.add(taskId);
             }
@@ -925,24 +877,20 @@ public class TopologyAssign implements Runnable {
     /**
     * Backup the topology's Assignment to ZK
      * 
-     * @@@ Question Do we need to do backup operation every time?
      * @param assignment
      * @param event
+     * @@@ Question Do we need to do backup operation every time?
      */
-    public void backupAssignment(Assignment assignment,
-            TopologyAssignEvent event) {
+    public void backupAssignment(Assignment assignment, TopologyAssignEvent event) {
         String topologyId = event.getTopologyId();
         String topologyName = event.getTopologyName();
         try {
 
-            StormClusterState zkClusterState =
-                    nimbusData.getStormClusterState();
+            StormClusterState zkClusterState = nimbusData.getStormClusterState();
            // one little problem: tasks are fetched twice when assigning one topology
-            Map<Integer, String> tasks =
-                            Cluster.get_all_task_component(zkClusterState, topologyId, null);
+            Map<Integer, String> tasks = Cluster.get_all_task_component(zkClusterState, topologyId, null);
 
-            Map<String, List<Integer>> componentTasks =
-                    JStormUtils.reverse_map(tasks);
+            Map<String, List<Integer>> componentTasks = JStormUtils.reverse_map(tasks);
 
             for (Entry<String, List<Integer>> entry : componentTasks.entrySet()) {
                 List<Integer> keys = entry.getValue();
@@ -951,31 +899,24 @@ public class TopologyAssign implements Runnable {
 
             }
 
-            AssignmentBak assignmentBak =
-                    new AssignmentBak(componentTasks, assignment);
+            AssignmentBak assignmentBak = new AssignmentBak(componentTasks, assignment);
             zkClusterState.backup_assignment(topologyName, assignmentBak);
 
         } catch (Exception e) {
-            LOG.warn("Failed to backup " + topologyId + " assignment "
-                    + assignment, e);
+            LOG.warn("Failed to backup " + topologyId + " assignment " + assignment, e);
         }
     }
 
-    private void getAliveSupervsByHb(
-            Map<String, SupervisorInfo> supervisorInfos, Map conf) {
+    private void getAliveSupervsByHb(Map<String, SupervisorInfo> supervisorInfos, Map conf) {
         int currentTime = TimeUtils.current_time_secs();
-        int hbTimeout =
-                JStormUtils.parseInt(
-                        conf.get(Config.NIMBUS_SUPERVISOR_TIMEOUT_SECS),
-                        (JStormUtils.MIN_1 * 3));
+        int hbTimeout = JStormUtils.parseInt(conf.get(Config.NIMBUS_SUPERVISOR_TIMEOUT_SECS), (JStormUtils.MIN_1 * 3));
         Set<String> supervisorTobeRemoved = new HashSet<String>();
 
         for (Entry<String, SupervisorInfo> entry : supervisorInfos.entrySet()) {
             SupervisorInfo supInfo = entry.getValue();
             int lastReportTime = supInfo.getTimeSecs();
             if ((currentTime - lastReportTime) > hbTimeout) {
-                LOG.warn("Supervisor-" + supInfo.getHostName()
-                        + " is dead. lastReportTime=" + lastReportTime);
+                LOG.warn("Supervisor-" + supInfo.getHostName() + " is dead. lastReportTime=" + lastReportTime);
                 supervisorTobeRemoved.add(entry.getKey());
             }
         }
@@ -989,8 +930,6 @@ public class TopologyAssign implements Runnable {
      * @param args
      */
     public static void main(String[] args) {
-        // TODO Auto-generated method stub
-
     }
 
 }
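
The notable new behaviour above is in prepareTopologyAssign: because every task heartbeat is relayed through the topology master, a dead master makes all other tasks look dead, so the master's worker is reassigned first and the remaining tasks are provisionally treated as alive. A self-contained sketch of just that partitioning step, using placeholder types rather than the real JStorm classes:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public class MasterFirstPartitionSketch {
        /** Returns the task ids to reassign, giving the topology master priority. */
        static Set<Integer> deadTasks(Set<Integer> allTasks, Set<Integer> aliveTasks,
                                      int masterId, Set<Integer> masterWorkerTasks) {
            Set<Integer> dead = new HashSet<Integer>();
            if (!aliveTasks.contains(masterId)) {
                // Master is down: only its worker's tasks are declared dead this round.
                dead.addAll(masterWorkerTasks);
            } else {
                // Normal case: dead = all - alive.
                dead.addAll(allTasks);
                dead.removeAll(aliveTasks);
            }
            return dead;
        }

        public static void main(String[] args) {
            Set<Integer> all = new HashSet<Integer>(Arrays.asList(1, 2, 3, 4));
            Set<Integer> alive = new HashSet<Integer>(Arrays.asList(2, 3));
            // Task 1 is the topology master and shares a worker with task 4.
            System.out.println(deadTasks(all, alive, 1,
                    new HashSet<Integer>(Arrays.asList(1, 4)))); // prints [1, 4]
        }
    }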

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/TopologyAssignEvent.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/TopologyAssignEvent.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/TopologyAssignEvent.java
index 8725918..a0bf9b9 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/TopologyAssignEvent.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/nimbus/TopologyAssignEvent.java
@@ -25,7 +25,7 @@ import com.alibaba.jstorm.cluster.StormStatus;
 public class TopologyAssignEvent {
 
     // unit is minutes
-    private static final int DEFAULT_WAIT_TIME = 2;
+    private static final int DEFAULT_WAIT_TIME = 5;
     private String topologyId;
    private String topologyName; // if this field has been set, this is a create event
     private String group;
@@ -37,6 +37,14 @@ public class TopologyAssignEvent {
     private CountDownLatch latch = new CountDownLatch(1);
     private boolean isSuccess = false;
     private String errorMsg;
+    private boolean isScaleTopology = false;
+
+    public void setScaleTopology(boolean isScaleTopology){
+        this.isScaleTopology = isScaleTopology;
+    }
+    public boolean isScaleTopology(){
+        return isScaleTopology;
+    }
 
     public String getTopologyId() {
         return topologyId;


http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/SyncSupervisorEvent.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/SyncSupervisorEvent.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/SyncSupervisorEvent.java
index 32aa0f1..9ca1d8a 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/SyncSupervisorEvent.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/SyncSupervisorEvent.java
@@ -17,33 +17,11 @@
  */
 package com.alibaba.jstorm.daemon.supervisor;
 
-import java.io.File;
-import java.io.IOException;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.UUID;
-import java.util.concurrent.ConcurrentHashMap;
-
-import org.apache.commons.io.FileExistsException;
-import org.apache.commons.io.FileUtils;
-import org.apache.thrift.TException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import backtype.storm.Config;
 import backtype.storm.utils.LocalState;
-
 import com.alibaba.jstorm.callback.RunnableCallback;
 import com.alibaba.jstorm.client.ConfigExtension;
 import com.alibaba.jstorm.cluster.Cluster;
 import com.alibaba.jstorm.cluster.Common;
-import com.alibaba.jstorm.cluster.StormBase;
 import com.alibaba.jstorm.cluster.StormClusterState;
 import com.alibaba.jstorm.cluster.StormConfig;
 import com.alibaba.jstorm.daemon.worker.LocalAssignment;
@@ -55,16 +33,26 @@ import com.alibaba.jstorm.utils.JStormServerUtils;
 import com.alibaba.jstorm.utils.JStormUtils;
 import com.alibaba.jstorm.utils.PathUtils;
 import com.alibaba.jstorm.utils.TimeUtils;
+import org.apache.commons.io.FileExistsException;
+import org.apache.commons.io.FileUtils;
+import org.apache.thrift.TException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.URL;
+import java.util.*;
+import java.util.Map.Entry;
 
 /**
- * supervisor SynchronizeSupervisor workflow (1) writer local assignment to
- * LocalState (2) download new Assignment's topology (3) remove useless Topology
- * (4) push one SyncProcessEvent to SyncProcessEvent's EventManager
+ * supervisor SynchronizeSupervisor workflow: (1) write local assignments to LocalState (2) download new assignments' topologies (3) remove useless topologies
+ * (4) push one SyncProcessEvent to SyncProcessEvent's EventManager
+ * @author Johnfang (xiaojian.fxj@alibaba-inc.com)
  */
 class SyncSupervisorEvent extends RunnableCallback {
 
-    private static final Logger LOG = LoggerFactory
-            .getLogger(SyncSupervisorEvent.class);
+    private static final Logger LOG = LoggerFactory.getLogger(SyncSupervisorEvent.class);
 
     // private Supervisor supervisor;
 
@@ -95,10 +83,8 @@ class SyncSupervisorEvent extends RunnableCallback {
      * @param localState
      * @param syncProcesses
      */
-    public SyncSupervisorEvent(String supervisorId, Map conf,
-            EventManager processEventManager, EventManager syncSupEventManager,
-            StormClusterState stormClusterState, LocalState localState,
-            SyncProcessEvent syncProcesses, Heartbeat heartbeat) {
+    public SyncSupervisorEvent(String supervisorId, Map conf, EventManager processEventManager, EventManager syncSupEventManager,
+            StormClusterState stormClusterState, LocalState localState, SyncProcessEvent syncProcesses, Heartbeat heartbeat) {
 
         this.syncProcesses = syncProcesses;
         this.processEventManager = processEventManager;
@@ -112,38 +98,30 @@ class SyncSupervisorEvent extends RunnableCallback {
 
     @Override
     public void run() {
-        LOG.debug("Synchronizing supervisor, interval seconds:"
-                + TimeUtils.time_delta(lastTime));
+        LOG.debug("Synchronizing supervisor, interval seconds:" + TimeUtils.time_delta(lastTime));
         lastTime = TimeUtils.current_time_secs();
 
         try {
 
-            RunnableCallback syncCallback =
-                    new EventManagerZkPusher(this, syncSupEventManager);
+            RunnableCallback syncCallback = new EventManagerZkPusher(this, syncSupEventManager);
 
             /**
-             * Step 1: get all assignments and register /ZK-dir/assignment and
-             * every assignment watch
+             * Step 1: get all assignments and register /ZK-dir/assignment and every assignment watch
              * 
              */
-            Map<String, Assignment> assignments =
-                    Cluster.get_all_assignment(stormClusterState, syncCallback);
+            Map<String, Assignment> assignments = Cluster.get_all_assignment(stormClusterState, syncCallback);
             LOG.debug("Get all assignments " + assignments);
 
             /**
-             * Step 2: get topologyIds list from
-             * STORM-LOCAL-DIR/supervisor/stormdist/
+             * Step 2: get topologyIds list from STORM-LOCAL-DIR/supervisor/stormdist/
              */
-            List<String> downloadedTopologyIds =
-                    StormConfig.get_supervisor_toplogy_list(conf);
+            List<String> downloadedTopologyIds = StormConfig.get_supervisor_toplogy_list(conf);
             LOG.debug("Downloaded storm ids: " + downloadedTopologyIds);
 
             /**
-             * Step 3: get <port,LocalAssignments> from ZK local node's
-             * assignment
+             * Step 3: get <port,LocalAssignments> from ZK local node's assignment
              */
-            Map<Integer, LocalAssignment> zkAssignment =
-                    getLocalAssign(stormClusterState, supervisorId, assignments);
+            Map<Integer, LocalAssignment> zkAssignment = getLocalAssign(stormClusterState, supervisorId, assignments);
             Map<Integer, LocalAssignment> localAssignment;
             Set<String> updateTopologys;
 
@@ -152,35 +130,31 @@ class SyncSupervisorEvent extends RunnableCallback {
              */
             try {
                 LOG.debug("Writing local assignment " + zkAssignment);
-                localAssignment =
-                        (Map<Integer, LocalAssignment>) localState
-                                .get(Common.LS_LOCAL_ASSIGNMENTS);
+                localAssignment = (Map<Integer, LocalAssignment>) localState.get(Common.LS_LOCAL_ASSIGNMENTS);
                 if (localAssignment == null) {
                     localAssignment = new HashMap<Integer, LocalAssignment>();
                 }
                 localState.put(Common.LS_LOCAL_ASSIGNMENTS, zkAssignment);
 
-                updateTopologys =
-                        getUpdateTopologys(localAssignment, zkAssignment);
-                Set<String> reDownloadTopologys =
-                        getNeedReDownloadTopologys(localAssignment);
+                updateTopologys = getUpdateTopologys(localAssignment, zkAssignment, assignments);
+                Set<String> reDownloadTopologys = getNeedReDownloadTopologys(localAssignment);
                 if (reDownloadTopologys != null) {
                     updateTopologys.addAll(reDownloadTopologys);
                 }
             } catch (IOException e) {
-                LOG.error("put LS_LOCAL_ASSIGNMENTS " + zkAssignment
-                        + " of localState failed");
+                LOG.error("put LS_LOCAL_ASSIGNMENTS " + zkAssignment + " of localState failed");
                 throw e;
             }
 
             /**
              * Step 5: download code from ZK
              */
-            Map<String, String> topologyCodes =
-                    getTopologyCodeLocations(assignments, supervisorId);
+            Map<String, String> topologyCodes = getTopologyCodeLocations(assignments, supervisorId);
+
+            // downloadFailedTopologyIds: topologies whose binary couldn't be downloaded from nimbus
+            Set<String> downloadFailedTopologyIds = new HashSet<String>();
 
-            downloadTopology(topologyCodes, downloadedTopologyIds,
-                    updateTopologys, assignments);
+            downloadTopology(topologyCodes, downloadedTopologyIds, updateTopologys, assignments, downloadFailedTopologyIds);
 
             /**
              * Step 6: remove any downloaded useless topology
@@ -191,7 +165,7 @@ class SyncSupervisorEvent extends RunnableCallback {
              * Step 7: push syncProcesses Event
              */
             // processEventManager.add(syncProcesses);
-            syncProcesses.run(zkAssignment);
+            syncProcesses.run(zkAssignment, downloadFailedTopologyIds);
 
             // If everything is OK, set the trigger to update heartbeat of
             // supervisor
@@ -209,11 +183,9 @@ class SyncSupervisorEvent extends RunnableCallback {
      * @param conf
      * @param topologyId
      * @param masterCodeDir
-     * @param clusterMode
      * @throws IOException
      */
-    private void downloadStormCode(Map conf, String topologyId,
-            String masterCodeDir) throws IOException, TException {
+    private void downloadStormCode(Map conf, String topologyId, String masterCodeDir) throws IOException, TException {
         String clusterMode = StormConfig.cluster_mode(conf);
 
         if (clusterMode.endsWith("distributed")) {
@@ -224,17 +196,14 @@ class SyncSupervisorEvent extends RunnableCallback {
         }
     }
 
-    private void downloadLocalStormCode(Map conf, String topologyId,
-            String masterCodeDir) throws IOException, TException {
+    private void downloadLocalStormCode(Map conf, String topologyId, String masterCodeDir) throws IOException, TException {
 
         // STORM-LOCAL-DIR/supervisor/stormdist/storm-id
-        String stormroot =
-                StormConfig.supervisor_stormdist_root(conf, topologyId);
+        String stormroot = StormConfig.supervisor_stormdist_root(conf, topologyId);
 
         FileUtils.copyDirectory(new File(masterCodeDir), new File(stormroot));
 
-        ClassLoader classloader =
-                Thread.currentThread().getContextClassLoader();
+        ClassLoader classloader = Thread.currentThread().getContextClassLoader();
 
         String resourcesJar = resourcesJar();
 
@@ -244,20 +213,16 @@ class SyncSupervisorEvent extends RunnableCallback {
 
         if (resourcesJar != null) {
 
-            LOG.info("Extracting resources from jar at " + resourcesJar
-                    + " to " + targetDir);
+            LOG.info("Extracting resources from jar at " + resourcesJar + " to " + targetDir);
 
-            JStormUtils.extract_dir_from_jar(resourcesJar,
-                    StormConfig.RESOURCES_SUBDIR, stormroot);// extract dir
+            JStormUtils.extract_dir_from_jar(resourcesJar, StormConfig.RESOURCES_SUBDIR, stormroot);// extract dir
             // from jar;;
             // util.clj
         } else if (url != null) {
 
-            LOG.info("Copying resources at " + url.toString() + " to "
-                    + targetDir);
+            LOG.info("Copying resources at " + url.toString() + " to " + targetDir);
 
-            FileUtils.copyDirectory(new File(url.getFile()), (new File(
-                    targetDir)));
+            FileUtils.copyDirectory(new File(url.getFile()), (new File(targetDir)));
 
         }
     }
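
downloadDistributeStormCode (next hunk) keeps the pattern of fetching into a per-attempt tmp directory named by a random UUID and only afterwards moving it into the stormdist root, so a half-finished download never looks like a complete topology. The bare shape of that pattern, with illustrative paths and a stubbed-out fetch rather than the real helpers:

    import java.io.File;
    import java.io.IOException;
    import java.util.UUID;

    import org.apache.commons.io.FileUtils;

    public class TmpThenMoveSketch {
        /** Fetch into a unique tmp dir, then publish it by moving it into place. */
        static void fetch(File tmpBase, File finalDir) throws IOException {
            File tmp = new File(tmpBase, UUID.randomUUID().toString());
            FileUtils.forceMkdir(tmp);
            // ... download stormjar.jar / stormconf.ser into tmp here ...
            if (finalDir.exists()) {
                FileUtils.deleteQuietly(finalDir); // replace a stale copy
            }
            FileUtils.moveDirectory(tmp, finalDir); // only complete downloads become visible
        }
    }
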
@@ -271,27 +236,21 @@ class SyncSupervisorEvent extends RunnableCallback {
      * @throws IOException
      * @throws TException
      */
-    private void downloadDistributeStormCode(Map conf, String topologyId,
-            String masterCodeDir) throws IOException, TException {
+    private void downloadDistributeStormCode(Map conf, String topologyId, String masterCodeDir) throws IOException, TException {
 
         // STORM_LOCAL_DIR/supervisor/tmp/(UUID)
-        String tmproot =
-                StormConfig.supervisorTmpDir(conf) + File.separator
-                        + UUID.randomUUID().toString();
+        String tmproot = StormConfig.supervisorTmpDir(conf) + File.separator + UUID.randomUUID().toString();
 
         // STORM_LOCAL_DIR/supervisor/stormdist/topologyId
-        String stormroot =
-                StormConfig.supervisor_stormdist_root(conf, topologyId);
+        String stormroot = StormConfig.supervisor_stormdist_root(conf, topologyId);
 
-        JStormServerUtils.downloadCodeFromMaster(conf, tmproot, masterCodeDir,
-                topologyId, true);
+        JStormServerUtils.downloadCodeFromMaster(conf, tmproot, masterCodeDir, topologyId, true);
 
         // tmproot/stormjar.jar
         String localFileJarTmp = StormConfig.stormjar_path(tmproot);
 
         // extract dir from jar
-        JStormUtils.extract_dir_from_jar(localFileJarTmp,
-                StormConfig.RESOURCES_SUBDIR, tmproot);
+        JStormUtils.extract_dir_from_jar(localFileJarTmp, StormConfig.RESOURCES_SUBDIR, tmproot);
 
         File srcDir = new File(tmproot);
         File destDir = new File(stormroot);
@@ -325,8 +284,7 @@ class SyncSupervisorEvent extends RunnableCallback {
         List<String> rtn = new ArrayList<String>();
         int size = jarPaths.size();
         for (int i = 0; i < size; i++) {
-            if (JStormUtils.zipContainsDir(jarPaths.get(i),
-                    StormConfig.RESOURCES_SUBDIR)) {
+            if (JStormUtils.zipContainsDir(jarPaths.get(i), StormConfig.RESOURCES_SUBDIR)) {
                 rtn.add(jarPaths.get(i));
             }
         }
@@ -342,24 +300,19 @@ class SyncSupervisorEvent extends RunnableCallback {
      * 
      * @param stormClusterState
      * @param supervisorId
-     * @param callback
      * @throws Exception
      * @returns map: {port,LocalAssignment}
      */
-    private Map<Integer, LocalAssignment> getLocalAssign(
-            StormClusterState stormClusterState, String supervisorId,
-            Map<String, Assignment> assignments) throws Exception {
+    private Map<Integer, LocalAssignment> getLocalAssign(StormClusterState stormClusterState, String supervisorId, Map<String, Assignment> assignments)
+            throws Exception {
 
-        Map<Integer, LocalAssignment> portLA =
-                new HashMap<Integer, LocalAssignment>();
+        Map<Integer, LocalAssignment> portLA = new HashMap<Integer, LocalAssignment>();
 
         for (Entry<String, Assignment> assignEntry : assignments.entrySet()) {
             String topologyId = assignEntry.getKey();
             Assignment assignment = assignEntry.getValue();
 
-            Map<Integer, LocalAssignment> portTasks =
-                    readMyTasks(stormClusterState, topologyId, supervisorId,
-                            assignment);
+            Map<Integer, LocalAssignment> portTasks = readMyTasks(stormClusterState, topologyId, supervisorId, assignment);
             if (portTasks == null) {
                 continue;
             }
@@ -374,8 +327,7 @@ class SyncSupervisorEvent extends RunnableCallback {
                 if (!portLA.containsKey(port)) {
                     portLA.put(port, la);
                 } else {
-                    throw new RuntimeException(
-                            "Should not have multiple topologys assigned to one port");
+                    throw new RuntimeException("Should not have multiple topologys assigned to one port");
                 }
             }
         }
@@ -389,30 +341,27 @@ class SyncSupervisorEvent extends RunnableCallback {
      * @param stormClusterState
      * @param topologyId
      * @param supervisorId
-     * @param callback
      * @return Map: {port, LocalAssignment}
      * @throws Exception
      */
-    private Map<Integer, LocalAssignment> readMyTasks(
-            StormClusterState stormClusterState, String topologyId,
-            String supervisorId, Assignment assignmenInfo) throws Exception {
+    private Map<Integer, LocalAssignment> readMyTasks(StormClusterState stormClusterState, String topologyId, String supervisorId, Assignment assignmentInfo)
+            throws Exception {
 
-        Map<Integer, LocalAssignment> portTasks =
-                new HashMap<Integer, LocalAssignment>();
+        Map<Integer, LocalAssignment> portTasks = new HashMap<Integer, LocalAssignment>();
 
-        Set<ResourceWorkerSlot> workers = assignmenInfo.getWorkers();
+        Set<ResourceWorkerSlot> workers = assignmentInfo.getWorkers();
         if (workers == null) {
-            LOG.error("No worker of assignement's " + assignmenInfo);
+            LOG.error("No worker of assignment's " + assignmentInfo);
             return portTasks;
         }
 
         for (ResourceWorkerSlot worker : workers) {
             if (!supervisorId.equals(worker.getNodeId()))
                 continue;
-            portTasks.put(worker.getPort(), new LocalAssignment(topologyId,
-                    worker.getTasks(), Common.topologyIdToName(topologyId),
-                    worker.getMemSize(), worker.getCpu(), worker.getJvm(),
-                    assignmenInfo.getTimeStamp()));
+            portTasks.put(
+                    worker.getPort(),
+                    new LocalAssignment(topologyId, worker.getTasks(), Common.topologyIdToName(topologyId), worker.getMemSize(), worker.getCpu(), worker
+                            .getJvm(), assignmentInfo.getTimeStamp()));
         }
 
         return portTasks;
@@ -421,14 +370,10 @@ class SyncSupervisorEvent extends RunnableCallback {
     /**
      * get the master code dir for every topology
      * 
-     * @param stormClusterState
-     * @param callback
      * @throws Exception
      * @returns Map: <topologyId, master-code-dir> from zookeeper
      */
-    public static Map<String, String> getTopologyCodeLocations(
-            Map<String, Assignment> assignments, String supervisorId)
-            throws Exception {
+    public static Map<String, String> getTopologyCodeLocations(Map<String, Assignment> assignments, String supervisorId) throws Exception {
 
         Map<String, String> rtn = new HashMap<String, String>();
         for (Entry<String, Assignment> entry : assignments.entrySet()) {
@@ -448,9 +393,8 @@ class SyncSupervisorEvent extends RunnableCallback {
         return rtn;
     }
 
-    public void downloadTopology(Map<String, String> topologyCodes,
-            List<String> downloadedTopologyIds, Set<String> updateTopologys,
-            Map<String, Assignment> assignments) throws Exception {
+    public void downloadTopology(Map<String, String> topologyCodes, List<String> downloadedTopologyIds, Set<String> updateTopologys,
+                                 Map<String, Assignment> assignments, Set<String> downloadFailedTopologyIds) throws Exception {
 
         Set<String> downloadTopologys = new HashSet<String>();
 
@@ -459,38 +403,53 @@ class SyncSupervisorEvent extends RunnableCallback {
             String topologyId = entry.getKey();
             String masterCodeDir = entry.getValue();
 
-            if (!downloadedTopologyIds.contains(topologyId)
-                    || updateTopologys.contains(topologyId)) {
+            if (!downloadedTopologyIds.contains(topologyId) || updateTopologys.contains(topologyId)) {
 
-                LOG.info("Downloading code for storm id " + topologyId
-                        + " from " + masterCodeDir);
+                LOG.info("Downloading code for storm id " + topologyId + " from " + masterCodeDir);
 
-                try {
-                    downloadStormCode(conf, topologyId, masterCodeDir);
-                    // Update assignment timeStamp
-                    StormConfig.write_supervisor_topology_timestamp(conf,
-                            topologyId, assignments.get(topologyId)
-                                    .getTimeStamp());
-                } catch (IOException e) {
-                    LOG.error(e + " downloadStormCode failed " + "topologyId:"
-                            + topologyId + "masterCodeDir:" + masterCodeDir);
+                int retry = 0;
+                while (retry < 3) {
+                    try {
+                        downloadStormCode(conf, topologyId, masterCodeDir);
+                        // Update assignment timeStamp
+                        StormConfig.write_supervisor_topology_timestamp(conf, topologyId, assignments.get(topologyId).getTimeStamp());
+                        break;
+                    } catch (IOException e) {
+                        LOG.error(e + " downloadStormCode failed " + "topologyId:" + topologyId + "masterCodeDir:" + masterCodeDir);
 
-                } catch (TException e) {
-                    LOG.error(e + " downloadStormCode failed " + "topologyId:"
-                            + topologyId + "masterCodeDir:" + masterCodeDir);
+                    } catch (TException e) {
+                        LOG.error(e + " downloadStormCode failed " + "topologyId:" + topologyId + "masterCodeDir:" + masterCodeDir);
+                    }
+                    retry++;
+                }
+                if (retry < 3) {
+                    LOG.info("Finished downloading code for storm id " + topologyId + " from " + masterCodeDir);
+                    downloadTopologys.add(topologyId);
+                } else {
+                    LOG.error("Cann't  download code for storm id " + topologyId + " from " + masterCodeDir);
+                    downloadFailedTopologyIds.add(topologyId);
                 }
-                LOG.info("Finished downloading code for storm id " + topologyId
-                        + " from " + masterCodeDir);
 
-                downloadTopologys.add(topologyId);
+            }
+        }
+        // clearing a topology's directory is dangerous, so only clear topologies
+        // that aren't contained in downloadedTopologyIds
+        for (String topologyId : downloadFailedTopologyIds) {
+            if (!downloadedTopologyIds.contains(topologyId)) {
+                try {
+                    String stormroot = StormConfig.supervisor_stormdist_root(conf, topologyId);
+                    File destDir = new File(stormroot);
+                    FileUtils.deleteQuietly(destDir);
+                } catch (Exception e) {
+                    LOG.error("Cann't  clear directory about storm id " + topologyId + " on supervisor ");
+                }
             }
         }
 
         updateTaskCleanupTimeout(downloadTopologys);
     }
 
-    public void removeUselessTopology(Map<String, String> topologyCodes,
-            List<String> downloadedTopologyIds) {
+    public void removeUselessTopology(Map<String, String> topologyCodes, List<String> downloadedTopologyIds) {
         for (String topologyId : downloadedTopologyIds) {
 
             if (!topologyCodes.containsKey(topologyId)) {
@@ -499,9 +458,7 @@ class SyncSupervisorEvent extends RunnableCallback {
 
                 String path = null;
                 try {
-                    path =
-                            StormConfig.supervisor_stormdist_root(conf,
-                                    topologyId);
+                    path = StormConfig.supervisor_stormdist_root(conf, topologyId);
                     PathUtils.rmr(path);
                 } catch (IOException e) {
                     String errMsg = "rmr the path:" + path + "failed\n";
@@ -511,13 +468,11 @@ class SyncSupervisorEvent extends RunnableCallback {
         }
     }
 
-    private Set<String> getUpdateTopologys(
-            Map<Integer, LocalAssignment> localAssignments,
-            Map<Integer, LocalAssignment> zkAssignments) {
+    private Set<String> getUpdateTopologys(Map<Integer, LocalAssignment> localAssignments, Map<Integer, LocalAssignment> zkAssignments,
+            Map<String, Assignment> assignments) {
         Set<String> ret = new HashSet<String>();
         if (localAssignments != null && zkAssignments != null) {
-            for (Entry<Integer, LocalAssignment> entry : localAssignments
-                    .entrySet()) {
+            for (Entry<Integer, LocalAssignment> entry : localAssignments.entrySet()) {
                 Integer port = entry.getKey();
                 LocalAssignment localAssignment = entry.getValue();
 
@@ -526,14 +481,11 @@ class SyncSupervisorEvent extends RunnableCallback {
                 if (localAssignment == null || zkAssignment == null)
                     continue;
 
-                if (localAssignment.getTopologyId().equals(
-                        zkAssignment.getTopologyId())
-                        && localAssignment.getTimeStamp() < zkAssignment
-                                .getTimeStamp())
+                Assignment assignment = assignments.get(localAssignment.getTopologyId());
+                if (localAssignment.getTopologyId().equals(zkAssignment.getTopologyId()) && assignment != null
+                        && assignment.isTopologyChange(localAssignment.getTimeStamp()))
                     if (ret.add(localAssignment.getTopologyId())) {
-                        LOG.info("Topology-" + localAssignment.getTopologyId()
-                                + " has been updated. LocalTs="
-                                + localAssignment.getTimeStamp() + ", ZkTs="
+                        LOG.info("Topology-" + localAssignment.getTopologyId() + " has been updated. LocalTs=" + localAssignment.getTimeStamp() + ", ZkTs="
                                 + zkAssignment.getTimeStamp());
                     }
             }
@@ -542,49 +494,37 @@ class SyncSupervisorEvent extends RunnableCallback {
         return ret;
     }
 
-    private Set<String> getNeedReDownloadTopologys(
-            Map<Integer, LocalAssignment> localAssignment) {
-        Set<String> reDownloadTopologys =
-                syncProcesses.getTopologyIdNeedDownload().getAndSet(null);
+    private Set<String> getNeedReDownloadTopologys(Map<Integer, LocalAssignment> localAssignment) {
+        Set<String> reDownloadTopologys = syncProcesses.getTopologyIdNeedDownload().getAndSet(null);
         if (reDownloadTopologys == null || reDownloadTopologys.size() == 0)
             return null;
         Set<String> needRemoveTopologys = new HashSet<String>();
-        Map<Integer, String> portToStartWorkerId =
-                syncProcesses.getPortToWorkerId();
-        for (Entry<Integer, LocalAssignment> entry : localAssignment
-                .entrySet()) {
+        Map<Integer, String> portToStartWorkerId = syncProcesses.getPortToWorkerId();
+        for (Entry<Integer, LocalAssignment> entry : localAssignment.entrySet()) {
             if (portToStartWorkerId.containsKey(entry.getKey()))
                 needRemoveTopologys.add(entry.getValue().getTopologyId());
         }
-        LOG.debug(
-                "worker is starting on these topology, so delay download topology binary: "
-                        + needRemoveTopologys);
+        LOG.debug("worker is starting on these topology, so delay download topology binary: " + needRemoveTopologys);
         reDownloadTopologys.removeAll(needRemoveTopologys);
         if (reDownloadTopologys.size() > 0)
-            LOG.info("Following topologys is going to re-download the jars, "
-                    + reDownloadTopologys);
+            LOG.info("Following topologys is going to re-download the jars, " + reDownloadTopologys);
         return reDownloadTopologys;
     }
 
     private void updateTaskCleanupTimeout(Set<String> topologys) {
         Map topologyConf = null;
-        Map<String, Integer> taskCleanupTimeouts =
-                new HashMap<String, Integer>();
+        Map<String, Integer> taskCleanupTimeouts = new HashMap<String, Integer>();
 
         for (String topologyId : topologys) {
             try {
-                topologyConf =
-                        StormConfig.read_supervisor_topology_conf(conf,
-                                topologyId);
+                topologyConf = StormConfig.read_supervisor_topology_conf(conf, topologyId);
             } catch (IOException e) {
                 LOG.info("Failed to read conf for " + topologyId);
             }
 
             Integer cleanupTimeout = null;
             if (topologyConf != null) {
-                cleanupTimeout =
-                        JStormUtils.parseInt(topologyConf
-                                .get(ConfigExtension.TASK_CLEANUP_TIMEOUT_SEC));
+                cleanupTimeout = JStormUtils.parseInt(topologyConf.get(ConfigExtension.TASK_CLEANUP_TIMEOUT_SEC));
             }
 
             if (cleanupTimeout == null) {
@@ -596,9 +536,7 @@ class SyncSupervisorEvent extends RunnableCallback {
 
         Map<String, Integer> localTaskCleanupTimeouts = null;
         try {
-            localTaskCleanupTimeouts =
-                    (Map<String, Integer>) localState
-                            .get(Common.LS_TASK_CLEANUP_TIMEOUT);
+            localTaskCleanupTimeouts = (Map<String, Integer>) localState.get(Common.LS_TASK_CLEANUP_TIMEOUT);
         } catch (IOException e) {
             LOG.error("Failed to read local task cleanup timeout map", e);
         }
@@ -609,8 +547,7 @@ class SyncSupervisorEvent extends RunnableCallback {
             localTaskCleanupTimeouts.putAll(taskCleanupTimeouts);
 
         try {
-            localState.put(Common.LS_TASK_CLEANUP_TIMEOUT,
-                    localTaskCleanupTimeouts);
+            localState.put(Common.LS_TASK_CLEANUP_TIMEOUT, localTaskCleanupTimeouts);
         } catch (IOException e) {
             LOG.error("Failed to write local task cleanup timeout map", e);
         }
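
Note on the getUpdateTopologys() change above: the old code compared the local and ZK LocalAssignment timestamps directly, while the new code delegates the staleness test to Assignment.isTopologyChange(...). A minimal sketch of what such a check presumably does -- the class body below is an assumption for illustration, not the actual Assignment source:

    public class AssignmentSketch {
        // Timestamp recorded when the assignment was last modified in ZK
        // (hypothetical field; the real Assignment may track more state).
        private final long timeStamp;

        public AssignmentSketch(long timeStamp) {
            this.timeStamp = timeStamp;
        }

        // The local copy counts as stale once the ZK-side timestamp
        // has moved past the timestamp recorded by the supervisor.
        public boolean isTopologyChange(long localTimeStamp) {
            return this.timeStamp > localTimeStamp;
        }
    }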

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/BatchDrainerRunable.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/BatchDrainerRunable.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/BatchDrainerRunable.java
index 81e4374..394c134 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/BatchDrainerRunable.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/BatchDrainerRunable.java
@@ -39,8 +39,7 @@ import com.alibaba.jstorm.utils.Pair;
  * 
  */
 public class BatchDrainerRunable extends DisruptorRunable {
-    private final static Logger LOG = LoggerFactory
-            .getLogger(BatchDrainerRunable.class);
+    private final static Logger LOG = LoggerFactory.getLogger(BatchDrainerRunable.class);
 
     public BatchDrainerRunable(WorkerData workerData) {
         super(workerData.getSendingQueue(), MetricDef.BATCH_DRAINER_THREAD);
@@ -50,8 +49,7 @@ public class BatchDrainerRunable extends DisruptorRunable {
     @Override
     public void handleEvent(Object event, boolean endOfBatch) throws Exception {
 
-        Pair<IConnection, List<TaskMessage>> pair =
-                (Pair<IConnection, List<TaskMessage>>) event;
+        Pair<IConnection, List<TaskMessage>> pair = (Pair<IConnection, List<TaskMessage>>) event;
 
         pair.getFirst().send(pair.getSecond());
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/ContextMaker.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/ContextMaker.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/ContextMaker.java
index a260323..47e73b8 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/ContextMaker.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/ContextMaker.java
@@ -17,25 +17,23 @@
  */
 package com.alibaba.jstorm.daemon.worker;
 
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import backtype.storm.generated.StormTopology;
 import backtype.storm.generated.StreamInfo;
 import backtype.storm.task.TopologyContext;
 import backtype.storm.tuple.Fields;
 import backtype.storm.utils.ThriftTopologyUtils;
-
 import com.alibaba.jstorm.cluster.StormConfig;
 import com.alibaba.jstorm.utils.JStormUtils;
 import com.alibaba.jstorm.utils.PathUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
 
 /**
 * ContextMaker: this class is used to create TopologyContext
@@ -56,8 +54,7 @@ public class ContextMaker {
     @SuppressWarnings("rawtypes")
     public ContextMaker(WorkerData workerData) {
         /*
-         * Map stormConf, String topologyId, String workerId, HashMap<Integer,
-         * String> tasksToComponent, Integer port, List<Integer> workerTasks
+         * Map stormConf, String topologyId, String workerId, HashMap<Integer, String> tasksToComponent, Integer port, List<Integer> workerTasks
          */
         this.workerData = workerData;
         this.workerTasks = JStormUtils.mk_list(workerData.getTaskids());
@@ -67,12 +64,9 @@ public class ContextMaker {
             String topologyId = workerData.getTopologyId();
             String workerId = workerData.getWorkerId();
 
-            String distroot =
-                    StormConfig
-                            .supervisor_stormdist_root(stormConf, topologyId);
+            String distroot = StormConfig.supervisor_stormdist_root(stormConf, topologyId);
 
-            resourcePath =
-                    StormConfig.supervisor_storm_resources_path(distroot);
+            resourcePath = StormConfig.supervisor_storm_resources_path(distroot);
 
             pidDir = StormConfig.worker_pids_root(stormConf, workerId);
 
@@ -85,43 +79,32 @@ public class ContextMaker {
         }
     }
 
-    public TopologyContext makeTopologyContext(StormTopology topology,
-            Integer taskId, clojure.lang.Atom openOrPrepareWasCalled) {
+    public TopologyContext makeTopologyContext(StormTopology topology, Integer taskId, clojure.lang.Atom openOrPrepareWasCalled) {
 
         Map stormConf = workerData.getStormConf();
         String topologyId = workerData.getTopologyId();
 
-        HashMap<String, Map<String, Fields>> componentToStreamToFields =
-                new HashMap<String, Map<String, Fields>>();
+        HashMap<String, Map<String, Fields>> componentToStreamToFields = new HashMap<String, Map<String, Fields>>();
 
         Set<String> components = ThriftTopologyUtils.getComponentIds(topology);
         for (String component : components) {
 
-            Map<String, Fields> streamToFieldsMap =
-                    new HashMap<String, Fields>();
+            Map<String, Fields> streamToFieldsMap = new HashMap<String, Fields>();
 
-            Map<String, StreamInfo> streamInfoMap =
-                    ThriftTopologyUtils.getComponentCommon(topology, component)
-                            .get_streams();
+            Map<String, StreamInfo> streamInfoMap = ThriftTopologyUtils.getComponentCommon(topology, component).get_streams();
             for (Entry<String, StreamInfo> entry : streamInfoMap.entrySet()) {
                 String streamId = entry.getKey();
                 StreamInfo streamInfo = entry.getValue();
 
-                streamToFieldsMap.put(streamId,
-                        new Fields(streamInfo.get_output_fields()));
+                streamToFieldsMap.put(streamId, new Fields(streamInfo.get_output_fields()));
             }
 
             componentToStreamToFields.put(component, streamToFieldsMap);
         }
 
-        return new TopologyContext(topology, stormConf,
-                workerData.getTasksToComponent(),
-                workerData.getComponentToSortedTasks(),
-                componentToStreamToFields, topologyId, resourcePath, pidDir,
-                taskId, workerData.getPort(), workerTasks,
-                workerData.getDefaultResources(),
-                workerData.getUserResources(), workerData.getExecutorData(),
-                workerData.getRegisteredMetrics(), openOrPrepareWasCalled);
+        return new TopologyContext(topology, stormConf, workerData.getTasksToComponent(), workerData.getComponentToSortedTasks(), componentToStreamToFields,
+                topologyId, resourcePath, pidDir, taskId, workerData.getPort(), workerTasks, workerData.getDefaultResources(), workerData.getUserResources(),
+                workerData.getExecutorData(), workerData.getRegisteredMetrics(), openOrPrepareWasCalled, workerData.getZkCluster());
 
     }
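
For readers tracing makeTopologyContext(...) above: componentToStreamToFields nests two maps, component id -> stream id -> declared output Fields. A small self-contained illustration (the component and stream names below are made up):

    import backtype.storm.tuple.Fields;

    import java.util.HashMap;
    import java.util.Map;

    public class StreamFieldsSketch {
        public static void main(String[] args) {
            // component id -> (stream id -> output fields), the same shape
            // assembled from ThriftTopologyUtils in makeTopologyContext(...)
            Map<String, Map<String, Fields>> componentToStreamToFields =
                    new HashMap<String, Map<String, Fields>>();

            Map<String, Fields> spoutStreams = new HashMap<String, Fields>();
            spoutStreams.put("default", new Fields("word"));
            componentToStreamToFields.put("word-spout", spoutStreams);

            System.out.println(componentToStreamToFields.keySet());
        }
    }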
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/DrainerRunable.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/DrainerRunable.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/DrainerRunable.java
index 3477cc4..c19947a 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/DrainerRunable.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/DrainerRunable.java
@@ -46,8 +46,7 @@ import com.alibaba.jstorm.utils.Pair;
  * 
  */
 public class DrainerRunable extends DisruptorRunable {
-    private final static Logger LOG = LoggerFactory
-            .getLogger(DrainerRunable.class);
+    private final static Logger LOG = LoggerFactory.getLogger(DrainerRunable.class);
 
     private DisruptorQueue transferQueue;
     private ConcurrentHashMap<WorkerSlot, IConnection> nodeportSocket;
@@ -92,8 +91,7 @@ public class DrainerRunable extends DisruptorRunable {
 
         if (conn.isClosed() == true) {
             // if the connection has been closed, just skip the message
-            LOG.debug("Skip one tuple of " + taskId
-                    + ", due to close connection of " + nodePort);
+            LOG.debug("Skip one tuple of " + taskId + ", due to close connection of " + nodePort);
             return;
         }
 
@@ -113,11 +111,8 @@ public class DrainerRunable extends DisruptorRunable {
     }
 
     public void handleFinish() {
-        for (Entry<IConnection, List<TaskMessage>> entry : dispatchMap
-                .entrySet()) {
-            Pair<IConnection, List<TaskMessage>> pair =
-                    new Pair<IConnection, List<TaskMessage>>(entry.getKey(),
-                            entry.getValue());
+        for (Entry<IConnection, List<TaskMessage>> entry : dispatchMap.entrySet()) {
+            Pair<IConnection, List<TaskMessage>> pair = new Pair<IConnection, List<TaskMessage>>(entry.getKey(), entry.getValue());
 
             sendingQueue.publish(pair);
         }
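
Taken together with BatchDrainerRunable.handleEvent(...) above: DrainerRunable batches messages per connection into dispatchMap, wraps each (connection, batch) into a Pair, and publishes it onto sendingQueue, where BatchDrainerRunable finally calls send(...). A stripped-down sketch of that handoff, with a plain BlockingQueue standing in for the Disruptor queue (all names here are ours, not the commit's):

    import java.util.AbstractMap.SimpleEntry;
    import java.util.Arrays;
    import java.util.List;
    import java.util.Map.Entry;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    public class DrainHandoffSketch {
        public static void main(String[] args) throws InterruptedException {
            // stands in for sendingQueue (a DisruptorQueue in the real code)
            BlockingQueue<Entry<String, List<String>>> sendingQueue =
                    new LinkedBlockingQueue<Entry<String, List<String>>>();

            // producer side (DrainerRunable.handleFinish): one entry per connection
            sendingQueue.put(new SimpleEntry<String, List<String>>(
                    "connection-to-worker-1", Arrays.asList("msg1", "msg2")));

            // consumer side (BatchDrainerRunable.handleEvent): send the whole batch
            Entry<String, List<String>> pair = sendingQueue.take();
            System.out.println("send " + pair.getValue() + " via " + pair.getKey());
        }
    }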

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/LocalAssignment.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/LocalAssignment.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/LocalAssignment.java
index 312c57f..1221680 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/LocalAssignment.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/LocalAssignment.java
@@ -38,8 +38,7 @@ public class LocalAssignment implements Serializable {
     private String jvm;
     private long timeStamp;
 
-    public LocalAssignment(String topologyId, Set<Integer> taskIds,
-            String topologyName, long mem, int cpu, String jvm, long timeStamp) {
+    public LocalAssignment(String topologyId, Set<Integer> taskIds, String topologyName, long mem, int cpu, String jvm, long timeStamp) {
         this.topologyId = topologyId;
         this.taskIds = new HashSet<Integer>(taskIds);
         this.topologyName = topologyName;
@@ -105,13 +104,8 @@ public class LocalAssignment implements Serializable {
         result = prime * result + ((jvm == null) ? 0 : jvm.hashCode());
         result = prime * result + (int) (mem ^ (mem >>> 32));
         result = prime * result + ((taskIds == null) ? 0 : taskIds.hashCode());
-        result =
-                prime * result
-                        + ((topologyId == null) ? 0 : topologyId.hashCode());
-        result =
-                prime
-                        * result
-                        + ((topologyName == null) ? 0 : topologyName.hashCode());
+        result = prime * result + ((topologyId == null) ? 0 : topologyId.hashCode());
+        result = prime * result + ((topologyName == null) ? 0 : topologyName.hashCode());
         result = prime * result + (int) (timeStamp & 0xffffffff);
         return result;
     }
@@ -156,7 +150,6 @@ public class LocalAssignment implements Serializable {
 
     @Override
     public String toString() {
-        return ToStringBuilder.reflectionToString(this,
-                ToStringStyle.SHORT_PREFIX_STYLE);
+        return ToStringBuilder.reflectionToString(this, ToStringStyle.SHORT_PREFIX_STYLE);
     }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/ProcessSimulator.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/ProcessSimulator.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/ProcessSimulator.java
index 628e0f5..056b6f3 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/ProcessSimulator.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/ProcessSimulator.java
@@ -33,8 +33,7 @@ public class ProcessSimulator {
      * skip old function name: pid-counter
      */
 
-    protected static ConcurrentHashMap<String, WorkerShutdown> processMap =
-            new ConcurrentHashMap<String, WorkerShutdown>();
+    protected static ConcurrentHashMap<String, WorkerShutdown> processMap = new ConcurrentHashMap<String, WorkerShutdown>();
 
     /**
      * Register process handler old function name: register-process

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/RefreshActive.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/RefreshActive.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/RefreshActive.java
index 3f8acfc..bde8232 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/RefreshActive.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/RefreshActive.java
@@ -34,8 +34,7 @@ import com.alibaba.jstorm.task.TaskShutdownDameon;
 import com.alibaba.jstorm.utils.JStormUtils;
 
 /**
- * Timely check whether topology is active or not and whether the metrics
- * monitor is enable or disable from ZK
+ * Periodically check from ZK whether the topology is active and whether the metrics monitor is enabled or disabled
  * 
  * @author yannian/Longda
  * 
@@ -63,9 +62,7 @@ public class RefreshActive extends RunnableCallback {
         this.conf = workerData.getStormConf();
         this.zkCluster = workerData.getZkCluster();
         this.topologyId = workerData.getTopologyId();
-        this.frequence =
-                JStormUtils.parseInt(conf.get(Config.TASK_REFRESH_POLL_SECS),
-                        10);
+        this.frequence = JStormUtils.parseInt(conf.get(Config.TASK_REFRESH_POLL_SECS), 10);
     }
 
     @Override
@@ -91,8 +88,7 @@ public class RefreshActive extends RunnableCallback {
                 return;
             }
 
-            LOG.info("Old TopologyStatus:" + oldTopologyStatus
-                    + ", new TopologyStatus:" + newTopologyStatus);
+            LOG.info("Old TopologyStatus:" + oldTopologyStatus + ", new TopologyStatus:" + newTopologyStatus);
 
             List<TaskShutdownDameon> tasks = workerData.getShutdownTasks();
             if (tasks == null) {
@@ -120,8 +116,7 @@ public class RefreshActive extends RunnableCallback {
             boolean newMonitorEnable = base.isEnableMonitor();
             boolean oldMonitorEnable = monitorEnable.get();
             if (newMonitorEnable != oldMonitorEnable) {
-                LOG.info("Change MonitorEnable from " + oldMonitorEnable
-                        + " to " + newMonitorEnable);
+                LOG.info("Change MonitorEnable from " + oldMonitorEnable + " to " + newMonitorEnable);
                 monitorEnable.set(newMonitorEnable);
             }
         } catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/RefreshConnections.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/RefreshConnections.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/RefreshConnections.java
index 48cc945..130985b 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/RefreshConnections.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/RefreshConnections.java
@@ -17,23 +17,10 @@
  */
 package com.alibaba.jstorm.daemon.worker;
 
-import java.io.FileNotFoundException;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import backtype.storm.Config;
 import backtype.storm.messaging.IConnection;
 import backtype.storm.messaging.IContext;
 import backtype.storm.scheduler.WorkerSlot;
-
 import com.alibaba.jstorm.callback.RunnableCallback;
 import com.alibaba.jstorm.cluster.StormClusterState;
 import com.alibaba.jstorm.cluster.StormConfig;
@@ -42,9 +29,15 @@ import com.alibaba.jstorm.schedule.Assignment.AssignmentType;
 import com.alibaba.jstorm.schedule.default_assign.ResourceWorkerSlot;
 import com.alibaba.jstorm.task.Task;
 import com.alibaba.jstorm.task.TaskShutdownDameon;
-import com.alibaba.jstorm.task.heartbeat.TaskHeartbeat;
 import com.alibaba.jstorm.utils.JStormUtils;
-import com.alibaba.jstorm.utils.TimeUtils;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileNotFoundException;
+import java.util.*;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 /**
  * 
@@ -56,8 +49,7 @@ import com.alibaba.jstorm.utils.TimeUtils;
  * 
  */
 public class RefreshConnections extends RunnableCallback {
-    private static Logger LOG = LoggerFactory
-            .getLogger(RefreshConnections.class);
+    private static Logger LOG = LoggerFactory.getLogger(RefreshConnections.class);
 
     private WorkerData workerData;
 
@@ -102,13 +94,9 @@ public class RefreshConnections extends RunnableCallback {
         this.supervisorId = workerData.getSupervisorId();
 
         // this.endpoint_socket_lock = endpoint_socket_lock;
-        frequence =
-                JStormUtils
-                        .parseInt(conf.get(Config.TASK_REFRESH_POLL_SECS), 5);
+        frequence = JStormUtils.parseInt(conf.get(Config.TASK_REFRESH_POLL_SECS), 5);
 
-        taskTimeoutSecs =
-                JStormUtils.parseInt(
-                        conf.get(Config.TASK_HEARTBEAT_FREQUENCY_SECS), 10);
+        taskTimeoutSecs = JStormUtils.parseInt(conf.get(Config.TASK_HEARTBEAT_FREQUENCY_SECS), 10);
         taskTimeoutSecs = taskTimeoutSecs * 3;
     }
 
@@ -122,8 +110,7 @@ public class RefreshConnections extends RunnableCallback {
             //
 
             synchronized (this) {
-                Assignment assignment =
-                        zkCluster.assignment_info(topologyId, this);
+                Assignment assignment = zkCluster.assignment_info(topologyId, this);
                 if (assignment == null) {
                     String errMsg = "Failed to get Assignment of " + topologyId;
                     LOG.error(errMsg);
@@ -137,47 +124,39 @@ public class RefreshConnections extends RunnableCallback {
                 // updated. If so, the outbound
                 // task map should be updated accordingly.
                 try {
-                    Long localAssignmentTS =
-                            StormConfig.read_supervisor_topology_timestamp(
-                                    conf, topologyId);
-                    if (localAssignmentTS.longValue() > workerData
-                            .getAssignmentTs().longValue()) {
+                    Long localAssignmentTS = StormConfig.read_supervisor_topology_timestamp(conf, topologyId);
+                    if (localAssignmentTS.longValue() > workerData.getAssignmentTs().longValue()) {
                         try {
-                            if (assignment.getAssignmentType() == AssignmentType.Config) {
+                            if (assignment.getAssignmentType() == AssignmentType.UpdateTopology) {
                                 LOG.info("Get config reload request for " + topologyId);
                                 // If config was updated, notify all tasks
                                 List<TaskShutdownDameon> taskShutdowns = workerData.getShutdownTasks();
                                 Map newConf = StormConfig.read_supervisor_topology_conf(conf, topologyId);
                                 workerData.getStormConf().putAll(newConf);
                                 for (TaskShutdownDameon taskSD : taskShutdowns) {
-                                    taskSD.updateConf(newConf);
+                                    taskSD.update(newConf);
                                 }
-                                workerData.setAssignmentType(AssignmentType.Config);
+                                workerData.setAssignmentType(AssignmentType.UpdateTopology);
                             } else {
                                 Set<Integer> addedTasks = getAddedTasks(assignment);
-                                Set<Integer> removedTasks =
-                                        getRemovedTasks(assignment);
-                                
+                                Set<Integer> removedTasks = getRemovedTasks(assignment);
+                                Set<Integer> updatedTasks = getUpdatedTasks(assignment);
+
                                 workerData.updateWorkerData(assignment);
-                                
-                                if (removedTasks.size() > 0)
-                                    shutdownTasks(removedTasks);
-                                if (addedTasks.size() > 0)
-                                    createTasks(addedTasks);
-                                
-                                Set<Integer> tmpOutboundTasks =
-                                        Worker.worker_output_tasks(workerData);
+
+                                shutdownTasks(removedTasks);
+                                createTasks(addedTasks);
+                                updateTasks(updatedTasks);
+
+                                Set<Integer> tmpOutboundTasks = Worker.worker_output_tasks(workerData);
                                 if (outboundTasks.equals(tmpOutboundTasks) == false) {
                                     for (int taskId : tmpOutboundTasks) {
                                         if (outboundTasks.contains(taskId) == false)
-                                            workerData
-                                                    .addOutboundTaskStatusIfAbsent(taskId);
+                                            workerData.addOutboundTaskStatusIfAbsent(taskId);
                                     }
-                                    for (int taskId : workerData
-                                            .getOutboundTaskStatus().keySet()) {
+                                    for (int taskId : workerData.getOutboundTaskStatus().keySet()) {
                                         if (tmpOutboundTasks.contains(taskId) == false) {
-                                            workerData
-                                                    .removeOutboundTaskStatus(taskId);
+                                            workerData.removeOutboundTaskStatus(taskId);
                                         }
                                     }
                                     workerData.setOutboundTasks(tmpOutboundTasks);
@@ -196,23 +175,19 @@ public class RefreshConnections extends RunnableCallback {
                     }
 
                 } catch (FileNotFoundException e) {
-                    LOG.warn(
-                            "Failed to read supervisor topology timeStamp for "
-                                    + topologyId + " port="
-                                    + workerData.getPort(), e);
+                    LOG.warn("Failed to read supervisor topology timeStamp for " + topologyId + " port=" + workerData.getPort(), e);
                 }
 
                 Set<ResourceWorkerSlot> workers = assignment.getWorkers();
                 if (workers == null) {
-                    String errMsg =
-                            "Failed to get taskToResource of " + topologyId;
+                    String errMsg = "Failed to get taskToResource of " + topologyId;
                     LOG.error(errMsg);
                     return;
                 }
-                workerData.getWorkerToResource().addAll(workers);
 
-                Map<Integer, WorkerSlot> my_assignment =
-                        new HashMap<Integer, WorkerSlot>();
+                workerData.updateWorkerToResource(workers);
+
+                Map<Integer, WorkerSlot> my_assignment = new HashMap<Integer, WorkerSlot>();
 
                 Map<String, String> node = assignment.getNodeHost();
 
@@ -220,11 +195,13 @@ public class RefreshConnections extends RunnableCallback {
                 Set<WorkerSlot> need_connections = new HashSet<WorkerSlot>();
 
                 Set<Integer> localTasks = new HashSet<Integer>();
+                Set<Integer> localNodeTasks = new HashSet<Integer>();
 
                 if (workers != null && outboundTasks != null) {
                     for (ResourceWorkerSlot worker : workers) {
-                        if (supervisorId.equals(worker.getNodeId())
-                                && worker.getPort() == workerData.getPort())
+                        if (supervisorId.equals(worker.getNodeId()))
+                            localNodeTasks.addAll(worker.getTasks());
+                        if (supervisorId.equals(worker.getNodeId()) && worker.getPort() == workerData.getPort())
                             localTasks.addAll(worker.getTasks());
                         for (Integer id : worker.getTasks()) {
                             if (outboundTasks.contains(id)) {
@@ -236,6 +213,7 @@ public class RefreshConnections extends RunnableCallback {
                 }
                 taskNodeport.putAll(my_assignment);
                 workerData.setLocalTasks(localTasks);
+                workerData.setLocalNodeTasks(localNodeTasks);
 
                 // get which connection need to be remove or add
                 Set<WorkerSlot> current_connections = nodeportSocket.keySet();
@@ -274,18 +252,9 @@ public class RefreshConnections extends RunnableCallback {
                     nodeportSocket.remove(node_port).close();
                 }
 
-                // Update the status of all outbound tasks
+                // check the status of connections to all outbound tasks
                 for (Integer taskId : outboundTasks) {
-                    boolean isActive = false;
-                    int currentTime = TimeUtils.current_time_secs();
-                    TaskHeartbeat tHB =
-                            zkCluster.task_heartbeat(topologyId, taskId);
-                    if (tHB != null) {
-                        int taskReportTime = tHB.getTimeSecs();
-                        if ((currentTime - taskReportTime) < taskTimeoutSecs)
-                            isActive = true;
-                    }
-                    workerData.updateOutboundTaskStatus(taskId, isActive);
+                    workerData.updateOutboundTaskStatus(taskId, isOutTaskConnected(taskId));
                 }
             }
         } catch (Exception e) {
@@ -307,16 +276,13 @@ public class RefreshConnections extends RunnableCallback {
     private Set<Integer> getAddedTasks(Assignment assignment) {
         Set<Integer> ret = new HashSet<Integer>();
         try {
-            Set<Integer> taskIds =
-                    assignment.getCurrentWorkerTasks(
-                            workerData.getSupervisorId(), workerData.getPort());
+            Set<Integer> taskIds = assignment.getCurrentWorkerTasks(workerData.getSupervisorId(), workerData.getPort());
             for (Integer taskId : taskIds) {
                 if (!(workerData.getTaskids().contains(taskId)))
                     ret.add(taskId);
             }
         } catch (Exception e) {
-            LOG.warn("Failed to get added task list for"
-                    + workerData.getTopologyId());
+            LOG.warn("Failed to get added task list for" + workerData.getTopologyId());
             ;
         }
         return ret;
@@ -325,22 +291,36 @@ public class RefreshConnections extends RunnableCallback {
     private Set<Integer> getRemovedTasks(Assignment assignment) {
         Set<Integer> ret = new HashSet<Integer>();
         try {
-            Set<Integer> taskIds =
-                    assignment.getCurrentWorkerTasks(
-                            workerData.getSupervisorId(), workerData.getPort());
+            Set<Integer> taskIds = assignment.getCurrentWorkerTasks(workerData.getSupervisorId(), workerData.getPort());
             for (Integer taskId : workerData.getTaskids()) {
                 if (!(taskIds.contains(taskId)))
                     ret.add(taskId);
             }
         } catch (Exception e) {
-            LOG.warn("Failed to get removed task list for"
-                    + workerData.getTopologyId());
+            LOG.warn("Failed to get removed task list for" + workerData.getTopologyId());
             ;
         }
         return ret;
     }
 
+    private Set<Integer> getUpdatedTasks(Assignment assignment) {
+        Set<Integer> ret = new HashSet<Integer>();
+        try {
+            Set<Integer> taskIds = assignment.getCurrentWorkerTasks(workerData.getSupervisorId(), workerData.getPort());
+            for (Integer taskId : taskIds) {
+                if ((workerData.getTaskids().contains(taskId)))
+                    ret.add(taskId);
+            }
+        } catch (Exception e) {
+            LOG.warn("Failed to get updated task list for" + workerData.getTopologyId());
+        }
+        return ret;
+    }
+
     private void createTasks(Set<Integer> tasks) {
+        if (tasks == null)
+            return;
+
         for (Integer taskId : tasks) {
             try {
                 TaskShutdownDameon shutdown = Task.mk_task(workerData, taskId);
@@ -352,17 +332,50 @@ public class RefreshConnections extends RunnableCallback {
     }
 
     private void shutdownTasks(Set<Integer> tasks) {
-        for (Integer taskId : tasks) {
+        if (tasks == null)
+            return;
+
+        List<TaskShutdownDameon> shutdowns = workerData.getShutdownDaemonbyTaskIds(tasks);
+        for (TaskShutdownDameon shutdown : shutdowns) {
             try {
-                List<TaskShutdownDameon> shutdowns =
-                        workerData.getShutdownDaemonbyTaskIds(tasks);
-                for (TaskShutdownDameon shutdown : shutdowns) {
-                    shutdown.shutdown();
-                }
+                shutdown.shutdown();
             } catch (Exception e) {
-                LOG.error("Failed to shutdown task-" + taskId, e);
+                LOG.error("Failed to shutdown task-" + shutdown.getTaskId(), e);
             }
         }
     }
 
+    private void updateTasks(Set<Integer> tasks) {
+        if (tasks == null)
+            return;
+
+        List<TaskShutdownDameon> shutdowns = workerData.getShutdownDaemonbyTaskIds(tasks);
+        for (TaskShutdownDameon shutdown : shutdowns) {
+            try {
+                shutdown.getTask().updateTaskData();
+            } catch (Exception e) {
+                LOG.error("Failed to update task-" + shutdown.getTaskId(), e);
+            }
+        }
+    }
+
+    private boolean isOutTaskConnected(int taskId) {
+        boolean ret = false;
+
+        if (workerData.getInnerTaskTransfer().get(taskId) != null) {
+            // Connections to inner tasks are established during initialization,
+            // so report all inner tasks as connected here.
+            ret = true;
+        } else {
+            WorkerSlot slot = taskNodeport.get(taskId);
+            if (slot != null) {
+                IConnection connection = nodeportSocket.get(slot);
+                if (connection != null) {
+                    ret = connection.available();
+                }
+            }
+        }
+
+        return ret;
+    }
 }
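
A behavioral note on the last hunk: outbound task liveness is no longer derived from TaskHeartbeat records in ZK (the old code compared heartbeat age against taskTimeoutSecs) but from the state of the actual connection, via isOutTaskConnected(...). The sketch below restates that check as a stand-alone helper using only types already imported above; the class, method, and parameter names are ours, not part of the commit:

    import backtype.storm.messaging.IConnection;
    import backtype.storm.scheduler.WorkerSlot;

    import java.util.Map;

    public class OutTaskStatusSketch {
        // A task in the same worker is always reachable; a remote task is
        // reachable only if a socket to its worker slot exists and reports
        // available().
        public static boolean isReachable(int taskId,
                Map<Integer, ?> innerTaskTransfer,
                Map<Integer, WorkerSlot> taskNodeport,
                Map<WorkerSlot, IConnection> nodeportSocket) {
            if (innerTaskTransfer.get(taskId) != null) {
                return true;
            }
            WorkerSlot slot = taskNodeport.get(taskId);
            if (slot == null) {
                return false;
            }
            IConnection connection = nodeportSocket.get(slot);
            return connection != null && connection.available();
        }
    }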

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/ShutdownableDameon.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/ShutdownableDameon.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/ShutdownableDameon.java
index 2006b05..97932b9 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/ShutdownableDameon.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/ShutdownableDameon.java
@@ -21,7 +21,6 @@ import backtype.storm.daemon.Shutdownable;
 
 import com.alibaba.jstorm.cluster.DaemonCommon;
 
-public interface ShutdownableDameon extends Shutdownable, DaemonCommon,
-        Runnable {
+public interface ShutdownableDameon extends Shutdownable, DaemonCommon, Runnable {
 
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/VirtualPortDispatch.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/VirtualPortDispatch.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/VirtualPortDispatch.java
index 21dc37c..a769cc1 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/VirtualPortDispatch.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/VirtualPortDispatch.java
@@ -38,23 +38,21 @@ import com.alibaba.jstorm.utils.DisruptorRunable;
  * 
  */
 public class VirtualPortDispatch extends DisruptorRunable {
-    private final static Logger LOG = LoggerFactory
-            .getLogger(VirtualPortDispatch.class);
+    private final static Logger LOG = LoggerFactory.getLogger(VirtualPortDispatch.class);
 
     private ConcurrentHashMap<Integer, DisruptorQueue> deserializeQueues;
     private IConnection recvConnection;
 
-    public VirtualPortDispatch(WorkerData workerData,
-            IConnection recvConnection, DisruptorQueue recvQueue) {
+    public VirtualPortDispatch(WorkerData workerData, IConnection recvConnection, DisruptorQueue recvQueue) {
         super(recvQueue, MetricDef.DISPATCH_THREAD);
 
         this.recvConnection = recvConnection;
         this.deserializeQueues = workerData.getDeserializeQueues();
 
     }
-    
+
     public void shutdownRecv() {
-    	// don't need send shutdown command to every task
+        // no need to send a shutdown command to every task,
         // because every task has already been shut down via workerData.active,
         // and by then the queue is already full
         // byte shutdownCmd[] = { TaskStatus.SHUTDOWN };
@@ -87,8 +85,7 @@ public class VirtualPortDispatch extends DisruptorRunable {
 
         DisruptorQueue queue = deserializeQueues.get(task);
         if (queue == null) {
-            LOG.warn("Received invalid message directed at port " + task
-                    + ". Dropping...");
+            LOG.warn("Received invalid message directed at port " + task + ". Dropping...");
             return;
         }
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/Worker.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/Worker.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/Worker.java
index d5cf9c8..2bf4c9c 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/Worker.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/Worker.java
@@ -17,22 +17,6 @@
  */
 package com.alibaba.jstorm.daemon.worker;
 
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.commons.lang.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import backtype.storm.Config;
 import backtype.storm.generated.Grouping;
 import backtype.storm.generated.StormTopology;
@@ -41,30 +25,31 @@ import backtype.storm.messaging.IContext;
 import backtype.storm.task.TopologyContext;
 import backtype.storm.utils.DisruptorQueue;
 import backtype.storm.utils.Utils;
-
 import com.alibaba.jstorm.callback.AsyncLoopThread;
 import com.alibaba.jstorm.callback.RunnableCallback;
 import com.alibaba.jstorm.client.ConfigExtension;
-import com.alibaba.jstorm.cluster.Common;
 import com.alibaba.jstorm.cluster.StormConfig;
+import com.alibaba.jstorm.metric.JStormMetricsReporter;
 import com.alibaba.jstorm.daemon.worker.hearbeat.SyncContainerHb;
 import com.alibaba.jstorm.daemon.worker.hearbeat.WorkerHeartbeatRunable;
-import com.alibaba.jstorm.metric.JStormMetricsReporter;
 import com.alibaba.jstorm.task.Task;
 import com.alibaba.jstorm.task.TaskShutdownDameon;
-import com.alibaba.jstorm.task.heartbeat.TaskHeartbeatRunable;
 import com.alibaba.jstorm.utils.JStormServerUtils;
 import com.alibaba.jstorm.utils.JStormUtils;
-import com.alibaba.jstorm.utils.NetWorkUtils;
 import com.alibaba.jstorm.utils.PathUtils;
 import com.lmax.disruptor.WaitStrategy;
 import com.lmax.disruptor.dsl.ProducerType;
+import org.apache.commons.lang.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.*;
+import java.util.*;
 
 /**
  * worker entrance
  * 
  * @author yannian/Longda
- * 
  */
 public class Worker {
 
@@ -76,26 +61,14 @@ public class Worker {
     private WorkerData workerData;
 
     @SuppressWarnings({ "rawtypes", "unchecked" })
-    public Worker(Map conf, IContext context, String topology_id,
-            String supervisor_id, int port, String worker_id, String jar_path)
-            throws Exception {
-
-        workerData =
-                new WorkerData(conf, context, topology_id, supervisor_id, port,
-                        worker_id, jar_path);
-
+    public Worker(Map conf, IContext context, String topology_id, String supervisor_id, int port, String worker_id, String jar_path) throws Exception {
+        workerData = new WorkerData(conf, context, topology_id, supervisor_id, port, worker_id, jar_path);
     }
 
     /**
      * get current task's output task list
-     * 
-     * @param tasks_component
-     * @param mk_topology_context
-     * @param task_ids
-     * @throws Exception
      */
     public static Set<Integer> worker_output_tasks(WorkerData workerData) {
-
         ContextMaker context_maker = workerData.getContextMaker();
         Set<Integer> task_ids = workerData.getTaskids();
         StormTopology topology = workerData.getSysTopology();
@@ -103,16 +76,13 @@ public class Worker {
         Set<Integer> rtn = new HashSet<Integer>();
 
         for (Integer taskid : task_ids) {
-            TopologyContext context =
-                    context_maker.makeTopologyContext(topology, taskid, null);
+            TopologyContext context = context_maker.makeTopologyContext(topology, taskid, null);
 
             // <StreamId, <ComponentId, Grouping>>
-            Map<String, Map<String, Grouping>> targets =
-                    context.getThisTargets();
+            Map<String, Map<String, Grouping>> targets = context.getThisTargets();
             for (Map<String, Grouping> e : targets.values()) {
                 for (String componentId : e.keySet()) {
-                    List<Integer> tasks =
-                            context.getComponentTasks(componentId);
+                    List<Integer> tasks = context.getComponentTasks(componentId);
                     rtn.addAll(tasks);
                 }
             }
@@ -140,45 +110,46 @@ public class Worker {
 
         Set<Integer> taskids = workerData.getTaskids();
 
+        Set<Thread> threads = new HashSet<Thread>();
+        List<Task> taskArrayList = new ArrayList<Task>();
         for (int taskid : taskids) {
-
-            TaskShutdownDameon t = Task.mk_task(workerData, taskid);
-
-            shutdowntasks.add(t);
+            Task task = new Task(workerData, taskid);
+            Thread thread = new Thread(task);
+            threads.add(thread);
+            taskArrayList.add(task);
+            thread.start();
+        }
+        for (Thread thread : threads) {
+            thread.join();
+        }
+        for (Task t : taskArrayList) {
+            shutdowntasks.add(t.getTaskShutdownDameon());
         }
-
         return shutdowntasks;
     }
-    
+
     @Deprecated
     private DisruptorQueue startDispatchDisruptor() {
-    	Map stormConf = workerData.getStormConf();
-
-        int queue_size =
-                Utils.getInt(
-                        stormConf.get(Config.TOPOLOGY_TRANSFER_BUFFER_SIZE),
-                        1024);
-        WaitStrategy waitStrategy =
-                (WaitStrategy) JStormUtils.createDisruptorWaitStrategy(stormConf);
-        DisruptorQueue recvQueue =
-                DisruptorQueue.mkInstance("Dispatch", ProducerType.MULTI,
-                        queue_size, waitStrategy);
+        Map stormConf = workerData.getStormConf();
+
+        int queue_size = Utils.getInt(stormConf.get(Config.TOPOLOGY_TRANSFER_BUFFER_SIZE), 1024);
+        WaitStrategy waitStrategy = (WaitStrategy) JStormUtils.createDisruptorWaitStrategy(stormConf);
+        DisruptorQueue recvQueue = DisruptorQueue.mkInstance("Dispatch", ProducerType.MULTI, queue_size, waitStrategy);
         // stop consumerStarted
         recvQueue.consumerStarted();
-        
+
         return recvQueue;
     }
 
     private void startDispatchThread() {
-    	// remove dispatch thread, send tuple directly from nettyserver
-    	//startDispatchDisruptor();
+        // remove dispatch thread, send tuple directly from nettyserver
+        // startDispatchDisruptor();
 
         IContext context = workerData.getContext();
         String topologyId = workerData.getTopologyId();
 
-        IConnection recvConnection =
-                context.bind(topologyId, workerData.getPort(), workerData.getDeserializeQueues());
-        
+        IConnection recvConnection = context.bind(topologyId, workerData.getPort(), workerData.getDeserializeQueues());
+
         workerData.setRecvConnection(recvConnection);
     }
 
@@ -191,40 +162,27 @@ public class Worker {
         // so create client connection before create task
         // refresh connection
         RefreshConnections refreshConn = makeRefreshConnections();
-        AsyncLoopThread refreshconn =
-                new AsyncLoopThread(refreshConn, false, Thread.MIN_PRIORITY,
-                        true);
+        AsyncLoopThread refreshconn = new AsyncLoopThread(refreshConn, false, Thread.MIN_PRIORITY, true);
         threads.add(refreshconn);
 
         // refresh ZK active status
         RefreshActive refreshZkActive = new RefreshActive(workerData);
-        AsyncLoopThread refreshzk =
-                new AsyncLoopThread(refreshZkActive, false,
-                        Thread.MIN_PRIORITY, true);
+        AsyncLoopThread refreshzk = new AsyncLoopThread(refreshZkActive, false, Thread.MIN_PRIORITY, true);
         threads.add(refreshzk);
 
         // Sync heartbeat to Apsara Container
-        AsyncLoopThread syncContainerHbThread =
-                SyncContainerHb.mkWorkerInstance(workerData.getStormConf());
+        AsyncLoopThread syncContainerHbThread = SyncContainerHb.mkWorkerInstance(workerData.getStormConf());
         if (syncContainerHbThread != null) {
             threads.add(syncContainerHbThread);
         }
 
-        JStormMetricsReporter metricReporter =
-                new JStormMetricsReporter(workerData);
-        AsyncLoopThread metricThread = new AsyncLoopThread(metricReporter);
-        threads.add(metricThread);
-
-        // create task heartbeat
-        TaskHeartbeatRunable taskHB = new TaskHeartbeatRunable(workerData);
-        AsyncLoopThread taskHBThread = new AsyncLoopThread(taskHB);
-        threads.add(taskHBThread);
+        JStormMetricsReporter metricReporter = new JStormMetricsReporter(workerData);
+        metricReporter.init();
+        workerData.setMetricsReporter(metricReporter);
 
        // refresh heartbeat to local dir
         RunnableCallback heartbeat_fn = new WorkerHeartbeatRunable(workerData);
-        AsyncLoopThread hb =
-                new AsyncLoopThread(heartbeat_fn, false, null,
-                        Thread.NORM_PRIORITY, true);
+        AsyncLoopThread hb = new AsyncLoopThread(heartbeat_fn, false, null, Thread.NORM_PRIORITY, true);
         threads.add(hb);
 
         // shutdown task callbacks
@@ -239,7 +197,6 @@ public class Worker {
      * create worker instance and run it
      * 
      * @param conf
-     * @param mq_context
      * @param topology_id
      * @param supervisor_id
      * @param port
@@ -248,9 +205,8 @@ public class Worker {
      * @throws Exception
      */
     @SuppressWarnings("rawtypes")
-    public static WorkerShutdown mk_worker(Map conf, IContext context,
-            String topology_id, String supervisor_id, int port,
-            String worker_id, String jar_path) throws Exception {
+    public static WorkerShutdown mk_worker(Map conf, IContext context, String topology_id, String supervisor_id, int port, String worker_id, String jar_path)
+            throws Exception {
 
         StringBuilder sb = new StringBuilder();
         sb.append("topologyId:" + topology_id + ", ");
@@ -260,9 +216,7 @@ public class Worker {
 
         LOG.info("Begin to run worker:" + sb.toString());
 
-        Worker w =
-                new Worker(conf, context, topology_id, supervisor_id, port,
-                        worker_id, jar_path);
+        Worker w = new Worker(conf, context, topology_id, supervisor_id, port, worker_id, jar_path);
 
         w.redirectOutput();
 
@@ -271,8 +225,7 @@ public class Worker {
 
     public void redirectOutput() {
 
-        if (System.getenv("REDIRECT") == null
-                || !System.getenv("REDIRECT").equals("true")) {
+        if (System.getenv("REDIRECT") == null || !System.getenv("REDIRECT").equals("true")) {
             return;
         }
 
@@ -283,9 +236,7 @@ public class Worker {
             DEFAULT_OUT_TARGET_FILE += ".out";
         }
 
-        String outputFile =
-                ConfigExtension.getWorkerRedirectOutputFile(workerData
-                        .getStormConf());
+        String outputFile = ConfigExtension.getWorkerRedirectOutputFile(workerData.getStormConf());
         if (outputFile == null) {
             outputFile = DEFAULT_OUT_TARGET_FILE;
         } else {
@@ -302,7 +253,6 @@ public class Worker {
                         outputFile = DEFAULT_OUT_TARGET_FILE;
                     }
                 }
-
             } catch (Exception e) {
                 LOG.warn("Failed to touch " + outputFile, e);
                 outputFile = DEFAULT_OUT_TARGET_FILE;
@@ -318,9 +268,7 @@ public class Worker {
     }
 
     /**
-     * Have one problem if the worker's start parameter length is longer than
-     * 4096, ps -ef|grep com.alibaba.jstorm.daemon.worker.Worker can't find
-     * worker
+     * Known problem: if the worker's start parameters are longer than 4096 characters, "ps -ef | grep com.alibaba.jstorm.daemon.worker.Worker" cannot find the worker
      * 
      * @param port
      */
@@ -341,15 +289,11 @@ public class Worker {
 
         try {
             LOG.info("Begin to execute " + sb.toString());
-            Process process =
-                    JStormUtils.launch_process(sb.toString(),
-                            new HashMap<String, String>(), false);
-
+            Process process = JStormUtils.launch_process(sb.toString(), new HashMap<String, String>(), false);
             // Process process = Runtime.getRuntime().exec(sb.toString());
 
             InputStream stdin = process.getInputStream();
-            BufferedReader reader =
-                    new BufferedReader(new InputStreamReader(stdin));
+            BufferedReader reader = new BufferedReader(new InputStreamReader(stdin));
 
             JStormUtils.sleepMs(1000);
 
@@ -405,7 +349,6 @@ public class Worker {
                             LOG.info("Skip kill myself");
                             continue;
                         }
-
                         Integer pid = Integer.valueOf(fields[1]);
 
                         LOG.info("Find one process :" + pid.toString());
@@ -415,9 +358,7 @@ public class Worker {
                         continue;
                     }
                 }
-
             }
-
             return ret;
         } catch (IOException e) {
             LOG.info("Failed to execute " + sb.toString());
@@ -429,13 +370,10 @@ public class Worker {
     }
 
     public static void killOldWorker(String port) {
-
         List<Integer> oldPids = getOldPortPids(port);
         for (Integer pid : oldPids) {
-
             JStormUtils.kill(pid);
         }
-
     }
 
     /**
@@ -456,7 +394,6 @@ public class Worker {
         }
 
         StringBuilder sb = new StringBuilder();
-
         try {
             String topology_id = args[0];
             String supervisor_id = args[1];
@@ -476,9 +413,7 @@ public class Worker {
             sb.append("workerId:" + worker_id + ", ");
             sb.append("jar_path:" + jar_path + "\n");
 
-            WorkerShutdown sd =
-                    mk_worker(conf, null, topology_id, supervisor_id,
-                            Integer.parseInt(port_str), worker_id, jar_path);
+            WorkerShutdown sd = mk_worker(conf, null, topology_id, supervisor_id, Integer.parseInt(port_str), worker_id, jar_path);
             sd.join();
 
             LOG.info("Successfully shutdown worker " + sb.toString());


[21/51] [partial] storm git commit: Update JStorm to latest release 2.1.0

Posted by pt...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/StormStatus.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/StormStatus.java b/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/StormStatus.java
index 5ad70cb..c2e07ee 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/StormStatus.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/StormStatus.java
@@ -29,10 +29,8 @@ import com.alibaba.jstorm.daemon.nimbus.StatusType;
  * 
  * Dedicate Topology status
  * 
- * Topology status: active/inactive/killed/rebalancing killTimeSecs: when status
- * isn't killed, it is -1 and useless. when status is killed, do kill operation
- * after killTimeSecs seconds when status is rebalancing, do rebalancing opation
- * after delaySecs seconds restore oldStatus as current status
+ * Topology status: active/inactive/killed/rebalancing. killTimeSecs is -1 and unused unless the status is killed; when the status is killed, the kill
+ * operation runs after killTimeSecs seconds. When the status is rebalancing, the rebalance operation runs after delaySecs seconds, and then oldStatus is restored as the current status.
  */
 public class StormStatus implements Serializable {
 
@@ -99,9 +97,7 @@ public class StormStatus implements Serializable {
         }
 
         StormStatus check = (StormStatus) base;
-        if (check.getStatusType().equals(getStatusType())
-                && check.getKillTimeSecs() == getKillTimeSecs()
-                && check.getDelaySecs().equals(getDelaySecs())) {
+        if (check.getStatusType().equals(getStatusType()) && check.getKillTimeSecs() == getKillTimeSecs() && check.getDelaySecs().equals(getDelaySecs())) {
             return true;
         }
         return false;
@@ -109,15 +105,12 @@ public class StormStatus implements Serializable {
 
     @Override
     public int hashCode() {
-        return this.getStatusType().hashCode()
-                + this.getKillTimeSecs().hashCode()
-                + this.getDelaySecs().hashCode();
+        return this.getStatusType().hashCode() + this.getKillTimeSecs().hashCode() + this.getDelaySecs().hashCode();
     }
 
     @Override
     public String toString() {
-        return ToStringBuilder.reflectionToString(this,
-                ToStringStyle.SHORT_PREFIX_STYLE);
+        return ToStringBuilder.reflectionToString(this, ToStringStyle.SHORT_PREFIX_STYLE);
     }
 
 }
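
To make the delayed-action semantics in the class comment concrete, here is a small sketch of a kill deferred by killTimeSecs; the value 30 and the scheduler usage are illustrative, not how nimbus actually implements the delay:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class DelayedActionSketch {
        public static void main(String[] args) {
            // would come from StormStatus.getKillTimeSecs() in the real code
            final int killTimeSecs = 30;
            ScheduledExecutorService timer = Executors.newSingleThreadScheduledExecutor();
            timer.schedule(new Runnable() {
                @Override
                public void run() {
                    System.out.println("perform kill now");
                }
            }, killTimeSecs, TimeUnit.SECONDS);
            timer.shutdown(); // by default, already-scheduled delayed tasks still run
        }
    }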

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/StormZkClusterState.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/StormZkClusterState.java b/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/StormZkClusterState.java
index bd60d45..1550c7e 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/StormZkClusterState.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/StormZkClusterState.java
@@ -17,18 +17,8 @@
  */
 package com.alibaba.jstorm.cluster;
 
-import java.util.*;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.zookeeper.KeeperException.NodeExistsException;
-import org.apache.zookeeper.Watcher.Event.EventType;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
+import backtype.storm.generated.TopologyTaskHbInfo;
 import backtype.storm.utils.Utils;
-
 import com.alibaba.jstorm.cache.JStormCache;
 import com.alibaba.jstorm.callback.ClusterStateCallback;
 import com.alibaba.jstorm.callback.RunnableCallback;
@@ -38,14 +28,22 @@ import com.alibaba.jstorm.schedule.Assignment;
 import com.alibaba.jstorm.schedule.AssignmentBak;
 import com.alibaba.jstorm.task.TaskInfo;
 import com.alibaba.jstorm.task.error.TaskError;
-import com.alibaba.jstorm.task.heartbeat.TaskHeartbeat;
+import com.alibaba.jstorm.task.backpressure.SourceBackpressureInfo;
 import com.alibaba.jstorm.utils.JStormUtils;
 import com.alibaba.jstorm.utils.PathUtils;
 import com.alibaba.jstorm.utils.TimeUtils;
+import org.apache.commons.lang.StringUtils;
+import org.apache.zookeeper.KeeperException.NodeExistsException;
+import org.apache.zookeeper.Watcher.Event.EventType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.*;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicReference;
 
 public class StormZkClusterState implements StormClusterState {
-    private static Logger LOG = LoggerFactory
-            .getLogger(StormZkClusterState.class);
+    private static Logger LOG = LoggerFactory.getLogger(StormZkClusterState.class);
 
     private ClusterState cluster_state;
 
@@ -67,12 +65,10 @@ public class StormZkClusterState implements StormClusterState {
         } else {
 
             solo = true;
-            cluster_state =
-                    new DistributedClusterState((Map) cluster_state_spec);
+            cluster_state = new DistributedClusterState((Map) cluster_state_spec);
         }
 
-        assignment_info_callback =
-                new ConcurrentHashMap<String, RunnableCallback>();
+        assignment_info_callback = new ConcurrentHashMap<String, RunnableCallback>();
         supervisors_callback = new AtomicReference<RunnableCallback>(null);
         assignments_callback = new AtomicReference<RunnableCallback>(null);
         storm_base_callback = new ConcurrentHashMap<String, RunnableCallback>();
@@ -85,8 +81,7 @@ public class StormZkClusterState implements StormClusterState {
                     LOG.warn("Input args is null");
                     return null;
                 } else if (args.length < 2) {
-                    LOG.warn("Input args is invalid, args length:"
-                            + args.length);
+                    LOG.warn("Input args is invalid, args length:" + args.length);
                     return null;
                 }
 
@@ -132,11 +127,8 @@ public class StormZkClusterState implements StormClusterState {
         });
 
         String[] pathlist =
-                JStormUtils.mk_arr(Cluster.SUPERVISORS_SUBTREE,
-                        Cluster.STORMS_SUBTREE, Cluster.ASSIGNMENTS_SUBTREE,
-                        Cluster.ASSIGNMENTS_BAK_SUBTREE, Cluster.TASKS_SUBTREE,
-                        Cluster.TASKBEATS_SUBTREE, Cluster.TASKERRORS_SUBTREE,
-                        Cluster.METRIC_SUBTREE);
+                JStormUtils.mk_arr(Cluster.SUPERVISORS_SUBTREE, Cluster.STORMS_SUBTREE, Cluster.ASSIGNMENTS_SUBTREE, Cluster.ASSIGNMENTS_BAK_SUBTREE,
+                        Cluster.TASKS_SUBTREE, Cluster.TASKBEATS_SUBTREE, Cluster.TASKERRORS_SUBTREE, Cluster.METRIC_SUBTREE, Cluster.BACKPRESSURE_SUBTREE);
         for (String path : pathlist) {
             cluster_state.mkdirs(path);
         }
@@ -146,8 +138,7 @@ public class StormZkClusterState implements StormClusterState {
     /**
      * @@@ TODO
      * 
-     *     Just add cache in lower ZK level In fact, for some Object
-     *     Assignment/TaskInfo/StormBase These object can be cache for long time
+     *     Just add a cache at the lower ZK level. In fact, some objects (Assignment/TaskInfo/StormBase) can be cached for a long time.
      * 
      * @param simpleCache
      */
@@ -221,10 +212,10 @@ public class StormZkClusterState implements StormClusterState {
             deleteObject(Cluster.storm_task_root(topologyId));
             teardown_heartbeats(topologyId);
             teardown_task_errors(topologyId);
+            teardown_backpressure(topologyId);
             deleteObject(Cluster.metric_path(topologyId));
         } catch (Exception e) {
-            LOG.warn("Failed to delete task root and monitor root for" 
-                    + topologyId);
+            LOG.warn("Failed to delete task root and monitor root for" + topologyId);
         }
         remove_storm_base(topologyId);
     }
@@ -240,8 +231,7 @@ public class StormZkClusterState implements StormClusterState {
     }
 
     @Override
-    public Assignment assignment_info(String topologyId,
-            RunnableCallback callback) throws Exception {
+    public Assignment assignment_info(String topologyId, RunnableCallback callback) throws Exception {
         if (callback != null) {
             assignment_info_callback.put(topologyId, callback);
         }
@@ -257,13 +247,11 @@ public class StormZkClusterState implements StormClusterState {
         if (callback != null) {
             assignments_callback.set(callback);
         }
-        return cluster_state.get_children(Cluster.ASSIGNMENTS_SUBTREE,
-                callback != null);
+        return cluster_state.get_children(Cluster.ASSIGNMENTS_SUBTREE, callback != null);
     }
 
     @Override
-    public void set_assignment(String topologyId, Assignment info)
-            throws Exception {
+    public void set_assignment(String topologyId, Assignment info) throws Exception {
         setObject(Cluster.assignment_path(topologyId), info);
     }
 
@@ -276,26 +264,22 @@ public class StormZkClusterState implements StormClusterState {
     }
 
     @Override
-    public void backup_assignment(String topologyName, AssignmentBak info)
-            throws Exception {
+    public void backup_assignment(String topologyName, AssignmentBak info) throws Exception {
         setObject(Cluster.assignment_bak_path(topologyName), info);
     }
 
     @Override
-    public StormBase storm_base(String topologyId, RunnableCallback callback)
-            throws Exception {
+    public StormBase storm_base(String topologyId, RunnableCallback callback) throws Exception {
         if (callback != null) {
             storm_base_callback.put(topologyId, callback);
         }
 
-        return (StormBase) getObject(Cluster.storm_path(topologyId),
-                callback != null);
+        return (StormBase) getObject(Cluster.storm_path(topologyId), callback != null);
 
     }
 
     @Override
-    public void activate_storm(String topologyId, StormBase stormBase)
-            throws Exception {
+    public void activate_storm(String topologyId, StormBase stormBase) throws Exception {
         String stormPath = Cluster.storm_path(topologyId);
 
         setObject(stormPath, stormBase);
@@ -307,8 +291,7 @@ public class StormZkClusterState implements StormClusterState {
     }
 
     @Override
-    public void update_storm(String topologyId, StormStatus newElems)
-            throws Exception {
+    public void update_storm(String topologyId, StormStatus newElems) throws Exception {
         /**
          * FIXME, maybe overwrite old callback
          */
@@ -323,8 +306,7 @@ public class StormZkClusterState implements StormClusterState {
     }
 
     @Override
-    public void set_storm_monitor(String topologyId, boolean isEnable)
-            throws Exception {
+    public void set_storm_monitor(String topologyId, boolean isEnable) throws Exception {
         // TODO Auto-generated method stub
         StormBase base = this.storm_base(topologyId, null);
 
@@ -340,30 +322,20 @@ public class StormZkClusterState implements StormClusterState {
     }
 
     @Override
-    public void setup_heartbeats(String topologyId) throws Exception {
-        String taskbeatPath = Cluster.taskbeat_storm_root(topologyId);
-
-        cluster_state.mkdirs(taskbeatPath);
-    }
-
-    @Override
-    public List<String> heartbeat_storms() throws Exception {
-        return cluster_state.get_children(Cluster.TASKBEATS_SUBTREE, false);
+    public void topology_heartbeat(String topologyId, TopologyTaskHbInfo info) throws Exception {
+        String taskPath = Cluster.taskbeat_storm_root(topologyId);
+        setObject(taskPath, info);
     }
 
     @Override
-    public List<String> heartbeat_tasks(String topologyId) throws Exception {
-        String taskbeatPath = Cluster.taskbeat_storm_root(topologyId);
-
-        return cluster_state.get_children(taskbeatPath, false);
+    public TopologyTaskHbInfo topology_heartbeat(String topologyId) throws Exception {
+        String taskPath = Cluster.taskbeat_storm_root(topologyId);
+        return (TopologyTaskHbInfo) getObject(taskPath, false);
     }
 
     @Override
-    public void remove_task_heartbeat(String topologyId, int taskId)
-            throws Exception {
-        String taskbeatPath = Cluster.taskbeat_path(topologyId, taskId);
-
-        deleteObject(taskbeatPath);
+    public List<String> heartbeat_storms() throws Exception {
+        return cluster_state.get_children(Cluster.TASKBEATS_SUBTREE, false);
     }
 
     @Override
@@ -379,14 +351,11 @@ public class StormZkClusterState implements StormClusterState {
     }
 
     @Override
-    public void report_task_error(String topologyId, int taskId, Throwable error)
-            throws Exception {
-        report_task_error(topologyId, taskId,
-                new String(JStormUtils.getErrorInfo(error)));
+    public void report_task_error(String topologyId, int taskId, Throwable error) throws Exception {
+        report_task_error(topologyId, taskId, new String(JStormUtils.getErrorInfo(error)), null);
     }
 
-    public void report_task_error(String topologyId, int taskId, String error)
-            throws Exception {
+    public void report_task_error(String topologyId, int taskId, String error, String tag) throws Exception {
         boolean found = false;
         String path = Cluster.taskerror_path(topologyId, taskId);
         cluster_state.mkdirs(path);
@@ -403,9 +372,10 @@ public class StormZkClusterState implements StormClusterState {
                 deleteObject(errorPath);
                 continue;
             }
-            if (errorInfo.equals(error)) {
-                deleteObject(errorPath);
-                setObject(timestampPath, error);
+            if (errorInfo.equals(error)
+                    || (tag != null && errorInfo.startsWith(tag))) {
+                cluster_state.delete_node(errorPath);
+                cluster_state.set_data(timestampPath, error.getBytes());
                 found = true;
                 break;
             }
@@ -429,8 +399,7 @@ public class StormZkClusterState implements StormClusterState {
     private static final String TASK_IS_DEAD = "is dead on"; // Full string is
                                                              // "task-id is dead on hostname:port"
 
-    private void setLastErrInfo(String topologyId, String error,
-            String timeStamp) throws Exception {
+    private void setLastErrInfo(String topologyId, String error, String timeStamp) throws Exception {
         // Set error information in the task error topology path
         // Last Error information format in ZK: map<report_duration, timestamp>
         // report_duration means only the errors will presented in web ui if the
@@ -440,13 +409,10 @@ public class StormZkClusterState implements StormClusterState {
         String lastErrTopoPath = Cluster.lasterror_path(topologyId);
         Map<Integer, String> lastErrInfo = null;
         try {
-            lastErrInfo =
-                    (Map<Integer, String>) getObject(lastErrTopoPath, false);
+            lastErrInfo = (Map<Integer, String>) getObject(lastErrTopoPath, false);
 
         } catch (Exception e) {
-            LOG.error(
-                    "Failed to get last error time. Remove the corrupt node for "
-                            + topologyId, e);
+            LOG.error("Failed to get last error time. Remove the corrupt node for " + topologyId, e);
             remove_lastErr_time(topologyId);
             lastErrInfo = null;
         }
@@ -466,15 +432,13 @@ public class StormZkClusterState implements StormClusterState {
     }
 
     @Override
-    public void remove_task_error(String topologyId, int taskId)
-            throws Exception {
+    public void remove_task_error(String topologyId, int taskId) throws Exception {
         String path = Cluster.taskerror_path(topologyId, taskId);
         cluster_state.delete_node(path);
     }
 
     @Override
-    public Map<Integer, String> topo_lastErr_time(String topologyId)
-            throws Exception {
+    public Map<Integer, String> topo_lastErr_time(String topologyId) throws Exception {
         String path = Cluster.lasterror_path(topologyId);
 
         return (Map<Integer, String>) getObject(path, false);
@@ -490,17 +454,18 @@ public class StormZkClusterState implements StormClusterState {
     public List<String> task_error_storms() throws Exception {
         return cluster_state.get_children(Cluster.TASKERRORS_SUBTREE, false);
     }
-    
+
     @Override
     public List<String> task_error_ids(String topologyId) throws Exception {
-    	return cluster_state.get_children(Cluster.taskerror_storm_root(topologyId), false);
+        return cluster_state.get_children(Cluster.taskerror_storm_root(topologyId), false);
     }
 
     @Override
-    public List<String> task_error_time(String topologyId, int taskId)
-            throws Exception {
+    public List<String> task_error_time(String topologyId, int taskId) throws Exception {
         String path = Cluster.taskerror_path(topologyId, taskId);
-        cluster_state.mkdirs(path);
+        if (!cluster_state.node_existed(path, false)) {
+            return new ArrayList<String>();
+        }
         return cluster_state.get_children(path, false);
     }
 
@@ -509,38 +474,37 @@ public class StormZkClusterState implements StormClusterState {
         String tasksPath = Cluster.storm_task_root(topologyId);
         Object data = getObject(tasksPath, false);
         if (data != null) {
-            Map<Integer, TaskInfo> taskInfoMap = ((Map<Integer, TaskInfo>)data);
-            for (Integer taskId : taskIds){
+            Map<Integer, TaskInfo> taskInfoMap = ((Map<Integer, TaskInfo>) data);
+            for (Integer taskId : taskIds) {
                 taskInfoMap.remove(taskId);
             }
-            //update zk node of tasks
+            // update zk node of tasks
             setObject(tasksPath, taskInfoMap);
         }
     }
 
     @Override
-    public String task_error_info(String topologyId, int taskId, long timeStamp)
-            throws Exception {
+    public String task_error_info(String topologyId, int taskId, long timeStamp) throws Exception {
         String path = Cluster.taskerror_path(topologyId, taskId);
-        cluster_state.mkdirs(path);
         path = path + "/" + timeStamp;
         return getString(path, false);
     }
 
     @Override
-    public List<TaskError> task_errors(String topologyId, int taskId)
-            throws Exception {
-        String path = Cluster.taskerror_path(topologyId, taskId);
-        cluster_state.mkdirs(path);
+    public List<TaskError> task_errors(String topologyId, int taskId) throws Exception {
+        List<TaskError> errors = new ArrayList<TaskError>();
+        String path = Cluster.taskerror_path(topologyId, taskId);
+        if (!cluster_state.node_existed(path, false)) {
+            return errors;
+        }
 
         List<String> children = cluster_state.get_children(path, false);
-        List<TaskError> errors = new ArrayList<TaskError>();
 
         for (String str : children) {
             byte[] v = cluster_state.get_data(path + "/" + str, false);
             if (v != null) {
-                TaskError error =
-                        new TaskError(new String(v), Integer.parseInt(str));
+                TaskError error = new TaskError(new String(v), Integer.parseInt(str));
                 errors.add(error);
             }
         }
@@ -572,45 +536,28 @@ public class StormZkClusterState implements StormClusterState {
             LOG.error("Could not teardown errors for " + topologyId, e);
         }
     }
+
     @Override
-    public void set_task(String topologyId, Map<Integer, TaskInfo>  taskInfoMap)
-            throws Exception {
+    public void set_task(String topologyId, Map<Integer, TaskInfo> taskInfoMap) throws Exception {
         String stormTaskPath = Cluster.storm_task_root(topologyId);
-        if (taskInfoMap != null){
-            //reupdate zk node of tasks
+        if (taskInfoMap != null) {
+            // reupdate zk node of tasks
             setObject(stormTaskPath, taskInfoMap);
         }
     }
+
     @Override
-    public void add_task(String topologyId, Map<Integer, TaskInfo> taskInfoMap)
-            throws Exception {
+    public void add_task(String topologyId, Map<Integer, TaskInfo> taskInfoMap) throws Exception {
         String stormTaskPath = Cluster.storm_task_root(topologyId);
         Object data = getObject(stormTaskPath, false);
-        if (data != null){
-            ((Map<Integer, TaskInfo>)data).putAll(taskInfoMap);
-            //reupdate zk node of tasks
+        if (data != null) {
+            ((Map<Integer, TaskInfo>) data).putAll(taskInfoMap);
+            // reupdate zk node of tasks
             setObject(stormTaskPath, data);
         }
     }
 
     @Override
-    public TaskHeartbeat task_heartbeat(String topologyId, int taskId)
-            throws Exception {
-        String taskbeatPath = Cluster.taskbeat_path(topologyId, taskId);
-
-        return (TaskHeartbeat) getObjectSync(taskbeatPath, false);
-
-    }
-
-    @Override
-    public void task_heartbeat(String topologyId, int taskId, TaskHeartbeat info)
-            throws Exception {
-        String taskPath = Cluster.taskbeat_path(topologyId, taskId);
-
-        setObject(taskPath, info);
-    }
-
-    @Override
     public List<String> task_storms() throws Exception {
         return cluster_state.get_children(Cluster.TASKS_SUBTREE, false);
     }
@@ -623,23 +570,22 @@ public class StormZkClusterState implements StormClusterState {
         if (data == null) {
             return null;
         }
-        return ((Map<Integer, TaskInfo>)data).keySet();
+        return ((Map<Integer, TaskInfo>) data).keySet();
     }
 
     @Override
-    public Set<Integer> task_ids_by_componentId(String topologyId,
-            String componentId) throws Exception {
+    public Set<Integer> task_ids_by_componentId(String topologyId, String componentId) throws Exception {
         String stormTaskPath = Cluster.storm_task_root(topologyId);
         Object data = getObject(stormTaskPath, false);
         if (data == null) {
             return null;
         }
-        Map<Integer, TaskInfo> taskInfoMap = (Map<Integer, TaskInfo>)data;
+        Map<Integer, TaskInfo> taskInfoMap = (Map<Integer, TaskInfo>) data;
         Set<Integer> rtn = new HashSet<Integer>();
         Set<Integer> taskIds = taskInfoMap.keySet();
-        for(Integer taskId : taskIds){
+        for (Integer taskId : taskIds) {
             TaskInfo taskInfo = taskInfoMap.get(taskId);
-            if (taskInfo != null){
+            if (taskInfo != null) {
                 if (taskInfo.getComponentId().equalsIgnoreCase(componentId))
                     rtn.add(taskId);
             }
@@ -672,13 +618,11 @@ public class StormZkClusterState implements StormClusterState {
         if (callback != null) {
             supervisors_callback.set(callback);
         }
-        return cluster_state.get_children(Cluster.SUPERVISORS_SUBTREE,
-                callback != null);
+        return cluster_state.get_children(Cluster.SUPERVISORS_SUBTREE, callback != null);
     }
 
     @Override
-    public void supervisor_heartbeat(String supervisorId, SupervisorInfo info)
-            throws Exception {
+    public void supervisor_heartbeat(String supervisorId, SupervisorInfo info) throws Exception {
 
         String supervisorPath = Cluster.supervisor_path(supervisorId);
 
@@ -703,15 +647,13 @@ public class StormZkClusterState implements StormClusterState {
     }
 
     public String get_nimbus_slave_time(String host) throws Exception {
-        String path =
-                Cluster.NIMBUS_SLAVE_SUBTREE + Cluster.ZK_SEPERATOR + host;
-        return (String) getObject(path, false);
+        String path = Cluster.NIMBUS_SLAVE_SUBTREE + Cluster.ZK_SEPERATOR + host;
+        return getString(path, false);
     }
 
     @Override
     public void update_nimbus_slave(String host, int time) throws Exception {
-        setTempObject(Cluster.NIMBUS_SLAVE_SUBTREE + Cluster.ZK_SEPERATOR
-                + host, String.valueOf(time));
+        setTempObject(Cluster.NIMBUS_SLAVE_SUBTREE + Cluster.ZK_SEPERATOR + host, String.valueOf(time));
     }
 
     @Override
@@ -720,8 +662,24 @@ public class StormZkClusterState implements StormClusterState {
     }
 
     @Override
-    public boolean try_to_be_leader(String path, String host,
-            RunnableCallback callback) throws Exception {
+    public void update_nimbus_detail(String hostPort, Map map) throws Exception {
+        cluster_state.set_ephemeral_node(Cluster.NIMBUS_SLAVE_DETAIL_SUBTREE + Cluster.ZK_SEPERATOR + hostPort, Utils.serialize(map));
+    }
+
+    @Override
+    public Map get_nimbus_detail(String hostPort, boolean watch) throws Exception {
+        byte[] data = cluster_state.get_data(Cluster.NIMBUS_SLAVE_DETAIL_SUBTREE + Cluster.ZK_SEPERATOR + hostPort, watch);
+        return (Map) Utils.maybe_deserialize(data);
+    }
+
+    @Override
+    public void unregister_nimbus_detail(String hostPort) throws Exception {
+        cluster_state.delete_node(Cluster.NIMBUS_SLAVE_DETAIL_SUBTREE + Cluster.ZK_SEPERATOR + hostPort);
+    }
+
+    @Override
+    public boolean try_to_be_leader(String path, String host, RunnableCallback callback) throws Exception {
         // TODO Auto-generated method stub
         if (callback != null)
             this.master_callback.set(callback);
@@ -736,24 +694,53 @@ public class StormZkClusterState implements StormClusterState {
     }
 
     @Override
-    public void set_topology_metric(String topologyId, Object metric)
-            throws Exception {
-        // TODO Auto-generated method stub
+    public void set_topology_metric(String topologyId, Object metric) throws Exception {
         String path = Cluster.metric_path(topologyId);
-
         setObject(path, metric);
     }
 
     @Override
     public Object get_topology_metric(String topologyId) throws Exception {
-        // TODO Auto-generated method stub
         return getObject(Cluster.metric_path(topologyId), false);
     }
 
-	@Override
-	public List<String> get_metrics() throws Exception {
-		// TODO Auto-generated method stub
-		return cluster_state.get_children(Cluster.METRIC_SUBTREE, false);
-	}
+    @Override
+    public List<String> get_metrics() throws Exception {
+        return cluster_state.get_children(Cluster.METRIC_SUBTREE, false);
+    }
+
+    @Override
+    public List<String> list_dirs(String path, boolean watch) throws Exception {
+        return cluster_state.get_children(path, watch);
+    }
 
+    @Override
+    public List<String> backpressureInfos() throws Exception {
+        return cluster_state.get_children(Cluster.BACKPRESSURE_SUBTREE, false);
+    }
+
+    @Override
+    public void set_backpressure_info(String topologyId, Map<String, SourceBackpressureInfo> sourceToBackpressureInfo) throws Exception {
+        String path = Cluster.backpressure_path(topologyId);
+        cluster_state.set_data(path, Utils.serialize(sourceToBackpressureInfo));
+    }
+
+    @Override
+    public Map<String, SourceBackpressureInfo> get_backpressure_info(String topologyId) throws Exception {
+        String path = Cluster.backpressure_path(topologyId);
+        byte[] data = cluster_state.get_data(path, false);
+        return (Map<String, SourceBackpressureInfo>) Utils.maybe_deserialize(data);
+    }
+
+    @Override
+    public void teardown_backpressure(String topologyId) {
+        try {
+            String backpressurePath = Cluster.backpressure_path(topologyId);
+
+            cluster_state.delete_node(backpressurePath);
+        } catch (Exception e) {
+            LOG.warn("Could not teardown backpressure info for " + topologyId, e);
+        }
+    }
 }
\ No newline at end of file
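
The new backpressure accessors form a plain serialize/deserialize round trip on the BACKPRESSURE subtree; a minimal usage sketch, assuming a StormZkClusterState instance named zk and an existing topologyId:

    // sketch only: zk and topologyId are assumed to exist in the caller
    Map<String, SourceBackpressureInfo> info = new HashMap<String, SourceBackpressureInfo>();
    zk.set_backpressure_info(topologyId, info);   // Utils.serialize -> backpressure path
    Map<String, SourceBackpressureInfo> read = zk.get_backpressure_info(topologyId);
    zk.teardown_backpressure(topologyId);         // also invoked during topology removal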

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/AsmCounter.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/AsmCounter.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/AsmCounter.java
new file mode 100644
index 0000000..120bbfb
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/AsmCounter.java
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric;
+
+
+import com.alibaba.jstorm.common.metric.snapshot.AsmCounterSnapshot;
+import com.alibaba.jstorm.common.metric.snapshot.AsmSnapshot;
+import com.codahale.metrics.Counter;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * counter wrapper. note that counters are a little special: every snapshot returns only the delta value instead of
+ * the total value, which prevents data loss if certain tasks are killed.
+ */
+public class AsmCounter extends AsmMetric<Counter> {
+
+    private final Map<Integer, Counter> counterMap = new ConcurrentHashMap<>();
+    private Counter unFlushed = new Counter();
+
+    public AsmCounter() {
+        super();
+        for (int win : windowSeconds) {
+            counterMap.put(win, new Counter());
+        }
+    }
+
+    public void inc() {
+        update(1);
+    }
+
+    @Override
+    public void update(Number val) {
+        this.unFlushed.inc(val.longValue());
+    }
+
+    /**
+     * flush temp counter data to all windows & assoc metrics.
+     */
+    protected void doFlush() {
+        long v = unFlushed.getCount();
+        for (Counter counter : counterMap.values()) {
+            counter.inc(v);
+        }
+        for (AsmMetric assocMetric : assocMetrics) {
+            assocMetric.updateDirectly(v);
+        }
+
+        this.unFlushed.dec(v);
+    }
+
+    @Override
+    public Map<Integer, Counter> getWindowMetricMap() {
+        return counterMap;
+    }
+
+    @Override
+    public Counter mkInstance() {
+        return new Counter();
+    }
+
+    @Override
+    protected void updateSnapshot(int window) {
+        Counter counter = counterMap.get(window);
+        if (counter != null) {
+            AsmSnapshot snapshot = new AsmCounterSnapshot().setValue(counter.getCount())
+                    .setTs(System.currentTimeMillis()).setMetricId(metricId);
+            snapshots.put(window, snapshot);
+        }
+    }
+
+    @Override
+    public AsmMetric clone() {
+        return new AsmCounter();
+    }
+}
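
A short sketch of the delta semantics promised by the class comment (flush() is normally driven by the metric framework rather than called by hand):

    AsmCounter counter = new AsmCounter();
    counter.inc();       // buffered in the unFlushed counter
    counter.update(4L);  // unFlushed == 5
    // doFlush() adds 5 to every window counter, then decrements unFlushed
    // by 5, so updates arriving during the flush survive to the next cycle.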

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/AsmGauge.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/AsmGauge.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/AsmGauge.java
new file mode 100644
index 0000000..4bc255a
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/AsmGauge.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric;
+
+import com.alibaba.jstorm.common.metric.snapshot.AsmGaugeSnapshot;
+import com.alibaba.jstorm.common.metric.snapshot.AsmSnapshot;
+import com.codahale.metrics.Gauge;
+
+import java.util.Map;
+
+/**
+ * gauges cannot be aggregated.
+ */
+public class AsmGauge extends AsmMetric<Gauge> {
+
+    private Gauge gauge;
+
+    public AsmGauge(Gauge<Double> gauge) {
+        this.aggregate = false;
+        this.gauge = gauge;
+    }
+
+    @Override
+    public void update(Number obj) {
+        // nothing to do for gauges.
+    }
+
+    @Override
+    public AsmMetric clone() {
+        AsmMetric metric = new AsmGauge(this.gauge);
+        metric.setMetricName(this.getMetricName());
+        return metric;
+    }
+
+    @Override
+    public Map<Integer, Gauge> getWindowMetricMap() {
+        return null;
+    }
+
+    @Override
+    public Gauge mkInstance() {
+        return null;
+    }
+
+    @Override
+    protected void doFlush() {
+        // nothing to do for gauges.
+    }
+
+    @Override
+    protected void updateSnapshot(int window) {
+        double v = (Double) gauge.getValue();
+        AsmSnapshot snapshot = new AsmGaugeSnapshot().setValue(v)
+                .setTs(System.currentTimeMillis()).setMetricId(metricId);
+        snapshots.put(window, snapshot);
+    }
+}
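
Since update() is a no-op, the wrapped callback is the only data source; a sketch of wiring one up (the queue is illustrative):

    // imports assumed: com.codahale.metrics.Gauge, java.util.Queue,
    // java.util.concurrent.ConcurrentLinkedQueue
    final Queue<Object> pending = new ConcurrentLinkedQueue<Object>();
    AsmGauge depth = new AsmGauge(new Gauge<Double>() {
        @Override
        public Double getValue() {
            // re-read on every snapshot; nothing is buffered in between
            return (double) pending.size();
        }
    });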

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/AsmHistogram.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/AsmHistogram.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/AsmHistogram.java
new file mode 100644
index 0000000..43c8dbc
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/AsmHistogram.java
@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric;
+
+import com.alibaba.jstorm.common.metric.snapshot.AsmHistogramSnapshot;
+import com.alibaba.jstorm.common.metric.snapshot.AsmSnapshot;
+import com.codahale.metrics.ExponentiallyDecayingReservoir;
+import com.codahale.metrics.Histogram;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * each window has a separate histogram, which is recreated after the window cycle.
+ */
+public class AsmHistogram extends AsmMetric<Histogram> {
+
+    private final Map<Integer, Histogram> histogramMap = new ConcurrentHashMap<Integer, Histogram>();
+    private Histogram unFlushed = newHistogram();
+
+    public AsmHistogram() {
+        super();
+        for (int win : windowSeconds) {
+            histogramMap.put(win, newHistogram());
+        }
+    }
+
+    @Override
+    public void update(Number obj) {
+        if (sample()) {
+            this.unFlushed.update(obj.longValue());
+        }
+    }
+
+    @Override
+    public void updateDirectly(Number obj) {
+        this.unFlushed.update(obj.longValue());
+    }
+
+    @Override
+    public Map<Integer, Histogram> getWindowMetricMap() {
+        return histogramMap;
+    }
+
+    @Override
+    public Histogram mkInstance() {
+        return newHistogram();
+    }
+
+    @Override
+    protected void updateSnapshot(int window) {
+        Histogram histogram = histogramMap.get(window);
+        if (histogram != null) {
+            AsmSnapshot snapshot = new AsmHistogramSnapshot().setSnapshot(histogram.getSnapshot())
+                    .setTs(System.currentTimeMillis()).setMetricId(metricId);
+            snapshots.put(window, snapshot);
+        }
+    }
+
+    /**
+     * flush temp histogram data to all windows & assoc metrics.
+     */
+    protected void doFlush() {
+        long[] values = unFlushed.getSnapshot().getValues();
+        for (Histogram histogram : histogramMap.values()) {
+            for (long val : values) {
+                histogram.update(val);
+            }
+        }
+        for (long val : values) {
+            for (AsmMetric metric : this.assocMetrics) {
+                metric.updateDirectly(val);
+            }
+        }
+        this.unFlushed = newHistogram();
+    }
+
+    @Override
+    public AsmMetric clone() {
+        return new AsmHistogram();
+    }
+
+    private Histogram newHistogram() {
+        return new Histogram(new ExponentiallyDecayingReservoir());
+    }
+}
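
The two update paths differ only in whether sample() gates the write; a sketch:

    AsmHistogram latency = new AsmHistogram();
    latency.update(12L);          // recorded only if sample() passes
    latency.updateDirectly(12L);  // always recorded; this is the path
                                  // doFlush() uses for assoc metrics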

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/AsmMeter.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/AsmMeter.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/AsmMeter.java
new file mode 100644
index 0000000..8959800
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/AsmMeter.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric;
+
+import com.alibaba.jstorm.common.metric.snapshot.AsmMeterSnapshot;
+import com.alibaba.jstorm.common.metric.snapshot.AsmSnapshot;
+import com.codahale.metrics.Meter;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * one meter & one snapshot for all windows. since meter is window-sliding, there's no need to recreate new ones.
+ */
+public class AsmMeter extends AsmMetric<Meter> {
+    private final Meter meter = new Meter();
+
+    public void mark() {
+        meter.mark(1L);
+    }
+
+    @Override
+    public void update(Number obj) {
+        meter.mark(obj.longValue());
+        for (AsmMetric metric : this.assocMetrics) {
+            metric.update(obj);
+        }
+    }
+
+    @Override
+    public AsmMetric clone() {
+        return new AsmMeter();
+    }
+
+    @Override
+    public Map<Integer, Meter> getWindowMetricMap() {
+        return null;
+    }
+
+    @Override
+    protected void doFlush() {
+        // nothing to do for meters.
+    }
+
+    @Override
+    protected void updateSnapshot(int window) {
+        AsmMeterSnapshot meterSnapshot = new AsmMeterSnapshot();
+        meterSnapshot.setM1(meter.getOneMinuteRate()).setM5(meter.getFiveMinuteRate()).setM15(meter.getFifteenMinuteRate()).setMean(meter.getMeanRate())
+                .setTs(System.currentTimeMillis()).setMetricId(metricId);
+        snapshots.put(window, meterSnapshot);
+    }
+
+    @Override
+    public Meter mkInstance() {
+        return null;
+    }
+}
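
Because codahale meters already decay over sliding time windows, a single instance backs every reporting window; a sketch:

    AsmMeter throughput = new AsmMeter();
    throughput.mark();       // one event
    throughput.update(10L);  // ten events, also forwarded to assoc metrics
    // updateSnapshot() copies the 1/5/15-minute and mean rates into one
    // AsmMeterSnapshot shared across all windows.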

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/AsmMetric.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/AsmMetric.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/AsmMetric.java
new file mode 100644
index 0000000..d399e12
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/AsmMetric.java
@@ -0,0 +1,267 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric;
+
+import com.alibaba.jstorm.client.ConfigExtension;
+import com.alibaba.jstorm.common.metric.snapshot.AsmSnapshot;
+import com.alibaba.jstorm.metric.AsmWindow;
+import com.alibaba.jstorm.metric.MetaType;
+import com.alibaba.jstorm.metric.MetricType;
+import com.alibaba.jstorm.utils.TimeUtils;
+import com.codahale.metrics.Metric;
+import com.google.common.base.Joiner;
+import com.google.common.collect.Lists;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.*;
+import java.util.concurrent.ConcurrentHashMap;
+
+public abstract class AsmMetric<T extends Metric> {
+    protected final Logger logger = LoggerFactory.getLogger(getClass());
+
+    private static final Joiner JOINER = Joiner.on(".");
+
+    protected static final List<Integer> windowSeconds = Lists
+            .newArrayList(AsmWindow.M1_WINDOW, AsmWindow.M10_WINDOW, AsmWindow.H2_WINDOW, AsmWindow.D1_WINDOW);
+    protected static final List<Integer> nettyWindows = Lists.newArrayList(AsmWindow.M1_WINDOW);
+
+    protected static int minWindow = AsmWindow.M1_WINDOW;
+    protected static final List<Integer> EMPTY_WIN = Lists.newArrayListWithCapacity(0);
+    /**
+     * sample rate for meter, histogram and timer, note that counter & gauge are not sampled.
+     */
+    private static double sampleRate = ConfigExtension.DEFAULT_METRIC_SAMPLE_RATE;
+
+    protected int op = MetricOp.REPORT;
+    protected volatile long metricId = 0L;
+    protected String metricName;
+    protected boolean aggregate = true;
+    protected volatile long lastFlushTime = TimeUtils.current_time_secs() - AsmWindow.M1_WINDOW;
+    protected Map<Integer, Long> rollingTimeMap = new ConcurrentHashMap<>();
+    protected Map<Integer, Boolean> rollingDirtyMap = new ConcurrentHashMap<>();
+
+    protected final Map<Integer, AsmSnapshot> snapshots = new ConcurrentHashMap<Integer, AsmSnapshot>();
+
+    protected Set<AsmMetric> assocMetrics = new HashSet<AsmMetric>();
+
+    public AsmMetric() {
+        for (Integer win : windowSeconds) {
+            rollingTimeMap.put(win, lastFlushTime);
+            rollingDirtyMap.put(win, false);
+        }
+    }
+
+    /**
+     * keep a Random per instance to avoid contention (Random is thread-safe, but sharing one is slower).
+     */
+    private final Random rand = new Random();
+
+    protected boolean sample() {
+        return rand.nextDouble() <= sampleRate;
+    }
+
+    public static void setSampleRate(double sampleRate) {
+        AsmMetric.sampleRate = sampleRate;
+    }
+
+    /**
+     * update the metric with a new value. for performance, sampled subclasses (histogram/timer) may drop some updates.
+     */
+    public abstract void update(Number obj);
+
+
+    public void updateDirectly(Number obj) {
+        update(obj);
+    }
+
+    public abstract AsmMetric clone();
+
+    public AsmMetric setOp(int op) {
+        this.op = op;
+        return this;
+    }
+
+    public int getOp() {
+        return this.op;
+    }
+
+    /**
+     * for test
+     */
+    public static void setWindowSeconds(List<Integer> windows) {
+        synchronized (windowSeconds) {
+            windowSeconds.clear();
+            windowSeconds.addAll(windows);
+
+            minWindow = getMinWindow(windows);
+        }
+    }
+
+    public static int getMinWindow(List<Integer> windows) {
+        int min = Integer.MAX_VALUE;
+        for (int win : windows) {
+            if (win < min) {
+                min = win;
+            }
+        }
+        return min;
+    }
+
+    public void addAssocMetrics(AsmMetric... metrics) {
+        Collections.addAll(assocMetrics, metrics);
+    }
+
+    public long getMetricId() {
+        return metricId;
+    }
+
+    public void setMetricId(long metricId) {
+        this.metricId = metricId;
+    }
+
+    public String getMetricName() {
+        return metricName;
+    }
+
+    public void setMetricName(String metricName) {
+        this.metricName = metricName;
+    }
+
+    public void flush() {
+        long time = TimeUtils.current_time_secs();
+        List<Integer> windows = getValidWindows();
+        if (windows.size() == 0) {
+            return;
+        }
+
+        doFlush();
+
+        List<Integer> rollwindows = rollWindows(time, windows);
+
+        for (int win : windows) {
+            if (rollwindows.contains(win)) {
+                updateSnapshot(win);
+
+                Map<Integer, T> metricMap = getWindowMetricMap();
+                if (metricMap != null) {
+                    metricMap.put(win, mkInstance());
+                }
+            } else if (!rollingDirtyMap.get(win)) {
+                // if this window has never rolled, we still refresh its snapshot
+                updateSnapshot(win);
+            }
+        }
+        this.lastFlushTime = TimeUtils.current_time_secs();
+    }
+
+    public List<Integer> rollWindows(long time, List<Integer> windows) {
+        List<Integer> rolling = new ArrayList<>();
+        for (Integer win : windows) {
+            long rollingTime = rollingTimeMap.get(win);
+            // the flush may be slightly off the window boundary, so allow a 5-sec bias
+            if (time - rollingTime >= win - 5) {
+                rolling.add(win);
+                rollingDirtyMap.put(win, true);     // mark that this window has rolled
+                rollingTimeMap.put(win, (long) TimeUtils.current_time_secs());
+            }
+        }
+        return rolling;
+    }
+
+    /**
+     * flush temp data to all windows & assoc metrics.
+     */
+    protected abstract void doFlush();
+
+    public abstract Map<Integer, T> getWindowMetricMap();
+
+    public abstract T mkInstance();
+
+    protected abstract void updateSnapshot(int window);
+
+    public Map<Integer, AsmSnapshot> getSnapshots() {
+        return snapshots;
+    }
+
+    /**
+     * DO NOT decide whether to flush with a strict 60-sec check: time alignment can be off by a little (maybe less than 1 sec?),
+     * so we subtract 5 sec from the min flush window.
+     */
+    public List<Integer> getValidWindows() {
+        long diff = TimeUtils.current_time_secs() - this.lastFlushTime + 5;
+        if (diff < minWindow) {
+            // logger.warn("no valid windows for metric:{}, diff:{}", this.metricName, diff);
+            return EMPTY_WIN;
+        }
+        // for netty metrics, use only 1min window
+        if (this.metricName.startsWith(MetaType.NETTY.getV())) {
+            return nettyWindows;
+        }
+
+        return windowSeconds;
+    }
+
+    public boolean isAggregate() {
+        return aggregate;
+    }
+
+    public void setAggregate(boolean aggregate) {
+        this.aggregate = aggregate;
+    }
+
+    public static String mkName(Object... parts) {
+        return JOINER.join(parts);
+    }
+
+    public static class MetricOp {
+        public static final int LOG = 1;
+        public static final int REPORT = 2;
+    }
+
+    public static class Builder {
+        public static AsmMetric build(MetricType metricType) {
+            AsmMetric metric;
+            if (metricType == MetricType.COUNTER) {
+                metric = new AsmCounter();
+            } else if (metricType == MetricType.METER) {
+                metric = new AsmMeter();
+            } else if (metricType == MetricType.HISTOGRAM) {
+                metric = new AsmHistogram();
+            } else if (metricType == MetricType.TIMER) {
+                metric = new AsmTimer();
+            } else {
+                throw new IllegalArgumentException("invalid metric type:" + metricType);
+            }
+            return metric;
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        AsmMeter meter = new AsmMeter();
+        int t = 0, f = 0;
+        for (int i = 0; i < 100; i++) {
+            if (meter.sample()) {
+                t++;
+            } else {
+                f++;
+            }
+        }
+        System.out.println(t + "," + f);
+    }
+}
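
Worked numbers for the 5-second bias in getValidWindows() and rollWindows() (values assumed for illustration):

    long lastFlushTime = 1000;             // secs, set by the previous flush
    int minWindow = 60;                    // AsmWindow.M1_WINDOW
    long now = 1056;                       // flush fires 4 secs early
    long diff = now - lastFlushTime + 5;   // 61
    boolean mayFlush = diff >= minWindow;  // true: the 1-min window still rolls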

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/AsmTimer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/AsmTimer.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/AsmTimer.java
new file mode 100644
index 0000000..d9f46e1
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/AsmTimer.java
@@ -0,0 +1,107 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric;
+
+import com.alibaba.jstorm.common.metric.snapshot.AsmMeterSnapshot;
+import com.alibaba.jstorm.common.metric.snapshot.AsmTimerSnapshot;
+import com.codahale.metrics.*;
+import com.codahale.metrics.Timer;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * same as histogram: each window has a separate timer, which is recreated after the window cycle. note that a timer internally stores all
+ * data in nanoseconds, so for most cases a plain histogram can be used instead.
+ */
+public class AsmTimer extends AsmMetric<Timer> {
+    private final Map<Integer, Timer> timerMap = new ConcurrentHashMap<Integer, Timer>();
+    private Timer unFlushed = newTimer();
+
+    public AsmTimer() {
+        super();
+        for (int win : windowSeconds) {
+            timerMap.put(win, newTimer());
+        }
+    }
+
+    @Override
+    public void update(Number obj) {
+        if (sample()) {
+            this.unFlushed.update(obj.longValue(), TimeUnit.MILLISECONDS);
+        }
+    }
+
+    @Override
+    public void updateDirectly(Number obj) {
+        this.unFlushed.update(obj.longValue(), TimeUnit.MILLISECONDS);
+    }
+
+    @Override
+    public Map<Integer, Timer> getWindowMetricMap() {
+        return timerMap;
+    }
+
+    @Override
+    public Timer mkInstance() {
+        return newTimer();
+    }
+
+    @Override
+    protected void updateSnapshot(int window) {
+        Timer timer = timerMap.get(window);
+        if (timer != null){
+            AsmTimerSnapshot timerSnapshot = new AsmTimerSnapshot();
+            timerSnapshot.setHistogram(timer.getSnapshot());
+            timerSnapshot.setMeter(new AsmMeterSnapshot().setM1(timer.getOneMinuteRate()).setM5(timer.getFiveMinuteRate())
+                    .setM15(timer.getFifteenMinuteRate()).setMean(timer.getMeanRate()));
+            if (metricId > 0) {
+                timerSnapshot.setMetricId(metricId);
+            }
+            timerSnapshot.setTs(System.currentTimeMillis());
+            snapshots.put(window, timerSnapshot);
+        }
+    }
+
+    /**
+     * flush temp timer data to all windows & assoc metrics.
+     */
+    protected void doFlush() {
+        long[] values = unFlushed.getSnapshot().getValues();
+        for (Timer timer : timerMap.values()) {
+            for (long val : values) {
+                timer.update(val, TimeUnit.MILLISECONDS);
+            }
+        }
+        // fan out to assoc metrics once per value, not once per window timer
+        for (long val : values) {
+            for (AsmMetric metric : this.assocMetrics) {
+                metric.updateDirectly(val);
+            }
+        }
+        this.unFlushed = newTimer();
+    }
+
+    @Override
+    public AsmMetric clone() {
+        return new AsmTimer();
+    }
+
+    private Timer newTimer() {
+        return new Timer(new ExponentiallyDecayingReservoir());
+    }
+}
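
Callers pass millisecond durations and codahale stores nanoseconds internally; a sketch (the workload call is illustrative):

    AsmTimer rpcTimer = new AsmTimer();
    long start = System.currentTimeMillis();
    handleRequest();  // hypothetical workload, not part of this patch
    rpcTimer.update(System.currentTimeMillis() - start);  // ms in, ns stored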

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/Counter.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/Counter.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/Counter.java
deleted file mode 100755
index f9e97dd..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/Counter.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric;
-
-import com.alibaba.jstorm.common.metric.operator.convert.DefaultConvertor;
-import com.alibaba.jstorm.common.metric.operator.merger.SumMerger;
-import com.alibaba.jstorm.common.metric.operator.updater.AddUpdater;
-import com.alibaba.jstorm.common.metric.window.Metric;
-
-/**
- * The class is similar to com.codahale.metrics.Counter
- * 
- * Sum all window's value
- * 
- * how to use Counter , please refer to Sampling Interface
- * 
- * @author zhongyan.feng
- *
- * @param <T>
- */
-public class Counter<T extends Number> extends Metric<T, T> {
-    private static final long serialVersionUID = -1362345159511508074L;
-
-    /**
-     * 
-     * @param defaultValue
-     */
-    public Counter(T zero) {
-        updater = new AddUpdater<T>();
-        merger = new SumMerger<T>();
-        convertor = new DefaultConvertor<T>();
-        defaultValue = zero;
-
-        init();
-    }
-
-    public static void main(String[] args) {
-
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/CounterData.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/CounterData.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/CounterData.java
new file mode 100644
index 0000000..03d13be
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/CounterData.java
@@ -0,0 +1,34 @@
+package com.alibaba.jstorm.common.metric;
+
+
+import com.alibaba.jstorm.metric.Bytes;
+import com.alibaba.jstorm.metric.KVSerializable;
+
+/**
+ * @author wange
+ * @since 15/6/23
+ */
+public class CounterData extends MetricBaseData implements KVSerializable {
+    private long v;
+
+    public long getV() {
+        return v;
+    }
+
+    public void setV(long v) {
+        this.v = v;
+    }
+
+    @Override
+    public byte[] getValue() {
+        return Bytes.toBytes(v);
+    }
+
+    @Override
+    public Object fromKV(byte[] key, byte[] value) {
+        parseKey(key);
+        this.v = Bytes.toLong(value);
+
+        return this;
+    }
+}
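
The KV contract here is an 8-byte long value; the key layout comes from MetricBaseData.parseKey, which is outside this hunk. A sketch of the value side:

    CounterData data = new CounterData();
    data.setV(42L);
    byte[] value = data.getValue();  // Bytes.toBytes(42L), 8 bytes
    // fromKV(key, value) first parses the key via MetricBaseData.parseKey
    // (not shown here) and then restores v with Bytes.toLong(value).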

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/Gauge.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/Gauge.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/Gauge.java
deleted file mode 100755
index 30fa110..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/Gauge.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric;
-
-import java.util.Map;
-import java.util.TreeMap;
-
-import com.alibaba.jstorm.common.metric.window.Metric;
-import com.alibaba.jstorm.common.metric.window.StatBuckets;
-
-public class Gauge<T extends Number> extends Metric<Number, Number> {
-    private static final long serialVersionUID = 1985614006717750790L;
-
-    protected com.codahale.metrics.Gauge<T> gauge;
-
-    public Gauge(com.codahale.metrics.Gauge<T> gauge) {
-        this.gauge = gauge;
-
-        init();
-    }
-
-    @Override
-    public void init() {
-
-    }
-
-    @Override
-    public void update(Number obj) {
-        // TODO Auto-generated method stub
-    }
-
-    @Override
-    public Map<Integer, Number> getSnapshot() {
-        // TODO Auto-generated method stub
-        Number value = gauge.getValue();
-
-        Map<Integer, Number> ret = new TreeMap<Integer, Number>();
-        for (Integer timeKey : windowSeconds) {
-            ret.put(timeKey, value);
-        }
-        ret.put(StatBuckets.ALL_TIME_WINDOW, value);
-
-        return ret;
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/GaugeData.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/GaugeData.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/GaugeData.java
new file mode 100644
index 0000000..134a194
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/GaugeData.java
@@ -0,0 +1,34 @@
+package com.alibaba.jstorm.common.metric;
+
+
+import com.alibaba.jstorm.metric.Bytes;
+import com.alibaba.jstorm.metric.KVSerializable;
+
+/**
+ * @author wange
+ * @since 15/6/23
+ */
+public class GaugeData extends MetricBaseData implements KVSerializable {
+    private double v;
+
+    public double getV() {
+        return v;
+    }
+
+    public void setV(double v) {
+        this.v = v;
+    }
+
+    @Override
+    public byte[] getValue() {
+        return Bytes.toBytes(v);
+    }
+
+    @Override
+    public Object fromKV(byte[] key, byte[] value) {
+        parseKey(key);
+        this.v = Bytes.toDouble(value);
+
+        return this;
+    }
+}
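
GaugeData is the same shape with a double payload. Assuming this Bytes class mirrors HBase's Bytes, where a double is encoded via Double.doubleToLongBits, the value round-trips bit-exactly; a sketch of that assumption:

    // Assumed HBase-style encoding: double <-> raw long bits, big-endian.
    double g = 3.14159;
    byte[] enc = Bytes.toBytes(g);
    assert Bytes.toLong(enc) == Double.doubleToLongBits(g);
    assert Bytes.toDouble(enc) == g;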

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/Histogram.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/Histogram.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/Histogram.java
deleted file mode 100755
index 7276fdf..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/Histogram.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric;
-
-import com.alibaba.jstorm.common.metric.operator.convert.Convertor;
-import com.alibaba.jstorm.common.metric.operator.merger.AvgMerger;
-import com.alibaba.jstorm.common.metric.operator.updater.AvgUpdater;
-import com.alibaba.jstorm.common.metric.window.Metric;
-
-/**
- * Meter is used to compute tps
- * 
- * Attention: 1.
- * 
- * @author zhongyan.feng
- * 
- */
-public class Histogram extends Metric<Double, Histogram.HistorgramPair> {
-    private static final long serialVersionUID = -1362345159511508074L;
-
-    public Histogram() {
-        defaultValue =
-                new HistorgramPair();
-        updater = new AvgUpdater();
-        merger = new AvgMerger();
-        convertor = new HistogramConvertor();
-
-        init();
-    }
-
-    public static class HistogramConvertor implements
-            Convertor<HistorgramPair, Double> {
-        private static final long serialVersionUID = -1569170826785657226L;
-
-        @Override
-        public Double convert(HistorgramPair from) {
-            // TODO Auto-generated method stub
-            if (from == null) {
-                return 0.0d;
-            }
-
-            if (from.getTimes() == 0) {
-                return 0.0d;
-            } else {
-                return from.getSum()/ from.getTimes();
-            }
-        }
-
-    }
-    
-    public static class HistorgramPair {
-        private double sum;
-        private long times;
-        
-        public HistorgramPair() {
-            
-        }
-        
-        public HistorgramPair(double sum, long times){
-            this.sum = sum;
-            this.times = times;
-        }
-
-        public double getSum() {
-            return sum;
-        }
-
-        public void setSum(double sum) {
-            this.sum = sum;
-        }
-        
-        public void addValue(double value) {
-            sum += value;
-        }
-
-        public long getTimes() {
-            return times;
-        }
-
-        public void setTimes(long times) {
-            this.times = times;
-        }
-
-        public void addTimes(long time) {
-            times += time;
-        }
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/Histogram.java.bak
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/Histogram.java.bak b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/Histogram.java.bak
deleted file mode 100755
index b830789..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/Histogram.java.bak
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric;
-
-import java.util.concurrent.atomic.AtomicLong;
-
-import com.alibaba.jstorm.common.metric.operator.convert.Convertor;
-import com.alibaba.jstorm.common.metric.operator.merger.AvgMerger2;
-import com.alibaba.jstorm.common.metric.operator.updater.AvgUpdater2;
-import com.alibaba.jstorm.common.metric.window.Metric;
-import com.alibaba.jstorm.utils.Pair;
-import com.google.common.util.concurrent.AtomicDouble;
-
-/**
- * Meter is used to compute tps
- * 
- * Attention: 1.
- * 
- * @author zhongyan.feng
- * 
- */
-public class Histogram extends Metric<Double, Pair<AtomicDouble, AtomicLong>> {
-    private static final long serialVersionUID = -1362345159511508074L;
-
-    public Histogram() {
-        defaultValue =
-                new Pair<AtomicDouble, AtomicLong>(new AtomicDouble(0.0),
-                        new AtomicLong(0));
-        updater = new AvgUpdater2();
-        merger = new AvgMerger2();
-        convertor = new HistogramConvertor();
-
-        init();
-    }
-
-    public static class HistogramConvertor implements
-            Convertor<Pair<AtomicDouble, AtomicLong>, Double> {
-        private static final long serialVersionUID = -1569170826785657226L;
-
-        @Override
-        public Double convert(Pair<AtomicDouble, AtomicLong> from) {
-            // TODO Auto-generated method stub
-            if (from == null) {
-                return 0.0d;
-            }
-
-            if (from.getSecond().get() == 0) {
-                return 0.0d;
-            } else {
-                return from.getFirst().get() / from.getSecond().get();
-            }
-        }
-
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/HistogramData.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/HistogramData.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/HistogramData.java
new file mode 100644
index 0000000..5f5ae97
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/HistogramData.java
@@ -0,0 +1,135 @@
+package com.alibaba.jstorm.common.metric;
+
+
+import com.alibaba.jstorm.metric.Bytes;
+import com.alibaba.jstorm.metric.KVSerializable;
+
+/**
+ * @author wange
+ * @since 15/6/23
+ */
+public class HistogramData extends MetricBaseData implements KVSerializable {
+    private long min;
+    private long max;
+    private double mean;
+    private double p50;
+    private double p75;
+    private double p95;
+    private double p98;
+    private double p99;
+    private double p999;
+    private double stddev;
+
+    public long getMin() {
+        return min;
+    }
+
+    public void setMin(long min) {
+        this.min = min;
+    }
+
+    public long getMax() {
+        return max;
+    }
+
+    public void setMax(long max) {
+        this.max = max;
+    }
+
+    public double getMean() {
+        return mean;
+    }
+
+    public void setMean(double mean) {
+        this.mean = mean;
+    }
+
+    public double getP50() {
+        return p50;
+    }
+
+    public void setP50(double p50) {
+        this.p50 = p50;
+    }
+
+    public double getP75() {
+        return p75;
+    }
+
+    public void setP75(double p75) {
+        this.p75 = p75;
+    }
+
+    public double getP95() {
+        return p95;
+    }
+
+    public void setP95(double p95) {
+        this.p95 = p95;
+    }
+
+    public double getP98() {
+        return p98;
+    }
+
+    public void setP98(double p98) {
+        this.p98 = p98;
+    }
+
+    public double getP99() {
+        return p99;
+    }
+
+    public void setP99(double p99) {
+        this.p99 = p99;
+    }
+
+    public double getP999() {
+        return p999;
+    }
+
+    public void setP999(double p999) {
+        this.p999 = p999;
+    }
+
+    public double getStddev() {
+        return stddev;
+    }
+
+    public void setStddev(double stddev) {
+        this.stddev = stddev;
+    }
+
+    @Override
+    public byte[] getValue() {
+        byte[] ret = new byte[8 * 9];
+        Bytes.putLong(ret, 0, min);
+        Bytes.putLong(ret, 8, max);
+        Bytes.putDouble(ret, 16, p50);
+        Bytes.putDouble(ret, 24, p75);
+        Bytes.putDouble(ret, 32, p95);
+        Bytes.putDouble(ret, 40, p98);
+        Bytes.putDouble(ret, 48, p99);
+        Bytes.putDouble(ret, 56, p999);
+        Bytes.putDouble(ret, 64, mean);
+
+        return ret;
+    }
+
+    @Override
+    public Object fromKV(byte[] key, byte[] value) {
+        parseKey(key);
+
+        this.min = Bytes.toLong(value, 0, KVSerializable.LONG_SIZE);
+        this.max = Bytes.toLong(value, 8, KVSerializable.LONG_SIZE);
+        this.p50 = Bytes.toDouble(value, 16);
+        this.p75 = Bytes.toDouble(value, 24);
+        this.p95 = Bytes.toDouble(value, 32);
+        this.p98 = Bytes.toDouble(value, 40);
+        this.p99 = Bytes.toDouble(value, 48);
+        this.p999 = Bytes.toDouble(value, 56);
+        this.mean = Bytes.toDouble(value, 64);
+
+        return this;
+    }
+}
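
One detail of the new value layout worth noting: getValue() allocates 8 * 9 = 72 bytes and writes, in order, min, max, p50, p75, p95, p98, p99, p999, mean. The stddev field has a getter and setter but is never written or read back, so it does not survive a store/load cycle. A hypothetical decoder for one of the fixed offsets:

    // 72-byte layout from HistogramData.getValue():
    //   [0]  min (long)   [8]  max (long)
    //   [16] p50  [24] p75  [32] p95  [40] p98
    //   [48] p99  [56] p999 [64] mean   (all doubles)
    static double p99Of(byte[] value) {
        return Bytes.toDouble(value, 48);  // note: stddev is absent from the encoding
    }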

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/LongCounter.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/LongCounter.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/LongCounter.java
deleted file mode 100755
index ac58912..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/LongCounter.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric;
-
-import java.util.concurrent.atomic.AtomicLong;
-
-import com.alibaba.jstorm.common.metric.operator.convert.AtomicLongToLong;
-import com.alibaba.jstorm.common.metric.operator.merger.LongSumMerger;
-import com.alibaba.jstorm.common.metric.operator.updater.LongAddUpdater;
-import com.alibaba.jstorm.common.metric.window.Metric;
-
-public class LongCounter extends Metric<Long, AtomicLong> {
-    private static final long serialVersionUID = -1362345159511508074L;
-
-    public LongCounter() {
-        super.defaultValue = new AtomicLong(0);
-        super.updater = new LongAddUpdater();
-        super.merger = new LongSumMerger();
-        super.convertor = new AtomicLongToLong();
-
-        init();
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/Meter.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/Meter.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/Meter.java
deleted file mode 100755
index e56d025..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/Meter.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric;
-
-import com.alibaba.jstorm.common.metric.operator.convert.DefaultConvertor;
-import com.alibaba.jstorm.common.metric.operator.merger.TpsMerger;
-import com.alibaba.jstorm.common.metric.operator.updater.AddUpdater;
-import com.alibaba.jstorm.common.metric.window.Metric;
-import com.alibaba.jstorm.common.metric.window.RollingWindow;
-
-/**
- * Meter is used to compute tps
- * 
- * Attention: 1.
- * 
- * @author zhongyan.feng
- * 
- */
-public class Meter extends Metric<Double, Double> {
-    private static final long serialVersionUID = -1362345159511508074L;
-
-    public Meter() {
-        defaultValue = 0.0d;
-        updater = new AddUpdater<Double>();
-        merger = new TpsMerger();
-        convertor = new DefaultConvertor<Double>();
-
-        init();
-    }
-
-    public void update() {
-        update(Double.valueOf(1));
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/MeterData.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/MeterData.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/MeterData.java
new file mode 100644
index 0000000..2df87aa
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/MeterData.java
@@ -0,0 +1,71 @@
+package com.alibaba.jstorm.common.metric;
+
+
+import com.alibaba.jstorm.metric.Bytes;
+import com.alibaba.jstorm.metric.KVSerializable;
+
+/**
+ * @author wange
+ * @since 15/6/23
+ */
+public class MeterData extends MetricBaseData implements KVSerializable {
+    private double m1;
+    private double m5;
+    private double m15;
+    private double mean;
+
+    public double getM1() {
+        return m1;
+    }
+
+    public void setM1(double m1) {
+        this.m1 = m1;
+    }
+
+    public double getM5() {
+        return m5;
+    }
+
+    public void setM5(double m5) {
+        this.m5 = m5;
+    }
+
+    public double getM15() {
+        return m15;
+    }
+
+    public void setM15(double m15) {
+        this.m15 = m15;
+    }
+
+    public double getMean() {
+        return mean;
+    }
+
+    public void setMean(double mean) {
+        this.mean = mean;
+    }
+
+    @Override
+    public byte[] getValue() {
+        byte[] ret = new byte[8 * 4];
+        Bytes.putDouble(ret, 0, m1);
+        Bytes.putDouble(ret, 8, m5);
+        Bytes.putDouble(ret, 16, m15);
+        Bytes.putDouble(ret, 24, mean);
+
+        return ret;
+    }
+
+    @Override
+    public Object fromKV(byte[] key, byte[] value) {
+        parseKey(key);
+
+        this.m1 = Bytes.toDouble(value, 0);
+        this.m5 = Bytes.toDouble(value, 8);
+        this.m15 = Bytes.toDouble(value, 16);
+        this.mean = Bytes.toDouble(value, 24);
+
+        return this;
+    }
+}
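
MeterData packs four doubles into 32 bytes; the m1/m5/m15 fields presumably carry the codahale Meter's one-, five- and fifteen-minute rates. A hypothetical decoder over that layout:

    // 32-byte layout from MeterData.getValue(): [0] m1, [8] m5, [16] m15, [24] mean.
    static double meanRateOf(byte[] value) {
        return Bytes.toDouble(value, 24);
    }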

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/MetricBaseData.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/MetricBaseData.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/MetricBaseData.java
new file mode 100644
index 0000000..907762d
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/MetricBaseData.java
@@ -0,0 +1,59 @@
+package com.alibaba.jstorm.common.metric;
+
+import com.alibaba.jstorm.metric.Bytes;
+import com.alibaba.jstorm.metric.KVSerializable;
+
+import java.util.Date;
+
+/**
+ * @author wange
+ * @since 15/7/22
+ */
+public abstract class MetricBaseData implements KVSerializable {
+    protected long metricId;
+    protected int win;
+    protected Date ts;
+
+    public long getMetricId() {
+        return metricId;
+    }
+
+    public void setMetricId(long metricId) {
+        this.metricId = metricId;
+    }
+
+    public Date getTs() {
+        return ts;
+    }
+
+    public void setTs(Date ts) {
+        this.ts = ts;
+    }
+
+    public int getWin() {
+        return win;
+    }
+
+    public void setWin(int win) {
+        this.win = win;
+    }
+
+    @Override
+    public byte[] getKey() {
+        return makeKey(metricId, win, ts.getTime());
+    }
+
+    public static byte[] makeKey(long metricId, int win, long ts) {
+        byte[] ret = new byte[8 + 4 + 8];
+        Bytes.putLong(ret, 0, metricId);
+        Bytes.putInt(ret, 8, win);
+        Bytes.putLong(ret, 12, ts);
+        return ret;
+    }
+
+    protected void parseKey(byte[] key) {
+        this.metricId = Bytes.toLong(key, 0, KVSerializable.LONG_SIZE);
+        this.win = Bytes.toInt(key, 8, KVSerializable.INT_SIZE);
+        this.ts = new Date(Bytes.toLong(key, 12, KVSerializable.LONG_SIZE));
+    }
+}
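
Because the 20-byte key orders metricId, then win, then ts, all points of one (metric, window) series sort contiguously and chronologically under a lexicographic byte comparator; exposing makeKey as a static helper lets callers build range-scan bounds without constructing a data object. A sketch under that assumption:

    // Hypothetical scan bounds for one series in a byte-ordered KV store.
    long metricId = 42L;
    int win = 60;
    byte[] from = MetricBaseData.makeKey(metricId, win, 0L);             // earliest ts
    byte[] to   = MetricBaseData.makeKey(metricId, win, Long.MAX_VALUE); // latest ts
    // scanning [from, to] yields the series' data points in time order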

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/MetricFilter.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/MetricFilter.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/MetricFilter.java
deleted file mode 100755
index 92b1f6b..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/MetricFilter.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric;
-
-import java.io.Serializable;
-
-import com.alibaba.jstorm.common.metric.window.Metric;
-
-public interface MetricFilter extends Serializable {
-    /**
-     * Matches all metrics, regardless of type or name.
-     */
-    MetricFilter ALL = new MetricFilter() {
-        private static final long serialVersionUID = 7089987006352295530L;
-
-        @Override
-        public boolean matches(String name, Metric metric) {
-            return true;
-        }
-    };
-
-    /**
-     * Returns {@code true} if the metric matches the filter; {@code false}
-     * otherwise.
-     *
-     * @param name the metric's name
-     * @param metric the metric
-     * @return {@code true} if the metric matches the filter
-     */
-    boolean matches(String name, Metric metric);
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/MetricMeta.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/MetricMeta.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/MetricMeta.java
new file mode 100644
index 0000000..da6b4dd
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/MetricMeta.java
@@ -0,0 +1,213 @@
+package com.alibaba.jstorm.common.metric;
+
+import com.alibaba.jstorm.metric.KVSerializable;
+import com.alibaba.jstorm.metric.MetaType;
+import com.alibaba.jstorm.metric.MetricType;
+import com.alibaba.jstorm.metric.MetricUtils;
+import com.alibaba.jstorm.utils.JStormUtils;
+
+import java.util.Date;
+
+/**
+ * @author wange
+ * @since 15/6/18
+ */
+public class MetricMeta implements KVSerializable {
+    // common
+    private long id;
+    // string id
+    private String sid;
+    private String clusterName;
+    private String topologyId;
+    private int metricType;
+    private String metricGroup = MetricUtils.DEFAULT_GROUP; // sys group
+    private String metricName;
+    private Date gmtCreate = new Date();
+
+    // task meta
+    private String component = MetricUtils.EMPTY;
+    private int taskId = 0;
+    private String streamId = MetricUtils.EMPTY;
+    private int metaType;
+
+    // worker meta
+    private String host = MetricUtils.EMPTY;
+    private int port = 0;
+
+    public long getId() {
+        return id;
+    }
+
+    public void setId(long id) {
+        this.id = id;
+        this.sid = id + "";
+    }
+
+    public String getSid() {
+        return sid;
+    }
+
+    public String getClusterName() {
+        return clusterName;
+    }
+
+    public void setClusterName(String clusterName) {
+        this.clusterName = clusterName;
+    }
+
+    public String getTopologyId() {
+        return topologyId;
+    }
+
+    public void setTopologyId(String topologyId) {
+        this.topologyId = topologyId;
+    }
+
+    public String getHost() {
+        return host;
+    }
+
+    public void setHost(String host) {
+        this.host = host;
+    }
+
+    public int getPort() {
+        return port;
+    }
+
+    public void setPort(int port) {
+        this.port = port;
+    }
+
+    public int getMetricType() {
+        return metricType;
+    }
+
+    public void setMetricType(int metricType) {
+        this.metricType = metricType;
+    }
+
+    public String getMetricGroup() {
+        return metricGroup;
+    }
+
+    public void setMetricGroup(String metricGroup) {
+        this.metricGroup = metricGroup;
+    }
+
+    public String getMetricName() {
+        return metricName;
+    }
+
+    public void setMetricName(String metricName) {
+        this.metricName = metricName;
+    }
+
+    public Date getGmtCreate() {
+        return gmtCreate;
+    }
+
+    public void setGmtCreate(Date gmtCreate) {
+        this.gmtCreate = gmtCreate;
+    }
+
+    public String getComponent() {
+        return component;
+    }
+
+    public void setComponent(String component) {
+        this.component = component;
+    }
+
+    public int getTaskId() {
+        return taskId;
+    }
+
+    public void setTaskId(int taskId) {
+        this.taskId = taskId;
+    }
+
+    public String getStreamId() {
+        return streamId;
+    }
+
+    public void setStreamId(String streamId) {
+        this.streamId = streamId;
+    }
+
+    public int getMetaType() {
+        return metaType;
+    }
+
+    public void setMetaType(int metaType) {
+        this.metaType = metaType;
+    }
+
+    public boolean isWorkerMetric() {
+        return this.metaType == MetaType.NETTY.getT() || this.getMetaType() == MetaType.WORKER.getT() ||
+                this.metaType == MetaType.TOPOLOGY.getT();
+    }
+
+    public String getFQN() {
+        MetaType meta = MetaType.parse(metaType);
+        MetricType metric = MetricType.parse(metricType);
+        String types = meta.getV() + metric.getV();
+        if (isWorkerMetric()) {
+            return MetricUtils.concat2(types, topologyId, host, port, metricGroup, metricName);
+        }
+        return MetricUtils.concat2(types, topologyId, component, taskId, streamId, metricGroup, metricName);
+    }
+
+    /**
+     * key: clusterName + topologyId + metaType + id
+     */
+    @Override
+    public byte[] getKey() {
+        StringBuilder sb = new StringBuilder(64);
+        sb.append(clusterName).append(MetricUtils.AT).append(topologyId).append(MetricUtils.AT)
+                .append(metaType).append(MetricUtils.AT).append(id);
+        return sb.toString().getBytes();
+    }
+
+    /**
+     * value: component + taskId + streamId + metricType + host + port + metricGroup + metricName
+     */
+    @Override
+    public byte[] getValue() {
+        StringBuilder sb = new StringBuilder(64);
+        sb.append(component).append(MetricUtils.AT).append(taskId).append(MetricUtils.AT)
+                .append(streamId).append(MetricUtils.AT).append(metricType).append(MetricUtils.AT)
+                .append(host).append(MetricUtils.AT).append(port).append(MetricUtils.AT)
+                .append(metricGroup).append(MetricUtils.AT).append(metricName);
+        return sb.toString().getBytes();
+    }
+
+    @Override
+    public Object fromKV(byte[] key, byte[] value) {
+        String[] keyParts = new String(key).split(MetricUtils.DELIM);
+        if (keyParts.length >= 4) {
+            this.clusterName = keyParts[0];
+            this.topologyId = keyParts[1];
+            this.metaType = Integer.valueOf(keyParts[2]);
+            this.id = Long.valueOf(keyParts[3]);
+            this.sid = this.id + "";
+        }
+        String[] valueParts = new String(value).split(MetricUtils.DELIM);
+        if (valueParts.length >= 8) {
+            this.component = valueParts[0];
+            this.taskId = JStormUtils.parseInt(valueParts[1], 0);
+            this.streamId = valueParts[2];
+            this.metricType = JStormUtils.parseInt(valueParts[3], 0);
+            this.host = valueParts[4];
+            this.port = JStormUtils.parseInt(valueParts[5], 0);
+            this.metricGroup = valueParts[6];
+            this.metricName = valueParts[7];
+        }
+        return this;
+    }
+
+    public static MetricMeta parse(String name) {
+        return MetricMetaParser.fromMetricName(name);
+    }
+
+}
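
Unlike the fixed-width *Data keys, MetricMeta serializes to '@'-joined strings. Note the asymmetry in the new code: getKey()/getValue() join with MetricUtils.AT while fromKV() splits on MetricUtils.DELIM, which only round-trips if both constants denote the same delimiter (presumably "@"). Under that assumption the shapes are:

    MetricMeta meta = new MetricMeta();
    meta.setClusterName("cluster1");
    meta.setTopologyId("topo-1-1");
    meta.setMetaType(MetaType.WORKER.getT());
    meta.setId(1001L);
    // key:   "cluster1@topo-1-1@<metaType>@1001"
    // value: "<component>@<taskId>@<streamId>@<metricType>@<host>@<port>@<group>@<name>"
    byte[] key = meta.getKey();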


http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/task/TopologyContext.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/task/TopologyContext.java b/jstorm-core/src/main/java/backtype/storm/task/TopologyContext.java
index 8124651..e8390f6 100755
--- a/jstorm-core/src/main/java/backtype/storm/task/TopologyContext.java
+++ b/jstorm-core/src/main/java/backtype/storm/task/TopologyContext.java
@@ -21,86 +21,76 @@ import backtype.storm.generated.GlobalStreamId;
 import backtype.storm.generated.Grouping;
 import backtype.storm.generated.StormTopology;
 import backtype.storm.hooks.ITaskHook;
-import backtype.storm.metric.api.IMetric;
-import backtype.storm.metric.api.IReducer;
-import backtype.storm.metric.api.ICombiner;
-import backtype.storm.metric.api.ReducedMetric;
-import backtype.storm.metric.api.CombinedMetric;
+import backtype.storm.metric.api.*;
 import backtype.storm.state.ISubscribedState;
 import backtype.storm.tuple.Fields;
 import backtype.storm.utils.Utils;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
+import com.alibaba.jstorm.cluster.StormClusterState;
 import org.apache.commons.lang.NotImplementedException;
 import org.json.simple.JSONValue;
 
+import java.util.*;
+
 /**
- * A TopologyContext is given to bolts and spouts in their "prepare" and "open"
- * methods, respectively. This object provides information about the component's
+ * A TopologyContext is given to bolts and spouts in their "prepare" and "open" methods, respectively. This object provides information about the component's
  * place within the topology, such as task ids, inputs and outputs, etc.
- *
- * <p>The TopologyContext is also used to declare ISubscribedState objects to
- * synchronize state with StateSpouts this object is subscribed to.</p>
+ * 
+ * <p>
+ * The TopologyContext is also used to declare ISubscribedState objects to synchronize state with StateSpouts this object is subscribed to.
+ * </p>
  */
 public class TopologyContext extends WorkerTopologyContext implements IMetricsContext {
     private Integer _taskId;
     private Map<String, Object> _taskData = new HashMap<String, Object>();
     private List<ITaskHook> _hooks = new ArrayList<ITaskHook>();
     private Map<String, Object> _executorData;
-    private Map<Integer,Map<Integer, Map<String, IMetric>>> _registeredMetrics;
+    private Map<Integer, Map<Integer, Map<String, IMetric>>> _registeredMetrics;
     private clojure.lang.Atom _openOrPrepareWasCalled;
-
-
-    public TopologyContext(StormTopology topology, Map stormConf,
-            Map<Integer, String> taskToComponent, Map<String, List<Integer>> componentToSortedTasks,
-            Map<String, Map<String, Fields>> componentToStreamToFields,
-            String stormId, String codeDir, String pidDir, Integer taskId,
-            Integer workerPort, List<Integer> workerTasks, Map<String, Object> defaultResources,
-            Map<String, Object> userResources, Map<String, Object> executorData, Map registeredMetrics,
-            clojure.lang.Atom openOrPrepareWasCalled) {
-        super(topology, stormConf, taskToComponent, componentToSortedTasks,
-                componentToStreamToFields, stormId, codeDir, pidDir,
-                workerPort, workerTasks, defaultResources, userResources);
+    private StormClusterState _zkCluster;
+
+    public TopologyContext(StormTopology topology, Map stormConf, Map<Integer, String> taskToComponent, Map<String, List<Integer>> componentToSortedTasks,
+            Map<String, Map<String, Fields>> componentToStreamToFields, String stormId, String codeDir, String pidDir, Integer taskId, Integer workerPort,
+            List<Integer> workerTasks, Map<String, Object> defaultResources, Map<String, Object> userResources, Map<String, Object> executorData,
+            Map registeredMetrics, clojure.lang.Atom openOrPrepareWasCalled, StormClusterState zkCluster) {
+        super(topology, stormConf, taskToComponent, componentToSortedTasks, componentToStreamToFields, stormId, codeDir, pidDir, workerPort, workerTasks,
+                defaultResources, userResources);
         _taskId = taskId;
         _executorData = executorData;
         _registeredMetrics = registeredMetrics;
         _openOrPrepareWasCalled = openOrPrepareWasCalled;
+        _zkCluster = zkCluster;
     }
 
     /**
-     * All state from all subscribed state spouts streams will be synced with
-     * the provided object.
-     *
-     * <p>It is recommended that your ISubscribedState object is kept as an instance
-     * variable of this object. The recommended usage of this method is as follows:</p>
-     *
+     * All state from all subscribed state spouts streams will be synced with the provided object.
+     * 
+     * <p>
+     * It is recommended that your ISubscribedState object is kept as an instance variable of this object. The recommended usage of this method is as follows:
+     * </p>
+     * 
      * <p>
      * _myState = context.setAllSubscribedState(new MyState());
      * </p>
+     * 
      * @param obj Provided ISubscribedState implementation
      * @return Returns the ISubscribedState object provided
      */
     public <T extends ISubscribedState> T setAllSubscribedState(T obj) {
-        //check that only subscribed to one component/stream for statespout
-        //setsubscribedstate appropriately
+        // check that only subscribed to one component/stream for statespout
+        // setsubscribedstate appropriately
         throw new NotImplementedException();
     }
 
-
     /**
-     * Synchronizes the default stream from the specified state spout component
-     * id with the provided ISubscribedState object.
-     *
-     * <p>The recommended usage of this method is as follows:</p>
+     * Synchronizes the default stream from the specified state spout component id with the provided ISubscribedState object.
+     * 
+     * <p>
+     * The recommended usage of this method is as follows:
+     * </p>
      * <p>
      * _myState = context.setSubscribedState(componentId, new MyState());
      * </p>
-     *
+     * 
      * @param componentId the id of the StateSpout component to subscribe to
      * @param obj Provided ISubscribedState implementation
      * @return Returns the ISubscribedState object provided
@@ -110,14 +100,15 @@ public class TopologyContext extends WorkerTopologyContext implements IMetricsCo
     }
 
     /**
-     * Synchronizes the specified stream from the specified state spout component
-     * id with the provided ISubscribedState object.
-     *
-     * <p>The recommended usage of this method is as follows:</p>
+     * Synchronizes the specified stream from the specified state spout component id with the provided ISubscribedState object.
+     * 
+     * <p>
+     * The recommended usage of this method is as follows:
+     * </p>
      * <p>
      * _myState = context.setSubscribedState(componentId, streamId, new MyState());
      * </p>
-     *
+     * 
      * @param componentId the id of the StateSpout component to subscribe to
      * @param streamId the stream to subscribe to
      * @param obj Provided ISubscribedState implementation
@@ -129,7 +120,7 @@ public class TopologyContext extends WorkerTopologyContext implements IMetricsCo
 
     /**
      * Gets the task id of this task.
-     *
+     * 
      * @return the task id
      */
     public int getThisTaskId() {
@@ -137,33 +128,31 @@ public class TopologyContext extends WorkerTopologyContext implements IMetricsCo
     }
 
     /**
-     * Gets the component id for this task. The component id maps
-     * to a component id specified for a Spout or Bolt in the topology definition.
+     * Gets the component id for this task. The component id maps to a component id specified for a Spout or Bolt in the topology definition.
+     * 
      * @return
      */
     public String getThisComponentId() {
         return getComponentId(_taskId);
     }
 
-	/**
-	 * Gets the declared output fields for the specified stream id for the
-	 * component this task is a part of.
-	 */
-	public Fields getThisOutputFields(String streamId) {
-		return getComponentOutputFields(getThisComponentId(), streamId);
-	}
-
-	/**
-	 * Gets the declared output fields for the specified stream id for the
-	 * component this task is a part of.
-	 */
-	public Map<String, List<String>> getThisOutputFieldsForStreams() {
-		Map<String, List<String>> streamToFields = new HashMap<String, List<String>>();
-		for (String stream : this.getThisStreams()) {
-			streamToFields.put(stream, this.getThisOutputFields(stream).toList());
-		}
-		return streamToFields;
-	}
+    /**
+     * Gets the declared output fields for the specified stream id for the component this task is a part of.
+     */
+    public Fields getThisOutputFields(String streamId) {
+        return getComponentOutputFields(getThisComponentId(), streamId);
+    }
+
+    /**
+     * Gets the declared output fields for the specified stream id for the component this task is a part of.
+     */
+    public Map<String, List<String>> getThisOutputFieldsForStreams() {
+        Map<String, List<String>> streamToFields = new HashMap<String, List<String>>();
+        for (String stream : this.getThisStreams()) {
+            streamToFields.put(stream, this.getThisOutputFields(stream).toList());
+        }
+        return streamToFields;
+    }
 
     /**
      * Gets the set of streams declared for the component of this task.
@@ -173,15 +162,14 @@ public class TopologyContext extends WorkerTopologyContext implements IMetricsCo
     }
 
     /**
-     * Gets the index of this task id in getComponentTasks(getThisComponentId()).
-     * An example use case for this method is determining which task
-     * accesses which resource in a distributed resource to ensure an even distribution.
+     * Gets the index of this task id in getComponentTasks(getThisComponentId()). An example use case for this method is determining which task accesses which
+     * resource in a distributed resource to ensure an even distribution.
      */
     public int getThisTaskIndex() {
         List<Integer> tasks = new ArrayList<Integer>(getComponentTasks(getThisComponentId()));
         Collections.sort(tasks);
-        for(int i=0; i<tasks.size(); i++) {
-            if(tasks.get(i) == getThisTaskId()) {
+        for (int i = 0; i < tasks.size(); i++) {
+            if (tasks.get(i) == getThisTaskId()) {
                 return i;
             }
         }
@@ -190,7 +178,7 @@ public class TopologyContext extends WorkerTopologyContext implements IMetricsCo
 
     /**
      * Gets the declared inputs to this component.
-     *
+     * 
      * @return A map from subscribed component/stream to the grouping subscribed with.
      */
     public Map<GlobalStreamId, Grouping> getThisSources() {
@@ -199,7 +187,7 @@ public class TopologyContext extends WorkerTopologyContext implements IMetricsCo
 
     /**
      * Gets information about who is consuming the outputs of this component, and how.
-     *
+     * 
      * @return Map from stream id to component id to the Grouping used.
      */
     public Map<String, Map<String, Grouping>> getThisTargets() {
@@ -231,15 +219,15 @@ public class TopologyContext extends WorkerTopologyContext implements IMetricsCo
         return _hooks;
     }
 
-	private static Map<String, Object> groupingToJSONableMap(Grouping grouping) {
-		Map groupingMap = new HashMap<String, Object>();
-		groupingMap.put("type", grouping.getSetField().toString());
-		if (grouping.is_set_fields()) {
-			groupingMap.put("fields", grouping.get_fields());
-		}
-		return groupingMap;
-	}
-    
+    private static Map<String, Object> groupingToJSONableMap(Grouping grouping) {
+        Map groupingMap = new HashMap<String, Object>();
+        groupingMap.put("type", grouping.getSetField().toString());
+        if (grouping.is_set_fields()) {
+            groupingMap.put("fields", grouping.get_fields());
+        }
+        return groupingMap;
+    }
+
     @Override
     public String toJSONString() {
         Map obj = new HashMap();
@@ -253,39 +241,38 @@ public class TopologyContext extends WorkerTopologyContext implements IMetricsCo
         // Convert targets to a JSON serializable format
         Map<String, Map> stringTargets = new HashMap<String, Map>();
         for (Map.Entry<String, Map<String, Grouping>> entry : this.getThisTargets().entrySet()) {
-        	Map stringTargetMap = new HashMap<String, Object>();
-        	for (Map.Entry<String, Grouping> innerEntry : entry.getValue().entrySet()) {
-        		stringTargetMap.put(innerEntry.getKey(), groupingToJSONableMap(innerEntry.getValue()));
-        	}
-        	stringTargets.put(entry.getKey(), stringTargetMap);
+            Map stringTargetMap = new HashMap<String, Object>();
+            for (Map.Entry<String, Grouping> innerEntry : entry.getValue().entrySet()) {
+                stringTargetMap.put(innerEntry.getKey(), groupingToJSONableMap(innerEntry.getValue()));
+            }
+            stringTargets.put(entry.getKey(), stringTargetMap);
         }
         obj.put("stream->target->grouping", stringTargets);
         // Convert sources to a JSON serializable format
         Map<String, Map<String, Object>> stringSources = new HashMap<String, Map<String, Object>>();
         for (Map.Entry<GlobalStreamId, Grouping> entry : this.getThisSources().entrySet()) {
-        	GlobalStreamId gid = entry.getKey();
-        	Map<String, Object> stringSourceMap = stringSources.get(gid.get_componentId());
-        	if (stringSourceMap == null) {
-        		stringSourceMap = new HashMap<String, Object>();
-        		stringSources.put(gid.get_componentId(), stringSourceMap);
-        	}
-        	stringSourceMap.put(gid.get_streamId(), groupingToJSONableMap(entry.getValue()));        	
+            GlobalStreamId gid = entry.getKey();
+            Map<String, Object> stringSourceMap = stringSources.get(gid.get_componentId());
+            if (stringSourceMap == null) {
+                stringSourceMap = new HashMap<String, Object>();
+                stringSources.put(gid.get_componentId(), stringSourceMap);
+            }
+            stringSourceMap.put(gid.get_streamId(), groupingToJSONableMap(entry.getValue()));
         }
         obj.put("source->stream->grouping", stringSources);
         return JSONValue.toJSONString(obj);
     }
 
     /*
-     * Register a IMetric instance.
-     * Storm will then call getValueAndReset on the metric every timeBucketSizeInSecs
-     * and the returned value is sent to all metrics consumers.
-     * You must call this during IBolt::prepare or ISpout::open.
+     * Register an IMetric instance. Storm will then call getValueAndReset on the metric every timeBucketSizeInSecs and the returned value is sent to all metrics
+     * consumers. You must call this during IBolt::prepare or ISpout::open.
+     * 
      * @return The IMetric argument unchanged.
      */
     public <T extends IMetric> T registerMetric(String name, T metric, int timeBucketSizeInSecs) {
-        if((Boolean)_openOrPrepareWasCalled.deref() == true) {
-            throw new RuntimeException("TopologyContext.registerMetric can only be called from within overridden " +
-                                       "IBolt::prepare() or ISpout::open() method.");
+        if ((Boolean) _openOrPrepareWasCalled.deref() == true) {
+            throw new RuntimeException("TopologyContext.registerMetric can only be called from within overridden "
+                    + "IBolt::prepare() or ISpout::open() method.");
         }
 
         if (metric == null) {
@@ -293,27 +280,27 @@ public class TopologyContext extends WorkerTopologyContext implements IMetricsCo
         }
 
         if (timeBucketSizeInSecs <= 0) {
-            throw new IllegalArgumentException("TopologyContext.registerMetric can only be called with timeBucketSizeInSecs " +
-                                               "greater than or equal to 1 second.");
+            throw new IllegalArgumentException("TopologyContext.registerMetric can only be called with timeBucketSizeInSecs "
+                    + "greater than or equal to 1 second.");
         }
 
         if (getRegisteredMetricByName(name) != null) {
-            throw new RuntimeException("The same metric name `" + name + "` was registered twice." );
+            throw new RuntimeException("The same metric name `" + name + "` was registered twice.");
         }
 
         Map m1 = _registeredMetrics;
-        if(!m1.containsKey(timeBucketSizeInSecs)) {
+        if (!m1.containsKey(timeBucketSizeInSecs)) {
             m1.put(timeBucketSizeInSecs, new HashMap());
         }
 
-        Map m2 = (Map)m1.get(timeBucketSizeInSecs);
-        if(!m2.containsKey(_taskId)) {
+        Map m2 = (Map) m1.get(timeBucketSizeInSecs);
+        if (!m2.containsKey(_taskId)) {
             m2.put(_taskId, new HashMap());
         }
 
-        Map m3 = (Map)m2.get(_taskId);
-        if(m3.containsKey(name)) {
-            throw new RuntimeException("The same metric name `" + name + "` was registered twice." );
+        Map m3 = (Map) m2.get(_taskId);
+        if (m3.containsKey(name)) {
+            throw new RuntimeException("The same metric name `" + name + "` was registered twice.");
         } else {
             m3.put(name, metric);
         }
@@ -322,21 +309,18 @@ public class TopologyContext extends WorkerTopologyContext implements IMetricsCo
     }
 
     /**
-     * Get component's metric from registered metrics by name.
-     * Notice: Normally, one component can only register one metric name once.
-     *         But now registerMetric has a bug(https://issues.apache.org/jira/browse/STORM-254)
-     *         cause the same metric name can register twice.
-     *         So we just return the first metric we meet.
+     * Gets a component's metric from the registered metrics by name. Notice: normally, one component can only register a metric name once, but registerMetric
+     * has a bug (https://issues.apache.org/jira/browse/STORM-254) that lets the same metric name be registered twice, so we just return the first metric we meet.
      */
     public IMetric getRegisteredMetricByName(String name) {
         IMetric metric = null;
 
-        for (Map<Integer, Map<String, IMetric>> taskIdToNameToMetric: _registeredMetrics.values()) {
+        for (Map<Integer, Map<String, IMetric>> taskIdToNameToMetric : _registeredMetrics.values()) {
             Map<String, IMetric> nameToMetric = taskIdToNameToMetric.get(_taskId);
             if (nameToMetric != null) {
                 metric = nameToMetric.get(name);
                 if (metric != null) {
-                    //we just return the first metric we meet
+                    // we just return the first metric we meet
                     break;
                 }
             }
@@ -351,10 +335,21 @@ public class TopologyContext extends WorkerTopologyContext implements IMetricsCo
     public ReducedMetric registerMetric(String name, IReducer reducer, int timeBucketSizeInSecs) {
         return registerMetric(name, new ReducedMetric(reducer), timeBucketSizeInSecs);
     }
+
     /*
     * Convenience method for registering CombinedMetric.
      */
     public CombinedMetric registerMetric(String name, ICombiner combiner, int timeBucketSizeInSecs) {
         return registerMetric(name, new CombinedMetric(combiner), timeBucketSizeInSecs);
     }
+
+    public StormClusterState getZkCluster() {
+        return _zkCluster;
+    }
+    /**
+     * Task error report callback.
+     */
+    public void reportError(String errorMsg) throws Exception {
+        _zkCluster.report_task_error(getTopologyId(), _taskId, errorMsg, null);
+    }
 }
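
Taken together, the registerMetric contract above (callable only inside open()/prepare(), per-task name uniqueness, timeBucketSizeInSecs >= 1) and the new zkCluster-backed reportError suggest usage along these lines; a sketch assuming the stock backtype.storm.metric.api.CountMetric:

    // Assumed imports: backtype.storm.topology.base.BaseRichBolt,
    // backtype.storm.metric.api.CountMetric, backtype.storm.tuple.Tuple, etc.
    public class CountingBolt extends BaseRichBolt {
        private transient CountMetric processed;
        private TopologyContext context;
        private OutputCollector collector;

        @Override
        public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
            this.context = context;
            this.collector = collector;
            // must run here: registerMetric throws after open/prepare returns
            processed = context.registerMetric("processed", new CountMetric(), 60);
        }

        @Override
        public void execute(Tuple tuple) {
            try {
                processed.incr();
                collector.ack(tuple);
            } catch (Exception e) {
                try {
                    context.reportError(e.getMessage());  // new ZK-backed error report
                } catch (Exception ignored) {
                }
                collector.fail(tuple);
            }
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
        }
    }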

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/task/WorkerTopologyContext.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/task/WorkerTopologyContext.java b/jstorm-core/src/main/java/backtype/storm/task/WorkerTopologyContext.java
index de407ac..09c8c8c 100755
--- a/jstorm-core/src/main/java/backtype/storm/task/WorkerTopologyContext.java
+++ b/jstorm-core/src/main/java/backtype/storm/task/WorkerTopologyContext.java
@@ -27,34 +27,23 @@ import java.util.concurrent.ExecutorService;
 
 public class WorkerTopologyContext extends GeneralTopologyContext {
     public static final String SHARED_EXECUTOR = "executor";
-    
+
     private Integer _workerPort;
     private List<Integer> _workerTasks;
     private String _codeDir;
     private String _pidDir;
     Map<String, Object> _userResources;
     Map<String, Object> _defaultResources;
-    
-    public WorkerTopologyContext(
-            StormTopology topology,
-            Map stormConf,
-            Map<Integer, String> taskToComponent,
-            Map<String, List<Integer>> componentToSortedTasks,
-            Map<String, Map<String, Fields>> componentToStreamToFields,
-            String stormId,
-            String codeDir,
-            String pidDir,
-            Integer workerPort,
-            List<Integer> workerTasks,
-            Map<String, Object> defaultResources,
-            Map<String, Object> userResources
-            ) {
+
+    public WorkerTopologyContext(StormTopology topology, Map stormConf, Map<Integer, String> taskToComponent,
+            Map<String, List<Integer>> componentToSortedTasks, Map<String, Map<String, Fields>> componentToStreamToFields, String stormId, String codeDir,
+            String pidDir, Integer workerPort, List<Integer> workerTasks, Map<String, Object> defaultResources, Map<String, Object> userResources) {
         super(topology, stormConf, taskToComponent, componentToSortedTasks, componentToStreamToFields, stormId);
         _codeDir = codeDir;
         _defaultResources = defaultResources;
         _userResources = userResources;
         try {
-            if(pidDir!=null) {
+            if (pidDir != null) {
                 _pidDir = new File(pidDir).getCanonicalPath();
             } else {
                 _pidDir = null;
@@ -67,13 +56,12 @@ public class WorkerTopologyContext extends GeneralTopologyContext {
     }
 
     /**
-     * Gets all the task ids that are running in this worker process
-     * (including the task for this task).
+     * Gets all the task ids that are running in this worker process (including the task for this task).
      */
     public List<Integer> getThisWorkerTasks() {
         return _workerTasks;
     }
-    
+
     public Integer getThisWorkerPort() {
         return _workerPort;
     }
@@ -81,28 +69,27 @@ public class WorkerTopologyContext extends GeneralTopologyContext {
     public void setThisWorkerTasks(List<Integer> workerTasks) {
         this._workerTasks = workerTasks;
     }
+
     /**
-     * Gets the location of the external resources for this worker on the
-     * local filesystem. These external resources typically include bolts implemented
-     * in other languages, such as Ruby or Python.
+     * Gets the location of the external resources for this worker on the local filesystem. These external resources typically include bolts implemented in
+     * other languages, such as Ruby or Python.
      */
     public String getCodeDir() {
         return _codeDir;
     }
 
     /**
-     * If this task spawns any subprocesses, those subprocesses must immediately
-     * write their PID to this directory on the local filesystem to ensure that
-     * Storm properly destroys that process when the worker is shutdown.
+     * If this task spawns any subprocesses, those subprocesses must immediately write their PID to this directory on the local filesystem to ensure that Storm
+     * properly destroys that process when the worker is shutdown.
      */
     public String getPIDDir() {
         return _pidDir;
     }
-    
+
     public Object getResource(String name) {
         return _userResources.get(name);
     }
-    
+
     public ExecutorService getSharedExecutor() {
         return (ExecutorService) _defaultResources.get(SHARED_EXECUTOR);
     }
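
getSharedExecutor() hands every task in the worker the same ExecutorService (stored under SHARED_EXECUTOR in the default resources), so background work can be pooled per worker rather than per task. Hypothetically, from inside prepare()/open():

    ExecutorService shared = context.getSharedExecutor();
    shared.submit(new Runnable() {
        @Override
        public void run() {
            // housekeeping that should not block the task's own thread
        }
    });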

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/AckFailDelegate.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/AckFailDelegate.java b/jstorm-core/src/main/java/backtype/storm/testing/AckFailDelegate.java
index d65c8bd..fbbcbfc 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/AckFailDelegate.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/AckFailDelegate.java
@@ -21,5 +21,6 @@ import java.io.Serializable;
 
 public interface AckFailDelegate extends Serializable {
     public void ack(Object id);
+
     public void fail(Object id);
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/AckFailMapTracker.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/AckFailMapTracker.java b/jstorm-core/src/main/java/backtype/storm/testing/AckFailMapTracker.java
index e16afd8..f3feff3 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/AckFailMapTracker.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/AckFailMapTracker.java
@@ -22,31 +22,31 @@ import java.util.HashSet;
 import java.util.Set;
 
 public class AckFailMapTracker implements AckFailDelegate {
-    
+
     String _acked;
     String _failed;
-    
+
     public AckFailMapTracker() {
         _acked = RegisteredGlobalState.registerState(new HashSet());
         _failed = RegisteredGlobalState.registerState(new HashSet());
     }
-    
+
     public boolean isAcked(Object id) {
-        return ((Set)RegisteredGlobalState.getState(_acked)).contains(id);
+        return ((Set) RegisteredGlobalState.getState(_acked)).contains(id);
     }
-    
+
     public boolean isFailed(Object id) {
-        return ((Set)RegisteredGlobalState.getState(_failed)).contains(id);        
+        return ((Set) RegisteredGlobalState.getState(_failed)).contains(id);
     }
 
     @Override
     public void ack(Object id) {
-        ((Set)RegisteredGlobalState.getState(_acked)).add(id);
+        ((Set) RegisteredGlobalState.getState(_acked)).add(id);
     }
 
     @Override
     public void fail(Object id) {
-        ((Set)RegisteredGlobalState.getState(_failed)).add(id);
+        ((Set) RegisteredGlobalState.getState(_failed)).add(id);
     }
-    
+
 }
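
These delegates are typically wired into a test spout so a test can assert on ack/fail outcomes by message id. A sketch assuming the FeederSpout from the same testing package (its setAckFailDelegate accessor is not shown in this diff):

    AckFailMapTracker tracker = new AckFailMapTracker();
    FeederSpout spout = new FeederSpout(new Fields("word"));
    spout.setAckFailDelegate(tracker);         // assumed accessor on FeederSpout

    spout.feed(new Values("hello"), "msg-1");  // emit with a tracked message id
    // ... run the topology until the tuple completes ...
    assert tracker.isAcked("msg-1");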

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/AckTracker.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/AckTracker.java b/jstorm-core/src/main/java/backtype/storm/testing/AckTracker.java
index ad80475..10973f1 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/AckTracker.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/AckTracker.java
@@ -24,14 +24,14 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 public class AckTracker implements AckFailDelegate {
     private static Map<String, AtomicInteger> acks = new ConcurrentHashMap<String, AtomicInteger>();
-    
+
     private String _id;
-    
+
     public AckTracker() {
         _id = UUID.randomUUID().toString();
         acks.put(_id, new AtomicInteger(0));
     }
-    
+
     @Override
     public void ack(Object id) {
         acks.get(_id).incrementAndGet();
@@ -40,13 +40,13 @@ public class AckTracker implements AckFailDelegate {
     @Override
     public void fail(Object id) {
     }
-    
+
     public int getNumAcks() {
         return acks.get(_id).intValue();
     }
-    
+
     public void resetNumAcks() {
         acks.get(_id).set(0);
     }
-    
+
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/BatchNumberList.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/BatchNumberList.java b/jstorm-core/src/main/java/backtype/storm/testing/BatchNumberList.java
index 26f964a..2565f25 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/BatchNumberList.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/BatchNumberList.java
@@ -37,16 +37,15 @@ public class BatchNumberList extends BaseBatchBolt {
     }
 
     String _wordComponent;
-    
+
     public BatchNumberList(String wordComponent) {
         _wordComponent = wordComponent;
     }
-    
+
     String word = null;
     List<Integer> intSet = new ArrayList<Integer>();
     BatchOutputCollector _collector;
-    
-    
+
     @Override
     public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, Object id) {
         _collector = collector;
@@ -54,7 +53,7 @@ public class BatchNumberList extends BaseBatchBolt {
 
     @Override
     public void execute(Tuple tuple) {
-        if(tuple.getSourceComponent().equals(_wordComponent)) {
+        if (tuple.getSourceComponent().equals(_wordComponent)) {
             this.word = tuple.getString(1);
         } else {
             intSet.add(tuple.getInteger(1));
@@ -63,10 +62,10 @@ public class BatchNumberList extends BaseBatchBolt {
 
     @Override
     public void finishBatch() {
-        if(word!=null) {
+        if (word != null) {
             Collections.sort(intSet);
             _collector.emit(new Values(word, intSet));
         }
     }
-    
+
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/BatchProcessWord.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/BatchProcessWord.java b/jstorm-core/src/main/java/backtype/storm/testing/BatchProcessWord.java
index 7f3eaf1..819c7c1 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/BatchProcessWord.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/BatchProcessWord.java
@@ -35,5 +35,5 @@ public class BatchProcessWord extends BaseBasicBolt {
     public void execute(Tuple input, BasicOutputCollector collector) {
         collector.emit(new Values(input.getValue(0), input.getString(1).length()));
     }
-    
+
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/BatchRepeatA.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/BatchRepeatA.java b/jstorm-core/src/main/java/backtype/storm/testing/BatchRepeatA.java
index 107f2ed..f5751d1 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/BatchRepeatA.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/BatchRepeatA.java
@@ -24,21 +24,20 @@ import backtype.storm.tuple.Fields;
 import backtype.storm.tuple.Tuple;
 import backtype.storm.tuple.Values;
 
+public class BatchRepeatA extends BaseBasicBolt {
 
-public class BatchRepeatA extends BaseBasicBolt {  
-    
     @Override
     public void execute(Tuple input, BasicOutputCollector collector) {
-       Object id = input.getValue(0);
-       String word = input.getString(1);
-       for(int i=0; i<word.length(); i++) {
-            if(word.charAt(i) == 'a') {
+        Object id = input.getValue(0);
+        String word = input.getString(1);
+        for (int i = 0; i < word.length(); i++) {
+            if (word.charAt(i) == 'a') {
                 collector.emit("multi", new Values(id, word.substring(0, i)));
             }
         }
         collector.emit("single", new Values(id, word));
     }
-    
+
     @Override
     public void declareOutputFields(OutputFieldsDeclarer declarer) {
         declarer.declareStream("multi", new Fields("id", "word"));

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/BoltTracker.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/BoltTracker.java b/jstorm-core/src/main/java/backtype/storm/testing/BoltTracker.java
index 3fe4e7a..0319ebc 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/BoltTracker.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/BoltTracker.java
@@ -22,7 +22,6 @@ import backtype.storm.topology.OutputFieldsDeclarer;
 import java.util.HashMap;
 import java.util.Map;
 
-
 public class BoltTracker extends NonRichBoltTracker implements IRichBolt {
     IRichBolt _richDelegate;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/CompleteTopologyParam.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/CompleteTopologyParam.java b/jstorm-core/src/main/java/backtype/storm/testing/CompleteTopologyParam.java
index f3306cf..8483413 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/CompleteTopologyParam.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/CompleteTopologyParam.java
@@ -23,65 +23,65 @@ import backtype.storm.Config;
  * The param class for the <code>Testing.completeTopology</code>.
  */
 public class CompleteTopologyParam {
-	/**
-	 * The mocked spout sources
-	 */
-	private MockedSources mockedSources;
-	/**
-	 * the config for the topology when it was submitted to the cluster
-	 */
-	private Config stormConf;
-	/**
-	 * whether cleanup the state?
-	 */
-	private Boolean cleanupState;
-	/**
-	 * the topology name you want to submit to the cluster
-	 */
-	private String topologyName;
+    /**
+     * The mocked spout sources
+     */
+    private MockedSources mockedSources;
+    /**
+     * the config for the topology when it was submitted to the cluster
+     */
+    private Config stormConf;
+    /**
+     * whether to clean up the state
+     */
+    private Boolean cleanupState;
+    /**
+     * the topology name you want to submit to the cluster
+     */
+    private String topologyName;
 
-	/**
-	 * the timeout of topology you want to submit to the cluster
-	 */
-	private Integer timeoutMs;
+    /**
+     * the timeout (ms) of the topology you want to submit to the cluster
+     */
+    private Integer timeoutMs;
 
-	public MockedSources getMockedSources() {
-		return mockedSources;
-	}
+    public MockedSources getMockedSources() {
+        return mockedSources;
+    }
 
-	public void setMockedSources(MockedSources mockedSources) {
-		this.mockedSources = mockedSources;
-	}
+    public void setMockedSources(MockedSources mockedSources) {
+        this.mockedSources = mockedSources;
+    }
 
-	public Config getStormConf() {
-		return stormConf;
-	}
+    public Config getStormConf() {
+        return stormConf;
+    }
 
-	public void setStormConf(Config stormConf) {
-		this.stormConf = stormConf;
-	}
+    public void setStormConf(Config stormConf) {
+        this.stormConf = stormConf;
+    }
 
-	public Boolean getCleanupState() {
-		return cleanupState;
-	}
+    public Boolean getCleanupState() {
+        return cleanupState;
+    }
 
-	public void setCleanupState(Boolean cleanupState) {
-		this.cleanupState = cleanupState;
-	}
+    public void setCleanupState(Boolean cleanupState) {
+        this.cleanupState = cleanupState;
+    }
 
-	public String getTopologyName() {
-		return topologyName;
-	}
+    public String getTopologyName() {
+        return topologyName;
+    }
 
-	public void setTopologyName(String topologyName) {
-		this.topologyName = topologyName;
-	}
+    public void setTopologyName(String topologyName) {
+        this.topologyName = topologyName;
+    }
 
-	public Integer getTimeoutMs() {
-		return timeoutMs;
-	}
+    public Integer getTimeoutMs() {
+        return timeoutMs;
+    }
 
-	public void setTimeoutMs(Integer timeoutMs) {
-		this.timeoutMs = timeoutMs;
-	}
+    public void setTimeoutMs(Integer timeoutMs) {
+        this.timeoutMs = timeoutMs;
+    }
 }
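
CompleteTopologyParam is the parameter object for Testing.completeTopology, which runs a topology against mocked spout data until it drains and returns everything emitted. A hedged sketch (component ids and the surrounding cluster/topology variables are hypothetical):

    MockedSources mocked = new MockedSources();
    mocked.addMockData("spout", new Values("nathan"), new Values("bob"));

    CompleteTopologyParam param = new CompleteTopologyParam();
    param.setMockedSources(mocked);
    param.setStormConf(new Config());
    param.setTopologyName("complete-topology-test");
    param.setTimeoutMs(10000);

    Map result = Testing.completeTopology(cluster, topology, param);
    List emitted = Testing.readTuples(result, "spout");  // tuples one component emitted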

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/CountingBatchBolt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/CountingBatchBolt.java b/jstorm-core/src/main/java/backtype/storm/testing/CountingBatchBolt.java
index 882801c..3682120 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/CountingBatchBolt.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/CountingBatchBolt.java
@@ -30,7 +30,7 @@ public class CountingBatchBolt extends BaseBatchBolt {
     BatchOutputCollector _collector;
     Object _id;
     int _count = 0;
-    
+
     @Override
     public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, Object id) {
         _collector = collector;
@@ -44,12 +44,12 @@ public class CountingBatchBolt extends BaseBatchBolt {
 
     @Override
     public void finishBatch() {
-        _collector.emit(new Values(_id, _count));        
-    }   
+        _collector.emit(new Values(_id, _count));
+    }
 
     @Override
     public void declareOutputFields(OutputFieldsDeclarer declarer) {
         declarer.declare(new Fields("tx", "count"));
     }
-    
+
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/CountingCommitBolt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/CountingCommitBolt.java b/jstorm-core/src/main/java/backtype/storm/testing/CountingCommitBolt.java
index cb8f7e5..a45f16b 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/CountingCommitBolt.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/CountingCommitBolt.java
@@ -32,7 +32,7 @@ public class CountingCommitBolt extends BaseTransactionalBolt implements ICommit
     BatchOutputCollector _collector;
     TransactionAttempt _id;
     int _count = 0;
-    
+
     @Override
     public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, TransactionAttempt id) {
         _id = id;
@@ -46,8 +46,8 @@ public class CountingCommitBolt extends BaseTransactionalBolt implements ICommit
 
     @Override
     public void finishBatch() {
-        _collector.emit(new Values(_id, _count));        
-    }   
+        _collector.emit(new Values(_id, _count));
+    }
 
     @Override
     public void declareOutputFields(OutputFieldsDeclarer declarer) {

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/FeederSpout.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/FeederSpout.java b/jstorm-core/src/main/java/backtype/storm/testing/FeederSpout.java
index 1ffb594..52ba5b7 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/FeederSpout.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/FeederSpout.java
@@ -29,7 +29,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.UUID;
 
-
 public class FeederSpout extends BaseRichSpout {
     private int _id;
     private Fields _outFields;
@@ -44,15 +43,15 @@ public class FeederSpout extends BaseRichSpout {
     public void setAckFailDelegate(AckFailDelegate d) {
         _ackFailDelegate = d;
     }
-    
+
     public void feed(List<Object> tuple) {
         feed(tuple, UUID.randomUUID().toString());
     }
 
     public void feed(List<Object> tuple, Object msgId) {
         InprocMessaging.sendMessage(_id, new Values(tuple, msgId));
-    }    
-    
+    }
+
     public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
         _collector = collector;
     }
@@ -63,10 +62,10 @@ public class FeederSpout extends BaseRichSpout {
 
     public void nextTuple() {
         List<Object> toEmit = (List<Object>) InprocMessaging.pollMessage(_id);
-        if(toEmit!=null) {
+        if (toEmit != null) {
             List<Object> tuple = (List<Object>) toEmit.get(0);
             Object msgId = toEmit.get(1);
-            
+
             _collector.emit(tuple, msgId);
         } else {
             try {
@@ -78,13 +77,13 @@ public class FeederSpout extends BaseRichSpout {
     }
 
     public void ack(Object msgId) {
-        if(_ackFailDelegate!=null) {
+        if (_ackFailDelegate != null) {
             _ackFailDelegate.ack(msgId);
         }
     }
 
     public void fail(Object msgId) {
-        if(_ackFailDelegate!=null) {
+        if (_ackFailDelegate != null) {
             _ackFailDelegate.fail(msgId);
         }
     }
@@ -96,5 +95,5 @@ public class FeederSpout extends BaseRichSpout {
     @Override
     public Map<String, Object> getComponentConfiguration() {
         return new HashMap<String, Object>();
-    }    
+    }
 }
\ No newline at end of file
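
FeederSpout pushes tuples from test code into a running topology through InprocMessaging; nextTuple polls for them, and ack/fail are forwarded to the delegate shown earlier. A sketch, not part of this patch (the bolt and all ids are made up):

    FeederSpout feeder = new FeederSpout(new Fields("sentence"));
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("feeder", feeder);
    builder.setBolt("exclaim", new ExclamationBolt())  // ExclamationBolt is hypothetical
           .shuffleGrouping("feeder");

    cluster.submitTopology("feeder-test", new Config(), builder.createTopology());
    feeder.feed(new Values("the cow jumped over the moon"), "msg-1");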

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/FixedTupleSpout.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/FixedTupleSpout.java b/jstorm-core/src/main/java/backtype/storm/testing/FixedTupleSpout.java
index 9527803..01fc3e3 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/FixedTupleSpout.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/FixedTupleSpout.java
@@ -35,17 +35,17 @@ public class FixedTupleSpout implements IRichSpout {
     private static final Map<String, Integer> failed = new HashMap<String, Integer>();
 
     public static int getNumAcked(String stormId) {
-        synchronized(acked) {
+        synchronized (acked) {
             return get(acked, stormId, 0);
         }
     }
 
     public static int getNumFailed(String stormId) {
-        synchronized(failed) {
+        synchronized (failed) {
             return get(failed, stormId, 0);
         }
     }
-    
+
     public static void clear(String stormId) {
         acked.remove(stormId);
         failed.remove(stormId);
@@ -67,16 +67,16 @@ public class FixedTupleSpout implements IRichSpout {
 
     public FixedTupleSpout(List tuples, String fieldName) {
         _id = UUID.randomUUID().toString();
-        synchronized(acked) {
+        synchronized (acked) {
             acked.put(_id, 0);
         }
-        synchronized(failed) {
+        synchronized (failed) {
             failed.put(_id, 0);
         }
         _tuples = new ArrayList<FixedTuple>();
-        for(Object o: tuples) {
+        for (Object o : tuples) {
             FixedTuple ft;
-            if(o instanceof FixedTuple) {
+            if (o instanceof FixedTuple) {
                 ft = (FixedTuple) o;
             } else {
                 ft = new FixedTuple((List) o);
@@ -89,25 +89,25 @@ public class FixedTupleSpout implements IRichSpout {
     public List<FixedTuple> getSourceTuples() {
         return _tuples;
     }
-    
+
     public int getCompleted() {
         int ackedAmt;
         int failedAmt;
-        
-        synchronized(acked) {
+
+        synchronized (acked) {
             ackedAmt = acked.get(_id);
         }
-        synchronized(failed) {
+        synchronized (failed) {
             failedAmt = failed.get(_id);
         }
         return ackedAmt + failedAmt;
     }
-    
+
     public void cleanup() {
-        synchronized(acked) {            
+        synchronized (acked) {
             acked.remove(_id);
-        } 
-        synchronized(failed) {            
+        }
+        synchronized (failed) {
             failed.remove(_id);
         }
     }
@@ -116,15 +116,15 @@ public class FixedTupleSpout implements IRichSpout {
         _context = context;
         List<Integer> tasks = context.getComponentTasks(context.getThisComponentId());
         int startIndex;
-        for(startIndex=0; startIndex<tasks.size(); startIndex++) {
-            if(tasks.get(startIndex)==context.getThisTaskId()) {
+        for (startIndex = 0; startIndex < tasks.size(); startIndex++) {
+            if (tasks.get(startIndex) == context.getThisTaskId()) {
                 break;
             }
         }
         _collector = collector;
         _pending = new HashMap<String, FixedTuple>();
         _serveTuples = new ArrayList<FixedTuple>();
-        for(int i=startIndex; i<_tuples.size(); i+=tasks.size()) {
+        for (int i = startIndex; i < _tuples.size(); i += tasks.size()) {
             _serveTuples.add(_tuples.get(i));
         }
     }
@@ -133,7 +133,7 @@ public class FixedTupleSpout implements IRichSpout {
     }
 
     public void nextTuple() {
-        if(_serveTuples.size()>0) {
+        if (_serveTuples.size() > 0) {
             FixedTuple ft = _serveTuples.remove(0);
             String id = UUID.randomUUID().toString();
             _pending.put(id, ft);
@@ -144,16 +144,16 @@ public class FixedTupleSpout implements IRichSpout {
     }
 
     public void ack(Object msgId) {
-        synchronized(acked) {
+        synchronized (acked) {
             int curr = get(acked, _id, 0);
-            acked.put(_id, curr+1);
+            acked.put(_id, curr + 1);
         }
     }
 
     public void fail(Object msgId) {
-        synchronized(failed) {
+        synchronized (failed) {
             int curr = get(failed, _id, 0);
-            failed.put(_id, curr+1);
+            failed.put(_id, curr + 1);
         }
     }
 
@@ -166,7 +166,7 @@ public class FixedTupleSpout implements IRichSpout {
     }
 
     @Override
-    public void declareOutputFields(OutputFieldsDeclarer declarer) { 
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
         if (_fieldName != null) {
             declarer.declare(new Fields(_fieldName));
         }
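
FixedTupleSpout serves a fixed tuple list, striped across its tasks in open(), and tallies acks/fails in static maps keyed by the instance id; getCompleted() lets a test wait for the whole list and cleanup() releases the counters. A sketch with hypothetical values (this relies on local mode, where the static maps are shared with the test JVM):

    List<Object> tuples = new ArrayList<Object>();
    tuples.add(new Values("apple"));
    tuples.add(new Values("banana"));
    FixedTupleSpout spout = new FixedTupleSpout(tuples, "word");

    // ... submit a topology using 'spout', then:
    while (spout.getCompleted() < tuples.size()) {
        Thread.sleep(10);
    }
    spout.cleanup();  // drop this instance's static ack/fail counters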

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/ForwardingMetricsConsumer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/ForwardingMetricsConsumer.java b/jstorm-core/src/main/java/backtype/storm/testing/ForwardingMetricsConsumer.java
index 010336e..5cf8830 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/ForwardingMetricsConsumer.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/ForwardingMetricsConsumer.java
@@ -51,45 +51,42 @@ public class ForwardingMetricsConsumer implements IMetricsConsumer {
 
     @Override
     public void prepare(Map stormConf, Object registrationArgument, TopologyContext context, IErrorReporter errorReporter) {
-        String [] parts = ((String)registrationArgument).split(":",2);
+        String[] parts = ((String) registrationArgument).split(":", 2);
         host = parts[0];
         port = Integer.valueOf(parts[1]);
         try {
-          socket = new Socket(host, port);
-          out = socket.getOutputStream();
+            socket = new Socket(host, port);
+            out = socket.getOutputStream();
         } catch (Exception e) {
-          throw new RuntimeException(e);
+            throw new RuntimeException(e);
         }
     }
 
     @Override
     public void handleDataPoints(TaskInfo taskInfo, Collection<DataPoint> dataPoints) {
         StringBuilder sb = new StringBuilder();
-        String header = taskInfo.timestamp + "\t" +
-            taskInfo.srcWorkerHost + ":"+ taskInfo.srcWorkerPort + "\t"+
-            taskInfo.srcTaskId + "\t" + taskInfo.srcComponentId + "\t";
+        String header =
+                taskInfo.timestamp + "\t" + taskInfo.srcWorkerHost + ":" + taskInfo.srcWorkerPort + "\t" + taskInfo.srcTaskId + "\t" + taskInfo.srcComponentId
+                        + "\t";
         sb.append(header);
         for (DataPoint p : dataPoints) {
             sb.delete(header.length(), sb.length());
-            sb.append(p.name)
-                .append("\t")
-                .append(p.value)
-                .append("\n");
+            sb.append(p.name).append("\t").append(p.value).append("\n");
             try {
-              out.write(sb.toString().getBytes());
-              out.flush();
+                out.write(sb.toString().getBytes());
+                out.flush();
             } catch (Exception e) {
-              throw new RuntimeException(e);
+                throw new RuntimeException(e);
             }
         }
     }
 
     @Override
-    public void cleanup() { 
-      try {
-        socket.close();
-      } catch (Exception e) {
-        throw new RuntimeException(e);
-      }
+    public void cleanup() {
+        try {
+            socket.close();
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
     }
 }
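
ForwardingMetricsConsumer writes each DataPoint as tab-separated lines to a plain socket; the registration argument is parsed as "host:port" (split on the first ':'). Assuming a listener on localhost:9999 (hypothetical), it would be registered through the topology config:

    Config conf = new Config();
    // one consumer task forwarding all metrics to host:port
    conf.registerMetricsConsumer(ForwardingMetricsConsumer.class, "localhost:9999", 1);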

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/IdentityBolt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/IdentityBolt.java b/jstorm-core/src/main/java/backtype/storm/testing/IdentityBolt.java
index dcad640..b951d84 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/IdentityBolt.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/IdentityBolt.java
@@ -25,7 +25,7 @@ import backtype.storm.tuple.Tuple;
 
 public class IdentityBolt extends BaseBasicBolt {
     Fields _fields;
-    
+
     public IdentityBolt(Fields fields) {
         _fields = fields;
     }
@@ -38,5 +38,5 @@ public class IdentityBolt extends BaseBasicBolt {
     @Override
     public void declareOutputFields(OutputFieldsDeclarer declarer) {
         declarer.declare(_fields);
-    }    
+    }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/KeyedCountingBatchBolt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/KeyedCountingBatchBolt.java b/jstorm-core/src/main/java/backtype/storm/testing/KeyedCountingBatchBolt.java
index 1c4d5b3..7e4b32f 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/KeyedCountingBatchBolt.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/KeyedCountingBatchBolt.java
@@ -32,7 +32,7 @@ public class KeyedCountingBatchBolt extends BaseBatchBolt {
     BatchOutputCollector _collector;
     Object _id;
     Map<Object, Integer> _counts = new HashMap<Object, Integer>();
-    
+
     @Override
     public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, Object id) {
         _collector = collector;
@@ -48,14 +48,14 @@ public class KeyedCountingBatchBolt extends BaseBatchBolt {
 
     @Override
     public void finishBatch() {
-        for(Object key: _counts.keySet()) {
+        for (Object key : _counts.keySet()) {
             _collector.emit(new Values(_id, key, _counts.get(key)));
         }
-    }   
+    }
 
     @Override
     public void declareOutputFields(OutputFieldsDeclarer declarer) {
         declarer.declare(new Fields("tx", "key", "count"));
     }
-    
+
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/KeyedSummingBatchBolt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/KeyedSummingBatchBolt.java b/jstorm-core/src/main/java/backtype/storm/testing/KeyedSummingBatchBolt.java
index 887eb4e..67225cb 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/KeyedSummingBatchBolt.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/KeyedSummingBatchBolt.java
@@ -33,7 +33,7 @@ public class KeyedSummingBatchBolt extends BaseBatchBolt {
     BatchOutputCollector _collector;
     Object _id;
     Map<Object, Number> _sums = new HashMap<Object, Number>();
-    
+
     @Override
     public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, Object id) {
         _collector = collector;
@@ -43,19 +43,19 @@ public class KeyedSummingBatchBolt extends BaseBatchBolt {
     @Override
     public void execute(Tuple tuple) {
         Object key = tuple.getValue(1);
-        Number curr = Utils.get(_sums, key, 0);        
+        Number curr = Utils.get(_sums, key, 0);
         _sums.put(key, Numbers.add(curr, tuple.getValue(2)));
     }
 
     @Override
     public void finishBatch() {
-        for(Object key: _sums.keySet()) {
+        for (Object key : _sums.keySet()) {
             _collector.emit(new Values(_id, key, _sums.get(key)));
         }
-    }   
+    }
 
     @Override
     public void declareOutputFields(OutputFieldsDeclarer declarer) {
         declarer.declare(new Fields("tx", "key", "sum"));
-    }    
+    }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/MemoryTransactionalSpout.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/MemoryTransactionalSpout.java b/jstorm-core/src/main/java/backtype/storm/testing/MemoryTransactionalSpout.java
index 3b492e1..75ad375 100644
--- a/jstorm-core/src/main/java/backtype/storm/testing/MemoryTransactionalSpout.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/MemoryTransactionalSpout.java
@@ -35,13 +35,13 @@ import java.util.Map;
 
 public class MemoryTransactionalSpout implements IPartitionedTransactionalSpout<MemoryTransactionalSpoutMeta> {
     public static String TX_FIELD = MemoryTransactionalSpout.class.getName() + "/id";
-    
+
     private String _id;
     private String _finishedPartitionsId;
     private int _takeAmt;
     private Fields _outFields;
     private Map<Integer, List<List<Object>>> _initialPartitions;
-    
+
     public MemoryTransactionalSpout(Map<Integer, List<List<Object>>> partitions, Fields outFields, int takeAmt) {
         _id = RegisteredGlobalState.registerState(partitions);
         Map<Integer, Boolean> finished = Collections.synchronizedMap(new HashMap<Integer, Boolean>());
@@ -50,17 +50,17 @@ public class MemoryTransactionalSpout implements IPartitionedTransactionalSpout<
         _outFields = outFields;
         _initialPartitions = partitions;
     }
-    
+
     public boolean isExhaustedTuples() {
         Map<Integer, Boolean> statuses = getFinishedStatuses();
-        for(Integer partition: getQueues().keySet()) {
-            if(!statuses.containsKey(partition) || !getFinishedStatuses().get(partition)) {
+        for (Integer partition : getQueues().keySet()) {
+            if (!statuses.containsKey(partition) || !getFinishedStatuses().get(partition)) {
                 return false;
             }
         }
         return true;
     }
-    
+
     class Coordinator implements IPartitionedTransactionalSpout.Coordinator {
 
         @Override
@@ -71,29 +71,31 @@ public class MemoryTransactionalSpout implements IPartitionedTransactionalSpout<
         @Override
         public boolean isReady() {
             return true;
-        }        
-        
+        }
+
         @Override
         public void close() {
-        }        
+        }
     }
-    
+
     class Emitter implements IPartitionedTransactionalSpout.Emitter<MemoryTransactionalSpoutMeta> {
-        
+
         Integer _maxSpoutPending;
         Map<Integer, Integer> _emptyPartitions = new HashMap<Integer, Integer>();
-        
+
         public Emitter(Map conf) {
             Object c = conf.get(Config.TOPOLOGY_MAX_SPOUT_PENDING);
-            if(c==null) _maxSpoutPending = 1;
-            else _maxSpoutPending = Utils.getInt(c);
+            if (c == null)
+                _maxSpoutPending = 1;
+            else
+                _maxSpoutPending = Utils.getInt(c);
         }
-        
-        
+
         @Override
-        public MemoryTransactionalSpoutMeta emitPartitionBatchNew(TransactionAttempt tx, BatchOutputCollector collector, int partition, MemoryTransactionalSpoutMeta lastPartitionMeta) {
+        public MemoryTransactionalSpoutMeta emitPartitionBatchNew(TransactionAttempt tx, BatchOutputCollector collector, int partition,
+                MemoryTransactionalSpoutMeta lastPartitionMeta) {
             int index;
-            if(lastPartitionMeta==null) {
+            if (lastPartitionMeta == null) {
                 index = 0;
             } else {
                 index = lastPartitionMeta.index + lastPartitionMeta.amt;
@@ -102,40 +104,40 @@ public class MemoryTransactionalSpout implements IPartitionedTransactionalSpout<
             int total = queue.size();
             int left = total - index;
             int toTake = Math.min(left, _takeAmt);
-            
+
             MemoryTransactionalSpoutMeta ret = new MemoryTransactionalSpoutMeta(index, toTake);
             emitPartitionBatch(tx, collector, partition, ret);
-            if(toTake==0) {
+            if (toTake == 0) {
                 // this is a pretty hacky way to determine when all the partitions have been committed
                 // wait until we've emitted max-spout-pending empty partitions for the partition
                 int curr = Utils.get(_emptyPartitions, partition, 0) + 1;
                 _emptyPartitions.put(partition, curr);
-                if(curr > _maxSpoutPending) {
+                if (curr > _maxSpoutPending) {
                     Map<Integer, Boolean> finishedStatuses = getFinishedStatuses();
                     // will be null in remote mode
-                    if(finishedStatuses!=null) {
+                    if (finishedStatuses != null) {
                         finishedStatuses.put(partition, true);
                     }
                 }
             }
-            return ret;   
+            return ret;
         }
 
         @Override
         public void emitPartitionBatch(TransactionAttempt tx, BatchOutputCollector collector, int partition, MemoryTransactionalSpoutMeta partitionMeta) {
             List<List<Object>> queue = getQueues().get(partition);
-            for(int i=partitionMeta.index; i < partitionMeta.index + partitionMeta.amt; i++) {
+            for (int i = partitionMeta.index; i < partitionMeta.index + partitionMeta.amt; i++) {
                 List<Object> toEmit = new ArrayList<Object>(queue.get(i));
                 toEmit.add(0, tx);
-                collector.emit(toEmit);                
+                collector.emit(toEmit);
             }
         }
-                
+
         @Override
         public void close() {
-        }        
-    } 
-    
+        }
+    }
+
     @Override
     public IPartitionedTransactionalSpout.Coordinator getCoordinator(Map conf, TopologyContext context) {
         return new Coordinator();
@@ -159,22 +161,24 @@ public class MemoryTransactionalSpout implements IPartitionedTransactionalSpout<
         conf.registerSerialization(MemoryTransactionalSpoutMeta.class);
         return conf;
     }
-    
+
     public void startup() {
         getFinishedStatuses().clear();
     }
-    
+
     public void cleanup() {
         RegisteredGlobalState.clearState(_id);
         RegisteredGlobalState.clearState(_finishedPartitionsId);
     }
-    
-    private Map<Integer, List<List<Object>>> getQueues() {   
+
+    private Map<Integer, List<List<Object>>> getQueues() {
         Map<Integer, List<List<Object>>> ret = (Map<Integer, List<List<Object>>>) RegisteredGlobalState.getState(_id);
-        if(ret!=null) return ret;
-        else return _initialPartitions;
+        if (ret != null)
+            return ret;
+        else
+            return _initialPartitions;
     }
-    
+
     private Map<Integer, Boolean> getFinishedStatuses() {
         return (Map<Integer, Boolean>) RegisteredGlobalState.getState(_finishedPartitionsId);
     }
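
MemoryTransactionalSpout is a partitioned transactional spout over in-memory queues held in RegisteredGlobalState; takeAmt caps the batch taken per partition per transaction. A sketch wiring it into a transactional topology (ids hypothetical; CountingBatchBolt is the test bolt from earlier in this patch):

    List<List<Object>> part0 = new ArrayList<List<Object>>();
    part0.add(new Values("apple"));
    part0.add(new Values("banana"));
    Map<Integer, List<List<Object>>> partitions = new HashMap<Integer, List<List<Object>>>();
    partitions.put(0, part0);

    MemoryTransactionalSpout spout =
            new MemoryTransactionalSpout(partitions, new Fields("word"), 2);
    TransactionalTopologyBuilder builder =
            new TransactionalTopologyBuilder("tx-test", "spout", spout, 1);
    builder.setBolt("count", new CountingBatchBolt(), 1).noneGrouping("spout");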

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/MemoryTransactionalSpoutMeta.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/MemoryTransactionalSpoutMeta.java b/jstorm-core/src/main/java/backtype/storm/testing/MemoryTransactionalSpoutMeta.java
index 29681fb..a00788d 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/MemoryTransactionalSpoutMeta.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/MemoryTransactionalSpoutMeta.java
@@ -20,12 +20,12 @@ package backtype.storm.testing;
 public class MemoryTransactionalSpoutMeta {
     int index;
     int amt;
-    
+
     // for kryo compatibility
     public MemoryTransactionalSpoutMeta() {
-        
+
     }
-    
+
     public MemoryTransactionalSpoutMeta(int index, int amt) {
         this.index = index;
         this.amt = amt;
@@ -34,5 +34,5 @@ public class MemoryTransactionalSpoutMeta {
     @Override
     public String toString() {
         return "index: " + index + "; amt: " + amt;
-    }    
+    }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/MkClusterParam.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/MkClusterParam.java b/jstorm-core/src/main/java/backtype/storm/testing/MkClusterParam.java
index cd677c8..d325377 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/MkClusterParam.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/MkClusterParam.java
@@ -23,35 +23,40 @@ import java.util.Map;
  * The param arg for <code>Testing.withSimulatedTimeCluster</code> and <code>Testing.withTrackedCluster</code>
  */
 public class MkClusterParam {
-	/**
-	 * count of supervisors for the cluster.
-	 */
-	private Integer supervisors;
-	/**
-	 * count of port for each supervisor
-	 */
-	private Integer portsPerSupervisor;
-	/**
-	 * cluster config
-	 */
-	private Map daemonConf;
-	
-	public Integer getSupervisors() {
-		return supervisors;
-	}
-	public void setSupervisors(Integer supervisors) {
-		this.supervisors = supervisors;
-	}
-	public Integer getPortsPerSupervisor() {
-		return portsPerSupervisor;
-	}
-	public void setPortsPerSupervisor(Integer portsPerSupervisor) {
-		this.portsPerSupervisor = portsPerSupervisor;
-	}
-	public Map getDaemonConf() {
-		return daemonConf;
-	}
-	public void setDaemonConf(Map daemonConf) {
-		this.daemonConf = daemonConf;
-	}
+    /**
+     * count of supervisors for the cluster.
+     */
+    private Integer supervisors;
+    /**
+     * count of ports for each supervisor
+     */
+    private Integer portsPerSupervisor;
+    /**
+     * cluster config
+     */
+    private Map daemonConf;
+
+    public Integer getSupervisors() {
+        return supervisors;
+    }
+
+    public void setSupervisors(Integer supervisors) {
+        this.supervisors = supervisors;
+    }
+
+    public Integer getPortsPerSupervisor() {
+        return portsPerSupervisor;
+    }
+
+    public void setPortsPerSupervisor(Integer portsPerSupervisor) {
+        this.portsPerSupervisor = portsPerSupervisor;
+    }
+
+    public Map getDaemonConf() {
+        return daemonConf;
+    }
+
+    public void setDaemonConf(Map daemonConf) {
+        this.daemonConf = daemonConf;
+    }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/MkTupleParam.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/MkTupleParam.java b/jstorm-core/src/main/java/backtype/storm/testing/MkTupleParam.java
index 34a8c68..a8a4bdf 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/MkTupleParam.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/MkTupleParam.java
@@ -21,31 +21,34 @@ import java.util.ArrayList;
 import java.util.List;
 
 public class MkTupleParam {
-	private String stream;
-	private String component;
-	private List<String> fields;
-	
-	public String getStream() {
-		return stream;
-	}
-	public void setStream(String stream) {
-		this.stream = stream;
-	}
-	
-	public String getComponent() {
-		return component;
-	}
-	public void setComponent(String component) {
-		this.component = component;
-	}
-	
-	public List<String> getFields() {
-		return fields;
-	}
-	public void setFields(String... fields) {
-		this.fields = new ArrayList<String>();
-		for (int i = 0; i < fields.length; i++) {
-			this.fields.add(fields[i]);
-		}
-	}
+    private String stream;
+    private String component;
+    private List<String> fields;
+
+    public String getStream() {
+        return stream;
+    }
+
+    public void setStream(String stream) {
+        this.stream = stream;
+    }
+
+    public String getComponent() {
+        return component;
+    }
+
+    public void setComponent(String component) {
+        this.component = component;
+    }
+
+    public List<String> getFields() {
+        return fields;
+    }
+
+    public void setFields(String... fields) {
+        this.fields = new ArrayList<String>();
+        for (int i = 0; i < fields.length; i++) {
+            this.fields.add(fields[i]);
+        }
+    }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/MockedSources.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/MockedSources.java b/jstorm-core/src/main/java/backtype/storm/testing/MockedSources.java
index 1fd6b85..48b9ac0 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/MockedSources.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/MockedSources.java
@@ -26,11 +26,11 @@ import backtype.storm.tuple.Values;
 import backtype.storm.utils.Utils;
 
 public class MockedSources {
-	/**
-	 * mocked spout sources for the [spout, stream] pair.
-	 */
+    /**
+     * mocked spout sources for the [spout, stream] pair.
+     */
     private Map<String, List<FixedTuple>> data = new HashMap<String, List<FixedTuple>>();
-    
+
     /**
      * add mock data for the spout.
      * 
@@ -42,18 +42,18 @@ public class MockedSources {
         if (!data.containsKey(spoutId)) {
             data.put(spoutId, new ArrayList<FixedTuple>());
         }
-        
+
         List<FixedTuple> tuples = data.get(spoutId);
         for (int i = 0; i < valueses.length; i++) {
             FixedTuple tuple = new FixedTuple(streamId, valueses[i]);
             tuples.add(tuple);
         }
     }
-    
+
     public void addMockData(String spoutId, Values... valueses) {
         this.addMockData(spoutId, Utils.DEFAULT_STREAM_ID, valueses);
     }
-    
+
     public Map<String, List<FixedTuple>> getData() {
         return this.data;
     }
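
MockedSources keys fixed tuples by [spout, stream]; completeTopology substitutes them for the spout's real output. A sketch of both overloads (ids hypothetical):

    MockedSources mocked = new MockedSources();
    mocked.addMockData("sentence-spout", new Values("the cat"));              // default stream
    mocked.addMockData("sentence-spout", "other-stream", new Values("dog"));  // explicit stream id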

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/NGrouping.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/NGrouping.java b/jstorm-core/src/main/java/backtype/storm/testing/NGrouping.java
index 785ed92..9e7363c 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/NGrouping.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/NGrouping.java
@@ -27,17 +27,17 @@ import java.util.List;
 public class NGrouping implements CustomStreamGrouping {
     int _n;
     List<Integer> _outTasks;
-    
+
     public NGrouping(int n) {
         _n = n;
     }
-    
+
     @Override
     public void prepare(WorkerTopologyContext context, GlobalStreamId stream, List<Integer> targetTasks) {
         targetTasks = new ArrayList<Integer>(targetTasks);
         Collections.sort(targetTasks);
         _outTasks = new ArrayList<Integer>();
-        for(int i=0; i<_n; i++) {
+        for (int i = 0; i < _n; i++) {
             _outTasks.add(targetTasks.get(i));
         }
     }
@@ -46,5 +46,5 @@ public class NGrouping implements CustomStreamGrouping {
     public List<Integer> chooseTasks(int taskId, List<Object> values) {
         return _outTasks;
     }
-    
+
 }
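
NGrouping sends every tuple to all of the n lowest-numbered target tasks, which keeps routing deterministic in tests. A sketch (ids hypothetical; IdentityBolt is from this same package, TestWordSpout is another helper in it):

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new TestWordSpout());
    builder.setBolt("identity", new IdentityBolt(new Fields("word")), 4)
           .customGrouping("spout", new NGrouping(2));  // each tuple goes to both of the 2 lowest task ids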

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/NonRichBoltTracker.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/NonRichBoltTracker.java b/jstorm-core/src/main/java/backtype/storm/testing/NonRichBoltTracker.java
index ccbb67f..b489289 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/NonRichBoltTracker.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/NonRichBoltTracker.java
@@ -25,7 +25,6 @@ import backtype.storm.utils.RegisteredGlobalState;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicInteger;
 
-
 public class NonRichBoltTracker implements IBolt {
     IBolt _delegate;
     String _trackId;

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/OpaqueMemoryTransactionalSpout.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/OpaqueMemoryTransactionalSpout.java b/jstorm-core/src/main/java/backtype/storm/testing/OpaqueMemoryTransactionalSpout.java
index 1ff01b9..0c91d2b 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/OpaqueMemoryTransactionalSpout.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/OpaqueMemoryTransactionalSpout.java
@@ -37,34 +37,34 @@ import java.util.Map;
  */
 public class OpaqueMemoryTransactionalSpout implements IOpaquePartitionedTransactionalSpout<MemoryTransactionalSpoutMeta> {
     public static String TX_FIELD = MemoryTransactionalSpout.class.getName() + "/id";
-    
+
     private String _id;
     private String _finishedPartitionsId;
     private String _disabledId;
     private int _takeAmt;
     private Fields _outFields;
-    
+
     public OpaqueMemoryTransactionalSpout(Map<Integer, List<List<Object>>> partitions, Fields outFields, int takeAmt) {
         _id = RegisteredGlobalState.registerState(partitions);
-        
+
         Map<Integer, Boolean> finished = Collections.synchronizedMap(new HashMap<Integer, Boolean>());
         _finishedPartitionsId = RegisteredGlobalState.registerState(finished);
-        
+
         Map<Integer, Boolean> disabled = Collections.synchronizedMap(new HashMap<Integer, Boolean>());
         _disabledId = RegisteredGlobalState.registerState(disabled);
-        
+
         _takeAmt = takeAmt;
         _outFields = outFields;
     }
-    
+
     public void setDisabled(Integer partition, boolean disabled) {
         getDisabledStatuses().put(partition, disabled);
     }
-    
+
     public boolean isExhaustedTuples() {
         Map<Integer, Boolean> statuses = getFinishedStatuses();
-        for(Integer partition: getQueues().keySet()) {
-            if(!statuses.containsKey(partition) || !getFinishedStatuses().get(partition)) {
+        for (Integer partition : getQueues().keySet()) {
+            if (!statuses.containsKey(partition) || !getFinishedStatuses().get(partition)) {
                 return false;
             }
         }
@@ -80,7 +80,7 @@ public class OpaqueMemoryTransactionalSpout implements IOpaquePartitionedTransac
     public IOpaquePartitionedTransactionalSpout.Coordinator getCoordinator(Map conf, TopologyContext context) {
         return new Coordinator();
     }
-    
+
     class Coordinator implements IOpaquePartitionedTransactionalSpout.Coordinator {
         @Override
         public boolean isReady() {
@@ -91,24 +91,26 @@ public class OpaqueMemoryTransactionalSpout implements IOpaquePartitionedTransac
         public void close() {
         }
     }
-    
+
     class Emitter implements IOpaquePartitionedTransactionalSpout.Emitter<MemoryTransactionalSpoutMeta> {
-        
+
         Integer _maxSpoutPending;
         Map<Integer, Integer> _emptyPartitions = new HashMap<Integer, Integer>();
-        
+
         public Emitter(Map conf) {
             Object c = conf.get(Config.TOPOLOGY_MAX_SPOUT_PENDING);
-            if(c==null) _maxSpoutPending = 1;
-            else _maxSpoutPending = Utils.getInt(c);
+            if (c == null)
+                _maxSpoutPending = 1;
+            else
+                _maxSpoutPending = Utils.getInt(c);
         }
-        
-        
+
         @Override
-        public MemoryTransactionalSpoutMeta emitPartitionBatch(TransactionAttempt tx, BatchOutputCollector collector, int partition, MemoryTransactionalSpoutMeta lastPartitionMeta) {
-            if(!Boolean.FALSE.equals(getDisabledStatuses().get(partition))) {
+        public MemoryTransactionalSpoutMeta emitPartitionBatch(TransactionAttempt tx, BatchOutputCollector collector, int partition,
+                MemoryTransactionalSpoutMeta lastPartitionMeta) {
+            if (!Boolean.FALSE.equals(getDisabledStatuses().get(partition))) {
                 int index;
-                if(lastPartitionMeta==null) {
+                if (lastPartitionMeta == null) {
                     index = 0;
                 } else {
                     index = lastPartitionMeta.index + lastPartitionMeta.amt;
@@ -119,26 +121,26 @@ public class OpaqueMemoryTransactionalSpout implements IOpaquePartitionedTransac
                 int toTake = Math.min(left, _takeAmt);
 
                 MemoryTransactionalSpoutMeta ret = new MemoryTransactionalSpoutMeta(index, toTake);
-                for(int i=ret.index; i < ret.index + ret.amt; i++) {
+                for (int i = ret.index; i < ret.index + ret.amt; i++) {
                     List<Object> toEmit = new ArrayList<Object>(queue.get(i));
                     toEmit.add(0, tx);
-                    collector.emit(toEmit);                
+                    collector.emit(toEmit);
                 }
-                if(toTake==0) {
+                if (toTake == 0) {
                     // this is a pretty hacky way to determine when all the partitions have been committed
                     // wait until we've emitted max-spout-pending empty partitions for the partition
                     int curr = Utils.get(_emptyPartitions, partition, 0) + 1;
                     _emptyPartitions.put(partition, curr);
-                    if(curr > _maxSpoutPending) {
+                    if (curr > _maxSpoutPending) {
                         getFinishedStatuses().put(partition, true);
                     }
                 }
-                return ret; 
+                return ret;
             } else {
                 return null;
             }
         }
-                
+
         @Override
         public void close() {
         }
@@ -147,7 +149,7 @@ public class OpaqueMemoryTransactionalSpout implements IOpaquePartitionedTransac
         public int numPartitions() {
             return getQueues().size();
         }
-    } 
+    }
 
     @Override
     public void declareOutputFields(OutputFieldsDeclarer declarer) {
@@ -162,20 +164,20 @@ public class OpaqueMemoryTransactionalSpout implements IOpaquePartitionedTransac
         conf.registerSerialization(MemoryTransactionalSpoutMeta.class);
         return conf;
     }
-    
+
     public void startup() {
         getFinishedStatuses().clear();
     }
-    
+
     public void cleanup() {
         RegisteredGlobalState.clearState(_id);
         RegisteredGlobalState.clearState(_finishedPartitionsId);
     }
-    
+
     private Map<Integer, List<List<Object>>> getQueues() {
         return (Map<Integer, List<List<Object>>>) RegisteredGlobalState.getState(_id);
     }
-    
+
     private Map<Integer, Boolean> getFinishedStatuses() {
         return (Map<Integer, Boolean>) RegisteredGlobalState.getState(_finishedPartitionsId);
     }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/PrepareBatchBolt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/PrepareBatchBolt.java b/jstorm-core/src/main/java/backtype/storm/testing/PrepareBatchBolt.java
index 0bd9833..e9e2a9d 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/PrepareBatchBolt.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/PrepareBatchBolt.java
@@ -26,10 +26,9 @@ import backtype.storm.utils.Utils;
 import java.util.ArrayList;
 import java.util.List;
 
-
 public class PrepareBatchBolt extends BaseBasicBolt {
     Fields _outFields;
-    
+
     public PrepareBatchBolt(Fields outFields) {
         _outFields = outFields;
     }
@@ -47,6 +46,5 @@ public class PrepareBatchBolt extends BaseBasicBolt {
         toEmit.addAll(input.getValues());
         collector.emit(toEmit);
     }
-    
-    
+
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/PythonShellMetricsBolt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/PythonShellMetricsBolt.java b/jstorm-core/src/main/java/backtype/storm/testing/PythonShellMetricsBolt.java
index 4b85ce8..fd41283 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/PythonShellMetricsBolt.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/PythonShellMetricsBolt.java
@@ -27,23 +27,23 @@ import backtype.storm.topology.IRichBolt;
 import backtype.storm.topology.OutputFieldsDeclarer;
 
 public class PythonShellMetricsBolt extends ShellBolt implements IRichBolt {
-	private static final long serialVersionUID = 1999209252187463355L;
-	
-	public PythonShellMetricsBolt(String[] command) {
-		super(command);
-	}
-
-	public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
-		super.prepare(stormConf, context, collector);
-		
-		CountShellMetric cMetric = new CountShellMetric();
-		context.registerMetric("my-custom-shell-metric", cMetric, 5);
-	}
-	
-	public void declareOutputFields(OutputFieldsDeclarer declarer) {
-	}
-
-	public Map<String, Object> getComponentConfiguration() {
-		return null;
-	}
+    private static final long serialVersionUID = 1999209252187463355L;
+
+    public PythonShellMetricsBolt(String[] command) {
+        super(command);
+    }
+
+    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
+        super.prepare(stormConf, context, collector);
+
+        CountShellMetric cMetric = new CountShellMetric();
+        context.registerMetric("my-custom-shell-metric", cMetric, 5);
+    }
+
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+    }
+
+    public Map<String, Object> getComponentConfiguration() {
+        return null;
+    }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/PythonShellMetricsSpout.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/PythonShellMetricsSpout.java b/jstorm-core/src/main/java/backtype/storm/testing/PythonShellMetricsSpout.java
index 3ccf935..8325fba 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/PythonShellMetricsSpout.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/PythonShellMetricsSpout.java
@@ -28,25 +28,25 @@ import backtype.storm.topology.OutputFieldsDeclarer;
 import backtype.storm.tuple.Fields;
 
 public class PythonShellMetricsSpout extends ShellSpout implements IRichSpout {
-	private static final long serialVersionUID = 1999209252187463355L;
-
-	public PythonShellMetricsSpout(String[] command) {
-		super(command);
-	}
-	
-	@Override
-	public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
-		super.open(conf, context, collector);
-	
-		CountShellMetric cMetric = new CountShellMetric();
-		context.registerMetric("my-custom-shellspout-metric", cMetric, 5);
-	}
-
-	public void declareOutputFields(OutputFieldsDeclarer declarer) {
-		declarer.declare(new Fields("field1"));
-	}
-
-	public Map<String, Object> getComponentConfiguration() {
-		return null;
-	}
+    private static final long serialVersionUID = 1999209252187463355L;
+
+    public PythonShellMetricsSpout(String[] command) {
+        super(command);
+    }
+
+    @Override
+    public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
+        super.open(conf, context, collector);
+
+        CountShellMetric cMetric = new CountShellMetric();
+        context.registerMetric("my-custom-shellspout-metric", cMetric, 5);
+    }
+
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+        declarer.declare(new Fields("field1"));
+    }
+
+    public Map<String, Object> getComponentConfiguration() {
+        return null;
+    }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/SingleUserSimpleTransport.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/SingleUserSimpleTransport.java b/jstorm-core/src/main/java/backtype/storm/testing/SingleUserSimpleTransport.java
index 4d25ac7..cf9dc4d 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/SingleUserSimpleTransport.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/SingleUserSimpleTransport.java
@@ -23,15 +23,19 @@ import javax.security.auth.Subject;
 import java.security.Principal;
 import java.util.HashSet;
 
-
 public class SingleUserSimpleTransport extends SimpleTransportPlugin {
-   @Override
-   protected Subject getDefaultSubject() {
-       HashSet<Principal> principals = new HashSet<Principal>();
-       principals.add(new Principal() {
-          public String getName() { return "user"; }
-          public String toString() { return "user"; }
-       });
-       return new Subject(true, principals, new HashSet<Object>(), new HashSet<Object>());
-   } 
+    @Override
+    protected Subject getDefaultSubject() {
+        HashSet<Principal> principals = new HashSet<Principal>();
+        principals.add(new Principal() {
+            public String getName() {
+                return "user";
+            }
+
+            public String toString() {
+                return "user";
+            }
+        });
+        return new Subject(true, principals, new HashSet<Object>(), new HashSet<Object>());
+    }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/SpoutTracker.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/SpoutTracker.java b/jstorm-core/src/main/java/backtype/storm/testing/SpoutTracker.java
index 75ba2b8..369e661 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/SpoutTracker.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/SpoutTracker.java
@@ -28,13 +28,11 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicInteger;
 
-
 public class SpoutTracker extends BaseRichSpout {
     IRichSpout _delegate;
     SpoutTrackOutputCollector _tracker;
     String _trackId;
 
-
     private class SpoutTrackOutputCollector implements ISpoutOutputCollector {
         public int transferred = 0;
         public int emitted = 0;
@@ -43,11 +41,11 @@ public class SpoutTracker extends BaseRichSpout {
         public SpoutTrackOutputCollector(SpoutOutputCollector collector) {
             _collector = collector;
         }
-        
+
         private void recordSpoutEmit() {
             Map stats = (Map) RegisteredGlobalState.getState(_trackId);
             ((AtomicInteger) stats.get("spout-emitted")).incrementAndGet();
-            
+
         }
 
         public List<Integer> emit(String streamId, List<Object> tuple, Object messageId) {
@@ -63,11 +61,10 @@ public class SpoutTracker extends BaseRichSpout {
 
         @Override
         public void reportError(Throwable error) {
-        	_collector.reportError(error);
+            _collector.reportError(error);
         }
     }
 
-
     public SpoutTracker(IRichSpout delegate, String trackId) {
         _delegate = delegate;
         _trackId = trackId;
@@ -95,7 +92,7 @@ public class SpoutTracker extends BaseRichSpout {
     public void fail(Object msgId) {
         _delegate.fail(msgId);
         Map stats = (Map) RegisteredGlobalState.getState(_trackId);
-        ((AtomicInteger) stats.get("processed")).incrementAndGet();        
+        ((AtomicInteger) stats.get("processed")).incrementAndGet();
     }
 
     public void declareOutputFields(OutputFieldsDeclarer declarer) {

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/TestAggregatesCounter.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/TestAggregatesCounter.java b/jstorm-core/src/main/java/backtype/storm/testing/TestAggregatesCounter.java
index e8c0a61..76b6874 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/TestAggregatesCounter.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/TestAggregatesCounter.java
@@ -29,7 +29,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import static backtype.storm.utils.Utils.tuple;
 
-
 public class TestAggregatesCounter extends BaseRichBolt {
     public static Logger LOG = LoggerFactory.getLogger(TestWordCounter.class);
 
@@ -46,8 +45,8 @@ public class TestAggregatesCounter extends BaseRichBolt {
         int count = (Integer) input.getValues().get(1);
         _counts.put(word, count);
         int globalCount = 0;
-        for(String w: _counts.keySet()) {
-            globalCount+=_counts.get(w);
+        for (String w : _counts.keySet()) {
+            globalCount += _counts.get(w);
         }
         _collector.emit(tuple(globalCount));
         _collector.ack(input);

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/TestConfBolt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/TestConfBolt.java b/jstorm-core/src/main/java/backtype/storm/testing/TestConfBolt.java
index 5790fb3..634cbe1 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/TestConfBolt.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/TestConfBolt.java
@@ -26,7 +26,6 @@ import backtype.storm.tuple.Tuple;
 import backtype.storm.tuple.Values;
 import java.util.Map;
 
-
 public class TestConfBolt extends BaseBasicBolt {
     Map<String, Object> _componentConf;
     Map<String, Object> _conf;
@@ -34,16 +33,16 @@ public class TestConfBolt extends BaseBasicBolt {
     public TestConfBolt() {
         this(null);
     }
-        
+
     public TestConfBolt(Map<String, Object> componentConf) {
         _componentConf = componentConf;
-    }        
+    }
 
     @Override
     public void prepare(Map conf, TopologyContext context) {
         _conf = conf;
-    }    
-    
+    }
+
     @Override
     public void declareOutputFields(OutputFieldsDeclarer declarer) {
         declarer.declare(new Fields("conf", "value"));
@@ -58,5 +57,5 @@ public class TestConfBolt extends BaseBasicBolt {
     @Override
     public Map<String, Object> getComponentConfiguration() {
         return _componentConf;
-    }    
+    }
 }


[28/51] [partial] storm git commit: Update JStorm to latest release 2.1.0

Posted by pt...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/transactional/TransactionalSpoutCoordinator.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/transactional/TransactionalSpoutCoordinator.java b/jstorm-core/src/main/java/backtype/storm/transactional/TransactionalSpoutCoordinator.java
index f7ce534..3768cb1 100755
--- a/jstorm-core/src/main/java/backtype/storm/transactional/TransactionalSpoutCoordinator.java
+++ b/jstorm-core/src/main/java/backtype/storm/transactional/TransactionalSpoutCoordinator.java
@@ -35,40 +35,38 @@ import java.util.Random;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class TransactionalSpoutCoordinator extends BaseRichSpout { 
+public class TransactionalSpoutCoordinator extends BaseRichSpout {
     public static final Logger LOG = LoggerFactory.getLogger(TransactionalSpoutCoordinator.class);
-    
+
     public static final BigInteger INIT_TXID = BigInteger.ONE;
-    
-    
+
     public static final String TRANSACTION_BATCH_STREAM_ID = TransactionalSpoutCoordinator.class.getName() + "/batch";
     public static final String TRANSACTION_COMMIT_STREAM_ID = TransactionalSpoutCoordinator.class.getName() + "/commit";
 
     private static final String CURRENT_TX = "currtx";
     private static final String META_DIR = "meta";
-    
+
     private ITransactionalSpout _spout;
     private ITransactionalSpout.Coordinator _coordinator;
     private TransactionalState _state;
     private RotatingTransactionalState _coordinatorState;
-    
+
     TreeMap<BigInteger, TransactionStatus> _activeTx = new TreeMap<BigInteger, TransactionStatus>();
-    
+
     private SpoutOutputCollector _collector;
     private Random _rand;
     BigInteger _currTransaction;
     int _maxTransactionActive;
     StateInitializer _initializer;
-    
-    
+
     public TransactionalSpoutCoordinator(ITransactionalSpout spout) {
         _spout = spout;
     }
-    
+
     public ITransactionalSpout getSpout() {
         return _spout;
     }
-    
+
     @Override
     public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
         _rand = new Random(Utils.secureRandomLong());
@@ -78,7 +76,7 @@ public class TransactionalSpoutCoordinator extends BaseRichSpout {
         _coordinator = _spout.getCoordinator(conf, context);
         _currTransaction = getStoredCurrTransaction(_state);
         Object active = conf.get(Config.TOPOLOGY_MAX_SPOUT_PENDING);
-        if(active==null) {
+        if (active == null) {
             _maxTransactionActive = 1;
         } else {
             _maxTransactionActive = Utils.getInt(active);
@@ -100,10 +98,10 @@ public class TransactionalSpoutCoordinator extends BaseRichSpout {
     public void ack(Object msgId) {
         TransactionAttempt tx = (TransactionAttempt) msgId;
         TransactionStatus status = _activeTx.get(tx.getTransactionId());
-        if(status!=null && tx.equals(status.attempt)) {
-            if(status.status==AttemptStatus.PROCESSING) {
+        if (status != null && tx.equals(status.attempt)) {
+            if (status.status == AttemptStatus.PROCESSING) {
                 status.status = AttemptStatus.PROCESSED;
-            } else if(status.status==AttemptStatus.COMMITTING) {
+            } else if (status.status == AttemptStatus.COMMITTING) {
                 _activeTx.remove(tx.getTransactionId());
                 _coordinatorState.cleanupBefore(tx.getTransactionId());
                 _currTransaction = nextTransactionId(tx.getTransactionId());
@@ -117,12 +115,12 @@ public class TransactionalSpoutCoordinator extends BaseRichSpout {
     public void fail(Object msgId) {
         TransactionAttempt tx = (TransactionAttempt) msgId;
         TransactionStatus stored = _activeTx.remove(tx.getTransactionId());
-        if(stored!=null && tx.equals(stored.attempt)) {
+        if (stored != null && tx.equals(stored.attempt)) {
             _activeTx.tailMap(tx.getTransactionId()).clear();
             sync();
         }
     }
-    
+
     @Override
     public void declareOutputFields(OutputFieldsDeclarer declarer) {
         // in partitioned example, in case an emitter task receives a later transaction than it's emitted so far,
@@ -130,24 +128,23 @@ public class TransactionalSpoutCoordinator extends BaseRichSpout {
         declarer.declareStream(TRANSACTION_BATCH_STREAM_ID, new Fields("tx", "tx-meta", "committed-txid"));
         declarer.declareStream(TRANSACTION_COMMIT_STREAM_ID, new Fields("tx"));
     }
-    
+
     private void sync() {
         // note that sometimes the number of active transactions may be less than max_spout_pending, e.g.
         // max_spout_pending = 3
         // tx 1, 2, 3 active, tx 2 is acked. there won't be a commit for tx 2 (because tx 1 isn't committed yet),
         // and there won't be a batch for tx 4 because max_spout_pending transactions are already active
         TransactionStatus maybeCommit = _activeTx.get(_currTransaction);
-        if(maybeCommit!=null && maybeCommit.status == AttemptStatus.PROCESSED) {
+        if (maybeCommit != null && maybeCommit.status == AttemptStatus.PROCESSED) {
             maybeCommit.status = AttemptStatus.COMMITTING;
             _collector.emit(TRANSACTION_COMMIT_STREAM_ID, new Values(maybeCommit.attempt), maybeCommit.attempt);
         }
-        
+
         try {
-            if(_activeTx.size() < _maxTransactionActive) {
+            if (_activeTx.size() < _maxTransactionActive) {
                 BigInteger curr = _currTransaction;
-                for(int i=0; i<_maxTransactionActive; i++) {
-                    if((_coordinatorState.hasCache(curr) || _coordinator.isReady())
-                            && !_activeTx.containsKey(curr)) {
+                for (int i = 0; i < _maxTransactionActive; i++) {
+                    if ((_coordinatorState.hasCache(curr) || _coordinator.isReady()) && !_activeTx.containsKey(curr)) {
                         TransactionAttempt attempt = new TransactionAttempt(curr, _rand.nextLong());
                         Object state = _coordinatorState.getState(curr, _initializer);
                         _activeTx.put(curr, new TransactionStatus(attempt));
@@ -155,8 +152,8 @@ public class TransactionalSpoutCoordinator extends BaseRichSpout {
                     }
                     curr = nextTransactionId(curr);
                 }
-            }     
-        } catch(FailedException e) {
+            }
+        } catch (FailedException e) {
             LOG.warn("Failed to get metadata for a transaction", e);
         }
     }
@@ -167,17 +164,15 @@ public class TransactionalSpoutCoordinator extends BaseRichSpout {
         ret.setMaxTaskParallelism(1);
         return ret;
     }
-    
+
     private static enum AttemptStatus {
-        PROCESSING,
-        PROCESSED,
-        COMMITTING
+        PROCESSING, PROCESSED, COMMITTING
     }
-    
+
     private static class TransactionStatus {
         TransactionAttempt attempt;
         AttemptStatus status;
-        
+
         public TransactionStatus(TransactionAttempt attempt) {
             this.attempt = attempt;
             this.status = AttemptStatus.PROCESSING;
@@ -186,28 +181,29 @@ public class TransactionalSpoutCoordinator extends BaseRichSpout {
         @Override
         public String toString() {
             return attempt.toString() + " <" + status.toString() + ">";
-        }        
+        }
     }
-    
-    
+
     private BigInteger nextTransactionId(BigInteger id) {
         return id.add(BigInteger.ONE);
     }
-    
+
     private BigInteger previousTransactionId(BigInteger id) {
-        if(id.equals(INIT_TXID)) {
+        if (id.equals(INIT_TXID)) {
             return null;
         } else {
             return id.subtract(BigInteger.ONE);
         }
-    }    
-    
+    }
+
     private BigInteger getStoredCurrTransaction(TransactionalState state) {
         BigInteger ret = (BigInteger) state.getData(CURRENT_TX);
-        if(ret==null) return INIT_TXID;
-        else return ret;
+        if (ret == null)
+            return INIT_TXID;
+        else
+            return ret;
     }
-    
+
     private class StateInitializer implements RotatingTransactionalState.StateInitializer {
         @Override
         public Object init(BigInteger txid, Object lastState) {

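To make the sync() comment above concrete, a hand-worked trace under TOPOLOGY_MAX_SPOUT_PENDING = 3 (the config lines are ordinary Storm usage; the trace is an illustration, not code from this commit):

    Config conf = new Config();
    conf.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, 3); // read into _maxTransactionActive in open()

    // _activeTx = {1: PROCESSING, 2: PROCESSING, 3: PROCESSING}
    // ack(tx 2): tx 2 -> PROCESSED, but sync() emits no commit (tx 1, the current
    //            transaction, is still PROCESSING) and no batch for tx 4 (three
    //            transactions are already active)
    // ack(tx 1): tx 1 -> PROCESSED; sync() emits it on TRANSACTION_COMMIT_STREAM_ID
    //            and marks it COMMITTING
    // ack of tx 1's commit tuple: tx 1 is removed, _currTransaction advances to 2,
    //            and the freed slot lets sync() emit a batch for tx 4
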
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/transactional/TransactionalTopologyBuilder.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/transactional/TransactionalTopologyBuilder.java b/jstorm-core/src/main/java/backtype/storm/transactional/TransactionalTopologyBuilder.java
index 98d1163..e775eb5 100755
--- a/jstorm-core/src/main/java/backtype/storm/transactional/TransactionalTopologyBuilder.java
+++ b/jstorm-core/src/main/java/backtype/storm/transactional/TransactionalTopologyBuilder.java
@@ -50,8 +50,7 @@ import java.util.Map;
 import java.util.Set;
 
 /**
- * Trident subsumes the functionality provided by transactional topologies, so this 
- * class is deprecated.
+ * Trident subsumes the functionality provided by transactional topologies, so this class is deprecated.
  * 
  */
 @Deprecated
@@ -62,16 +61,16 @@ public class TransactionalTopologyBuilder {
     Map<String, Component> _bolts = new HashMap<String, Component>();
     Integer _spoutParallelism;
     List<Map> _spoutConfs = new ArrayList();
-    
+
     // id is used to store the state of this transactional spout in zookeeper
-    // it would be very dangerous to have 2 topologies active with the same id in the same cluster    
+    // it would be very dangerous to have 2 topologies active with the same id in the same cluster
     public TransactionalTopologyBuilder(String id, String spoutId, ITransactionalSpout spout, Number spoutParallelism) {
         _id = id;
         _spoutId = spoutId;
         _spout = spout;
         _spoutParallelism = (spoutParallelism == null) ? null : spoutParallelism.intValue();
     }
-    
+
     public TransactionalTopologyBuilder(String id, String spoutId, ITransactionalSpout spout) {
         this(id, spoutId, spout, null);
     }
@@ -79,27 +78,27 @@ public class TransactionalTopologyBuilder {
     public TransactionalTopologyBuilder(String id, String spoutId, IPartitionedTransactionalSpout spout, Number spoutParallelism) {
         this(id, spoutId, new PartitionedTransactionalSpoutExecutor(spout), spoutParallelism);
     }
-    
+
     public TransactionalTopologyBuilder(String id, String spoutId, IPartitionedTransactionalSpout spout) {
         this(id, spoutId, spout, null);
     }
-    
+
     public TransactionalTopologyBuilder(String id, String spoutId, IOpaquePartitionedTransactionalSpout spout, Number spoutParallelism) {
         this(id, spoutId, new OpaquePartitionedTransactionalSpoutExecutor(spout), spoutParallelism);
     }
-    
+
     public TransactionalTopologyBuilder(String id, String spoutId, IOpaquePartitionedTransactionalSpout spout) {
         this(id, spoutId, spout, null);
     }
-    
+
     public SpoutDeclarer getSpoutDeclarer() {
         return new SpoutDeclarerImpl();
     }
-    
+
     public BoltDeclarer setBolt(String id, IBatchBolt bolt) {
         return setBolt(id, bolt, null);
     }
-    
+
     public BoltDeclarer setBolt(String id, IBatchBolt bolt, Number parallelism) {
         return setBolt(id, new BatchBoltExecutor(bolt), parallelism, bolt instanceof ICommitter);
     }
@@ -107,86 +106,79 @@ public class TransactionalTopologyBuilder {
     public BoltDeclarer setCommitterBolt(String id, IBatchBolt bolt) {
         return setCommitterBolt(id, bolt, null);
     }
-    
+
     public BoltDeclarer setCommitterBolt(String id, IBatchBolt bolt, Number parallelism) {
         return setBolt(id, new BatchBoltExecutor(bolt), parallelism, true);
-    }      
-    
+    }
+
     public BoltDeclarer setBolt(String id, IBasicBolt bolt) {
         return setBolt(id, bolt, null);
-    }    
-    
+    }
+
     public BoltDeclarer setBolt(String id, IBasicBolt bolt, Number parallelism) {
         return setBolt(id, new BasicBoltExecutor(bolt), parallelism, false);
     }
-    
+
     private BoltDeclarer setBolt(String id, IRichBolt bolt, Number parallelism, boolean committer) {
         Integer p = null;
-        if(parallelism!=null) p = parallelism.intValue();
+        if (parallelism != null)
+            p = parallelism.intValue();
         Component component = new Component(bolt, p, committer);
         _bolts.put(id, component);
         return new BoltDeclarerImpl(component);
     }
-    
+
     public TopologyBuilder buildTopologyBuilder() {
         String coordinator = _spoutId + "/coordinator";
         TopologyBuilder builder = new TopologyBuilder();
         SpoutDeclarer declarer = builder.setSpout(coordinator, new TransactionalSpoutCoordinator(_spout));
-        for(Map conf: _spoutConfs) {
+        for (Map conf : _spoutConfs) {
             declarer.addConfigurations(conf);
         }
         declarer.addConfiguration(Config.TOPOLOGY_TRANSACTIONAL_ID, _id);
 
-        BoltDeclarer emitterDeclarer = 
-                builder.setBolt(_spoutId,
-                        new CoordinatedBolt(new TransactionalSpoutBatchExecutor(_spout),
-                                             null,
-                                             null),
-                        _spoutParallelism)
-                .allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_BATCH_STREAM_ID)
-                .addConfiguration(Config.TOPOLOGY_TRANSACTIONAL_ID, _id);
-        if(_spout instanceof ICommitterTransactionalSpout) {
+        BoltDeclarer emitterDeclarer =
+                builder.setBolt(_spoutId, new CoordinatedBolt(new TransactionalSpoutBatchExecutor(_spout), null, null), _spoutParallelism)
+                        .allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_BATCH_STREAM_ID)
+                        .addConfiguration(Config.TOPOLOGY_TRANSACTIONAL_ID, _id);
+        if (_spout instanceof ICommitterTransactionalSpout) {
             emitterDeclarer.allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID);
         }
-        for(String id: _bolts.keySet()) {
+        for (String id : _bolts.keySet()) {
             Component component = _bolts.get(id);
             Map<String, SourceArgs> coordinatedArgs = new HashMap<String, SourceArgs>();
-            for(String c: componentBoltSubscriptions(component)) {
+            for (String c : componentBoltSubscriptions(component)) {
                 coordinatedArgs.put(c, SourceArgs.all());
             }
-            
+
             IdStreamSpec idSpec = null;
-            if(component.committer) {
-                idSpec = IdStreamSpec.makeDetectSpec(coordinator, TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID);          
+            if (component.committer) {
+                idSpec = IdStreamSpec.makeDetectSpec(coordinator, TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID);
             }
-            BoltDeclarer input = builder.setBolt(id,
-                                                  new CoordinatedBolt(component.bolt,
-                                                                      coordinatedArgs,
-                                                                      idSpec),
-                                                  component.parallelism);
-            for(Map conf: component.componentConfs) {
+            BoltDeclarer input = builder.setBolt(id, new CoordinatedBolt(component.bolt, coordinatedArgs, idSpec), component.parallelism);
+            for (Map conf : component.componentConfs) {
                 input.addConfigurations(conf);
             }
-            for(String c: componentBoltSubscriptions(component)) {
+            for (String c : componentBoltSubscriptions(component)) {
                 input.directGrouping(c, Constants.COORDINATED_STREAM_ID);
             }
-            for(InputDeclaration d: component.declarations) {
+            for (InputDeclaration d : component.declarations) {
                 d.declare(input);
             }
-            if(component.committer) {
-                input.allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID);                
+            if (component.committer) {
+                input.allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID);
             }
         }
         return builder;
     }
-    
+
     public StormTopology buildTopology() {
         return buildTopologyBuilder().createTopology();
     }
-    
+
     private Set<String> componentBoltSubscriptions(Component component) {
         Set<String> ret = new HashSet<String>();
-        for(InputDeclaration d: component.declarations) {
+        for (InputDeclaration d : component.declarations) {
             ret.add(d.getComponent());
         }
         return ret;
@@ -198,34 +190,35 @@ public class TransactionalTopologyBuilder {
         public List<InputDeclaration> declarations = new ArrayList<InputDeclaration>();
         public List<Map> componentConfs = new ArrayList<Map>();
         public boolean committer;
-        
+
         public Component(IRichBolt bolt, Integer parallelism, boolean committer) {
             this.bolt = bolt;
             this.parallelism = parallelism;
             this.committer = committer;
         }
     }
-    
+
     private static interface InputDeclaration {
         void declare(InputDeclarer declarer);
+
         String getComponent();
     }
-    
+
     private class SpoutDeclarerImpl extends BaseConfigurationDeclarer<SpoutDeclarer> implements SpoutDeclarer {
         @Override
         public SpoutDeclarer addConfigurations(Map conf) {
             _spoutConfs.add(conf);
             return this;
-        }        
+        }
     }
-    
+
     private class BoltDeclarerImpl extends BaseConfigurationDeclarer<BoltDeclarer> implements BoltDeclarer {
         Component _component;
-        
+
         public BoltDeclarerImpl(Component component) {
             _component = component;
         }
-        
+
         @Override
         public BoltDeclarer fieldsGrouping(final String component, final Fields fields) {
             addDeclaration(new InputDeclaration() {
@@ -237,7 +230,7 @@ public class TransactionalTopologyBuilder {
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
             return this;
         }
@@ -248,12 +241,12 @@ public class TransactionalTopologyBuilder {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.fieldsGrouping(component, streamId, fields);
-                }                
+                }
 
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
             return this;
         }
@@ -264,12 +257,12 @@ public class TransactionalTopologyBuilder {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.globalGrouping(component);
-                }                
+                }
 
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
             return this;
         }
@@ -280,12 +273,12 @@ public class TransactionalTopologyBuilder {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.globalGrouping(component, streamId);
-                }                
+                }
 
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
             return this;
         }
@@ -296,12 +289,12 @@ public class TransactionalTopologyBuilder {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.shuffleGrouping(component);
-                }                
+                }
 
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
             return this;
         }
@@ -312,12 +305,12 @@ public class TransactionalTopologyBuilder {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.shuffleGrouping(component, streamId);
-                }                
+                }
 
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
             return this;
         }
@@ -328,12 +321,12 @@ public class TransactionalTopologyBuilder {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.localOrShuffleGrouping(component);
-                }                
+                }
 
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
             return this;
         }
@@ -345,7 +338,7 @@ public class TransactionalTopologyBuilder {
                 public void declare(InputDeclarer declarer) {
                     declarer.localOrShuffleGrouping(component, streamId);
                 }
-                
+
                 @Override
                 public String getComponent() {
                     return component;
@@ -353,7 +346,7 @@ public class TransactionalTopologyBuilder {
             });
             return this;
         }
-        
+
         @Override
         public BoltDeclarer localFirstGrouping(final String component) {
             addDeclaration(new InputDeclaration() {
@@ -361,7 +354,7 @@ public class TransactionalTopologyBuilder {
                 public void declare(InputDeclarer declarer) {
                     declarer.localFirstGrouping(component);
                 }
-                
+
                 @Override
                 public String getComponent() {
                     return component;
@@ -369,7 +362,7 @@ public class TransactionalTopologyBuilder {
             });
             return this;
         }
-        
+
         @Override
         public BoltDeclarer localFirstGrouping(final String component, final String streamId) {
             addDeclaration(new InputDeclaration() {
@@ -377,7 +370,7 @@ public class TransactionalTopologyBuilder {
                 public void declare(InputDeclarer declarer) {
                     declarer.localFirstGrouping(component, streamId);
                 }
-                
+
                 @Override
                 public String getComponent() {
                     return component;
@@ -385,19 +378,19 @@ public class TransactionalTopologyBuilder {
             });
             return this;
         }
-        
+
         @Override
         public BoltDeclarer noneGrouping(final String component) {
             addDeclaration(new InputDeclaration() {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.noneGrouping(component);
-                }                
+                }
 
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
             return this;
         }
@@ -408,12 +401,12 @@ public class TransactionalTopologyBuilder {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.noneGrouping(component, streamId);
-                }                
+                }
 
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
             return this;
         }
@@ -424,12 +417,12 @@ public class TransactionalTopologyBuilder {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.allGrouping(component);
-                }                
+                }
 
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
             return this;
         }
@@ -440,12 +433,12 @@ public class TransactionalTopologyBuilder {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.allGrouping(component, streamId);
-                }                
+                }
 
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
             return this;
         }
@@ -456,12 +449,12 @@ public class TransactionalTopologyBuilder {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.directGrouping(component);
-                }                
+                }
 
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
             return this;
         }
@@ -472,12 +465,12 @@ public class TransactionalTopologyBuilder {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.directGrouping(component, streamId);
-                }                
+                }
 
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
             return this;
         }
@@ -498,14 +491,14 @@ public class TransactionalTopologyBuilder {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.customGrouping(component, grouping);
-                }                
+                }
 
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
-            return this;        
+            return this;
         }
 
         @Override
@@ -514,12 +507,12 @@ public class TransactionalTopologyBuilder {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.customGrouping(component, streamId, grouping);
-                }                
+                }
 
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
             return this;
         }
@@ -530,16 +523,16 @@ public class TransactionalTopologyBuilder {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.grouping(stream, grouping);
-                }                
+                }
 
                 @Override
                 public String getComponent() {
                     return stream.get_componentId();
-                }                
+                }
             });
             return this;
         }
-        
+
         private void addDeclaration(InputDeclaration declaration) {
             _component.declarations.add(declaration);
         }

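A usage sketch for the builder (the spout and bolt classes are hypothetical; only the builder calls come from the class above):

    TransactionalTopologyBuilder builder =
            new TransactionalTopologyBuilder("word-count-tx", "spout", new MyTransactionalSpout(), 2);
    builder.setBolt("count", new MyBatchBolt(), 4)
           .shuffleGrouping("spout");
    builder.setCommitterBolt("commit", new MyCommitterBolt())
           .globalGrouping("count");
    StormTopology topology = builder.buildTopology();
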
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/transactional/partitioned/IOpaquePartitionedTransactionalSpout.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/transactional/partitioned/IOpaquePartitionedTransactionalSpout.java b/jstorm-core/src/main/java/backtype/storm/transactional/partitioned/IOpaquePartitionedTransactionalSpout.java
index 8d1f60b..35fb1c6 100755
--- a/jstorm-core/src/main/java/backtype/storm/transactional/partitioned/IOpaquePartitionedTransactionalSpout.java
+++ b/jstorm-core/src/main/java/backtype/storm/transactional/partitioned/IOpaquePartitionedTransactionalSpout.java
@@ -24,33 +24,34 @@ import backtype.storm.transactional.TransactionAttempt;
 import java.util.Map;
 
 /**
- * This defines a transactional spout which does *not* necessarily
- * replay the same batch every time it emits a batch for a transaction id.
+ * This defines a transactional spout which does *not* necessarily replay the same batch every time it emits a batch for a transaction id.
  */
 public interface IOpaquePartitionedTransactionalSpout<T> extends IComponent {
     public interface Coordinator {
         /**
          * Returns true if it's ok to start a new transaction, false otherwise (this transaction will be skipped).
          * 
-         * You should sleep here if you want a delay between asking for the next transaction (this will be called 
-         * repeatedly in a loop).
+         * You should sleep here if you want a delay between asking for the next transaction (this will be called repeatedly in a loop).
          */
         boolean isReady();
+
         void close();
     }
-    
+
     public interface Emitter<X> {
         /**
-         * Emit a batch of tuples for a partition/transaction. 
+         * Emit a batch of tuples for a partition/transaction.
          * 
-         * Return the metadata describing this batch that will be used as lastPartitionMeta
-         * for defining the parameters of the next batch.
+         * Return the metadata describing this batch that will be used as lastPartitionMeta for defining the parameters of the next batch.
          */
         X emitPartitionBatch(TransactionAttempt tx, BatchOutputCollector collector, int partition, X lastPartitionMeta);
+
         int numPartitions();
+
         void close();
     }
-    
-    Emitter<T> getEmitter(Map conf, TopologyContext context);     
-    Coordinator getCoordinator(Map conf, TopologyContext context);     
+
+    Emitter<T> getEmitter(Map conf, TopologyContext context);
+
+    Coordinator getCoordinator(Map conf, TopologyContext context);
 }

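A minimal emitter sketch for this interface over a fictional offset-based source; the batch size, partition count, and read() helper are assumptions, not part of the interface:

    public class OffsetEmitter implements IOpaquePartitionedTransactionalSpout.Emitter<Long> {
        public Long emitPartitionBatch(TransactionAttempt tx, BatchOutputCollector collector,
                                       int partition, Long lastPartitionMeta) {
            long start = (lastPartitionMeta == null) ? 0L : lastPartitionMeta;
            long end = start + 100;                       // hypothetical batch size
            for (long offset = start; offset < end; offset++) {
                collector.emit(new Values(tx, read(partition, offset)));
            }
            return end;                                   // becomes lastPartitionMeta next time
        }

        public int numPartitions() { return 4; }          // hypothetical fixed count

        public void close() { }

        private Object read(int partition, long offset) { // stand-in for the real source
            return partition + ":" + offset;
        }
    }
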
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/transactional/partitioned/IPartitionedTransactionalSpout.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/transactional/partitioned/IPartitionedTransactionalSpout.java b/jstorm-core/src/main/java/backtype/storm/transactional/partitioned/IPartitionedTransactionalSpout.java
index e428328..7b1e4fb 100755
--- a/jstorm-core/src/main/java/backtype/storm/transactional/partitioned/IPartitionedTransactionalSpout.java
+++ b/jstorm-core/src/main/java/backtype/storm/transactional/partitioned/IPartitionedTransactionalSpout.java
@@ -24,46 +24,43 @@ import backtype.storm.coordination.BatchOutputCollector;
 import java.util.Map;
 
 /**
- * This interface defines a transactional spout that reads its tuples from a partitioned set of 
- * brokers. It automates the storing of metadata for each partition to ensure that the same batch
- * is always emitted for the same transaction id. The partition metadata is stored in Zookeeper.
+ * This interface defines a transactional spout that reads its tuples from a partitioned set of brokers. It automates the storing of metadata for each partition
+ * to ensure that the same batch is always emitted for the same transaction id. The partition metadata is stored in Zookeeper.
  */
 public interface IPartitionedTransactionalSpout<T> extends IComponent {
     public interface Coordinator {
         /**
-         * Return the number of partitions currently in the source of data. The idea is
-         * is that if a new partition is added and a prior transaction is replayed, it doesn't
-         * emit tuples for the new partition because it knows how many partitions were in 
-         * that transaction.
+         * Return the number of partitions currently in the source of data. The idea is that if a new partition is added and a prior transaction is replayed,
+         * it doesn't emit tuples for the new partition because it knows how many partitions were in that transaction.
          */
         int numPartitions();
-        
+
         /**
          * Returns true if it's ok to start a new transaction, false otherwise (this transaction will be skipped).
          * 
-         * You should sleep here if you want a delay between asking for the next transaction (this will be called 
-         * repeatedly in a loop).
+         * You should sleep here if you want a delay between asking for the next transaction (this will be called repeatedly in a loop).
          */
         boolean isReady();
-                
+
         void close();
     }
-    
+
     public interface Emitter<X> {
         /**
-         * Emit a batch of tuples for a partition/transaction that's never been emitted before.
-         * Return the metadata that can be used to reconstruct this partition/batch in the future.
+         * Emit a batch of tuples for a partition/transaction that's never been emitted before. Return the metadata that can be used to reconstruct this
+         * partition/batch in the future.
          */
         X emitPartitionBatchNew(TransactionAttempt tx, BatchOutputCollector collector, int partition, X lastPartitionMeta);
 
         /**
-         * Emit a batch of tuples for a partition/transaction that has been emitted before, using
-         * the metadata created when it was first emitted.
+         * Emit a batch of tuples for a partition/transaction that has been emitted before, using the metadata created when it was first emitted.
          */
         void emitPartitionBatch(TransactionAttempt tx, BatchOutputCollector collector, int partition, X partitionMeta);
+
         void close();
     }
-    
+
     Coordinator getCoordinator(Map conf, TopologyContext context);
-    Emitter<T> getEmitter(Map conf, TopologyContext context);      
+
+    Emitter<T> getEmitter(Map conf, TopologyContext context);
 }

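A sketch of the replay contract with hypothetical range metadata: emitPartitionBatchNew chooses the next range and returns it for storage in Zookeeper, while emitPartitionBatch re-emits exactly the stored range:

    public class RangeEmitter implements IPartitionedTransactionalSpout.Emitter<long[]> {
        public long[] emitPartitionBatchNew(TransactionAttempt tx, BatchOutputCollector collector,
                                            int partition, long[] lastPartitionMeta) {
            long start = (lastPartitionMeta == null) ? 0L : lastPartitionMeta[1];
            long[] meta = new long[] { start, start + 100 };    // hypothetical batch size
            emitPartitionBatch(tx, collector, partition, meta); // same tuples a replay would emit
            return meta;                                        // persisted for future replays
        }

        public void emitPartitionBatch(TransactionAttempt tx, BatchOutputCollector collector,
                                       int partition, long[] partitionMeta) {
            for (long offset = partitionMeta[0]; offset < partitionMeta[1]; offset++) {
                collector.emit(new Values(tx, partition + ":" + offset)); // stand-in payload
            }
        }

        public void close() { }
    }
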
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/transactional/partitioned/OpaquePartitionedTransactionalSpoutExecutor.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/transactional/partitioned/OpaquePartitionedTransactionalSpoutExecutor.java b/jstorm-core/src/main/java/backtype/storm/transactional/partitioned/OpaquePartitionedTransactionalSpoutExecutor.java
index aabcb7a..4f894d9 100755
--- a/jstorm-core/src/main/java/backtype/storm/transactional/partitioned/OpaquePartitionedTransactionalSpoutExecutor.java
+++ b/jstorm-core/src/main/java/backtype/storm/transactional/partitioned/OpaquePartitionedTransactionalSpoutExecutor.java
@@ -33,17 +33,16 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.TreeMap;
 
-
 public class OpaquePartitionedTransactionalSpoutExecutor implements ICommitterTransactionalSpout<Object> {
     IOpaquePartitionedTransactionalSpout _spout;
-    
+
     public class Coordinator implements ITransactionalSpout.Coordinator<Object> {
         IOpaquePartitionedTransactionalSpout.Coordinator _coordinator;
 
         public Coordinator(Map conf, TopologyContext context) {
             _coordinator = _spout.getCoordinator(conf, context);
         }
-        
+
         @Override
         public Object initializeTransaction(BigInteger txid, Object prevMetadata) {
             return null;
@@ -52,14 +51,14 @@ public class OpaquePartitionedTransactionalSpoutExecutor implements ICommitterTr
         @Override
         public boolean isReady() {
             return _coordinator.isReady();
-        }        
+        }
 
         @Override
         public void close() {
             _coordinator.close();
-        }        
+        }
     }
-    
+
     public class Emitter implements ICommitterTransactionalSpout.Emitter {
         IOpaquePartitionedTransactionalSpout.Emitter _emitter;
         TransactionalState _state;
@@ -67,21 +66,21 @@ public class OpaquePartitionedTransactionalSpoutExecutor implements ICommitterTr
         Map<Integer, RotatingTransactionalState> _partitionStates = new HashMap<Integer, RotatingTransactionalState>();
         int _index;
         int _numTasks;
-        
+
         public Emitter(Map conf, TopologyContext context) {
             _emitter = _spout.getEmitter(conf, context);
             _index = context.getThisTaskIndex();
             _numTasks = context.getComponentTasks(context.getThisComponentId()).size();
-            _state = TransactionalState.newUserState(conf, (String) conf.get(Config.TOPOLOGY_TRANSACTIONAL_ID), getComponentConfiguration()); 
+            _state = TransactionalState.newUserState(conf, (String) conf.get(Config.TOPOLOGY_TRANSACTIONAL_ID), getComponentConfiguration());
             List<String> existingPartitions = _state.list("");
-            for(String p: existingPartitions) {
+            for (String p : existingPartitions) {
                 int partition = Integer.parseInt(p);
-                if((partition - _index) % _numTasks == 0) {
+                if ((partition - _index) % _numTasks == 0) {
                     _partitionStates.put(partition, new RotatingTransactionalState(_state, p));
                 }
             }
         }
-        
+
         @Override
         public void emitBatch(TransactionAttempt tx, Object coordinatorMeta, BatchOutputCollector collector) {
             Map<Integer, Object> metas = new HashMap<Integer, Object>();
@@ -89,21 +88,22 @@ public class OpaquePartitionedTransactionalSpoutExecutor implements ICommitterTr
             int partitions = _emitter.numPartitions();
             Entry<BigInteger, Map<Integer, Object>> entry = _cachedMetas.lowerEntry(tx.getTransactionId());
             Map<Integer, Object> prevCached;
-            if(entry!=null) {
+            if (entry != null) {
                 prevCached = entry.getValue();
             } else {
                 prevCached = new HashMap<Integer, Object>();
             }
-            
-            for(int i=_index; i < partitions; i+=_numTasks) {
+
+            for (int i = _index; i < partitions; i += _numTasks) {
                 RotatingTransactionalState state = _partitionStates.get(i);
-                if(state==null) {
+                if (state == null) {
                     state = new RotatingTransactionalState(_state, "" + i);
                     _partitionStates.put(i, state);
                 }
                 state.removeState(tx.getTransactionId());
                 Object lastMeta = prevCached.get(i);
-                if(lastMeta==null) lastMeta = state.getLastState();
+                if (lastMeta == null)
+                    lastMeta = state.getLastState();
                 Object meta = _emitter.emitPartitionBatch(tx, collector, i, lastMeta);
                 metas.put(i, meta);
             }
@@ -111,16 +111,16 @@ public class OpaquePartitionedTransactionalSpoutExecutor implements ICommitterTr
 
         @Override
         public void cleanupBefore(BigInteger txid) {
-            for(RotatingTransactionalState state: _partitionStates.values()) {
+            for (RotatingTransactionalState state : _partitionStates.values()) {
                 state.cleanupBefore(txid);
-            }            
+            }
         }
 
         @Override
         public void commit(TransactionAttempt attempt) {
             BigInteger txid = attempt.getTransactionId();
             Map<Integer, Object> metas = _cachedMetas.remove(txid);
-            for(Integer partition: metas.keySet()) {
+            for (Integer partition : metas.keySet()) {
                 Object meta = metas.get(partition);
                 _partitionStates.get(partition).overrideState(txid, meta);
             }
@@ -130,12 +130,12 @@ public class OpaquePartitionedTransactionalSpoutExecutor implements ICommitterTr
         public void close() {
             _emitter.close();
         }
-    } 
-    
+    }
+
     public OpaquePartitionedTransactionalSpoutExecutor(IOpaquePartitionedTransactionalSpout spout) {
         _spout = spout;
     }
-    
+
     @Override
     public ITransactionalSpout.Coordinator<Object> getCoordinator(Map conf, TopologyContext context) {
         return new Coordinator(conf, context);
@@ -155,5 +155,5 @@ public class OpaquePartitionedTransactionalSpoutExecutor implements ICommitterTr
     public Map<String, Object> getComponentConfiguration() {
         return _spout.getComponentConfiguration();
     }
-    
+
 }

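The modular-arithmetic partition assignment used in the Emitter above can be checked in isolation: with 3 tasks, the task at index 1 owns partitions 1, 4, 7, and the recovery-time check agrees with the emit loop (illustrative arithmetic only):

    int numTasks = 3, index = 1, partitions = 8;
    for (int p = 0; p < partitions; p++) {
        boolean ownedPerRecoveryCheck = (p - index) % numTasks == 0; // as in the constructor
        boolean ownedPerEmitLoop = false;
        for (int i = index; i < partitions; i += numTasks) {         // as in emitBatch
            if (i == p) ownedPerEmitLoop = true;
        }
        assert ownedPerRecoveryCheck == ownedPerEmitLoop;
    }
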
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/transactional/partitioned/PartitionedTransactionalSpoutExecutor.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/transactional/partitioned/PartitionedTransactionalSpoutExecutor.java b/jstorm-core/src/main/java/backtype/storm/transactional/partitioned/PartitionedTransactionalSpoutExecutor.java
index 479dda4..8422576 100644
--- a/jstorm-core/src/main/java/backtype/storm/transactional/partitioned/PartitionedTransactionalSpoutExecutor.java
+++ b/jstorm-core/src/main/java/backtype/storm/transactional/partitioned/PartitionedTransactionalSpoutExecutor.java
@@ -29,30 +29,29 @@ import java.math.BigInteger;
 import java.util.HashMap;
 import java.util.Map;
 
-
 public class PartitionedTransactionalSpoutExecutor implements ITransactionalSpout<Integer> {
     IPartitionedTransactionalSpout _spout;
-    
+
     public PartitionedTransactionalSpoutExecutor(IPartitionedTransactionalSpout spout) {
         _spout = spout;
     }
-    
+
     public IPartitionedTransactionalSpout getPartitionedSpout() {
         return _spout;
     }
-    
+
     class Coordinator implements ITransactionalSpout.Coordinator<Integer> {
         private IPartitionedTransactionalSpout.Coordinator _coordinator;
-        
+
         public Coordinator(Map conf, TopologyContext context) {
             _coordinator = _spout.getCoordinator(conf, context);
         }
-        
+
         @Override
         public Integer initializeTransaction(BigInteger txid, Integer prevMetadata) {
             return _coordinator.numPartitions();
         }
-        
+
         @Override
         public boolean isReady() {
             return _coordinator.isReady();
@@ -61,53 +60,51 @@ public class PartitionedTransactionalSpoutExecutor implements ITransactionalSpou
         @Override
         public void close() {
             _coordinator.close();
-        }        
+        }
     }
-    
+
     class Emitter implements ITransactionalSpout.Emitter<Integer> {
         private IPartitionedTransactionalSpout.Emitter _emitter;
         private TransactionalState _state;
         private Map<Integer, RotatingTransactionalState> _partitionStates = new HashMap<Integer, RotatingTransactionalState>();
         private int _index;
         private int _numTasks;
-        
+
         public Emitter(Map conf, TopologyContext context) {
             _emitter = _spout.getEmitter(conf, context);
-            _state = TransactionalState.newUserState(conf, (String) conf.get(Config.TOPOLOGY_TRANSACTIONAL_ID), getComponentConfiguration()); 
+            _state = TransactionalState.newUserState(conf, (String) conf.get(Config.TOPOLOGY_TRANSACTIONAL_ID), getComponentConfiguration());
             _index = context.getThisTaskIndex();
             _numTasks = context.getComponentTasks(context.getThisComponentId()).size();
         }
 
         @Override
-        public void emitBatch(final TransactionAttempt tx, final Integer partitions,
-                final BatchOutputCollector collector) {
-            for(int i=_index; i < partitions; i+=_numTasks) {
-                if(!_partitionStates.containsKey(i)) {
+        public void emitBatch(final TransactionAttempt tx, final Integer partitions, final BatchOutputCollector collector) {
+            for (int i = _index; i < partitions; i += _numTasks) {
+                if (!_partitionStates.containsKey(i)) {
                     _partitionStates.put(i, new RotatingTransactionalState(_state, "" + i));
                 }
                 RotatingTransactionalState state = _partitionStates.get(i);
                 final int partition = i;
-                Object meta = state.getStateOrCreate(tx.getTransactionId(),
-                        new RotatingTransactionalState.StateInitializer() {
+                Object meta = state.getStateOrCreate(tx.getTransactionId(), new RotatingTransactionalState.StateInitializer() {
                     @Override
                     public Object init(BigInteger txid, Object lastState) {
                         return _emitter.emitPartitionBatchNew(tx, collector, partition, lastState);
                     }
                 });
                 // it's null if one of:
-                //   a) a later transaction batch was emitted before this, so we should skip this batch
-                //   b) if didn't exist and was created (in which case the StateInitializer was invoked and 
-                //      it was emitted
-                if(meta!=null) {
+                // a) a later transaction batch was emitted before this, so we should skip this batch
+                // b) it didn't exist and was created (in which case the StateInitializer was invoked and
+                // the batch was already emitted)
+                if (meta != null) {
                     _emitter.emitPartitionBatch(tx, collector, partition, meta);
                 }
             }
-            
+
         }
 
         @Override
         public void cleanupBefore(BigInteger txid) {
-            for(RotatingTransactionalState state: _partitionStates.values()) {
+            for (RotatingTransactionalState state : _partitionStates.values()) {
                 state.cleanupBefore(txid);
             }
         }
@@ -117,7 +114,7 @@ public class PartitionedTransactionalSpoutExecutor implements ITransactionalSpou
             _state.close();
             _emitter.close();
         }
-    }    
+    }
 
     @Override
     public ITransactionalSpout.Coordinator getCoordinator(Map conf, TopologyContext context) {
@@ -138,5 +135,5 @@ public class PartitionedTransactionalSpoutExecutor implements ITransactionalSpou
     public Map<String, Object> getComponentConfiguration() {
         return _spout.getComponentConfiguration();
     }
-    
+
 }

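This executor is what TransactionalTopologyBuilder wraps a partitioned spout in, so the two constructions below are equivalent (MyPartitionedSpout is hypothetical):

    IPartitionedTransactionalSpout mySpout = new MyPartitionedSpout();
    // Convenience form: the builder performs the wrapping itself.
    TransactionalTopologyBuilder a = new TransactionalTopologyBuilder("tx-id", "spout", mySpout);
    // Explicit form, equivalent to the above.
    TransactionalTopologyBuilder b = new TransactionalTopologyBuilder(
            "tx-id", "spout", new PartitionedTransactionalSpoutExecutor(mySpout), null);
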
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/transactional/state/RotatingTransactionalState.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/transactional/state/RotatingTransactionalState.java b/jstorm-core/src/main/java/backtype/storm/transactional/state/RotatingTransactionalState.java
index 20c5cd3..63aced9 100644
--- a/jstorm-core/src/main/java/backtype/storm/transactional/state/RotatingTransactionalState.java
+++ b/jstorm-core/src/main/java/backtype/storm/transactional/state/RotatingTransactionalState.java
@@ -27,19 +27,19 @@ import java.util.SortedMap;
 import java.util.TreeMap;
 
 /**
- * A map from txid to a value. Automatically deletes txids that have been committed. 
+ * A map from txid to a value. Automatically deletes txids that have been committed.
  */
 public class RotatingTransactionalState {
     public static interface StateInitializer {
         Object init(BigInteger txid, Object lastState);
-    }    
+    }
 
     private TransactionalState _state;
     private String _subdir;
     private boolean _strictOrder;
-    
+
     private TreeMap<BigInteger, Object> _curr = new TreeMap<BigInteger, Object>();
-    
+
     public RotatingTransactionalState(TransactionalState state, String subdir, boolean strictOrder) {
         _state = state;
         _subdir = subdir;
@@ -51,32 +51,35 @@ public class RotatingTransactionalState {
     public RotatingTransactionalState(TransactionalState state, String subdir) {
         this(state, subdir, false);
     }
-    
+
     public Object getLastState() {
-        if(_curr.isEmpty()) return null;
-        else return _curr.lastEntry().getValue();
+        if (_curr.isEmpty())
+            return null;
+        else
+            return _curr.lastEntry().getValue();
     }
-    
+
     public void overrideState(BigInteger txid, Object state) {
         _state.setData(txPath(txid), state);
         _curr.put(txid, state);
     }
 
     public void removeState(BigInteger txid) {
-        if(_curr.containsKey(txid)) {
+        if (_curr.containsKey(txid)) {
             _curr.remove(txid);
             _state.delete(txPath(txid));
         }
     }
-    
+
     public Object getState(BigInteger txid, StateInitializer init) {
-        if(!_curr.containsKey(txid)) {
+        if (!_curr.containsKey(txid)) {
             SortedMap<BigInteger, Object> prevMap = _curr.headMap(txid);
-            SortedMap<BigInteger, Object> afterMap = _curr.tailMap(txid);            
-            
+            SortedMap<BigInteger, Object> afterMap = _curr.tailMap(txid);
+
             BigInteger prev = null;
-            if(!prevMap.isEmpty()) prev = prevMap.lastKey();
-            
+            if (!prevMap.isEmpty())
+                prev = prevMap.lastKey();
+
             if (_strictOrder) {
                 if (prev == null && !txid.equals(TransactionalSpoutCoordinator.INIT_TXID)) {
                     throw new IllegalStateException("Trying to initialize transaction for which there should be a previous state");
@@ -88,7 +91,7 @@ public class RotatingTransactionalState {
                     throw new IllegalStateException("Expecting tx state to be initialized in strict order but there are txids after that have state");
                 }
             }
-            
+
             Object data;
             if (afterMap.isEmpty()) {
                 Object prevData;
@@ -106,11 +109,11 @@ public class RotatingTransactionalState {
         }
         return _curr.get(txid);
     }
-    
+
     public boolean hasCache(BigInteger txid) {
         return _curr.containsKey(txid);
     }
-    
+
     /**
      * Returns null if it was created, the value otherwise.
      */
@@ -122,7 +125,7 @@ public class RotatingTransactionalState {
             return null;
         }
     }
-    
+
     public void cleanupBefore(BigInteger txid) {
         Set<BigInteger> toDelete = new HashSet<BigInteger>();
         toDelete.addAll(_curr.headMap(txid).keySet());
@@ -131,21 +134,21 @@ public class RotatingTransactionalState {
             _state.delete(txPath(tx));
         }
     }
-    
+
     private void sync() {
         List<String> txids = _state.list(_subdir);
-        for(String txid_s: txids) {
+        for (String txid_s : txids) {
             Object data = _state.getData(txPath(txid_s));
             _curr.put(new BigInteger(txid_s), data);
         }
     }
-    
+
     private String txPath(BigInteger tx) {
         return txPath(tx.toString());
     }
 
     private String txPath(String tx) {
         return _subdir + "/" + tx;
-    }    
-    
+    }
+
 }

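A sketch of the caller-visible contract, assuming an existing TransactionalState handle named state; the txid and initializer are illustrative:

    RotatingTransactionalState rts = new RotatingTransactionalState(state, "partition-0");
    BigInteger txid = BigInteger.valueOf(7);
    Object v = rts.getStateOrCreate(txid, new RotatingTransactionalState.StateInitializer() {
        public Object init(BigInteger id, Object lastState) {
            return "meta-for-" + id;       // typically derived from lastState
        }
    });
    // v == null -> the initializer just ran and its result is now stored for txid
    // v != null -> txid already had state, and v is that existing value
    rts.cleanupBefore(txid);               // drops state for every txid < 7, in memory and in ZK
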
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/transactional/state/TestTransactionalState.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/transactional/state/TestTransactionalState.java b/jstorm-core/src/main/java/backtype/storm/transactional/state/TestTransactionalState.java
index 3d4a463..02b3d0d 100755
--- a/jstorm-core/src/main/java/backtype/storm/transactional/state/TestTransactionalState.java
+++ b/jstorm-core/src/main/java/backtype/storm/transactional/state/TestTransactionalState.java
@@ -32,16 +32,13 @@ import org.apache.zookeeper.data.ACL;
 public class TestTransactionalState extends TransactionalState {
 
     /**
-     * Matching constructor in absence of a default constructor in the parent
-     * class.
+     * Matching constructor in the absence of a default constructor in the parent class.
      */
     protected TestTransactionalState(Map conf, String id, Map componentConf, String subroot) {
         super(conf, id, componentConf, subroot);
     }
 
-    public static void createNode(CuratorFramework curator, 
-            String rootDir, byte[] data, List<ACL> acls, CreateMode mode)
-            throws Exception {
-       TransactionalState.createNode(curator, rootDir, data, acls, mode);
+    public static void createNode(CuratorFramework curator, String rootDir, byte[] data, List<ACL> acls, CreateMode mode) throws Exception {
+        TransactionalState.createNode(curator, rootDir, data, acls, mode);
     }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/transactional/state/TransactionalState.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/transactional/state/TransactionalState.java b/jstorm-core/src/main/java/backtype/storm/transactional/state/TransactionalState.java
index 5afcd0a..71d7cc3 100755
--- a/jstorm-core/src/main/java/backtype/storm/transactional/state/TransactionalState.java
+++ b/jstorm-core/src/main/java/backtype/storm/transactional/state/TransactionalState.java
@@ -40,25 +40,23 @@ public class TransactionalState {
     KryoValuesSerializer _ser;
     KryoValuesDeserializer _des;
     List<ACL> _zkAcls = null;
-    
+
     public static TransactionalState newUserState(Map conf, String id, Map componentConf) {
         return new TransactionalState(conf, id, componentConf, "user");
     }
-    
+
     public static TransactionalState newCoordinatorState(Map conf, String id, Map componentConf) {
-        return new TransactionalState(conf, id, componentConf, "coordinator");        
+        return new TransactionalState(conf, id, componentConf, "coordinator");
     }
-    
+
     protected TransactionalState(Map conf, String id, Map componentConf, String subroot) {
         try {
             conf = new HashMap(conf);
             // ensure that the serialization registrations are consistent with the declarations in this spout
-            if(componentConf!=null) {
-                conf.put(Config.TOPOLOGY_KRYO_REGISTER,
-                         componentConf
-                              .get(Config.TOPOLOGY_KRYO_REGISTER));
+            if (componentConf != null) {
+                conf.put(Config.TOPOLOGY_KRYO_REGISTER, componentConf.get(Config.TOPOLOGY_KRYO_REGISTER));
             }
-            String transactionalRoot = (String)conf.get(Config.TRANSACTIONAL_ZOOKEEPER_ROOT);
+            String transactionalRoot = (String) conf.get(Config.TRANSACTIONAL_ZOOKEEPER_ROOT);
             String rootDir = transactionalRoot + "/" + id + "/" + subroot;
             List<String> servers = (List<String>) getWithBackup(conf, Config.TRANSACTIONAL_ZOOKEEPER_SERVERS, Config.STORM_ZOOKEEPER_SERVERS);
             Object port = getWithBackup(conf, Config.TRANSACTIONAL_ZOOKEEPER_PORT, Config.STORM_ZOOKEEPER_PORT);
@@ -74,29 +72,24 @@ public class TransactionalState {
             } catch (KeeperException.NodeExistsException e) {
             }
             initter.close();
-                                    
+
             _curator = Utils.newCuratorStarted(conf, servers, port, rootDir, auth);
             _ser = new KryoValuesSerializer(conf);
             _des = new KryoValuesDeserializer(conf);
         } catch (Exception e) {
-           throw new RuntimeException(e);
+            throw new RuntimeException(e);
         }
     }
 
-    protected static String forPath(PathAndBytesable<String> builder, 
-            String path, byte[] data) throws Exception {
-        return (data == null) 
-            ? builder.forPath(path) 
-            : builder.forPath(path, data);
+    protected static String forPath(PathAndBytesable<String> builder, String path, byte[] data) throws Exception {
+        return (data == null) ? builder.forPath(path) : builder.forPath(path, data);
     }
 
-    protected static void createNode(CuratorFramework curator, String path,
-            byte[] data, List<ACL> acls, CreateMode mode) throws Exception {
-        ProtectACLCreateModePathAndBytesable<String> builder =
-            curator.create().creatingParentsIfNeeded();
-    
+    protected static void createNode(CuratorFramework curator, String path, byte[] data, List<ACL> acls, CreateMode mode) throws Exception {
+        ProtectACLCreateModePathAndBytesable<String> builder = curator.create().creatingParentsIfNeeded();
+
         if (acls == null) {
-            if (mode == null ) {
+            if (mode == null) {
                 TransactionalState.forPath(builder, path, data);
             } else {
                 TransactionalState.forPath(builder.withMode(mode), path, data);
@@ -111,17 +104,16 @@ public class TransactionalState {
         path = "/" + path;
         byte[] ser = _ser.serializeObject(obj);
         try {
-            if(_curator.checkExists().forPath(path)!=null) {
+            if (_curator.checkExists().forPath(path) != null) {
                 _curator.setData().forPath(path, ser);
             } else {
-                TransactionalState.createNode(_curator, path, ser, _zkAcls,
-                        CreateMode.PERSISTENT);
+                TransactionalState.createNode(_curator, path, ser, _zkAcls, CreateMode.PERSISTENT);
             }
-        } catch(Exception e) {
+        } catch (Exception e) {
             throw new RuntimeException(e);
-        }        
+        }
     }
-    
+
     public void delete(String path) {
         path = "/" + path;
         try {
@@ -130,44 +122,45 @@ public class TransactionalState {
             throw new RuntimeException(e);
         }
     }
-    
+
     public List<String> list(String path) {
         path = "/" + path;
         try {
-            if(_curator.checkExists().forPath(path)==null) {
+            if (_curator.checkExists().forPath(path) == null) {
                 return new ArrayList<String>();
             } else {
                 return _curator.getChildren().forPath(path);
             }
-        } catch(Exception e) {
+        } catch (Exception e) {
             throw new RuntimeException(e);
-        }   
+        }
     }
-    
+
     public void mkdir(String path) {
         setData(path, 7);
     }
-    
+
     public Object getData(String path) {
         path = "/" + path;
         try {
-            if(_curator.checkExists().forPath(path)!=null) {
+            if (_curator.checkExists().forPath(path) != null) {
                 return _des.deserializeObject(_curator.getData().forPath(path));
             } else {
                 return null;
             }
-        } catch(Exception e) {
+        } catch (Exception e) {
             throw new RuntimeException(e);
         }
     }
-    
+
     public void close() {
         _curator.close();
     }
-    
+
     private Object getWithBackup(Map amap, Object primary, Object backup) {
         Object ret = amap.get(primary);
-        if(ret==null) return amap.get(backup);
+        if (ret == null)
+            return amap.get(backup);
         return ret;
     }
 }
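
The reformatted class above is easiest to read alongside a usage sketch. This assumes a reachable ZooKeeper configured via storm.yaml and an id of "topo-1"; both are assumptions for illustration:

    import backtype.storm.transactional.state.TransactionalState;
    import backtype.storm.utils.Utils;
    import java.util.Map;

    public class TxStateSketch {
        public static void main(String[] args) {
            Map conf = Utils.readStormConfig();                    // loads defaults plus storm.yaml
            TransactionalState state = TransactionalState.newUserState(conf, "topo-1", null);
            state.setData("counters/global", 42);                  // Kryo-serialized under <root>/topo-1/user
            System.out.println(state.getData("counters/global"));  // 42
            System.out.println(state.list("counters"));            // [global]
            state.delete("counters/global");
            state.close();
        }
    }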

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/tuple/BatchTuple.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/tuple/BatchTuple.java b/jstorm-core/src/main/java/backtype/storm/tuple/BatchTuple.java
index 47df545..eb3d0ce 100644
--- a/jstorm-core/src/main/java/backtype/storm/tuple/BatchTuple.java
+++ b/jstorm-core/src/main/java/backtype/storm/tuple/BatchTuple.java
@@ -20,11 +20,10 @@ package backtype.storm.tuple;
 import java.util.ArrayList;
 import java.util.List;
 
-
-public class BatchTuple {
+public class BatchTuple implements ITupleExt {
     private int targetTaskId;
 
-    private List<Tuple> batch;
+    private List<Tuple> batch = new ArrayList<Tuple>();
     private int batchSize;
 
     public BatchTuple() {
@@ -37,15 +36,12 @@ public class BatchTuple {
     }
 
     public void addToBatch(Tuple tuple) {
-        if (batch == null) {
-            batch = new ArrayList<Tuple>();
-        }
         batch.add(tuple);
     }
 
     public boolean isBatchFull() {
         boolean ret = false;
-        if (batch != null && batch.size() >= batchSize)
+        if (batch.size() >= batchSize)
             ret = true;
 
         return ret;
@@ -60,7 +56,7 @@ public class BatchTuple {
     }
 
     public int currBatchSize() {
-        return batch == null ? 0 : batch.size();
+        return batch.size();
     }
 
     public void setTargetTaskId(int taskId) {
@@ -74,4 +70,16 @@ public class BatchTuple {
     public void setBatchSize(int batchSize) {
         this.batchSize = batchSize;
     }
-}
+
+    @Deprecated
+    public long getCreationTimeStamp() {
+        // batches do not track a creation time; callers always get 0
+        return 0;
+    }
+
+    @Deprecated
+    public void setCreationTimeStamp(long timeStamp) {
+        // no-op: BatchTuple does not record a creation timestamp
+    }
+}
\ No newline at end of file
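
With the eager list initialization above, the null checks in addToBatch and currBatchSize go away. A short sketch of the batching contract (the batch size of 3 and the incoming tuple are assumptions):

    import backtype.storm.tuple.BatchTuple;
    import backtype.storm.tuple.Tuple;

    public class BatchSketch {
        static void feed(BatchTuple batch, Tuple tuple) {
            batch.setBatchSize(3);       // flush threshold; 3 is arbitrary
            batch.addToBatch(tuple);     // no lazy initialization needed any more
            if (batch.isBatchFull()) {
                System.out.println("flushing " + batch.currBatchSize() + " tuples");
            }
        }
    }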

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/tuple/Fields.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/tuple/Fields.java b/jstorm-core/src/main/java/backtype/storm/tuple/Fields.java
index 9805ba6..6ba1e5c 100644
--- a/jstorm-core/src/main/java/backtype/storm/tuple/Fields.java
+++ b/jstorm-core/src/main/java/backtype/storm/tuple/Fields.java
@@ -28,26 +28,24 @@ import java.io.Serializable;
 public class Fields implements Iterable<String>, Serializable {
     private List<String> _fields;
     private Map<String, Integer> _index = new HashMap<String, Integer>();
-    
+
     public Fields(String... fields) {
         this(Arrays.asList(fields));
     }
-    
+
     public Fields(List<String> fields) {
         _fields = new ArrayList<String>(fields.size());
         for (String field : fields) {
             if (_fields.contains(field))
-                throw new IllegalArgumentException(
-                    String.format("duplicate field '%s'", field)
-                );
+                throw new IllegalArgumentException(String.format("duplicate field '%s'", field));
             _fields.add(field);
         }
         index();
     }
-    
+
     public List<Object> select(Fields selector, List<Object> tuple) {
         List<Object> ret = new ArrayList<Object>(selector.size());
-        for(String s: selector) {
+        for (String s : selector) {
             ret.add(tuple.get(_index.get(s)));
         }
         return ret;
@@ -56,7 +54,7 @@ public class Fields implements Iterable<String>, Serializable {
     public List<String> toList() {
         return new ArrayList<String>(_fields);
     }
-    
+
     public int size() {
         return _fields.size();
     }
@@ -68,27 +66,27 @@ public class Fields implements Iterable<String>, Serializable {
     public Iterator<String> iterator() {
         return _fields.iterator();
     }
-    
+
     /**
      * Returns the position of the specified field.
      */
     public int fieldIndex(String field) {
         Integer ret = _index.get(field);
-        if(ret==null) {
+        if (ret == null) {
             throw new IllegalArgumentException(field + " does not exist");
         }
         return ret;
     }
-    
+
     /**
      * Returns true if this contains the specified name of the field.
      */
     public boolean contains(String field) {
         return _index.containsKey(field);
     }
-    
+
     private void index() {
-        for(int i=0; i<_fields.size(); i++) {
+        for (int i = 0; i < _fields.size(); i++) {
             _index.put(_fields.get(i), i);
         }
     }
@@ -96,5 +94,5 @@ public class Fields implements Iterable<String>, Serializable {
     @Override
     public String toString() {
         return _fields.toString();
-    }    
+    }
 }
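
The Fields changes above are whitespace-only; for reference, select() projects a tuple onto a sub-schema by name and the constructor rejects duplicates (self-contained sketch):

    import backtype.storm.tuple.Fields;
    import java.util.Arrays;
    import java.util.List;

    public class FieldsSketch {
        public static void main(String[] args) {
            Fields schema = new Fields("word", "count");
            List<Object> tuple = Arrays.<Object>asList("storm", 3);
            System.out.println(schema.select(new Fields("count"), tuple)); // [3]
            System.out.println(schema.fieldIndex("word"));                 // 0
            new Fields("a", "a"); // throws IllegalArgumentException: duplicate field 'a'
        }
    }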

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/tuple/ITuple.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/tuple/ITuple.java b/jstorm-core/src/main/java/backtype/storm/tuple/ITuple.java
index c85848d..21696b5 100755
--- a/jstorm-core/src/main/java/backtype/storm/tuple/ITuple.java
+++ b/jstorm-core/src/main/java/backtype/storm/tuple/ITuple.java
@@ -52,60 +52,50 @@ public interface ITuple {
     public Object getValue(int i);
 
     /**
-     * Returns the String at position i in the tuple. If that field is not a String,
-     * you will get a runtime error.
+     * Returns the String at position i in the tuple. If that field is not a String, you will get a runtime error.
      */
     public String getString(int i);
 
     /**
-     * Returns the Integer at position i in the tuple. If that field is not an Integer,
-     * you will get a runtime error.
+     * Returns the Integer at position i in the tuple. If that field is not an Integer, you will get a runtime error.
      */
     public Integer getInteger(int i);
 
     /**
-     * Returns the Long at position i in the tuple. If that field is not a Long,
-     * you will get a runtime error.
+     * Returns the Long at position i in the tuple. If that field is not a Long, you will get a runtime error.
      */
     public Long getLong(int i);
 
     /**
-     * Returns the Boolean at position i in the tuple. If that field is not a Boolean,
-     * you will get a runtime error.
+     * Returns the Boolean at position i in the tuple. If that field is not a Boolean, you will get a runtime error.
      */
     public Boolean getBoolean(int i);
 
     /**
-     * Returns the Short at position i in the tuple. If that field is not a Short,
-     * you will get a runtime error.
+     * Returns the Short at position i in the tuple. If that field is not a Short, you will get a runtime error.
      */
     public Short getShort(int i);
 
     /**
-     * Returns the Byte at position i in the tuple. If that field is not a Byte,
-     * you will get a runtime error.
+     * Returns the Byte at position i in the tuple. If that field is not a Byte, you will get a runtime error.
      */
     public Byte getByte(int i);
 
     /**
-     * Returns the Double at position i in the tuple. If that field is not a Double,
-     * you will get a runtime error.
+     * Returns the Double at position i in the tuple. If that field is not a Double, you will get a runtime error.
      */
     public Double getDouble(int i);
 
     /**
-     * Returns the Float at position i in the tuple. If that field is not a Float,
-     * you will get a runtime error.
+     * Returns the Float at position i in the tuple. If that field is not a Float, you will get a runtime error.
      */
     public Float getFloat(int i);
 
     /**
-     * Returns the byte array at position i in the tuple. If that field is not a byte array,
-     * you will get a runtime error.
+     * Returns the byte array at position i in the tuple. If that field is not a byte array, you will get a runtime error.
      */
     public byte[] getBinary(int i);
 
-
     public Object getValueByField(String field);
 
     public String getStringByField(String field);
@@ -131,6 +121,4 @@ public interface ITuple {
      */
     public List<Object> getValues();
 
-
-
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/tuple/ITupleExt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/tuple/ITupleExt.java b/jstorm-core/src/main/java/backtype/storm/tuple/ITupleExt.java
new file mode 100644
index 0000000..92a7157
--- /dev/null
+++ b/jstorm-core/src/main/java/backtype/storm/tuple/ITupleExt.java
@@ -0,0 +1,25 @@
+package backtype.storm.tuple;
+
+public interface ITupleExt {
+
+    /**
+     * Get Target TaskId
+     * 
+     * @return
+     */
+    int getTargetTaskId();
+
+    void setTargetTaskId(int targetTaskId);
+
+    /**
+     * Get the timestamp (in milliseconds) at which the tuple was created.
+     * 
+     * @return
+     */
+    long getCreationTimeStamp();
+
+    /**
+     * Set the creation timestamp, in milliseconds.
+     */
+    void setCreationTimeStamp(long timeStamp);
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/tuple/MessageId.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/tuple/MessageId.java b/jstorm-core/src/main/java/backtype/storm/tuple/MessageId.java
index 688946d..329a4ae 100755
--- a/jstorm-core/src/main/java/backtype/storm/tuple/MessageId.java
+++ b/jstorm-core/src/main/java/backtype/storm/tuple/MessageId.java
@@ -29,12 +29,12 @@ import java.util.Set;
 
 public class MessageId {
     private Map<Long, Long> _anchorsToIds;
-    
+
     @Deprecated
     public static long generateId() {
         return Utils.secureRandomLong();
     }
-    
+
     public static long generateId(Random rand) {
         return rand.nextLong();
     }
@@ -42,17 +42,17 @@ public class MessageId {
     public static MessageId makeUnanchored() {
         return makeId(new HashMap<Long, Long>());
     }
-        
+
     public static MessageId makeId(Map<Long, Long> anchorsToIds) {
         return new MessageId(anchorsToIds);
     }
-        
+
     public static MessageId makeRootId(long id, long val) {
         Map<Long, Long> anchorsToIds = new HashMap<Long, Long>();
         anchorsToIds.put(id, val);
         return new MessageId(anchorsToIds);
     }
-    
+
     protected MessageId(Map<Long, Long> anchorsToIds) {
         _anchorsToIds = anchorsToIds;
     }
@@ -63,8 +63,8 @@ public class MessageId {
 
     public Set<Long> getAnchors() {
         return _anchorsToIds.keySet();
-    }    
-    
+    }
+
     @Override
     public int hashCode() {
         return _anchorsToIds.hashCode();
@@ -72,7 +72,7 @@ public class MessageId {
 
     @Override
     public boolean equals(Object other) {
-        if(other instanceof MessageId) {
+        if (other instanceof MessageId) {
             return _anchorsToIds.equals(((MessageId) other)._anchorsToIds);
         } else {
             return false;
@@ -86,7 +86,7 @@ public class MessageId {
 
     public void serialize(Output out) throws IOException {
         out.writeInt(_anchorsToIds.size(), true);
-        for(Entry<Long, Long> anchorToId: _anchorsToIds.entrySet()) {
+        for (Entry<Long, Long> anchorToId : _anchorsToIds.entrySet()) {
             out.writeLong(anchorToId.getKey());
             out.writeLong(anchorToId.getValue());
         }
@@ -95,7 +95,7 @@ public class MessageId {
     public static MessageId deserialize(Input in) throws IOException {
         int numAnchors = in.readInt(true);
         Map<Long, Long> anchorsToIds = new HashMap<Long, Long>();
-        for(int i=0; i<numAnchors; i++) {
+        for (int i = 0; i < numAnchors; i++) {
             anchorsToIds.put(in.readLong(), in.readLong());
         }
         return new MessageId(anchorsToIds);
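
The serialize/deserialize pair is untouched by this reformat; a round-trip sketch using Kryo's Input/Output directly:

    import backtype.storm.tuple.MessageId;
    import com.esotericsoftware.kryo.io.Input;
    import com.esotericsoftware.kryo.io.Output;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.util.Random;

    public class MessageIdSketch {
        public static void main(String[] args) throws IOException {
            MessageId id = MessageId.makeRootId(MessageId.generateId(new Random()), 42L);
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            Output out = new Output(bos);
            id.serialize(out);
            out.flush();
            MessageId copy = MessageId.deserialize(new Input(bos.toByteArray()));
            System.out.println(id.equals(copy)); // true: equality delegates to the anchor map
        }
    }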

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/tuple/Tuple.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/tuple/Tuple.java b/jstorm-core/src/main/java/backtype/storm/tuple/Tuple.java
index 34dc61a..95253df 100755
--- a/jstorm-core/src/main/java/backtype/storm/tuple/Tuple.java
+++ b/jstorm-core/src/main/java/backtype/storm/tuple/Tuple.java
@@ -21,38 +21,35 @@ import backtype.storm.generated.GlobalStreamId;
 import java.util.List;
 
 /**
- * The tuple is the main data structure in Storm. A tuple is a named list of values, 
- * where each value can be any type. Tuples are dynamically typed -- the types of the fields 
- * do not need to be declared. Tuples have helper methods like getInteger and getString 
- * to get field values without having to cast the result.
+ * The tuple is the main data structure in Storm. A tuple is a named list of values, where each value can be any type. Tuples are dynamically typed -- the types
+ * of the fields do not need to be declared. Tuples have helper methods like getInteger and getString to get field values without having to cast the result.
  * 
- * Storm needs to know how to serialize all the values in a tuple. By default, Storm 
- * knows how to serialize the primitive types, strings, and byte arrays. If you want to 
- * use another type, you'll need to implement and register a serializer for that type.
- * See {@link http://github.com/nathanmarz/storm/wiki/Serialization} for more info.
+ * Storm needs to know how to serialize all the values in a tuple. By default, Storm knows how to serialize the primitive types, strings, and byte arrays. If
+ * you want to use another type, you'll need to implement and register a serializer for that type. See
+ * {@link http://github.com/nathanmarz/storm/wiki/Serialization} for more info.
  */
-public interface Tuple extends ITuple{
+public interface Tuple extends ITuple {
 
     /**
      * Returns the global stream id (component + stream) of this tuple.
      */
     public GlobalStreamId getSourceGlobalStreamid();
-    
+
     /**
      * Gets the id of the component that created this tuple.
      */
     public String getSourceComponent();
-    
+
     /**
      * Gets the id of the task that created this tuple.
      */
     public int getSourceTask();
-    
+
     /**
      * Gets the id of the stream that this tuple was emitted to.
      */
     public String getSourceStreamId();
-    
+
     /**
      * Gets the message id that associated with this tuple.
      */

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/tuple/TupleExt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/tuple/TupleExt.java b/jstorm-core/src/main/java/backtype/storm/tuple/TupleExt.java
index 60676c9..8f004cc 100755
--- a/jstorm-core/src/main/java/backtype/storm/tuple/TupleExt.java
+++ b/jstorm-core/src/main/java/backtype/storm/tuple/TupleExt.java
@@ -17,13 +17,6 @@
  */
 package backtype.storm.tuple;
 
-public interface TupleExt extends Tuple {
-    /**
-     * Get Target TaskId
-     * 
-     * @return
-     */
-    int getTargetTaskId();
+public interface TupleExt extends Tuple, ITupleExt {
     
-    void setTargetTaskId(int targetTaskId);
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/tuple/TupleImpl.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/tuple/TupleImpl.java b/jstorm-core/src/main/java/backtype/storm/tuple/TupleImpl.java
index 818eff1..417774e 100755
--- a/jstorm-core/src/main/java/backtype/storm/tuple/TupleImpl.java
+++ b/jstorm-core/src/main/java/backtype/storm/tuple/TupleImpl.java
@@ -41,31 +41,29 @@ public class TupleImpl extends IndifferentAccessMap implements Seqable, Indexed,
     private GeneralTopologyContext context;
     private MessageId id;
     private IPersistentMap _meta = null;
-    
+
     public TupleImpl(GeneralTopologyContext context, List<Object> values, int taskId, String streamId, MessageId id) {
         this.values = values;
         this.taskId = taskId;
         this.streamId = streamId;
         this.id = id;
         this.context = context;
-        
+
         String componentId = context.getComponentId(taskId);
         Fields schema = context.getComponentOutputFields(componentId, streamId);
-        if(values.size()!=schema.size()) {
-            throw new IllegalArgumentException(
-                    "Tuple created with wrong number of fields. " +
-                    "Expected " + schema.size() + " fields but got " +
-                    values.size() + " fields");
+        if (values.size() != schema.size()) {
+            throw new IllegalArgumentException("Tuple created with wrong number of fields. " + "Expected " + schema.size() + " fields but got " + values.size()
+                    + " fields");
         }
     }
 
     public TupleImpl(GeneralTopologyContext context, List<Object> values, int taskId, String streamId) {
         this(context, values, taskId, streamId, MessageId.makeUnanchored());
-    }    
-    
+    }
+
     Long _processSampleStartTime = null;
     Long _executeSampleStartTime = null;
-    
+
     public void setProcessSampleStartTime(long ms) {
         _processSampleStartTime = ms;
     }
@@ -73,7 +71,7 @@ public class TupleImpl extends IndifferentAccessMap implements Seqable, Indexed,
     public Long getProcessSampleStartTime() {
         return _processSampleStartTime;
     }
-    
+
     public void setExecuteSampleStartTime(long ms) {
         _executeSampleStartTime = ms;
     }
@@ -81,13 +79,13 @@ public class TupleImpl extends IndifferentAccessMap implements Seqable, Indexed,
     public Long getExecuteSampleStartTime() {
         return _executeSampleStartTime;
     }
-    
+
     long _outAckVal = 0;
-    
+
     public void updateAckVal(long val) {
         _outAckVal = _outAckVal ^ val;
     }
-    
+
     public long getAckVal() {
         return _outAckVal;
     }
@@ -95,15 +93,15 @@ public class TupleImpl extends IndifferentAccessMap implements Seqable, Indexed,
     public int size() {
         return values.size();
     }
-    
+
     public int fieldIndex(String field) {
         return getFields().fieldIndex(field);
     }
-    
+
     public boolean contains(String field) {
         return getFields().contains(field);
     }
-    
+
     public Object getValue(int i) {
         return values.get(i);
     }
@@ -143,8 +141,7 @@ public class TupleImpl extends IndifferentAccessMap implements Seqable, Indexed,
     public byte[] getBinary(int i) {
         return (byte[]) values.get(i);
     }
-    
-    
+
     public Object getValueByField(String field) {
         return values.get(fieldIndex(field));
     }
@@ -184,11 +181,11 @@ public class TupleImpl extends IndifferentAccessMap implements Seqable, Indexed,
     public byte[] getBinaryByField(String field) {
         return (byte[]) values.get(fieldIndex(field));
     }
-    
+
     public List<Object> getValues() {
         return values;
     }
-    
+
     public Fields getFields() {
         return context.getComponentOutputFields(getSourceComponent(), getSourceStreamId());
     }
@@ -196,37 +193,37 @@ public class TupleImpl extends IndifferentAccessMap implements Seqable, Indexed,
     public List<Object> select(Fields selector) {
         return getFields().select(selector, values);
     }
-      
+
     public GlobalStreamId getSourceGlobalStreamid() {
         return new GlobalStreamId(getSourceComponent(), streamId);
     }
-    
+
     public String getSourceComponent() {
         return context.getComponentId(taskId);
     }
-    
+
     public int getSourceTask() {
         return taskId;
     }
-    
+
     public String getSourceStreamId() {
         return streamId;
     }
-    
+
     public MessageId getMessageId() {
         return id;
     }
-    
+
     @Override
     public String toString() {
-        return "source: " + getSourceComponent() + ":" + taskId + ", stream: " + streamId + ", id: "+ id.toString() + ", " + values.toString();
+        return "source: " + getSourceComponent() + ":" + taskId + ", stream: " + streamId + ", id: " + id.toString() + ", " + values.toString();
     }
-    
+
     @Override
     public boolean equals(Object other) {
         return this == other;
-    }    
-    
+    }
+
     @Override
     public int hashCode() {
         return System.identityHashCode(this);
@@ -234,25 +231,25 @@ public class TupleImpl extends IndifferentAccessMap implements Seqable, Indexed,
 
     private final Keyword makeKeyword(String name) {
         return Keyword.intern(Symbol.create(name));
-    }    
+    }
 
     /* ILookup */
     @Override
     public Object valAt(Object o) {
         try {
-            if(o instanceof Keyword) {
+            if (o instanceof Keyword) {
                 return getValueByField(((Keyword) o).getName());
-            } else if(o instanceof String) {
+            } else if (o instanceof String) {
                 return getValueByField((String) o);
             }
-        } catch(IllegalArgumentException e) {
+        } catch (IllegalArgumentException e) {
         }
         return null;
     }
 
     /* Seqable */
     public ISeq seq() {
-        if(values.size() > 0) {
+        if (values.size() > 0) {
             return new Seq(getFields().toList(), values, 0);
         }
         return null;
@@ -272,7 +269,7 @@ public class TupleImpl extends IndifferentAccessMap implements Seqable, Indexed,
 
         public Seq(IPersistentMap meta, List<String> fields, List<Object> values, int i) {
             super(meta);
-            this.fields= fields;
+            this.fields = fields;
             this.values = values;
             assert i >= 0;
             this.i = i;
@@ -283,16 +280,16 @@ public class TupleImpl extends IndifferentAccessMap implements Seqable, Indexed,
         }
 
         public ISeq next() {
-            if(i+1 < fields.size()) {
-                return new Seq(fields, values, i+1);
+            if (i + 1 < fields.size()) {
+                return new Seq(fields, values, i + 1);
             }
             return null;
         }
 
         public int count() {
-            assert fields.size() -i >= 0 : "index out of bounds";
+            assert fields.size() - i >= 0 : "index out of bounds";
             // i being the position in the fields of this seq, the remainder of the seq is the size
-            return fields.size() -i;
+            return fields.size() - i;
         }
 
         public Obj withMeta(IPersistentMap meta) {
@@ -302,7 +299,7 @@ public class TupleImpl extends IndifferentAccessMap implements Seqable, Indexed,
 
     /* Indexed */
     public Object nth(int i) {
-        if(i < values.size()) {
+        if (i < values.size()) {
             return values.get(i);
         } else {
             return null;
@@ -311,7 +308,8 @@ public class TupleImpl extends IndifferentAccessMap implements Seqable, Indexed,
 
     public Object nth(int i, Object notfound) {
         Object ret = nth(i);
-        if(ret==null) ret = notfound;
+        if (ret == null)
+            ret = notfound;
         return ret;
     }
 
@@ -319,33 +317,32 @@ public class TupleImpl extends IndifferentAccessMap implements Seqable, Indexed,
     public int count() {
         return values.size();
     }
-    
+
     /* IMeta */
     public IPersistentMap meta() {
-        if(_meta==null) {
-            _meta = new PersistentArrayMap( new Object[] {
-            makeKeyword("stream"), getSourceStreamId(), 
-            makeKeyword("component"), getSourceComponent(), 
-            makeKeyword("task"), getSourceTask()});
+        if (_meta == null) {
+            _meta =
+                    new PersistentArrayMap(new Object[] { makeKeyword("stream"), getSourceStreamId(), makeKeyword("component"), getSourceComponent(),
+                            makeKeyword("task"), getSourceTask() });
         }
         return _meta;
     }
 
     private PersistentArrayMap toMap() {
-        Object array[] = new Object[values.size()*2];
+        Object array[] = new Object[values.size() * 2];
         List<String> fields = getFields().toList();
-        for(int i=0; i < values.size(); i++) {
-            array[i*2] = fields.get(i);
-            array[(i*2)+1] = values.get(i);
+        for (int i = 0; i < values.size(); i++) {
+            array[i * 2] = fields.get(i);
+            array[(i * 2) + 1] = values.get(i);
         }
         return new PersistentArrayMap(array);
     }
 
     public IPersistentMap getMap() {
-        if(_map==null) {
+        if (_map == null) {
             setMap(toMap());
         }
         return _map;
-    }    
-    
+    }
+
 }
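
The updateAckVal XOR above is Storm's standard ack-tracking trick: every edge id is XORed in once on emit and once on ack, so the running value returns to zero exactly when the whole tuple tree has been accounted for. A standalone toy illustration (the edge ids are made up):

    public class XorAckSketch {
        public static void main(String[] args) {
            long ackVal = 0L;
            long[] edgeIds = {0x9e3779b97f4a7c15L, 0x0123456789abcdefL};
            for (long e : edgeIds) ackVal ^= e; // emit: XOR each edge id in
            for (long e : edgeIds) ackVal ^= e; // ack: XOR the same ids back out
            System.out.println(ackVal == 0);    // true -> the tuple tree is complete
        }
    }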

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/tuple/TupleImplExt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/tuple/TupleImplExt.java b/jstorm-core/src/main/java/backtype/storm/tuple/TupleImplExt.java
index 2e966a0..4017769 100755
--- a/jstorm-core/src/main/java/backtype/storm/tuple/TupleImplExt.java
+++ b/jstorm-core/src/main/java/backtype/storm/tuple/TupleImplExt.java
@@ -22,25 +22,36 @@ import java.util.List;
 import backtype.storm.task.GeneralTopologyContext;
 
 public class TupleImplExt extends TupleImpl implements TupleExt {
-    
+
     protected int targetTaskId;
-    
+    protected long creationTimeStamp = System.currentTimeMillis();
+
     public TupleImplExt(GeneralTopologyContext context, List<Object> values, int taskId, String streamId) {
         super(context, values, taskId, streamId);
     }
-    
+
     public TupleImplExt(GeneralTopologyContext context, List<Object> values, int taskId, String streamId, MessageId id) {
         super(context, values, taskId, streamId, id);
     }
-    
+
     @Override
     public int getTargetTaskId() {
         return targetTaskId;
     }
-    
+
     @Override
     public void setTargetTaskId(int targetTaskId) {
         this.targetTaskId = targetTaskId;
     }
-    
+
+    @Override
+    public long getCreationTimeStamp() {
+        return creationTimeStamp;
+    }
+
+    @Override
+    public void setCreationTimeStamp(long timeStamp) {
+        this.creationTimeStamp = timeStamp;
+    }
+
 }
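
The substantive change above is creationTimeStamp, initialized when the tuple is constructed. A downstream component could estimate in-flight latency like this (how the tuple arrives is an assumption):

    import backtype.storm.tuple.Tuple;
    import backtype.storm.tuple.TupleExt;

    public class LatencySketch {
        static void onTuple(Tuple input) {
            if (input instanceof TupleExt) {
                long ageMs = System.currentTimeMillis() - ((TupleExt) input).getCreationTimeStamp();
                System.out.println("tuple in flight for " + ageMs + " ms");
            }
        }
    }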

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/tuple/Values.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/tuple/Values.java b/jstorm-core/src/main/java/backtype/storm/tuple/Values.java
index 41bbc71..c25363b 100755
--- a/jstorm-core/src/main/java/backtype/storm/tuple/Values.java
+++ b/jstorm-core/src/main/java/backtype/storm/tuple/Values.java
@@ -20,17 +20,16 @@ package backtype.storm.tuple;
 import java.util.ArrayList;
 
 /**
- * A convenience class for making tuple values using new Values("field1", 2, 3)
- * syntax.
+ * A convenience class for making tuple values using new Values("field1", 2, 3) syntax.
  */
-public class Values extends ArrayList<Object>{
+public class Values extends ArrayList<Object> {
     public Values() {
-        
+
     }
-    
+
     public Values(Object... vals) {
         super(vals.length);
-        for(Object o: vals) {
+        for (Object o : vals) {
             add(o);
         }
     }
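
As the javadoc says, Values is just an ArrayList<Object> with a varargs constructor; it lines up positionally with a Fields declaration such as new Fields("word", "count"):

    import backtype.storm.tuple.Values;

    public class ValuesSketch {
        public static void main(String[] args) {
            Values v = new Values("storm", 3);
            System.out.println(v.get(0) + " -> " + v.get(1)); // storm -> 3
        }
    }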

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/BufferFileInputStream.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/BufferFileInputStream.java b/jstorm-core/src/main/java/backtype/storm/utils/BufferFileInputStream.java
index 1311d6d..d9fa692 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/BufferFileInputStream.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/BufferFileInputStream.java
@@ -22,7 +22,6 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Arrays;
 
-
 public class BufferFileInputStream {
     byte[] buffer;
     FileInputStream stream;
@@ -33,15 +32,15 @@ public class BufferFileInputStream {
     }
 
     public BufferFileInputStream(String file) throws FileNotFoundException {
-        this(file, 15*1024);
+        this(file, 15 * 1024);
     }
 
     public byte[] read() throws IOException {
         int length = stream.read(buffer);
-        if(length==-1) {
+        if (length == -1) {
             close();
             return new byte[0];
-        } else if(length==buffer.length) {
+        } else if (length == buffer.length) {
             return buffer;
         } else {
             return Arrays.copyOf(buffer, length);
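
Usage sketch for the chunked reader above: read() hands back at most one buffer (15 KB by default) per call, closes the stream at end of file, and then returns an empty array. The file path is made up:

    import backtype.storm.utils.BufferFileInputStream;
    import java.io.IOException;

    public class ChunkReadSketch {
        public static void main(String[] args) throws IOException {
            BufferFileInputStream in = new BufferFileInputStream("/tmp/topology.jar");
            byte[] chunk;
            while ((chunk = in.read()).length > 0) {
                System.out.println("read " + chunk.length + " bytes");
            }
        }
    }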


http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/NimbusStat.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/NimbusStat.java b/jstorm-core/src/main/java/backtype/storm/generated/NimbusStat.java
index 90badb6..9ab9bcb 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/NimbusStat.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/NimbusStat.java
@@ -34,12 +34,12 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class NimbusStat implements org.apache.thrift.TBase<NimbusStat, NimbusStat._Fields>, java.io.Serializable, Cloneable, Comparable<NimbusStat> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NimbusStat");
 
   private static final org.apache.thrift.protocol.TField HOST_FIELD_DESC = new org.apache.thrift.protocol.TField("host", org.apache.thrift.protocol.TType.STRING, (short)1);
-  private static final org.apache.thrift.protocol.TField UPTIME_SECS_FIELD_DESC = new org.apache.thrift.protocol.TField("uptime_secs", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField UPTIME_SECS_FIELD_DESC = new org.apache.thrift.protocol.TField("uptimeSecs", org.apache.thrift.protocol.TType.STRING, (short)2);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -48,12 +48,12 @@ public class NimbusStat implements org.apache.thrift.TBase<NimbusStat, NimbusSta
   }
 
   private String host; // required
-  private String uptime_secs; // required
+  private String uptimeSecs; // required
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
     HOST((short)1, "host"),
-    UPTIME_SECS((short)2, "uptime_secs");
+    UPTIME_SECS((short)2, "uptimeSecs");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -117,7 +117,7 @@ public class NimbusStat implements org.apache.thrift.TBase<NimbusStat, NimbusSta
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
     tmpMap.put(_Fields.HOST, new org.apache.thrift.meta_data.FieldMetaData("host", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.UPTIME_SECS, new org.apache.thrift.meta_data.FieldMetaData("uptime_secs", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+    tmpMap.put(_Fields.UPTIME_SECS, new org.apache.thrift.meta_data.FieldMetaData("uptimeSecs", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(NimbusStat.class, metaDataMap);
@@ -128,11 +128,11 @@ public class NimbusStat implements org.apache.thrift.TBase<NimbusStat, NimbusSta
 
   public NimbusStat(
     String host,
-    String uptime_secs)
+    String uptimeSecs)
   {
     this();
     this.host = host;
-    this.uptime_secs = uptime_secs;
+    this.uptimeSecs = uptimeSecs;
   }
 
   /**
@@ -142,8 +142,8 @@ public class NimbusStat implements org.apache.thrift.TBase<NimbusStat, NimbusSta
     if (other.is_set_host()) {
       this.host = other.host;
     }
-    if (other.is_set_uptime_secs()) {
-      this.uptime_secs = other.uptime_secs;
+    if (other.is_set_uptimeSecs()) {
+      this.uptimeSecs = other.uptimeSecs;
     }
   }
 
@@ -154,7 +154,7 @@ public class NimbusStat implements org.apache.thrift.TBase<NimbusStat, NimbusSta
   @Override
   public void clear() {
     this.host = null;
-    this.uptime_secs = null;
+    this.uptimeSecs = null;
   }
 
   public String get_host() {
@@ -180,26 +180,26 @@ public class NimbusStat implements org.apache.thrift.TBase<NimbusStat, NimbusSta
     }
   }
 
-  public String get_uptime_secs() {
-    return this.uptime_secs;
+  public String get_uptimeSecs() {
+    return this.uptimeSecs;
   }
 
-  public void set_uptime_secs(String uptime_secs) {
-    this.uptime_secs = uptime_secs;
+  public void set_uptimeSecs(String uptimeSecs) {
+    this.uptimeSecs = uptimeSecs;
   }
 
-  public void unset_uptime_secs() {
-    this.uptime_secs = null;
+  public void unset_uptimeSecs() {
+    this.uptimeSecs = null;
   }
 
-  /** Returns true if field uptime_secs is set (has been assigned a value) and false otherwise */
-  public boolean is_set_uptime_secs() {
-    return this.uptime_secs != null;
+  /** Returns true if field uptimeSecs is set (has been assigned a value) and false otherwise */
+  public boolean is_set_uptimeSecs() {
+    return this.uptimeSecs != null;
   }
 
-  public void set_uptime_secs_isSet(boolean value) {
+  public void set_uptimeSecs_isSet(boolean value) {
     if (!value) {
-      this.uptime_secs = null;
+      this.uptimeSecs = null;
     }
   }
 
@@ -215,9 +215,9 @@ public class NimbusStat implements org.apache.thrift.TBase<NimbusStat, NimbusSta
 
     case UPTIME_SECS:
       if (value == null) {
-        unset_uptime_secs();
+        unset_uptimeSecs();
       } else {
-        set_uptime_secs((String)value);
+        set_uptimeSecs((String)value);
       }
       break;
 
@@ -230,7 +230,7 @@ public class NimbusStat implements org.apache.thrift.TBase<NimbusStat, NimbusSta
       return get_host();
 
     case UPTIME_SECS:
-      return get_uptime_secs();
+      return get_uptimeSecs();
 
     }
     throw new IllegalStateException();
@@ -246,7 +246,7 @@ public class NimbusStat implements org.apache.thrift.TBase<NimbusStat, NimbusSta
     case HOST:
       return is_set_host();
     case UPTIME_SECS:
-      return is_set_uptime_secs();
+      return is_set_uptimeSecs();
     }
     throw new IllegalStateException();
   }
@@ -273,12 +273,12 @@ public class NimbusStat implements org.apache.thrift.TBase<NimbusStat, NimbusSta
         return false;
     }
 
-    boolean this_present_uptime_secs = true && this.is_set_uptime_secs();
-    boolean that_present_uptime_secs = true && that.is_set_uptime_secs();
-    if (this_present_uptime_secs || that_present_uptime_secs) {
-      if (!(this_present_uptime_secs && that_present_uptime_secs))
+    boolean this_present_uptimeSecs = true && this.is_set_uptimeSecs();
+    boolean that_present_uptimeSecs = true && that.is_set_uptimeSecs();
+    if (this_present_uptimeSecs || that_present_uptimeSecs) {
+      if (!(this_present_uptimeSecs && that_present_uptimeSecs))
         return false;
-      if (!this.uptime_secs.equals(that.uptime_secs))
+      if (!this.uptimeSecs.equals(that.uptimeSecs))
         return false;
     }
 
@@ -294,10 +294,10 @@ public class NimbusStat implements org.apache.thrift.TBase<NimbusStat, NimbusSta
     if (present_host)
       list.add(host);
 
-    boolean present_uptime_secs = true && (is_set_uptime_secs());
-    list.add(present_uptime_secs);
-    if (present_uptime_secs)
-      list.add(uptime_secs);
+    boolean present_uptimeSecs = true && (is_set_uptimeSecs());
+    list.add(present_uptimeSecs);
+    if (present_uptimeSecs)
+      list.add(uptimeSecs);
 
     return list.hashCode();
   }
@@ -320,12 +320,12 @@ public class NimbusStat implements org.apache.thrift.TBase<NimbusStat, NimbusSta
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(is_set_uptime_secs()).compareTo(other.is_set_uptime_secs());
+    lastComparison = Boolean.valueOf(is_set_uptimeSecs()).compareTo(other.is_set_uptimeSecs());
     if (lastComparison != 0) {
       return lastComparison;
     }
-    if (is_set_uptime_secs()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.uptime_secs, other.uptime_secs);
+    if (is_set_uptimeSecs()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.uptimeSecs, other.uptimeSecs);
       if (lastComparison != 0) {
         return lastComparison;
       }
@@ -337,11 +337,11 @@ public class NimbusStat implements org.apache.thrift.TBase<NimbusStat, NimbusSta
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -358,25 +358,25 @@ public class NimbusStat implements org.apache.thrift.TBase<NimbusStat, NimbusSta
     }
     first = false;
     if (!first) sb.append(", ");
-    sb.append("uptime_secs:");
-    if (this.uptime_secs == null) {
+    sb.append("uptimeSecs:");
+    if (this.uptimeSecs == null) {
       sb.append("null");
     } else {
-      sb.append(this.uptime_secs);
+      sb.append(this.uptimeSecs);
     }
     first = false;
     sb.append(")");
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_host()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'host' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'host' is unset! Struct:" + toString());
     }
 
-    if (!is_set_uptime_secs()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'uptime_secs' is unset! Struct:" + toString());
+    if (!is_set_uptimeSecs()) {
+      throw new TProtocolException("Required field 'uptimeSecs' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -385,7 +385,7 @@ public class NimbusStat implements org.apache.thrift.TBase<NimbusStat, NimbusSta
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -393,7 +393,7 @@ public class NimbusStat implements org.apache.thrift.TBase<NimbusStat, NimbusSta
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -406,7 +406,7 @@ public class NimbusStat implements org.apache.thrift.TBase<NimbusStat, NimbusSta
 
   private static class NimbusStatStandardScheme extends StandardScheme<NimbusStat> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, NimbusStat struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, NimbusStat struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -426,8 +426,8 @@ public class NimbusStat implements org.apache.thrift.TBase<NimbusStat, NimbusSta
             break;
           case 2: // UPTIME_SECS
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.uptime_secs = iprot.readString();
-              struct.set_uptime_secs_isSet(true);
+              struct.uptimeSecs = iprot.readString();
+              struct.set_uptimeSecs_isSet(true);
             } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
@@ -441,7 +441,7 @@ public class NimbusStat implements org.apache.thrift.TBase<NimbusStat, NimbusSta
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, NimbusStat struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, NimbusStat struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -450,9 +450,9 @@ public class NimbusStat implements org.apache.thrift.TBase<NimbusStat, NimbusSta
         oprot.writeString(struct.host);
         oprot.writeFieldEnd();
       }
-      if (struct.uptime_secs != null) {
+      if (struct.uptimeSecs != null) {
         oprot.writeFieldBegin(UPTIME_SECS_FIELD_DESC);
-        oprot.writeString(struct.uptime_secs);
+        oprot.writeString(struct.uptimeSecs);
         oprot.writeFieldEnd();
       }
       oprot.writeFieldStop();
@@ -470,19 +470,19 @@ public class NimbusStat implements org.apache.thrift.TBase<NimbusStat, NimbusSta
   private static class NimbusStatTupleScheme extends TupleScheme<NimbusStat> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, NimbusStat struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, NimbusStat struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       oprot.writeString(struct.host);
-      oprot.writeString(struct.uptime_secs);
+      oprot.writeString(struct.uptimeSecs);
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, NimbusStat struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, NimbusStat struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       struct.host = iprot.readString();
       struct.set_host_isSet(true);
-      struct.uptime_secs = iprot.readString();
-      struct.set_uptime_secs_isSet(true);
+      struct.uptimeSecs = iprot.readString();
+      struct.set_uptimeSecs_isSet(true);
     }
   }
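
All of the uptime_secs -> uptimeSecs churn above is mechanical fallout from regenerating the Thrift bindings after a field rename; callers migrate accessor by accessor (sketch, values made up):

    import backtype.storm.generated.NimbusStat;

    public class NimbusStatSketch {
        public static void main(String[] args) {
            NimbusStat stat = new NimbusStat("nimbus-host-1", "3600");
            // before this commit: stat.get_uptime_secs()
            System.out.println(stat.get_host() + " up " + stat.get_uptimeSecs() + "s");
        }
    }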
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/NimbusSummary.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/NimbusSummary.java b/jstorm-core/src/main/java/backtype/storm/generated/NimbusSummary.java
index 55b6e35..8e3173b 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/NimbusSummary.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/NimbusSummary.java
@@ -34,16 +34,16 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class NimbusSummary implements org.apache.thrift.TBase<NimbusSummary, NimbusSummary._Fields>, java.io.Serializable, Cloneable, Comparable<NimbusSummary> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NimbusSummary");
 
-  private static final org.apache.thrift.protocol.TField NIMBUS_MASTER_FIELD_DESC = new org.apache.thrift.protocol.TField("nimbus_master", org.apache.thrift.protocol.TType.STRUCT, (short)1);
-  private static final org.apache.thrift.protocol.TField NIMBUS_SLAVES_FIELD_DESC = new org.apache.thrift.protocol.TField("nimbus_slaves", org.apache.thrift.protocol.TType.LIST, (short)2);
-  private static final org.apache.thrift.protocol.TField SUPERVISOR_NUM_FIELD_DESC = new org.apache.thrift.protocol.TField("supervisor_num", org.apache.thrift.protocol.TType.I32, (short)3);
-  private static final org.apache.thrift.protocol.TField TOTAL_PORT_NUM_FIELD_DESC = new org.apache.thrift.protocol.TField("total_port_num", org.apache.thrift.protocol.TType.I32, (short)4);
-  private static final org.apache.thrift.protocol.TField USED_PORT_NUM_FIELD_DESC = new org.apache.thrift.protocol.TField("used_port_num", org.apache.thrift.protocol.TType.I32, (short)5);
-  private static final org.apache.thrift.protocol.TField FREE_PORT_NUM_FIELD_DESC = new org.apache.thrift.protocol.TField("free_port_num", org.apache.thrift.protocol.TType.I32, (short)6);
+  private static final org.apache.thrift.protocol.TField NIMBUS_MASTER_FIELD_DESC = new org.apache.thrift.protocol.TField("nimbusMaster", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+  private static final org.apache.thrift.protocol.TField NIMBUS_SLAVES_FIELD_DESC = new org.apache.thrift.protocol.TField("nimbusSlaves", org.apache.thrift.protocol.TType.LIST, (short)2);
+  private static final org.apache.thrift.protocol.TField SUPERVISOR_NUM_FIELD_DESC = new org.apache.thrift.protocol.TField("supervisorNum", org.apache.thrift.protocol.TType.I32, (short)3);
+  private static final org.apache.thrift.protocol.TField TOTAL_PORT_NUM_FIELD_DESC = new org.apache.thrift.protocol.TField("totalPortNum", org.apache.thrift.protocol.TType.I32, (short)4);
+  private static final org.apache.thrift.protocol.TField USED_PORT_NUM_FIELD_DESC = new org.apache.thrift.protocol.TField("usedPortNum", org.apache.thrift.protocol.TType.I32, (short)5);
+  private static final org.apache.thrift.protocol.TField FREE_PORT_NUM_FIELD_DESC = new org.apache.thrift.protocol.TField("freePortNum", org.apache.thrift.protocol.TType.I32, (short)6);
   private static final org.apache.thrift.protocol.TField VERSION_FIELD_DESC = new org.apache.thrift.protocol.TField("version", org.apache.thrift.protocol.TType.STRING, (short)7);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
@@ -52,22 +52,22 @@ public class NimbusSummary implements org.apache.thrift.TBase<NimbusSummary, Nim
     schemes.put(TupleScheme.class, new NimbusSummaryTupleSchemeFactory());
   }
 
-  private NimbusStat nimbus_master; // required
-  private List<NimbusStat> nimbus_slaves; // required
-  private int supervisor_num; // required
-  private int total_port_num; // required
-  private int used_port_num; // required
-  private int free_port_num; // required
+  private NimbusStat nimbusMaster; // required
+  private List<NimbusStat> nimbusSlaves; // required
+  private int supervisorNum; // required
+  private int totalPortNum; // required
+  private int usedPortNum; // required
+  private int freePortNum; // required
   private String version; // required
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    NIMBUS_MASTER((short)1, "nimbus_master"),
-    NIMBUS_SLAVES((short)2, "nimbus_slaves"),
-    SUPERVISOR_NUM((short)3, "supervisor_num"),
-    TOTAL_PORT_NUM((short)4, "total_port_num"),
-    USED_PORT_NUM((short)5, "used_port_num"),
-    FREE_PORT_NUM((short)6, "free_port_num"),
+    NIMBUS_MASTER((short)1, "nimbusMaster"),
+    NIMBUS_SLAVES((short)2, "nimbusSlaves"),
+    SUPERVISOR_NUM((short)3, "supervisorNum"),
+    TOTAL_PORT_NUM((short)4, "totalPortNum"),
+    USED_PORT_NUM((short)5, "usedPortNum"),
+    FREE_PORT_NUM((short)6, "freePortNum"),
     VERSION((short)7, "version");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -137,26 +137,26 @@ public class NimbusSummary implements org.apache.thrift.TBase<NimbusSummary, Nim
   }
 
   // isset id assignments
-  private static final int __SUPERVISOR_NUM_ISSET_ID = 0;
-  private static final int __TOTAL_PORT_NUM_ISSET_ID = 1;
-  private static final int __USED_PORT_NUM_ISSET_ID = 2;
-  private static final int __FREE_PORT_NUM_ISSET_ID = 3;
+  private static final int __SUPERVISORNUM_ISSET_ID = 0;
+  private static final int __TOTALPORTNUM_ISSET_ID = 1;
+  private static final int __USEDPORTNUM_ISSET_ID = 2;
+  private static final int __FREEPORTNUM_ISSET_ID = 3;
   private byte __isset_bitfield = 0;
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.NIMBUS_MASTER, new org.apache.thrift.meta_data.FieldMetaData("nimbus_master", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+    tmpMap.put(_Fields.NIMBUS_MASTER, new org.apache.thrift.meta_data.FieldMetaData("nimbusMaster", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, NimbusStat.class)));
-    tmpMap.put(_Fields.NIMBUS_SLAVES, new org.apache.thrift.meta_data.FieldMetaData("nimbus_slaves", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+    tmpMap.put(_Fields.NIMBUS_SLAVES, new org.apache.thrift.meta_data.FieldMetaData("nimbusSlaves", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
             new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, NimbusStat.class))));
-    tmpMap.put(_Fields.SUPERVISOR_NUM, new org.apache.thrift.meta_data.FieldMetaData("supervisor_num", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+    tmpMap.put(_Fields.SUPERVISOR_NUM, new org.apache.thrift.meta_data.FieldMetaData("supervisorNum", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
-    tmpMap.put(_Fields.TOTAL_PORT_NUM, new org.apache.thrift.meta_data.FieldMetaData("total_port_num", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+    tmpMap.put(_Fields.TOTAL_PORT_NUM, new org.apache.thrift.meta_data.FieldMetaData("totalPortNum", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
-    tmpMap.put(_Fields.USED_PORT_NUM, new org.apache.thrift.meta_data.FieldMetaData("used_port_num", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+    tmpMap.put(_Fields.USED_PORT_NUM, new org.apache.thrift.meta_data.FieldMetaData("usedPortNum", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
-    tmpMap.put(_Fields.FREE_PORT_NUM, new org.apache.thrift.meta_data.FieldMetaData("free_port_num", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+    tmpMap.put(_Fields.FREE_PORT_NUM, new org.apache.thrift.meta_data.FieldMetaData("freePortNum", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
     tmpMap.put(_Fields.VERSION, new org.apache.thrift.meta_data.FieldMetaData("version", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
@@ -168,25 +168,25 @@ public class NimbusSummary implements org.apache.thrift.TBase<NimbusSummary, Nim
   }
 
   public NimbusSummary(
-    NimbusStat nimbus_master,
-    List<NimbusStat> nimbus_slaves,
-    int supervisor_num,
-    int total_port_num,
-    int used_port_num,
-    int free_port_num,
+    NimbusStat nimbusMaster,
+    List<NimbusStat> nimbusSlaves,
+    int supervisorNum,
+    int totalPortNum,
+    int usedPortNum,
+    int freePortNum,
     String version)
   {
     this();
-    this.nimbus_master = nimbus_master;
-    this.nimbus_slaves = nimbus_slaves;
-    this.supervisor_num = supervisor_num;
-    set_supervisor_num_isSet(true);
-    this.total_port_num = total_port_num;
-    set_total_port_num_isSet(true);
-    this.used_port_num = used_port_num;
-    set_used_port_num_isSet(true);
-    this.free_port_num = free_port_num;
-    set_free_port_num_isSet(true);
+    this.nimbusMaster = nimbusMaster;
+    this.nimbusSlaves = nimbusSlaves;
+    this.supervisorNum = supervisorNum;
+    set_supervisorNum_isSet(true);
+    this.totalPortNum = totalPortNum;
+    set_totalPortNum_isSet(true);
+    this.usedPortNum = usedPortNum;
+    set_usedPortNum_isSet(true);
+    this.freePortNum = freePortNum;
+    set_freePortNum_isSet(true);
     this.version = version;
   }
 
@@ -195,20 +195,20 @@ public class NimbusSummary implements org.apache.thrift.TBase<NimbusSummary, Nim
    */
   public NimbusSummary(NimbusSummary other) {
     __isset_bitfield = other.__isset_bitfield;
-    if (other.is_set_nimbus_master()) {
-      this.nimbus_master = new NimbusStat(other.nimbus_master);
+    if (other.is_set_nimbusMaster()) {
+      this.nimbusMaster = new NimbusStat(other.nimbusMaster);
     }
-    if (other.is_set_nimbus_slaves()) {
-      List<NimbusStat> __this__nimbus_slaves = new ArrayList<NimbusStat>(other.nimbus_slaves.size());
-      for (NimbusStat other_element : other.nimbus_slaves) {
-        __this__nimbus_slaves.add(new NimbusStat(other_element));
+    if (other.is_set_nimbusSlaves()) {
+      List<NimbusStat> __this__nimbusSlaves = new ArrayList<NimbusStat>(other.nimbusSlaves.size());
+      for (NimbusStat other_element : other.nimbusSlaves) {
+        __this__nimbusSlaves.add(new NimbusStat(other_element));
       }
-      this.nimbus_slaves = __this__nimbus_slaves;
+      this.nimbusSlaves = __this__nimbusSlaves;
     }
-    this.supervisor_num = other.supervisor_num;
-    this.total_port_num = other.total_port_num;
-    this.used_port_num = other.used_port_num;
-    this.free_port_num = other.free_port_num;
+    this.supervisorNum = other.supervisorNum;
+    this.totalPortNum = other.totalPortNum;
+    this.usedPortNum = other.usedPortNum;
+    this.freePortNum = other.freePortNum;
     if (other.is_set_version()) {
       this.version = other.version;
     }
@@ -220,166 +220,166 @@ public class NimbusSummary implements org.apache.thrift.TBase<NimbusSummary, Nim
 
   @Override
   public void clear() {
-    this.nimbus_master = null;
-    this.nimbus_slaves = null;
-    set_supervisor_num_isSet(false);
-    this.supervisor_num = 0;
-    set_total_port_num_isSet(false);
-    this.total_port_num = 0;
-    set_used_port_num_isSet(false);
-    this.used_port_num = 0;
-    set_free_port_num_isSet(false);
-    this.free_port_num = 0;
+    this.nimbusMaster = null;
+    this.nimbusSlaves = null;
+    set_supervisorNum_isSet(false);
+    this.supervisorNum = 0;
+    set_totalPortNum_isSet(false);
+    this.totalPortNum = 0;
+    set_usedPortNum_isSet(false);
+    this.usedPortNum = 0;
+    set_freePortNum_isSet(false);
+    this.freePortNum = 0;
     this.version = null;
   }
 
-  public NimbusStat get_nimbus_master() {
-    return this.nimbus_master;
+  public NimbusStat get_nimbusMaster() {
+    return this.nimbusMaster;
   }
 
-  public void set_nimbus_master(NimbusStat nimbus_master) {
-    this.nimbus_master = nimbus_master;
+  public void set_nimbusMaster(NimbusStat nimbusMaster) {
+    this.nimbusMaster = nimbusMaster;
   }
 
-  public void unset_nimbus_master() {
-    this.nimbus_master = null;
+  public void unset_nimbusMaster() {
+    this.nimbusMaster = null;
   }
 
-  /** Returns true if field nimbus_master is set (has been assigned a value) and false otherwise */
-  public boolean is_set_nimbus_master() {
-    return this.nimbus_master != null;
+  /** Returns true if field nimbusMaster is set (has been assigned a value) and false otherwise */
+  public boolean is_set_nimbusMaster() {
+    return this.nimbusMaster != null;
   }
 
-  public void set_nimbus_master_isSet(boolean value) {
+  public void set_nimbusMaster_isSet(boolean value) {
     if (!value) {
-      this.nimbus_master = null;
+      this.nimbusMaster = null;
     }
   }
 
-  public int get_nimbus_slaves_size() {
-    return (this.nimbus_slaves == null) ? 0 : this.nimbus_slaves.size();
+  public int get_nimbusSlaves_size() {
+    return (this.nimbusSlaves == null) ? 0 : this.nimbusSlaves.size();
   }
 
-  public java.util.Iterator<NimbusStat> get_nimbus_slaves_iterator() {
-    return (this.nimbus_slaves == null) ? null : this.nimbus_slaves.iterator();
+  public java.util.Iterator<NimbusStat> get_nimbusSlaves_iterator() {
+    return (this.nimbusSlaves == null) ? null : this.nimbusSlaves.iterator();
   }
 
-  public void add_to_nimbus_slaves(NimbusStat elem) {
-    if (this.nimbus_slaves == null) {
-      this.nimbus_slaves = new ArrayList<NimbusStat>();
+  public void add_to_nimbusSlaves(NimbusStat elem) {
+    if (this.nimbusSlaves == null) {
+      this.nimbusSlaves = new ArrayList<NimbusStat>();
     }
-    this.nimbus_slaves.add(elem);
+    this.nimbusSlaves.add(elem);
   }
 
-  public List<NimbusStat> get_nimbus_slaves() {
-    return this.nimbus_slaves;
+  public List<NimbusStat> get_nimbusSlaves() {
+    return this.nimbusSlaves;
   }
 
-  public void set_nimbus_slaves(List<NimbusStat> nimbus_slaves) {
-    this.nimbus_slaves = nimbus_slaves;
+  public void set_nimbusSlaves(List<NimbusStat> nimbusSlaves) {
+    this.nimbusSlaves = nimbusSlaves;
   }
 
-  public void unset_nimbus_slaves() {
-    this.nimbus_slaves = null;
+  public void unset_nimbusSlaves() {
+    this.nimbusSlaves = null;
   }
 
-  /** Returns true if field nimbus_slaves is set (has been assigned a value) and false otherwise */
-  public boolean is_set_nimbus_slaves() {
-    return this.nimbus_slaves != null;
+  /** Returns true if field nimbusSlaves is set (has been assigned a value) and false otherwise */
+  public boolean is_set_nimbusSlaves() {
+    return this.nimbusSlaves != null;
   }
 
-  public void set_nimbus_slaves_isSet(boolean value) {
+  public void set_nimbusSlaves_isSet(boolean value) {
     if (!value) {
-      this.nimbus_slaves = null;
+      this.nimbusSlaves = null;
     }
   }
 
-  public int get_supervisor_num() {
-    return this.supervisor_num;
+  public int get_supervisorNum() {
+    return this.supervisorNum;
   }
 
-  public void set_supervisor_num(int supervisor_num) {
-    this.supervisor_num = supervisor_num;
-    set_supervisor_num_isSet(true);
+  public void set_supervisorNum(int supervisorNum) {
+    this.supervisorNum = supervisorNum;
+    set_supervisorNum_isSet(true);
   }
 
-  public void unset_supervisor_num() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SUPERVISOR_NUM_ISSET_ID);
+  public void unset_supervisorNum() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SUPERVISORNUM_ISSET_ID);
   }
 
-  /** Returns true if field supervisor_num is set (has been assigned a value) and false otherwise */
-  public boolean is_set_supervisor_num() {
-    return EncodingUtils.testBit(__isset_bitfield, __SUPERVISOR_NUM_ISSET_ID);
+  /** Returns true if field supervisorNum is set (has been assigned a value) and false otherwise */
+  public boolean is_set_supervisorNum() {
+    return EncodingUtils.testBit(__isset_bitfield, __SUPERVISORNUM_ISSET_ID);
   }
 
-  public void set_supervisor_num_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SUPERVISOR_NUM_ISSET_ID, value);
+  public void set_supervisorNum_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SUPERVISORNUM_ISSET_ID, value);
   }
 
-  public int get_total_port_num() {
-    return this.total_port_num;
+  public int get_totalPortNum() {
+    return this.totalPortNum;
   }
 
-  public void set_total_port_num(int total_port_num) {
-    this.total_port_num = total_port_num;
-    set_total_port_num_isSet(true);
+  public void set_totalPortNum(int totalPortNum) {
+    this.totalPortNum = totalPortNum;
+    set_totalPortNum_isSet(true);
   }
 
-  public void unset_total_port_num() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TOTAL_PORT_NUM_ISSET_ID);
+  public void unset_totalPortNum() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TOTALPORTNUM_ISSET_ID);
   }
 
-  /** Returns true if field total_port_num is set (has been assigned a value) and false otherwise */
-  public boolean is_set_total_port_num() {
-    return EncodingUtils.testBit(__isset_bitfield, __TOTAL_PORT_NUM_ISSET_ID);
+  /** Returns true if field totalPortNum is set (has been assigned a value) and false otherwise */
+  public boolean is_set_totalPortNum() {
+    return EncodingUtils.testBit(__isset_bitfield, __TOTALPORTNUM_ISSET_ID);
   }
 
-  public void set_total_port_num_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TOTAL_PORT_NUM_ISSET_ID, value);
+  public void set_totalPortNum_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TOTALPORTNUM_ISSET_ID, value);
   }
 
-  public int get_used_port_num() {
-    return this.used_port_num;
+  public int get_usedPortNum() {
+    return this.usedPortNum;
   }
 
-  public void set_used_port_num(int used_port_num) {
-    this.used_port_num = used_port_num;
-    set_used_port_num_isSet(true);
+  public void set_usedPortNum(int usedPortNum) {
+    this.usedPortNum = usedPortNum;
+    set_usedPortNum_isSet(true);
   }
 
-  public void unset_used_port_num() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __USED_PORT_NUM_ISSET_ID);
+  public void unset_usedPortNum() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __USEDPORTNUM_ISSET_ID);
   }
 
-  /** Returns true if field used_port_num is set (has been assigned a value) and false otherwise */
-  public boolean is_set_used_port_num() {
-    return EncodingUtils.testBit(__isset_bitfield, __USED_PORT_NUM_ISSET_ID);
+  /** Returns true if field usedPortNum is set (has been assigned a value) and false otherwise */
+  public boolean is_set_usedPortNum() {
+    return EncodingUtils.testBit(__isset_bitfield, __USEDPORTNUM_ISSET_ID);
   }
 
-  public void set_used_port_num_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __USED_PORT_NUM_ISSET_ID, value);
+  public void set_usedPortNum_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __USEDPORTNUM_ISSET_ID, value);
   }
 
-  public int get_free_port_num() {
-    return this.free_port_num;
+  public int get_freePortNum() {
+    return this.freePortNum;
   }
 
-  public void set_free_port_num(int free_port_num) {
-    this.free_port_num = free_port_num;
-    set_free_port_num_isSet(true);
+  public void set_freePortNum(int freePortNum) {
+    this.freePortNum = freePortNum;
+    set_freePortNum_isSet(true);
   }
 
-  public void unset_free_port_num() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __FREE_PORT_NUM_ISSET_ID);
+  public void unset_freePortNum() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __FREEPORTNUM_ISSET_ID);
   }
 
-  /** Returns true if field free_port_num is set (has been assigned a value) and false otherwise */
-  public boolean is_set_free_port_num() {
-    return EncodingUtils.testBit(__isset_bitfield, __FREE_PORT_NUM_ISSET_ID);
+  /** Returns true if field freePortNum is set (has been assigned a value) and false otherwise */
+  public boolean is_set_freePortNum() {
+    return EncodingUtils.testBit(__isset_bitfield, __FREEPORTNUM_ISSET_ID);
   }
 
-  public void set_free_port_num_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __FREE_PORT_NUM_ISSET_ID, value);
+  public void set_freePortNum_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __FREEPORTNUM_ISSET_ID, value);
   }
 
   public String get_version() {
@@ -409,49 +409,49 @@ public class NimbusSummary implements org.apache.thrift.TBase<NimbusSummary, Nim
     switch (field) {
     case NIMBUS_MASTER:
       if (value == null) {
-        unset_nimbus_master();
+        unset_nimbusMaster();
       } else {
-        set_nimbus_master((NimbusStat)value);
+        set_nimbusMaster((NimbusStat)value);
       }
       break;
 
     case NIMBUS_SLAVES:
       if (value == null) {
-        unset_nimbus_slaves();
+        unset_nimbusSlaves();
       } else {
-        set_nimbus_slaves((List<NimbusStat>)value);
+        set_nimbusSlaves((List<NimbusStat>)value);
       }
       break;
 
     case SUPERVISOR_NUM:
       if (value == null) {
-        unset_supervisor_num();
+        unset_supervisorNum();
       } else {
-        set_supervisor_num((Integer)value);
+        set_supervisorNum((Integer)value);
       }
       break;
 
     case TOTAL_PORT_NUM:
       if (value == null) {
-        unset_total_port_num();
+        unset_totalPortNum();
       } else {
-        set_total_port_num((Integer)value);
+        set_totalPortNum((Integer)value);
       }
       break;
 
     case USED_PORT_NUM:
       if (value == null) {
-        unset_used_port_num();
+        unset_usedPortNum();
       } else {
-        set_used_port_num((Integer)value);
+        set_usedPortNum((Integer)value);
       }
       break;
 
     case FREE_PORT_NUM:
       if (value == null) {
-        unset_free_port_num();
+        unset_freePortNum();
       } else {
-        set_free_port_num((Integer)value);
+        set_freePortNum((Integer)value);
       }
       break;
 
@@ -469,22 +469,22 @@ public class NimbusSummary implements org.apache.thrift.TBase<NimbusSummary, Nim
   public Object getFieldValue(_Fields field) {
     switch (field) {
     case NIMBUS_MASTER:
-      return get_nimbus_master();
+      return get_nimbusMaster();
 
     case NIMBUS_SLAVES:
-      return get_nimbus_slaves();
+      return get_nimbusSlaves();
 
     case SUPERVISOR_NUM:
-      return Integer.valueOf(get_supervisor_num());
+      return Integer.valueOf(get_supervisorNum());
 
     case TOTAL_PORT_NUM:
-      return Integer.valueOf(get_total_port_num());
+      return Integer.valueOf(get_totalPortNum());
 
     case USED_PORT_NUM:
-      return Integer.valueOf(get_used_port_num());
+      return Integer.valueOf(get_usedPortNum());
 
     case FREE_PORT_NUM:
-      return Integer.valueOf(get_free_port_num());
+      return Integer.valueOf(get_freePortNum());
 
     case VERSION:
       return get_version();
@@ -501,17 +501,17 @@ public class NimbusSummary implements org.apache.thrift.TBase<NimbusSummary, Nim
 
     switch (field) {
     case NIMBUS_MASTER:
-      return is_set_nimbus_master();
+      return is_set_nimbusMaster();
     case NIMBUS_SLAVES:
-      return is_set_nimbus_slaves();
+      return is_set_nimbusSlaves();
     case SUPERVISOR_NUM:
-      return is_set_supervisor_num();
+      return is_set_supervisorNum();
     case TOTAL_PORT_NUM:
-      return is_set_total_port_num();
+      return is_set_totalPortNum();
     case USED_PORT_NUM:
-      return is_set_used_port_num();
+      return is_set_usedPortNum();
     case FREE_PORT_NUM:
-      return is_set_free_port_num();
+      return is_set_freePortNum();
     case VERSION:
       return is_set_version();
     }
@@ -531,57 +531,57 @@ public class NimbusSummary implements org.apache.thrift.TBase<NimbusSummary, Nim
     if (that == null)
       return false;
 
-    boolean this_present_nimbus_master = true && this.is_set_nimbus_master();
-    boolean that_present_nimbus_master = true && that.is_set_nimbus_master();
-    if (this_present_nimbus_master || that_present_nimbus_master) {
-      if (!(this_present_nimbus_master && that_present_nimbus_master))
+    boolean this_present_nimbusMaster = true && this.is_set_nimbusMaster();
+    boolean that_present_nimbusMaster = true && that.is_set_nimbusMaster();
+    if (this_present_nimbusMaster || that_present_nimbusMaster) {
+      if (!(this_present_nimbusMaster && that_present_nimbusMaster))
         return false;
-      if (!this.nimbus_master.equals(that.nimbus_master))
+      if (!this.nimbusMaster.equals(that.nimbusMaster))
         return false;
     }
 
-    boolean this_present_nimbus_slaves = true && this.is_set_nimbus_slaves();
-    boolean that_present_nimbus_slaves = true && that.is_set_nimbus_slaves();
-    if (this_present_nimbus_slaves || that_present_nimbus_slaves) {
-      if (!(this_present_nimbus_slaves && that_present_nimbus_slaves))
+    boolean this_present_nimbusSlaves = true && this.is_set_nimbusSlaves();
+    boolean that_present_nimbusSlaves = true && that.is_set_nimbusSlaves();
+    if (this_present_nimbusSlaves || that_present_nimbusSlaves) {
+      if (!(this_present_nimbusSlaves && that_present_nimbusSlaves))
         return false;
-      if (!this.nimbus_slaves.equals(that.nimbus_slaves))
+      if (!this.nimbusSlaves.equals(that.nimbusSlaves))
         return false;
     }
 
-    boolean this_present_supervisor_num = true;
-    boolean that_present_supervisor_num = true;
-    if (this_present_supervisor_num || that_present_supervisor_num) {
-      if (!(this_present_supervisor_num && that_present_supervisor_num))
+    boolean this_present_supervisorNum = true;
+    boolean that_present_supervisorNum = true;
+    if (this_present_supervisorNum || that_present_supervisorNum) {
+      if (!(this_present_supervisorNum && that_present_supervisorNum))
         return false;
-      if (this.supervisor_num != that.supervisor_num)
+      if (this.supervisorNum != that.supervisorNum)
         return false;
     }
 
-    boolean this_present_total_port_num = true;
-    boolean that_present_total_port_num = true;
-    if (this_present_total_port_num || that_present_total_port_num) {
-      if (!(this_present_total_port_num && that_present_total_port_num))
+    boolean this_present_totalPortNum = true;
+    boolean that_present_totalPortNum = true;
+    if (this_present_totalPortNum || that_present_totalPortNum) {
+      if (!(this_present_totalPortNum && that_present_totalPortNum))
         return false;
-      if (this.total_port_num != that.total_port_num)
+      if (this.totalPortNum != that.totalPortNum)
         return false;
     }
 
-    boolean this_present_used_port_num = true;
-    boolean that_present_used_port_num = true;
-    if (this_present_used_port_num || that_present_used_port_num) {
-      if (!(this_present_used_port_num && that_present_used_port_num))
+    boolean this_present_usedPortNum = true;
+    boolean that_present_usedPortNum = true;
+    if (this_present_usedPortNum || that_present_usedPortNum) {
+      if (!(this_present_usedPortNum && that_present_usedPortNum))
         return false;
-      if (this.used_port_num != that.used_port_num)
+      if (this.usedPortNum != that.usedPortNum)
         return false;
     }
 
-    boolean this_present_free_port_num = true;
-    boolean that_present_free_port_num = true;
-    if (this_present_free_port_num || that_present_free_port_num) {
-      if (!(this_present_free_port_num && that_present_free_port_num))
+    boolean this_present_freePortNum = true;
+    boolean that_present_freePortNum = true;
+    if (this_present_freePortNum || that_present_freePortNum) {
+      if (!(this_present_freePortNum && that_present_freePortNum))
         return false;
-      if (this.free_port_num != that.free_port_num)
+      if (this.freePortNum != that.freePortNum)
         return false;
     }
 
@@ -601,35 +601,35 @@ public class NimbusSummary implements org.apache.thrift.TBase<NimbusSummary, Nim
   public int hashCode() {
     List<Object> list = new ArrayList<Object>();
 
-    boolean present_nimbus_master = true && (is_set_nimbus_master());
-    list.add(present_nimbus_master);
-    if (present_nimbus_master)
-      list.add(nimbus_master);
+    boolean present_nimbusMaster = true && (is_set_nimbusMaster());
+    list.add(present_nimbusMaster);
+    if (present_nimbusMaster)
+      list.add(nimbusMaster);
 
-    boolean present_nimbus_slaves = true && (is_set_nimbus_slaves());
-    list.add(present_nimbus_slaves);
-    if (present_nimbus_slaves)
-      list.add(nimbus_slaves);
+    boolean present_nimbusSlaves = true && (is_set_nimbusSlaves());
+    list.add(present_nimbusSlaves);
+    if (present_nimbusSlaves)
+      list.add(nimbusSlaves);
 
-    boolean present_supervisor_num = true;
-    list.add(present_supervisor_num);
-    if (present_supervisor_num)
-      list.add(supervisor_num);
+    boolean present_supervisorNum = true;
+    list.add(present_supervisorNum);
+    if (present_supervisorNum)
+      list.add(supervisorNum);
 
-    boolean present_total_port_num = true;
-    list.add(present_total_port_num);
-    if (present_total_port_num)
-      list.add(total_port_num);
+    boolean present_totalPortNum = true;
+    list.add(present_totalPortNum);
+    if (present_totalPortNum)
+      list.add(totalPortNum);
 
-    boolean present_used_port_num = true;
-    list.add(present_used_port_num);
-    if (present_used_port_num)
-      list.add(used_port_num);
+    boolean present_usedPortNum = true;
+    list.add(present_usedPortNum);
+    if (present_usedPortNum)
+      list.add(usedPortNum);
 
-    boolean present_free_port_num = true;
-    list.add(present_free_port_num);
-    if (present_free_port_num)
-      list.add(free_port_num);
+    boolean present_freePortNum = true;
+    list.add(present_freePortNum);
+    if (present_freePortNum)
+      list.add(freePortNum);
 
     boolean present_version = true && (is_set_version());
     list.add(present_version);
@@ -647,62 +647,62 @@ public class NimbusSummary implements org.apache.thrift.TBase<NimbusSummary, Nim
 
     int lastComparison = 0;
 
-    lastComparison = Boolean.valueOf(is_set_nimbus_master()).compareTo(other.is_set_nimbus_master());
+    lastComparison = Boolean.valueOf(is_set_nimbusMaster()).compareTo(other.is_set_nimbusMaster());
     if (lastComparison != 0) {
       return lastComparison;
     }
-    if (is_set_nimbus_master()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nimbus_master, other.nimbus_master);
+    if (is_set_nimbusMaster()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nimbusMaster, other.nimbusMaster);
       if (lastComparison != 0) {
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(is_set_nimbus_slaves()).compareTo(other.is_set_nimbus_slaves());
+    lastComparison = Boolean.valueOf(is_set_nimbusSlaves()).compareTo(other.is_set_nimbusSlaves());
     if (lastComparison != 0) {
       return lastComparison;
     }
-    if (is_set_nimbus_slaves()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nimbus_slaves, other.nimbus_slaves);
+    if (is_set_nimbusSlaves()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nimbusSlaves, other.nimbusSlaves);
       if (lastComparison != 0) {
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(is_set_supervisor_num()).compareTo(other.is_set_supervisor_num());
+    lastComparison = Boolean.valueOf(is_set_supervisorNum()).compareTo(other.is_set_supervisorNum());
     if (lastComparison != 0) {
       return lastComparison;
     }
-    if (is_set_supervisor_num()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.supervisor_num, other.supervisor_num);
+    if (is_set_supervisorNum()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.supervisorNum, other.supervisorNum);
       if (lastComparison != 0) {
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(is_set_total_port_num()).compareTo(other.is_set_total_port_num());
+    lastComparison = Boolean.valueOf(is_set_totalPortNum()).compareTo(other.is_set_totalPortNum());
     if (lastComparison != 0) {
       return lastComparison;
     }
-    if (is_set_total_port_num()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.total_port_num, other.total_port_num);
+    if (is_set_totalPortNum()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.totalPortNum, other.totalPortNum);
       if (lastComparison != 0) {
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(is_set_used_port_num()).compareTo(other.is_set_used_port_num());
+    lastComparison = Boolean.valueOf(is_set_usedPortNum()).compareTo(other.is_set_usedPortNum());
     if (lastComparison != 0) {
       return lastComparison;
     }
-    if (is_set_used_port_num()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.used_port_num, other.used_port_num);
+    if (is_set_usedPortNum()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.usedPortNum, other.usedPortNum);
       if (lastComparison != 0) {
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(is_set_free_port_num()).compareTo(other.is_set_free_port_num());
+    lastComparison = Boolean.valueOf(is_set_freePortNum()).compareTo(other.is_set_freePortNum());
     if (lastComparison != 0) {
       return lastComparison;
     }
-    if (is_set_free_port_num()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.free_port_num, other.free_port_num);
+    if (is_set_freePortNum()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.freePortNum, other.freePortNum);
       if (lastComparison != 0) {
         return lastComparison;
       }
@@ -724,11 +724,11 @@ public class NimbusSummary implements org.apache.thrift.TBase<NimbusSummary, Nim
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -737,36 +737,36 @@ public class NimbusSummary implements org.apache.thrift.TBase<NimbusSummary, Nim
     StringBuilder sb = new StringBuilder("NimbusSummary(");
     boolean first = true;
 
-    sb.append("nimbus_master:");
-    if (this.nimbus_master == null) {
+    sb.append("nimbusMaster:");
+    if (this.nimbusMaster == null) {
       sb.append("null");
     } else {
-      sb.append(this.nimbus_master);
+      sb.append(this.nimbusMaster);
     }
     first = false;
     if (!first) sb.append(", ");
-    sb.append("nimbus_slaves:");
-    if (this.nimbus_slaves == null) {
+    sb.append("nimbusSlaves:");
+    if (this.nimbusSlaves == null) {
       sb.append("null");
     } else {
-      sb.append(this.nimbus_slaves);
+      sb.append(this.nimbusSlaves);
     }
     first = false;
     if (!first) sb.append(", ");
-    sb.append("supervisor_num:");
-    sb.append(this.supervisor_num);
+    sb.append("supervisorNum:");
+    sb.append(this.supervisorNum);
     first = false;
     if (!first) sb.append(", ");
-    sb.append("total_port_num:");
-    sb.append(this.total_port_num);
+    sb.append("totalPortNum:");
+    sb.append(this.totalPortNum);
     first = false;
     if (!first) sb.append(", ");
-    sb.append("used_port_num:");
-    sb.append(this.used_port_num);
+    sb.append("usedPortNum:");
+    sb.append(this.usedPortNum);
     first = false;
     if (!first) sb.append(", ");
-    sb.append("free_port_num:");
-    sb.append(this.free_port_num);
+    sb.append("freePortNum:");
+    sb.append(this.freePortNum);
     first = false;
     if (!first) sb.append(", ");
     sb.append("version:");
@@ -780,46 +780,46 @@ public class NimbusSummary implements org.apache.thrift.TBase<NimbusSummary, Nim
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
-    if (!is_set_nimbus_master()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'nimbus_master' is unset! Struct:" + toString());
+    if (!is_set_nimbusMaster()) {
+      throw new TProtocolException("Required field 'nimbusMaster' is unset! Struct:" + toString());
     }
 
-    if (!is_set_nimbus_slaves()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'nimbus_slaves' is unset! Struct:" + toString());
+    if (!is_set_nimbusSlaves()) {
+      throw new TProtocolException("Required field 'nimbusSlaves' is unset! Struct:" + toString());
     }
 
-    if (!is_set_supervisor_num()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'supervisor_num' is unset! Struct:" + toString());
+    if (!is_set_supervisorNum()) {
+      throw new TProtocolException("Required field 'supervisorNum' is unset! Struct:" + toString());
     }
 
-    if (!is_set_total_port_num()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'total_port_num' is unset! Struct:" + toString());
+    if (!is_set_totalPortNum()) {
+      throw new TProtocolException("Required field 'totalPortNum' is unset! Struct:" + toString());
     }
 
-    if (!is_set_used_port_num()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'used_port_num' is unset! Struct:" + toString());
+    if (!is_set_usedPortNum()) {
+      throw new TProtocolException("Required field 'usedPortNum' is unset! Struct:" + toString());
     }
 
-    if (!is_set_free_port_num()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'free_port_num' is unset! Struct:" + toString());
+    if (!is_set_freePortNum()) {
+      throw new TProtocolException("Required field 'freePortNum' is unset! Struct:" + toString());
     }
 
     if (!is_set_version()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'version' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'version' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
-    if (nimbus_master != null) {
-      nimbus_master.validate();
+    if (nimbusMaster != null) {
+      nimbusMaster.validate();
     }
   }
 
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -829,7 +829,7 @@ public class NimbusSummary implements org.apache.thrift.TBase<NimbusSummary, Nim
       // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
       __isset_bitfield = 0;
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -842,7 +842,7 @@ public class NimbusSummary implements org.apache.thrift.TBase<NimbusSummary, Nim
 
   private static class NimbusSummaryStandardScheme extends StandardScheme<NimbusSummary> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, NimbusSummary struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, NimbusSummary struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -854,9 +854,9 @@ public class NimbusSummary implements org.apache.thrift.TBase<NimbusSummary, Nim
         switch (schemeField.id) {
           case 1: // NIMBUS_MASTER
             if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-              struct.nimbus_master = new NimbusStat();
-              struct.nimbus_master.read(iprot);
-              struct.set_nimbus_master_isSet(true);
+              struct.nimbusMaster = new NimbusStat();
+              struct.nimbusMaster.read(iprot);
+              struct.set_nimbusMaster_isSet(true);
             } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
@@ -865,49 +865,49 @@ public class NimbusSummary implements org.apache.thrift.TBase<NimbusSummary, Nim
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
                 org.apache.thrift.protocol.TList _list74 = iprot.readListBegin();
-                struct.nimbus_slaves = new ArrayList<NimbusStat>(_list74.size);
+                struct.nimbusSlaves = new ArrayList<NimbusStat>(_list74.size);
                 NimbusStat _elem75;
                 for (int _i76 = 0; _i76 < _list74.size; ++_i76)
                 {
                   _elem75 = new NimbusStat();
                   _elem75.read(iprot);
-                  struct.nimbus_slaves.add(_elem75);
+                  struct.nimbusSlaves.add(_elem75);
                 }
                 iprot.readListEnd();
               }
-              struct.set_nimbus_slaves_isSet(true);
+              struct.set_nimbusSlaves_isSet(true);
             } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
           case 3: // SUPERVISOR_NUM
             if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
-              struct.supervisor_num = iprot.readI32();
-              struct.set_supervisor_num_isSet(true);
+              struct.supervisorNum = iprot.readI32();
+              struct.set_supervisorNum_isSet(true);
             } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
           case 4: // TOTAL_PORT_NUM
             if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
-              struct.total_port_num = iprot.readI32();
-              struct.set_total_port_num_isSet(true);
+              struct.totalPortNum = iprot.readI32();
+              struct.set_totalPortNum_isSet(true);
             } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
           case 5: // USED_PORT_NUM
             if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
-              struct.used_port_num = iprot.readI32();
-              struct.set_used_port_num_isSet(true);
+              struct.usedPortNum = iprot.readI32();
+              struct.set_usedPortNum_isSet(true);
             } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
           case 6: // FREE_PORT_NUM
             if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
-              struct.free_port_num = iprot.readI32();
-              struct.set_free_port_num_isSet(true);
+              struct.freePortNum = iprot.readI32();
+              struct.set_freePortNum_isSet(true);
             } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
@@ -929,20 +929,20 @@ public class NimbusSummary implements org.apache.thrift.TBase<NimbusSummary, Nim
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, NimbusSummary struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, NimbusSummary struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
-      if (struct.nimbus_master != null) {
+      if (struct.nimbusMaster != null) {
         oprot.writeFieldBegin(NIMBUS_MASTER_FIELD_DESC);
-        struct.nimbus_master.write(oprot);
+        struct.nimbusMaster.write(oprot);
         oprot.writeFieldEnd();
       }
-      if (struct.nimbus_slaves != null) {
+      if (struct.nimbusSlaves != null) {
         oprot.writeFieldBegin(NIMBUS_SLAVES_FIELD_DESC);
         {
-          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.nimbus_slaves.size()));
-          for (NimbusStat _iter77 : struct.nimbus_slaves)
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.nimbusSlaves.size()));
+          for (NimbusStat _iter77 : struct.nimbusSlaves)
           {
             _iter77.write(oprot);
           }
@@ -951,16 +951,16 @@ public class NimbusSummary implements org.apache.thrift.TBase<NimbusSummary, Nim
         oprot.writeFieldEnd();
       }
       oprot.writeFieldBegin(SUPERVISOR_NUM_FIELD_DESC);
-      oprot.writeI32(struct.supervisor_num);
+      oprot.writeI32(struct.supervisorNum);
       oprot.writeFieldEnd();
       oprot.writeFieldBegin(TOTAL_PORT_NUM_FIELD_DESC);
-      oprot.writeI32(struct.total_port_num);
+      oprot.writeI32(struct.totalPortNum);
       oprot.writeFieldEnd();
       oprot.writeFieldBegin(USED_PORT_NUM_FIELD_DESC);
-      oprot.writeI32(struct.used_port_num);
+      oprot.writeI32(struct.usedPortNum);
       oprot.writeFieldEnd();
       oprot.writeFieldBegin(FREE_PORT_NUM_FIELD_DESC);
-      oprot.writeI32(struct.free_port_num);
+      oprot.writeI32(struct.freePortNum);
       oprot.writeFieldEnd();
       if (struct.version != null) {
         oprot.writeFieldBegin(VERSION_FIELD_DESC);
@@ -982,49 +982,49 @@ public class NimbusSummary implements org.apache.thrift.TBase<NimbusSummary, Nim
   private static class NimbusSummaryTupleScheme extends TupleScheme<NimbusSummary> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, NimbusSummary struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, NimbusSummary struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
-      struct.nimbus_master.write(oprot);
+      struct.nimbusMaster.write(oprot);
       {
-        oprot.writeI32(struct.nimbus_slaves.size());
-        for (NimbusStat _iter78 : struct.nimbus_slaves)
+        oprot.writeI32(struct.nimbusSlaves.size());
+        for (NimbusStat _iter78 : struct.nimbusSlaves)
         {
           _iter78.write(oprot);
         }
       }
-      oprot.writeI32(struct.supervisor_num);
-      oprot.writeI32(struct.total_port_num);
-      oprot.writeI32(struct.used_port_num);
-      oprot.writeI32(struct.free_port_num);
+      oprot.writeI32(struct.supervisorNum);
+      oprot.writeI32(struct.totalPortNum);
+      oprot.writeI32(struct.usedPortNum);
+      oprot.writeI32(struct.freePortNum);
       oprot.writeString(struct.version);
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, NimbusSummary struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, NimbusSummary struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
-      struct.nimbus_master = new NimbusStat();
-      struct.nimbus_master.read(iprot);
-      struct.set_nimbus_master_isSet(true);
+      struct.nimbusMaster = new NimbusStat();
+      struct.nimbusMaster.read(iprot);
+      struct.set_nimbusMaster_isSet(true);
       {
         org.apache.thrift.protocol.TList _list79 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.nimbus_slaves = new ArrayList<NimbusStat>(_list79.size);
+        struct.nimbusSlaves = new ArrayList<NimbusStat>(_list79.size);
         NimbusStat _elem80;
         for (int _i81 = 0; _i81 < _list79.size; ++_i81)
         {
           _elem80 = new NimbusStat();
           _elem80.read(iprot);
-          struct.nimbus_slaves.add(_elem80);
+          struct.nimbusSlaves.add(_elem80);
         }
       }
-      struct.set_nimbus_slaves_isSet(true);
-      struct.supervisor_num = iprot.readI32();
-      struct.set_supervisor_num_isSet(true);
-      struct.total_port_num = iprot.readI32();
-      struct.set_total_port_num_isSet(true);
-      struct.used_port_num = iprot.readI32();
-      struct.set_used_port_num_isSet(true);
-      struct.free_port_num = iprot.readI32();
-      struct.set_free_port_num_isSet(true);
+      struct.set_nimbusSlaves_isSet(true);
+      struct.supervisorNum = iprot.readI32();
+      struct.set_supervisorNum_isSet(true);
+      struct.totalPortNum = iprot.readI32();
+      struct.set_totalPortNum_isSet(true);
+      struct.usedPortNum = iprot.readI32();
+      struct.set_usedPortNum_isSet(true);
+      struct.freePortNum = iprot.readI32();
+      struct.set_freePortNum_isSet(true);
       struct.version = iprot.readString();
       struct.set_version_isSet(true);
     }
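
The NimbusSummary hunks above are a mechanical rename of the struct's fields from snake_case to camelCase (nimbus_master -> nimbusMaster, free_port_num -> freePortNum, and so on), carried consistently through the accessors, constructors, equals/hashCode/compareTo, toString, validate, and both serialization schemes. The Thrift field ids (1-7) are untouched, so the wire format is unchanged, but since the generated accessor names derive from the field names, the rename is source-incompatible for existing callers. A minimal caller-side sketch, using the seven-argument constructor from the diff (the values are made up for illustration):

    NimbusStat master = new NimbusStat();                    // values omitted
    List<NimbusStat> slaves = new ArrayList<NimbusStat>();
    NimbusSummary summary =
        new NimbusSummary(master, slaves, 3, 12, 8, 4, "2.1.0");
    int free = summary.get_freePortNum();                    // was get_free_port_num()
    NimbusStat m = summary.get_nimbusMaster();               // was get_nimbus_master()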

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/NotAliveException.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/NotAliveException.java b/jstorm-core/src/main/java/backtype/storm/generated/NotAliveException.java
index ae0f056..d306d08 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/NotAliveException.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/NotAliveException.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class NotAliveException extends TException implements org.apache.thrift.TBase<NotAliveException, NotAliveException._Fields>, java.io.Serializable, Cloneable, Comparable<NotAliveException> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NotAliveException");
 
@@ -264,11 +264,11 @@ public class NotAliveException extends TException implements org.apache.thrift.T
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -288,10 +288,10 @@ public class NotAliveException extends TException implements org.apache.thrift.T
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_msg()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'msg' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'msg' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -300,7 +300,7 @@ public class NotAliveException extends TException implements org.apache.thrift.T
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -308,7 +308,7 @@ public class NotAliveException extends TException implements org.apache.thrift.T
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -321,7 +321,7 @@ public class NotAliveException extends TException implements org.apache.thrift.T
 
   private static class NotAliveExceptionStandardScheme extends StandardScheme<NotAliveException> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, NotAliveException struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, NotAliveException struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -348,7 +348,7 @@ public class NotAliveException extends TException implements org.apache.thrift.T
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, NotAliveException struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, NotAliveException struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -372,13 +372,13 @@ public class NotAliveException extends TException implements org.apache.thrift.T
   private static class NotAliveExceptionTupleScheme extends TupleScheme<NotAliveException> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, NotAliveException struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, NotAliveException struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       oprot.writeString(struct.msg);
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, NotAliveException struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, NotAliveException struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       struct.msg = iprot.readString();
       struct.set_msg_isSet(true);
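
The exception-name hunks here (and the matching hunks in the files below) shorten org.apache.thrift.TException and org.apache.thrift.protocol.TProtocolException to their simple names. The class declaration already uses the short form (NotAliveException extends TException), so that import existed before this change; the TProtocolException import is the assumed addition, made outside the hunks shown:

    import org.apache.thrift.TException;                     // already present
    import org.apache.thrift.protocol.TProtocolException;    // assumed new import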

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/NullStruct.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/NullStruct.java b/jstorm-core/src/main/java/backtype/storm/generated/NullStruct.java
index 0494eb2..f9e958d 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/NullStruct.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/NullStruct.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class NullStruct implements org.apache.thrift.TBase<NullStruct, NullStruct._Fields>, java.io.Serializable, Cloneable, Comparable<NullStruct> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NullStruct");
 
@@ -185,11 +185,11 @@ public class NullStruct implements org.apache.thrift.TBase<NullStruct, NullStruc
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -202,7 +202,7 @@ public class NullStruct implements org.apache.thrift.TBase<NullStruct, NullStruc
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     // check for sub-struct validity
   }
@@ -210,7 +210,7 @@ public class NullStruct implements org.apache.thrift.TBase<NullStruct, NullStruc
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -218,7 +218,7 @@ public class NullStruct implements org.apache.thrift.TBase<NullStruct, NullStruc
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -231,7 +231,7 @@ public class NullStruct implements org.apache.thrift.TBase<NullStruct, NullStruc
 
   private static class NullStructStandardScheme extends StandardScheme<NullStruct> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, NullStruct struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, NullStruct struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -250,7 +250,7 @@ public class NullStruct implements org.apache.thrift.TBase<NullStruct, NullStruc
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, NullStruct struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, NullStruct struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -269,12 +269,12 @@ public class NullStruct implements org.apache.thrift.TBase<NullStruct, NullStruc
   private static class NullStructTupleScheme extends TupleScheme<NullStruct> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, NullStruct struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, NullStruct struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, NullStruct struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, NullStruct struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
     }
   }
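
As the writeObject/readObject hunks show, Java serialization of these generated structs simply delegates to Thrift's TCompactProtocol over a TIOStreamTransport. Essentially the same compact encoding can be produced directly with the stock TSerializer/TDeserializer helpers from Thrift 0.9.x; a minimal round-trip sketch (hypothetical caller code, not part of this commit):

    TSerializer ser = new TSerializer(new TCompactProtocol.Factory());
    byte[] bytes = ser.serialize(new NullStruct());          // throws TException
    NullStruct copy = new NullStruct();
    new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, bytes);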

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/RebalanceOptions.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/RebalanceOptions.java b/jstorm-core/src/main/java/backtype/storm/generated/RebalanceOptions.java
index 3348189..287559e 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/RebalanceOptions.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/RebalanceOptions.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class RebalanceOptions implements org.apache.thrift.TBase<RebalanceOptions, RebalanceOptions._Fields>, java.io.Serializable, Cloneable, Comparable<RebalanceOptions> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("RebalanceOptions");
 
@@ -400,11 +400,11 @@ public class RebalanceOptions implements org.apache.thrift.TBase<RebalanceOption
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -438,7 +438,7 @@ public class RebalanceOptions implements org.apache.thrift.TBase<RebalanceOption
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     // check for sub-struct validity
   }
@@ -446,7 +446,7 @@ public class RebalanceOptions implements org.apache.thrift.TBase<RebalanceOption
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -456,7 +456,7 @@ public class RebalanceOptions implements org.apache.thrift.TBase<RebalanceOption
       // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
       __isset_bitfield = 0;
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -469,7 +469,7 @@ public class RebalanceOptions implements org.apache.thrift.TBase<RebalanceOption
 
   private static class RebalanceOptionsStandardScheme extends StandardScheme<RebalanceOptions> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, RebalanceOptions struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, RebalanceOptions struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -512,7 +512,7 @@ public class RebalanceOptions implements org.apache.thrift.TBase<RebalanceOption
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, RebalanceOptions struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, RebalanceOptions struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -548,7 +548,7 @@ public class RebalanceOptions implements org.apache.thrift.TBase<RebalanceOption
   private static class RebalanceOptionsTupleScheme extends TupleScheme<RebalanceOptions> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, RebalanceOptions struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, RebalanceOptions struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       BitSet optionals = new BitSet();
       if (struct.is_set_wait_secs()) {
@@ -573,7 +573,7 @@ public class RebalanceOptions implements org.apache.thrift.TBase<RebalanceOption
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, RebalanceOptions struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, RebalanceOptions struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       BitSet incoming = iprot.readBitSet(3);
       if (incoming.get(0)) {

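The hunks above are cosmetic: fully-qualified org.apache.thrift.TException references collapse to the imported TException, and the @Generated date reflects a re-run of the Thrift compiler, so serialization behavior is unchanged. As a minimal round-trip sketch of the read()/write() pair (not part of the patch, assuming the stock Thrift 0.9.x API and the set_wait_secs setter Thrift generates for the wait_secs field):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;

    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TCompactProtocol;
    import org.apache.thrift.transport.TIOStreamTransport;

    import backtype.storm.generated.RebalanceOptions;

    public class RebalanceOptionsRoundTrip {
        public static void main(String[] args) throws TException {
            RebalanceOptions opts = new RebalanceOptions();
            opts.set_wait_secs(30); // optional field; afterwards is_set_wait_secs() is true

            // Serialize with the same compact protocol that writeObject() uses above.
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            opts.write(new TCompactProtocol(new TIOStreamTransport(bos)));

            // Deserialize into a fresh struct, mirroring readObject().
            RebalanceOptions copy = new RebalanceOptions();
            copy.read(new TCompactProtocol(
                    new TIOStreamTransport(new ByteArrayInputStream(bos.toByteArray()))));

            System.out.println(copy.equals(opts)); // true if the round trip preserved the field
        }
    }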
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/ShellComponent.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/ShellComponent.java b/jstorm-core/src/main/java/backtype/storm/generated/ShellComponent.java
index 1f9e827..adec27e 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/ShellComponent.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/ShellComponent.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class ShellComponent implements org.apache.thrift.TBase<ShellComponent, ShellComponent._Fields>, java.io.Serializable, Cloneable, Comparable<ShellComponent> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ShellComponent");
 
@@ -337,11 +337,11 @@ public class ShellComponent implements org.apache.thrift.TBase<ShellComponent, S
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -369,7 +369,7 @@ public class ShellComponent implements org.apache.thrift.TBase<ShellComponent, S
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     // check for sub-struct validity
   }
@@ -377,7 +377,7 @@ public class ShellComponent implements org.apache.thrift.TBase<ShellComponent, S
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -385,7 +385,7 @@ public class ShellComponent implements org.apache.thrift.TBase<ShellComponent, S
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -398,7 +398,7 @@ public class ShellComponent implements org.apache.thrift.TBase<ShellComponent, S
 
   private static class ShellComponentStandardScheme extends StandardScheme<ShellComponent> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, ShellComponent struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, ShellComponent struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -433,7 +433,7 @@ public class ShellComponent implements org.apache.thrift.TBase<ShellComponent, S
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, ShellComponent struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, ShellComponent struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -462,7 +462,7 @@ public class ShellComponent implements org.apache.thrift.TBase<ShellComponent, S
   private static class ShellComponentTupleScheme extends TupleScheme<ShellComponent> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, ShellComponent struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, ShellComponent struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       BitSet optionals = new BitSet();
       if (struct.is_set_execution_command()) {
@@ -481,7 +481,7 @@ public class ShellComponent implements org.apache.thrift.TBase<ShellComponent, S
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, ShellComponent struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, ShellComponent struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       BitSet incoming = iprot.readBitSet(2);
       if (incoming.get(0)) {

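ShellComponent gets the same mechanical cleanup. One detail worth noting in the tuple schemes above: write() emits a BitSet header so that only fields that are actually set get encoded, and read() consumes the matching header (readBitSet(2) here, since ShellComponent carries two optional fields). A small standalone sketch of that encoding idea, independent of Thrift:

    import java.util.BitSet;

    public class OptionalsBitSetDemo {
        public static void main(String[] args) {
            // Mirror of the pattern in ShellComponentTupleScheme.write(): one bit
            // per optional field, followed by only the fields whose bit is set.
            BitSet optionals = new BitSet();
            String executionCommand = "python";
            String script = null; // unset field: its bit stays 0 and it is skipped
            if (executionCommand != null) optionals.set(0);
            if (script != null) optionals.set(1);
            System.out.println(optionals); // {0} -> only execution_command gets encoded
        }
    }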

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/Cluster.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/Cluster.java b/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/Cluster.java
index f739087..0bb1bb7 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/Cluster.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/Cluster.java
@@ -32,7 +32,6 @@ import com.alibaba.jstorm.daemon.supervisor.SupervisorInfo;
 import com.alibaba.jstorm.schedule.Assignment;
 import com.alibaba.jstorm.task.TaskInfo;
 import com.alibaba.jstorm.task.error.TaskError;
-import com.alibaba.jstorm.task.heartbeat.TaskHeartbeat;
 import com.alibaba.jstorm.utils.TimeUtils;
 
 /**
@@ -61,6 +60,8 @@ public class Cluster {
     public static final String METRIC_ROOT = "metrics";
 
     public static final String LAST_ERROR = "last_error";
+    public static final String NIMBUS_SLAVE_DETAIL_ROOT= "nimbus_slave_detail";
+    public static final String BACKPRESSURE_ROOT = "backpressure";
 
     public static final String ASSIGNMENTS_SUBTREE;
     public static final String ASSIGNMENTS_BAK_SUBTREE;
@@ -72,6 +73,8 @@ public class Cluster {
     public static final String MASTER_SUBTREE;
     public static final String NIMBUS_SLAVE_SUBTREE;
     public static final String METRIC_SUBTREE;
+    public static final String NIMBUS_SLAVE_DETAIL_SUBTREE;
+    public static final String BACKPRESSURE_SUBTREE;
 
     static {
         ASSIGNMENTS_SUBTREE = ZK_SEPERATOR + ASSIGNMENTS_ROOT;
@@ -84,6 +87,8 @@ public class Cluster {
         MASTER_SUBTREE = ZK_SEPERATOR + MASTER_ROOT;
         NIMBUS_SLAVE_SUBTREE = ZK_SEPERATOR + NIMBUS_SLAVE_ROOT;
         METRIC_SUBTREE = ZK_SEPERATOR + METRIC_ROOT;
+        NIMBUS_SLAVE_DETAIL_SUBTREE = ZK_SEPERATOR + NIMBUS_SLAVE_DETAIL_ROOT;
+        BACKPRESSURE_SUBTREE = ZK_SEPERATOR + BACKPRESSURE_ROOT;
     }
 
     public static String supervisor_path(String id) {
@@ -106,10 +111,6 @@ public class Cluster {
         return TASKBEATS_SUBTREE + ZK_SEPERATOR + topology_id;
     }
 
-    public static String taskbeat_path(String topology_id, int task_id) {
-        return taskbeat_storm_root(topology_id) + ZK_SEPERATOR + task_id;
-    }
-
     public static String taskerror_storm_root(String topology_id) {
         return TASKERRORS_SUBTREE + ZK_SEPERATOR + topology_id;
     }
@@ -130,97 +131,71 @@ public class Cluster {
         return ASSIGNMENTS_BAK_SUBTREE + ZK_SEPERATOR + id;
     }
 
+    public static String backpressure_path(String topology_id) {
+        return BACKPRESSURE_SUBTREE + ZK_SEPERATOR + topology_id;
+    }
+
     @SuppressWarnings("rawtypes")
-    public static StormClusterState mk_storm_cluster_state(
-            Map cluster_state_spec) throws Exception {
+    public static StormClusterState mk_storm_cluster_state(Map cluster_state_spec) throws Exception {
         return new StormZkClusterState(cluster_state_spec);
     }
 
-    public static StormClusterState mk_storm_cluster_state(
-            ClusterState cluster_state_spec) throws Exception {
+    public static StormClusterState mk_storm_cluster_state(ClusterState cluster_state_spec) throws Exception {
         return new StormZkClusterState(cluster_state_spec);
     }
 
-    public static Map<Integer, TaskInfo> get_all_taskInfo(
-            StormClusterState zkCluster, String topologyId) throws Exception {
-        return  zkCluster.task_all_info(topologyId);
+    public static Map<Integer, TaskInfo> get_all_taskInfo(StormClusterState zkCluster, String topologyId) throws Exception {
+        return zkCluster.task_all_info(topologyId);
     }
-    
-    
-    public static Map<Integer, String> get_all_task_component(
-    		StormClusterState zkCluster, String topologyId, 
-    		Map<Integer, TaskInfo> taskInfoMap) throws Exception {
+
+    public static Map<Integer, String> get_all_task_component(StormClusterState zkCluster, String topologyId, Map<Integer, TaskInfo> taskInfoMap)
+            throws Exception {
         if (taskInfoMap == null) {
             taskInfoMap = get_all_taskInfo(zkCluster, topologyId);
         }
-        
+
         if (taskInfoMap == null) {
             return null;
         }
-        
+
         return Common.getTaskToComponent(taskInfoMap);
     }
-    
-    public static  Map<Integer, String> get_all_task_type(
-    		StormClusterState zkCluster, String topologyId, 
-    		Map<Integer, TaskInfo> taskInfoMap) throws Exception {
+
+    public static Map<Integer, String> get_all_task_type(StormClusterState zkCluster, String topologyId, Map<Integer, TaskInfo> taskInfoMap) throws Exception {
         if (taskInfoMap == null) {
             taskInfoMap = get_all_taskInfo(zkCluster, topologyId);
         }
-        
+
         if (taskInfoMap == null) {
             return null;
         }
-        
-        return Common.getTaskToType(taskInfoMap);
-    }
-
-    public static Map<String, TaskHeartbeat> get_all_task_heartbeat(
-            StormClusterState zkCluster, String topologyId) throws Exception {
-        Map<String, TaskHeartbeat> ret = new HashMap<String, TaskHeartbeat>();
-
-        List<String> taskList = zkCluster.heartbeat_tasks(topologyId);
-        for (String taskId : taskList) {
-            TaskHeartbeat hb =
-                    zkCluster.task_heartbeat(topologyId,
-                            Integer.valueOf(taskId));
-            if (hb == null) {
-                LOG.error("Failed to get hearbeat of " + topologyId + ":"
-                        + taskId);
-                continue;
-            }
 
-            ret.put(taskId, hb);
-        }
-
-        return ret;
+        return Common.getTaskToType(taskInfoMap);
     }
 
     /**
-     * if one topology's name equal the input storm_name, then return the
-     * topology id, otherwise return null
+     * if one topology's name equal the input storm_name, then return the topology id, otherwise return null
      * 
      * @param zkCluster
      * @param storm_name
      * @return
      * @throws Exception
      */
-    public static String get_topology_id(StormClusterState zkCluster,
-            String storm_name) throws Exception {
+    public static String get_topology_id(StormClusterState zkCluster, String storm_name) throws Exception {
         List<String> active_storms = zkCluster.active_storms();
         String rtn = null;
         if (active_storms != null) {
             for (String topology_id : active_storms) {
-                
+
                 if (topology_id.indexOf(storm_name) < 0) {
                     continue;
                 }
-
-                String zkTopologyName = Common.topologyIdToName(topology_id);
-                if (storm_name.endsWith(zkTopologyName)) {
-                    return topology_id;
+                StormBase base = zkCluster.storm_base(topology_id, null);
+                if (base != null && storm_name.equals(Common.getTopologyNameById(topology_id))) {
+                    rtn = topology_id;
+                    break;
                 }
-                
+
             }
         }
         return rtn;
@@ -233,8 +208,7 @@ public class Cluster {
      * @return <topology_id, StormBase>
      * @throws Exception
      */
-    public static HashMap<String, StormBase> get_all_StormBase(
-            StormClusterState zkCluster) throws Exception {
+    public static HashMap<String, StormBase> get_all_StormBase(StormClusterState zkCluster) throws Exception {
         HashMap<String, StormBase> rtn = new HashMap<String, StormBase>();
         List<String> active_storms = zkCluster.active_storms();
         if (active_storms != null) {
@@ -253,25 +227,20 @@ public class Cluster {
      * 
      * @param stormClusterState
      * @param callback
-     * @return Map<String, SupervisorInfo> String: supervisorId SupervisorInfo:
-     *         [time-secs hostname worker-ports uptime-secs]
+     * @return Map<String, SupervisorInfo> String: supervisorId SupervisorInfo: [time-secs hostname worker-ports uptime-secs]
      * @throws Exception
      */
-    public static Map<String, SupervisorInfo> get_all_SupervisorInfo(
-            StormClusterState stormClusterState, RunnableCallback callback)
-            throws Exception {
+    public static Map<String, SupervisorInfo> get_all_SupervisorInfo(StormClusterState stormClusterState, RunnableCallback callback) throws Exception {
 
         Map<String, SupervisorInfo> rtn = new TreeMap<String, SupervisorInfo>();
         // get /ZK/supervisors
         List<String> supervisorIds = stormClusterState.supervisors(callback);
         if (supervisorIds != null) {
-            for (Iterator<String> iter = supervisorIds.iterator(); iter
-                    .hasNext();) {
+            for (Iterator<String> iter = supervisorIds.iterator(); iter.hasNext();) {
 
                 String supervisorId = iter.next();
                 // get /supervisors/supervisorid
-                SupervisorInfo supervisorInfo =
-                        stormClusterState.supervisor_info(supervisorId);
+                SupervisorInfo supervisorInfo = stormClusterState.supervisor_info(supervisorId);
                 if (supervisorInfo == null) {
                     LOG.warn("Failed to get SupervisorInfo of " + supervisorId);
                 } else {
@@ -286,9 +255,7 @@ public class Cluster {
         return rtn;
     }
 
-    public static Map<String, Assignment> get_all_assignment(
-            StormClusterState stormClusterState, RunnableCallback callback)
-            throws Exception {
+    public static Map<String, Assignment> get_all_assignment(StormClusterState stormClusterState, RunnableCallback callback) throws Exception {
         Map<String, Assignment> ret = new HashMap<String, Assignment>();
 
         // get /assignments {topology_id}
@@ -300,12 +267,10 @@ public class Cluster {
 
         for (String topology_id : assignments) {
 
-            Assignment assignment =
-                    stormClusterState.assignment_info(topology_id, callback);
+            Assignment assignment = stormClusterState.assignment_info(topology_id, callback);
 
             if (assignment == null) {
-                LOG.error("Failed to get Assignment of " + topology_id
-                        + " from ZK");
+                LOG.error("Failed to get Assignment of " + topology_id + " from ZK");
                 continue;
             }
 
@@ -315,8 +280,7 @@ public class Cluster {
         return ret;
     }
 
-    public static Map<String, String> get_all_nimbus_slave(
-            StormClusterState stormClusterState) throws Exception {
+    public static Map<String, String> get_all_nimbus_slave(StormClusterState stormClusterState) throws Exception {
         List<String> hosts = stormClusterState.get_nimbus_slaves();
         if (hosts == null || hosts.size() == 0) {
             return null;
@@ -331,11 +295,8 @@ public class Cluster {
         return ret;
     }
 
-    public static String get_supervisor_hostname(
-            StormClusterState stormClusterState, String supervisorId)
-            throws Exception {
-        SupervisorInfo supervisorInfo =
-                stormClusterState.supervisor_info(supervisorId);
+    public static String get_supervisor_hostname(StormClusterState stormClusterState, String supervisorId) throws Exception {
+        SupervisorInfo supervisorInfo = stormClusterState.supervisor_info(supervisorId);
         if (supervisorInfo == null) {
             return null;
         } else {
@@ -343,12 +304,9 @@ public class Cluster {
         }
     }
 
-    public static boolean is_topology_exist_error(
-            StormClusterState stormClusterState, String topologyId)
-            throws Exception {
+    public static boolean is_topology_exist_error(StormClusterState stormClusterState, String topologyId) throws Exception {
 
-        Map<Integer, String> lastErrMap =
-                stormClusterState.topo_lastErr_time(topologyId);
+        Map<Integer, String> lastErrMap = stormClusterState.topo_lastErr_time(topologyId);
         if (lastErrMap == null || lastErrMap.size() == 0) {
             return false;
         }
@@ -365,34 +323,33 @@ public class Cluster {
 
         return false;
     }
-    
-	public static Map<Integer, List<TaskError>> get_all_task_errors(
-			StormClusterState stormClusterState, String topologyId) {
-		Map<Integer, List<TaskError>> ret = new HashMap<Integer, List<TaskError>>();
-		try {
-			List<String> errorTasks = stormClusterState.task_error_ids(topologyId);
-			if (errorTasks == null || errorTasks.size() == 0) {
-				return ret;
-			}
-
-			for (String taskIdStr : errorTasks) {
-				Integer taskId = -1;
-				try {
-					taskId = Integer.valueOf(taskIdStr);
-				}catch(Exception e) {
-					// skip last_error
-					continue;
-				}
-				
-				List<TaskError> taskErrorList = stormClusterState.task_errors(topologyId, taskId);
-				ret.put(taskId, taskErrorList);
-			}
-			return ret;
-		} catch (Exception e) {
-			// TODO Auto-generated catch block
-			return ret;
-		}
-
-	}
+
+    public static Map<Integer, List<TaskError>> get_all_task_errors(StormClusterState stormClusterState, String topologyId) {
+        Map<Integer, List<TaskError>> ret = new HashMap<Integer, List<TaskError>>();
+        try {
+            List<String> errorTasks = stormClusterState.task_error_ids(topologyId);
+            if (errorTasks == null || errorTasks.size() == 0) {
+                return ret;
+            }
+
+            for (String taskIdStr : errorTasks) {
+                Integer taskId = -1;
+                try {
+                    taskId = Integer.valueOf(taskIdStr);
+                } catch (Exception e) {
+                    // skip last_error
+                    continue;
+                }
+
+                List<TaskError> taskErrorList = stormClusterState.task_errors(topologyId, taskId);
+                ret.put(taskId, taskErrorList);
+            }
+            return ret;
+        } catch (Exception e) {
+            // TODO Auto-generated catch block
+            return ret;
+        }
+
+    }
 
 }

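Beyond the reformatting, Cluster.java gains two ZK subtrees (nimbus_slave_detail and backpressure), drops the per-task heartbeat helpers (apparently in favor of the topology-master heartbeat stream introduced in Common.java below), and tightens get_topology_id to match the exact topology name via StormBase instead of the old suffix match. A small illustration of the new path helper, assuming ZK_SEPERATOR is "/":

    import com.alibaba.jstorm.cluster.Cluster;

    public class BackpressurePathDemo {
        public static void main(String[] args) {
            // Hypothetical topology id in the name-counter-timestamp form used by
            // Common.topologyNameToId().
            String topologyId = "demo-1-1445304000";
            // Under the "/" separator assumption this prints /backpressure/demo-1-1445304000.
            System.out.println(Cluster.backpressure_path(topologyId));
        }
    }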
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/ClusterState.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/ClusterState.java b/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/ClusterState.java
index 8cba073..ad88717 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/ClusterState.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/ClusterState.java
@@ -39,8 +39,7 @@ public interface ClusterState {
 
     public byte[] get_data_sync(String path, boolean watch) throws Exception;
 
-    public List<String> get_children(String path, boolean watch)
-            throws Exception;
+    public List<String> get_children(String path, boolean watch) throws Exception;
 
     public void mkdirs(String path) throws Exception;
 

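ClusterState.java only reflows the get_children signature onto one line. For context, this is the interface the Cluster helpers above call into; a hedged usage sketch (the concrete instance would be the ZK-backed DistributedClusterState, diffed next):

    import java.util.List;

    import com.alibaba.jstorm.cluster.Cluster;
    import com.alibaba.jstorm.cluster.ClusterState;

    public class AssignmentLister {
        // Lists topology ids under /assignments without setting a watch.
        static List<String> assignedTopologies(ClusterState state) throws Exception {
            return state.get_children(Cluster.ASSIGNMENTS_SUBTREE, false);
        }
    }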
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/Common.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/Common.java b/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/Common.java
index a9e3e0b..48528d7 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/Common.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/Common.java
@@ -17,35 +17,9 @@
  */
 package com.alibaba.jstorm.cluster;
 
-import java.io.IOException;
-import java.net.URLClassLoader;
-import java.security.InvalidParameterException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-import java.util.Map.Entry;
-import java.util.Set;
-
-import org.apache.log4j.Logger;
-
 import backtype.storm.Config;
 import backtype.storm.Constants;
-import backtype.storm.generated.Bolt;
-import backtype.storm.generated.ComponentCommon;
-import backtype.storm.generated.ComponentObject;
-import backtype.storm.generated.GlobalStreamId;
-import backtype.storm.generated.Grouping;
-import backtype.storm.generated.InvalidTopologyException;
-import backtype.storm.generated.JavaObject;
-import backtype.storm.generated.ShellComponent;
-import backtype.storm.generated.SpoutSpec;
-import backtype.storm.generated.StateSpoutSpec;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.generated.StreamInfo;
+import backtype.storm.generated.*;
 import backtype.storm.metric.SystemBolt;
 import backtype.storm.spout.ShellSpout;
 import backtype.storm.task.IBolt;
@@ -54,17 +28,23 @@ import backtype.storm.task.TopologyContext;
 import backtype.storm.tuple.Fields;
 import backtype.storm.utils.ThriftTopologyUtils;
 import backtype.storm.utils.Utils;
-
 import com.alibaba.jstorm.daemon.worker.WorkerData;
 import com.alibaba.jstorm.schedule.default_assign.DefaultTopologyAssignContext;
-import com.alibaba.jstorm.task.Task;
 import com.alibaba.jstorm.task.TaskInfo;
 import com.alibaba.jstorm.task.acker.Acker;
 import com.alibaba.jstorm.task.group.MkGrouper;
+import com.alibaba.jstorm.task.master.TopologyMaster;
 import com.alibaba.jstorm.utils.JStormUtils;
 import com.alibaba.jstorm.utils.Thrift;
 import com.alibaba.jstorm.utils.TimeUtils;
 import com.google.common.collect.Maps;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.net.URLClassLoader;
+import java.security.InvalidParameterException;
+import java.util.*;
+import java.util.Map.Entry;
 
 /**
  * Base utility function
@@ -75,14 +55,17 @@ import com.google.common.collect.Maps;
  * 
  */
 public class Common {
-    private final static Logger LOG = Logger.getLogger(Common.class);
+    private final static Logger LOG = LoggerFactory.getLogger(Common.class);
+
+    public static final String TOPOLOGY_MASTER_COMPONENT_ID = "__topology_master";
+    public static final String TOPOLOGY_MASTER_HB_STREAM_ID = "__master_task_heartbeat";
+    public static final String TOPOLOGY_MASTER_METRICS_STREAM_ID = "__master_metrics";
+    public static final String TOPOLOGY_MASTER_CONTROL_STREAM_ID = "__master_control_stream";
 
     public static final String ACKER_COMPONENT_ID = Acker.ACKER_COMPONENT_ID;
-    public static final String ACKER_INIT_STREAM_ID =
-            Acker.ACKER_INIT_STREAM_ID;
+    public static final String ACKER_INIT_STREAM_ID = Acker.ACKER_INIT_STREAM_ID;
     public static final String ACKER_ACK_STREAM_ID = Acker.ACKER_ACK_STREAM_ID;
-    public static final String ACKER_FAIL_STREAM_ID =
-            Acker.ACKER_FAIL_STREAM_ID;
+    public static final String ACKER_FAIL_STREAM_ID = Acker.ACKER_FAIL_STREAM_ID;
 
     public static final String SYSTEM_STREAM_ID = "__system";
 
@@ -92,24 +75,20 @@ public class Common {
     public static final String LS_APPROVED_WORKERS = "approved-workers";
     public static final String LS_TASK_CLEANUP_TIMEOUT = "task-cleanup-timeout";
 
-    public static final String compErrorInfo =
-            "ID can only contains a-z, A-Z, 0-9, '-', '_', '.', '$', and should not start with \"__\".";
-    public static final String nameErrorInfo =
-            "Name can only contains a-z, A-Z, 0-9, '-', '_', '.'";
+    public static final String compErrorInfo = "ID can only contains a-z, A-Z, 0-9, '-', '_', '.', '$', and should not start with \"__\".";
+    public static final String nameErrorInfo = "Name can only contains a-z, A-Z, 0-9, '-', '_', '.'";
 
     public static boolean system_id(String id) {
         return Utils.isSystemId(id);
     }
 
-    private static void validate_component(Object obj)
-            throws InvalidTopologyException {
+    private static void validate_component(Object obj) throws InvalidTopologyException {
 
         if (obj instanceof StateSpoutSpec) {
             StateSpoutSpec spec = (StateSpoutSpec) obj;
             for (String id : spec.get_common().get_streams().keySet()) {
                 if (system_id(id) || !charComponentValidate(id)) {
-                    throw new InvalidTopologyException(id
-                            + " is not a valid component id. " + compErrorInfo);
+                    throw new InvalidTopologyException(id + " is not a valid component id. " + compErrorInfo);
                 }
             }
 
@@ -117,16 +96,14 @@ public class Common {
             SpoutSpec spec = (SpoutSpec) obj;
             for (String id : spec.get_common().get_streams().keySet()) {
                 if (system_id(id) || !charComponentValidate(id)) {
-                    throw new InvalidTopologyException(id
-                            + " is not a valid component id. " + compErrorInfo);
+                    throw new InvalidTopologyException(id + " is not a valid component id. " + compErrorInfo);
                 }
             }
         } else if (obj instanceof Bolt) {
             Bolt spec = (Bolt) obj;
             for (String id : spec.get_common().get_streams().keySet()) {
                 if (system_id(id) || !charComponentValidate(id)) {
-                    throw new InvalidTopologyException(id
-                            + " is not a valid component id. " + compErrorInfo);
+                    throw new InvalidTopologyException(id + " is not a valid component id. " + compErrorInfo);
                 }
             }
         } else {
@@ -136,8 +113,7 @@ public class Common {
     }
 
     public static String topologyNameToId(String topologyName, int counter) {
-        return topologyName + "-" + counter + "-"
-                + TimeUtils.current_time_secs();
+        return topologyName + "-" + counter + "-" + TimeUtils.current_time_secs();
     }
 
     public static String getTopologyNameById(String topologyId) {
@@ -151,14 +127,12 @@ public class Common {
     }
 
     /**
-     * Convert topologyId to topologyName. TopologyId =
-     * topoloygName-counter-timeStamp
+     * Convert topologyId to topologyName. TopologyId = topoloygName-counter-timeStamp
      * 
      * @param topologyId
      * @return
      */
-    public static String topologyIdToName(String topologyId)
-            throws InvalidTopologyException {
+    public static String topologyIdToName(String topologyId) throws InvalidTopologyException {
         String ret = null;
         int index = topologyId.lastIndexOf('-');
         if (index != -1 && index > 2) {
@@ -166,17 +140,14 @@ public class Common {
             if (index != -1 && index > 0)
                 ret = topologyId.substring(0, index);
             else
-                throw new InvalidTopologyException(topologyId
-                        + " is not a valid topologyId");
+                throw new InvalidTopologyException(topologyId + " is not a valid topologyId");
         } else
-            throw new InvalidTopologyException(topologyId
-                    + " is not a valid topologyId");
+            throw new InvalidTopologyException(topologyId + " is not a valid topologyId");
         return ret;
     }
 
     /**
-     * Validation of topology name chars. Only alpha char, number, '-', '_', '.'
-     * are valid.
+     * Validation of topology name chars. Only alpha char, number, '-', '_', '.' are valid.
      * 
      * @return
      */
@@ -185,8 +156,7 @@ public class Common {
     }
 
     /**
-     * Validation of topology component chars. Only alpha char, number, '-',
-     * '_', '.', '$' are valid.
+     * Validation of topology component chars. Only alpha char, number, '-', '_', '.', '$' are valid.
      * 
      * @return
      */
@@ -201,12 +171,10 @@ public class Common {
      * @throws InvalidTopologyException
      */
     @SuppressWarnings("unchecked")
-    public static void validate_ids(StormTopology topology, String topologyId)
-            throws InvalidTopologyException {
+    public static void validate_ids(StormTopology topology, String topologyId) throws InvalidTopologyException {
         String topologyName = topologyIdToName(topologyId);
         if (!charValidate(topologyName)) {
-            throw new InvalidTopologyException(topologyName
-                    + " is not a valid topology name. " + nameErrorInfo);
+            throw new InvalidTopologyException(topologyName + " is not a valid topology name. " + nameErrorInfo);
         }
 
         List<String> list = new ArrayList<String>();
@@ -220,9 +188,7 @@ public class Common {
 
                 for (String id : commids) {
                     if (system_id(id) || !charComponentValidate(id)) {
-                        throw new InvalidTopologyException(id
-                                + " is not a valid component id. "
-                                + compErrorInfo);
+                        throw new InvalidTopologyException(id + " is not a valid component id. " + compErrorInfo);
                     }
                 }
 
@@ -236,19 +202,16 @@ public class Common {
 
         List<String> offending = JStormUtils.getRepeat(list);
         if (offending.isEmpty() == false) {
-            throw new InvalidTopologyException("Duplicate component ids: "
-                    + offending);
+            throw new InvalidTopologyException("Duplicate component ids: " + offending);
         }
 
     }
 
-    private static void validate_component_inputs(Object obj)
-            throws InvalidTopologyException {
+    private static void validate_component_inputs(Object obj) throws InvalidTopologyException {
         if (obj instanceof StateSpoutSpec) {
             StateSpoutSpec spec = (StateSpoutSpec) obj;
             if (!spec.get_common().get_inputs().isEmpty()) {
-                throw new InvalidTopologyException(
-                        "May not declare inputs for a spout");
+                throw new InvalidTopologyException("May not declare inputs for a spout");
             }
 
         }
@@ -256,22 +219,18 @@ public class Common {
         if (obj instanceof SpoutSpec) {
             SpoutSpec spec = (SpoutSpec) obj;
             if (!spec.get_common().get_inputs().isEmpty()) {
-                throw new InvalidTopologyException(
-                        "May not declare inputs for a spout");
+                throw new InvalidTopologyException("May not declare inputs for a spout");
             }
         }
     }
 
     /**
-     * Validate the topology 1. component id name is valid or not 2. check some
-     * spout's input is empty or not
+     * Validate the topology 1. component id name is valid or not 2. check some spout's input is empty or not
      * 
      * @param topology
      * @throws InvalidTopologyException
      */
-    public static void validate_basic(StormTopology topology,
-            Map<Object, Object> totalStormConf, String topologyid)
-            throws InvalidTopologyException {
+    public static void validate_basic(StormTopology topology, Map<Object, Object> totalStormConf, String topologyid) throws InvalidTopologyException {
         validate_ids(topology, topologyid);
 
         for (StormTopology._Fields field : Thrift.SPOUT_FIELDS) {
@@ -285,23 +244,15 @@ public class Common {
 
         }
 
-        Integer workerNum =
-                JStormUtils.parseInt(totalStormConf
-                        .get(Config.TOPOLOGY_WORKERS));
+        Integer workerNum = JStormUtils.parseInt(totalStormConf.get(Config.TOPOLOGY_WORKERS));
         if (workerNum == null || workerNum <= 0) {
-            String errMsg =
-                    "There are no Config.TOPOLOGY_WORKERS in configuration of "
-                            + topologyid;
+            String errMsg = "There are no Config.TOPOLOGY_WORKERS in configuration of " + topologyid;
             throw new InvalidParameterException(errMsg);
         }
 
-        Integer ackerNum =
-                JStormUtils.parseInt(totalStormConf
-                        .get(Config.TOPOLOGY_ACKER_EXECUTORS));
+        Integer ackerNum = JStormUtils.parseInt(totalStormConf.get(Config.TOPOLOGY_ACKER_EXECUTORS));
         if (ackerNum != null && ackerNum < 0) {
-            String errMsg =
-                    "Invalide Config.TOPOLOGY_ACKERS in configuration of "
-                            + topologyid;
+            String errMsg = "Invalide Config.TOPOLOGY_ACKERS in configuration of " + topologyid;
             throw new InvalidParameterException(errMsg);
         }
 
@@ -310,23 +261,139 @@ public class Common {
     /**
      * Generate acker's input Map<GlobalStreamId, Grouping>
      * 
-     * for spout <GlobalStreamId(spoutId, ACKER_INIT_STREAM_ID), ...> for bolt
-     * <GlobalStreamId(boltId, ACKER_ACK_STREAM_ID), ...>
-     * <GlobalStreamId(boltId, ACKER_FAIL_STREAM_ID), ...>
+     * for spout <GlobalStreamId(spoutId, ACKER_INIT_STREAM_ID), ...> for bolt <GlobalStreamId(boltId, ACKER_ACK_STREAM_ID), ...> <GlobalStreamId(boltId,
+     * ACKER_FAIL_STREAM_ID), ...>
+     * 
+     * @param topology
+     * @return
+     */
+    public static Map<GlobalStreamId, Grouping> topoMasterInputs(StormTopology topology) {
+        GlobalStreamId stream = null;
+        Grouping group = null;
+
+        Map<GlobalStreamId, Grouping> spout_inputs = new HashMap<GlobalStreamId, Grouping>();
+        Map<String, SpoutSpec> spout_ids = topology.get_spouts();
+        for (Entry<String, SpoutSpec> spout : spout_ids.entrySet()) {
+            String id = spout.getKey();
+
+            stream = new GlobalStreamId(id, TOPOLOGY_MASTER_HB_STREAM_ID);
+            group = Thrift.mkAllGrouping();
+            spout_inputs.put(stream, group);
+
+            stream = new GlobalStreamId(id, TOPOLOGY_MASTER_METRICS_STREAM_ID);
+            group = Thrift.mkAllGrouping();
+            spout_inputs.put(stream, group);
+
+            stream = new GlobalStreamId(id, TOPOLOGY_MASTER_CONTROL_STREAM_ID);
+            group = Thrift.mkAllGrouping();
+            spout_inputs.put(stream, group);
+        }
+
+        Map<String, Bolt> bolt_ids = topology.get_bolts();
+        Map<GlobalStreamId, Grouping> bolt_inputs = new HashMap<GlobalStreamId, Grouping>();
+        for (Entry<String, Bolt> bolt : bolt_ids.entrySet()) {
+            String id = bolt.getKey();
+            stream = new GlobalStreamId(id, TOPOLOGY_MASTER_HB_STREAM_ID);
+            group = Thrift.mkAllGrouping();
+            bolt_inputs.put(stream, group);
+
+            stream = new GlobalStreamId(id, TOPOLOGY_MASTER_METRICS_STREAM_ID);
+            group = Thrift.mkAllGrouping();
+            bolt_inputs.put(stream, group);
+
+            stream = new GlobalStreamId(id, TOPOLOGY_MASTER_CONTROL_STREAM_ID);
+            group = Thrift.mkAllGrouping();
+            bolt_inputs.put(stream, group);
+        }
+
+        Map<GlobalStreamId, Grouping> himself_inputs = new HashMap<GlobalStreamId, Grouping>();
+        stream = new GlobalStreamId(TOPOLOGY_MASTER_COMPONENT_ID, TOPOLOGY_MASTER_HB_STREAM_ID);
+        group = Thrift.mkAllGrouping();
+        himself_inputs.put(stream, group);
+
+        stream = new GlobalStreamId(TOPOLOGY_MASTER_COMPONENT_ID, TOPOLOGY_MASTER_METRICS_STREAM_ID);
+        group = Thrift.mkAllGrouping();
+        himself_inputs.put(stream, group);
+        
+        Map<GlobalStreamId, Grouping> allInputs = new HashMap<GlobalStreamId, Grouping>();
+        allInputs.putAll(bolt_inputs);
+        allInputs.putAll(spout_inputs);
+        allInputs.putAll(himself_inputs);
+        return allInputs;
+    }
+
+    /**
+     * Add topology master bolt to topology
+     */
+    public static void addTopologyMaster(Map stormConf, StormTopology ret) {
+        // generate outputs
+        HashMap<String, StreamInfo> outputs = new HashMap<String, StreamInfo>();
+
+        List<String> list = JStormUtils.mk_list(TopologyMaster.FILED_CTRL_EVENT);
+        outputs.put(TOPOLOGY_MASTER_CONTROL_STREAM_ID, Thrift.outputFields(list));
+        list = JStormUtils.mk_list(TopologyMaster.FIELD_METRIC_WORKER, TopologyMaster.FIELD_METRIC_METRICS);
+        outputs.put(TOPOLOGY_MASTER_METRICS_STREAM_ID, Thrift.outputFields(list));
+        list = JStormUtils.mk_list(TopologyMaster.FILED_HEARBEAT_EVENT);
+        outputs.put(TOPOLOGY_MASTER_HB_STREAM_ID, Thrift.outputFields(list));
+
+        IBolt topologyMaster = new TopologyMaster();
+
+        // generate inputs
+        Map<GlobalStreamId, Grouping> inputs = topoMasterInputs(ret);
+
+        // generate topology master which will be stored in topology
+        Bolt topologyMasterBolt = Thrift.mkBolt(inputs, topologyMaster, outputs, 1);
+
+        // add output stream to spout/bolt
+        for (Entry<String, Bolt> e : ret.get_bolts().entrySet()) {
+            Bolt bolt = e.getValue();
+            ComponentCommon common = bolt.get_common();
+            List<String> fields = JStormUtils.mk_list(TopologyMaster.FIELD_METRIC_WORKER, TopologyMaster.FIELD_METRIC_METRICS);
+            common.put_to_streams(TOPOLOGY_MASTER_METRICS_STREAM_ID, Thrift.directOutputFields(fields));
+            fields = JStormUtils.mk_list(TopologyMaster.FILED_HEARBEAT_EVENT);
+            common.put_to_streams(TOPOLOGY_MASTER_HB_STREAM_ID, Thrift.directOutputFields(fields));
+            fields = JStormUtils.mk_list(TopologyMaster.FILED_CTRL_EVENT);
+            common.put_to_streams(TOPOLOGY_MASTER_CONTROL_STREAM_ID, Thrift.directOutputFields(fields));
+
+            GlobalStreamId stream = new GlobalStreamId(TOPOLOGY_MASTER_COMPONENT_ID, TOPOLOGY_MASTER_CONTROL_STREAM_ID);
+            common.put_to_inputs(stream, Thrift.mkDirectGrouping());
+            bolt.set_common(common);
+        }
+
+        for (Entry<String, SpoutSpec> kv : ret.get_spouts().entrySet()) {
+            SpoutSpec spout = kv.getValue();
+            ComponentCommon common = spout.get_common();
+            List<String> fields = JStormUtils.mk_list(TopologyMaster.FIELD_METRIC_WORKER, TopologyMaster.FIELD_METRIC_METRICS);
+            common.put_to_streams(TOPOLOGY_MASTER_METRICS_STREAM_ID, Thrift.directOutputFields(fields));
+            fields = JStormUtils.mk_list(TopologyMaster.FILED_HEARBEAT_EVENT);
+            common.put_to_streams(TOPOLOGY_MASTER_HB_STREAM_ID, Thrift.directOutputFields(fields));
+            fields = JStormUtils.mk_list(TopologyMaster.FILED_CTRL_EVENT);
+            common.put_to_streams(TOPOLOGY_MASTER_CONTROL_STREAM_ID, Thrift.directOutputFields(fields));
+
+            GlobalStreamId stream = new GlobalStreamId(TOPOLOGY_MASTER_COMPONENT_ID, TOPOLOGY_MASTER_CONTROL_STREAM_ID);
+            common.put_to_inputs(stream, Thrift.mkDirectGrouping());
+            spout.set_common(common);
+        }
+
+        ret.put_to_bolts(TOPOLOGY_MASTER_COMPONENT_ID, topologyMasterBolt);
+    }
+
+    /**
+     * Generate acker's input Map<GlobalStreamId, Grouping>
+     * 
+     * for spout <GlobalStreamId(spoutId, ACKER_INIT_STREAM_ID), ...> for bolt <GlobalStreamId(boltId, ACKER_ACK_STREAM_ID), ...> <GlobalStreamId(boltId,
+     * ACKER_FAIL_STREAM_ID), ...>
      * 
      * @param topology
      * @return
      */
-    public static Map<GlobalStreamId, Grouping> acker_inputs(
-            StormTopology topology) {
-        Map<GlobalStreamId, Grouping> spout_inputs =
-                new HashMap<GlobalStreamId, Grouping>();
+    public static Map<GlobalStreamId, Grouping> acker_inputs(StormTopology topology) {
+        Map<GlobalStreamId, Grouping> spout_inputs = new HashMap<GlobalStreamId, Grouping>();
         Map<String, SpoutSpec> spout_ids = topology.get_spouts();
         for (Entry<String, SpoutSpec> spout : spout_ids.entrySet()) {
             String id = spout.getKey();
 
-            GlobalStreamId stream =
-                    new GlobalStreamId(id, ACKER_INIT_STREAM_ID);
+            GlobalStreamId stream = new GlobalStreamId(id, ACKER_INIT_STREAM_ID);
 
             Grouping group = Thrift.mkFieldsGrouping(JStormUtils.mk_list("id"));
 
@@ -334,27 +401,21 @@ public class Common {
         }
 
         Map<String, Bolt> bolt_ids = topology.get_bolts();
-        Map<GlobalStreamId, Grouping> bolt_inputs =
-                new HashMap<GlobalStreamId, Grouping>();
+        Map<GlobalStreamId, Grouping> bolt_inputs = new HashMap<GlobalStreamId, Grouping>();
         for (Entry<String, Bolt> bolt : bolt_ids.entrySet()) {
             String id = bolt.getKey();
 
-            GlobalStreamId streamAck =
-                    new GlobalStreamId(id, ACKER_ACK_STREAM_ID);
-            Grouping groupAck =
-                    Thrift.mkFieldsGrouping(JStormUtils.mk_list("id"));
+            GlobalStreamId streamAck = new GlobalStreamId(id, ACKER_ACK_STREAM_ID);
+            Grouping groupAck = Thrift.mkFieldsGrouping(JStormUtils.mk_list("id"));
 
-            GlobalStreamId streamFail =
-                    new GlobalStreamId(id, ACKER_FAIL_STREAM_ID);
-            Grouping groupFail =
-                    Thrift.mkFieldsGrouping(JStormUtils.mk_list("id"));
+            GlobalStreamId streamFail = new GlobalStreamId(id, ACKER_FAIL_STREAM_ID);
+            Grouping groupFail = Thrift.mkFieldsGrouping(JStormUtils.mk_list("id"));
 
             bolt_inputs.put(streamAck, groupAck);
             bolt_inputs.put(streamFail, groupFail);
         }
 
-        Map<GlobalStreamId, Grouping> allInputs =
-                new HashMap<GlobalStreamId, Grouping>();
+        Map<GlobalStreamId, Grouping> allInputs = new HashMap<GlobalStreamId, Grouping>();
         allInputs.putAll(bolt_inputs);
         allInputs.putAll(spout_inputs);
         return allInputs;
@@ -397,12 +458,10 @@ public class Common {
 
             List<String> ackList = JStormUtils.mk_list("id", "ack-val");
 
-            common.put_to_streams(ACKER_ACK_STREAM_ID,
-                    Thrift.outputFields(ackList));
+            common.put_to_streams(ACKER_ACK_STREAM_ID, Thrift.outputFields(ackList));
 
             List<String> failList = JStormUtils.mk_list("id");
-            common.put_to_streams(ACKER_FAIL_STREAM_ID,
-                    Thrift.outputFields(failList));
+            common.put_to_streams(ACKER_FAIL_STREAM_ID, Thrift.outputFields(failList));
 
             bolt.set_common(common);
         }
@@ -414,17 +473,13 @@ public class Common {
         for (Entry<String, SpoutSpec> kv : ret.get_spouts().entrySet()) {
             SpoutSpec bolt = kv.getValue();
             ComponentCommon common = bolt.get_common();
-            List<String> initList =
-                    JStormUtils.mk_list("id", "init-val", "spout-task");
-            common.put_to_streams(ACKER_INIT_STREAM_ID,
-                    Thrift.outputFields(initList));
+            List<String> initList = JStormUtils.mk_list("id", "init-val", "spout-task");
+            common.put_to_streams(ACKER_INIT_STREAM_ID, Thrift.outputFields(initList));
 
-            GlobalStreamId ack_ack =
-                    new GlobalStreamId(ACKER_COMPONENT_ID, ACKER_ACK_STREAM_ID);
+            GlobalStreamId ack_ack = new GlobalStreamId(ACKER_COMPONENT_ID, ACKER_ACK_STREAM_ID);
             common.put_to_inputs(ack_ack, Thrift.mkDirectGrouping());
 
-            GlobalStreamId ack_fail =
-                    new GlobalStreamId(ACKER_COMPONENT_ID, ACKER_FAIL_STREAM_ID);
+            GlobalStreamId ack_fail = new GlobalStreamId(ACKER_COMPONENT_ID, ACKER_FAIL_STREAM_ID);
             common.put_to_inputs(ack_fail, Thrift.mkDirectGrouping());
         }
 
@@ -480,26 +535,21 @@ public class Common {
 
     public static StormTopology add_system_components(StormTopology topology) {
         // generate inputs
-        Map<GlobalStreamId, Grouping> inputs =
-                new HashMap<GlobalStreamId, Grouping>();
+        Map<GlobalStreamId, Grouping> inputs = new HashMap<GlobalStreamId, Grouping>();
 
         // generate outputs
         HashMap<String, StreamInfo> outputs = new HashMap<String, StreamInfo>();
         ArrayList<String> fields = new ArrayList<String>();
 
-        outputs.put(Constants.SYSTEM_TICK_STREAM_ID,
-                Thrift.outputFields(JStormUtils.mk_list("rate_secs")));
-        outputs.put(Constants.METRICS_TICK_STREAM_ID,
-                Thrift.outputFields(JStormUtils.mk_list("interval")));
-        outputs.put(Constants.CREDENTIALS_CHANGED_STREAM_ID,
-                Thrift.outputFields(JStormUtils.mk_list("creds")));
+        outputs.put(Constants.SYSTEM_TICK_STREAM_ID, Thrift.outputFields(JStormUtils.mk_list("rate_secs")));
+        outputs.put(Constants.METRICS_TICK_STREAM_ID, Thrift.outputFields(JStormUtils.mk_list("interval")));
+        outputs.put(Constants.CREDENTIALS_CHANGED_STREAM_ID, Thrift.outputFields(JStormUtils.mk_list("creds")));
 
         // ComponentCommon common = new ComponentCommon(inputs, outputs);
 
         IBolt ackerbolt = new SystemBolt();
 
-        Bolt bolt =
-                Thrift.mkBolt(inputs, ackerbolt, outputs, Integer.valueOf(0));
+        Bolt bolt = Thrift.mkBolt(inputs, ackerbolt, outputs, Integer.valueOf(0));
 
         topology.put_to_bolts(Constants.SYSTEM_COMPONENT_ID, bolt);
 
@@ -539,13 +589,15 @@ public class Common {
     }
 
     @SuppressWarnings("rawtypes")
-    public static StormTopology system_topology(Map storm_conf,
-            StormTopology topology) throws InvalidTopologyException {
+    public static StormTopology system_topology(Map storm_conf, StormTopology topology) throws InvalidTopologyException {
 
         StormTopology ret = topology.deepCopy();
 
         add_acker(storm_conf, ret);
 
+        if(StormConfig.local_mode(storm_conf) == false)
+            addTopologyMaster(storm_conf, ret);
+
         add_metrics_component(ret);
 
         add_system_components(ret);
@@ -562,8 +614,7 @@ public class Common {
      * @return
      */
     @SuppressWarnings("unchecked")
-    public static Map component_conf(Map storm_conf,
-            TopologyContext topology_context, String component_id) {
+    public static Map component_conf(Map storm_conf, TopologyContext topology_context, String component_id) {
         List<Object> to_remove = StormConfig.All_CONFIGS();
         to_remove.remove(Config.TOPOLOGY_DEBUG);
         to_remove.remove(Config.TOPOLOGY_MAX_SPOUT_PENDING);
@@ -572,16 +623,13 @@ public class Common {
 
         Map<Object, Object> componentConf = new HashMap<Object, Object>();
 
-        String jconf =
-                topology_context.getComponentCommon(component_id)
-                        .get_json_conf();
+        String jconf = topology_context.getComponentCommon(component_id).get_json_conf();
         if (jconf != null) {
             componentConf = (Map<Object, Object>) JStormUtils.from_json(jconf);
         }
 
         /**
-         * @@@ Don't know why need remove system configuration from component
-         *     conf? //
+         * @@@ Don't know why need remove system configuration from component conf? //
          */
         // for (Object p : to_remove) {
         // componentConf.remove(p);
@@ -601,8 +649,7 @@ public class Common {
      * @param component_id
      * @return
      */
-    public static Object get_task_object(StormTopology topology,
-            String component_id, URLClassLoader loader) {
+    public static Object get_task_object(StormTopology topology, String component_id, URLClassLoader loader) {
         Map<String, SpoutSpec> spouts = topology.get_spouts();
         Map<String, Bolt> bolts = topology.get_bolts();
         Map<String, StateSpoutSpec> state_spouts = topology.get_state_spouts();
@@ -617,8 +664,7 @@ public class Common {
         }
 
         if (obj == null) {
-            throw new RuntimeException("Could not find " + component_id
-                    + " in " + topology.toString());
+            throw new RuntimeException("Could not find " + component_id + " in " + topology.toString());
         }
 
         Object componentObject = Utils.getSetComponentObject(obj, loader);
@@ -646,43 +692,34 @@ public class Common {
      * @param topology_context
      * @return
      */
-    public static Map<String, Map<String, MkGrouper>> outbound_components(
-            TopologyContext topology_context, WorkerData workerData) {
-        Map<String, Map<String, MkGrouper>> rr =
-                new HashMap<String, Map<String, MkGrouper>>();
+    public static Map<String, Map<String, MkGrouper>> outbound_components(TopologyContext topology_context, WorkerData workerData) {
+        Map<String, Map<String, MkGrouper>> rr = new HashMap<String, Map<String, MkGrouper>>();
 
         // <Stream_id,<component,Grouping>>
-        Map<String, Map<String, Grouping>> output_groupings =
-                topology_context.getThisTargets();
+        Map<String, Map<String, Grouping>> output_groupings = topology_context.getThisTargets();
 
-        for (Entry<String, Map<String, Grouping>> entry : output_groupings
-                .entrySet()) {
+        for (Entry<String, Map<String, Grouping>> entry : output_groupings.entrySet()) {
 
             String stream_id = entry.getKey();
             Map<String, Grouping> component_grouping = entry.getValue();
 
             Fields out_fields = topology_context.getThisOutputFields(stream_id);
 
-            Map<String, MkGrouper> componentGrouper =
-                    new HashMap<String, MkGrouper>();
+            Map<String, MkGrouper> componentGrouper = new HashMap<String, MkGrouper>();
 
             for (Entry<String, Grouping> cg : component_grouping.entrySet()) {
 
                 String component = cg.getKey();
                 Grouping tgrouping = cg.getValue();
 
-                List<Integer> outTasks =
-                        topology_context.getComponentTasks(component);
+                List<Integer> outTasks = topology_context.getComponentTasks(component);
                 // ATTENTION: If topology set one component parallelism as 0
                 // so we don't need send tuple to it
                 if (outTasks.size() > 0) {
-                    MkGrouper grouper =
-                            new MkGrouper(topology_context, out_fields,
-                                    tgrouping, outTasks, stream_id, workerData);
+                    MkGrouper grouper = new MkGrouper(topology_context, out_fields, tgrouping, outTasks, stream_id, workerData);
                     componentGrouper.put(component, grouper);
                 }
-                LOG.info("outbound_components, outTasks=" + outTasks
-                        + " for task-" + topology_context.getThisTaskId());
+                LOG.info("outbound_components, outTasks=" + outTasks + " for task-" + topology_context.getThisTaskId());
             }
             if (componentGrouper.size() > 0) {
                 rr.put(stream_id, componentGrouper);
@@ -696,17 +733,12 @@ public class Common {
      * 
      * @param topology_context
      * @param task_id
-     * @return component's configurations
      */
-    public static Map getComponentMap(DefaultTopologyAssignContext context,
-            Integer task) {
+    public static Map getComponentMap(DefaultTopologyAssignContext context, Integer task) {
         String componentName = context.getTaskToComponent().get(task);
-        ComponentCommon componentCommon =
-                ThriftTopologyUtils.getComponentCommon(
-                        context.getSysTopology(), componentName);
+        ComponentCommon componentCommon = ThriftTopologyUtils.getComponentCommon(context.getSysTopology(), componentName);
 
-        Map componentMap =
-                (Map) JStormUtils.from_json(componentCommon.get_json_conf());
+        Map componentMap = (Map) JStormUtils.from_json(componentCommon.get_json_conf());
         if (componentMap == null) {
             componentMap = Maps.newHashMap();
         }
@@ -714,22 +746,17 @@ public class Common {
     }
 
     /**
-     * get all bolts' inputs and spouts' outputs <Bolt_name, <Input_name>>
-     * <Spout_name, <Output_name>>
+     * get all bolts' inputs and spouts' outputs <Bolt_name, <Input_name>> <Spout_name, <Output_name>>
      * 
      * @param topology_context
      * @return all bolts' inputs and spouts' outputs
      */
-    public static Map<String, Set<String>> buildSpoutOutoputAndBoltInputMap(
-            DefaultTopologyAssignContext context) {
+    public static Map<String, Set<String>> buildSpoutOutoputAndBoltInputMap(DefaultTopologyAssignContext context) {
         Set<String> bolts = context.getRawTopology().get_bolts().keySet();
         Set<String> spouts = context.getRawTopology().get_spouts().keySet();
-        Map<String, Set<String>> relationship =
-                new HashMap<String, Set<String>>();
-        for (Entry<String, Bolt> entry : context.getRawTopology().get_bolts()
-                .entrySet()) {
-            Map<GlobalStreamId, Grouping> inputs =
-                    entry.getValue().get_common().get_inputs();
+        Map<String, Set<String>> relationship = new HashMap<String, Set<String>>();
+        for (Entry<String, Bolt> entry : context.getRawTopology().get_bolts().entrySet()) {
+            Map<GlobalStreamId, Grouping> inputs = entry.getValue().get_common().get_inputs();
             Set<String> input = new HashSet<String>();
             relationship.put(entry.getKey(), input);
             for (Entry<GlobalStreamId, Grouping> inEntry : inputs.entrySet()) {
@@ -759,37 +786,34 @@ public class Common {
 
     public static Map<Integer, String> getTaskToComponent(Map<Integer, TaskInfo> taskInfoMap) {
         Map<Integer, String> ret = new TreeMap<Integer, String>();
-        for (Entry<Integer, TaskInfo> entry :taskInfoMap.entrySet()) {
+        for (Entry<Integer, TaskInfo> entry : taskInfoMap.entrySet()) {
             ret.put(entry.getKey(), entry.getValue().getComponentId());
         }
-        
+
         return ret;
     }
-    
+
     public static Map<Integer, String> getTaskToType(Map<Integer, TaskInfo> taskInfoMap) {
         Map<Integer, String> ret = new TreeMap<Integer, String>();
-        for (Entry<Integer, TaskInfo> entry :taskInfoMap.entrySet()) {
+        for (Entry<Integer, TaskInfo> entry : taskInfoMap.entrySet()) {
             ret.put(entry.getKey(), entry.getValue().getComponentType());
         }
-        
+
         return ret;
     }
-    
-    @SuppressWarnings({"rawtypes", "unchecked"})
-    public static Integer mkTaskMaker(Map<Object, Object> stormConf, 
-                    Map<String, ?> cidSpec, 
-                    Map<Integer, TaskInfo> rtn, 
-                    Integer cnt) {
+
+    @SuppressWarnings({ "rawtypes", "unchecked" })
+    public static Integer mkTaskMaker(Map<Object, Object> stormConf, Map<String, ?> cidSpec, Map<Integer, TaskInfo> rtn, Integer cnt) {
         if (cidSpec == null) {
             LOG.warn("Component map is empty");
             return cnt;
         }
-        
+
         Set<?> entrySet = cidSpec.entrySet();
         for (Iterator<?> it = entrySet.iterator(); it.hasNext();) {
             Entry entry = (Entry) it.next();
             Object obj = entry.getValue();
-            
+
             ComponentCommon common = null;
             String componentType = "bolt";
             if (obj instanceof Bolt) {
@@ -802,22 +826,22 @@ public class Common {
                 common = ((StateSpoutSpec) obj).get_common();
                 componentType = "spout";
             }
-            
+
             if (common == null) {
                 throw new RuntimeException("No ComponentCommon of " + entry.getKey());
             }
-            
+
             int declared = Thrift.parallelismHint(common);
             Integer parallelism = declared;
             // Map tmp = (Map) Utils_clj.from_json(common.get_json_conf());
-            
+
             Map newStormConf = new HashMap(stormConf);
             // newStormConf.putAll(tmp);
             Integer maxParallelism = JStormUtils.parseInt(newStormConf.get(Config.TOPOLOGY_MAX_TASK_PARALLELISM));
             if (maxParallelism != null) {
                 parallelism = Math.min(maxParallelism, declared);
             }
-            
+
             for (int i = 0; i < parallelism; i++) {
                 cnt++;
                 TaskInfo taskInfo = new TaskInfo((String) entry.getKey(), componentType);
@@ -826,20 +850,24 @@ public class Common {
         }
         return cnt;
     }
-    
-    public static Map<Integer, TaskInfo> mkTaskInfo(
-                    Map<Object, Object> stormConf, 
-                    StormTopology sysTopology, 
-                    String topologyid) {
-        
+
+    public static Map<Integer, TaskInfo> mkTaskInfo(Map<Object, Object> stormConf, StormTopology sysTopology, String topologyid) {
+
         // use TreeMap to make task as sequence
         Map<Integer, TaskInfo> rtn = new TreeMap<Integer, TaskInfo>();
-        
+
         Integer count = 0;
         count = mkTaskMaker(stormConf, sysTopology.get_bolts(), rtn, count);
         count = mkTaskMaker(stormConf, sysTopology.get_spouts(), rtn, count);
         count = mkTaskMaker(stormConf, sysTopology.get_state_spouts(), rtn, count);
-        
+
         return rtn;
     }
+
+    public static boolean isSystemComponent(String componentId) {
+        if (componentId.equals(Acker.ACKER_COMPONENT_ID) || componentId.equals(Common.TOPOLOGY_MASTER_COMPONENT_ID)) {
+            return true;
+        }
+        return false;
+    }
 }
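
The new isSystemComponent helper above checks a component id against the acker and topology-master ids. As an illustration only (not part of the commit), a caller could combine it with getTaskToComponent to keep just the user tasks; the class name, method name, and the Common import path below are assumptions:

    import java.util.Map;
    import java.util.Set;
    import java.util.TreeSet;
    import com.alibaba.jstorm.cluster.Common; // package assumed for illustration

    public class TaskFilterSketch {
        // Sketch: drop acker/topology-master tasks from a task-to-component
        // map such as the one built by Common.getTaskToComponent.
        public static Set<Integer> userTaskIds(Map<Integer, String> taskToComponent) {
            Set<Integer> ret = new TreeSet<Integer>();
            for (Map.Entry<Integer, String> entry : taskToComponent.entrySet()) {
                if (!Common.isSystemComponent(entry.getValue())) {
                    ret.add(entry.getKey());
                }
            }
            return ret;
        }
    }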

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/DistributedClusterState.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/DistributedClusterState.java b/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/DistributedClusterState.java
index 2ebce83..3d25a25 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/DistributedClusterState.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/DistributedClusterState.java
@@ -49,8 +49,7 @@ import com.alibaba.jstorm.zk.Zookeeper;
  */
 public class DistributedClusterState implements ClusterState {
 
-    private static Logger LOG = LoggerFactory
-            .getLogger(DistributedClusterState.class);
+    private static Logger LOG = LoggerFactory.getLogger(DistributedClusterState.class);
 
     private Zookeeper zkobj = new Zookeeper();
     private CuratorFramework zk;
@@ -59,8 +58,7 @@ public class DistributedClusterState implements ClusterState {
     /**
      * why run all callbacks, when receive one event
      */
-    private ConcurrentHashMap<UUID, ClusterStateCallback> callbacks =
-            new ConcurrentHashMap<UUID, ClusterStateCallback>();
+    private ConcurrentHashMap<UUID, ClusterStateCallback> callbacks = new ConcurrentHashMap<UUID, ClusterStateCallback>();
 
     private Map<Object, Object> conf;
     private AtomicBoolean active;
@@ -83,16 +81,13 @@ public class DistributedClusterState implements ClusterState {
             public void execute(KeeperState state, EventType type, String path) {
                 if (active.get()) {
                     if (!(state.equals(KeeperState.SyncConnected))) {
-                        LOG.warn("Received event " + state + ":" + type + ":"
-                                + path + " with disconnected Zookeeper.");
+                        LOG.warn("Received event " + state + ":" + type + ":" + path + " with disconnected Zookeeper.");
                     } else {
-                        LOG.info("Received event " + state + ":" + type + ":"
-                                + path);
+                        LOG.info("Received event " + state + ":" + type + ":" + path);
                     }
 
                     if (!type.equals(EventType.None)) {
-                        for (Entry<UUID, ClusterStateCallback> e : callbacks
-                                .entrySet()) {
+                        for (Entry<UUID, ClusterStateCallback> e : callbacks.entrySet()) {
                             ClusterStateCallback fn = e.getValue();
                             fn.execute(type, path);
                         }
@@ -107,17 +102,12 @@ public class DistributedClusterState implements ClusterState {
 
     @SuppressWarnings("unchecked")
     private CuratorFramework mkZk() throws IOException {
-        return zkobj.mkClient(conf,
-                (List<String>) conf.get(Config.STORM_ZOOKEEPER_SERVERS),
-                conf.get(Config.STORM_ZOOKEEPER_PORT), "");
+        return zkobj.mkClient(conf, (List<String>) conf.get(Config.STORM_ZOOKEEPER_SERVERS), conf.get(Config.STORM_ZOOKEEPER_PORT), "");
     }
 
     @SuppressWarnings("unchecked")
-    private CuratorFramework mkZk(WatcherCallBack watcher)
-            throws NumberFormatException, IOException {
-        return zkobj.mkClient(conf,
-                (List<String>) conf.get(Config.STORM_ZOOKEEPER_SERVERS),
-                conf.get(Config.STORM_ZOOKEEPER_PORT),
+    private CuratorFramework mkZk(WatcherCallBack watcher) throws NumberFormatException, IOException {
+        return zkobj.mkClient(conf, (List<String>) conf.get(Config.STORM_ZOOKEEPER_SERVERS), conf.get(Config.STORM_ZOOKEEPER_PORT),
                 String.valueOf(conf.get(Config.STORM_ZOOKEEPER_ROOT)), watcher);
     }
 
@@ -136,8 +126,7 @@ public class DistributedClusterState implements ClusterState {
     }
 
     @Override
-    public List<String> get_children(String path, boolean watch)
-            throws Exception {
+    public List<String> get_children(String path, boolean watch) throws Exception {
         return zkobj.getChildren(zk, path, watch);
     }
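
For reference, mkZk above reads exactly three settings from the conf map. A minimal sketch of those entries (host names, port, and chroot are placeholders, not values from this commit):

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.Map;
    import backtype.storm.Config;

    public class ZkConfSketch {
        public static void main(String[] args) {
            Map<Object, Object> conf = new HashMap<Object, Object>();
            // The three keys DistributedClusterState#mkZk passes to Zookeeper.mkClient:
            conf.put(Config.STORM_ZOOKEEPER_SERVERS, Arrays.asList("zk1.example.com", "zk2.example.com"));
            conf.put(Config.STORM_ZOOKEEPER_PORT, 2181);
            conf.put(Config.STORM_ZOOKEEPER_ROOT, "/jstorm");
        }
    }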
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/StormBase.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/StormBase.java b/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/StormBase.java
index e6438dd..6923ab5 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/StormBase.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/StormBase.java
@@ -37,8 +37,7 @@ public class StormBase implements Serializable {
     private boolean enableMonitor = true;
     private String group;
 
-    public StormBase(String stormName, int lanchTimeSecs, StormStatus status,
-            String group) {
+    public StormBase(String stormName, int lanchTimeSecs, StormStatus status, String group) {
         this.stormName = stormName;
         this.lanchTimeSecs = lanchTimeSecs;
         this.status = status;
@@ -98,9 +97,7 @@ public class StormBase implements Serializable {
         result = prime * result + ((group == null) ? 0 : group.hashCode());
         result = prime * result + lanchTimeSecs;
         result = prime * result + ((status == null) ? 0 : status.hashCode());
-        result =
-                prime * result
-                        + ((stormName == null) ? 0 : stormName.hashCode());
+        result = prime * result + ((stormName == null) ? 0 : stormName.hashCode());
         return result;
     }
 
@@ -137,8 +134,7 @@ public class StormBase implements Serializable {
 
     @Override
     public String toString() {
-        return ToStringBuilder.reflectionToString(this,
-                ToStringStyle.SHORT_PREFIX_STYLE);
+        return ToStringBuilder.reflectionToString(this, ToStringStyle.SHORT_PREFIX_STYLE);
     }
 
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/StormClusterState.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/StormClusterState.java b/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/StormClusterState.java
index 6486d5e..a399bb9 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/StormClusterState.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/StormClusterState.java
@@ -27,7 +27,10 @@ import com.alibaba.jstorm.schedule.Assignment;
 import com.alibaba.jstorm.schedule.AssignmentBak;
 import com.alibaba.jstorm.task.TaskInfo;
 import com.alibaba.jstorm.task.error.TaskError;
-import com.alibaba.jstorm.task.heartbeat.TaskHeartbeat;
+import com.alibaba.jstorm.task.backpressure.SourceBackpressureInfo;
+import com.alibaba.jstorm.utils.Pair;
+
+import backtype.storm.generated.TopologyTaskHbInfo;
 
 /**
  * all storm in zk operation interface
@@ -41,30 +44,23 @@ public interface StormClusterState {
 
     public List<String> assignments(RunnableCallback callback) throws Exception;
 
-    public Assignment assignment_info(String topology_id,
-            RunnableCallback callback) throws Exception;
+    public Assignment assignment_info(String topology_id, RunnableCallback callback) throws Exception;
 
-    public void set_assignment(String topology_id, Assignment info)
-            throws Exception;
+    public void set_assignment(String topology_id, Assignment info) throws Exception;
 
     public AssignmentBak assignment_bak(String topologyName) throws Exception;
 
-    public void backup_assignment(String topology_id, AssignmentBak info)
-            throws Exception;
+    public void backup_assignment(String topology_id, AssignmentBak info) throws Exception;
 
     public List<String> active_storms() throws Exception;
 
-    public StormBase storm_base(String topology_id, RunnableCallback callback)
-            throws Exception;
+    public StormBase storm_base(String topology_id, RunnableCallback callback) throws Exception;
 
-    public void activate_storm(String topology_id, StormBase storm_base)
-            throws Exception;
+    public void activate_storm(String topology_id, StormBase storm_base) throws Exception;
 
-    public void update_storm(String topology_id, StormStatus new_elems)
-            throws Exception;
+    public void update_storm(String topology_id, StormStatus new_elems) throws Exception;
 
-    public void set_storm_monitor(String topologyId, boolean isEnable)
-            throws Exception;
+    public void set_storm_monitor(String topologyId, boolean isEnable) throws Exception;
 
     public void remove_storm_base(String topology_id) throws Exception;
 
@@ -72,73 +68,53 @@ public interface StormClusterState {
 
     public Set<Integer> task_ids(String topology_id) throws Exception;
 
-    public Set<Integer> task_ids_by_componentId(String topologyId,
-            String componentId) throws Exception;
+    public Set<Integer> task_ids_by_componentId(String topologyId, String componentId) throws Exception;
 
     public void set_task(String topologyId, Map<Integer, TaskInfo> taskInfoMap) throws Exception;
-    public void add_task(String topology_id, Map<Integer, TaskInfo> taskInfoMap)
-            throws Exception;
+
+    public void add_task(String topology_id, Map<Integer, TaskInfo> taskInfoMap) throws Exception;
 
     public void remove_task(String topologyId, Set<Integer> taskIds) throws Exception;
 
     public Map<Integer, TaskInfo> task_all_info(String topology_id) throws Exception;
 
-    public void setup_heartbeats(String topology_id) throws Exception;
-
     public List<String> heartbeat_storms() throws Exception;
 
-    public List<String> heartbeat_tasks(String topology_id) throws Exception;
-
-    public TaskHeartbeat task_heartbeat(String topology_id, int task_id)
-            throws Exception;
+    public void topology_heartbeat(String topology_id, TopologyTaskHbInfo info) throws Exception;
 
-    public void task_heartbeat(String topology_id, int task_id,
-            TaskHeartbeat info) throws Exception;
+    public TopologyTaskHbInfo topology_heartbeat(String topologyId) throws Exception;
 
     public void teardown_heartbeats(String topology_id) throws Exception;
 
-    public void remove_task_heartbeat(String topology_id, int task_id)
-            throws Exception;
-
     public List<String> task_error_storms() throws Exception;
-    
+
     public List<String> task_error_ids(String topologyId) throws Exception;
 
-    public void report_task_error(String topology_id, int task_id,
-            Throwable error) throws Exception;
+    public void report_task_error(String topology_id, int task_id, Throwable error) throws Exception;
 
-    public void report_task_error(String topology_id, int task_id, String error)
-            throws Exception;
+    public void report_task_error(String topology_id, int task_id, String error, String tag) throws Exception;
 
-    public Map<Integer, String> topo_lastErr_time(String topologyId)
-            throws Exception;
+    public Map<Integer, String> topo_lastErr_time(String topologyId) throws Exception;
 
     public void remove_lastErr_time(String topologyId) throws Exception;
 
-    public List<TaskError> task_errors(String topology_id, int task_id)
-            throws Exception;
+    public List<TaskError> task_errors(String topology_id, int task_id) throws Exception;
 
-    public void remove_task_error(String topologyId, int taskId)
-            throws Exception;
+    public void remove_task_error(String topologyId, int taskId) throws Exception;
 
-    public List<String> task_error_time(String topologyId, int taskId)
-            throws Exception;
+    public List<String> task_error_time(String topologyId, int taskId) throws Exception;
 
-    public String task_error_info(String topologyId, int taskId, long timeStamp)
-            throws Exception;
+    public String task_error_info(String topologyId, int taskId, long timeStamp) throws Exception;
 
     public void teardown_task_errors(String topology_id) throws Exception;
 
     public List<String> supervisors(RunnableCallback callback) throws Exception;
 
-    public SupervisorInfo supervisor_info(String supervisor_id)
-            throws Exception;
+    public SupervisorInfo supervisor_info(String supervisor_id) throws Exception;
 
-    public void supervisor_heartbeat(String supervisor_id, SupervisorInfo info)
-            throws Exception;
+    public void supervisor_heartbeat(String supervisor_id, SupervisorInfo info) throws Exception;
 
-    public boolean try_to_be_leader(String path, String host,
-            RunnableCallback callback) throws Exception;
+    public boolean try_to_be_leader(String path, String host, RunnableCallback callback) throws Exception;
 
     public String get_leader_host() throws Exception;
 
@@ -152,11 +128,25 @@ public interface StormClusterState {
 
     public void unregister_nimbus_host(String host) throws Exception;
 
-    public void set_topology_metric(String topologyId, Object metric)
-            throws Exception;
+    public void update_nimbus_detail(String hostPort, Map map) throws Exception;
+
+    public Map get_nimbus_detail(String hostPort, boolean watch) throws Exception;
+
+    public void unregister_nimbus_detail(String hostPort) throws Exception;
+
+    public void set_topology_metric(String topologyId, Object metric) throws Exception;
 
     public Object get_topology_metric(String topologyId) throws Exception;
-    
+
     public List<String> get_metrics() throws Exception;
 
+    public List<String> list_dirs(String path, boolean watch) throws  Exception;
+
+    public List<String> backpressureInfos() throws Exception;
+
+    public void set_backpressure_info(String topologyId, Map<String, SourceBackpressureInfo> sourceToBackpressureInfo) throws Exception;
+    
+    public Map<String, SourceBackpressureInfo> get_backpressure_info(String topologyId) throws Exception;
+
+    public void teardown_backpressure(String topologyId) throws Exception;
 }
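
The backpressure additions at the bottom of the interface form a small lifecycle: publish per-source info, read it back, and tear it down when the topology dies. A hedged sketch, to be run inside any method holding a StormClusterState handle; the topology id is a placeholder and the no-arg SourceBackpressureInfo constructor is an assumption, not shown in this commit:

    Map<String, SourceBackpressureInfo> info = new HashMap<String, SourceBackpressureInfo>();
    info.put("spout-1", new SourceBackpressureInfo()); // constructor assumed for illustration
    cluster.set_backpressure_info("topology-1-1449000000", info);
    Map<String, SourceBackpressureInfo> current = cluster.get_backpressure_info("topology-1-1449000000");
    cluster.teardown_backpressure("topology-1-1449000000");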

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/StormConfig.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/StormConfig.java b/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/StormConfig.java
index 3d1cd29..f78f52a 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/StormConfig.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/StormConfig.java
@@ -17,8 +17,18 @@
  */
 package com.alibaba.jstorm.cluster;
 
+import backtype.storm.Config;
+import backtype.storm.generated.StormTopology;
+import backtype.storm.utils.LocalState;
+import backtype.storm.utils.Utils;
+import com.alibaba.jstorm.client.ConfigExtension;
+import com.alibaba.jstorm.utils.JStormUtils;
+import com.alibaba.jstorm.utils.PathUtils;
+import org.apache.commons.io.FileUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import java.io.File;
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.lang.reflect.Field;
 import java.util.ArrayList;
@@ -26,21 +36,8 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.commons.io.FileUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import backtype.storm.Config;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.utils.LocalState;
-import backtype.storm.utils.Utils;
-
-import com.alibaba.jstorm.utils.JStormUtils;
-import com.alibaba.jstorm.utils.PathUtils;
-
 public class StormConfig {
-    private final static Logger LOG = LoggerFactory
-            .getLogger(StormConfig.class);
+    private final static Logger LOG = LoggerFactory.getLogger(StormConfig.class);
     public final static String RESOURCES_SUBDIR = "resources";
     public final static String WORKER_DATA_SUBDIR = "worker_shared_data";
 
@@ -80,11 +77,10 @@ public class StormConfig {
         return rtn;
     }
 
-    public static HashMap<String, Object> getClassFields(Class<?> cls)
-            throws IllegalArgumentException, IllegalAccessException {
-        java.lang.reflect.Field[] list = cls.getDeclaredFields();
+    public static HashMap<String, Object> getClassFields(Class<?> cls) throws IllegalArgumentException, IllegalAccessException {
+        Field[] list = cls.getDeclaredFields();
         HashMap<String, Object> rtn = new HashMap<String, Object>();
-        for (java.lang.reflect.Field f : list) {
+        for (Field f : list) {
             String name = f.getName();
             rtn.put(name, f.get(null).toString());
 
@@ -98,19 +94,26 @@ public class StormConfig {
 
     }
 
+    /**
+     * please use ConfigExtension.getClusterName(Map conf)
+     */
+    @Deprecated
+    public static String cluster_name(Map conf) {
+        return ConfigExtension.getClusterName(conf);
+    }
+
     public static boolean local_mode(Map conf) {
         String mode = (String) conf.get(Config.STORM_CLUSTER_MODE);
         if (mode != null) {
-            if (mode.equals("local")) {
+            if ("local".equals(mode)) {
                 return true;
             }
 
-            if (mode.equals("distributed")) {
+            if ("distributed".equals(mode)) {
                 return false;
             }
         }
-        throw new IllegalArgumentException("Illegal cluster mode in conf:"
-                + mode);
+        throw new IllegalArgumentException("Illegal cluster mode in conf:" + mode);
 
     }
 
@@ -121,24 +124,20 @@ public class StormConfig {
      */
     public static void validate_distributed_mode(Map<?, ?> conf) {
         if (StormConfig.local_mode(conf)) {
-            throw new IllegalArgumentException(
-                    "Cannot start server in local mode!");
+            throw new IllegalArgumentException("Cannot start server in local mode!");
         }
 
     }
 
     public static void validate_local_mode(Map<?, ?> conf) {
         if (!StormConfig.local_mode(conf)) {
-            throw new IllegalArgumentException(
-                    "Cannot start server in distributed mode!");
+            throw new IllegalArgumentException("Cannot start server in distributed mode!");
         }
 
     }
 
     public static String worker_root(Map conf) throws IOException {
-        String ret =
-                String.valueOf(conf.get(Config.STORM_LOCAL_DIR))
-                        + FILE_SEPERATEOR + "workers";
+        String ret = String.valueOf(conf.get(Config.STORM_LOCAL_DIR)) + FILE_SEPERATEOR + "workers";
         FileUtils.forceMkdir(new File(ret));
         return ret;
     }
@@ -149,39 +148,38 @@ public class StormConfig {
         return ret;
     }
 
-    public static String worker_pids_root(Map conf, String id)
-            throws IOException {
+    public static String worker_pids_root(Map conf, String id) throws IOException {
         String ret = worker_root(conf, id) + FILE_SEPERATEOR + "pids";
         FileUtils.forceMkdir(new File(ret));
         return ret;
     }
 
-    public static String worker_pid_path(Map conf, String id, String pid)
-            throws IOException {
+    public static String worker_pid_path(Map conf, String id, String pid) throws IOException {
         String ret = worker_pids_root(conf, id) + FILE_SEPERATEOR + pid;
         return ret;
     }
 
-    public static String worker_heartbeats_root(Map conf, String id)
-            throws IOException {
+    public static String worker_heartbeats_root(Map conf, String id) throws IOException {
         String ret = worker_root(conf, id) + FILE_SEPERATEOR + "heartbeats";
         FileUtils.forceMkdir(new File(ret));
         return ret;
     }
 
     public static String default_worker_shared_dir(Map conf) throws IOException {
-        String ret =
-                String.valueOf(conf.get(Config.STORM_LOCAL_DIR))
-                        + FILE_SEPERATEOR + WORKER_DATA_SUBDIR;
+        String ret = String.valueOf(conf.get(Config.STORM_LOCAL_DIR)) + FILE_SEPERATEOR + WORKER_DATA_SUBDIR;
 
         FileUtils.forceMkdir(new File(ret));
         return ret;
     }
 
+    private static String drpc_local_dir(Map conf) throws IOException {
+        String ret = String.valueOf(conf.get(Config.STORM_LOCAL_DIR)) + FILE_SEPERATEOR + "drpc";
+        FileUtils.forceMkdir(new File(ret));
+        return ret;
+    }
+
     private static String supervisor_local_dir(Map conf) throws IOException {
-        String ret =
-                String.valueOf(conf.get(Config.STORM_LOCAL_DIR))
-                        + FILE_SEPERATEOR + "supervisor";
+        String ret = String.valueOf(conf.get(Config.STORM_LOCAL_DIR)) + FILE_SEPERATEOR + "supervisor";
         FileUtils.forceMkdir(new File(ret));
         return ret;
     }
@@ -192,8 +190,7 @@ public class StormConfig {
         return ret;
     }
 
-    public static String supervisor_stormdist_root(Map conf, String topologyId)
-            throws IOException {
+    public static String supervisor_stormdist_root(Map conf, String topologyId) throws IOException {
         return supervisor_stormdist_root(conf) + FILE_SEPERATEOR + topologyId;
     }
 
@@ -216,17 +213,32 @@ public class StormConfig {
     }
 
     /**
+     * Return drpc's pid dir
+     *
+     * @param conf
+     * @return
+     * @throws IOException
+     */
+    public static String drpcPids(Map conf) throws IOException {
+        String ret = drpc_local_dir(conf) + FILE_SEPERATEOR + "pids";
+        try {
+            FileUtils.forceMkdir(new File(ret));
+        } catch (IOException e) {
+            LOG.error("Failed to create dir " + ret, e);
+            throw e;
+        }
+        return ret;
+    }
+
+    /**
      * Return nimbus's heartbeat dir for apsara
      * 
      * @param conf
      * @return
      * @throws IOException
      */
-    public static String supervisorHearbeatForContainer(Map conf)
-            throws IOException {
-        String ret =
-                supervisor_local_dir(conf) + FILE_SEPERATEOR
-                        + "supervisor.heartbeat";
+    public static String supervisorHearbeatForContainer(Map conf) throws IOException {
+        String ret = supervisor_local_dir(conf) + FILE_SEPERATEOR + "supervisor.heartbeat";
         try {
             FileUtils.forceMkdir(new File(ret));
         } catch (IOException e) {
@@ -272,8 +284,7 @@ public class StormConfig {
         return stormroot + FILE_SEPERATEOR + "timestamp";
     }
 
-    public static LocalState worker_state(Map conf, String id)
-            throws IOException {
+    public static LocalState worker_state(Map conf, String id) throws IOException {
         String path = worker_heartbeats_root(conf, id);
 
         LocalState rtn = new LocalState(path);
@@ -282,9 +293,18 @@ public class StormConfig {
     }
 
     public static String masterLocalDir(Map conf) throws IOException {
-        String ret =
-                String.valueOf(conf.get(Config.STORM_LOCAL_DIR))
-                        + FILE_SEPERATEOR + "nimbus";
+        String ret = String.valueOf(conf.get(Config.STORM_LOCAL_DIR)) + FILE_SEPERATEOR + "nimbus";
+        try {
+            FileUtils.forceMkdir(new File(ret));
+        } catch (IOException e) {
+            LOG.error("Failed to create dir " + ret, e);
+            throw e;
+        }
+        return ret;
+    }
+
+    public static String metricLocalDir(Map conf) throws IOException {
+        String ret = String.valueOf(conf.get(Config.STORM_LOCAL_DIR)) + FILE_SEPERATEOR + "metrics";
         try {
             FileUtils.forceMkdir(new File(ret));
         } catch (IOException e) {
@@ -300,8 +320,7 @@ public class StormConfig {
         return ret;
     }
 
-    public static String masterStormdistRoot(Map conf, String topologyId)
-            throws IOException {
+    public static String masterStormdistRoot(Map conf, String topologyId) throws IOException {
         return masterStormdistRoot(conf) + FILE_SEPERATEOR + topologyId;
     }
 
@@ -311,8 +330,7 @@ public class StormConfig {
         return ret;
     }
 
-    public static String masterStormTmpRoot(Map conf, String topologyId)
-            throws IOException {
+    public static String masterStormTmpRoot(Map conf, String topologyId) throws IOException {
         return masterStormTmpRoot(conf) + FILE_SEPERATEOR + topologyId;
     }
 
@@ -363,10 +381,8 @@ public class StormConfig {
      * @return
      * @throws IOException
      */
-    public static String masterHearbeatForContainer(Map conf)
-            throws IOException {
-        String ret =
-                masterLocalDir(conf) + FILE_SEPERATEOR + "nimbus.heartbeat";
+    public static String masterHearbeatForContainer(Map conf) throws IOException {
+        String ret = masterLocalDir(conf) + FILE_SEPERATEOR + "nimbus.heartbeat";
         try {
             FileUtils.forceMkdir(new File(ret));
         } catch (IOException e) {
@@ -375,11 +391,15 @@ public class StormConfig {
         }
         return ret;
     }
-    
+
     public static String masterDbDir(Map conf) throws IOException {
         return masterLocalDir(conf) + FILE_SEPERATEOR + "rocksdb";
     }
 
+    public static String metricDbDir(Map conf) throws IOException {
+        return metricLocalDir(conf) + FILE_SEPERATEOR + "rocksdb";
+    }
+
     public static String supervisorTmpDir(Map conf) throws IOException {
         String ret = null;
         try {
@@ -397,8 +417,7 @@ public class StormConfig {
     public static LocalState supervisorState(Map conf) throws IOException {
         LocalState localState = null;
         try {
-            String localstateDir =
-                    supervisor_local_dir(conf) + FILE_SEPERATEOR + "localstate";
+            String localstateDir = supervisor_local_dir(conf) + FILE_SEPERATEOR + "localstate";
             FileUtils.forceMkdir(new File(localstateDir));
             localState = new LocalState(localstateDir);
         } catch (IOException e) {
@@ -416,25 +435,20 @@ public class StormConfig {
      * @return
      * @throws IOException
      */
-    public static Map read_supervisor_topology_conf(Map conf, String topologyId)
-            throws IOException {
-        String topologyRoot =
-                StormConfig.supervisor_stormdist_root(conf, topologyId);
+    public static Map read_supervisor_topology_conf(Map conf, String topologyId) throws IOException {
+        String topologyRoot = StormConfig.supervisor_stormdist_root(conf, topologyId);
         String confPath = StormConfig.stormconf_path(topologyRoot);
         return (Map) readLocalObject(topologyId, confPath);
     }
 
-    public static StormTopology read_supervisor_topology_code(Map conf,
-            String topologyId) throws IOException {
-        String topologyRoot =
-                StormConfig.supervisor_stormdist_root(conf, topologyId);
+    public static StormTopology read_supervisor_topology_code(Map conf, String topologyId) throws IOException {
+        String topologyRoot = StormConfig.supervisor_stormdist_root(conf, topologyId);
         String codePath = StormConfig.stormcode_path(topologyRoot);
         return (StormTopology) readLocalObject(topologyId, codePath);
     }
 
     @SuppressWarnings("rawtypes")
-    public static List<String> get_supervisor_toplogy_list(Map conf)
-            throws IOException {
+    public static List<String> get_supervisor_toplogy_list(Map conf) throws IOException {
 
         // get the path: STORM-LOCAL-DIR/supervisor/stormdist/
         String path = StormConfig.supervisor_stormdist_root(conf);
@@ -444,48 +458,40 @@ public class StormConfig {
         return topologyids;
     }
 
-    public static Map read_nimbus_topology_conf(Map conf, String topologyId)
-            throws IOException {
+    public static Map read_nimbus_topology_conf(Map conf, String topologyId) throws IOException {
         String topologyRoot = StormConfig.masterStormdistRoot(conf, topologyId);
         return read_topology_conf(topologyRoot, topologyId);
     }
 
-    public static void write_nimbus_topology_conf(Map conf, String topologyId,
-            Map topoConf) throws IOException {
+    public static void write_nimbus_topology_conf(Map conf, String topologyId, Map topoConf) throws IOException {
         String topologyRoot = StormConfig.masterStormdistRoot(conf, topologyId);
         String confPath = StormConfig.stormconf_path(topologyRoot);
-        FileUtils.writeByteArrayToFile(new File(confPath),
-                Utils.serialize(topoConf));
+        FileUtils.writeByteArrayToFile(new File(confPath), Utils.serialize(topoConf));
     }
 
-    public static Map read_nimbusTmp_topology_conf(Map conf, String topologyId)
-            throws IOException {
+    public static Map read_nimbusTmp_topology_conf(Map conf, String topologyId) throws IOException {
         String topologyRoot = StormConfig.masterStormTmpRoot(conf, topologyId);
         return read_topology_conf(topologyRoot, topologyId);
     }
 
-    public static Map read_topology_conf(String topologyRoot, String topologyId)
-            throws IOException {
+    public static Map read_topology_conf(String topologyRoot, String topologyId) throws IOException {
         String readFile = StormConfig.stormconf_path(topologyRoot);
         return (Map) readLocalObject(topologyId, readFile);
     }
 
-    public static StormTopology read_nimbus_topology_code(Map conf,
-            String topologyId) throws IOException {
+    public static StormTopology read_nimbus_topology_code(Map conf, String topologyId) throws IOException {
         String topologyRoot = StormConfig.masterStormdistRoot(conf, topologyId);
         String codePath = StormConfig.stormcode_path(topologyRoot);
         return (StormTopology) readLocalObject(topologyId, codePath);
     }
 
-    public static void write_nimbus_topology_code(Map conf, String topologyId,
-            byte[] data) throws IOException {
+    public static void write_nimbus_topology_code(Map conf, String topologyId, byte[] data) throws IOException {
         String topologyRoot = StormConfig.masterStormdistRoot(conf, topologyId);
         String codePath = StormConfig.stormcode_path(topologyRoot);
         FileUtils.writeByteArrayToFile(new File(codePath), data);
     }
 
-    public static long read_supervisor_topology_timestamp(Map conf,
-            String topologyId) throws IOException {
+    public static long read_supervisor_topology_timestamp(Map conf, String topologyId) throws IOException {
         String stormRoot = supervisor_stormdist_root(conf, topologyId);
         String timeStampPath = stormts_path(stormRoot);
 
@@ -493,8 +499,7 @@ public class StormConfig {
         return JStormUtils.bytesToLong(data);
     }
 
-    public static void write_supervisor_topology_timestamp(Map conf,
-            String topologyId, long timeStamp) throws IOException {
+    public static void write_supervisor_topology_timestamp(Map conf, String topologyId, long timeStamp) throws IOException {
         String stormRoot = supervisor_stormdist_root(conf, topologyId);
         String timeStampPath = stormts_path(stormRoot);
 
@@ -502,6 +507,22 @@ public class StormConfig {
         FileUtils.writeByteArrayToFile(new File(timeStampPath), data);
     }
 
+    public static long read_nimbus_topology_timestamp(Map conf, String topologyId) throws IOException {
+        String stormRoot = masterStormdistRoot(conf, topologyId);
+        String timeStampPath = stormts_path(stormRoot);
+
+        byte[] data = FileUtils.readFileToByteArray(new File(timeStampPath));
+        return JStormUtils.bytesToLong(data);
+    }
+
+    public static void write_nimbus_topology_timestamp(Map conf, String topologyId, long timeStamp) throws IOException {
+        String stormRoot = masterStormdistRoot(conf, topologyId);
+        String timeStampPath = stormts_path(stormRoot);
+
+        byte[] data = JStormUtils.longToBytes(timeStamp);
+        FileUtils.writeByteArrayToFile(new File(timeStampPath), data);
+    }
+
     /**
      * stormconf has mergered into clusterconf
      * 
@@ -511,12 +532,9 @@ public class StormConfig {
      * @throws IOException
      */
     @SuppressWarnings("unchecked")
-    public static Object readLocalObject(String topologyId, String readFile)
-            throws IOException {
+    public static Object readLocalObject(String topologyId, String readFile) throws IOException {
 
-        String errMsg =
-                "Failed to get topology configuration of " + topologyId
-                        + " file:" + readFile;
+        String errMsg = "Failed to get topology configuration of " + topologyId + " file:" + readFile;
 
         byte[] bconf = FileUtils.readFileToByteArray(new File(readFile));
         if (bconf == null) {
@@ -537,10 +555,8 @@ public class StormConfig {
         return ret;
     }
 
-    public static long get_supervisor_topology_Bianrymodify_time(Map conf,
-            String topologyId) throws IOException {
-        String topologyRoot =
-                StormConfig.supervisor_stormdist_root(conf, topologyId);
+    public static long get_supervisor_topology_Bianrymodify_time(Map conf, String topologyId) throws IOException {
+        String topologyRoot = StormConfig.supervisor_stormdist_root(conf, topologyId);
         File f = new File(topologyRoot);
         long modifyTime = f.lastModified();
         return modifyTime;
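
The new read/write_nimbus_topology_timestamp pair mirrors the existing supervisor-side pair, storing the long as raw bytes via JStormUtils.longToBytes/bytesToLong. A round-trip sketch, assuming conf carries Config.STORM_LOCAL_DIR and that the topology's stormdist directory already exists; the topology id is a placeholder:

    long now = System.currentTimeMillis();
    StormConfig.write_nimbus_topology_timestamp(conf, "topology-1-1449000000", now);
    long readBack = StormConfig.read_nimbus_topology_timestamp(conf, "topology-1-1449000000");
    // readBack == now after a successful round trip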

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/StormMonitor.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/StormMonitor.java b/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/StormMonitor.java
index 935a638..c92b362 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/StormMonitor.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/cluster/StormMonitor.java
@@ -44,7 +44,6 @@ public class StormMonitor implements Serializable {
 
     @Override
     public String toString() {
-        return ToStringBuilder.reflectionToString(this,
-                ToStringStyle.SHORT_PREFIX_STYLE);
+        return ToStringBuilder.reflectionToString(this, ToStringStyle.SHORT_PREFIX_STYLE);
     }
 }

\ No newline at end of file


http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/CgroupManager.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/CgroupManager.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/CgroupManager.java
index 8d2ba24..78cfe73 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/CgroupManager.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/CgroupManager.java
@@ -36,10 +36,12 @@ import com.alibaba.jstorm.container.cgroup.core.CgroupCore;
 import com.alibaba.jstorm.container.cgroup.core.CpuCore;
 import com.alibaba.jstorm.utils.JStormUtils;
 
+/**
+ * @author Johnfang (xiaojian.fxj@alibaba-inc.com)
+ */
 public class CgroupManager {
 
-    public static final Logger LOG = LoggerFactory
-            .getLogger(CgroupManager.class);
+    public static final Logger LOG = LoggerFactory.getLogger(CgroupManager.class);
 
     public static final String JSTORM_HIERARCHY_NAME = "jstorm_cpu";
 
@@ -61,20 +63,16 @@ public class CgroupManager {
         // "/cgroup/cpu"
         rootDir = ConfigExtension.getCgroupRootDir(conf);
         if (rootDir == null)
-            throw new RuntimeException(
-                    "Check configuration file. The supervisor.cgroup.rootdir is missing.");
+            throw new RuntimeException("Check configuration file. The supervisor.cgroup.rootdir is missing.");
 
         File file = new File(JSTORM_CPU_HIERARCHY_DIR + "/" + rootDir);
         if (!file.exists()) {
-            LOG.error(JSTORM_CPU_HIERARCHY_DIR + "/" + rootDir
-                    + " is not existing.");
-            throw new RuntimeException(
-                    "Check if cgconfig service starts or /etc/cgconfig.conf is consistent with configuration file.");
+            LOG.error(JSTORM_CPU_HIERARCHY_DIR + "/" + rootDir + " is not existing.");
+            throw new RuntimeException("Check if cgconfig service starts or /etc/cgconfig.conf is consistent with configuration file.");
         }
         center = CgroupCenter.getInstance();
         if (center == null)
-            throw new RuntimeException(
-                    "Cgroup error, please check /proc/cgroups");
+            throw new RuntimeException("Cgroup error, please check /proc/cgroups");
         this.prepareSubSystem();
     }
 
@@ -90,13 +88,10 @@ public class CgroupManager {
         return value;
     }
 
-    private void setCpuUsageUpperLimit(CpuCore cpuCore, int cpuCoreUpperLimit)
-            throws IOException {
+    private void setCpuUsageUpperLimit(CpuCore cpuCore, int cpuCoreUpperLimit) throws IOException {
         /*
-         * User cfs_period & cfs_quota to control the upper limit use of cpu
-         * core e.g. If making a process to fully use two cpu cores, set
-         * cfs_period_us to 100000 and set cfs_quota_us to 200000 The highest
-         * value of "cpu core upper limit" is 10
+         * User cfs_period & cfs_quota to control the upper limit use of cpu core e.g. If making a process to fully use two cpu cores, set cfs_period_us to
+         * 100000 and set cfs_quota_us to 200000 The highest value of "cpu core upper limit" is 10
          */
         cpuCoreUpperLimit = validateCpuUpperLimitValue(cpuCoreUpperLimit);
 
@@ -109,16 +104,13 @@ public class CgroupManager {
         }
     }
 
-    public String startNewWorker(Map conf, int cpuNum, String workerId)
-            throws SecurityException, IOException {
-        CgroupCommon workerGroup =
-                new CgroupCommon(workerId, h, this.rootCgroup);
+    public String startNewWorker(Map conf, int cpuNum, String workerId) throws SecurityException, IOException {
+        CgroupCommon workerGroup = new CgroupCommon(workerId, h, this.rootCgroup);
         this.center.create(workerGroup);
         CgroupCore cpu = workerGroup.getCores().get(SubSystemType.cpu);
         CpuCore cpuCore = (CpuCore) cpu;
         cpuCore.setCpuShares(cpuNum * ONE_CPU_SLOT);
-        setCpuUsageUpperLimit(cpuCore,
-                ConfigExtension.getWorkerCpuCoreUpperLimit(conf));
+        setCpuUsageUpperLimit(cpuCore, ConfigExtension.getWorkerCpuCoreUpperLimit(conf));
 
         StringBuilder sb = new StringBuilder();
         sb.append("cgexec -g cpu:").append(workerGroup.getName()).append(" ");
@@ -126,8 +118,7 @@ public class CgroupManager {
     }
 
     public void shutDownWorker(String workerId, boolean isKilled) {
-        CgroupCommon workerGroup =
-                new CgroupCommon(workerId, h, this.rootCgroup);
+        CgroupCommon workerGroup = new CgroupCommon(workerId, h, this.rootCgroup);
         try {
             if (isKilled == false) {
                 for (Integer pid : workerGroup.getTasks()) {
@@ -151,9 +142,7 @@ public class CgroupManager {
         if (h == null) {
             Set<SubSystemType> types = new HashSet<SubSystemType>();
             types.add(SubSystemType.cpu);
-            h =
-                    new Hierarchy(JSTORM_HIERARCHY_NAME, types,
-                            JSTORM_CPU_HIERARCHY_DIR);
+            h = new Hierarchy(JSTORM_HIERARCHY_NAME, types, JSTORM_CPU_HIERARCHY_DIR);
         }
         rootCgroup = new CgroupCommon(rootDir, h, h.getRootCgroups());
     }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/Heartbeat.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/Heartbeat.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/Heartbeat.java
index e55aabe..d003a14 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/Heartbeat.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/Heartbeat.java
@@ -36,6 +36,7 @@ import com.alibaba.jstorm.utils.TimeUtils;
 
 /**
  * supervisor Heartbeat, just write SupervisorInfo to ZK
+ * @author Johnfang (xiaojian.fxj@alibaba-inc.com)
  */
 class Heartbeat extends RunnableCallback {
 
@@ -67,8 +68,7 @@ class Heartbeat extends RunnableCallback {
      * @param myHostName
      */
     @SuppressWarnings({ "rawtypes", "unchecked" })
-    public Heartbeat(Map conf, StormClusterState stormClusterState,
-            String supervisorId) {
+    public Heartbeat(Map conf, StormClusterState stormClusterState, String supervisorId) {
 
         String myHostName = JStormServerUtils.getHostName(conf);
 
@@ -77,15 +77,12 @@ class Heartbeat extends RunnableCallback {
         this.conf = conf;
         this.myHostName = myHostName;
         this.startTime = TimeUtils.current_time_secs();
-        this.frequence =
-                JStormUtils.parseInt(conf
-                        .get(Config.SUPERVISOR_HEARTBEAT_FREQUENCY_SECS));
+        this.frequence = JStormUtils.parseInt(conf.get(Config.SUPERVISOR_HEARTBEAT_FREQUENCY_SECS));
         this.hbUpdateTrigger = new AtomicBoolean(true);
 
         initSupervisorInfo(conf);
 
-        LOG.info("Successfully init supervisor heartbeat thread, "
-                + supervisorInfo);
+        LOG.info("Successfully init supervisor heartbeat thread, " + supervisorInfo);
     }
 
     private void initSupervisorInfo(Map conf) {
@@ -96,32 +93,28 @@ class Heartbeat extends RunnableCallback {
 
                 boolean isLocaliP = false;
                 isLocaliP = myHostName.equals("127.0.0.1");
-                if(isLocaliP){
+                if (isLocaliP) {
                     throw new Exception("the hostname which  supervisor get is localhost");
                 }
-            }catch(Exception e1){
+            } catch (Exception e1) {
                 LOG.error("get supervisor host error!", e1);
                 throw new RuntimeException(e1);
             }
             Set<Integer> ports = JStormUtils.listToSet(portList);
-            supervisorInfo =
-                    new SupervisorInfo(myHostName, supervisorId, ports);
+            supervisorInfo = new SupervisorInfo(myHostName, supervisorId, ports);
         } else {
-            Set<Integer> ports = JStormUtils.listToSet(portList.subList(0, 1));
-            supervisorInfo =
-                    new SupervisorInfo(myHostName, supervisorId, ports);
+            Set<Integer> ports = JStormUtils.listToSet(portList);
+            supervisorInfo = new SupervisorInfo(myHostName, supervisorId, ports);
         }
     }
 
     @SuppressWarnings("unchecked")
     public void update() {
         supervisorInfo.setTimeSecs(TimeUtils.current_time_secs());
-        supervisorInfo
-                .setUptimeSecs((int) (TimeUtils.current_time_secs() - startTime));
+        supervisorInfo.setUptimeSecs((int) (TimeUtils.current_time_secs() - startTime));
 
         try {
-            stormClusterState
-                    .supervisor_heartbeat(supervisorId, supervisorInfo);
+            stormClusterState.supervisor_heartbeat(supervisorId, supervisorInfo);
         } catch (Exception e) {
             LOG.error("Failed to update SupervisorInfo to ZK");
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/Httpserver.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/Httpserver.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/Httpserver.java
index fad1346..4ece066 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/Httpserver.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/Httpserver.java
@@ -62,6 +62,9 @@ import com.sun.net.httpserver.HttpExchange;
 import com.sun.net.httpserver.HttpHandler;
 import com.sun.net.httpserver.HttpServer;
 
+/**
+ * @author Johnfang (xiaojian.fxj@alibaba-inc.com)
+ */
 public class Httpserver implements Shutdownable {
 
     private static Logger LOG = LoggerFactory.getLogger(Httpserver.class);
@@ -119,13 +122,11 @@ public class Httpserver implements Shutdownable {
 
         }
 
-        public void handlFailure(HttpExchange t, String errorMsg)
-                throws IOException {
+        public void handlFailure(HttpExchange t, String errorMsg) throws IOException {
             LOG.error(errorMsg);
 
             byte[] data = errorMsg.getBytes();
-            t.sendResponseHeaders(HttpURLConnection.HTTP_BAD_REQUEST,
-                    data.length);
+            t.sendResponseHeaders(HttpURLConnection.HTTP_BAD_REQUEST, data.length);
             OutputStream os = t.getResponseBody();
             os.write(data);
             os.close();
@@ -136,8 +137,7 @@ public class Httpserver implements Shutdownable {
             Map<String, String> paramMap = parseRawQuery(uri.getRawQuery());
             LOG.info("Receive command " + paramMap);
 
-            String cmd =
-                    paramMap.get(HttpserverUtils.HTTPSERVER_LOGVIEW_PARAM_CMD);
+            String cmd = paramMap.get(HttpserverUtils.HTTPSERVER_LOGVIEW_PARAM_CMD);
             if (StringUtils.isBlank(cmd) == true) {
                 handlFailure(t, "Bad Request, Not set command type");
                 return;
@@ -146,16 +146,13 @@ public class Httpserver implements Shutdownable {
             if (HttpserverUtils.HTTPSERVER_LOGVIEW_PARAM_CMD_SHOW.equals(cmd)) {
                 handleShowLog(t, paramMap);
                 return;
-            } else if (HttpserverUtils.HTTPSERVER_LOGVIEW_PARAM_CMD_LIST
-                    .equals(cmd)) {
+            } else if (HttpserverUtils.HTTPSERVER_LOGVIEW_PARAM_CMD_LIST.equals(cmd)) {
                 handleListDir(t, paramMap);
                 return;
-            } else if (HttpserverUtils.HTTPSERVER_LOGVIEW_PARAM_CMD_JSTACK
-                    .equals(cmd)) {
+            } else if (HttpserverUtils.HTTPSERVER_LOGVIEW_PARAM_CMD_JSTACK.equals(cmd)) {
                 handleJstack(t, paramMap);
                 return;
-            } else if (HttpserverUtils.HTTPSERVER_LOGVIEW_PARAM_CMD_SHOW_CONF
-                    .equals(cmd)) {
+            } else if (HttpserverUtils.HTTPSERVER_LOGVIEW_PARAM_CMD_SHOW_CONF.equals(cmd)) {
                 handleShowConf(t, paramMap);
                 return;
             }
@@ -178,8 +175,7 @@ public class Httpserver implements Shutdownable {
 
             if (isChild == false) {
                 LOG.error("Access one disallowed path: " + canonicalPath);
-                throw new IOException(
-                        "Destination file/path is not accessible.");
+                throw new IOException("Destination file/path is not accessible.");
             }
         }
 
@@ -196,34 +192,27 @@ public class Httpserver implements Shutdownable {
             return paramMap;
         }
 
-        private void handleShowLog(HttpExchange t, Map<String, String> paramMap)
-                throws IOException {
+        private void handleShowLog(HttpExchange t, Map<String, String> paramMap) throws IOException {
             Pair<Long, byte[]> logPair = queryLog(t, paramMap);
             if (logPair == null) {
                 return;
             }
 
-            String size =
-                    String.format(
-                            HttpserverUtils.HTTPSERVER_LOGVIEW_PARAM_SIZE_FORMAT,
-                            logPair.getFirst());
+            String size = String.format(HttpserverUtils.HTTPSERVER_LOGVIEW_PARAM_SIZE_FORMAT, logPair.getFirst());
             byte[] sizeByts = size.getBytes();
 
             byte[] logData = logPair.getSecond();
 
-            t.sendResponseHeaders(HttpURLConnection.HTTP_OK, sizeByts.length
-                    + logData.length);
+            t.sendResponseHeaders(HttpURLConnection.HTTP_OK, sizeByts.length + logData.length);
             OutputStream os = t.getResponseBody();
             os.write(sizeByts);
             os.write(logData);
             os.close();
         }
 
-        private Pair<Long, byte[]> queryLog(HttpExchange t,
-                Map<String, String> paramMap) throws IOException {
+        private Pair<Long, byte[]> queryLog(HttpExchange t, Map<String, String> paramMap) throws IOException {
 
-            String fileParam =
-                    paramMap.get(HttpserverUtils.HTTPSERVER_LOGVIEW_PARAM_LOGFILE);
+            String fileParam = paramMap.get(HttpserverUtils.HTTPSERVER_LOGVIEW_PARAM_LOGFILE);
             if (StringUtils.isBlank(fileParam)) {
                 handlFailure(t, "Bad Request, Params Error, no log file name.");
                 return null;
@@ -242,8 +231,7 @@ public class Httpserver implements Shutdownable {
 
                 long position = fileSize - pageSize;
                 try {
-                    String posStr =
-                            paramMap.get(HttpserverUtils.HTTPSERVER_LOGVIEW_PARAM_POS);
+                    String posStr = paramMap.get(HttpserverUtils.HTTPSERVER_LOGVIEW_PARAM_POS);
                     if (StringUtils.isBlank(posStr) == false) {
                         long pos = Long.valueOf(posStr);
 
@@ -258,15 +246,12 @@ public class Httpserver implements Shutdownable {
 
                 long size = Math.min(fileSize - position, pageSize);
 
-                LOG.info("logview " + logFile + ", position=" + position
-                        + ", size=" + size);
+                LOG.info("logview " + logFile + ", position=" + position + ", size=" + size);
                 fout = fc.map(FileChannel.MapMode.READ_ONLY, position, size);
 
                 ret = new byte[(int) size];
                 fout.get(ret);
-                String str =
-                        new String(ret,
-                                ConfigExtension.getLogViewEncoding(conf));
+                String str = new String(ret, ConfigExtension.getLogViewEncoding(conf));
                 return new Pair<Long, byte[]>(fileSize, str.getBytes());
 
             } catch (FileNotFoundException e) {
@@ -288,8 +273,7 @@ public class Httpserver implements Shutdownable {
         }
 
         byte[] getJSonFiles(String dir) throws Exception {
-            Map<String, FileAttribute> fileMap =
-                    new HashMap<String, FileAttribute>();
+            Map<String, FileAttribute> fileMap = new HashMap<String, FileAttribute>();
 
             String path = logDir;
             if (dir != null) {
@@ -332,13 +316,11 @@ public class Httpserver implements Shutdownable {
             return fileJsonStr.getBytes();
         }
 
-        void handleListDir(HttpExchange t, Map<String, String> paramMap)
-                throws IOException {
+        void handleListDir(HttpExchange t, Map<String, String> paramMap) throws IOException {
             byte[] filesJson = "Failed to get file list".getBytes();
 
             try {
-                String dir =
-                        paramMap.get(HttpserverUtils.HTTPSERVER_LOGVIEW_PARAM_DIR);
+                String dir = paramMap.get(HttpserverUtils.HTTPSERVER_LOGVIEW_PARAM_DIR);
                 filesJson = getJSonFiles(dir);
             } catch (Exception e) {
                 LOG.error("Failed to list files", e);
@@ -358,15 +340,12 @@ public class Httpserver implements Shutdownable {
 
             try {
                 LOG.info("Begin to execute " + cmd);
-                Process process =
-                        JStormUtils.launch_process(cmd,
-                                new HashMap<String, String>(), false);
+                Process process = JStormUtils.launch_process(cmd, new HashMap<String, String>(), false);
 
                 // Process process = Runtime.getRuntime().exec(sb.toString());
 
                 InputStream stdin = process.getInputStream();
-                BufferedReader reader =
-                        new BufferedReader(new InputStreamReader(stdin));
+                BufferedReader reader = new BufferedReader(new InputStreamReader(stdin));
 
                 JStormUtils.sleepMs(1000);
 
@@ -398,10 +377,8 @@ public class Httpserver implements Shutdownable {
             }
         }
 
-        void handleJstack(HttpExchange t, Map<String, String> paramMap)
-                throws IOException {
-            String workerPort =
-                    paramMap.get(HttpserverUtils.HTTPSERVER_LOGVIEW_PARAM_WORKER_PORT);
+        void handleJstack(HttpExchange t, Map<String, String> paramMap) throws IOException {
+            String workerPort = paramMap.get(HttpserverUtils.HTTPSERVER_LOGVIEW_PARAM_WORKER_PORT);
             if (workerPort == null) {
                 handlFailure(t, "Not set worker's port");
                 return;
@@ -425,8 +402,7 @@ public class Httpserver implements Shutdownable {
             os.close();
         }
 
-        void handleShowConf(HttpExchange t, Map<String, String> paramMap)
-                throws IOException {
+        void handleShowConf(HttpExchange t, Map<String, String> paramMap) throws IOException {
             byte[] json = "Failed to get configuration".getBytes();
 
             try {
@@ -452,8 +428,7 @@ public class Httpserver implements Shutdownable {
 
         try {
             hs = HttpServer.create(socketAddr, 0);
-            hs.createContext(HttpserverUtils.HTTPSERVER_CONTEXT_PATH_LOGVIEW,
-                    new LogHandler(conf));
+            hs.createContext(HttpserverUtils.HTTPSERVER_CONTEXT_PATH_LOGVIEW, new LogHandler(conf));
             hs.setExecutor(executor);
             hs.start();
 

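A note on the reflowed LogHandler above: its log-view path works by memory-mapping a window of [position, position + pageSize) from the log file and decoding it with the configured encoding. A minimal, self-contained sketch of that pattern, assuming an illustrative path, page size, and UTF-8 encoding rather than values from the patch:

    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.nio.MappedByteBuffer;
    import java.nio.channels.FileChannel;
    import java.nio.charset.Charset;

    public class LogPager {
        // Map [position, position + pageSize) of the file read-only and decode it.
        public static String readPage(String logFile, long position, long pageSize,
                                      String encoding) throws IOException {
            try (RandomAccessFile raf = new RandomAccessFile(logFile, "r");
                 FileChannel fc = raf.getChannel()) {
                long fileSize = fc.size();
                if (position >= fileSize) {
                    return ""; // nothing left to page through
                }
                long size = Math.min(fileSize - position, pageSize);
                MappedByteBuffer buf = fc.map(FileChannel.MapMode.READ_ONLY, position, size);
                byte[] bytes = new byte[(int) size];
                buf.get(bytes);
                return new String(bytes, Charset.forName(encoding));
            }
        }

        public static void main(String[] args) throws IOException {
            // hypothetical worker log path; reads the first 4 KB
            System.out.println(readPage("/tmp/worker-6800.log", 0, 4096, "UTF-8"));
        }
    }
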
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/SandBoxMaker.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/SandBoxMaker.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/SandBoxMaker.java
index dfee522..8b52607 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/SandBoxMaker.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/SandBoxMaker.java
@@ -50,8 +50,7 @@ import com.alibaba.jstorm.cluster.StormConfig;
  * @version
  */
 public class SandBoxMaker {
-    private static final Logger LOG = LoggerFactory
-            .getLogger(SandBoxMaker.class);
+    private static final Logger LOG = LoggerFactory.getLogger(SandBoxMaker.class);
 
     public static final String SANBOX_TEMPLATE_NAME = "sandbox.policy";
 
@@ -66,8 +65,7 @@ public class SandBoxMaker {
 
     private final boolean isEnable;
 
-    private final Map<String, String> replaceBaseMap =
-            new HashMap<String, String>();
+    private final Map<String, String> replaceBaseMap = new HashMap<String, String>();
 
     public SandBoxMaker(Map conf) {
         this.conf = conf;
@@ -83,8 +81,7 @@ public class SandBoxMaker {
 
         replaceBaseMap.put(JSTORM_HOME_KEY, jstormHome);
 
-        replaceBaseMap.put(LOCAL_DIR_KEY,
-                (String) conf.get(Config.STORM_LOCAL_DIR));
+        replaceBaseMap.put(LOCAL_DIR_KEY, (String) conf.get(Config.STORM_LOCAL_DIR));
 
         LOG.info("JSTORM_HOME is " + jstormHome);
     }
@@ -127,26 +124,19 @@ public class SandBoxMaker {
         return line;
     }
 
-    public String generatePolicyFile(Map<String, String> replaceMap)
-            throws IOException {
+    public String generatePolicyFile(Map<String, String> replaceMap) throws IOException {
         // dynamic generate policy file, no static file
-        String tmpPolicy =
-                StormConfig.supervisorTmpDir(conf) + File.separator
-                        + UUID.randomUUID().toString();
+        String tmpPolicy = StormConfig.supervisorTmpDir(conf) + File.separator + UUID.randomUUID().toString();
 
-        InputStream inputStream =
-                SandBoxMaker.class.getClassLoader().getResourceAsStream(
-                        SANBOX_TEMPLATE_NAME);
+        InputStream inputStream = SandBoxMaker.class.getClassLoader().getResourceAsStream(SANBOX_TEMPLATE_NAME);
 
-        PrintWriter writer =
-                new PrintWriter(new BufferedWriter(new FileWriter(tmpPolicy)));
+        PrintWriter writer = new PrintWriter(new BufferedWriter(new FileWriter(tmpPolicy)));
 
         try {
 
             InputStreamReader inputReader = new InputStreamReader(inputStream);
 
-            BufferedReader reader =
-                    new BufferedReader(new LineNumberReader(inputReader));
+            BufferedReader reader = new BufferedReader(new LineNumberReader(inputReader));
 
             String line = null;
             while ((line = reader.readLine()) != null) {
@@ -177,8 +167,7 @@ public class SandBoxMaker {
      * @return
      * @throws IOException
      */
-    public String sandboxPolicy(String workerId, Map<String, String> replaceMap)
-            throws IOException {
+    public String sandboxPolicy(String workerId, Map<String, String> replaceMap) throws IOException {
         if (isEnable == false) {
             return "";
         }
@@ -188,9 +177,7 @@ public class SandBoxMaker {
         String tmpPolicy = generatePolicyFile(replaceMap);
 
         File file = new File(tmpPolicy);
-        String policyPath =
-                StormConfig.worker_root(conf, workerId) + File.separator
-                        + SANBOX_TEMPLATE_NAME;
+        String policyPath = StormConfig.worker_root(conf, workerId) + File.separator + SANBOX_TEMPLATE_NAME;
         File dest = new File(policyPath);
         file.renameTo(dest);
 
@@ -210,9 +197,7 @@ public class SandBoxMaker {
         SandBoxMaker maker = new SandBoxMaker(conf);
 
         try {
-            System.out.println("sandboxPolicy:"
-                    + maker.sandboxPolicy("simple",
-                            new HashMap<String, String>()));
+            System.out.println("sandboxPolicy:" + maker.sandboxPolicy("simple", new HashMap<String, String>()));
         } catch (IOException e) {
             e.printStackTrace();
         }

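The generatePolicyFile change above keeps the same shape: stream the sandbox.policy template line by line and substitute placeholders from a replace map before writing the temporary policy file. A sketch of that substitution loop, using a hypothetical placeholder key; the real keys come from SandBoxMaker's replaceBaseMap:

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.PrintWriter;
    import java.io.Reader;
    import java.io.StringReader;
    import java.io.StringWriter;
    import java.io.Writer;
    import java.util.HashMap;
    import java.util.Map;

    public class PolicyTemplateSketch {
        // Copy the template to the output, replacing every key found in replaceMap.
        public static void fill(Reader template, Writer out,
                                Map<String, String> replaceMap) throws IOException {
            BufferedReader reader = new BufferedReader(template);
            PrintWriter writer = new PrintWriter(out);
            String line;
            while ((line = reader.readLine()) != null) {
                for (Map.Entry<String, String> e : replaceMap.entrySet()) {
                    line = line.replace(e.getKey(), e.getValue());
                }
                writer.println(line);
            }
            writer.flush();
        }

        public static void main(String[] args) throws IOException {
            Map<String, String> map = new HashMap<String, String>();
            map.put("%JSTORM_HOME%", "/opt/jstorm"); // hypothetical key and value
            StringWriter out = new StringWriter();
            fill(new StringReader("grant codeBase \"file:%JSTORM_HOME%/-\" {};"), out, map);
            System.out.print(out); // grant codeBase "file:/opt/jstorm/-" {};
        }
    }
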
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/ShutdownWork.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/ShutdownWork.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/ShutdownWork.java
index 0b906e3..71859a1 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/ShutdownWork.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/ShutdownWork.java
@@ -37,6 +37,9 @@ import com.alibaba.jstorm.utils.JStormUtils;
 import com.alibaba.jstorm.utils.PathUtils;
 import com.alibaba.jstorm.utils.TimeUtils;
 
+/**
+ * @author Johnfang (xiaojian.fxj@alibaba-inc.com)
+ */
 public class ShutdownWork extends RunnableCallback {
 
     private static Logger LOG = LoggerFactory.getLogger(ShutdownWork.class);
@@ -54,14 +57,9 @@ public class ShutdownWork extends RunnableCallback {
      * 
      * @return the topologies whose workers are shut down successfully
      */
-    public void shutWorker(Map conf, String supervisorId,
-            Map<String, String> removed,
-            ConcurrentHashMap<String, String> workerThreadPids,
-            CgroupManager cgroupManager, boolean block,
-            Map<String, Integer> killingWorkers,
-            Map<String, Integer> taskCleanupTimeoutMap) {
-        Map<String, List<String>> workerId2Pids =
-                new HashMap<String, List<String>>();
+    public void shutWorker(Map conf, String supervisorId, Map<String, String> removed, ConcurrentHashMap<String, String> workerThreadPids,
+            CgroupManager cgroupManager, boolean block, Map<String, Integer> killingWorkers, Map<String, Integer> taskCleanupTimeoutMap) {
+        Map<String, List<String>> workerId2Pids = new HashMap<String, List<String>>();
 
         boolean localMode = false;
 
@@ -78,8 +76,7 @@ public class ShutdownWork extends RunnableCallback {
             try {
                 pids = getPid(conf, workerId);
             } catch (IOException e1) {
-                LOG.error("Failed to get pid for " + workerId + " of "
-                        + topologyId);
+                LOG.error("Failed to get pid for " + workerId + " of " + topologyId);
             }
             workerId2Pids.put(workerId, pids);
 
@@ -100,15 +97,10 @@ public class ShutdownWork extends RunnableCallback {
                         JStormUtils.process_killed(Integer.parseInt(pid));
                     }
 
-                    if (taskCleanupTimeoutMap != null
-                            && taskCleanupTimeoutMap.get(topologyId) != null) {
-                        maxWaitTime =
-                                Math.max(maxWaitTime,
-                                        taskCleanupTimeoutMap.get(topologyId));
+                    if (taskCleanupTimeoutMap != null && taskCleanupTimeoutMap.get(topologyId) != null) {
+                        maxWaitTime = Math.max(maxWaitTime, taskCleanupTimeoutMap.get(topologyId));
                     } else {
-                        maxWaitTime =
-                                Math.max(maxWaitTime, ConfigExtension
-                                        .getTaskCleanupTimeoutSec(conf));
+                        maxWaitTime = Math.max(maxWaitTime, ConfigExtension.getTaskCleanupTimeoutSec(conf));
                     }
                 } catch (Exception e) {
                     LOG.info("Failed to shutdown ", e);
@@ -126,8 +118,7 @@ public class ShutdownWork extends RunnableCallback {
             List<String> pids = workerId2Pids.get(workerId);
 
             int cleanupTimeout;
-            if (taskCleanupTimeoutMap != null
-                    && taskCleanupTimeoutMap.get(topologyId) != null) {
+            if (taskCleanupTimeoutMap != null && taskCleanupTimeoutMap.get(topologyId) != null) {
                 cleanupTimeout = taskCleanupTimeoutMap.get(topologyId);
             } else {
                 cleanupTimeout = ConfigExtension.getTaskCleanupTimeoutSec(conf);
@@ -137,8 +128,7 @@ public class ShutdownWork extends RunnableCallback {
             if (TimeUtils.current_time_secs() - initCleaupTime > cleanupTimeout) {
                 if (localMode == false) {
                     for (String pid : pids) {
-                        JStormUtils
-                                .ensure_process_killed(Integer.parseInt(pid));
+                        JStormUtils.ensure_process_killed(Integer.parseInt(pid));
                         if (cgroupManager != null) {
                             cgroupManager.shutDownWorker(workerId, true);
                         }
@@ -169,14 +159,12 @@ public class ShutdownWork extends RunnableCallback {
             // delete workerid dir, LOCAL_DIR/worker/workerid
             PathUtils.rmr(StormConfig.worker_root(conf, workerId));
         } catch (Exception e) {
-            LOG.warn(e + "Failed to cleanup worker " + workerId
-                    + ". Will retry later");
+            LOG.warn(e + "Failed to cleanup worker " + workerId + ". Will retry later");
         }
     }
 
     /**
-     * When worker has been started by manually and supervisor, it will return
-     * multiple pid
+     * When a worker has been started both manually and by the supervisor, it will return multiple pids
      * 
      * @param conf
      * @param workerId

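The shutWorker flow above is a two-phase kill: first a polite kill per pid (JStormUtils.process_killed), then, once the per-topology cleanup timeout has elapsed, a forced kill (JStormUtils.ensure_process_killed). A POSIX-only sketch of that idea, standing in for the JStormUtils helpers rather than reproducing them:

    import java.io.IOException;

    public class TwoPhaseKillSketch {
        // Send a signal via the external kill(1) command; harmless if the pid is already gone.
        static void signal(int pid, int sig) throws IOException, InterruptedException {
            new ProcessBuilder("kill", "-" + sig, Integer.toString(pid))
                    .inheritIO().start().waitFor();
        }

        // Ask the worker to exit, wait out the cleanup timeout, then force-kill.
        public static void shutWorker(int pid, int cleanupTimeoutSecs)
                throws IOException, InterruptedException {
            signal(pid, 15);                          // SIGTERM: polite shutdown
            Thread.sleep(cleanupTimeoutSecs * 1000L); // grace period
            signal(pid, 9);                           // SIGKILL: ensure it is dead
        }
    }
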
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/StateHeartbeat.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/StateHeartbeat.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/StateHeartbeat.java
index c159f4b..c6bed45a 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/StateHeartbeat.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/StateHeartbeat.java
@@ -47,7 +47,6 @@ public class StateHeartbeat {
 
     @Override
     public String toString() {
-        return ToStringBuilder.reflectionToString(this,
-                ToStringStyle.SHORT_PREFIX_STYLE);
+        return ToStringBuilder.reflectionToString(this, ToStringStyle.SHORT_PREFIX_STYLE);
     }
 }

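The one-line toString above relies on commons-lang reflection: SHORT_PREFIX_STYLE renders ClassName[field=value,...], so newly added fields show up in log output without editing toString(). A tiny demonstration with stand-in fields:

    import org.apache.commons.lang.builder.ToStringBuilder;
    import org.apache.commons.lang.builder.ToStringStyle;

    public class ToStringDemo {
        private final String state = "valid"; // stand-in fields
        private final int port = 6800;

        @Override
        public String toString() {
            return ToStringBuilder.reflectionToString(this, ToStringStyle.SHORT_PREFIX_STYLE);
        }

        public static void main(String[] args) {
            System.out.println(new ToStringDemo()); // ToStringDemo[state=valid,port=6800]
        }
    }
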
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/Supervisor.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/Supervisor.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/Supervisor.java
index abc2448..c6c2877 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/Supervisor.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/Supervisor.java
@@ -24,6 +24,7 @@ import java.util.Vector;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicBoolean;
 
+import com.alibaba.jstorm.daemon.worker.WorkerReportError;
 import org.apache.commons.io.FileUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -51,33 +52,28 @@ import com.alibaba.jstorm.utils.JStormUtils;
  * 
 * Supervisor workflow 1. write SupervisorInfo to ZK
  * 
- * 2. Every 10 seconds run SynchronizeSupervisor 2.1 download new topology 2.2
- * release useless worker 2.3 assgin new task to
- * /local-dir/supervisor/localstate 2.4 add one syncProcesses event
+ * 2. Every 10 seconds run SynchronizeSupervisor: 2.1 download new topologies 2.2 release useless workers 2.3 assign new tasks to /local-dir/supervisor/localstate
+ * 2.4 add one syncProcesses event
  * 
- * 3. Every supervisor.monitor.frequency.secs run SyncProcesses 3.1 kill useless
- * worker 3.2 start new worker
+ * 3. Every supervisor.monitor.frequency.secs run SyncProcesses: 3.1 kill useless workers 3.2 start new workers
  * 
- * 4. create heartbeat thread every supervisor.heartbeat.frequency.secs, write
- * SupervisorInfo to ZK
+ * 4. create heartbeat thread every supervisor.heartbeat.frequency.secs, write SupervisorInfo to ZK
+ * @author Johnfang (xiaojian.fxj@alibaba-inc.com)
  */
 
 public class Supervisor {
 
     private static Logger LOG = LoggerFactory.getLogger(Supervisor.class);
 
-
     /**
      * create and start one supervisor
      * 
     * @param conf : configuration from default.yaml and storm.yaml
      * @param sharedContext : null (right now)
-     * @return SupervisorManger: which is used to shutdown all workers and
-     *         supervisor
+     * @return SupervisorManger: which is used to shut down all workers and the supervisor
      */
     @SuppressWarnings("rawtypes")
-    public SupervisorManger mkSupervisor(Map conf, IContext sharedContext)
-            throws Exception {
+    public SupervisorManger mkSupervisor(Map conf, IContext sharedContext) throws Exception {
 
         LOG.info("Starting Supervisor with conf " + conf);
 
@@ -91,13 +87,15 @@ public class Supervisor {
         * Step 2: create ZK operation instance StormClusterState
          */
 
-        StormClusterState stormClusterState =
-                Cluster.mk_storm_cluster_state(conf);
+        StormClusterState stormClusterState = Cluster.mk_storm_cluster_state(conf);
+
+        String hostName = JStormServerUtils.getHostName(conf);
+        WorkerReportError workerReportError =
+                new WorkerReportError(stormClusterState, hostName);
+
 
         /*
-         * Step 3, create LocalStat LocalStat is one KV database 4.1 create
-         * LocalState instance; 4.2 get supervisorId, if no supervisorId, create
-         * one
+         * Step 3, create LocalState. LocalState is a KV database: 4.1 create LocalState instance; 4.2 get supervisorId; if no supervisorId, create one
          */
 
         LocalState localState = StormConfig.supervisorState(conf);
@@ -115,13 +113,11 @@ public class Supervisor {
         // sync heartbeat to nimbus
         Heartbeat hb = new Heartbeat(conf, stormClusterState, supervisorId);
         hb.update();
-        AsyncLoopThread heartbeat =
-                new AsyncLoopThread(hb, false, null, Thread.MIN_PRIORITY, true);
+        AsyncLoopThread heartbeat = new AsyncLoopThread(hb, false, null, Thread.MIN_PRIORITY, true);
         threads.add(heartbeat);
 
         // Sync heartbeat to Apsara Container
-        AsyncLoopThread syncContainerHbThread =
-                SyncContainerHb.mkSupervisorInstance(conf);
+        AsyncLoopThread syncContainerHbThread = SyncContainerHb.mkSupervisorInstance(conf);
         if (syncContainerHbThread != null) {
             threads.add(syncContainerHbThread);
         }
@@ -129,34 +125,22 @@ public class Supervisor {
         // Step 6 create and start sync Supervisor thread
         // every supervisor.monitor.frequency.secs second run SyncSupervisor
         EventManagerImp processEventManager = new EventManagerImp();
-        AsyncLoopThread processEventThread =
-                new AsyncLoopThread(processEventManager);
+        AsyncLoopThread processEventThread = new AsyncLoopThread(processEventManager);
         threads.add(processEventThread);
 
-        ConcurrentHashMap<String, String> workerThreadPids =
-                new ConcurrentHashMap<String, String>();
-        SyncProcessEvent syncProcessEvent =
-                new SyncProcessEvent(supervisorId, conf, localState,
-                        workerThreadPids, sharedContext);
+        ConcurrentHashMap<String, String> workerThreadPids = new ConcurrentHashMap<String, String>();
+        SyncProcessEvent syncProcessEvent = new SyncProcessEvent(supervisorId, conf, localState, workerThreadPids, sharedContext, workerReportError);
 
         EventManagerImp syncSupEventManager = new EventManagerImp();
-        AsyncLoopThread syncSupEventThread =
-                new AsyncLoopThread(syncSupEventManager);
+        AsyncLoopThread syncSupEventThread = new AsyncLoopThread(syncSupEventManager);
         threads.add(syncSupEventThread);
 
         SyncSupervisorEvent syncSupervisorEvent =
-                new SyncSupervisorEvent(supervisorId, conf,
-                        processEventManager, syncSupEventManager,
-                        stormClusterState, localState, syncProcessEvent, hb);
-
-        int syncFrequence =
-                JStormUtils.parseInt(conf
-                        .get(Config.SUPERVISOR_MONITOR_FREQUENCY_SECS));
-        EventManagerPusher syncSupervisorPusher =
-                new EventManagerPusher(syncSupEventManager,
-                        syncSupervisorEvent, syncFrequence);
-        AsyncLoopThread syncSupervisorThread =
-                new AsyncLoopThread(syncSupervisorPusher);
+                new SyncSupervisorEvent(supervisorId, conf, processEventManager, syncSupEventManager, stormClusterState, localState, syncProcessEvent, hb);
+
+        int syncFrequence = JStormUtils.parseInt(conf.get(Config.SUPERVISOR_MONITOR_FREQUENCY_SECS));
+        EventManagerPusher syncSupervisorPusher = new EventManagerPusher(syncSupEventManager, syncSupervisorEvent, syncFrequence);
+        AsyncLoopThread syncSupervisorThread = new AsyncLoopThread(syncSupervisorPusher);
         threads.add(syncSupervisorThread);
 
         Httpserver httpserver = null;
@@ -168,9 +152,7 @@ public class Supervisor {
         }
 
         // SupervisorManger which can shutdown all supervisor and workers
-        return new SupervisorManger(conf, supervisorId, threads,
-                syncSupEventManager, processEventManager, httpserver,
-                stormClusterState, workerThreadPids);
+        return new SupervisorManger(conf, supervisorId, threads, syncSupEventManager, processEventManager, httpserver, stormClusterState, workerThreadPids);
     }
 
     /**
@@ -210,7 +192,7 @@ public class Supervisor {
             JStormUtils.redirectOutput("/dev/null");
 
             initShutdownHook(supervisorManager);
-            
+
             while (supervisorManager.isFinishShutdown() == false) {
                 try {
                     Thread.sleep(1000);
@@ -222,11 +204,10 @@ public class Supervisor {
         } catch (Exception e) {
             LOG.error("Failed to start supervisor\n", e);
             System.exit(1);
-        }finally {
-        	LOG.info("Shutdown supervisor!!!");
+        } finally {
+            LOG.info("Shutdown supervisor!!!");
         }
 
-        
     }
 
     /**

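mkSupervisor above wires several periodic loops (heartbeat, SyncSupervisor, SyncProcesses) onto JStorm's AsyncLoopThread. As a rough plain-JDK analogue of just the heartbeat loop; AsyncLoopThread adds kill handling and thread priorities that this sketch omits, and the frequency below is a stand-in for supervisor.heartbeat.frequency.secs:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class HeartbeatLoopSketch {
        public static void main(String[] args) {
            final int frequencySecs = 60; // stand-in for supervisor.heartbeat.frequency.secs
            ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
            scheduler.scheduleAtFixedRate(new Runnable() {
                public void run() {
                    // real daemon: Heartbeat.update() writes SupervisorInfo to ZK
                    System.out.println("heartbeat at " + System.currentTimeMillis());
                }
            }, 0, frequencySecs, TimeUnit.SECONDS);
        }
    }
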
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/SupervisorInfo.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/SupervisorInfo.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/SupervisorInfo.java
index f53ef72..ae89607 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/SupervisorInfo.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/SupervisorInfo.java
@@ -29,7 +29,7 @@ import org.apache.commons.lang.builder.ToStringStyle;
 
 /**
  * Object stored in ZK /ZK-DIR/supervisors
- *
+ * 
  * @author Xin.Zhou/Longda
  */
 public class SupervisorInfo implements Serializable {
@@ -46,8 +46,7 @@ public class SupervisorInfo implements Serializable {
 
     private transient Set<Integer> availableWorkerPorts;
 
-    public SupervisorInfo(String hostName, String supervisorId,
-                          Set<Integer> workerPorts) {
+    public SupervisorInfo(String hostName, String supervisorId, Set<Integer> workerPorts) {
         this.hostName = hostName;
         this.supervisorId = supervisorId;
         this.workerPorts = workerPorts;
@@ -80,16 +79,19 @@ public class SupervisorInfo implements Serializable {
     public Set<Integer> getWorkerPorts() {
         return workerPorts;
     }
-    public void setAvailableWorkerPorts(Set<Integer> workerPorts){
+
+    public void setAvailableWorkerPorts(Set<Integer> workerPorts) {
         if (availableWorkerPorts == null)
             availableWorkerPorts = new HashSet<Integer>();
         availableWorkerPorts.addAll(workerPorts);
     }
+
     public Set<Integer> getAvailableWorkerPorts() {
         if (availableWorkerPorts == null)
             availableWorkerPorts = new HashSet<Integer>();
         return availableWorkerPorts;
     }
+
     public void setWorkerPorts(Set<Integer> workerPorts) {
         this.workerPorts = workerPorts;
     }
@@ -98,20 +100,11 @@ public class SupervisorInfo implements Serializable {
     public int hashCode() {
         final int prime = 31;
         int result = 1;
-        result =
-                prime * result + ((hostName == null) ? 0 : hostName.hashCode());
-        result =
-                prime
-                        * result
-                        + ((supervisorId == null) ? 0 : supervisorId.hashCode());
-        result =
-                prime * result + ((timeSecs == null) ? 0 : timeSecs.hashCode());
-        result =
-                prime * result
-                        + ((uptimeSecs == null) ? 0 : uptimeSecs.hashCode());
-        result =
-                prime * result
-                        + ((workerPorts == null) ? 0 : workerPorts.hashCode());
+        result = prime * result + ((hostName == null) ? 0 : hostName.hashCode());
+        result = prime * result + ((supervisorId == null) ? 0 : supervisorId.hashCode());
+        result = prime * result + ((timeSecs == null) ? 0 : timeSecs.hashCode());
+        result = prime * result + ((uptimeSecs == null) ? 0 : uptimeSecs.hashCode());
+        result = prime * result + ((workerPorts == null) ? 0 : workerPorts.hashCode());
         return result;
     }
 
@@ -154,19 +147,17 @@ public class SupervisorInfo implements Serializable {
 
     @Override
     public String toString() {
-        return ToStringBuilder.reflectionToString(this,
-                ToStringStyle.SHORT_PREFIX_STYLE);
+        return ToStringBuilder.reflectionToString(this, ToStringStyle.SHORT_PREFIX_STYLE);
     }
 
     /**
      * get Map<supervisorId, hostname>
-     *
+     * 
      * @param stormClusterState
      * @param callback
      * @return
      */
-    public static Map<String, String> getNodeHost(
-            Map<String, SupervisorInfo> supInfos) {
+    public static Map<String, String> getNodeHost(Map<String, SupervisorInfo> supInfos) {
 
         Map<String, String> rtn = new HashMap<String, String>();
 

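The reflowed hashCode above is the classic null-safe, 31-multiplier idiom. On Java 7+ the same combination (same field order, same null handling) collapses to java.util.Objects.hash; shown here on a subset of stand-in fields purely for comparison:

    import java.util.Objects;

    public class HashCodeSketch {
        private String hostName;      // stand-in fields mirroring SupervisorInfo
        private String supervisorId;
        private Integer timeSecs;

        @Override
        public int hashCode() {
            // identical result to the manual 31-based loop over the same fields
            return Objects.hash(hostName, supervisorId, timeSecs);
        }
    }
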
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/SupervisorManger.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/SupervisorManger.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/SupervisorManger.java
index a2806de..99c2c76 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/SupervisorManger.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/SupervisorManger.java
@@ -39,9 +39,9 @@ import com.alibaba.jstorm.utils.PathUtils;
 
 /**
  * supervisor shutdown manager which can shutdown supervisor
+ * @author Johnfang (xiaojian.fxj@alibaba-inc.com)
  */
-public class SupervisorManger extends ShutdownWork implements SupervisorDaemon,
-        DaemonCommon, Runnable {
+public class SupervisorManger extends ShutdownWork implements SupervisorDaemon, DaemonCommon, Runnable {
 
     private static Logger LOG = LoggerFactory.getLogger(SupervisorManger.class);
 
@@ -67,11 +67,8 @@ public class SupervisorManger extends ShutdownWork implements SupervisorDaemon,
 
     private volatile boolean isFinishShutdown = false;
 
-    public SupervisorManger(Map conf, String supervisorId,
-            Vector<AsyncLoopThread> threads,
-            EventManager processesEventManager, EventManager eventManager,
-            Httpserver httpserver, StormClusterState stormClusterState,
-            ConcurrentHashMap<String, String> workerThreadPidsAtom) {
+    public SupervisorManger(Map conf, String supervisorId, Vector<AsyncLoopThread> threads, EventManager processesEventManager, EventManager eventManager,
+            Httpserver httpserver, StormClusterState stormClusterState, ConcurrentHashMap<String, String> workerThreadPidsAtom) {
         this.conf = conf;
         this.supervisorId = supervisorId;
         this.shutdown = new AtomicBoolean(false);
@@ -104,8 +101,7 @@ public class SupervisorManger extends ShutdownWork implements SupervisorDaemon,
             // } catch (InterruptedException e) {
             // LOG.error(e.getMessage(), e);
             // }
-            LOG.info("Successfully shutdown thread:"
-                    + thread.getThread().getName());
+            LOG.info("Successfully shutdown thread:" + thread.getThread().getName());
         }
         eventManager.shutdown();
         processesEventManager.shutdown();
@@ -144,15 +140,13 @@ public class SupervisorManger extends ShutdownWork implements SupervisorDaemon,
             return;
         }
         List<String> myWorkerIds = PathUtils.read_dir_contents(path);
-        HashMap<String, String> workerId2topologyIds =
-                new HashMap<String, String>();
+        HashMap<String, String> workerId2topologyIds = new HashMap<String, String>();
 
         for (String workerId : myWorkerIds) {
             workerId2topologyIds.put(workerId, null);
         }
 
-        shutWorker(conf, supervisorId, workerId2topologyIds,
-                workerThreadPidsAtom, null, true, null, null);
+        shutWorker(conf, supervisorId, workerId2topologyIds, workerThreadPidsAtom, null, true, null, null);
     }
 
     @Override

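shutdownAllWorkers above treats every sub-directory of the local worker root as a workerId and passes null topology ids to shutWorker, since the worker-to-topology mapping is unknown at shutdown time. A sketch of that discovery step; the root path is illustrative:

    import java.io.File;
    import java.util.HashMap;
    import java.util.Map;

    public class WorkerDirScanSketch {
        // Each child directory name under the worker root is a workerId.
        public static Map<String, String> workerIdsFrom(String workerRoot) {
            Map<String, String> workerId2topologyIds = new HashMap<String, String>();
            String[] children = new File(workerRoot).list();
            if (children != null) {
                for (String workerId : children) {
                    workerId2topologyIds.put(workerId, null); // topology unknown here
                }
            }
            return workerId2topologyIds;
        }

        public static void main(String[] args) {
            System.out.println(workerIdsFrom("/tmp/jstorm/workers")); // hypothetical path
        }
    }
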
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/SyncProcessEvent.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/SyncProcessEvent.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/SyncProcessEvent.java
index d90eb29..01f2a3a 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/SyncProcessEvent.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/supervisor/SyncProcessEvent.java
@@ -33,6 +33,7 @@ import java.util.concurrent.atomic.AtomicReference;
 
 import java.util.regex.Pattern;
 
+import com.alibaba.jstorm.daemon.worker.*;
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -45,12 +46,6 @@ import backtype.storm.utils.LocalState;
 import com.alibaba.jstorm.client.ConfigExtension;
 import com.alibaba.jstorm.cluster.Common;
 import com.alibaba.jstorm.cluster.StormConfig;
-import com.alibaba.jstorm.daemon.worker.LocalAssignment;
-import com.alibaba.jstorm.daemon.worker.ProcessSimulator;
-import com.alibaba.jstorm.daemon.worker.State;
-import com.alibaba.jstorm.daemon.worker.Worker;
-import com.alibaba.jstorm.daemon.worker.WorkerHeartbeat;
-import com.alibaba.jstorm.daemon.worker.WorkerShutdown;
 import com.alibaba.jstorm.utils.JStormUtils;
 import com.alibaba.jstorm.utils.Pair;
 import com.alibaba.jstorm.utils.PathUtils;
@@ -59,6 +54,7 @@ import com.alibaba.jstorm.utils.TimeUtils;
 
 /**
  * SyncProcesses (1) kill bad worker (2) start new worker
+ * @author Johnfang (xiaojian.fxj@alibaba-inc.com)
  */
 class SyncProcessEvent extends ShutdownWork {
     private static Logger LOG = LoggerFactory.getLogger(SyncProcessEvent.class);
@@ -78,8 +74,7 @@ class SyncProcessEvent extends ShutdownWork {
     private SandBoxMaker sandBoxMaker;
 
     /**
-     * Due to the worker startTime is put in Supervisor memory, When supervisor
-     * restart, the starting worker is likely to be killed
+     * Because the worker startTime is kept in Supervisor memory, when the supervisor restarts, a starting worker is likely to be killed
      */
     private Map<String, Pair<Integer, Integer>> workerIdToStartTimeAndPort;
     /**
@@ -95,6 +90,8 @@ class SyncProcessEvent extends ShutdownWork {
     // private Supervisor supervisor;
     private int lastTime;
 
+    private WorkerReportError workerReportError;
+
     /**
      * @param conf
      * @param localState
@@ -104,10 +101,8 @@ class SyncProcessEvent extends ShutdownWork {
      * @param workerThreadPidsReadLock
      * @param workerThreadPidsWriteLock
      */
-    public SyncProcessEvent(String supervisorId, Map conf,
-            LocalState localState,
-            ConcurrentHashMap<String, String> workerThreadPids,
-            IContext sharedContext) {
+    public SyncProcessEvent(String supervisorId, Map conf, LocalState localState, ConcurrentHashMap<String, String> workerThreadPids,
+                            IContext sharedContext, WorkerReportError workerReportError) {
 
         this.supervisorId = supervisorId;
 
@@ -122,8 +117,7 @@ class SyncProcessEvent extends ShutdownWork {
 
         this.sandBoxMaker = new SandBoxMaker(conf);
 
-        this.workerIdToStartTimeAndPort =
-                new HashMap<String, Pair<Integer, Integer>>();
+        this.workerIdToStartTimeAndPort = new HashMap<String, Pair<Integer, Integer>>();
 
         this.needDownloadTopologys = new AtomicReference<Set>();
 
@@ -132,30 +126,27 @@ class SyncProcessEvent extends ShutdownWork {
         }
 
         killingWorkers = new HashMap<String, Integer>();
+        this.workerReportError = workerReportError;
     }
 
     /**
-     * @@@ Change the old logic In the old logic, it will store
-     *     LS_LOCAL_ASSIGNMENTS Map<String, Integer> into LocalState
-     * 
-     *     But I don't think LS_LOCAL_ASSIGNMENTS is useful, so remove this
-     *     logic
+     * @@@ Change the old logic. The old logic stored the LS_LOCAL_ASSIGNMENTS Map<String, Integer> into LocalState,
+     *
+     *     but I don't think LS_LOCAL_ASSIGNMENTS is useful, so this logic is removed
      */
     @SuppressWarnings("unchecked")
     @Override
     public void run() {
-        
+
     }
 
-    public void run(Map<Integer, LocalAssignment> localAssignments) {
-        LOG.debug("Syncing processes, interval seconds:"
-                + TimeUtils.time_delta(lastTime));
+    public void run(Map<Integer, LocalAssignment> localAssignments, Set<String> downloadFailedTopologyIds) {
+        LOG.debug("Syncing processes, interval seconds:" + TimeUtils.time_delta(lastTime));
         lastTime = TimeUtils.current_time_secs();
         try {
 
             /**
-             * Step 1: get assigned tasks from localstat Map<port(type Integer),
-             * LocalAssignment>
+             * Step 1: get assigned tasks from localstat Map<port(type Integer), LocalAssignment>
              */
             if (localAssignments == null) {
                 localAssignments = new HashMap<Integer, LocalAssignment>();
@@ -163,13 +154,11 @@ class SyncProcessEvent extends ShutdownWork {
             LOG.debug("Assigned tasks: " + localAssignments);
 
             /**
-             * Step 2: get local WorkerStats from local_dir/worker/ids/heartbeat
-             * Map<workerid [WorkerHeartbeat, state]>
+             * Step 2: get local WorkerStats from local_dir/worker/ids/heartbeat Map<workerid [WorkerHeartbeat, state]>
              */
             Map<String, StateHeartbeat> localWorkerStats = null;
             try {
-                localWorkerStats =
-                        getLocalWorkerStats(conf, localState, localAssignments);
+                localWorkerStats = getLocalWorkerStats(conf, localState, localAssignments);
             } catch (Exception e) {
                 LOG.error("Failed to get Local worker stats");
                 throw e;
@@ -177,20 +166,14 @@ class SyncProcessEvent extends ShutdownWork {
             LOG.debug("Allocated: " + localWorkerStats);
 
             /**
-             * Step 3: kill Invalid Workers and remove killed worker from
-             * localWorkerStats
+             * Step 3: kill Invalid Workers and remove killed worker from localWorkerStats
              */
             Map<String, Integer> taskCleaupTimeoutMap = null;
             Set<Integer> keepPorts = null;
             try {
-                taskCleaupTimeoutMap =
-                        (Map<String, Integer>) localState
-                                .get(Common.LS_TASK_CLEANUP_TIMEOUT);
-                keepPorts =
-                        killUselessWorkers(localWorkerStats, localAssignments,
-                                taskCleaupTimeoutMap);
-                localState.put(Common.LS_TASK_CLEANUP_TIMEOUT,
-                        taskCleaupTimeoutMap);
+                taskCleaupTimeoutMap = (Map<String, Integer>) localState.get(Common.LS_TASK_CLEANUP_TIMEOUT);
+                keepPorts = killUselessWorkers(localWorkerStats, localAssignments, taskCleaupTimeoutMap);
+                localState.put(Common.LS_TASK_CLEANUP_TIMEOUT, taskCleaupTimeoutMap);
             } catch (IOException e) {
                 LOG.error("Failed to kill workers", e);
             }
@@ -202,7 +185,7 @@ class SyncProcessEvent extends ShutdownWork {
             checkNeedUpdateTopologys(localWorkerStats, localAssignments);
 
             // start new workers
-            startNewWorkers(keepPorts, localAssignments);
+            startNewWorkers(keepPorts, localAssignments, downloadFailedTopologyIds);
 
         } catch (Exception e) {
             LOG.error("Failed Sync Process", e);
@@ -215,14 +198,13 @@ class SyncProcessEvent extends ShutdownWork {
      * check all workers is failed or not
      */
     @SuppressWarnings("unchecked")
-    public void checkNeedUpdateTopologys(
-            Map<String, StateHeartbeat> localWorkerStats,
-            Map<Integer, LocalAssignment> localAssignments) throws Exception {
+    public void checkNeedUpdateTopologys(Map<String, StateHeartbeat> localWorkerStats, Map<Integer, LocalAssignment> localAssignments) throws Exception {
         Set<String> topologys = new HashSet<String>();
+        Map<String, Long> topologyAssignTimeStamps = new HashMap<String, Long>();
 
-        for (Map.Entry<Integer, LocalAssignment> entry : localAssignments
-                .entrySet()) {
+        for (Entry<Integer, LocalAssignment> entry : localAssignments.entrySet()) {
             topologys.add(entry.getValue().getTopologyId());
+            topologyAssignTimeStamps.put(entry.getValue().getTopologyId(), entry.getValue().getTimeStamp());
         }
 
         for (StateHeartbeat stateHb : localWorkerStats.values()) {
@@ -236,32 +218,27 @@ class SyncProcessEvent extends ShutdownWork {
         Set<String> needRemoveTopologys = new HashSet<String>();
         for (String topologyId : topologys) {
             try {
-                long lastModifytime =
-                        StormConfig.get_supervisor_topology_Bianrymodify_time(
-                                conf, topologyId);
-                if ((currTime - lastModifytime) / 1000 < (JStormUtils.MIN_1 * 2)) {
+                long newAssignTime = topologyAssignTimeStamps.get(topologyId);
+                if ((currTime - newAssignTime) / 1000 < (JStormUtils.MIN_1 * 2)) {
                     LOG.debug("less 2 minite ,so removed " + topologyId);
                     needRemoveTopologys.add(topologyId);
                 }
             } catch (Exception e) {
-                LOG.error(
-                        "Failed to get the time of file last modification for topology"
-                                + topologyId, e);
+                LOG.error("Failed to get the time of file last modification for topology" + topologyId, e);
                 needRemoveTopologys.add(topologyId);
             }
         }
         topologys.removeAll(needRemoveTopologys);
 
         if (topologys.size() > 0) {
-            LOG.debug("Following topologys is going to re-download the jars, "
-                    + topologys);
+            LOG.debug("Following topologys is going to re-download the jars, " + topologys);
         }
         needDownloadTopologys.set(topologys);
     }
 
     /**
      * mark all new Workers
-     * 
+     *
      * @param workerIds
      * @pdOid 52b11418-7474-446d-bff5-0ecd68f4954f
      */
@@ -271,40 +248,32 @@ class SyncProcessEvent extends ShutdownWork {
 
         for (Entry<Integer, String> entry : workerIds.entrySet()) {
             String oldWorkerIds = portToWorkerId.get(entry.getKey());
-            if(oldWorkerIds != null){
+            if (oldWorkerIds != null) {
                 workerIdToStartTimeAndPort.remove(oldWorkerIds);
                 // update portToWorkerId
-                LOG.info("exit port is still occupied by old wokerId, so remove unuseful " +
-                        oldWorkerIds+ " form workerIdToStartTimeAndPort");
+                LOG.info("exit port is still occupied by old wokerId, so remove unuseful " + oldWorkerIds + " form workerIdToStartTimeAndPort");
             }
             portToWorkerId.put(entry.getKey(), entry.getValue());
-            workerIdToStartTimeAndPort.put(entry.getValue(),
-                    new Pair<Integer, Integer>(startTime, entry.getKey()));
+            workerIdToStartTimeAndPort.put(entry.getValue(), new Pair<Integer, Integer>(startTime, entry.getKey()));
         }
     }
 
     /**
-     * check new workers if the time is not > *
-     * SUPERVISOR_WORKER_START_TIMEOUT_SECS, otherwise info failed
-     * 
+     * check new workers: if the elapsed time is not > SUPERVISOR_WORKER_START_TIMEOUT_SECS, keep waiting; otherwise report the start as failed
+     *
      * @param conf
      * @pdOid f0a6ab43-8cd3-44e1-8fd3-015a2ec51c6a
      */
-    public void checkNewWorkers(Map conf) throws IOException,
-            InterruptedException {
+    public void checkNewWorkers(Map conf) throws IOException, InterruptedException {
 
         Set<String> workers = new HashSet<String>();
-        for (Entry<String, Pair<Integer, Integer>> entry : workerIdToStartTimeAndPort
-                .entrySet()) {
+        for (Entry<String, Pair<Integer, Integer>> entry : workerIdToStartTimeAndPort.entrySet()) {
             String workerId = entry.getKey();
             int startTime = entry.getValue().getFirst();
             LocalState ls = StormConfig.worker_state(conf, workerId);
-            WorkerHeartbeat whb =
-                    (WorkerHeartbeat) ls.get(Common.LS_WORKER_HEARTBEAT);
+            WorkerHeartbeat whb = (WorkerHeartbeat) ls.get(Common.LS_WORKER_HEARTBEAT);
             if (whb == null) {
-                if ((TimeUtils.current_time_secs() - startTime) < JStormUtils
-                        .parseInt(conf
-                                .get(Config.SUPERVISOR_WORKER_START_TIMEOUT_SECS))) {
+                if ((TimeUtils.current_time_secs() - startTime) < JStormUtils.parseInt(conf.get(Config.SUPERVISOR_WORKER_START_TIMEOUT_SECS))) {
                     LOG.info(workerId + " still hasn't started");
                 } else {
                     LOG.error("Failed to start Worker " + workerId);
@@ -321,15 +290,15 @@ class SyncProcessEvent extends ShutdownWork {
             this.portToWorkerId.remove(port);
         }
     }
-    public Map<Integer, String> getPortToWorkerId(){
+
+    public Map<Integer, String> getPortToWorkerId() {
         return portToWorkerId;
     }
 
     /**
      * get localstat approved workerId's map
-     * 
-     * @return Map<workerid [workerheart, state]> [workerheart, state] is also a
-     *         map, key is "workheartbeat" and "state"
+     *
+     * @return Map<workerid, [workerheartbeat, state]>; [workerheartbeat, state] is also a map whose keys are "workerheartbeat" and "state"
      * @param conf
      * @param localState
      * @param assignedTasks
@@ -337,22 +306,17 @@ class SyncProcessEvent extends ShutdownWork {
      * @pdOid 11c9bebb-d082-4c51-b323-dd3d5522a649
      */
     @SuppressWarnings("unchecked")
-    public Map<String, StateHeartbeat> getLocalWorkerStats(Map conf,
-            LocalState localState, Map<Integer, LocalAssignment> assignedTasks)
-            throws Exception {
+    public Map<String, StateHeartbeat> getLocalWorkerStats(Map conf, LocalState localState, Map<Integer, LocalAssignment> assignedTasks) throws Exception {
 
-        Map<String, StateHeartbeat> workeridHbstate =
-                new HashMap<String, StateHeartbeat>();
+        Map<String, StateHeartbeat> workeridHbstate = new HashMap<String, StateHeartbeat>();
 
         int now = TimeUtils.current_time_secs();
 
         /**
-         * Get Map<workerId, WorkerHeartbeat> from
-         * local_dir/worker/ids/heartbeat
+         * Get Map<workerId, WorkerHeartbeat> from local_dir/worker/ids/heartbeat
          */
         Map<String, WorkerHeartbeat> idToHeartbeat = readWorkerHeartbeats(conf);
-        for (Map.Entry<String, WorkerHeartbeat> entry : idToHeartbeat
-                .entrySet()) {
+        for (Entry<String, WorkerHeartbeat> entry : idToHeartbeat.entrySet()) {
 
             String workerid = entry.getKey().toString();
 
@@ -366,10 +330,9 @@ class SyncProcessEvent extends ShutdownWork {
                 if (timeToPort != null) {
                     LocalAssignment localAssignment = assignedTasks.get(timeToPort.getSecond());
                     if (localAssignment == null) {
-                        LOG.info("Following worker don't exit assignment, so remove this port="
-                                + timeToPort.getSecond());
+                        LOG.info("Following worker don't exit assignment, so remove this port=" + timeToPort.getSecond());
                         state = State.disallowed;
-                        //workerId is disallowed ,so remove it from  workerIdToStartTimeAndPort
+                        // workerId is disallowed, so remove it from workerIdToStartTimeAndPort
                         Integer port = this.workerIdToStartTimeAndPort.get(workerid).getSecond();
                         this.workerIdToStartTimeAndPort.remove(workerid);
                         this.portToWorkerId.remove(port);
@@ -381,12 +344,21 @@ class SyncProcessEvent extends ShutdownWork {
                 // isn't assigned task
                 state = State.disallowed;
 
-            } else if ((now - whb.getTimeSecs()) > JStormUtils.parseInt(conf
-                    .get(Config.SUPERVISOR_WORKER_TIMEOUT_SECS))) {//
+            } else if ((now - whb.getTimeSecs()) > JStormUtils.parseInt(conf.get(Config.SUPERVISOR_WORKER_TIMEOUT_SECS))) {
+                if (killingWorkers.containsKey(workerid) == false) {
+                    String outTimeInfo = " the worker heartbeat timed out; it is likely to be out of memory ";
+                    workerReportError.report(whb.getTopologyId(), whb.getPort(),
+                            whb.getTaskIds(), outTimeInfo);
+                }
 
                 state = State.timedOut;
             } else {
                 if (isWorkerDead(workerid)) {
+                    if (killingWorkers.containsKey(workerid) == false) {
+                        String workerDeadInfo = "Worker is dead ";
+                        workerReportError.report(whb.getTopologyId(), whb.getPort(),
+                                whb.getTaskIds(), workerDeadInfo);
+                    }
                     state = State.timedOut;
                 } else {
                     state = State.valid;
@@ -395,13 +367,10 @@ class SyncProcessEvent extends ShutdownWork {
 
             if (state != State.valid) {
                 if (killingWorkers.containsKey(workerid) == false)
-                    LOG.info("Worker:" + workerid + " state:" + state
-                            + " WorkerHeartbeat:" + whb + " assignedTasks:"
-                            + assignedTasks + " at supervisor time-secs " + now);
+                    LOG.info("Worker:" + workerid + " state:" + state + " WorkerHeartbeat:" + whb + " assignedTasks:" + assignedTasks
+                            + " at supervisor time-secs " + now);
             } else {
-                LOG.debug("Worker:" + workerid + " state:" + state
-                        + " WorkerHeartbeat: " + whb
-                        + " at supervisor time-secs " + now);
+                LOG.debug("Worker:" + workerid + " state:" + state + " WorkerHeartbeat: " + whb + " at supervisor time-secs " + now);
             }
 
             workeridHbstate.put(workerid, new StateHeartbeat(state, whb));
@@ -412,32 +381,26 @@ class SyncProcessEvent extends ShutdownWork {
 
     /**
      * check whether the workerheartbeat is allowed in the assignedTasks
-     * 
+     *
      * @param whb : WorkerHeartbeat
      * @param assignedTasks
-     * @return boolean if true, the assignments(LS-LOCAL-ASSIGNMENTS) is match
-     *         with workerheart if fasle, is not matched
+     * @return boolean: if true, the assignment (LS-LOCAL-ASSIGNMENTS) matches the worker heartbeat; if false, it does not match
      */
-    public boolean matchesAssignment(WorkerHeartbeat whb,
-            Map<Integer, LocalAssignment> assignedTasks) {
+    public boolean matchesAssignment(WorkerHeartbeat whb, Map<Integer, LocalAssignment> assignedTasks) {
 
         boolean isMatch = true;
         LocalAssignment localAssignment = assignedTasks.get(whb.getPort());
 
         if (localAssignment == null) {
-            LOG.debug("Following worker has been removed, port="
-                    + whb.getPort() + ", assignedTasks=" + assignedTasks);
+            LOG.debug("Following worker has been removed, port=" + whb.getPort() + ", assignedTasks=" + assignedTasks);
             isMatch = false;
         } else if (!whb.getTopologyId().equals(localAssignment.getTopologyId())) {
             // topology id not equal
-            LOG.info("topology id not equal whb=" + whb.getTopologyId()
-                    + ",localAssignment=" + localAssignment.getTopologyId());
+            LOG.info("topology id not equal whb=" + whb.getTopologyId() + ",localAssignment=" + localAssignment.getTopologyId());
             isMatch = false;
         }/*
-          * else if (!(whb.getTaskIds().equals(localAssignment.getTaskIds()))) {
-          * // task-id isn't equal LOG.info("task-id isn't equal whb=" +
-          * whb.getTaskIds() + ",localAssignment=" +
-          * localAssignment.getTaskIds()); isMatch = false; }
+          * else if (!(whb.getTaskIds().equals(localAssignment.getTaskIds()))) { // task-id isn't equal LOG.info("task-id isn't equal whb=" + whb.getTaskIds() +
+          * ",localAssignment=" + localAssignment.getTaskIds()); isMatch = false; }
           */
 
         return isMatch;
@@ -445,17 +408,15 @@ class SyncProcessEvent extends ShutdownWork {
 
     /**
      * get all workers heartbeats of the supervisor
-     * 
+     *
      * @param conf
      * @return Map<workerId, WorkerHeartbeat>
      * @throws IOException
      * @throws IOException
      */
-    public Map<String, WorkerHeartbeat> readWorkerHeartbeats(Map conf)
-            throws Exception {
+    public Map<String, WorkerHeartbeat> readWorkerHeartbeats(Map conf) throws Exception {
 
-        Map<String, WorkerHeartbeat> workerHeartbeats =
-                new HashMap<String, WorkerHeartbeat>();
+        Map<String, WorkerHeartbeat> workerHeartbeats = new HashMap<String, WorkerHeartbeat>();
 
         // get the path: STORM-LOCAL-DIR/workers
         String path = StormConfig.worker_root(conf);
@@ -480,20 +441,19 @@ class SyncProcessEvent extends ShutdownWork {
 
     /**
      * get worker heartbeat by workerid
-     * 
+     *
      * @param conf
      * @param workerId
      * @returns WorkerHeartbeat
      * @throws IOException
      */
-    public WorkerHeartbeat readWorkerHeartbeat(Map conf, String workerId)
-            throws Exception {
+    public WorkerHeartbeat readWorkerHeartbeat(Map conf, String workerId) throws Exception {
 
         try {
             LocalState ls = StormConfig.worker_state(conf, workerId);
 
             return (WorkerHeartbeat) ls.get(Common.LS_WORKER_HEARTBEAT);
-        } catch (IOException e) {
+        } catch (Exception e) {
             LOG.error("Failed to get worker Heartbeat", e);
             return null;
         }
@@ -502,7 +462,7 @@ class SyncProcessEvent extends ShutdownWork {
 
     /**
      * launch a worker in local mode
-     * 
+     *
      * @param conf
      * @param sharedcontext
      * @param topologyId
@@ -512,17 +472,12 @@ class SyncProcessEvent extends ShutdownWork {
      * @param workerThreadPidsAtom
      * @throws Exception
      */
-    public void launchWorker(Map conf, IContext sharedcontext,
-            String topologyId, String supervisorId, Integer port,
-            String workerId,
-            ConcurrentHashMap<String, String> workerThreadPidsAtom)
-            throws Exception {
+    public void launchWorker(Map conf, IContext sharedcontext, String topologyId, String supervisorId, Integer port, String workerId,
+            ConcurrentHashMap<String, String> workerThreadPidsAtom) throws Exception {
 
         String pid = UUID.randomUUID().toString();
 
-        WorkerShutdown worker =
-                Worker.mk_worker(conf, sharedcontext, topologyId, supervisorId,
-                        port, workerId, null);
+        WorkerShutdown worker = Worker.mk_worker(conf, sharedcontext, topologyId, supervisorId, port, workerId, null);
 
         ProcessSimulator.registerProcess(pid, worker);
 
@@ -534,13 +489,11 @@ class SyncProcessEvent extends ShutdownWork {
     private Set<String> setFilterJars(Map totalConf) {
         Set<String> filterJars = new HashSet<String>();
 
-        boolean enableClassloader =
-                ConfigExtension.isEnableTopologyClassLoader(totalConf);
+        boolean enableClassloader = ConfigExtension.isEnableTopologyClassLoader(totalConf);
         if (enableClassloader == false) {
             // avoid logback vs log4j conflict
             boolean enableLog4j = false;
-            String userDefLog4jConf =
-                    ConfigExtension.getUserDefinedLog4jConf(totalConf);
+            String userDefLog4jConf = ConfigExtension.getUserDefinedLog4jConf(totalConf);
             if (StringUtils.isBlank(userDefLog4jConf) == false) {
                 enableLog4j = true;
             }
@@ -601,8 +554,7 @@ class SyncProcessEvent extends ShutdownWork {
         }
 
         if (stormHome != null) {
-            List<String> stormHomeFiles =
-                    PathUtils.read_dir_contents(stormHome);
+            List<String> stormHomeFiles = PathUtils.read_dir_contents(stormHome);
 
             for (String file : stormHomeFiles) {
                 if (file.endsWith(".jar")) {
@@ -610,13 +562,10 @@ class SyncProcessEvent extends ShutdownWork {
                 }
             }
 
-            List<String> stormLibFiles =
-                    PathUtils.read_dir_contents(stormHome + File.separator
-                            + "lib");
+            List<String> stormLibFiles = PathUtils.read_dir_contents(stormHome + File.separator + "lib");
             for (String file : stormLibFiles) {
                 if (file.endsWith(".jar")) {
-                    classSet.add(stormHome + File.separator + "lib"
-                            + File.separator + file);
+                    classSet.add(stormHome + File.separator + "lib" + File.separator + file);
                 }
             }
 
@@ -646,8 +595,7 @@ class SyncProcessEvent extends ShutdownWork {
         String childopts = " ";
 
         if (stormConf.get(Config.TOPOLOGY_WORKER_CHILDOPTS) != null) {
-            childopts +=
-                    (String) stormConf.get(Config.TOPOLOGY_WORKER_CHILDOPTS);
+            childopts += (String) stormConf.get(Config.TOPOLOGY_WORKER_CHILDOPTS);
         } else if (ConfigExtension.getWorkerGc(stormConf) != null) {
             childopts += ConfigExtension.getWorkerGc(stormConf);
         }
@@ -655,8 +603,7 @@ class SyncProcessEvent extends ShutdownWork {
         return childopts;
     }
 
-    public String getLogParameter(Map conf, String stormHome,
-            String topologyName, int port) {
+    public String getLogParameter(Map conf, String stormHome, String topologyName, int port) {
         final String LOGBACK_CONF_TAG = "logback.configurationFile";
         final String LOGBACK_CONF_TAG_CMD = " -D" + LOGBACK_CONF_TAG + "=";
         final String DEFAULT_LOG_CONF = "jstorm.logback.xml";
@@ -664,13 +611,15 @@ class SyncProcessEvent extends ShutdownWork {
         String logFileName = JStormUtils.genLogName(topologyName, port);
         // String logFileName = topologyId + "-worker-" + port + ".log";
 
+
         StringBuilder commandSB = new StringBuilder();
         commandSB.append(" -Dlogfile.name=");
         commandSB.append(logFileName);
+        commandSB.append(" -Dtopology.name=").append(topologyName);
+
         // commandSB.append(" -Dlog4j.ignoreTCL=true");
 
-        String userDefLogbackConf =
-                ConfigExtension.getUserDefinedLogbackConf(conf);
+        String userDefLogbackConf = ConfigExtension.getUserDefinedLogbackConf(conf);
         String logConf = System.getProperty(LOGBACK_CONF_TAG);
 
         if (StringUtils.isBlank(userDefLogbackConf) == false) {
@@ -679,9 +628,7 @@ class SyncProcessEvent extends ShutdownWork {
         } else if (StringUtils.isBlank(logConf) == false) {
             commandSB.append(LOGBACK_CONF_TAG_CMD).append(logConf);
         } else if (StringUtils.isBlank(stormHome) == false) {
-            commandSB.append(LOGBACK_CONF_TAG_CMD).append(stormHome)
-                    .append(File.separator).append("conf")
-                    .append(File.separator).append(DEFAULT_LOG_CONF);
+            commandSB.append(LOGBACK_CONF_TAG_CMD).append(stormHome).append(File.separator).append("conf").append(File.separator).append(DEFAULT_LOG_CONF);
         } else {
             commandSB.append(LOGBACK_CONF_TAG_CMD + DEFAULT_LOG_CONF);
         }
@@ -690,38 +637,35 @@ class SyncProcessEvent extends ShutdownWork {
         String userDefLog4jConf = ConfigExtension.getUserDefinedLog4jConf(conf);
         if (StringUtils.isBlank(userDefLog4jConf) == false) {
             LOG.info("Use user fined log4j conf " + userDefLog4jConf);
-            commandSB.append(" -D" + LOG4J_CONF_TAG + "=").append(
-                    userDefLog4jConf);
+            commandSB.append(" -D" + LOG4J_CONF_TAG + "=").append(userDefLog4jConf);
         }
 
         return commandSB.toString();
     }
 
-    private String getGcDumpParam(Map totalConf) {
+    private String getGcDumpParam(String topologyName, Map totalConf) {
         // String gcPath = ConfigExtension.getWorkerGcPath(totalConf);
         String gcPath = JStormUtils.getLogDir();
 
         Date now = new Date();
         String nowStr = TimeFormat.getSecond(now);
 
-        StringBuilder gc = new StringBuilder();
-
+        StringBuilder gc = new StringBuilder(256);
         gc.append(" -Xloggc:");
-        gc.append(gcPath);
-        gc.append(File.separator);
-        gc.append("%TOPOLOGYID%-worker-%ID%-");
-        gc.append(nowStr);
+        gc.append(gcPath).append(File.separator);
+        gc.append(topologyName).append(File.separator);
+        gc.append("%TOPOLOGYID%-worker-%ID%");
         gc.append("-gc.log -verbose:gc -XX:HeapDumpPath=");
-        gc.append(gcPath).append(File.separator).append("java-%TOPOLOGYID%-")
-                .append(nowStr).append(".hprof");
+        gc.append(gcPath).append(File.separator).append(topologyName).append(File.separator).append("java-%TOPOLOGYID%-").append(nowStr).append(".hprof");
         gc.append(" ");
 
+
         return gc.toString();
     }
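
The %TOPOLOGYID% and %ID% tokens in the GC parameters are placeholders that
are substituted later in the worker-launch path before the command runs. A
sketch of what that substitution amounts to, assuming %ID% maps to the worker
port (the concrete values below are hypothetical):

    String gcParam = " -Xloggc:/home/admin/logs/myTopology/%TOPOLOGYID%-worker-%ID%-gc.log";
    String resolved = gcParam
            .replace("%TOPOLOGYID%", "myTopology-1-1448976000")  // hypothetical topology id
            .replace("%ID%", String.valueOf(6800));              // hypothetical worker port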
 
     /**
      * launch a worker in distributed mode
-     * 
+     *
      * @param conf
      * @param sharedcontext
      * @param topologyId
@@ -731,20 +675,17 @@ class SyncProcessEvent extends ShutdownWork {
      * @throws IOException
      * @pdOid 6ea369dd-5ce2-4212-864b-1f8b2ed94abb
      */
-    public void launchWorker(Map conf, IContext sharedcontext,
-            String topologyId, String supervisorId, Integer port,
-            String workerId, LocalAssignment assignment) throws IOException {
+    public void launchWorker(Map conf, IContext sharedcontext, String topologyId, String supervisorId, Integer port, String workerId, LocalAssignment assignment)
+            throws IOException {
 
         // STORM-LOCAL-DIR/supervisor/stormdist/topologyId
-        String stormroot =
-                StormConfig.supervisor_stormdist_root(conf, topologyId);
+        String stormroot = StormConfig.supervisor_stormdist_root(conf, topologyId);
 
         // STORM-LOCAL-DIR/supervisor/stormdist/topologyId/stormjar.jar
         String stormjar = StormConfig.stormjar_path(stormroot);
 
         // get supervisor conf
-        Map stormConf =
-                StormConfig.read_supervisor_topology_conf(conf, topologyId);
+        Map stormConf = StormConfig.read_supervisor_topology_conf(conf, topologyId);
 
         Map totalConf = new HashMap();
         totalConf.putAll(conf);
@@ -761,12 +702,13 @@ class SyncProcessEvent extends ShutdownWork {
         String stormhome = System.getProperty("jstorm.home");
 
         long memSize = assignment.getMem();
+        long memMinSize = ConfigExtension.getMemMinSizePerWorker(totalConf);
         int cpuNum = assignment.getCpu();
         long memGsize = memSize / JStormUtils.SIZE_1_G;
         int gcThreadsNum = memGsize > 4 ? (int) (memGsize * 1.5) : 4;
         String childopts = getChildOpts(totalConf);
 
-        childopts += getGcDumpParam(totalConf);
+        childopts += getGcDumpParam(Common.getTopologyNameById(topologyId), totalConf);
 
         Map<String, String> environment = new HashMap<String, String>();
 
@@ -776,15 +718,13 @@ class SyncProcessEvent extends ShutdownWork {
             environment.put("REDIRECT", "false");
         }
 
-        environment.put("LD_LIBRARY_PATH",
-                (String) totalConf.get(Config.JAVA_LIBRARY_PATH));
+        environment.put("LD_LIBRARY_PATH", (String) totalConf.get(Config.JAVA_LIBRARY_PATH));
 
         StringBuilder commandSB = new StringBuilder();
 
         try {
             if (this.cgroupManager != null) {
-                commandSB.append(cgroupManager.startNewWorker(totalConf,
-                        cpuNum, workerId));
+                commandSB.append(cgroupManager.startNewWorker(totalConf, cpuNum, workerId));
             }
         } catch (Exception e) {
             LOG.error("fail to prepare cgroup to workerId: " + workerId, e);
@@ -793,15 +733,21 @@ class SyncProcessEvent extends ShutdownWork {
 
         // commandSB.append("java -server -Xdebug -Xrunjdwp:transport=dt_socket,address=8000,server=y,suspend=n ");
         commandSB.append("java -server ");
-        commandSB.append(" -Xms" + memSize);
+        commandSB.append(" -Xms" + memMinSize);
         commandSB.append(" -Xmx" + memSize + " ");
-        commandSB.append(" -Xmn" + memSize / 3 + " ");
-        commandSB.append(" -XX:PermSize=" + memSize / 16);
-        commandSB.append(" -XX:MaxPermSize=" + memSize / 8);
+        if (memMinSize < (memSize / 2))
+            commandSB.append(" -Xmn" + memMinSize + " ");
+        else
+            commandSB.append(" -Xmn" + memSize / 2 + " ");
+        if (memGsize >= 2) {
+            commandSB.append(" -XX:PermSize=" + memSize / 32);
+        } else {
+            commandSB.append(" -XX:PermSize=" + memSize / 16);
+        }
+        commandSB.append(" -XX:MaxPermSize=" + memSize / 16);
         commandSB.append(" -XX:ParallelGCThreads=" + gcThreadsNum);
         commandSB.append(" " + childopts);
-        commandSB.append(" "
-                + (assignment.getJvm() == null ? "" : assignment.getJvm()));
+        commandSB.append(" " + (assignment.getJvm() == null ? "" : assignment.getJvm()));
 
         commandSB.append(" -Djava.library.path=");
         commandSB.append((String) totalConf.get(Config.JAVA_LIBRARY_PATH));
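
The revised JVM sizing above decouples the initial heap (-Xms) from the
assigned maximum: -Xms is the configured per-worker minimum, -Xmn is the
smaller of that minimum and half the maximum heap, and the permanent
generation shrinks to 1/32 of the heap once the worker has 2 GB or more.
A worked example under assumed sizes (memSize = 4 GB, memMinSize = 1 GB):

    long memSize    = 4L << 30;                                           // 4 GB assigned maximum
    long memMinSize = 1L << 30;                                           // 1 GB configured minimum
    long memGsize   = memSize / JStormUtils.SIZE_1_G;                     // 4
    long xmn     = (memMinSize < memSize / 2) ? memMinSize : memSize / 2; // 1 GB
    long perm    = (memGsize >= 2) ? memSize / 32 : memSize / 16;         // 128 MB
    long maxPerm = memSize / 16;                                          // 256 MB
    int gcThreads = memGsize > 4 ? (int) (memGsize * 1.5) : 4;            // 4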
@@ -811,20 +757,18 @@ class SyncProcessEvent extends ShutdownWork {
             commandSB.append(stormhome);
         }
 
-        commandSB.append(getLogParameter(totalConf, stormhome,
-                assignment.getTopologyName(), port));
+        String logDir = System.getProperty("jstorm.log.dir");
+        if (logDir != null)
+             commandSB.append(" -Djstorm.log.dir=").append(logDir);
+        commandSB.append(getLogParameter(totalConf, stormhome, assignment.getTopologyName(), port));
 
         String classpath = getClassPath(stormjar, stormhome, totalConf);
-        String workerClassPath =
-                (String) totalConf.get(Config.TOPOLOGY_CLASSPATH);
-        List<String> otherLibs =
-                (List<String>) stormConf
-                        .get(GenericOptionsParser.TOPOLOGY_LIB_NAME);
+        String workerClassPath = (String) totalConf.get(Config.TOPOLOGY_CLASSPATH);
+        List<String> otherLibs = (List<String>) stormConf.get(GenericOptionsParser.TOPOLOGY_LIB_NAME);
         StringBuilder sb = new StringBuilder();
         if (otherLibs != null) {
             for (String libName : otherLibs) {
-                sb.append(StormConfig.stormlib_path(stormroot, libName))
-                        .append(":");
+                sb.append(StormConfig.stormlib_path(stormroot, libName)).append(":");
             }
         }
         workerClassPath = workerClassPath + ":" + sb.toString();
@@ -832,8 +776,7 @@ class SyncProcessEvent extends ShutdownWork {
         Map<String, String> policyReplaceMap = new HashMap<String, String>();
         String realClassPath = classpath + ":" + workerClassPath;
         policyReplaceMap.put(SandBoxMaker.CLASS_PATH_KEY, realClassPath);
-        commandSB
-                .append(sandBoxMaker.sandboxPolicy(workerId, policyReplaceMap));
+        commandSB.append(sandBoxMaker.sandboxPolicy(workerId, policyReplaceMap));
 
         commandSB.append(" -cp ");
         // commandSB.append(workerClassPath + ":");
@@ -871,9 +814,7 @@ class SyncProcessEvent extends ShutdownWork {
         JStormUtils.launch_process(cmd, environment, true);
     }
 
-    private Set<Integer> killUselessWorkers(
-            Map<String, StateHeartbeat> localWorkerStats,
-            Map<Integer, LocalAssignment> localAssignments,
+    private Set<Integer> killUselessWorkers(Map<String, StateHeartbeat> localWorkerStats, Map<Integer, LocalAssignment> localAssignments,
             Map<String, Integer> taskCleanupTimeoutMap) {
         Map<String, String> removed = new HashMap<String, String>();
         Set<Integer> keepPorts = new HashSet<Integer>();
@@ -882,8 +823,7 @@ class SyncProcessEvent extends ShutdownWork {
 
             String workerid = entry.getKey();
             StateHeartbeat hbstate = entry.getValue();
-            if (workerIdToStartTimeAndPort.containsKey(workerid)
-                    && hbstate.getState().equals(State.notStarted))
+            if (workerIdToStartTimeAndPort.containsKey(workerid) && hbstate.getState().equals(State.notStarted))
                 continue;
 
             if (hbstate.getState().equals(State.valid)) {
@@ -891,8 +831,7 @@ class SyncProcessEvent extends ShutdownWork {
                 keepPorts.add(hbstate.getHeartbeat().getPort());
             } else {
                 if (hbstate.getHeartbeat() != null) {
-                    removed.put(workerid, hbstate.getHeartbeat()
-                            .getTopologyId());
+                    removed.put(workerid, hbstate.getHeartbeat().getTopologyId());
                 } else {
                     removed.put(workerid, null);
                 }
@@ -910,14 +849,12 @@ class SyncProcessEvent extends ShutdownWork {
             }
         }
 
-        shutWorker(conf, supervisorId, removed, workerThreadPids,
-                cgroupManager, false, killingWorkers, taskCleanupTimeoutMap);
+        shutWorker(conf, supervisorId, removed, workerThreadPids, cgroupManager, false, killingWorkers, taskCleanupTimeoutMap);
         Set<String> activeTopologys = new HashSet<String>();
         if (killingWorkers.size() == 0) {
             // When all workers under killing are killed successfully,
             // clean the task cleanup timeout map correspondingly.
-            for (Entry<Integer, LocalAssignment> entry : localAssignments
-                    .entrySet()) {
+            for (Entry<Integer, LocalAssignment> entry : localAssignments.entrySet()) {
                 activeTopologys.add(entry.getValue().getTopologyId());
             }
 
@@ -936,8 +873,7 @@ class SyncProcessEvent extends ShutdownWork {
             localWorkerStats.remove(removedWorkerId);
         }
         // Keep the workers which are still under starting
-        for (Entry<String, Pair<Integer, Integer>> entry : workerIdToStartTimeAndPort
-                .entrySet()) {
+        for (Entry<String, Pair<Integer, Integer>> entry : workerIdToStartTimeAndPort.entrySet()) {
             String workerId = entry.getKey();
             StateHeartbeat hbstate = localWorkerStats.get(workerId);
             if (hbstate != null)
@@ -948,14 +884,12 @@ class SyncProcessEvent extends ShutdownWork {
         return keepPorts;
     }
 
-    private void startNewWorkers(Set<Integer> keepPorts,
-            Map<Integer, LocalAssignment> localAssignments) throws Exception {
+    private void startNewWorkers(Set<Integer> keepPorts, Map<Integer, LocalAssignment> localAssignments, Set<String> downloadFailedTopologyIds)
+            throws Exception {
         /**
-         * Step 4: get reassigned tasks, which is in assignedTasks, but not in
-         * keeperPorts Map<port(type Integer), LocalAssignment>
+         * Step 4: get reassigned tasks, which are in assignedTasks but not in keepPorts: Map<port(type Integer), LocalAssignment>
          */
-        Map<Integer, LocalAssignment> newWorkers =
-                JStormUtils.select_keys_pred(keepPorts, localAssignments);
+        Map<Integer, LocalAssignment> newWorkers = JStormUtils.select_keys_pred(keepPorts, localAssignments);
 
         /**
          * Step 5: generate new worker ids
@@ -965,7 +899,10 @@ class SyncProcessEvent extends ShutdownWork {
         for (Entry<Integer, LocalAssignment> entry : newWorkers.entrySet()) {
             Integer port = entry.getKey();
             LocalAssignment assignment = entry.getValue();
-
+            if (assignment != null && assignment.getTopologyId() != null && downloadFailedTopologyIds.contains(assignment.getTopologyId())) {
+                LOG.info("Can't start this worker: " + port + " about the topology: " + assignment.getTopologyId() + ", due to the damaged binary !!");
+                continue;
+            }
             String workerId = UUID.randomUUID().toString();
 
             newWorkerIds.put(port, workerId);
@@ -994,18 +931,14 @@ class SyncProcessEvent extends ShutdownWork {
                 String clusterMode = StormConfig.cluster_mode(conf);
 
                 if (clusterMode.equals("distributed")) {
-                    launchWorker(conf, sharedContext,
-                            assignment.getTopologyId(), supervisorId, port,
-                            workerId, assignment);
+                    launchWorker(conf, sharedContext, assignment.getTopologyId(), supervisorId, port, workerId, assignment);
                 } else if (clusterMode.equals("local")) {
-                    launchWorker(conf, sharedContext,
-                            assignment.getTopologyId(), supervisorId, port,
-                            workerId, workerThreadPids);
+                    launchWorker(conf, sharedContext, assignment.getTopologyId(), supervisorId, port, workerId, workerThreadPids);
                 }
             } catch (Exception e) {
-                String errorMsg =
-                        "Failed to launchWorker workerId:" + workerId + ":"
-                                + port;
+                workerReportError.report(assignment.getTopologyId(), port,
+                        assignment.getTaskIds(), new String(JStormUtils.getErrorInfo(e)));
+                String errorMsg = "Failed to launchWorker workerId:" + workerId + ":" + port;
                 LOG.error(errorMsg, e);
                 throw e;
             }
@@ -1013,8 +946,7 @@ class SyncProcessEvent extends ShutdownWork {
         }
 
         /**
-         * FIXME, workerIds should be Set, not Collection, but here simplify the
-         * logic
+         * FIXME, workerIds should be Set, not Collection, but here simplify the logic
          */
         markAllNewWorkers(newWorkerIds);
         // try {
@@ -1027,6 +959,9 @@ class SyncProcessEvent extends ShutdownWork {
     }
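
Step 4 above relies on JStormUtils.select_keys_pred to keep only the
assignments whose port is not already covered by a healthy worker. Assuming
that filtering semantic, as the Step 4 comment describes, an equivalent
plain-Java loop would be:

    // Illustrative equivalent of JStormUtils.select_keys_pred(keepPorts, localAssignments).
    Map<Integer, LocalAssignment> newWorkers = new HashMap<Integer, LocalAssignment>();
    for (Entry<Integer, LocalAssignment> e : localAssignments.entrySet()) {
        if (!keepPorts.contains(e.getKey())) {
            newWorkers.put(e.getKey(), e.getValue());  // port needs a (re)launched worker
        }
    }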
 
     boolean isWorkerDead(String workerId) {
+        if (ConfigExtension.isCheckWorkerAliveBySystemInfo(conf) == false) {
+            return false;
+        }
 
         try {
             List<String> pids = getPid(conf, workerId);
@@ -1046,9 +981,7 @@ class SyncProcessEvent extends ShutdownWork {
 
             return true;
         } catch (IOException e) {
-            LOG.info(
-                    "Failed to check whether worker is dead through /proc/pid",
-                    e);
+            LOG.info("Failed to check whether worker is dead through /proc/pid", e);
             return false;
         }
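
The new guard at the top of isWorkerDead lets operators disable the
system-level liveness probe entirely: when
ConfigExtension.isCheckWorkerAliveBySystemInfo(conf) is false, the method
always answers false and dead-worker detection falls back to heartbeats. A
minimal sketch of the probe itself, per the /proc/pid approach the log
message refers to (the helper name is illustrative):

    // Linux only: a pid is alive iff /proc/<pid> exists. The real method
    // iterates the pids recorded for the worker id via getPid(conf, workerId).
    static boolean isPidAlive(String pid) {
        return new java.io.File("/proc" + java.io.File.separator + pid).exists();
    }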
 


http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/metric/AsmMetricSet.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/AsmMetricSet.java b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/AsmMetricSet.java
new file mode 100644
index 0000000..709db27
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/AsmMetricSet.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.metric;
+
+import com.alibaba.jstorm.common.metric.AsmMetric;
+
+import java.io.Serializable;
+import java.util.Map;
+
+/**
+ * @author Cody (weiyue.wy@alibaba-inc.com)
+ * @since 2.0.5
+ */
+public interface AsmMetricSet extends Serializable {
+    Map<String, AsmMetric> getMetrics();
+}
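
A minimal sketch of an implementing class, illustrative only and not part of
the patch, showing the shape the interface expects:

    public class SingleMetricSet implements AsmMetricSet {
        private final Map<String, AsmMetric> metrics = new java.util.HashMap<String, AsmMetric>();

        public SingleMetricSet(String name, AsmMetric metric) {
            metrics.put(name, metric);
        }

        @Override
        public Map<String, AsmMetric> getMetrics() {
            return metrics;
        }
    }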

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/metric/AsmWindow.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/AsmWindow.java b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/AsmWindow.java
new file mode 100644
index 0000000..ecb69d6
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/AsmWindow.java
@@ -0,0 +1,41 @@
+package com.alibaba.jstorm.metric;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+
+/**
+ * @author Cody (weiyue.wy@alibaba-inc.com)
+ * @since 2.0.5
+ */
+public class AsmWindow {
+    public static final Integer M1_WINDOW = 60;
+    public static final Integer M10_WINDOW = 600;
+    public static final Integer H2_WINDOW = 7200;
+    public static final Integer D1_WINDOW = 86400;
+
+    public static final String M1_WINDOW_STR = "0d0h1m0s";
+    public static final String M10_WINDOW_STR = "0d0h10m0s";
+    public static final String H2_WINDOW_STR = "0d2h0m0s";
+    public static final String D1_WINDOW_STR = "1d0h0m0s";
+
+    public static final Set<Integer> TIME_WINDOWS = new TreeSet<Integer>();
+    private static final Map<Integer, String> WIN_TO_STR = new HashMap<Integer, String>();
+
+    static {
+        TIME_WINDOWS.add(M1_WINDOW);
+        TIME_WINDOWS.add(M10_WINDOW);
+        TIME_WINDOWS.add(H2_WINDOW);
+        TIME_WINDOWS.add(D1_WINDOW);
+
+        WIN_TO_STR.put(M1_WINDOW, M1_WINDOW_STR);
+        WIN_TO_STR.put(M10_WINDOW, M10_WINDOW_STR);
+        WIN_TO_STR.put(H2_WINDOW, H2_WINDOW_STR);
+        WIN_TO_STR.put(D1_WINDOW, D1_WINDOW_STR);
+    }
+
+    public static String win2str(Integer win) {
+        return WIN_TO_STR.get(win);
+    }
+}
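
Windows are keyed by their length in seconds, and win2str renders the
human-readable form. For example:

    String label  = AsmWindow.win2str(AsmWindow.M10_WINDOW);  // "0d0h10m0s"
    boolean known = AsmWindow.TIME_WINDOWS.contains(600);     // true: the 10-minute window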

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/metric/Bytes.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/Bytes.java b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/Bytes.java
new file mode 100644
index 0000000..290f813
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/Bytes.java
@@ -0,0 +1,842 @@
+package com.alibaba.jstorm.metric;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.UnsupportedEncodingException;
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.nio.ByteBuffer;
+
+public class Bytes {
+
+    private static final Logger LOG = LoggerFactory.getLogger(Bytes.class);
+
+    /**
+     * Size of boolean in bytes
+     */
+    public static final int SIZEOF_BOOLEAN = Byte.SIZE / Byte.SIZE;
+
+    /**
+     * Size of byte in bytes
+     */
+    public static final int SIZEOF_BYTE = SIZEOF_BOOLEAN;
+
+    /**
+     * Size of char in bytes
+     */
+    public static final int SIZEOF_CHAR = Character.SIZE / Byte.SIZE;
+
+    /**
+     * Size of double in bytes
+     */
+    public static final int SIZEOF_DOUBLE = Double.SIZE / Byte.SIZE;
+
+    /**
+     * Size of float in bytes
+     */
+    public static final int SIZEOF_FLOAT = Float.SIZE / Byte.SIZE;
+
+    /**
+     * Size of int in bytes
+     */
+    public static final int SIZEOF_INT = Integer.SIZE / Byte.SIZE;
+
+    /**
+     * Size of long in bytes
+     */
+    public static final int SIZEOF_LONG = Long.SIZE / Byte.SIZE;
+
+    /**
+     * Size of short in bytes
+     */
+    public static final int SIZEOF_SHORT = Short.SIZE / Byte.SIZE;
+
+
+    /**
+     * Estimate of size cost to pay beyond payload in jvm for instance of byte [].
+     * Estimate based on study of jhat and jprofiler numbers.
+     */
+    // JHat says BU is 56 bytes.
+    // SizeOf which uses java.lang.instrument says 24 bytes. (3 longs?)
+    public static final int ESTIMATED_HEAP_TAX = 16;
+
+
+    /**
+     * Put bytes at the specified byte array position.
+     *
+     * @param tgtBytes  the byte array
+     * @param tgtOffset position in the array
+     * @param srcBytes  array to write out
+     * @param srcOffset source offset
+     * @param srcLength source length
+     * @return incremented offset
+     */
+    public static int putBytes(byte[] tgtBytes, int tgtOffset, byte[] srcBytes,
+                               int srcOffset, int srcLength) {
+        System.arraycopy(srcBytes, srcOffset, tgtBytes, tgtOffset, srcLength);
+        return tgtOffset + srcLength;
+    }
+
+    /**
+     * Write a single byte out to the specified byte array position.
+     *
+     * @param bytes  the byte array
+     * @param offset position in the array
+     * @param b      byte to write out
+     * @return incremented offset
+     */
+    public static int putByte(byte[] bytes, int offset, byte b) {
+        bytes[offset] = b;
+        return offset + 1;
+    }
+
+    /**
+     * Returns a new byte array, copied from the passed ByteBuffer.
+     *
+     * @param bb A ByteBuffer
+     * @return the byte array
+     */
+    public static byte[] toBytes(ByteBuffer bb) {
+        int length = bb.limit();
+        byte[] result = new byte[length];
+        System.arraycopy(bb.array(), bb.arrayOffset(), result, 0, length);
+        return result;
+    }
+
+    public static byte[] copyBytes(final byte[] bytes, int offset, int length) {
+        if (offset + length > bytes.length) {
+            throw explainWrongLengthOrOffset(bytes, offset, length, length);
+        }
+        byte[] result = new byte[length];
+        System.arraycopy(bytes, offset, result, 0, length);
+        return result;
+    }
+
+    /**
+     * Write a printable representation of a byte array.
+     *
+     * @param b byte array
+     * @return string
+     * @see #toStringBinary(byte[], int, int)
+     */
+    public static String toStringBinary(final byte[] b) {
+        if (b == null)
+            return "null";
+        return toStringBinary(b, 0, b.length);
+    }
+
+    /**
+     * Converts the given byte buffer, from its array offset to its limit, to
+     * a string. The position and the mark are ignored.
+     *
+     * @param buf a byte buffer
+     * @return a string representation of the buffer's binary contents
+     */
+    public static String toStringBinary(ByteBuffer buf) {
+        if (buf == null)
+            return "null";
+        return toStringBinary(buf.array(), buf.arrayOffset(), buf.limit());
+    }
+
+    /**
+     * Write a printable representation of a byte array. Non-printable
+     * characters are hex escaped in the format \\x%02X, eg:
+     * \x00 \x05 etc
+     *
+     * @param b   array to write out
+     * @param off offset to start at
+     * @param len length to write
+     * @return string output
+     */
+    public static String toStringBinary(final byte[] b, int off, int len) {
+        StringBuilder result = new StringBuilder();
+        try {
+            String first = new String(b, off, len, "ISO-8859-1");
+            for (int i = 0; i < first.length(); ++i) {
+                int ch = first.charAt(i) & 0xFF;
+                if ((ch >= '0' && ch <= '9')
+                        || (ch >= 'A' && ch <= 'Z')
+                        || (ch >= 'a' && ch <= 'z')
+                        || " `~!@#$%^&*()-_=+[]{}\\|;:'\",.<>/?".indexOf(ch) >= 0) {
+                    result.append(first.charAt(i));
+                } else {
+                    result.append(String.format("\\x%02X", ch));
+                }
+            }
+        } catch (UnsupportedEncodingException e) {
+            LOG.error("ISO-8859-1 not supported?", e);
+        }
+        return result.toString();
+    }
+
+    private static boolean isHexDigit(char c) {
+        return
+                (c >= 'A' && c <= 'F') ||
+                        (c >= '0' && c <= '9');
+    }
+
+    /**
+     * Takes an ASCII digit in the range A-F0-9 and returns
+     * the corresponding integer/ordinal value.
+     *
+     * @param ch The hex digit.
+     * @return The converted hex value as a byte.
+     */
+    public static byte toBinaryFromHex(byte ch) {
+        if (ch >= 'A' && ch <= 'F')
+            return (byte) ((byte) 10 + (byte) (ch - 'A'));
+        // else
+        return (byte) (ch - '0');
+    }
+
+    public static byte[] toBytesBinary(String in) {
+        // this may be bigger than we need, but let's be safe.
+        byte[] b = new byte[in.length()];
+        int size = 0;
+        for (int i = 0; i < in.length(); ++i) {
+            char ch = in.charAt(i);
+            if (ch == '\\' && in.length() > i + 1 && in.charAt(i + 1) == 'x') {
+                // ok, take next 2 hex digits.
+                char hd1 = in.charAt(i + 2);
+                char hd2 = in.charAt(i + 3);
+
+                // they need to be A-F0-9:
+                if (!isHexDigit(hd1) ||
+                        !isHexDigit(hd2)) {
+                    // bogus escape code, ignore:
+                    continue;
+                }
+                // turn hex ASCII digit -> number
+                byte d = (byte) ((toBinaryFromHex((byte) hd1) << 4) + toBinaryFromHex((byte) hd2));
+
+                b[size++] = d;
+                i += 3; // skip 3
+            } else {
+                b[size++] = (byte) ch;
+            }
+        }
+        // resize:
+        byte[] b2 = new byte[size];
+        System.arraycopy(b, 0, b2, 0, size);
+        return b2;
+    }
+
+    /**
+     * Convert a boolean to a byte array. True becomes -1
+     * and false becomes 0.
+     *
+     * @param b value
+     * @return <code>b</code> encoded in a byte array.
+     */
+    public static byte[] toBytes(final boolean b) {
+        return new byte[]{b ? (byte) -1 : (byte) 0};
+    }
+
+    /**
+     * Reverses {@link #toBytes(boolean)}
+     *
+     * @param b array
+     * @return True or false.
+     */
+    public static boolean toBoolean(final byte[] b) {
+        if (b.length != 1) {
+            throw new IllegalArgumentException("Array has wrong size: " + b.length);
+        }
+        return b[0] != (byte) 0;
+    }
+
+    public static boolean toBoolean(final byte[] bytes, int offset, int length) {
+        if (length != SIZEOF_BOOLEAN || offset + length > bytes.length) {
+            throw explainWrongLengthOrOffset(bytes, offset, length, SIZEOF_BOOLEAN);
+        }
+        return bytes[offset] != (byte) 0;
+    }
+
+    /**
+     * Convert a long value to a byte array using big-endian.
+     *
+     * @param val value to convert
+     * @return the byte array
+     */
+    public static byte[] toBytes(long val) {
+        byte[] b = new byte[8];
+        for (int i = 7; i > 0; i--) {
+            b[i] = (byte) val;
+            val >>>= 8;
+        }
+        b[0] = (byte) val;
+        return b;
+    }
+
+    /**
+     * Converts a byte array to a long value. Reverses
+     * {@link #toBytes(long)}
+     *
+     * @param bytes array
+     * @return the long value
+     */
+    public static long toLong(byte[] bytes) {
+        return toLong(bytes, 0, SIZEOF_LONG);
+    }
+
+    /**
+     * Converts a byte array to a long value. Assumes there will be
+     * {@link #SIZEOF_LONG} bytes available.
+     *
+     * @param bytes  bytes
+     * @param offset offset
+     * @return the long value
+     */
+    public static long toLong(byte[] bytes, int offset) {
+        return toLong(bytes, offset, SIZEOF_LONG);
+    }
+
+    /**
+     * Converts a byte array to a long value.
+     *
+     * @param bytes  array of bytes
+     * @param offset offset into array
+     * @param length length of data (must be {@link #SIZEOF_LONG})
+     * @return the long value
+     * @throws IllegalArgumentException if length is not {@link #SIZEOF_LONG} or
+     *                                  if there's not enough room in the array at the offset indicated.
+     */
+    public static long toLong(byte[] bytes, int offset, final int length) {
+        if (length != SIZEOF_LONG || offset + length > bytes.length) {
+            throw explainWrongLengthOrOffset(bytes, offset, length, SIZEOF_LONG);
+        }
+        long l = 0;
+        for (int i = offset; i < offset + length; i++) {
+            l <<= 8;
+            l ^= bytes[i] & 0xFF;
+        }
+        return l;
+    }
+
+    private static IllegalArgumentException
+    explainWrongLengthOrOffset(final byte[] bytes,
+                               final int offset,
+                               final int length,
+                               final int expectedLength) {
+        String reason;
+        if (length != expectedLength) {
+            reason = "Wrong length: " + length + ", expected " + expectedLength;
+        } else {
+            reason = "offset (" + offset + ") + length (" + length + ") exceed the"
+                    + " capacity of the array: " + bytes.length;
+        }
+        return new IllegalArgumentException(reason);
+    }
+
+    /**
+     * Put a long value out to the specified byte array position.
+     *
+     * @param bytes  the byte array
+     * @param offset position in the array
+     * @param val    long to write out
+     * @return incremented offset
+     * @throws IllegalArgumentException if the byte array given doesn't have
+     *                                  enough room at the offset specified.
+     */
+    public static int putLong(byte[] bytes, int offset, long val) {
+        if (bytes.length - offset < SIZEOF_LONG) {
+            throw new IllegalArgumentException("Not enough room to put a long at"
+                    + " offset " + offset + " in a " + bytes.length + " byte array");
+        }
+        for (int i = offset + 7; i > offset; i--) {
+            bytes[i] = (byte) val;
+            val >>>= 8;
+        }
+        bytes[offset] = (byte) val;
+        return offset + SIZEOF_LONG;
+    }
+
+    /**
+     * Presumes float encoded as IEEE 754 floating-point "single format"
+     *
+     * @param bytes byte array
+     * @return Float made from passed byte array.
+     */
+    public static float toFloat(byte[] bytes) {
+        return toFloat(bytes, 0);
+    }
+
+    /**
+     * Presumes float encoded as IEEE 754 floating-point "single format"
+     *
+     * @param bytes  array to convert
+     * @param offset offset into array
+     * @return Float made from passed byte array.
+     */
+    public static float toFloat(byte[] bytes, int offset) {
+        return Float.intBitsToFloat(toInt(bytes, offset, SIZEOF_INT));
+    }
+
+    /**
+     * @param bytes  byte array
+     * @param offset offset to write to
+     * @param f      float value
+     * @return New offset in <code>bytes</code>
+     */
+    public static int putFloat(byte[] bytes, int offset, float f) {
+        return putInt(bytes, offset, Float.floatToRawIntBits(f));
+    }
+
+    /**
+     * @param f float value
+     * @return the float represented as byte []
+     */
+    public static byte[] toBytes(final float f) {
+        // Encode it as int
+        return Bytes.toBytes(Float.floatToRawIntBits(f));
+    }
+
+    /**
+     * @param bytes byte array
+     * @return Return double made from passed bytes.
+     */
+    public static double toDouble(final byte[] bytes) {
+        return toDouble(bytes, 0);
+    }
+
+    /**
+     * @param bytes  byte array
+     * @param offset offset where double is
+     * @return Return double made from passed bytes.
+     */
+    public static double toDouble(final byte[] bytes, final int offset) {
+        return Double.longBitsToDouble(toLong(bytes, offset, SIZEOF_LONG));
+    }
+
+    /**
+     * @param bytes  byte array
+     * @param offset offset to write to
+     * @param d      value
+     * @return New offset into array <code>bytes</code>
+     */
+    public static int putDouble(byte[] bytes, int offset, double d) {
+        return putLong(bytes, offset, Double.doubleToLongBits(d));
+    }
+
+    /**
+     * Serialize a double as the IEEE 754 double format output. The resultant
+     * array will be 8 bytes long.
+     *
+     * @param d value
+     * @return the double represented as byte []
+     */
+    public static byte[] toBytes(final double d) {
+        // Encode it as a long
+        return Bytes.toBytes(Double.doubleToRawLongBits(d));
+    }
+
+    /**
+     * Convert an int value to a byte array
+     *
+     * @param val value
+     * @return the byte array
+     */
+    public static byte[] toBytes(int val) {
+        byte[] b = new byte[4];
+        for (int i = 3; i > 0; i--) {
+            b[i] = (byte) val;
+            val >>>= 8;
+        }
+        b[0] = (byte) val;
+        return b;
+    }
+
+    /**
+     * Converts a byte array to an int value
+     *
+     * @param bytes byte array
+     * @return the int value
+     */
+    public static int toInt(byte[] bytes) {
+        return toInt(bytes, 0, SIZEOF_INT);
+    }
+
+    /**
+     * Converts a byte array to an int value
+     *
+     * @param bytes  byte array
+     * @param offset offset into array
+     * @return the int value
+     */
+    public static int toInt(byte[] bytes, int offset) {
+        return toInt(bytes, offset, SIZEOF_INT);
+    }
+
+    /**
+     * Converts a byte array to an int value
+     *
+     * @param bytes  byte array
+     * @param offset offset into array
+     * @param length length of int (has to be {@link #SIZEOF_INT})
+     * @return the int value
+     * @throws IllegalArgumentException if length is not {@link #SIZEOF_INT} or
+     *                                  if there's not enough room in the array at the offset indicated.
+     */
+    public static int toInt(byte[] bytes, int offset, final int length) {
+        if (length != SIZEOF_INT || offset + length > bytes.length) {
+            throw explainWrongLengthOrOffset(bytes, offset, length, SIZEOF_INT);
+        }
+        int n = 0;
+        for (int i = offset; i < (offset + length); i++) {
+            n <<= 8;
+            n ^= bytes[i] & 0xFF;
+        }
+        return n;
+    }
+
+    /**
+     * Put an int value out to the specified byte array position.
+     *
+     * @param bytes  the byte array
+     * @param offset position in the array
+     * @param val    int to write out
+     * @return incremented offset
+     * @throws IllegalArgumentException if the byte array given doesn't have
+     *                                  enough room at the offset specified.
+     */
+    public static int putInt(byte[] bytes, int offset, int val) {
+        if (bytes.length - offset < SIZEOF_INT) {
+            throw new IllegalArgumentException("Not enough room to put an int at"
+                    + " offset " + offset + " in a " + bytes.length + " byte array");
+        }
+        for (int i = offset + 3; i > offset; i--) {
+            bytes[i] = (byte) val;
+            val >>>= 8;
+        }
+        bytes[offset] = (byte) val;
+        return offset + SIZEOF_INT;
+    }
+
+    /**
+     * Convert a short value to a byte array of {@link #SIZEOF_SHORT} bytes long.
+     *
+     * @param val value
+     * @return the byte array
+     */
+    public static byte[] toBytes(short val) {
+        byte[] b = new byte[SIZEOF_SHORT];
+        b[1] = (byte) val;
+        val >>= 8;
+        b[0] = (byte) val;
+        return b;
+    }
+
+    /**
+     * Converts a byte array to a short value
+     *
+     * @param bytes byte array
+     * @return the short value
+     */
+    public static short toShort(byte[] bytes) {
+        return toShort(bytes, 0, SIZEOF_SHORT);
+    }
+
+    /**
+     * Converts a byte array to a short value
+     *
+     * @param bytes  byte array
+     * @param offset offset into array
+     * @return the short value
+     */
+    public static short toShort(byte[] bytes, int offset) {
+        return toShort(bytes, offset, SIZEOF_SHORT);
+    }
+
+    /**
+     * Converts a byte array to a short value
+     *
+     * @param bytes  byte array
+     * @param offset offset into array
+     * @param length length, has to be {@link #SIZEOF_SHORT}
+     * @return the short value
+     * @throws IllegalArgumentException if length is not {@link #SIZEOF_SHORT}
+     *                                  or if there's not enough room in the array at the offset indicated.
+     */
+    public static short toShort(byte[] bytes, int offset, final int length) {
+        if (length != SIZEOF_SHORT || offset + length > bytes.length) {
+            throw explainWrongLengthOrOffset(bytes, offset, length, SIZEOF_SHORT);
+        }
+        short n = 0;
+        n ^= bytes[offset] & 0xFF;
+        n <<= 8;
+        n ^= bytes[offset + 1] & 0xFF;
+        return n;
+    }
+
+    /**
+     * This method will get a sequence of bytes from pos -> limit,
+     * but will restore pos after.
+     *
+     * @param buf
+     * @return byte array
+     */
+    public static byte[] getBytes(ByteBuffer buf) {
+        int savedPos = buf.position();
+        byte[] newBytes = new byte[buf.remaining()];
+        buf.get(newBytes);
+        buf.position(savedPos);
+        return newBytes;
+    }
+
+    /**
+     * Put a short value out to the specified byte array position.
+     *
+     * @param bytes  the byte array
+     * @param offset position in the array
+     * @param val    short to write out
+     * @return incremented offset
+     * @throws IllegalArgumentException if the byte array given doesn't have
+     *                                  enough room at the offset specified.
+     */
+    public static int putShort(byte[] bytes, int offset, short val) {
+        if (bytes.length - offset < SIZEOF_SHORT) {
+            throw new IllegalArgumentException("Not enough room to put a short at"
+                    + " offset " + offset + " in a " + bytes.length + " byte array");
+        }
+        bytes[offset + 1] = (byte) val;
+        val >>= 8;
+        bytes[offset] = (byte) val;
+        return offset + SIZEOF_SHORT;
+    }
+
+    public static byte toByte(byte[] bytes, int offset, int length) {
+        if (length != SIZEOF_BYTE || offset + length > bytes.length) {
+            throw explainWrongLengthOrOffset(bytes, offset, length, SIZEOF_BYTE);
+        }
+        return bytes[offset];
+    }
+
+
+    /**
+     * Convert a BigDecimal value to a byte array
+     *
+     * @param val
+     * @return the byte array
+     */
+    public static byte[] toBytes(BigDecimal val) {
+        byte[] valueBytes = val.unscaledValue().toByteArray();
+        byte[] result = new byte[valueBytes.length + SIZEOF_INT];
+        int offset = putInt(result, 0, val.scale());
+        putBytes(result, offset, valueBytes, 0, valueBytes.length);
+        return result;
+    }
+
+
+    /**
+     * Converts a byte array to a BigDecimal
+     *
+     * @param bytes
+     * @return the char value
+     */
+    public static BigDecimal toBigDecimal(byte[] bytes) {
+        return toBigDecimal(bytes, 0, bytes.length);
+    }
+
+    /**
+     * Converts a byte array to a BigDecimal value
+     *
+     * @param bytes
+     * @param offset
+     * @param length
+     * @return the char value
+     */
+    public static BigDecimal toBigDecimal(byte[] bytes, int offset, final int length) {
+        if (bytes == null || length < SIZEOF_INT + 1 ||
+                (offset + length > bytes.length)) {
+            return null;
+        }
+
+        int scale = toInt(bytes, offset);
+        byte[] tcBytes = new byte[length - SIZEOF_INT];
+        System.arraycopy(bytes, offset + SIZEOF_INT, tcBytes, 0, length - SIZEOF_INT);
+        return new BigDecimal(new BigInteger(tcBytes), scale);
+    }
+
+    /**
+     * Put a BigDecimal value out to the specified byte array position.
+     *
+     * @param bytes  the byte array
+     * @param offset position in the array
+     * @param val    BigDecimal to write out
+     * @return incremented offset
+     */
+    public static int putBigDecimal(byte[] bytes, int offset, BigDecimal val) {
+        if (bytes == null) {
+            return offset;
+        }
+
+        byte[] valueBytes = val.unscaledValue().toByteArray();
+        byte[] result = new byte[valueBytes.length + SIZEOF_INT];
+        offset = putInt(result, offset, val.scale());
+        return putBytes(result, offset, valueBytes, 0, valueBytes.length);
+    }
+
+    /**
+     * @param left  left operand
+     * @param right right operand
+     * @return 0 if equal, < 0 if left is less than right, etc.
+     */
+    public static int compareTo(final byte[] left, final byte[] right) {
+        return compareByteArrayInLexOrder(left, 0, left.length, right, 0, right.length);
+    }
+
+    /**
+     * Lexicographically compare two arrays.
+     *
+     * @param buffer1 left operand
+     * @param buffer2 right operand
+     * @param offset1 Where to start comparing in the left buffer
+     * @param offset2 Where to start comparing in the right buffer
+     * @param length1 How much to compare from the left buffer
+     * @param length2 How much to compare from the right buffer
+     * @return 0 if equal, < 0 if left is less than right, etc.
+     */
+    public static int compareTo(byte[] buffer1, int offset1, int length1,
+                                byte[] buffer2, int offset2, int length2) {
+        return compareByteArrayInLexOrder(buffer1, offset1, length1, buffer2, offset2, length2);
+    }
+
+    public static int compareByteArrayInLexOrder(byte[] buffer1, int offset1, int length1,
+                                                 byte[] buffer2, int offset2, int length2) {
+        // Short circuit equal case
+        if (buffer1 == buffer2 &&
+                offset1 == offset2 &&
+                length1 == length2) {
+            return 0;
+        }
+        // Bring WritableComparator code local
+        int end1 = offset1 + length1;
+        int end2 = offset2 + length2;
+        for (int i = offset1, j = offset2; i < end1 && j < end2; i++, j++) {
+            int a = (buffer1[i] & 0xff);
+            int b = (buffer2[j] & 0xff);
+            if (a != b) {
+                return a - b;
+            }
+        }
+        return length1 - length2;
+    }
+
+    /**
+     * @param left  left operand
+     * @param right right operand
+     * @return True if equal
+     */
+    public static boolean equals(final byte[] left, final byte[] right) {
+        // Could use Arrays.equals?
+        //noinspection SimplifiableConditionalExpression
+        if (left == right) return true;
+        if (left == null || right == null) return false;
+        if (left.length != right.length) return false;
+        if (left.length == 0) return true;
+
+        // Since we're often comparing adjacent sorted data,
+        // it's usual to have equal arrays except for the very last byte
+        // so check that first
+        if (left[left.length - 1] != right[right.length - 1]) return false;
+
+        return compareTo(left, right) == 0;
+    }
+
+    public static boolean equals(final byte[] left, int leftOffset, int leftLen,
+                                 final byte[] right, int rightOffset, int rightLen) {
+        // short circuit case
+        if (left == right &&
+                leftOffset == rightOffset &&
+                leftLen == rightLen) {
+            return true;
+        }
+        // different lengths fast check
+        if (leftLen != rightLen) {
+            return false;
+        }
+        if (leftLen == 0) {
+            return true;
+        }
+
+        // Since we're often comparing adjacent sorted data,
+        // it's usual to have equal arrays except for the very last byte
+        // so check that first
+        if (left[leftOffset + leftLen - 1] != right[rightOffset + rightLen - 1]) return false;
+
+        return compareByteArrayInLexOrder(left, leftOffset, leftLen, right, rightOffset, rightLen) == 0;
+    }
+
+
+    /**
+     * Return true if the byte array on the right is a prefix of the byte
+     * array on the left.
+     */
+    public static boolean startsWith(byte[] bytes, byte[] prefix) {
+        return bytes != null && prefix != null &&
+                bytes.length >= prefix.length &&
+                compareByteArrayInLexOrder(bytes, 0, prefix.length, prefix, 0, prefix.length) == 0;
+    }
+
+    public static int hashCode(final byte[] b) {
+        return hashCode(b, b.length);
+    }
+
+    public static int hashCode(final byte[] b, final int length) {
+        return hashBytes(b, length);
+    }
+
+    /**
+     * Compute hash for binary data.
+     */
+    public static int hashBytes(byte[] bytes, int offset, int length) {
+        int hash = 1;
+        for (int i = offset; i < offset + length; i++)
+            hash = (31 * hash) + (int) bytes[i];
+        return hash;
+    }
+
+    /**
+     * Compute hash for binary data.
+     */
+    public static int hashBytes(byte[] bytes, int length) {
+        return hashBytes(bytes, 0, length);
+    }
+
+    /**
+     * @param bytes  array to hash
+     * @param offset offset to start from
+     * @param length length to hash
+     */
+    public static int hashCode(byte[] bytes, int offset, int length) {
+        int hash = 1;
+        for (int i = offset; i < offset + length; i++)
+            hash = (31 * hash) + (int) bytes[i];
+        return hash;
+    }
+
+    /**
+     * http://tools.ietf.org/html/rfc3629
+     */
+    public static int stringtoUTF8Bytes(String str, byte[] buffer) {
+        int index = 0;
+        for (int i = 0; i < str.length(); i++) {
+            char strChar = str.charAt(i);
+            if ((strChar & 0xFF80) == 0) {
+                // (00000000 00000000 - 00000000 01111111) -> 0xxxxxxx
+                buffer[index++] = (byte) (strChar & 0x00FF);
+            } else if ((strChar & 0xF800) == 0) {
+                // (00000000 10000000 - 00000111 11111111) -> 110xxxxx 10xxxxxx
+                buffer[index++] = (byte) ((strChar >> 6) | 0x00c0);
+                buffer[index++] = (byte) ((strChar & 0x003F) | 0x0080);
+            } else {
+                // (00001000 00000000 - 11111111 11111111) -> 1110xxxx 10xxxxxx 10xxxxxx
+                buffer[index++] = (byte) ((strChar >> 12) | 0x00e0);
+                buffer[index++] = (byte) (((strChar >> 6) & 0x003F) | 0x0080);
+                buffer[index++] = (byte) ((strChar & 0x003F) | 0x0080);
+            }
+        }
+        return index;
+    }
+}
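
The encoders above are big-endian, so for non-negative values the byte
order produced by toBytes agrees with numeric order under compareTo's
lexicographic comparison. Note also that stringtoUTF8Bytes encodes each
UTF-16 char independently, so supplementary characters (surrogate pairs)
do not come out as single 4-byte UTF-8 sequences. A short round-trip:

    byte[] enc = Bytes.toBytes(42L);
    long back  = Bytes.toLong(enc);                                   // 42
    int cmp    = Bytes.compareTo(Bytes.toBytes(1), Bytes.toBytes(2)); // < 0
    int next   = Bytes.putInt(new byte[8], 0, 7);                     // returns 4, the new offset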

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/metric/DefaultMetricIDGenerator.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/DefaultMetricIDGenerator.java b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/DefaultMetricIDGenerator.java
new file mode 100644
index 0000000..44fd4bb
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/DefaultMetricIDGenerator.java
@@ -0,0 +1,15 @@
+package com.alibaba.jstorm.metric;
+
+import java.util.UUID;
+
+/**
+ * @author Cody (weiyue.wy@alibaba-inc.com)
+ * @since 2.0.5
+ */
+public class DefaultMetricIDGenerator implements MetricIDGenerator {
+
+    @Override
+    public long genMetricId(String metricName) {
+        return UUID.randomUUID().getLeastSignificantBits();
+    }
+}
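
Note that this generator draws a random UUID and ignores metricName
entirely, so ids are not stable across calls or processes; uniqueness rests
on the 2^63 random space rather than on hashing the name. A usage sketch
(the metric name is hypothetical):

    MetricIDGenerator gen = new DefaultMetricIDGenerator();
    long id1 = gen.genMetricId("some.metric");  // random; the name is not hashed
    long id2 = gen.genMetricId("some.metric");  // almost certainly != id1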

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/metric/DefaultMetricQueryClient.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/DefaultMetricQueryClient.java b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/DefaultMetricQueryClient.java
new file mode 100644
index 0000000..5de2b8d
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/DefaultMetricQueryClient.java
@@ -0,0 +1,84 @@
+package com.alibaba.jstorm.metric;
+
+import com.alibaba.jstorm.common.metric.MetricMeta;
+import com.alibaba.jstorm.common.metric.TaskTrack;
+import com.alibaba.jstorm.common.metric.TopologyHistory;
+import com.google.common.collect.Lists;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * a dummy metric query client implementation
+ *
+ * @author Cody (weiyue.wy@alibaba-inc.com)
+ * @since 2.0.5
+ */
+public class DefaultMetricQueryClient implements MetricQueryClient {
+    @Override
+    public void init(Map conf) {
+    }
+
+    @Override
+    public List<MetricMeta> getMetricMeta(String clusterName, String topologyId, MetaType type, MetaFilter filter, Object arg) {
+        return Lists.newArrayList();
+    }
+
+    @Override
+    public List<MetricMeta> getMetricMeta(String clusterName, String topologyId, MetaType type) {
+        return Lists.newArrayList();
+    }
+
+    @Override
+    public List<MetricMeta> getWorkerMeta(String clusterName, String topologyId) {
+        return Lists.newArrayList();
+    }
+
+    @Override
+    public List<MetricMeta> getNettyMeta(String clusterName, String topologyId) {
+        return Lists.newArrayList();
+    }
+
+    @Override
+    public List<MetricMeta> getTaskMeta(String clusterName, String topologyId, int taskId) {
+        return Lists.newArrayList();
+    }
+
+    @Override
+    public List<MetricMeta> getComponentMeta(String clusterName, String topologyId, String componentId) {
+        return Lists.newArrayList();
+    }
+
+    @Override
+    public MetricMeta getMetricMeta(String clusterName, String topologyId, MetaType metaType, long metricId) {
+        return null;
+    }
+
+    @Override
+    public List<Object> getMetricData(long metricId, MetricType metricType, int win, long start, long end) {
+        return Lists.newArrayList();
+    }
+
+    @Override
+    public List<TaskTrack> getTaskTrack(String clusterName, String topologyId) {
+        return Lists.newArrayList();
+    }
+
+    @Override
+    public List<TaskTrack> getTaskTrack(String clusterName, String topologyId, int taskId) {
+        return Lists.newArrayList();
+    }
+
+    @Override
+    public List<TopologyHistory> getTopologyHistory(String clusterName, String topologyName, int size) {
+        return Lists.newArrayList();
+    }
+
+    @Override
+    public void deleteMeta(MetricMeta meta) {
+    }
+
+    @Override
+    public void deleteMeta(List<MetricMeta> metaList) {
+    }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/metric/JStormHealthCheck.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/JStormHealthCheck.java b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/JStormHealthCheck.java
index 631c38b..85e7f15 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/JStormHealthCheck.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/JStormHealthCheck.java
@@ -27,19 +27,14 @@ import com.codahale.metrics.health.HealthCheck;
 import com.codahale.metrics.health.HealthCheckRegistry;
 
 public class JStormHealthCheck {
-    private static final Logger LOG = LoggerFactory
-            .getLogger(JStormHealthCheck.class);
+    private static final Logger LOG = LoggerFactory.getLogger(JStormHealthCheck.class);
 
-    private final static Map<Integer, HealthCheckRegistry> taskHealthCheckMap =
-            new ConcurrentHashMap<Integer, HealthCheckRegistry>();
+    private final static Map<Integer, HealthCheckRegistry> taskHealthCheckMap = new ConcurrentHashMap<Integer, HealthCheckRegistry>();
 
-    private final static HealthCheckRegistry workerHealthCheck =
-            new HealthCheckRegistry();
+    private final static HealthCheckRegistry workerHealthCheck = new HealthCheckRegistry();
 
-    public static void registerTaskHealthCheck(int taskId, String name,
-            HealthCheck healthCheck) {
-        HealthCheckRegistry healthCheckRegister =
-                taskHealthCheckMap.get(taskId);
+    public static void registerTaskHealthCheck(int taskId, String name, HealthCheck healthCheck) {
+        HealthCheckRegistry healthCheckRegister = taskHealthCheckMap.get(taskId);
 
         if (healthCheckRegister == null) {
             healthCheckRegister = new HealthCheckRegistry();
@@ -49,14 +44,12 @@ public class JStormHealthCheck {
         healthCheckRegister.register(name, healthCheck);
     }
 
-    public static void registerWorkerHealthCheck(String name,
-            HealthCheck healthCheck) {
+    public static void registerWorkerHealthCheck(String name, HealthCheck healthCheck) {
         workerHealthCheck.register(name, healthCheck);
     }
 
     public static void unregisterTaskHealthCheck(int taskId, String name) {
-        HealthCheckRegistry healthCheckRegister =
-                taskHealthCheckMap.get(taskId);
+        HealthCheckRegistry healthCheckRegister = taskHealthCheckMap.get(taskId);
 
         if (healthCheckRegister != null) {
             healthCheckRegister.unregister(name);

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/metric/JStormHealthReporter.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/JStormHealthReporter.java b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/JStormHealthReporter.java
new file mode 100644
index 0000000..e344bfd
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/JStormHealthReporter.java
@@ -0,0 +1,59 @@
+package com.alibaba.jstorm.metric;
+
+import com.alibaba.jstorm.callback.RunnableCallback;
+import com.alibaba.jstorm.cluster.StormClusterState;
+import com.alibaba.jstorm.daemon.worker.WorkerData;
+import com.codahale.metrics.health.HealthCheckRegistry;
+import com.codahale.metrics.health.HealthCheck.Result;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Map;
+
+/**
+ * Created by wuchong on 15/9/17.
+ */
+public class JStormHealthReporter extends RunnableCallback {
+    private static final Logger LOG = LoggerFactory.getLogger(JStormHealthReporter.class);
+    private static final int THREAD_CYCLE = 60;   //report every minute
+    private WorkerData workerData;
+
+    public JStormHealthReporter(WorkerData workerData) {
+        this.workerData = workerData;
+    }
+
+    @Override
+    public void run() {
+        StormClusterState clusterState = workerData.getZkCluster();
+        String topologyId = workerData.getTopologyId();
+
+        Map<Integer, HealthCheckRegistry> taskHealthCheckMap = JStormHealthCheck.getTaskhealthcheckmap();
+        int cnt = 0;
+        for (Map.Entry<Integer, HealthCheckRegistry> entry : taskHealthCheckMap.entrySet()) {
+            Integer taskId = entry.getKey();
+            Map<String, Result> results = entry.getValue().runHealthChecks();
+
+            for (Map.Entry<String, Result> result : results.entrySet()) {
+                if (!result.getValue().isHealthy()) {
+                    try {
+                        clusterState.report_task_error(topologyId, taskId, result.getValue().getMessage(), null);
+                        cnt++;
+                    } catch (Exception e) {
+                        LOG.error("Failed to update health data in ZK for topo-{} task-{}.", topologyId, taskId, e);
+                    }
+                }
+            }
+        }
+        LOG.info("Successfully updated {} health data to ZK for topology:{}", cnt, topologyId);
+    }
+
+    @Override
+    public Object getResult() {
+        return THREAD_CYCLE;
+    }
+
+    @Override
+    public String getThreadName() {
+        return "HealthReporterThread";
+    }
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/metric/JStormMetricCache.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/JStormMetricCache.java b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/JStormMetricCache.java
new file mode 100644
index 0000000..3a85b73
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/JStormMetricCache.java
@@ -0,0 +1,351 @@
+package com.alibaba.jstorm.metric;
+
+import backtype.storm.generated.MetricInfo;
+import backtype.storm.generated.TopologyMetric;
+import backtype.storm.utils.Utils;
+import com.alibaba.jstorm.cache.JStormCache;
+import com.alibaba.jstorm.cache.RocksDBCache;
+import com.alibaba.jstorm.cache.TimeoutMemCache;
+import com.alibaba.jstorm.client.ConfigExtension;
+import com.alibaba.jstorm.cluster.StormClusterState;
+import com.alibaba.jstorm.cluster.StormConfig;
+import com.alibaba.jstorm.utils.OSInfo;
+import com.google.common.collect.Lists;
+import org.apache.commons.lang.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.*;
+
+/**
+ * Metrics cache. We maintain the following data in the RocksDB cache:
+ * 1. all topology ids
+ * 2. topology id ==> all metric meta (map<metric_name, metric_id>)
+ * 3. topology id ==> all metric data
+ *
+ * @author Cody (weiyue.wy@alibaba-inc.com)
+ * @since 2.0.5
+ */
+@SuppressWarnings("unchecked")
+public class JStormMetricCache {
+
+    private static final Logger LOG = LoggerFactory.getLogger(JStormMetricCache.class);
+
+    public static final String TIMEOUT_MEM_CACHE_CLASS = TimeoutMemCache.class.getName();
+    public static final String ROCKS_DB_CACHE_CLASS = RocksDBCache.class.getName();
+
+    protected final Object lock = new Object();
+
+    protected JStormCache cache = null;
+
+    protected static final String METRIC_META_PREFIX = "__metric.meta__";
+    protected static final String SENT_METRIC_META_PREFIX = "__saved.metric.meta__";
+    protected static final String ALL_TOPOLOGIES_KEY = "__all.topologies__";
+    protected static final String TOPOLOGY_SAMPLE_RATE = "__topology.sample.rate__";
+
+    protected static final String METRIC_DATA_PREFIX = "__metric.data__";
+    protected static final String METRIC_DATA_30M_COMPONENT = "__metric.data.comp__";
+    protected static final String METRIC_DATA_30M_TASK = "__metric.data.task__";
+    protected static final String METRIC_DATA_30M_STREAM = "__metric.data.stream__";
+    protected static final String METRIC_DATA_30M_WORKER = "__metric.data.worker__";
+    protected static final String METRIC_DATA_30M_NETTY = "__metric.data.netty__";
+    protected static final String METRIC_DATA_30M_TOPOLOGY = "__metric.data.topology__";
+
+    protected final StormClusterState zkCluster;
+
+    public String getNimbusCacheClass(Map conf) {
+        boolean isLinux = OSInfo.isLinux();
+        boolean isMac = OSInfo.isMac();
+        boolean isLocal = StormConfig.local_mode(conf);
+
+        if (isLocal) {
+            return TIMEOUT_MEM_CACHE_CLASS;
+        }
+
+        if (!isLinux && !isMac) {
+            return TIMEOUT_MEM_CACHE_CLASS;
+        }
+
+        String nimbusCacheClass = ConfigExtension.getNimbusCacheClass(conf);
+        if (!StringUtils.isBlank(nimbusCacheClass)) {
+            return nimbusCacheClass;
+        }
+
+        return ROCKS_DB_CACHE_CLASS;
+    }
+
+    public JStormMetricCache(Map conf, StormClusterState zkCluster) {
+        String dbCacheClass = getNimbusCacheClass(conf);
+        LOG.info("JStorm metrics cache will use {}", dbCacheClass);
+
+        boolean reset = ConfigExtension.getMetricCacheReset(conf);
+        try {
+            cache = (JStormCache) Utils.newInstance(dbCacheClass);
+
+            String dbDir = StormConfig.metricDbDir(conf);
+            conf.put(RocksDBCache.ROCKSDB_ROOT_DIR, dbDir);
+            conf.put(RocksDBCache.ROCKSDB_RESET, reset);
+            cache.init(conf);
+        } catch (Exception e) {
+            if (!reset && cache != null) {
+                LOG.error("Failed to init rocks db, will reset and try to re-init...");
+                conf.put(RocksDBCache.ROCKSDB_RESET, true);
+                try {
+                    cache.init(conf);
+                } catch (Exception ex) {
+                    LOG.error("Error", ex);
+                }
+            } else {
+                LOG.error("Failed to create metrics cache!", e);
+                throw new RuntimeException(e);
+            }
+        }
+
+        this.zkCluster = zkCluster;
+    }
+
+    public JStormCache getCache() {
+        return cache;
+    }
+
+    public JStormCache put(String k, Object v) {
+        cache.put(k, v);
+        return cache;
+    }
+
+    /**
+     * Store 30 min of metric data. Topology-level metrics are kept in a 30-slot ring
+     * (one slot per minute); task/stream/worker/netty metrics are merged in place.
+     */
+    public JStormCache putMetricData(String topologyId, TopologyMetric tpMetric) {
+        // map<key, [ts, metric_info]>
+        Map<String, Object> batchData = new HashMap<String, Object>();
+        long ts = System.currentTimeMillis();
+        int tp = 0, comp = 0, task = 0, stream = 0, worker = 0, netty = 0;
+        if (tpMetric.get_componentMetric().get_metrics_size() > 0) {
+            batchData.put(METRIC_DATA_30M_COMPONENT + topologyId, new Object[]{ts, tpMetric.get_componentMetric()});
+            comp += tpMetric.get_componentMetric().get_metrics_size();
+        }
+        if (tpMetric.get_taskMetric().get_metrics_size() > 0) {
+            tryCombineMetricInfo(METRIC_DATA_30M_TASK + topologyId, tpMetric.get_taskMetric(), MetaType.TASK, ts);
+            task += tpMetric.get_taskMetric().get_metrics_size();
+        }
+        if (tpMetric.get_streamMetric().get_metrics_size() > 0) {
+            tryCombineMetricInfo(METRIC_DATA_30M_STREAM + topologyId, tpMetric.get_streamMetric(), MetaType.STREAM, ts);
+            stream += tpMetric.get_streamMetric().get_metrics_size();
+        }
+        if (tpMetric.get_workerMetric().get_metrics_size() > 0) {
+            tryCombineMetricInfo(METRIC_DATA_30M_WORKER + topologyId, tpMetric.get_workerMetric(), MetaType.WORKER, ts);
+            worker += tpMetric.get_workerMetric().get_metrics_size();
+        }
+        if (tpMetric.get_nettyMetric().get_metrics_size() > 0) {
+            tryCombineMetricInfo(METRIC_DATA_30M_NETTY + topologyId, tpMetric.get_nettyMetric(), MetaType.NETTY, ts);
+            netty += tpMetric.get_nettyMetric().get_metrics_size();
+        }
+
+        // store 30 snapshots of topology metrics
+        if (tpMetric.get_topologyMetric().get_metrics_size() > 0) {
+            String keyPrefix = METRIC_DATA_30M_TOPOLOGY + topologyId + "-";
+            int page = getRingAvailableIndex(keyPrefix);
+
+            batchData.put(keyPrefix + page, new Object[]{ts, tpMetric.get_topologyMetric()});
+            tp += tpMetric.get_topologyMetric().get_metrics_size();
+        }
+        LOG.info("caching metric data for topology:{},tp:{},comp:{},task:{},stream:{},worker:{},netty:{},cost:{}",
+                topologyId, tp, comp, task, stream, worker, netty, System.currentTimeMillis() - ts);
+
+        return putBatch(batchData);
+    }
+
+    private int getRingAvailableIndex(String keyPrefix) {
+        int page = 0;
+        // scan all 30 slots to find the most recently written one
+        long last_ts = 0;
+        for (int idx = 1; idx <= 30; idx++) {
+            String key = keyPrefix + idx;
+            if (cache.get(key) != null) {
+                long timestamp = (long) ((Object[]) cache.get(key))[0];
+                if (timestamp > last_ts) {
+                    last_ts = timestamp;
+                    page = idx;
+                }
+            }
+        }
+        if (page < 30) {
+            page += 1;
+        } else {
+            page = 1;
+        }
+        return page;
+    }
+
+    private void tryCombineMetricInfo(String key, MetricInfo incoming, MetaType metaType, long ts) {
+        Object data = cache.get(key);
+        if (data != null) {
+            try {
+                Object[] parts = (Object[]) data;
+                MetricInfo old = (MetricInfo) parts[1];
+
+                LOG.info("combine {} metrics, old:{}, new:{}",
+                        metaType, old.get_metrics_size(), incoming.get_metrics_size());
+                old.get_metrics().putAll(incoming.get_metrics());
+                // write back the combined metric info
+                cache.put(key, new Object[]{ts, old});
+            } catch (Exception ignored) {
+                cache.remove(key);
+                cache.put(key, new Object[]{ts, incoming});
+            }
+        } else {
+            cache.put(key, new Object[]{ts, incoming});
+        }
+    }
+
+    public List<MetricInfo> getMetricData(String topologyId, MetaType metaType) {
+        Map<Long, MetricInfo> retMap = new TreeMap<Long, MetricInfo>();
+
+        String key = null;
+        if (metaType == MetaType.COMPONENT) {
+            key = METRIC_DATA_30M_COMPONENT + topologyId;
+        } else if (metaType == MetaType.TASK) {
+            key = METRIC_DATA_30M_TASK + topologyId;
+        } else if (metaType == MetaType.STREAM) {
+            key = METRIC_DATA_30M_STREAM + topologyId;
+        } else if (metaType == MetaType.WORKER) {
+            key = METRIC_DATA_30M_WORKER + topologyId;
+        } else if (metaType == MetaType.NETTY) {
+            key = METRIC_DATA_30M_NETTY + topologyId;
+        } else if (metaType == MetaType.TOPOLOGY) {
+            String keyPrefix = METRIC_DATA_30M_TOPOLOGY + topologyId + "-";
+            for (int i = 1; i <= 30; i++) {
+                Object obj = cache.get(keyPrefix + i);
+                if (obj != null) {
+                    Object[] objects = (Object[]) obj;
+                    retMap.put((Long) objects[0], (MetricInfo) objects[1]);
+                }
+            }
+        }
+        if (key != null) {
+            Object obj = cache.get(key);
+            if (obj != null) {
+                Object[] objects = (Object[]) obj;
+                retMap.put((Long) objects[0], (MetricInfo) objects[1]);
+            }
+        }
+        List<MetricInfo> ret = Lists.newArrayList(retMap.values());
+        int cnt = 0;
+        for (MetricInfo metricInfo : ret) {
+            cnt += metricInfo.get_metrics_size();
+        }
+        LOG.info("getMetricData, topology:{}, meta type:{}, metric info size:{}, total metric size:{}",
+                topologyId, metaType, ret.size(), cnt);
+        return ret;
+    }
+
+    public JStormCache putBatch(Map<String, Object> kv) {
+        if (kv.size() > 0) {
+            cache.putBatch(kv);
+        }
+        return cache;
+    }
+
+    public Object get(String k) {
+        return cache.get(k);
+    }
+
+    public void remove(String k) {
+        cache.remove(k);
+    }
+
+    public void removeTopology(String topologyId) {
+        removeTopologyMeta(topologyId);
+        removeTopologyData(topologyId);
+    }
+
+    protected void removeTopologyMeta(String topologyId) {
+        cache.remove(METRIC_META_PREFIX + topologyId);
+    }
+
+    protected void removeTopologyData(String topologyId) {
+        long start = System.currentTimeMillis();
+        cache.remove(METRIC_DATA_PREFIX + topologyId);
+
+        Set<String> metricDataKeys = new HashSet<>();
+        for (int i = 1; i <= 30; i++) {
+            String metricDataKeySuffix = topologyId + "-" + i;
+            metricDataKeys.add(METRIC_DATA_30M_TOPOLOGY + metricDataKeySuffix);
+        }
+        metricDataKeys.add(METRIC_DATA_30M_COMPONENT + topologyId);
+        metricDataKeys.add(METRIC_DATA_30M_TASK + topologyId);
+        metricDataKeys.add(METRIC_DATA_30M_STREAM + topologyId);
+        metricDataKeys.add(METRIC_DATA_30M_WORKER + topologyId);
+        metricDataKeys.add(METRIC_DATA_30M_NETTY + topologyId);
+
+        cache.removeBatch(metricDataKeys);
+        LOG.info("removing metric cache of topology:{}, cost:{}", topologyId, System.currentTimeMillis() - start);
+    }
+
+    public void unregisterWorker(String topologyId, String host, int port) {
+        String prefix = MetricUtils.workerMetricPrefix(topologyId, host, port);
+        synchronized (lock) {
+            // remove dead worker meta info under METRIC_META_PREFIX
+            Map<String, Long> nodes = (Map<String, Long>) cache.get(METRIC_META_PREFIX + topologyId);
+            if (nodes != null) {
+                Iterator<String> keyIterator = nodes.keySet().iterator();
+                while (keyIterator.hasNext()) {
+                    String metricName = keyIterator.next();
+                    // strip the meta-type char (index 1) to recover the raw metric name
+                    metricName = metricName.charAt(0) + metricName.substring(2);
+                    if (metricName.startsWith(prefix)) {
+                        keyIterator.remove();
+                    }
+                }
+                cache.put(METRIC_META_PREFIX + topologyId, nodes);
+            }
+            // remove dead worker entries under METRIC_DATA_30M_WORKER
+            Object data = cache.get(METRIC_DATA_30M_WORKER + topologyId);
+            if (data != null) {
+                Object[] parts = (Object[]) data;
+                MetricInfo old = (MetricInfo) parts[1];
+                Iterator<String> oldKeys = old.get_metrics().keySet().iterator();
+                while (oldKeys.hasNext()) {
+                    String metricName = oldKeys.next();
+                    metricName = metricName.charAt(0) + metricName.substring(2);
+                    if (metricName.startsWith(prefix)) {
+                        oldKeys.remove();
+                        LOG.info("removed dead worker metric: {}", metricName);
+                    }
+                }
+                cache.put(METRIC_DATA_30M_WORKER + topologyId, data);
+            }
+        }
+    }
+
+    public Map<String, Long> getMeta(String topologyId) {
+        return (Map<String, Long>) cache.get(METRIC_META_PREFIX + topologyId);
+    }
+
+    public void putMeta(String topologyId, Object v) {
+        cache.put(METRIC_META_PREFIX + topologyId, v);
+    }
+
+    public void putSampleRate(String topologyId, double sampleRate) {
+        cache.put(TOPOLOGY_SAMPLE_RATE + topologyId, sampleRate);
+    }
+
+    public void removeSampleRate(String topologyId) {
+        cache.remove(TOPOLOGY_SAMPLE_RATE + topologyId);
+    }
+
+    public double getSampleRate(String topologyId) {
+        // putSampleRate stores a Double, so read it back as a Double (not a String)
+        Double rate = (Double) cache.get(TOPOLOGY_SAMPLE_RATE + topologyId);
+        if (rate == null) {
+            return ConfigExtension.DEFAULT_METRIC_SAMPLE_RATE;
+        }
+        return rate;
+    }
+
+    public Map<String, Long> getSentMeta(String topologyId) {
+        return (Map<String, Long>) cache.get(SENT_METRIC_META_PREFIX + topologyId);
+    }
+
+    public void putSentMeta(String topologyId, Object allMetricMeta) {
+        cache.put(SENT_METRIC_META_PREFIX + topologyId, allMetricMeta);
+    }
+}
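
To make the 30-slot ring behind putMetricData()/getRingAvailableIndex() concrete, here is a small self-contained sketch; a plain HashMap stands in for JStormCache, and the key prefix mirrors the constant above:

    import java.util.HashMap;
    import java.util.Map;

    public class RingIndexDemo {
        static final int SLOTS = 30;
        static final Map<String, Object[]> cache = new HashMap<String, Object[]>();

        // same scan as getRingAvailableIndex: find the most recently written
        // slot, then write to the next one, wrapping from 30 back to 1
        static int nextSlot(String keyPrefix) {
            int page = 0;
            long lastTs = 0;
            for (int idx = 1; idx <= SLOTS; idx++) {
                Object[] entry = cache.get(keyPrefix + idx);
                if (entry != null && (Long) entry[0] > lastTs) {
                    lastTs = (Long) entry[0];
                    page = idx;
                }
            }
            return page < SLOTS ? page + 1 : 1;
        }

        public static void main(String[] args) {
            String prefix = "__metric.data.topology__demo-topo-";
            for (long minute = 1; minute <= 35; minute++) {
                int slot = nextSlot(prefix);
                cache.put(prefix + slot, new Object[]{minute, "snapshot@" + minute});
            }
            // after 35 writes the ring has wrapped: slot 1 now holds minute 31
            System.out.println(cache.get(prefix + 1)[1]); // snapshot@31
        }
    }

Each write lands in the slot after the newest one, so at most 30 one-minute snapshots survive and the oldest is silently overwritten.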

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/metric/JStormMetrics.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/JStormMetrics.java b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/JStormMetrics.java
index 8221cd8..6531c9c 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/metric/JStormMetrics.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/metric/JStormMetrics.java
@@ -15,267 +15,441 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package com.alibaba.jstorm.metric;
 
-import java.io.Serializable;
-import java.lang.management.ManagementFactory;
-import java.lang.management.MemoryMXBean;
-import java.lang.management.MemoryUsage;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
+package com.alibaba.jstorm.metric;
 
+import backtype.storm.generated.MetricInfo;
+import com.alibaba.jstorm.common.metric.*;
+import com.alibaba.jstorm.common.metric.snapshot.AsmSnapshot;
+import com.alibaba.jstorm.utils.NetWorkUtils;
+import com.google.common.base.Joiner;
+import com.google.common.collect.Lists;
+import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import backtype.storm.generated.MetricInfo;
-
-import com.alibaba.jstorm.common.metric.Counter;
-import com.alibaba.jstorm.common.metric.Gauge;
-import com.alibaba.jstorm.common.metric.Histogram;
-import com.alibaba.jstorm.common.metric.Meter;
-import com.alibaba.jstorm.common.metric.MetricRegistry;
-import com.alibaba.jstorm.common.metric.Timer;
-import com.alibaba.jstorm.common.metric.window.Metric;
-import com.alibaba.jstorm.utils.JStormUtils;
+import java.io.Serializable;
+import java.util.*;
 
+/**
+ * @author Cody (weiyue.wy@alibaba-inc.com)
+ * @since 2.0.5
+ */
 public class JStormMetrics implements Serializable {
-    private static final Logger LOG = LoggerFactory
-            .getLogger(JStormMetrics.class);
-    private static final long serialVersionUID = 2046603514943797241L;
+    private static final long serialVersionUID = -2580242512743243267L;
+
+    public static final String NIMBUS_METRIC_KEY = "__NIMBUS__";
+    public static final String CLUSTER_METRIC_KEY = "__CLUSTER__";
+    public static final String SUPERVISOR_METRIC_KEY = "__SUPERVISOR__";
+
+    protected static final Logger LOG = LoggerFactory.getLogger(JStormMetrics.class);
 
     /**
      * Metrics in this object will be uploaded to nimbus
      */
-    static MetricRegistry workerMetrics = new MetricRegistry();
-    static Map<Integer, MetricRegistry> taskMetrics =
-            new ConcurrentHashMap<Integer, MetricRegistry>();
-    /**
-     * Metrics in this object will be just be output to log, won't be uploaded
-     * to nimbus
-     */
-    static MetricRegistry skipMetrics = new MetricRegistry();
+    protected static final AsmMetricRegistry workerMetrics = new AsmMetricRegistry();
+    protected static final AsmMetricRegistry nettyMetrics = new AsmMetricRegistry();
+    protected static final AsmMetricRegistry componentMetrics = new AsmMetricRegistry();
+    protected static final AsmMetricRegistry taskMetrics = new AsmMetricRegistry();
+    protected static final AsmMetricRegistry streamMetrics = new AsmMetricRegistry();
+    protected static final AsmMetricRegistry topologyMetrics = new AsmMetricRegistry();
 
-    protected static MetricInfo exposeWorkerMetrics;
-    protected static Map<String, MetricInfo> exposeNettyMetrics;
-    protected static Map<Integer, MetricInfo> exposeTaskMetrics;
+    protected static final AsmMetricRegistry[] allRegistries = {
+            streamMetrics, taskMetrics, componentMetrics, workerMetrics, nettyMetrics, topologyMetrics};
 
-    static {
-        registerWorkerGauge(new com.codahale.metrics.Gauge<Double>() {
+    protected static String topologyId;
+    protected static String host;
+    protected static int port;
+    protected static boolean debug;
 
-            @Override
-            public Double getValue() {
-                // TODO Auto-generated method stub
-                return JStormUtils.getCpuUsage();
-            }
+    public static final String DEFAULT_GROUP = "sys";
+    public static final String NETTY_GROUP = "netty";
 
-        }, MetricDef.CPU_USED_RATIO);
+    protected static Set<String> debugMetricNames = new HashSet<String>();
 
-        registerWorkerGauge(new com.codahale.metrics.Gauge<Double>() {
+    static {
+        host = NetWorkUtils.ip();
+    }
 
-            @Override
-            public Double getValue() {
-                // TODO Auto-generated method stub
-                return JStormUtils.getMemUsage();
-            }
+    private static boolean enabled = true;
 
-        }, MetricDef.MEMORY_USED);
+    public static int getPort() {
+        return port;
     }
 
-    public static MetricRegistry registerTask(int taskId) {
-        MetricRegistry ret = taskMetrics.get(taskId);
-        if (ret == null) {
-            ret = new MetricRegistry();
-            taskMetrics.put(taskId, ret);
-            LOG.info("Register task MetricRegistry " + taskId);
-        }
+    public static void setPort(int port) {
+        JStormMetrics.port = port;
+    }
 
-        return ret;
+    public static String getHost() {
+        return host;
     }
 
-    public static void unregisterTask(int taskId) {
-    	taskMetrics.remove(taskId);
+    public static void setHost(String host) {
+        JStormMetrics.host = host;
     }
 
-    // the Metric should be one of metrics of task
-    // if register this metric through this function,
-    // the web UI would do sum operation for the metric
-    // the metric will display in component/topology level in web UI
-    public static void registerSumMetric(String name) {
-        MetricDef.MERGE_SUM_TAG.add(name);
+    public static String getTopologyId() {
+        return topologyId;
     }
 
-    public static void unregisterSumMetric(String name) {
-        MetricDef.MERGE_SUM_TAG.remove(name);
+    public static void setTopologyId(String topologyId) {
+        JStormMetrics.topologyId = topologyId;
     }
 
-    // the Metric should be one of metrics of task
-    // if register this metric through this function,
-    // the web UI would do sum operation for the metric
-    // the metric will display in component/topology level in web UI
-    public static void registerAvgMetric(String name) {
-        MetricDef.MERGE_AVG_TAG.add(name);
+    public static boolean isDebug() {
+        return debug;
     }
 
-    public static void unregisterAvgMetric(String name) {
-        MetricDef.MERGE_AVG_TAG.remove(name);
+    public static void setDebug(boolean debug) {
+        JStormMetrics.debug = debug;
+        LOG.info("topology metrics debug enabled:{}", debug);
     }
 
-    public static <T extends Metric> T registerWorkerMetric(T metric,
-            String name, String... args) throws IllegalArgumentException {
-        String registerName = MetricRegistry.name(name, args);
+    public static void setEnabled(boolean enabled) {
+        JStormMetrics.enabled = enabled;
+    }
 
-        return workerMetrics.register(registerName, metric);
+    public static boolean isEnabled() {
+        return enabled;
     }
 
-    public static void unregisterWorkerMetric(String name, String... args) {
-        String registerName = MetricRegistry.name(name, args);
+    public static String workerMetricName(String name, MetricType type) {
+        return MetricUtils.workerMetricName(topologyId, host, port, name, type);
+    }
 
-        workerMetrics.remove(registerName);
+    public static void addDebugMetrics(String names) {
+        String[] metrics = names.split(",");
+        for (String metric : metrics) {
+            metric = metric.trim();
+            if (!StringUtils.isBlank(metric)) {
+                debugMetricNames.add(metric);
+            }
+        }
+        LOG.info("debug metric names:{}", Joiner.on(",").join(debugMetricNames));
     }
 
-    public static <T extends Metric> T registerTaskMetric(T metric, int taskId,
-            String name, String... args) throws IllegalArgumentException {
-        MetricRegistry metrics = taskMetrics.get(taskId);
-        if (metrics == null) {
-            throw new IllegalArgumentException("Invalid taskId " + taskId);
+    /**
+     * reserve for debug purposes
+     */
+    public static AsmMetric find(String name) {
+        for (AsmMetricRegistry registry : allRegistries) {
+            AsmMetric metric = registry.getMetric(name);
+            if (metric != null) {
+                return metric;
+            }
         }
+        return null;
+    }
 
-        String registerName = MetricRegistry.name(name, args);
+    public static AsmMetric registerStreamMetric(String name, AsmMetric metric, boolean mergeTopology) {
+        name = fixNameIfPossible(name);
+        LOG.info("register stream metric:{}", name);
+        AsmMetric ret = streamMetrics.register(name, metric);
 
-        return metrics.register(registerName, metric);
-    }
+        if (metric.isAggregate()) {
+            List<AsmMetric> assocMetrics = new ArrayList<>();
+
+            String taskMetricName = MetricUtils.stream2taskName(name);
+            AsmMetric taskMetric = taskMetrics.register(taskMetricName, metric.clone());
+            assocMetrics.add(taskMetric);
+
+            String compMetricName = MetricUtils.task2compName(taskMetricName);
+            AsmMetric componentMetric = componentMetrics.register(compMetricName, taskMetric.clone());
+            assocMetrics.add(componentMetric);
+
+            String metricName = MetricUtils.getMetricName(name);
+            if (metricName.contains(".")) {
+                compMetricName = MetricUtils.task2MergeCompName(taskMetricName);
+                AsmMetric mergeCompMetric = componentMetrics.register(compMetricName, taskMetric.clone());
+                assocMetrics.add(mergeCompMetric);
+            }
+
+            if (mergeTopology) {
+                String topologyMetricName = MetricUtils.comp2topologyName(compMetricName);
+                AsmMetric topologyMetric = topologyMetrics.register(topologyMetricName, ret.clone());
+                assocMetrics.add(topologyMetric);
+            }
 
-    public static void unregisterTaskMetric(int taskId, String name,
-            String... args) throws IllegalArgumentException {
-        String registerName = MetricRegistry.name(name, args);
-        MetricRegistry metrics = taskMetrics.get(taskId);
-        if (metrics == null) {
-            throw new IllegalArgumentException("Invalid taskId");
+            ret.addAssocMetrics(assocMetrics.toArray(new AsmMetric[assocMetrics.size()]));
         }
-        metrics.remove(registerName);
-    }
 
-    public static Gauge<Double> registerWorkerGauge(
-            com.codahale.metrics.Gauge<Double> rawGauge, String name,
-            String... args) {
-        Gauge<Double> ret = new Gauge<Double>(rawGauge);
-        registerWorkerMetric(ret, name, args);
         return ret;
     }
 
-    public static Gauge<Double> registerTaskGauge(
-            com.codahale.metrics.Gauge<Double> rawGauge, int taskId,
-            String name, String... args) {
-        Gauge<Double> ret = new Gauge<Double>(rawGauge);
-        registerTaskMetric(ret, taskId, name, args);
+    public static AsmMetric registerTaskMetric(String name, AsmMetric metric) {
+        name = fixNameIfPossible(name);
+        AsmMetric ret = taskMetrics.register(name, metric);
+
+        if (metric.isAggregate()) {
+            String compMetricName = MetricUtils.task2compName(name);
+            AsmMetric componentMetric = componentMetrics.register(compMetricName, ret.clone());
+
+            ret.addAssocMetrics(componentMetric);
+        }
+
         return ret;
     }
 
-    public static Counter<Double> registerWorkerCounter(String name,
-            String... args) throws IllegalArgumentException {
-        Counter<Double> ret =
-                (Counter<Double>) Builder.mkInstance(Builder.COUNTER);
-        registerWorkerMetric(ret, name, args);
-        return ret;
+//    public static AsmMetric registerStreamTopologyMetric(String name, AsmMetric metric) {
+//        name = fixNameIfPossible(name);
+//        LOG.info("register stream metric:{}", name);
+//        AsmMetric ret = streamMetrics.register(name, metric);
+//
+//        if (metric.isAggregate()) {
+//            String taskMetricName = MetricUtils.stream2taskName(name);
+//            AsmMetric taskMetric = taskMetrics.register(taskMetricName, ret.clone());
+//
+//            String compMetricName = MetricUtils.task2compName(taskMetricName);
+//            AsmMetric componentMetric = componentMetrics.register(compMetricName, ret.clone());
+//
+//            String topologyMetricName = MetricUtils.comp2topologyName(compMetricName);
+//            AsmMetric topologyMetric = topologyMetrics.register(topologyMetricName, ret.clone());
+//
+//            ret.addAssocMetrics(taskMetric, componentMetric, topologyMetric);
+//        }
+//
+//        return ret;
+//    }
+
+    public static AsmMetric registerWorkerMetric(String name, AsmMetric metric) {
+        name = fixNameIfPossible(name);
+        return workerMetrics.register(name, metric);
     }
 
-    public static Counter<Double> registerTaskCounter(int taskId, String name,
-            String... args) {
-        Counter<Double> ret =
-                (Counter<Double>) Builder.mkInstance(Builder.COUNTER);
-        registerTaskMetric(ret, taskId, name, args);
+    public static AsmMetric registerWorkerTopologyMetric(String name, AsmMetric metric) {
+        name = fixNameIfPossible(name);
+        AsmMetric ret = workerMetrics.register(name, metric);
+
+        String topologyMetricName = MetricUtils.worker2topologyName(name);
+        AsmMetric topologyMetric = topologyMetrics.register(topologyMetricName, ret.clone());
+
+        ret.addAssocMetrics(topologyMetric);
+
         return ret;
     }
 
-    public static Meter registerWorkerMeter(String name, String... args)
-            throws IllegalArgumentException {
-        Meter ret = (Meter) Builder.mkInstance(Builder.METER);
-        registerWorkerMetric(ret, name, args);
-        return ret;
+    public static AsmMetric registerNettyMetric(String name, AsmMetric metric) {
+        name = fixNameIfPossible(name, NETTY_GROUP);
+        return nettyMetrics.register(name, metric);
     }
 
-    public static Meter registerTaskMeter(int taskId, String name,
-            String... args) {
-        Meter ret = (Meter) Builder.mkInstance(Builder.METER);
-        registerTaskMetric(ret, taskId, name, args);
-        return ret;
+    /**
+     * simplified helper method to register a worker histogram
+     *
+     * @param topologyId topology id
+     * @param name       metric name; NOTE: this is not a fully qualified name
+     * @param histogram  histogram
+     * @return registered histogram
+     */
+    public static AsmHistogram registerWorkerHistogram(String topologyId, String name, AsmHistogram histogram) {
+        return (AsmHistogram) registerWorkerMetric(
+                MetricUtils.workerMetricName(topologyId, host, 0, name, MetricType.HISTOGRAM), histogram);
     }
 
-    public static Histogram registerWorkerHistogram(String name, String... args)
-            throws IllegalArgumentException {
-        Histogram ret = (Histogram) Builder.mkInstance(Builder.HISTOGRAM);
-        registerWorkerMetric(ret, name, args);
-        return ret;
+    /**
+     * simplified helper method to register a worker gauge
+     */
+    public static AsmGauge registerWorkerGauge(String topologyId, String name, AsmGauge gauge) {
+        return (AsmGauge) registerWorkerMetric(
+                MetricUtils.workerMetricName(topologyId, host, 0, name, MetricType.GAUGE), gauge);
     }
 
-    public static Histogram registerTaskHistogram(int taskId, String name,
-            String... args) {
-        Histogram ret = (Histogram) Builder.mkInstance(Builder.HISTOGRAM);
-        registerTaskMetric(ret, taskId, name, args);
-        return ret;
+    /**
+     * simplified helper method to register a worker meter
+     */
+    public static AsmMeter registerWorkerMeter(String topologyId, String name, AsmMeter meter) {
+        return (AsmMeter) registerWorkerMetric(
+                MetricUtils.workerMetricName(topologyId, host, 0, name, MetricType.METER), meter);
     }
 
-    public static Timer registerWorkerTimer(String name, String... args)
-            throws IllegalArgumentException {
-        Timer ret = (Timer) Builder.mkInstance(Builder.TIMER);
-        registerWorkerMetric(ret, name, args);
-        return ret;
+    /**
+     * simplified helper method to register a worker counter
+     */
+    public static AsmCounter registerWorkerCounter(String topologyId, String name, AsmCounter counter) {
+        return (AsmCounter) registerWorkerMetric(
+                MetricUtils.workerMetricName(topologyId, host, 0, name, MetricType.COUNTER), counter);
     }
 
-    public static Timer registerTaskTimer(int taskId, String name,
-            String... args) {
-        Timer ret = (Timer) Builder.mkInstance(Builder.TIMER);
-        registerTaskMetric(ret, taskId, name, args);
-        return ret;
+    /**
+     * simplified helper method to register a worker timer
+     */
+    public static AsmTimer registerWorkerTimer(String topologyId, String name, AsmTimer timer) {
+        return (AsmTimer) registerWorkerMetric(
+                MetricUtils.workerMetricName(topologyId, host, 0, name, MetricType.TIMER), timer);
     }
 
-    public static class Builder {
-        public static final int COUNTER = 1;
-        public static final int METER = 2;
-        public static final int HISTOGRAM = 3;
-        public static final int TIMER = 4;
-
-        public static Metric mkInstance(int type) {
-            if (type == COUNTER) {
-                return new Counter<Double>(Double.valueOf(0));
-            } else if (type == METER) {
-                return new Meter();
-            } else if (type == HISTOGRAM) {
-                return new Histogram();
-            } else if (type == TIMER) {
-                return new Timer();
-            } else {
-                throw new IllegalArgumentException();
-            }
-        }
+    public static AsmMetric getStreamMetric(String name) {
+        name = fixNameIfPossible(name);
+        return streamMetrics.getMetric(name);
+    }
+
+    public static AsmMetric getTaskMetric(String name) {
+        name = fixNameIfPossible(name);
+        return taskMetrics.getMetric(name);
+    }
+
+    public static AsmMetric getComponentMetric(String name) {
+        name = fixNameIfPossible(name);
+        return componentMetrics.getMetric(name);
+    }
+
+    public static AsmMetric getWorkerMetric(String name) {
+        name = fixNameIfPossible(name);
+        return workerMetrics.getMetric(name);
     }
 
-    public static MetricInfo getExposeWorkerMetrics() {
-        return exposeWorkerMetrics;
+    public static void unregisterWorkerMetric(String name) {
+        name = fixNameIfPossible(name);
+        workerMetrics.remove(name);
     }
 
-    public static void setExposeWorkerMetrics(MetricInfo exposeWorkerMetrics) {
-        JStormMetrics.exposeWorkerMetrics = exposeWorkerMetrics;
+    public static void unregisterNettyMetric(String name) {
+        name = fixNameIfPossible(name, NETTY_GROUP);
+        nettyMetrics.remove(name);
     }
 
-    public static Map<Integer, MetricInfo> getExposeTaskMetrics() {
-        return exposeTaskMetrics;
+    public static void unregisterTaskMetric(String name) {
+        name = fixNameIfPossible(name);
+        taskMetrics.remove(name);
     }
 
-    public static void setExposeTaskMetrics(
-            Map<Integer, MetricInfo> exposeTaskMetrics) {
-        JStormMetrics.exposeTaskMetrics = exposeTaskMetrics;
+    public static AsmMetricRegistry getNettyMetrics() {
+        return nettyMetrics;
     }
 
-    public static Map<String, MetricInfo> getExposeNettyMetrics() {
-        return exposeNettyMetrics;
+    public static AsmMetricRegistry getWorkerMetrics() {
+        return workerMetrics;
     }
 
-    public static void setExposeNettyMetrics(Map<String, MetricInfo> exposeNettyMetrics) {
-        JStormMetrics.exposeNettyMetrics = exposeNettyMetrics;
+    public static AsmMetricRegistry getComponentMetrics() {
+        return componentMetrics;
     }
 
-    
+    public static AsmMetricRegistry getTaskMetrics() {
+        return taskMetrics;
+    }
+
+    public static AsmMetricRegistry getStreamMetrics() {
+        return streamMetrics;
+    }
+
+    /**
+     * Convert snapshots to thrift objects. Note that timestamps are aligned to the minute
+     * during the conversion, so the nimbus server receives snapshots with aligned
+     * timestamps (still in ms, as TDDL will use them).
+     */
+    public static MetricInfo computeAllMetrics() {
+        long start = System.currentTimeMillis();
+        MetricInfo metricInfo = MetricUtils.mkMetricInfo();
+
+        List<Map.Entry<String, AsmMetric>> entries = Lists.newArrayList();
+        entries.addAll(streamMetrics.metrics.entrySet());
+        entries.addAll(taskMetrics.metrics.entrySet());
+        entries.addAll(componentMetrics.metrics.entrySet());
+        entries.addAll(workerMetrics.metrics.entrySet());
+        entries.addAll(nettyMetrics.metrics.entrySet());
+        entries.addAll(topologyMetrics.metrics.entrySet());
+
+        for (Map.Entry<String, AsmMetric> entry : entries) {
+            String name = entry.getKey();
+            AsmMetric metric = entry.getValue();
+            Map<Integer, AsmSnapshot> snapshots = metric.getSnapshots();
+
+            int op = metric.getOp();
+            if ((op & AsmMetric.MetricOp.LOG) == AsmMetric.MetricOp.LOG) {
+                MetricUtils.printMetricSnapshot(metric, snapshots);
+            }
+
+            if ((op & AsmMetric.MetricOp.REPORT) == AsmMetric.MetricOp.REPORT) {
+                MetaType metaType = MetricUtils.metaType(metric.getMetricName());
+                try {
+                    if (metric instanceof AsmCounter) {
+                        Map data = MetricUtils.toThriftCounterSnapshots(snapshots);
+                        putIfNotEmpty(metricInfo.get_metrics(), name, data);
+                    } else if (metric instanceof AsmGauge) {
+                        Map data = MetricUtils.toThriftGaugeSnapshots(snapshots);
+                        putIfNotEmpty(metricInfo.get_metrics(), name, data);
+                    } else if (metric instanceof AsmMeter) {
+                        Map data = MetricUtils.toThriftMeterSnapshots(snapshots);
+                        putIfNotEmpty(metricInfo.get_metrics(), name, data);
+                    } else if (metric instanceof AsmHistogram) {
+                        Map data = MetricUtils.toThriftHistoSnapshots(metaType, snapshots);
+                        putIfNotEmpty(metricInfo.get_metrics(), name, data);
+                    } else if (metric instanceof AsmTimer) {
+                        Map data = MetricUtils.toThriftTimerSnapshots(metaType, snapshots);
+                        putIfNotEmpty(metricInfo.get_metrics(), name, data);
+                    }
+                } catch (Exception ex) {
+                    LOG.error("Error", ex);
+                }
+            }
+        }
+
+        if (debug) {
+            MetricUtils.printMetricInfo(metricInfo, debugMetricNames);
+        }
+        LOG.info("compute all metrics, cost:{}", System.currentTimeMillis() - start);
+
+        return metricInfo;
+    }
+
+    @SuppressWarnings("unchecked")
+    public static <T extends Map> void putIfNotEmpty(Map base, String name, T data) {
+        if (data != null && data.size() > 0) {
+            base.put(name, data);
+        }
+    }
+
+    public static String fixNameIfPossible(String name) {
+        return fixNameIfPossible(name, DEFAULT_GROUP);
+    }
+
+    public static String fixNameIfPossible(String name, String group) {
+        MetaType type = MetricUtils.metaType(name);
+        String[] parts = name.split(MetricUtils.DELIM);
+        if (parts[1].equals("")) {
+            parts[1] = topologyId;
+        }
+        if (type != MetaType.WORKER && parts[5].equals("")) {
+            parts[5] = group;
+        } else if (parts[2].equals("")) {
+            parts[2] = host;
+            parts[3] = port + "";
+            if (parts[4].equals("")) {
+                parts[4] = group;
+            }
+        }
+        return MetricUtils.concat(parts);
+    }
+
+    public static void main(String[] args) throws Exception {
+        JStormMetrics.topologyId = "topologyId";
+        JStormMetrics.host = "127.0.0.1";
+        JStormMetrics.port = 6800;
+
+        String tpId = "test";
+        String compName = "bolt";
+        int taskId = 1;
+        String streamId = "defaultStream";
+        String type = MetaType.STREAM.getV() + MetricType.COUNTER.getV();
+        String metricName = "counter1";
+        String group = "udf";
+
+        String name = MetricUtils.metricName(type, tpId, compName, taskId, streamId, group, metricName);
+        System.out.println(name);
+
+        AsmCounter counter = new AsmCounter();
+        AsmMetric ret1 = JStormMetrics.registerStreamMetric(name, counter, false);
+        AsmMetric ret2 = JStormMetrics.registerStreamMetric(name, counter, false);
+        System.out.println(ret1 == ret2);
+
+        counter.update(1L);
+
+        metricName = MetricUtils.workerMetricName("metric1", MetricType.COUNTER);
+        System.out.println(metricName);
+        metricName = fixNameIfPossible(metricName);
+        System.out.println(metricName);
+        System.out.println(fixNameIfPossible(metricName));
+    }
 
-    
 }
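
One detail worth calling out from the computeAllMetrics() javadoc: snapshot timestamps are aligned to the minute while staying in milliseconds. A tiny illustrative sketch; the floor-to-minute rounding is an assumption about how the alignment is implemented:

    public class AlignDemo {
        // floor a millisecond timestamp to the start of its minute, still in ms
        static long alignToMinute(long tsMs) {
            return tsMs / 60000L * 60000L;
        }

        public static void main(String[] args) {
            long ts = 1448928123456L;
            System.out.println(alignToMinute(ts)); // 1448928120000
        }
    }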


[23/51] [partial] storm git commit: Update JStorm to latest release 2.1.0

Posted by pt...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/client/ConfigExtension.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/client/ConfigExtension.java b/jstorm-core/src/main/java/com/alibaba/jstorm/client/ConfigExtension.java
index 26a068c..333d3bb 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/client/ConfigExtension.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/client/ConfigExtension.java
@@ -17,35 +17,55 @@
  */
 package com.alibaba.jstorm.client;
 
+import backtype.storm.Config;
+import backtype.storm.utils.Utils;
+import com.alibaba.jstorm.utils.JStormUtils;
+import org.apache.commons.lang.StringUtils;
+
 import java.security.InvalidParameterException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.commons.lang.StringUtils;
-
-import backtype.storm.Config;
-import backtype.storm.utils.Utils;
-
-import com.alibaba.jstorm.utils.JStormUtils;
-
 public class ConfigExtension {
     /**
-     * if this configure has been set, the spout or bolt will log all receive
-     * tuples
-     * 
+     * If this option is set, the spout or bolt will log all received tuples.
+     * <p/>
      * topology.debug only controls logging of sent tuples
      */
-    protected static final String TOPOLOGY_DEBUG_RECV_TUPLE =
-            "topology.debug.recv.tuple";
+    protected static final String TOPOLOGY_DEBUG_RECV_TUPLE = "topology.debug.recv.tuple";
 
     public static void setTopologyDebugRecvTuple(Map conf, boolean debug) {
         conf.put(TOPOLOGY_DEBUG_RECV_TUPLE, Boolean.valueOf(debug));
     }
 
     public static Boolean isTopologyDebugRecvTuple(Map conf) {
-        return JStormUtils.parseBoolean(conf.get(TOPOLOGY_DEBUG_RECV_TUPLE),
-                false);
+        return JStormUtils.parseBoolean(conf.get(TOPOLOGY_DEBUG_RECV_TUPLE), false);
+    }
+
+    private static final String TOPOLOGY_ENABLE_METRIC_DEBUG = "topology.enable.metric.debug";
+
+    public static boolean isEnableMetricDebug(Map conf) {
+        return JStormUtils.parseBoolean(conf.get(TOPOLOGY_ENABLE_METRIC_DEBUG), false);
+    }
+
+    private static final String TOPOLOGY_DEBUG_METRIC_NAMES = "topology.debug.metric.names";
+
+    public static String getDebugMetricNames(Map conf) {
+        String metrics = (String) conf.get(TOPOLOGY_DEBUG_METRIC_NAMES);
+        if (metrics == null) {
+            return "";
+        }
+        return metrics;
+    }
+
+    /**
+     * metrics switch, ONLY for performance tests; DO NOT set it to false in production
+     */
+    private static final String TOPOLOGY_ENABLE_METRICS = "topology.enable.metrics";
+
+    public static boolean isEnableMetrics(Map conf) {
+        return JStormUtils.parseBoolean(conf.get(TOPOLOGY_ENABLE_METRICS), true);
     }
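
As a usage sketch of the switches just added (hedged: the metric names passed to topology.debug.metric.names are hypothetical, and Config extends HashMap, so plain put() calls work for keys whose setters are not public):

    import backtype.storm.Config;
    import com.alibaba.jstorm.client.ConfigExtension;

    public class DebugConfDemo {
        public static void main(String[] args) {
            Config conf = new Config();
            // log every received tuple (topology.debug covers sent tuples)
            ConfigExtension.setTopologyDebugRecvTuple(conf, true);
            // metric debugging: keys match the constants defined above
            conf.put("topology.enable.metric.debug", true);
            conf.put("topology.debug.metric.names", "MetricA,MetricB");

            System.out.println(ConfigExtension.isTopologyDebugRecvTuple(conf)); // true
            System.out.println(ConfigExtension.isEnableMetricDebug(conf));      // true
            System.out.println(ConfigExtension.getDebugMetricNames(conf));      // MetricA,MetricB
        }
    }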
 
     /**
@@ -53,27 +73,20 @@ public class ConfigExtension {
      */
     private static final Integer DEFAULT_DEAMON_HTTPSERVER_PORT = 7621;
 
-    protected static final String SUPERVISOR_DEAMON_HTTPSERVER_PORT =
-            "supervisor.deamon.logview.port";
+    protected static final String SUPERVISOR_DEAMON_HTTPSERVER_PORT = "supervisor.deamon.logview.port";
 
     public static Integer getSupervisorDeamonHttpserverPort(Map conf) {
-        return JStormUtils.parseInt(
-                conf.get(SUPERVISOR_DEAMON_HTTPSERVER_PORT),
-                DEFAULT_DEAMON_HTTPSERVER_PORT + 1);
+        return JStormUtils.parseInt(conf.get(SUPERVISOR_DEAMON_HTTPSERVER_PORT), DEFAULT_DEAMON_HTTPSERVER_PORT + 1);
     }
 
-    protected static final String NIMBUS_DEAMON_HTTPSERVER_PORT =
-            "nimbus.deamon.logview.port";
+    protected static final String NIMBUS_DEAMON_HTTPSERVER_PORT = "nimbus.deamon.logview.port";
 
     public static Integer getNimbusDeamonHttpserverPort(Map conf) {
-        return JStormUtils.parseInt(conf.get(NIMBUS_DEAMON_HTTPSERVER_PORT),
-                DEFAULT_DEAMON_HTTPSERVER_PORT);
+        return JStormUtils.parseInt(conf.get(NIMBUS_DEAMON_HTTPSERVER_PORT), DEFAULT_DEAMON_HTTPSERVER_PORT);
     }
 
     /**
      * Worker gc parameter
-     * 
-     * 
      */
     protected static final String WORKER_GC_CHILDOPTS = "worker.gc.childopts";
 
@@ -85,8 +98,7 @@ public class ConfigExtension {
         return (String) conf.get(WORKER_GC_CHILDOPTS);
     }
 
-    protected static final String WOREKER_REDIRECT_OUTPUT =
-            "worker.redirect.output";
+    protected static final String WOREKER_REDIRECT_OUTPUT = "worker.redirect.output";
 
     public static boolean getWorkerRedirectOutput(Map conf) {
         Object result = conf.get(WOREKER_REDIRECT_OUTPUT);
@@ -95,8 +107,7 @@ public class ConfigExtension {
         return (Boolean) result;
     }
 
-    protected static final String WOREKER_REDIRECT_OUTPUT_FILE =
-            "worker.redirect.output.file";
+    protected static final String WOREKER_REDIRECT_OUTPUT_FILE = "worker.redirect.output.file";
 
     public static void setWorkerRedirectOutputFile(Map conf, String outputPath) {
         conf.put(WOREKER_REDIRECT_OUTPUT_FILE, outputPath);
@@ -107,9 +118,8 @@ public class ConfigExtension {
     }
 
     /**
-     * Usually, spout finish prepare before bolt, so spout need wait several
-     * seconds so that bolt finish preparation
-     * 
+     * Usually, a spout finishes preparing before the bolts do, so the spout needs to wait several seconds for the bolts to finish preparation
+     * <p/>
      * By default, the setting is 30 seconds
      */
     protected static final String SPOUT_DELAY_RUN = "spout.delay.run";
@@ -154,32 +164,26 @@ public class ConfigExtension {
     }
 
     /**
-     * if the setting has been set, the component's task must run different node
-     * This is conflict with USE_SINGLE_NODE
+     * If this setting is enabled, the component's tasks must run on different nodes. This conflicts with USE_SINGLE_NODE.
      */
-    protected static final String TASK_ON_DIFFERENT_NODE =
-            "task.on.differ.node";
+    protected static final String TASK_ON_DIFFERENT_NODE = "task.on.differ.node";
 
     public static void setTaskOnDifferentNode(Map conf, boolean isIsolate) {
         conf.put(TASK_ON_DIFFERENT_NODE, Boolean.valueOf(isIsolate));
     }
 
     public static boolean isTaskOnDifferentNode(Map conf) {
-        return JStormUtils
-                .parseBoolean(conf.get(TASK_ON_DIFFERENT_NODE), false);
+        return JStormUtils.parseBoolean(conf.get(TASK_ON_DIFFERENT_NODE), false);
     }
 
-    protected static final String SUPERVISOR_ENABLE_CGROUP =
-            "supervisor.enable.cgroup";
+    protected static final String SUPERVISOR_ENABLE_CGROUP = "supervisor.enable.cgroup";
 
     public static boolean isEnableCgroup(Map conf) {
-        return JStormUtils.parseBoolean(conf.get(SUPERVISOR_ENABLE_CGROUP),
-                false);
+        return JStormUtils.parseBoolean(conf.get(SUPERVISOR_ENABLE_CGROUP), false);
     }
 
     /**
-     * If component or topology configuration set "use.old.assignment", will try
-     * use old assignment firstly
+     * If the component or topology configuration sets "use.old.assignment", the old assignment will be tried first
      */
     protected static final String USE_OLD_ASSIGNMENT = "use.old.assignment";
 
@@ -213,12 +217,10 @@ public class ConfigExtension {
         return JStormUtils.parseBoolean(conf.get(NIMBUS_USE_IP), false);
     }
 
-    protected static final String TOPOLOGY_ENABLE_CLASSLOADER =
-            "topology.enable.classloader";
+    protected static final String TOPOLOGY_ENABLE_CLASSLOADER = "topology.enable.classloader";
 
     public static boolean isEnableTopologyClassLoader(Map conf) {
-        return JStormUtils.parseBoolean(conf.get(TOPOLOGY_ENABLE_CLASSLOADER),
-                false);
+        return JStormUtils.parseBoolean(conf.get(TOPOLOGY_ENABLE_CLASSLOADER), false);
     }
 
     public static void setEnableTopologyClassLoader(Map conf, boolean enable) {
@@ -235,14 +237,10 @@ public class ConfigExtension {
         conf.put(CLASSLOADER_DEBUG, enable);
     }
 
-    protected static final String CONTAINER_NIMBUS_HEARTBEAT =
-            "container.nimbus.heartbeat";
+    protected static final String CONTAINER_NIMBUS_HEARTBEAT = "container.nimbus.heartbeat";
 
     /**
     * Check whether nimbus runs inside an Apsara/Yarn container
-     * 
-     * @param conf
-     * @return
      */
     public static boolean isEnableContainerNimbus() {
         String path = System.getenv(CONTAINER_NIMBUS_HEARTBEAT);
@@ -256,23 +254,15 @@ public class ConfigExtension {
 
     /**
     * Get the Apsara/Yarn nimbus container's heartbeat dir
-     * 
-     * @param conf
-     * @return
      */
     public static String getContainerNimbusHearbeat() {
         return System.getenv(CONTAINER_NIMBUS_HEARTBEAT);
     }
 
-    protected static final String CONTAINER_SUPERVISOR_HEARTBEAT =
-            "container.supervisor.heartbeat";
+    protected static final String CONTAINER_SUPERVISOR_HEARTBEAT = "container.supervisor.heartbeat";
 
     /**
-     * Get to know whether supervisor is run under Apsara/Yarn supervisor
-     * container
-     * 
-     * @param conf
-     * @return
+     * Check whether the supervisor runs inside an Apsara/Yarn supervisor container
      */
     public static boolean isEnableContainerSupervisor() {
         String path = System.getenv(CONTAINER_SUPERVISOR_HEARTBEAT);
@@ -286,28 +276,21 @@ public class ConfigExtension {
 
     /**
     * Get the Apsara/Yarn supervisor container's heartbeat dir
-     * 
-     * @param conf
-     * @return
      */
     public static String getContainerSupervisorHearbeat() {
         return (String) System.getenv(CONTAINER_SUPERVISOR_HEARTBEAT);
     }
 
-    protected static final String CONTAINER_HEARTBEAT_TIMEOUT_SECONDS =
-            "container.heartbeat.timeout.seconds";
+    protected static final String CONTAINER_HEARTBEAT_TIMEOUT_SECONDS = "container.heartbeat.timeout.seconds";
 
     public static int getContainerHeartbeatTimeoutSeconds(Map conf) {
-        return JStormUtils.parseInt(
-                conf.get(CONTAINER_HEARTBEAT_TIMEOUT_SECONDS), 240);
+        return JStormUtils.parseInt(conf.get(CONTAINER_HEARTBEAT_TIMEOUT_SECONDS), 240);
     }
 
-    protected static final String CONTAINER_HEARTBEAT_FREQUENCE =
-            "container.heartbeat.frequence";
+    protected static final String CONTAINER_HEARTBEAT_FREQUENCE = "container.heartbeat.frequence";
 
     public static int getContainerHeartbeatFrequence(Map conf) {
-        return JStormUtils
-                .parseInt(conf.get(CONTAINER_HEARTBEAT_FREQUENCE), 10);
+        return JStormUtils.parseInt(conf.get(CONTAINER_HEARTBEAT_FREQUENCE), 10);
     }
 
     protected static final String JAVA_SANDBOX_ENABLE = "java.sandbox.enable";
@@ -326,12 +309,10 @@ public class ConfigExtension {
         conf.put(SPOUT_SINGLE_THREAD, enable);
     }
 
-    protected static String WORKER_STOP_WITHOUT_SUPERVISOR =
-            "worker.stop.without.supervisor";
+    protected static String WORKER_STOP_WITHOUT_SUPERVISOR = "worker.stop.without.supervisor";
 
     public static boolean isWorkerStopWithoutSupervisor(Map conf) {
-        return JStormUtils.parseBoolean(
-                conf.get(WORKER_STOP_WITHOUT_SUPERVISOR), false);
+        return JStormUtils.parseBoolean(conf.get(WORKER_STOP_WITHOUT_SUPERVISOR), false);
     }
 
     protected static String CGROUP_ROOT_DIR = "supervisor.cgroup.rootdir";
@@ -340,33 +321,15 @@ public class ConfigExtension {
         return (String) conf.get(CGROUP_ROOT_DIR);
     }
 
-    protected static String NETTY_TRANSFER_ASYNC_AND_BATCH =
-            "storm.messaging.netty.transfer.async.batch";
+    protected static String NETTY_TRANSFER_ASYNC_AND_BATCH = "storm.messaging.netty.transfer.async.batch";
 
     public static boolean isNettyTransferAsyncBatch(Map conf) {
-        return JStormUtils.parseBoolean(
-                conf.get(NETTY_TRANSFER_ASYNC_AND_BATCH), true);
+        return JStormUtils.parseBoolean(conf.get(NETTY_TRANSFER_ASYNC_AND_BATCH), true);
     }
-    
-    protected static String NETTY_PENDING_BUFFER_TIMEOUT =
-            "storm.messaging.netty.pending.buffer.timeout";
 
-    public static void setNettyPendingBufferTimeout(Map conf, Long timeout) {
-        conf.put(NETTY_PENDING_BUFFER_TIMEOUT, timeout);
-    }
-    
-    public static long getNettyPendingBufferTimeout(Map conf) {
-        int messageTimeout = JStormUtils.parseInt(
-                conf.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS), 120);
-        return JStormUtils.parseLong(
-                conf.get(NETTY_PENDING_BUFFER_TIMEOUT), messageTimeout*1000);
-    }
+    protected static final String USE_USERDEFINE_ASSIGNMENT = "use.userdefine.assignment";
 
-    protected static final String USE_USERDEFINE_ASSIGNMENT =
-            "use.userdefine.assignment";
-
-    public static void setUserDefineAssignment(Map conf,
-            List<WorkerAssignment> userDefines) {
+    public static void setUserDefineAssignment(Map conf, List<WorkerAssignment> userDefines) {
         List<String> ret = new ArrayList<String>();
         for (WorkerAssignment worker : userDefines) {
             ret.add(Utils.to_json(worker));
@@ -384,6 +347,17 @@ public class ConfigExtension {
         return ret;
     }
 
+    protected static String NETTY_PENDING_BUFFER_TIMEOUT = "storm.messaging.netty.pending.buffer.timeout";
+
+    public static void setNettyPendingBufferTimeout(Map conf, Long timeout) {
+        conf.put(NETTY_PENDING_BUFFER_TIMEOUT, timeout);
+    }
+
+    public static long getNettyPendingBufferTimeout(Map conf) {
+        int messageTimeout = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS), 120);
+        return JStormUtils.parseLong(conf.get(NETTY_PENDING_BUFFER_TIMEOUT), messageTimeout * 1000);
+    }
+
     protected static final String MEMSIZE_PER_WORKER = "worker.memory.size";
 
     public static void setMemSizePerWorker(Map conf, long memSize) {
@@ -406,12 +380,25 @@ public class ConfigExtension {
     }
 
     public static long getMemSizePerWorker(Map conf) {
-        long size =
-                JStormUtils.parseLong(conf.get(MEMSIZE_PER_WORKER),
-                        JStormUtils.SIZE_1_G * 2);
+        long size = JStormUtils.parseLong(conf.get(MEMSIZE_PER_WORKER), JStormUtils.SIZE_1_G * 2);
         return size > 0 ? size : JStormUtils.SIZE_1_G * 2;
     }
 
+    protected static final String MIN_MEMSIZE_PER_WORKER = "worker.memory.min.size";
+
+    public static void setMemMinSizePerWorker(Map conf, long memSize) {
+        conf.put(MIN_MEMSIZE_PER_WORKER, memSize);
+    }
+
+    public static long getMemMinSizePerWorker(Map conf) {
+        long maxMemSize = getMemSizePerWorker(conf);
+
+        Long size = JStormUtils.parseLong(conf.get(MIN_MEMSIZE_PER_WORKER));
+        long minMemSize = (size == null || size == 0) ? maxMemSize : size;
+
+        return minMemSize;
+    }
+
     protected static final String CPU_SLOT_PER_WORKER = "worker.cpu.slot.num";
 
     public static void setCpuSlotNumPerWorker(Map conf, int slotNum) {
@@ -423,39 +410,34 @@ public class ConfigExtension {
         return slot > 0 ? slot : 1;
     }
 
-    protected static String TOPOLOGY_PERFORMANCE_METRICS =
-            "topology.performance.metrics";
+    protected static String TOPOLOGY_PERFORMANCE_METRICS = "topology.performance.metrics";
 
     public static boolean isEnablePerformanceMetrics(Map conf) {
-        return JStormUtils.parseBoolean(conf.get(TOPOLOGY_PERFORMANCE_METRICS),
-                true);
+        return JStormUtils.parseBoolean(conf.get(TOPOLOGY_PERFORMANCE_METRICS), true);
     }
 
     public static void setPerformanceMetrics(Map conf, boolean isEnable) {
         conf.put(TOPOLOGY_PERFORMANCE_METRICS, isEnable);
     }
 
-    protected static String NETTY_BUFFER_THRESHOLD_SIZE =
-            "storm.messaging.netty.buffer.threshold";
+    protected static String NETTY_BUFFER_THRESHOLD_SIZE = "storm.messaging.netty.buffer.threshold";
 
     public static long getNettyBufferThresholdSize(Map conf) {
-        return JStormUtils.parseLong(conf.get(NETTY_BUFFER_THRESHOLD_SIZE),
-                8 * JStormUtils.SIZE_1_M);
+        return JStormUtils.parseLong(conf.get(NETTY_BUFFER_THRESHOLD_SIZE), 8 * JStormUtils.SIZE_1_M);
     }
 
     public static void setNettyBufferThresholdSize(Map conf, long size) {
         conf.put(NETTY_BUFFER_THRESHOLD_SIZE, size);
     }
 
-    protected static String NETTY_MAX_SEND_PENDING =
-            "storm.messaging.netty.max.pending";
+    protected static String NETTY_MAX_SEND_PENDING = "storm.messaging.netty.max.pending";
 
     public static void setNettyMaxSendPending(Map conf, long pending) {
         conf.put(NETTY_MAX_SEND_PENDING, pending);
     }
 
     public static long getNettyMaxSendPending(Map conf) {
-        return JStormUtils.parseLong(conf.get(NETTY_MAX_SEND_PENDING), 4);
+        return JStormUtils.parseLong(conf.get(NETTY_MAX_SEND_PENDING), 16);
     }
 
     protected static String DISRUPTOR_USE_SLEEP = "disruptor.use.sleep";
@@ -469,9 +451,7 @@ public class ConfigExtension {
     }
 
     public static boolean isTopologyContainAcker(Map conf) {
-        int num =
-                JStormUtils.parseInt(conf.get(Config.TOPOLOGY_ACKER_EXECUTORS),
-                        1);
+        int num = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_ACKER_EXECUTORS), 1);
         if (num > 0) {
             return true;
         } else {
@@ -489,8 +469,7 @@ public class ConfigExtension {
         conf.put(NETTY_SYNC_MODE, sync);
     }
 
-    protected static String NETTY_ASYNC_BLOCK =
-            "storm.messaging.netty.async.block";
+    protected static String NETTY_ASYNC_BLOCK = "storm.messaging.netty.async.block";
 
     public static boolean isNettyASyncBlock(Map conf) {
         return JStormUtils.parseBoolean(conf.get(NETTY_ASYNC_BLOCK), true);
@@ -500,20 +479,17 @@ public class ConfigExtension {
         conf.put(NETTY_ASYNC_BLOCK, block);
     }
 
-    protected static String ALIMONITOR_METRICS_POST =
-            "topology.alimonitor.metrics.post";
+    protected static String ALIMONITOR_METRICS_POST = "topology.alimonitor.metrics.post";
 
     public static boolean isAlimonitorMetricsPost(Map conf) {
-        return JStormUtils
-                .parseBoolean(conf.get(ALIMONITOR_METRICS_POST), true);
+        return JStormUtils.parseBoolean(conf.get(ALIMONITOR_METRICS_POST), true);
     }
 
     public static void setAlimonitorMetricsPost(Map conf, boolean post) {
         conf.put(ALIMONITOR_METRICS_POST, post);
     }
 
-    public static String TASK_CLEANUP_TIMEOUT_SEC =
-            "task.cleanup.timeout.sec";
+    public static String TASK_CLEANUP_TIMEOUT_SEC = "task.cleanup.timeout.sec";
 
     public static int getTaskCleanupTimeoutSec(Map conf) {
         return JStormUtils.parseInt(conf.get(TASK_CLEANUP_TIMEOUT_SEC), 10);
@@ -566,6 +542,7 @@ public class ConfigExtension {
         return JStormUtils.parseInt(uiCluster.get(UI_CLUSTER_ZK_PORT));
     }
 
+
     protected static String SPOUT_PEND_FULL_SLEEP = "spout.pending.full.sleep";
 
     public static boolean isSpoutPendFullSleep(Map conf) {
@@ -577,8 +554,7 @@ public class ConfigExtension {
 
     }
 
-    protected static String LOGVIEW_ENCODING =
-            "supervisor.deamon.logview.encoding";
+    protected static String LOGVIEW_ENCODING = "supervisor.deamon.logview.encoding";
     protected static String UTF8 = "utf-8";
 
     public static String getLogViewEncoding(Map conf) {
@@ -603,16 +579,13 @@ public class ConfigExtension {
     }
 
     public static String TASK_STATUS_ACTIVE = "Active";
+    public static String TASK_STATUS_INACTIVE = "Inactive";
     public static String TASK_STATUS_STARTING = "Starting";
 
-    protected static String ALIMONITOR_TOPO_METIRC_NAME =
-            "topology.alimonitor.topo.metrics.name";
-    protected static String ALIMONITOR_TASK_METIRC_NAME =
-            "topology.alimonitor.task.metrics.name";
-    protected static String ALIMONITOR_WORKER_METIRC_NAME =
-            "topology.alimonitor.worker.metrics.name";
-    protected static String ALIMONITOR_USER_METIRC_NAME =
-            "topology.alimonitor.user.metrics.name";
+    protected static String ALIMONITOR_TOPO_METIRC_NAME = "topology.alimonitor.topo.metrics.name";
+    protected static String ALIMONITOR_TASK_METIRC_NAME = "topology.alimonitor.task.metrics.name";
+    protected static String ALIMONITOR_WORKER_METIRC_NAME = "topology.alimonitor.worker.metrics.name";
+    protected static String ALIMONITOR_USER_METIRC_NAME = "topology.alimonitor.user.metrics.name";
 
     public static String getAlmonTopoMetricName(Map conf) {
         return (String) conf.get(ALIMONITOR_TOPO_METIRC_NAME);
@@ -635,8 +608,7 @@ public class ConfigExtension {
 
     public static Integer getSpoutParallelism(Map conf, String componentName) {
         Integer ret = null;
-        Map<String, String> map =
-                (Map<String, String>) (conf.get(SPOUT_PARALLELISM));
+        Map<String, String> map = (Map<String, String>) (conf.get(SPOUT_PARALLELISM));
         if (map != null)
             ret = JStormUtils.parseInt(map.get(componentName));
         return ret;
@@ -644,15 +616,13 @@ public class ConfigExtension {
 
     public static Integer getBoltParallelism(Map conf, String componentName) {
         Integer ret = null;
-        Map<String, String> map =
-                (Map<String, String>) (conf.get(BOLT_PARALLELISM));
+        Map<String, String> map = (Map<String, String>) (conf.get(BOLT_PARALLELISM));
         if (map != null)
             ret = JStormUtils.parseInt(map.get(componentName));
         return ret;
     }
 
-    protected static String TOPOLOGY_BUFFER_SIZE_LIMITED =
-            "topology.buffer.size.limited";
+    protected static String TOPOLOGY_BUFFER_SIZE_LIMITED = "topology.buffer.size.limited";
 
     public static void setTopologyBufferSizeLimited(Map conf, boolean limited) {
         conf.put(TOPOLOGY_BUFFER_SIZE_LIMITED, limited);
@@ -664,30 +634,38 @@ public class ConfigExtension {
             return true;
         }
 
-        return JStormUtils.parseBoolean(conf.get(TOPOLOGY_BUFFER_SIZE_LIMITED),
-                true);
+        return JStormUtils.parseBoolean(conf.get(TOPOLOGY_BUFFER_SIZE_LIMITED), true);
 
     }
 
-    protected static String SUPERVISOR_SLOTS_PORTS_BASE =
-            "supervisor.slots.ports.base";
+    protected static String SUPERVISOR_SLOTS_PORTS_BASE = "supervisor.slots.ports.base";
 
     public static int getSupervisorSlotsPortsBase(Map conf) {
-        return JStormUtils
-                .parseInt(conf.get(SUPERVISOR_SLOTS_PORTS_BASE), 6800);
+        return JStormUtils.parseInt(conf.get(SUPERVISOR_SLOTS_PORTS_BASE), 6800);
     }
 
     // SUPERVISOR_SLOTS_PORTS_BASE does not provide a setter; it must be
     // set via configuration
 
-    protected static String SUPERVISOR_SLOTS_PORT_CPU_WEIGHT =
-            "supervisor.slots.port.cpu.weight";
+    protected static String SUPERVISOR_SLOTS_PORT_CPU_WEIGHT = "supervisor.slots.port.cpu.weight";
 
     public static double getSupervisorSlotsPortCpuWeight(Map conf) {
         Object value = conf.get(SUPERVISOR_SLOTS_PORT_CPU_WEIGHT);
         Double ret = JStormUtils.convertToDouble(value);
-        if (ret == null) {
-            return 1.0;
+        if (ret == null || ret <= 0) {
+            return 1.2;
+        } else {
+            return ret;
+        }
+    }
+    
+    protected static String SUPERVISOR_SLOTS_PORT_MEM_WEIGHT = "supervisor.slots.port.mem.weight";
+
+    public static double getSupervisorSlotsPortMemWeight(Map conf) {
+        Object value = conf.get(SUPERVISOR_SLOTS_PORT_MEM_WEIGHT);
+        Double ret = JStormUtils.convertToDouble(value);
+        if (ret == null || ret <= 0) {
+            return 0.7;
         } else {
             return ret;
         }
@@ -706,8 +684,7 @@ public class ConfigExtension {
         conf.put(USER_DEFINED_LOG4J_CONF, fileName);
     }
 
-    protected static String USER_DEFINED_LOGBACK_CONF =
-            "user.defined.logback.conf";
+    protected static String USER_DEFINED_LOGBACK_CONF = "user.defined.logback.conf";
 
     public static String getUserDefinedLogbackConf(Map conf) {
         return (String) conf.get(USER_DEFINED_LOGBACK_CONF);
@@ -717,12 +694,10 @@ public class ConfigExtension {
         conf.put(USER_DEFINED_LOGBACK_CONF, fileName);
     }
 
-    protected static String TASK_ERROR_INFO_REPORT_INTERVAL =
-            "topology.task.error.report.interval";
+    protected static String TASK_ERROR_INFO_REPORT_INTERVAL = "topology.task.error.report.interval";
 
     public static Integer getTaskErrorReportInterval(Map conf) {
-        return JStormUtils.parseInt(conf.get(TASK_ERROR_INFO_REPORT_INTERVAL),
-                60);
+        return JStormUtils.parseInt(conf.get(TASK_ERROR_INFO_REPORT_INTERVAL), 60);
     }
 
     public static void setTaskErrorReportInterval(Map conf, Integer interval) {
@@ -739,18 +714,16 @@ public class ConfigExtension {
         conf.put(DEFAULT_CACHE_TIMEOUT, timeout);
     }
 
-    protected static String WORKER_MERTRIC_REPORT_FREQUENCY =
-            "worker.metric.report.frequency.secs";
+    protected static String WORKER_MERTRIC_REPORT_CHECK_FREQUENCY = "worker.metric.report.frequency.secs";
 
-    public static int getWorkerMetricReportFrequency(Map conf) {
-        return JStormUtils.parseInt(conf.get(WORKER_MERTRIC_REPORT_FREQUENCY),
-                60);
+    public static int getWorkerMetricReportCheckFrequency(Map conf) {
+        return JStormUtils.parseInt(conf.get(WORKER_MERTRIC_REPORT_CHECK_FREQUENCY), 60);
     }
 
     public static void setWorkerMetricReportFrequency(Map conf, int frequence) {
-        conf.put(WORKER_MERTRIC_REPORT_FREQUENCY, frequence);
+        conf.put(WORKER_MERTRIC_REPORT_CHECK_FREQUENCY, frequence);
     }
-    
+
     /**
      * Store the local worker port/workerId/supervisorId in the configuration
      */
@@ -767,7 +740,7 @@ public class ConfigExtension {
     }
 
     public static String getLocalWorkerId(Map conf) {
-        return (String)conf.get(LOCLA_WORKER_ID);
+        return (String) conf.get(LOCLA_WORKER_ID);
     }
 
     public static void setLocalWorkerId(Map conf, String workerId) {
@@ -775,25 +748,24 @@ public class ConfigExtension {
     }
 
     public static String getLocalSupervisorId(Map conf) {
-        return (String)conf.get(LOCAL_SUPERVISOR_ID);
+        return (String) conf.get(LOCAL_SUPERVISOR_ID);
     }
 
     public static void setLocalSupervisorId(Map conf, String supervisorId) {
         conf.put(LOCAL_SUPERVISOR_ID, supervisorId);
     }
-    
-    protected static String WORKER_CPU_CORE_UPPER_LIMIT =
-            "worker.cpu.core.upper.limit";
+
+    protected static String WORKER_CPU_CORE_UPPER_LIMIT = "worker.cpu.core.upper.limit";
 
     public static Integer getWorkerCpuCoreUpperLimit(Map conf) {
         return JStormUtils.parseInt(conf.get(WORKER_CPU_CORE_UPPER_LIMIT), 1);
     }
 
-    public static void setWorkerCpuCoreUpperLimit(Map conf,
-            Integer cpuUpperLimit) {
+    public static void setWorkerCpuCoreUpperLimit(Map conf, Integer cpuUpperLimit) {
         conf.put(WORKER_CPU_CORE_UPPER_LIMIT, cpuUpperLimit);
     }
 
+
     protected static String CLUSTER_NAME = "cluster.name";
 
     public static String getClusterName(Map conf) {
@@ -803,33 +775,68 @@ public class ConfigExtension {
     public static void setClusterName(Map conf, String clusterName) {
         conf.put(CLUSTER_NAME, clusterName);
     }
-    
+
+
     protected static final String NIMBUS_CACHE_CLASS = "nimbus.cache.class";
-    
+
     public static String getNimbusCacheClass(Map conf) {
-        return (String)conf.get(NIMBUS_CACHE_CLASS);
+        return (String) conf.get(NIMBUS_CACHE_CLASS);
     }
-    
+
     /**
      * if this is set, the nimbus cache DB will be cleaned when nimbus starts
      */
     protected static final String NIMBUS_CACHE_RESET = "nimbus.cache.reset";
-    
+
     public static boolean getNimbusCacheReset(Map conf) {
         return JStormUtils.parseBoolean(conf.get(NIMBUS_CACHE_RESET), true);
     }
-    
+
+    /**
+     * if this is set, the nimbus metrics cache DB will be cleaned when nimbus starts
+     */
+    protected static final String NIMBUS_METRIC_CACHE_RESET = "nimbus.metric.cache.reset";
+
+    public static boolean getMetricCacheReset(Map conf) {
+        return JStormUtils.parseBoolean(conf.get(NIMBUS_METRIC_CACHE_RESET), false);
+    }
+
+    public static final double DEFAULT_METRIC_SAMPLE_RATE = 0.10d;
+
+    public static final String TOPOLOGY_METRIC_SAMPLE_RATE = "topology.metric.sample.rate";
+
+    public static double getMetricSampleRate(Map conf) {
+        double sampleRate = JStormUtils.parseDouble(conf.get(TOPOLOGY_METRIC_SAMPLE_RATE), DEFAULT_METRIC_SAMPLE_RATE);
+        if (!conf.containsKey(TOPOLOGY_METRIC_SAMPLE_RATE)) {
+            conf.put(TOPOLOGY_METRIC_SAMPLE_RATE, sampleRate);
+        }
+        return sampleRate;
+    }
+
     public static final String CACHE_TIMEOUT_LIST = "cache.timeout.list";
-    
+
     public static List<Integer> getCacheTimeoutList(Map conf) {
-        return (List<Integer>)conf.get(CACHE_TIMEOUT_LIST);
+        return (List<Integer>) conf.get(CACHE_TIMEOUT_LIST);
     }
-    
+
     protected static final String NIMBUS_METRICS_THREAD_NUM = "nimbus.metrics.thread.num";
+
     public static int getNimbusMetricThreadNum(Map conf) {
         return JStormUtils.parseInt(conf.get(NIMBUS_METRICS_THREAD_NUM), 2);
     }
 
+    public static final String METRIC_UPLOADER_CLASS = "nimbus.metric.uploader.class";
+
+    public static String getMetricUploaderClass(Map<Object, Object> conf) {
+        return (String) conf.get(METRIC_UPLOADER_CLASS);
+    }
+
+    public static final String METRIC_QUERY_CLIENT_CLASS = "nimbus.metric.query.client.class";
+
+    public static String getMetricQueryClientClass(Map<Object, Object> conf) {
+        return (String) conf.get(METRIC_QUERY_CLIENT_CLASS);
+    }
+
     protected static String TASK_MSG_BATCH_SIZE = "task.msg.batch.size";
 
     public static Integer getTaskMsgBatchSize(Map conf) {
@@ -839,9 +846,9 @@ public class ConfigExtension {
     public static void setTaskMsgBatchSize(Map conf, Integer batchSize) {
         conf.put(TASK_MSG_BATCH_SIZE, batchSize);
     }
-    
-    protected static String TASK_BATCH_TUPLE  = "task.batch.tuple";
-    
+
+    protected static String TASK_BATCH_TUPLE = "task.batch.tuple";
+
     public static Boolean isTaskBatchTuple(Map conf) {
         return JStormUtils.parseBoolean(conf.get(TASK_BATCH_TUPLE), false);
     }
@@ -849,19 +856,87 @@ public class ConfigExtension {
     public static void setTaskBatchTuple(Map conf, boolean isBatchTuple) {
         conf.put(TASK_BATCH_TUPLE, isBatchTuple);
     }
-    
-    protected static String TOPOLOGY_ENABLE_NETTY_METRICS = "topology.enable.netty.metrics";
-    public static void setTopologyNettyMetrics(Map conf, boolean enable) {
-    	conf.put(TOPOLOGY_ENABLE_NETTY_METRICS, enable);
+
+    protected static String TOPOLOGY_MAX_WORKER_NUM_FOR_NETTY_METRICS = "topology.max.worker.num.for.netty.metrics";
+
+    public static void setTopologyMaxWorkerNumForNettyMetrics(Map conf, int num) {
+        conf.put(TOPOLOGY_MAX_WORKER_NUM_FOR_NETTY_METRICS, num);
     }
-    
-    public static Boolean isEnableTopologyNettyMetrics(Map conf) {
-    	return (Boolean)conf.get(TOPOLOGY_ENABLE_NETTY_METRICS);
+
+    public static int getTopologyMaxWorkerNumForNettyMetrics(Map conf) {
+        return JStormUtils.parseInt(conf.get(TOPOLOGY_MAX_WORKER_NUM_FOR_NETTY_METRICS), 200);
     }
-    
+
     protected static String UI_ONE_TABLE_PAGE_SIZE = "ui.one.table.page.size";
+
     public static long getUiOneTablePageSize(Map conf) {
-    	return JStormUtils.parseLong(conf.get(UI_ONE_TABLE_PAGE_SIZE), 200);
+        return JStormUtils.parseLong(conf.get(UI_ONE_TABLE_PAGE_SIZE), 200);
     }
-    
-}
+
+    protected static String MAX_PENDING_METRIC_NUM = "topology.max.pending.metric.num";
+
+    public static int getMaxPendingMetricNum(Map conf) {
+        return JStormUtils.parseInt(conf.get(MAX_PENDING_METRIC_NUM), 200);
+    }
+
+    protected static String TOPOLOGY_MASTER_SINGLE_WORKER = "topology.master.single.worker";
+
+    public static Boolean getTopologyMasterSingleWorker(Map conf) {
+        Boolean ret = JStormUtils.parseBoolean(conf.get(TOPOLOGY_MASTER_SINGLE_WORKER));
+        return ret;
+    }
+
+    public static String TOPOLOGY_BACKPRESSURE_WATER_MARK_HIGH = "topology.backpressure.water.mark.high";
+
+    public static double getBackpressureWaterMarkHigh(Map conf) {
+        return JStormUtils.parseDouble(conf.get(TOPOLOGY_BACKPRESSURE_WATER_MARK_HIGH), 0.8);
+    }
+
+    public static String TOPOLOGY_BACKPRESSURE_WATER_MARK_LOW = "topology.backpressure.water.mark.low";
+
+    public static double getBackpressureWaterMarkLow(Map conf) {
+        return JStormUtils.parseDouble(conf.get(TOPOLOGY_BACKPRESSURE_WATER_MARK_LOW), 0.05);
+    }
+
+    protected static String TOPOLOGY_BACKPRESSURE_CHECK_INTERVAL = "topology.backpressure.check.interval";
+
+    public static int getBackpressureCheckIntervl(Map conf) {
+        return JStormUtils.parseInt(conf.get(TOPOLOGY_BACKPRESSURE_CHECK_INTERVAL), 1000);
+    }
+
+    protected static String TOPOLOGY_BACKPRESSURE_TRIGGER_SAMPLE_NUMBER = "topology.backpressure.trigger.sample.number";
+
+    public static int getBackpressureTriggerSampleNumber(Map conf) {
+        return JStormUtils.parseInt(conf.get(TOPOLOGY_BACKPRESSURE_TRIGGER_SAMPLE_NUMBER), 4);
+    }
+
+    protected static String TOPOLOGY_BACKPRESSURE_TRIGGER_SAMPLE_RATE = "topology.backpressure.trigger.sample.rate";
+
+    public static double getBackpressureTriggerSampleRate(Map conf) {
+        return JStormUtils.parseDouble(conf.get(TOPOLOGY_BACKPRESSURE_TRIGGER_SAMPLE_RATE), 0.75);
+    }
+
+    public static String TOPOLOGY_BACKPRESSURE_ENABLE = "topology.backpressure.enable";
+
+    public static boolean isBackpressureEnable(Map conf) {
+        return JStormUtils.parseBoolean(conf.get(TOPOLOGY_BACKPRESSURE_ENABLE), false);
+    }
+
+    public static String TOPOLOGY_BACKPRESSURE_COORDINATOR_RATIO = "topology.backpressure.coordinator.trigger.ratio";
+
+    public static double getBackpressureCoordinatorRatio(Map conf) {
+        return JStormUtils.parseDouble(conf.get(TOPOLOGY_BACKPRESSURE_COORDINATOR_RATIO), 0.1);
+    }
+
+    protected static String SUPERVISOR_CHECK_WORKER_BY_SYSTEM_INFO = "supervisor.check.worker.by.system.info";
+
+    public static boolean isCheckWorkerAliveBySystemInfo(Map conf) {
+        return JStormUtils.parseBoolean(conf.get(SUPERVISOR_CHECK_WORKER_BY_SYSTEM_INFO), true);
+    }
+
+    protected static String TOPOLOGY_TASK_HEARTBEAT_SEND_NUMBER = "topology.task.heartbeat.send.number";
+
+    public static int getTopologyTaskHbSendNumber(Map conf) {
+        return JStormUtils.parseInt(conf.get(TOPOLOGY_TASK_HEARTBEAT_SEND_NUMBER), 2000);
+    }
+}
\ No newline at end of file
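For reference, the hunks above add a per-worker minimum memory size ("worker.memory.min.size") next to the existing maximum, raise the default netty max send pending from 4 to 16, and introduce a memory weight for slot scheduling. Below is a minimal sketch of how a topology might set the new memory bounds; the import paths and the 4 GB / 2 GB values are illustrative assumptions, not part of this commit:

    import java.util.HashMap;
    import java.util.Map;

    import com.alibaba.jstorm.client.ConfigExtension;
    import com.alibaba.jstorm.utils.JStormUtils;

    public class WorkerMemoryConfigSketch {
        public static void main(String[] args) {
            Map conf = new HashMap();

            // Upper bound: each worker may use up to 4 GB.
            ConfigExtension.setMemSizePerWorker(conf, 4 * JStormUtils.SIZE_1_G);

            // New lower bound; getMemMinSizePerWorker() falls back to the
            // upper bound when this key is unset or zero.
            ConfigExtension.setMemMinSizePerWorker(conf, 2 * JStormUtils.SIZE_1_G);

            // One CPU slot per worker (the default).
            ConfigExtension.setCpuSlotNumPerWorker(conf, 1);

            // Prints the 2 GB minimum in bytes.
            System.out.println(ConfigExtension.getMemMinSizePerWorker(conf));
        }
    }

Note that getMemMinSizePerWorker() does no clamping against the maximum; a minimum larger than the maximum is returned as-is.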

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/client/WorkerAssignment.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/client/WorkerAssignment.java b/jstorm-core/src/main/java/com/alibaba/jstorm/client/WorkerAssignment.java
index c994858..545a5f4 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/client/WorkerAssignment.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/client/WorkerAssignment.java
@@ -33,15 +33,12 @@ import backtype.storm.utils.Utils;
 
 import com.alibaba.jstorm.utils.JStormUtils;
 
-public class WorkerAssignment extends WorkerSlot implements Serializable,
-        JSONAware {
-    private static final Logger LOG = LoggerFactory
-            .getLogger(WorkerAssignment.class);
+public class WorkerAssignment extends WorkerSlot implements Serializable, JSONAware {
+    private static final Logger LOG = LoggerFactory.getLogger(WorkerAssignment.class);
 
     private static final long serialVersionUID = -3483047434535537861L;
 
-    private Map<String, Integer> componentToNum =
-            new HashMap<String, Integer>();
+    private Map<String, Integer> componentToNum = new HashMap<String, Integer>();
 
     private long mem;
 
@@ -165,9 +162,7 @@ public class WorkerAssignment extends WorkerSlot implements Serializable,
             String jvm = map.get(JVM_TAG);
             Long mem = JStormUtils.parseLong(map.get(MEM_TAG));
             Integer cpu = JStormUtils.parseInt(map.get(CPU_TAG));
-            Map<String, Object> componentToNum =
-                    (Map<String, Object>) Utils.from_json(map
-                            .get(COMPONENTTONUM_TAG));
+            Map<String, Object> componentToNum = (Map<String, Object>) Utils.from_json(map.get(COMPONENTTONUM_TAG));
 
             WorkerAssignment ret = new WorkerAssignment(supervisorId, port);
 
@@ -185,8 +180,7 @@ public class WorkerAssignment extends WorkerSlot implements Serializable,
             }
 
             for (Entry<String, Object> entry : componentToNum.entrySet()) {
-                ret.addComponent(entry.getKey(),
-                        JStormUtils.parseInt(entry.getValue()));
+                ret.addComponent(entry.getKey(), JStormUtils.parseInt(entry.getValue()));
             }
             return ret;
         } catch (Exception e) {
@@ -202,22 +196,16 @@ public class WorkerAssignment extends WorkerSlot implements Serializable,
 
     @Override
     public String toString() {
-        return ToStringBuilder.reflectionToString(this,
-                ToStringStyle.SHORT_PREFIX_STYLE);
+        return ToStringBuilder.reflectionToString(this, ToStringStyle.SHORT_PREFIX_STYLE);
     }
 
     @Override
     public int hashCode() {
         final int prime = 31;
         int result = super.hashCode();
-        result =
-                prime
-                        * result
-                        + ((componentToNum == null) ? 0 : componentToNum
-                                .hashCode());
+        result = prime * result + ((componentToNum == null) ? 0 : componentToNum.hashCode());
         result = prime * result + cpu;
-        result =
-                prime * result + ((hostName == null) ? 0 : hostName.hashCode());
+        result = prime * result + ((hostName == null) ? 0 : hostName.hashCode());
         result = prime * result + ((jvm == null) ? 0 : jvm.hashCode());
         result = prime * result + (int) (mem ^ (mem >>> 32));
         return result;
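The WorkerAssignment changes above are formatting-only, but the class is the unit of JStorm's user-defined scheduling: each instance is serialized to JSON and stored in the topology configuration. A hedged sketch of wiring one up follows; the supervisor id, port, and component names are made-up placeholders, and the import paths are assumed:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    import com.alibaba.jstorm.client.ConfigExtension;
    import com.alibaba.jstorm.client.WorkerAssignment;

    public class UserDefineAssignmentSketch {
        public static void main(String[] args) {
            // Pin one worker to a known supervisor/port and declare how many
            // tasks of each component it should host.
            WorkerAssignment worker = new WorkerAssignment("supervisor-id-1", 6800);
            worker.addComponent("my-spout", 1);
            worker.addComponent("my-bolt", 2);

            List<WorkerAssignment> assignments = new ArrayList<WorkerAssignment>();
            assignments.add(worker);

            Map conf = new HashMap();
            // Serializes each assignment to JSON under "use.userdefine.assignment";
            // the scheduler parses it back via WorkerAssignment.parseFromObj().
            ConfigExtension.setUserDefineAssignment(conf, assignments);
        }
    }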

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/client/spout/ConfigExtension.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/client/spout/ConfigExtension.java b/jstorm-core/src/main/java/com/alibaba/jstorm/client/spout/ConfigExtension.java
new file mode 100644
index 0000000..ab80c10
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/client/spout/ConfigExtension.java
@@ -0,0 +1,943 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.client.spout;
+
+import backtype.storm.Config;
+import backtype.storm.utils.Utils;
+import com.alibaba.jstorm.client.WorkerAssignment;
+import com.alibaba.jstorm.utils.JStormUtils;
+import org.apache.commons.lang.StringUtils;
+
+import java.security.InvalidParameterException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+public class ConfigExtension {
+    /**
+     * if this configuration is set, the spout or bolt will log all received tuples
+     * <p/>
+     * topology.debug only logs sent tuples
+     */
+    protected static final String TOPOLOGY_DEBUG_RECV_TUPLE = "topology.debug.recv.tuple";
+
+    public static void setTopologyDebugRecvTuple(Map conf, boolean debug) {
+        conf.put(TOPOLOGY_DEBUG_RECV_TUPLE, Boolean.valueOf(debug));
+    }
+
+    public static Boolean isTopologyDebugRecvTuple(Map conf) {
+        return JStormUtils.parseBoolean(conf.get(TOPOLOGY_DEBUG_RECV_TUPLE), false);
+    }
+
+    private static final String TOPOLOGY_ENABLE_METRIC_DEBUG = "topology.enable.metric.debug";
+
+    public static boolean isEnableMetricDebug(Map conf) {
+        return JStormUtils.parseBoolean(conf.get(TOPOLOGY_ENABLE_METRIC_DEBUG), false);
+    }
+
+    private static final String TOPOLOGY_DEBUG_METRIC_NAMES = "topology.debug.metric.names";
+
+    public static String getDebugMetricNames(Map conf) {
+        String metrics = (String) conf.get(TOPOLOGY_DEBUG_METRIC_NAMES);
+        if (metrics == null) {
+            return "";
+        }
+        return metrics;
+    }
+
+    /**
+     * metrics switch, ONLY for performance tests; DO NOT set it to false in production
+     */
+    private static final String TOPOLOGY_ENABLE_METRICS = "topology.enable.metrics";
+
+    public static boolean isEnableMetrics(Map conf) {
+        return JStormUtils.parseBoolean(conf.get(TOPOLOGY_ENABLE_METRICS), true);
+    }
+
+    /**
+     * port number of the daemon HTTP server
+     */
+    private static final Integer DEFAULT_DEAMON_HTTPSERVER_PORT = 7621;
+
+    protected static final String SUPERVISOR_DEAMON_HTTPSERVER_PORT = "supervisor.deamon.logview.port";
+
+    public static Integer getSupervisorDeamonHttpserverPort(Map conf) {
+        return JStormUtils.parseInt(conf.get(SUPERVISOR_DEAMON_HTTPSERVER_PORT), DEFAULT_DEAMON_HTTPSERVER_PORT + 1);
+    }
+
+    protected static final String NIMBUS_DEAMON_HTTPSERVER_PORT = "nimbus.deamon.logview.port";
+
+    public static Integer getNimbusDeamonHttpserverPort(Map conf) {
+        return JStormUtils.parseInt(conf.get(NIMBUS_DEAMON_HTTPSERVER_PORT), DEFAULT_DEAMON_HTTPSERVER_PORT);
+    }
+
+    /**
+     * Worker GC parameter
+     */
+    protected static final String WORKER_GC_CHILDOPTS = "worker.gc.childopts";
+
+    public static void setWorkerGc(Map conf, String gc) {
+        conf.put(WORKER_GC_CHILDOPTS, gc);
+    }
+
+    public static String getWorkerGc(Map conf) {
+        return (String) conf.get(WORKER_GC_CHILDOPTS);
+    }
+
+    protected static final String WOREKER_REDIRECT_OUTPUT = "worker.redirect.output";
+
+    public static boolean getWorkerRedirectOutput(Map conf) {
+        Object result = conf.get(WOREKER_REDIRECT_OUTPUT);
+        if (result == null)
+            return true;
+        return (Boolean) result;
+    }
+
+    protected static final String WOREKER_REDIRECT_OUTPUT_FILE = "worker.redirect.output.file";
+
+    public static void setWorkerRedirectOutputFile(Map conf, String outputPath) {
+        conf.put(WOREKER_REDIRECT_OUTPUT_FILE, outputPath);
+    }
+
+    public static String getWorkerRedirectOutputFile(Map conf) {
+        return (String) conf.get(WOREKER_REDIRECT_OUTPUT_FILE);
+    }
+
+    /**
+     * Usually, a spout finishes preparing before the bolts do, so the spout needs to wait several seconds until the bolts finish preparation
+     * <p/>
+     * By default, the delay is 30 seconds
+     */
+    protected static final String SPOUT_DELAY_RUN = "spout.delay.run";
+
+    public static void setSpoutDelayRunSeconds(Map conf, int delay) {
+        conf.put(SPOUT_DELAY_RUN, Integer.valueOf(delay));
+    }
+
+    public static int getSpoutDelayRunSeconds(Map conf) {
+        return JStormUtils.parseInt(conf.get(SPOUT_DELAY_RUN), 30);
+    }
+
+    /**
+     * Default ZMQ Pending queue size
+     */
+    public static final int DEFAULT_ZMQ_MAX_QUEUE_MSG = 1000;
+
+    /**
+     * How many memory slots one task will allocate; the default is 1
+     */
+    protected static final String MEM_SLOTS_PER_TASK = "memory.slots.per.task";
+
+    @Deprecated
+    public static void setMemSlotPerTask(Map conf, int slotNum) {
+        if (slotNum < 1) {
+            throw new InvalidParameterException();
+        }
+        conf.put(MEM_SLOTS_PER_TASK, Integer.valueOf(slotNum));
+    }
+
+    /**
+     * How many CPU slots one task will use; the default is 1
+     */
+    protected static final String CPU_SLOTS_PER_TASK = "cpu.slots.per.task";
+
+    @Deprecated
+    public static void setCpuSlotsPerTask(Map conf, int slotNum) {
+        if (slotNum < 1) {
+            throw new InvalidParameterException();
+        }
+        conf.put(CPU_SLOTS_PER_TASK, Integer.valueOf(slotNum));
+    }
+
+    /**
+     * if this setting is set, the component's tasks must run on different nodes. This conflicts with USE_SINGLE_NODE
+     */
+    protected static final String TASK_ON_DIFFERENT_NODE = "task.on.differ.node";
+
+    public static void setTaskOnDifferentNode(Map conf, boolean isIsolate) {
+        conf.put(TASK_ON_DIFFERENT_NODE, Boolean.valueOf(isIsolate));
+    }
+
+    public static boolean isTaskOnDifferentNode(Map conf) {
+        return JStormUtils.parseBoolean(conf.get(TASK_ON_DIFFERENT_NODE), false);
+    }
+
+    protected static final String SUPERVISOR_ENABLE_CGROUP = "supervisor.enable.cgroup";
+
+    public static boolean isEnableCgroup(Map conf) {
+        return JStormUtils.parseBoolean(conf.get(SUPERVISOR_ENABLE_CGROUP), false);
+    }
+
+    /**
+     * If the component or topology configuration sets "use.old.assignment", the old assignment is tried first
+     */
+    protected static final String USE_OLD_ASSIGNMENT = "use.old.assignment";
+
+    public static void setUseOldAssignment(Map conf, boolean useOld) {
+        conf.put(USE_OLD_ASSIGNMENT, Boolean.valueOf(useOld));
+    }
+
+    public static boolean isUseOldAssignment(Map conf) {
+        return JStormUtils.parseBoolean(conf.get(USE_OLD_ASSIGNMENT), false);
+    }
+
+    /**
+     * The supervisor's hostname
+     */
+    protected static final String SUPERVISOR_HOSTNAME = "supervisor.hostname";
+    public static final Object SUPERVISOR_HOSTNAME_SCHEMA = String.class;
+
+    public static String getSupervisorHost(Map conf) {
+        return (String) conf.get(SUPERVISOR_HOSTNAME);
+    }
+
+    protected static final String SUPERVISOR_USE_IP = "supervisor.use.ip";
+
+    public static boolean isSupervisorUseIp(Map conf) {
+        return JStormUtils.parseBoolean(conf.get(SUPERVISOR_USE_IP), false);
+    }
+
+    protected static final String NIMBUS_USE_IP = "nimbus.use.ip";
+
+    public static boolean isNimbusUseIp(Map conf) {
+        return JStormUtils.parseBoolean(conf.get(NIMBUS_USE_IP), false);
+    }
+
+    protected static final String TOPOLOGY_ENABLE_CLASSLOADER = "topology.enable.classloader";
+
+    public static boolean isEnableTopologyClassLoader(Map conf) {
+        return JStormUtils.parseBoolean(conf.get(TOPOLOGY_ENABLE_CLASSLOADER), false);
+    }
+
+    public static void setEnableTopologyClassLoader(Map conf, boolean enable) {
+        conf.put(TOPOLOGY_ENABLE_CLASSLOADER, Boolean.valueOf(enable));
+    }
+
+    protected static String CLASSLOADER_DEBUG = "classloader.debug";
+
+    public static boolean isEnableClassloaderDebug(Map conf) {
+        return JStormUtils.parseBoolean(conf.get(CLASSLOADER_DEBUG), false);
+    }
+
+    public static void setEnableClassloaderDebug(Map conf, boolean enable) {
+        conf.put(CLASSLOADER_DEBUG, enable);
+    }
+
+    protected static final String CONTAINER_NIMBUS_HEARTBEAT = "container.nimbus.heartbeat";
+
+    /**
+     * Check whether nimbus runs inside an Apsara/Yarn container
+     */
+    public static boolean isEnableContainerNimbus() {
+        String path = System.getenv(CONTAINER_NIMBUS_HEARTBEAT);
+
+        if (StringUtils.isBlank(path)) {
+            return false;
+        } else {
+            return true;
+        }
+    }
+
+    /**
+     * Get the Apsara/Yarn nimbus container's heartbeat dir
+     */
+    public static String getContainerNimbusHearbeat() {
+        return System.getenv(CONTAINER_NIMBUS_HEARTBEAT);
+    }
+
+    protected static final String CONTAINER_SUPERVISOR_HEARTBEAT = "container.supervisor.heartbeat";
+
+    /**
+     * Check whether the supervisor runs inside an Apsara/Yarn supervisor container
+     */
+    public static boolean isEnableContainerSupervisor() {
+        String path = System.getenv(CONTAINER_SUPERVISOR_HEARTBEAT);
+
+        if (StringUtils.isBlank(path)) {
+            return false;
+        } else {
+            return true;
+        }
+    }
+
+    /**
+     * Get the Apsara/Yarn supervisor container's heartbeat dir
+     */
+    public static String getContainerSupervisorHearbeat() {
+        return (String) System.getenv(CONTAINER_SUPERVISOR_HEARTBEAT);
+    }
+
+    protected static final String CONTAINER_HEARTBEAT_TIMEOUT_SECONDS = "container.heartbeat.timeout.seconds";
+
+    public static int getContainerHeartbeatTimeoutSeconds(Map conf) {
+        return JStormUtils.parseInt(conf.get(CONTAINER_HEARTBEAT_TIMEOUT_SECONDS), 240);
+    }
+
+    protected static final String CONTAINER_HEARTBEAT_FREQUENCE = "container.heartbeat.frequence";
+
+    public static int getContainerHeartbeatFrequence(Map conf) {
+        return JStormUtils.parseInt(conf.get(CONTAINER_HEARTBEAT_FREQUENCE), 10);
+    }
+
+    protected static final String JAVA_SANDBOX_ENABLE = "java.sandbox.enable";
+
+    public static boolean isJavaSandBoxEnable(Map conf) {
+        return JStormUtils.parseBoolean(conf.get(JAVA_SANDBOX_ENABLE), false);
+    }
+
+    protected static String SPOUT_SINGLE_THREAD = "spout.single.thread";
+
+    public static boolean isSpoutSingleThread(Map conf) {
+        return JStormUtils.parseBoolean(conf.get(SPOUT_SINGLE_THREAD), false);
+    }
+
+    public static void setSpoutSingleThread(Map conf, boolean enable) {
+        conf.put(SPOUT_SINGLE_THREAD, enable);
+    }
+
+    protected static String WORKER_STOP_WITHOUT_SUPERVISOR = "worker.stop.without.supervisor";
+
+    public static boolean isWorkerStopWithoutSupervisor(Map conf) {
+        return JStormUtils.parseBoolean(conf.get(WORKER_STOP_WITHOUT_SUPERVISOR), false);
+    }
+
+    protected static String CGROUP_ROOT_DIR = "supervisor.cgroup.rootdir";
+
+    public static String getCgroupRootDir(Map conf) {
+        return (String) conf.get(CGROUP_ROOT_DIR);
+    }
+
+    protected static String NETTY_TRANSFER_ASYNC_AND_BATCH = "storm.messaging.netty.transfer.async.batch";
+
+    public static boolean isNettyTransferAsyncBatch(Map conf) {
+        return JStormUtils.parseBoolean(conf.get(NETTY_TRANSFER_ASYNC_AND_BATCH), true);
+    }
+
+    protected static final String USE_USERDEFINE_ASSIGNMENT = "use.userdefine.assignment";
+
+    public static void setUserDefineAssignment(Map conf, List<WorkerAssignment> userDefines) {
+        List<String> ret = new ArrayList<String>();
+        for (WorkerAssignment worker : userDefines) {
+            ret.add(Utils.to_json(worker));
+        }
+        conf.put(USE_USERDEFINE_ASSIGNMENT, ret);
+    }
+
+    public static List<WorkerAssignment> getUserDefineAssignment(Map conf) {
+        List<WorkerAssignment> ret = new ArrayList<WorkerAssignment>();
+        if (conf.get(USE_USERDEFINE_ASSIGNMENT) == null)
+            return ret;
+        for (String worker : (List<String>) conf.get(USE_USERDEFINE_ASSIGNMENT)) {
+            ret.add(WorkerAssignment.parseFromObj(Utils.from_json(worker)));
+        }
+        return ret;
+    }
+
+    protected static String NETTY_PENDING_BUFFER_TIMEOUT = "storm.messaging.netty.pending.buffer.timeout";
+
+    public static void setNettyPendingBufferTimeout(Map conf, Long timeout) {
+        conf.put(NETTY_PENDING_BUFFER_TIMEOUT, timeout);
+    }
+
+    public static long getNettyPendingBufferTimeout(Map conf) {
+        int messageTimeout = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS), 120);
+        return JStormUtils.parseLong(conf.get(NETTY_PENDING_BUFFER_TIMEOUT), messageTimeout * 1000);
+    }
+
+    protected static final String MEMSIZE_PER_WORKER = "worker.memory.size";
+
+    public static void setMemSizePerWorker(Map conf, long memSize) {
+        conf.put(MEMSIZE_PER_WORKER, memSize);
+    }
+
+    public static void setMemSizePerWorkerByKB(Map conf, long memSize) {
+        long size = memSize * 1024l;
+        setMemSizePerWorker(conf, size);
+    }
+
+    public static void setMemSizePerWorkerByMB(Map conf, long memSize) {
+        long size = memSize * 1024l;
+        setMemSizePerWorkerByKB(conf, size);
+    }
+
+    public static void setMemSizePerWorkerByGB(Map conf, long memSize) {
+        long size = memSize * 1024l;
+        setMemSizePerWorkerByMB(conf, size);
+    }
+
+    public static long getMemSizePerWorker(Map conf) {
+        long size = JStormUtils.parseLong(conf.get(MEMSIZE_PER_WORKER), JStormUtils.SIZE_1_G * 2);
+        return size > 0 ? size : JStormUtils.SIZE_1_G * 2;
+    }
+
+    protected static final String MIN_MEMSIZE_PER_WORKER = "worker.memory.min.size";
+
+    public static void setMemMinSizePerWorker(Map conf, long memSize) {
+        conf.put(MIN_MEMSIZE_PER_WORKER, memSize);
+    }
+
+    public static long getMemMinSizePerWorker(Map conf) {
+        long maxMemSize = getMemSizePerWorker(conf);
+
+        Long size = JStormUtils.parseLong(conf.get(MIN_MEMSIZE_PER_WORKER));
+        long minMemSize = (size == null || size == 0) ? maxMemSize : size;
+
+        return minMemSize;
+    }
+
+    protected static final String CPU_SLOT_PER_WORKER = "worker.cpu.slot.num";
+
+    public static void setCpuSlotNumPerWorker(Map conf, int slotNum) {
+        conf.put(CPU_SLOT_PER_WORKER, slotNum);
+    }
+
+    public static int getCpuSlotPerWorker(Map conf) {
+        int slot = JStormUtils.parseInt(conf.get(CPU_SLOT_PER_WORKER), 1);
+        return slot > 0 ? slot : 1;
+    }
+
+    protected static String TOPOLOGY_PERFORMANCE_METRICS = "topology.performance.metrics";
+
+    public static boolean isEnablePerformanceMetrics(Map conf) {
+        return JStormUtils.parseBoolean(conf.get(TOPOLOGY_PERFORMANCE_METRICS), true);
+    }
+
+    public static void setPerformanceMetrics(Map conf, boolean isEnable) {
+        conf.put(TOPOLOGY_PERFORMANCE_METRICS, isEnable);
+    }
+
+    protected static String NETTY_BUFFER_THRESHOLD_SIZE = "storm.messaging.netty.buffer.threshold";
+
+    public static long getNettyBufferThresholdSize(Map conf) {
+        return JStormUtils.parseLong(conf.get(NETTY_BUFFER_THRESHOLD_SIZE), 8 * JStormUtils.SIZE_1_M);
+    }
+
+    public static void setNettyBufferThresholdSize(Map conf, long size) {
+        conf.put(NETTY_BUFFER_THRESHOLD_SIZE, size);
+    }
+
+    protected static String NETTY_MAX_SEND_PENDING = "storm.messaging.netty.max.pending";
+
+    public static void setNettyMaxSendPending(Map conf, long pending) {
+        conf.put(NETTY_MAX_SEND_PENDING, pending);
+    }
+
+    public static long getNettyMaxSendPending(Map conf) {
+        return JStormUtils.parseLong(conf.get(NETTY_MAX_SEND_PENDING), 16);
+    }
+
+    protected static String DISRUPTOR_USE_SLEEP = "disruptor.use.sleep";
+
+    public static boolean isDisruptorUseSleep(Map conf) {
+        return JStormUtils.parseBoolean(conf.get(DISRUPTOR_USE_SLEEP), true);
+    }
+
+    public static void setDisruptorUseSleep(Map conf, boolean useSleep) {
+        conf.put(DISRUPTOR_USE_SLEEP, useSleep);
+    }
+
+    public static boolean isTopologyContainAcker(Map conf) {
+        int num = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_ACKER_EXECUTORS), 1);
+        if (num > 0) {
+            return true;
+        } else {
+            return false;
+        }
+    }
+
+    protected static String NETTY_SYNC_MODE = "storm.messaging.netty.sync.mode";
+
+    public static boolean isNettySyncMode(Map conf) {
+        return JStormUtils.parseBoolean(conf.get(NETTY_SYNC_MODE), false);
+    }
+
+    public static void setNettySyncMode(Map conf, boolean sync) {
+        conf.put(NETTY_SYNC_MODE, sync);
+    }
+
+    protected static String NETTY_ASYNC_BLOCK = "storm.messaging.netty.async.block";
+
+    public static boolean isNettyASyncBlock(Map conf) {
+        return JStormUtils.parseBoolean(conf.get(NETTY_ASYNC_BLOCK), true);
+    }
+
+    public static void setNettyASyncBlock(Map conf, boolean block) {
+        conf.put(NETTY_ASYNC_BLOCK, block);
+    }
+
+    protected static String ALIMONITOR_METRICS_POST = "topology.alimonitor.metrics.post";
+
+    public static boolean isAlimonitorMetricsPost(Map conf) {
+        return JStormUtils.parseBoolean(conf.get(ALIMONITOR_METRICS_POST), true);
+    }
+
+    public static void setAlimonitorMetricsPost(Map conf, boolean post) {
+        conf.put(ALIMONITOR_METRICS_POST, post);
+    }
+
+    public static String TASK_CLEANUP_TIMEOUT_SEC = "task.cleanup.timeout.sec";
+
+    public static int getTaskCleanupTimeoutSec(Map conf) {
+        return JStormUtils.parseInt(conf.get(TASK_CLEANUP_TIMEOUT_SEC), 10);
+    }
+
+    public static void setTaskCleanupTimeoutSec(Map conf, int timeout) {
+        conf.put(TASK_CLEANUP_TIMEOUT_SEC, timeout);
+    }
+
+    protected static String UI_CLUSTERS = "ui.clusters";
+    protected static String UI_CLUSTER_NAME = "name";
+    protected static String UI_CLUSTER_ZK_ROOT = "zkRoot";
+    protected static String UI_CLUSTER_ZK_SERVERS = "zkServers";
+    protected static String UI_CLUSTER_ZK_PORT = "zkPort";
+
+    public static List<Map> getUiClusters(Map conf) {
+        return (List<Map>) conf.get(UI_CLUSTERS);
+    }
+
+    public static void setUiClusters(Map conf, List<Map> uiClusters) {
+        conf.put(UI_CLUSTERS, uiClusters);
+    }
+
+    public static Map getUiClusterInfo(List<Map> uiClusters, String name) {
+        Map ret = null;
+        for (Map cluster : uiClusters) {
+            String clusterName = getUiClusterName(cluster);
+            if (clusterName.equals(name)) {
+                ret = cluster;
+                break;
+            }
+        }
+
+        return ret;
+    }
+
+    public static String getUiClusterName(Map uiCluster) {
+        return (String) uiCluster.get(UI_CLUSTER_NAME);
+    }
+
+    public static String getUiClusterZkRoot(Map uiCluster) {
+        return (String) uiCluster.get(UI_CLUSTER_ZK_ROOT);
+    }
+
+    public static List<String> getUiClusterZkServers(Map uiCluster) {
+        return (List<String>) uiCluster.get(UI_CLUSTER_ZK_SERVERS);
+    }
+
+    public static Integer getUiClusterZkPort(Map uiCluster) {
+        return JStormUtils.parseInt(uiCluster.get(UI_CLUSTER_ZK_PORT));
+    }
+
+
+    protected static String SPOUT_PEND_FULL_SLEEP = "spout.pending.full.sleep";
+
+    public static boolean isSpoutPendFullSleep(Map conf) {
+        return JStormUtils.parseBoolean(conf.get(SPOUT_PEND_FULL_SLEEP), false);
+    }
+
+    public static void setSpoutPendFullSleep(Map conf, boolean sleep) {
+        conf.put(SPOUT_PEND_FULL_SLEEP, sleep);
+
+    }
+
+    protected static String LOGVIEW_ENCODING = "supervisor.deamon.logview.encoding";
+    protected static String UTF8 = "utf-8";
+
+    public static String getLogViewEncoding(Map conf) {
+        String ret = (String) conf.get(LOGVIEW_ENCODING);
+        if (ret == null)
+            ret = UTF8;
+        return ret;
+    }
+
+    public static void setLogViewEncoding(Map conf, String enc) {
+        conf.put(LOGVIEW_ENCODING, enc);
+    }
+
+    protected static String LOG_PAGE_SIZE = "log.page.size";
+
+    public static int getLogPageSize(Map conf) {
+        return JStormUtils.parseInt(conf.get(LOG_PAGE_SIZE), 32 * 1024);
+    }
+
+    public static void setLogPageSize(Map conf, int pageSize) {
+        conf.put(LOG_PAGE_SIZE, pageSize);
+    }
+
+    public static String TASK_STATUS_ACTIVE = "Active";
+    public static String TASK_STATUS_INACTIVE = "Inactive";
+    public static String TASK_STATUS_STARTING = "Starting";
+
+    protected static String ALIMONITOR_TOPO_METIRC_NAME = "topology.alimonitor.topo.metrics.name";
+    protected static String ALIMONITOR_TASK_METIRC_NAME = "topology.alimonitor.task.metrics.name";
+    protected static String ALIMONITOR_WORKER_METIRC_NAME = "topology.alimonitor.worker.metrics.name";
+    protected static String ALIMONITOR_USER_METIRC_NAME = "topology.alimonitor.user.metrics.name";
+
+    public static String getAlmonTopoMetricName(Map conf) {
+        return (String) conf.get(ALIMONITOR_TOPO_METIRC_NAME);
+    }
+
+    public static String getAlmonTaskMetricName(Map conf) {
+        return (String) conf.get(ALIMONITOR_TASK_METIRC_NAME);
+    }
+
+    public static String getAlmonWorkerMetricName(Map conf) {
+        return (String) conf.get(ALIMONITOR_WORKER_METIRC_NAME);
+    }
+
+    public static String getAlmonUserMetricName(Map conf) {
+        return (String) conf.get(ALIMONITOR_USER_METIRC_NAME);
+    }
+
+    protected static String SPOUT_PARALLELISM = "topology.spout.parallelism";
+    protected static String BOLT_PARALLELISM = "topology.bolt.parallelism";
+
+    public static Integer getSpoutParallelism(Map conf, String componentName) {
+        Integer ret = null;
+        Map<String, String> map = (Map<String, String>) (conf.get(SPOUT_PARALLELISM));
+        if (map != null)
+            ret = JStormUtils.parseInt(map.get(componentName));
+        return ret;
+    }
+
+    public static Integer getBoltParallelism(Map conf, String componentName) {
+        Integer ret = null;
+        Map<String, String> map = (Map<String, String>) (conf.get(BOLT_PARALLELISM));
+        if (map != null)
+            ret = JStormUtils.parseInt(map.get(componentName));
+        return ret;
+    }
+
+    protected static String TOPOLOGY_BUFFER_SIZE_LIMITED = "topology.buffer.size.limited";
+
+    public static void setTopologyBufferSizeLimited(Map conf, boolean limited) {
+        conf.put(TOPOLOGY_BUFFER_SIZE_LIMITED, limited);
+    }
+
+    public static boolean getTopologyBufferSizeLimited(Map conf) {
+        boolean isSynchronized = isNettySyncMode(conf);
+        if (isSynchronized == true) {
+            return true;
+        }
+
+        return JStormUtils.parseBoolean(conf.get(TOPOLOGY_BUFFER_SIZE_LIMITED), true);
+
+    }
+
+    protected static String SUPERVISOR_SLOTS_PORTS_BASE = "supervisor.slots.ports.base";
+
+    public static int getSupervisorSlotsPortsBase(Map conf) {
+        return JStormUtils.parseInt(conf.get(SUPERVISOR_SLOTS_PORTS_BASE), 6800);
+    }
+
+    // SUPERVISOR_SLOTS_PORTS_BASE does not provide a setter; it must be
+    // set via configuration
+
+    protected static String SUPERVISOR_SLOTS_PORT_CPU_WEIGHT = "supervisor.slots.port.cpu.weight";
+
+    public static double getSupervisorSlotsPortCpuWeight(Map conf) {
+        Object value = conf.get(SUPERVISOR_SLOTS_PORT_CPU_WEIGHT);
+        Double ret = JStormUtils.convertToDouble(value);
+        if (ret == null || ret <= 0) {
+            return 1.2;
+        } else {
+            return ret;
+        }
+    }
+    
+    protected static String SUPERVISOR_SLOTS_PORT_MEM_WEIGHT = "supervisor.slots.port.mem.weight";
+
+    public static double getSupervisorSlotsPortMemWeight(Map conf) {
+        Object value = conf.get(SUPERVISOR_SLOTS_PORT_MEM_WEIGHT);
+        Double ret = JStormUtils.convertToDouble(value);
+        if (ret == null || ret <= 0) {
+            return 0.7;
+        } else {
+            return ret;
+        }
+    }
+
+    // SUPERVISOR_SLOTS_PORT_CPU_WEIGHT does not provide a setter; it must
+    // be set via configuration
+
+    protected static String USER_DEFINED_LOG4J_CONF = "user.defined.log4j.conf";
+
+    public static String getUserDefinedLog4jConf(Map conf) {
+        return (String) conf.get(USER_DEFINED_LOG4J_CONF);
+    }
+
+    public static void setUserDefinedLog4jConf(Map conf, String fileName) {
+        conf.put(USER_DEFINED_LOG4J_CONF, fileName);
+    }
+
+    protected static String USER_DEFINED_LOGBACK_CONF = "user.defined.logback.conf";
+
+    public static String getUserDefinedLogbackConf(Map conf) {
+        return (String) conf.get(USER_DEFINED_LOGBACK_CONF);
+    }
+
+    public static void setUserDefinedLogbackConf(Map conf, String fileName) {
+        conf.put(USER_DEFINED_LOGBACK_CONF, fileName);
+    }
+
+    protected static String TASK_ERROR_INFO_REPORT_INTERVAL = "topology.task.error.report.interval";
+
+    public static Integer getTaskErrorReportInterval(Map conf) {
+        return JStormUtils.parseInt(conf.get(TASK_ERROR_INFO_REPORT_INTERVAL), 60);
+    }
+
+    public static void setTaskErrorReportInterval(Map conf, Integer interval) {
+        conf.put(TASK_ERROR_INFO_REPORT_INTERVAL, interval);
+    }
+
+    protected static String DEFAULT_CACHE_TIMEOUT = "default.cache.timeout";
+
+    public static int getDefaultCacheTimeout(Map conf) {
+        return JStormUtils.parseInt(conf.get(DEFAULT_CACHE_TIMEOUT), 60);
+    }
+
+    public static void setDefaultCacheTimeout(Map conf, int timeout) {
+        conf.put(DEFAULT_CACHE_TIMEOUT, timeout);
+    }
+
+    protected static String WORKER_MERTRIC_REPORT_CHECK_FREQUENCY = "worker.metric.report.frequency.secs";
+
+    public static int getWorkerMetricReportCheckFrequency(Map conf) {
+        return JStormUtils.parseInt(conf.get(WORKER_MERTRIC_REPORT_CHECK_FREQUENCY), 60);
+    }
+
+    public static void setWorkerMetricReportFrequency(Map conf, int frequence) {
+        conf.put(WORKER_MERTRIC_REPORT_CHECK_FREQUENCY, frequence);
+    }
+
+    /**
+     * Store the local worker port/workerId/supervisorId in the configuration
+     */
+    protected static String LOCAL_WORKER_PORT = "local.worker.port";
+    protected static String LOCLA_WORKER_ID = "local.worker.id";
+    protected static String LOCAL_SUPERVISOR_ID = "local.supervisor.id";
+
+    public static int getLocalWorkerPort(Map conf) {
+        return JStormUtils.parseInt(conf.get(LOCAL_WORKER_PORT));
+    }
+
+    public static void setLocalWorkerPort(Map conf, int port) {
+        conf.put(LOCAL_WORKER_PORT, port);
+    }
+
+    public static String getLocalWorkerId(Map conf) {
+        return (String) conf.get(LOCLA_WORKER_ID);
+    }
+
+    public static void setLocalWorkerId(Map conf, String workerId) {
+        conf.put(LOCLA_WORKER_ID, workerId);
+    }
+
+    public static String getLocalSupervisorId(Map conf) {
+        return (String) conf.get(LOCAL_SUPERVISOR_ID);
+    }
+
+    public static void setLocalSupervisorId(Map conf, String supervisorId) {
+        conf.put(LOCAL_SUPERVISOR_ID, supervisorId);
+    }
+
+    protected static String WORKER_CPU_CORE_UPPER_LIMIT = "worker.cpu.core.upper.limit";
+
+    public static Integer getWorkerCpuCoreUpperLimit(Map conf) {
+        return JStormUtils.parseInt(conf.get(WORKER_CPU_CORE_UPPER_LIMIT), 1);
+    }
+
+    public static void setWorkerCpuCoreUpperLimit(Map conf, Integer cpuUpperLimit) {
+        conf.put(WORKER_CPU_CORE_UPPER_LIMIT, cpuUpperLimit);
+    }
+
+
+    protected static String CLUSTER_NAME = "cluster.name";
+
+    public static String getClusterName(Map conf) {
+        return (String) conf.get(CLUSTER_NAME);
+    }
+
+    public static void setClusterName(Map conf, String clusterName) {
+        conf.put(CLUSTER_NAME, clusterName);
+    }
+
+
+    protected static final String NIMBUS_CACHE_CLASS = "nimbus.cache.class";
+
+    public static String getNimbusCacheClass(Map conf) {
+        return (String) conf.get(NIMBUS_CACHE_CLASS);
+    }
+
+    /**
+     * if this is set, the nimbus cache DB will be cleaned when nimbus starts
+     */
+    protected static final String NIMBUS_CACHE_RESET = "nimbus.cache.reset";
+
+    public static boolean getNimbusCacheReset(Map conf) {
+        return JStormUtils.parseBoolean(conf.get(NIMBUS_CACHE_RESET), true);
+    }
+
+    /**
+     * if this is set, the nimbus metrics cache DB will be cleaned when nimbus starts
+     */
+    protected static final String NIMBUS_METRIC_CACHE_RESET = "nimbus.metric.cache.reset";
+
+    public static boolean getMetricCacheReset(Map conf) {
+        return JStormUtils.parseBoolean(conf.get(NIMBUS_METRIC_CACHE_RESET), false);
+    }
+
+    public static final double DEFAULT_METRIC_SAMPLE_RATE = 0.10d;
+
+    public static final String TOPOLOGY_METRIC_SAMPLE_RATE = "topology.metric.sample.rate";
+
+    public static double getMetricSampleRate(Map conf) {
+        double sampleRate = JStormUtils.parseDouble(conf.get(TOPOLOGY_METRIC_SAMPLE_RATE), DEFAULT_METRIC_SAMPLE_RATE);
+        if (!conf.containsKey(TOPOLOGY_METRIC_SAMPLE_RATE)) {
+            conf.put(TOPOLOGY_METRIC_SAMPLE_RATE, sampleRate);
+        }
+        return sampleRate;
+    }
+
+    public static final String CACHE_TIMEOUT_LIST = "cache.timeout.list";
+
+    public static List<Integer> getCacheTimeoutList(Map conf) {
+        return (List<Integer>) conf.get(CACHE_TIMEOUT_LIST);
+    }
+
+    protected static final String NIMBUS_METRICS_THREAD_NUM = "nimbus.metrics.thread.num";
+
+    public static int getNimbusMetricThreadNum(Map conf) {
+        return JStormUtils.parseInt(conf.get(NIMBUS_METRICS_THREAD_NUM), 2);
+    }
+
+    public static final String METRIC_UPLOADER_CLASS = "nimbus.metric.uploader.class";
+
+    public static String getMetricUploaderClass(Map<Object, Object> conf) {
+        return (String) conf.get(METRIC_UPLOADER_CLASS);
+    }
+
+    public static final String METRIC_QUERY_CLIENT_CLASS = "nimbus.metric.query.client.class";
+
+    public static String getMetricQueryClientClass(Map<Object, Object> conf) {
+        return (String) conf.get(METRIC_QUERY_CLIENT_CLASS);
+    }
+
+    protected static String TASK_MSG_BATCH_SIZE = "task.msg.batch.size";
+
+    public static Integer getTaskMsgBatchSize(Map conf) {
+        return JStormUtils.parseInt(conf.get(TASK_MSG_BATCH_SIZE), 1);
+    }
+
+    public static void setTaskMsgBatchSize(Map conf, Integer batchSize) {
+        conf.put(TASK_MSG_BATCH_SIZE, batchSize);
+    }
+
+    protected static String TASK_BATCH_TUPLE = "task.batch.tuple";
+
+    public static Boolean isTaskBatchTuple(Map conf) {
+        return JStormUtils.parseBoolean(conf.get(TASK_BATCH_TUPLE), false);
+    }
+
+    public static void setTaskBatchTuple(Map conf, boolean isBatchTuple) {
+        conf.put(TASK_BATCH_TUPLE, isBatchTuple);
+    }
+
+    protected static String TOPOLOGY_MAX_WORKER_NUM_FOR_NETTY_METRICS = "topology.max.worker.num.for.netty.metrics";
+
+    public static void setTopologyMaxWorkerNumForNettyMetrics(Map conf, int num) {
+        conf.put(TOPOLOGY_MAX_WORKER_NUM_FOR_NETTY_METRICS, num);
+    }
+
+    public static int getTopologyMaxWorkerNumForNettyMetrics(Map conf) {
+        return JStormUtils.parseInt(conf.get(TOPOLOGY_MAX_WORKER_NUM_FOR_NETTY_METRICS), 100);
+    }
+
+    protected static String UI_ONE_TABLE_PAGE_SIZE = "ui.one.table.page.size";
+
+    public static long getUiOneTablePageSize(Map conf) {
+        return JStormUtils.parseLong(conf.get(UI_ONE_TABLE_PAGE_SIZE), 200);
+    }
+
+    protected static String MAX_PENDING_METRIC_NUM = "topology.max.pending.metric.num";
+
+    public static int getMaxPendingMetricNum(Map conf) {
+        return JStormUtils.parseInt(conf.get(MAX_PENDING_METRIC_NUM), 200);
+    }
+
+    protected static String TOPOLOGY_MASTER_SINGLE_WORKER = "topology.master.single.worker";
+
+    public static Boolean getTopologyMasterSingleWorker(Map conf) {
+        Boolean ret = JStormUtils.parseBoolean(conf.get(TOPOLOGY_MASTER_SINGLE_WORKER));
+        return ret;
+    }
+
+    public static String TOPOLOGY_BACKPRESSURE_WATER_MARK_HIGH = "topology.backpressure.water.mark.high";
+
+    public static double getBackpressureWaterMarkHigh(Map conf) {
+        return JStormUtils.parseDouble(conf.get(TOPOLOGY_BACKPRESSURE_WATER_MARK_HIGH), 0.8);
+    }
+
+    public static String TOPOLOGY_BACKPRESSURE_WATER_MARK_LOW = "topology.backpressure.water.mark.low";
+
+    public static double getBackpressureWaterMarkLow(Map conf) {
+        return JStormUtils.parseDouble(conf.get(TOPOLOGY_BACKPRESSURE_WATER_MARK_LOW), 0.05);
+    }
+
+    protected static String TOPOLOGY_BACKPRESSURE_CHECK_INTERVAL = "topology.backpressure.check.interval";
+
+    public static int getBackpressureCheckIntervl(Map conf) {
+        return JStormUtils.parseInt(conf.get(TOPOLOGY_BACKPRESSURE_CHECK_INTERVAL), 1000);
+    }
+
+    protected static String TOPOLOGY_BACKPRESSURE_TRIGGER_SAMPLE_NUMBER = "topology.backpressure.trigger.sample.number";
+
+    public static int getBackpressureTriggerSampleNumber(Map conf) {
+        return JStormUtils.parseInt(conf.get(TOPOLOGY_BACKPRESSURE_TRIGGER_SAMPLE_NUMBER), 4);
+    }
+
+    protected static String TOPOLOGY_BACKPRESSURE_TRIGGER_SAMPLE_RATE = "topology.backpressure.trigger.sample.rate";
+
+    public static double getBackpressureTriggerSampleRate(Map conf) {
+        return JStormUtils.parseDouble(conf.get(TOPOLOGY_BACKPRESSURE_TRIGGER_SAMPLE_RATE), 0.75);
+    }
+
+    public static String TOPOLOGY_BACKPRESSURE_ENABLE = "topology.backpressure.enable";
+
+    public static boolean isBackpressureEnable(Map conf) {
+        return JStormUtils.parseBoolean(conf.get(TOPOLOGY_BACKPRESSURE_ENABLE), false);
+    }
+
+    public static String TOPOLOGY_BACKPRESSURE_COORDINATOR_RATIO = "topology.backpressure.coordinator.trigger.ratio";
+
+    public static double getBackpressureCoordinatorRatio(Map conf) {
+        return JStormUtils.parseDouble(conf.get(TOPOLOGY_BACKPRESSURE_COORDINATOR_RATIO), 0.1);
+    }
+
+    protected static String SUPERVISOR_CHECK_WORKER_BY_SYSTEM_INFO = "supervisor.check.worker.by.system.info";
+
+    public static boolean isCheckWorkerAliveBySystemInfo(Map conf) {
+        return JStormUtils.parseBoolean(conf.get(SUPERVISOR_CHECK_WORKER_BY_SYSTEM_INFO), true);
+    }
+
+    protected static String TOPOLOGY_TASK_HEARTBEAT_SEND_NUMBER = "topology.task.heartbeat.send.number";
+
+    public static int getTopologyTaskHbSendNumber(Map conf) {
+        return JStormUtils.parseInt(conf.get(TOPOLOGY_TASK_HEARTBEAT_SEND_NUMBER), 2000);
+    }
+}
\ No newline at end of file
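
For orientation, a minimal sketch of how the backpressure knobs above might be
set on a topology conf. The key strings are copied verbatim from this patch;
the surrounding submission code is the usual TopologyBuilder boilerplate and is
illustrative only (the check interval is presumably milliseconds):

    TopologyBuilder builder = new TopologyBuilder();
    Map conf = new HashMap();
    // Turn the flow-control loop on (defaults to false, see isBackpressureEnable).
    conf.put("topology.backpressure.enable", true);
    // Throttle once a queue is 80% full; release when it drains below 5%.
    conf.put("topology.backpressure.water.mark.high", 0.8);
    conf.put("topology.backpressure.water.mark.low", 0.05);
    // Re-evaluate the sampled queues once per check interval (default 1000).
    conf.put("topology.backpressure.check.interval", 1000);
    StormSubmitter.submitTopology("demo", conf, builder.createTopology());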

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/client/spout/IAckValueSpout.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/client/spout/IAckValueSpout.java b/jstorm-core/src/main/java/com/alibaba/jstorm/client/spout/IAckValueSpout.java
index df88ad8..01d9da4 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/client/spout/IAckValueSpout.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/client/spout/IAckValueSpout.java
@@ -22,8 +22,7 @@ import java.util.List;
 /**
 * This interface lists the emitted values when a tuple succeeds
  * 
- * if spout implement this interface, spout won't call ISpout.ack() when tuple
- * success
+ * if a spout implements this interface, ISpout.ack() will not be called when a tuple succeeds
  * 
  * @author longda
  */

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/client/spout/IFailValueSpout.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/client/spout/IFailValueSpout.java b/jstorm-core/src/main/java/com/alibaba/jstorm/client/spout/IFailValueSpout.java
index 9bebfa4..8d16aba 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/client/spout/IFailValueSpout.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/client/spout/IFailValueSpout.java
@@ -22,8 +22,7 @@ import java.util.List;
 /**
 * This interface lists the emitted values when a tuple fails
  * 
- * if spout implement this interface, spout won't call ISpout.fail() when tuple
- * fail
+ * if a spout implements this interface, ISpout.fail() will not be called when a tuple fails
  * 
  * @author longda
  */
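
Taken together with IAckValueSpout above, a hedged sketch of a spout that
consumes both callbacks. The value-carrying signatures ack(Object msgId,
List<Object> values) and fail(Object msgId, List<Object> values) are assumed
from the interface descriptions and may differ in detail:

    public class ReplayingSpout extends BaseRichSpout implements IAckValueSpout, IFailValueSpout {
        private SpoutOutputCollector _collector;

        public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
            _collector = collector;
        }

        public void nextTuple() {
            _collector.emit(new Values("payload"), System.nanoTime());
        }

        // Called instead of ISpout.ack(); the emitted values ride along with the id.
        public void ack(Object msgId, List<Object> values) {
        }

        // Called instead of ISpout.fail(); the values make retry trivial because
        // the spout does not have to cache pending tuples itself.
        public void fail(Object msgId, List<Object> values) {
            _collector.emit(values, msgId);
        }

        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            declarer.declare(new Fields("payload"));
        }
    }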


http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/TestEventLogSpout.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/TestEventLogSpout.java b/jstorm-core/src/main/java/backtype/storm/testing/TestEventLogSpout.java
index 1570aeb..aaf6875 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/TestEventLogSpout.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/TestEventLogSpout.java
@@ -36,103 +36,103 @@ import backtype.storm.tuple.Values;
 
 public class TestEventLogSpout extends BaseRichSpout {
     public static Logger LOG = LoggerFactory.getLogger(TestEventLogSpout.class);
-    
+
     private static final Map<String, Integer> acked = new HashMap<String, Integer>();
     private static final Map<String, Integer> failed = new HashMap<String, Integer>();
-    
+
     private String uid;
     private long totalCount;
-    
+
     SpoutOutputCollector _collector;
     private long eventId = 0;
     private long myCount;
     private int source;
-    
+
     public static int getNumAcked(String stormId) {
-        synchronized(acked) {
+        synchronized (acked) {
             return get(acked, stormId, 0);
         }
     }
 
     public static int getNumFailed(String stormId) {
-        synchronized(failed) {
+        synchronized (failed) {
             return get(failed, stormId, 0);
         }
     }
-    
+
     public TestEventLogSpout(long totalCount) {
         this.uid = UUID.randomUUID().toString();
-        
-        synchronized(acked) {
+
+        synchronized (acked) {
             acked.put(uid, 0);
         }
-        synchronized(failed) {
+        synchronized (failed) {
             failed.put(uid, 0);
         }
-        
+
         this.totalCount = totalCount;
     }
-        
+
     public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
         _collector = collector;
         this.source = context.getThisTaskId();
         long taskCount = context.getComponentTasks(context.getThisComponentId()).size();
         myCount = totalCount / taskCount;
     }
-    
+
     public void close() {
-        
+
     }
-    
+
     public void cleanup() {
-        synchronized(acked) {            
+        synchronized (acked) {
             acked.remove(uid);
-        } 
-        synchronized(failed) {            
+        }
+        synchronized (failed) {
             failed.remove(uid);
         }
     }
-    
+
     public boolean completed() {
-        
+
         int ackedAmt;
         int failedAmt;
-        
-        synchronized(acked) {
+
+        synchronized (acked) {
             ackedAmt = acked.get(uid);
         }
-        synchronized(failed) {
+        synchronized (failed) {
             failedAmt = failed.get(uid);
         }
         int totalEmitted = ackedAmt + failedAmt;
-        
+
         if (totalEmitted >= totalCount) {
             return true;
         }
         return false;
     }
-        
+
     public void nextTuple() {
-        if (eventId < myCount) { 
+        if (eventId < myCount) {
             eventId++;
             _collector.emit(new Values(source, eventId), eventId);
-        }        
+        }
     }
-    
+
     public void ack(Object msgId) {
-        synchronized(acked) {
+        synchronized (acked) {
             int curr = get(acked, uid, 0);
-            acked.put(uid, curr+1);
+            acked.put(uid, curr + 1);
         }
     }
 
     public void fail(Object msgId) {
-        synchronized(failed) {
+        synchronized (failed) {
             int curr = get(failed, uid, 0);
-            failed.put(uid, curr+1);
+            failed.put(uid, curr + 1);
         }
     }
-    
+
     public void declareOutputFields(OutputFieldsDeclarer declarer) {
         declarer.declare(new Fields("source", "eventId"));
     }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/TestEventOrderCheckBolt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/TestEventOrderCheckBolt.java b/jstorm-core/src/main/java/backtype/storm/testing/TestEventOrderCheckBolt.java
index 1f80362..8286d0b 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/TestEventOrderCheckBolt.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/TestEventOrderCheckBolt.java
@@ -36,7 +36,7 @@ import backtype.storm.tuple.Values;
 
 public class TestEventOrderCheckBolt extends BaseRichBolt {
     public static Logger LOG = LoggerFactory.getLogger(TestEventOrderCheckBolt.class);
-    
+
     private int _count;
     OutputCollector _collector;
     Map<Integer, Long> recentEventId = new HashMap<Integer, Long>();
@@ -52,8 +52,9 @@ public class TestEventOrderCheckBolt extends BaseRichBolt {
         Long recentEvent = recentEventId.get(sourceId);
 
         if (null != recentEvent && eventId <= recentEvent) {
-            String error = "Error: event id is not in strict order! event source Id: "
-                    + sourceId + ", last event Id: " + recentEvent + ", current event Id: " + eventId;
+            String error =
+                    "Error: event id is not in strict order! event source Id: " + sourceId + ", last event Id: " + recentEvent + ", current event Id: "
+                            + eventId;
 
             _collector.emit(input, new Values(error));
         }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/TestGlobalCount.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/TestGlobalCount.java b/jstorm-core/src/main/java/backtype/storm/testing/TestGlobalCount.java
index 5ef464a..45f48e4 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/TestGlobalCount.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/TestGlobalCount.java
@@ -28,7 +28,6 @@ import backtype.storm.tuple.Values;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-
 public class TestGlobalCount extends BaseRichBolt {
     public static Logger LOG = LoggerFactory.getLogger(TestWordCounter.class);
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/TestJob.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/TestJob.java b/jstorm-core/src/main/java/backtype/storm/testing/TestJob.java
index d41c36a..099a8db 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/TestJob.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/TestJob.java
@@ -20,10 +20,7 @@ package backtype.storm.testing;
 import backtype.storm.ILocalCluster;
 
 /**
- * This is the core interface for the storm java testing, usually
- * we put our java unit testing logic in the run method. A sample
- * code will be:
- * <code>
+ * This is the core interface for storm Java testing; the unit testing logic usually goes in the run method. A code sample: <code>
  * Testing.withSimulatedTimeLocalCluster(new TestJob() {
  *     public void run(Cluster cluster) {
  *         // your testing logic here.
@@ -31,11 +28,10 @@ import backtype.storm.ILocalCluster;
  * });
  */
 public interface TestJob {
-	/**
-	 * run the testing logic with the cluster.
-	 * 
-	 * @param cluster the cluster which created by <code>Testing.withSimulatedTimeLocalCluster</code>
-	 *        and <code>Testing.withTrackedCluster</code>.
-	 */
+    /**
+     * run the testing logic with the cluster.
+     * 
+     * @param cluster the cluster created by <code>Testing.withSimulatedTimeLocalCluster</code> or <code>Testing.withTrackedCluster</code>.
+     */
     public void run(ILocalCluster cluster) throws Exception;
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/TestPlannerBolt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/TestPlannerBolt.java b/jstorm-core/src/main/java/backtype/storm/testing/TestPlannerBolt.java
index 0d30b26..769f1cf 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/TestPlannerBolt.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/TestPlannerBolt.java
@@ -25,16 +25,15 @@ import java.util.Map;
 import backtype.storm.task.TopologyContext;
 import backtype.storm.topology.base.BaseRichBolt;
 
-
 public class TestPlannerBolt extends BaseRichBolt {
     public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
 
     }
-    
+
     public void execute(Tuple input) {
 
     }
-        
+
     public Fields getOutputFields() {
         return new Fields("field1", "field2");
     }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/TestPlannerSpout.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/TestPlannerSpout.java b/jstorm-core/src/main/java/backtype/storm/testing/TestPlannerSpout.java
index f4c27c0..bcacd4d 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/TestPlannerSpout.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/TestPlannerSpout.java
@@ -27,11 +27,10 @@ import backtype.storm.tuple.Fields;
 import backtype.storm.utils.Utils;
 import java.util.HashMap;
 
-
 public class TestPlannerSpout extends BaseRichSpout {
     boolean _isDistributed;
     Fields _outFields;
-    
+
     public TestPlannerSpout(Fields outFields, boolean isDistributed) {
         _isDistributed = isDistributed;
         _outFields = outFields;
@@ -40,34 +39,33 @@ public class TestPlannerSpout extends BaseRichSpout {
     public TestPlannerSpout(boolean isDistributed) {
         this(new Fields("field1", "field2"), isDistributed);
     }
-        
+
     public TestPlannerSpout(Fields outFields) {
         this(outFields, true);
     }
-    
+
     public Fields getOutputFields() {
         return _outFields;
     }
 
-    
     public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
-        
+
     }
-    
+
     public void close() {
-        
+
     }
-    
+
     public void nextTuple() {
         Utils.sleep(100);
     }
-    
-    public void ack(Object msgId){
-        
+
+    public void ack(Object msgId) {
+
     }
 
-    public void fail(Object msgId){
-        
+    public void fail(Object msgId) {
+
     }
 
     public void declareOutputFields(OutputFieldsDeclarer declarer) {
@@ -77,9 +75,9 @@ public class TestPlannerSpout extends BaseRichSpout {
     @Override
     public Map<String, Object> getComponentConfiguration() {
         Map<String, Object> ret = new HashMap<String, Object>();
-        if(!_isDistributed) {
+        if (!_isDistributed) {
             ret.put(Config.TOPOLOGY_MAX_TASK_PARALLELISM, 1);
         }
         return ret;
-    }       
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/TestSerObject.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/TestSerObject.java b/jstorm-core/src/main/java/backtype/storm/testing/TestSerObject.java
index 1c7706f..5416ef0 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/TestSerObject.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/TestSerObject.java
@@ -22,12 +22,12 @@ import java.io.Serializable;
 public class TestSerObject implements Serializable {
     public int f1;
     public int f2;
-    
+
     public TestSerObject(int f1, int f2) {
         this.f1 = f1;
         this.f2 = f2;
     }
-    
+
     @Override
     public int hashCode() {
         final int prime = 31;
@@ -36,7 +36,7 @@ public class TestSerObject implements Serializable {
         result = prime * result + f2;
         return result;
     }
-    
+
     @Override
     public boolean equals(Object obj) {
         if (this == obj)
@@ -52,5 +52,5 @@ public class TestSerObject implements Serializable {
             return false;
         return true;
     }
-    
+
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/TestWordCounter.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/TestWordCounter.java b/jstorm-core/src/main/java/backtype/storm/testing/TestWordCounter.java
index 551b054..c6b32b5 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/TestWordCounter.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/TestWordCounter.java
@@ -29,29 +29,28 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import static backtype.storm.utils.Utils.tuple;
 
-
 public class TestWordCounter extends BaseBasicBolt {
     public static Logger LOG = LoggerFactory.getLogger(TestWordCounter.class);
 
     Map<String, Integer> _counts;
-    
+
     public void prepare(Map stormConf, TopologyContext context) {
         _counts = new HashMap<String, Integer>();
     }
-    
+
     public void execute(Tuple input, BasicOutputCollector collector) {
         String word = (String) input.getValues().get(0);
         int count = 0;
-        if(_counts.containsKey(word)) {
+        if (_counts.containsKey(word)) {
             count = _counts.get(word);
         }
         count++;
         _counts.put(word, count);
         collector.emit(tuple(word, count));
     }
-    
+
     public void cleanup() {
-        
+
     }
 
     public void declareOutputFields(OutputFieldsDeclarer declarer) {

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/TestWordSpout.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/TestWordSpout.java b/jstorm-core/src/main/java/backtype/storm/testing/TestWordSpout.java
index 745bf71..d5603a1 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/TestWordSpout.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/TestWordSpout.java
@@ -31,7 +31,6 @@ import java.util.Random;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-
 public class TestWordSpout extends BaseRichSpout {
     public static Logger LOG = LoggerFactory.getLogger(TestWordSpout.class);
     boolean _isDistributed;
@@ -44,43 +43,43 @@ public class TestWordSpout extends BaseRichSpout {
     public TestWordSpout(boolean isDistributed) {
         _isDistributed = isDistributed;
     }
-        
+
     public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
         _collector = collector;
     }
-    
+
     public void close() {
-        
+
     }
-        
+
     public void nextTuple() {
         Utils.sleep(100);
-        final String[] words = new String[] {"nathan", "mike", "jackson", "golda", "bertels"};
+        final String[] words = new String[] { "nathan", "mike", "jackson", "golda", "bertels" };
         final Random rand = new Random();
         final String word = words[rand.nextInt(words.length)];
         _collector.emit(new Values(word));
     }
-    
+
     public void ack(Object msgId) {
 
     }
 
     public void fail(Object msgId) {
-        
+
     }
-    
+
     public void declareOutputFields(OutputFieldsDeclarer declarer) {
         declarer.declare(new Fields("word"));
     }
 
     @Override
     public Map<String, Object> getComponentConfiguration() {
-        if(!_isDistributed) {
+        if (!_isDistributed) {
             Map<String, Object> ret = new HashMap<String, Object>();
             ret.put(Config.TOPOLOGY_MAX_TASK_PARALLELISM, 1);
             return ret;
         } else {
             return null;
         }
-    }    
+    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/TrackedTopology.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/TrackedTopology.java b/jstorm-core/src/main/java/backtype/storm/testing/TrackedTopology.java
index f2691b7..60506b5 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/TrackedTopology.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/TrackedTopology.java
@@ -23,12 +23,12 @@ import java.util.Map;
 import backtype.storm.generated.StormTopology;
 import clojure.lang.Keyword;
 
-public class TrackedTopology extends HashMap{
-	public TrackedTopology(Map map) {
-		super(map);
-	}
-	
-	public StormTopology getTopology() {
-		return (StormTopology)get(Keyword.intern("topology"));
-	}
+public class TrackedTopology extends HashMap {
+    public TrackedTopology(Map map) {
+        super(map);
+    }
+
+    public StormTopology getTopology() {
+        return (StormTopology) get(Keyword.intern("topology"));
+    }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/testing/TupleCaptureBolt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/testing/TupleCaptureBolt.java b/jstorm-core/src/main/java/backtype/storm/testing/TupleCaptureBolt.java
index e163576..9635887 100755
--- a/jstorm-core/src/main/java/backtype/storm/testing/TupleCaptureBolt.java
+++ b/jstorm-core/src/main/java/backtype/storm/testing/TupleCaptureBolt.java
@@ -28,7 +28,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.UUID;
 
-
 public class TupleCaptureBolt implements IRichBolt {
     public static transient Map<String, Map<String, List<FixedTuple>>> emitted_tuples = new HashMap<String, Map<String, List<FixedTuple>>>();
 
@@ -47,8 +46,8 @@ public class TupleCaptureBolt implements IRichBolt {
     public void execute(Tuple input) {
         String component = input.getSourceComponent();
         Map<String, List<FixedTuple>> captured = emitted_tuples.get(_name);
-        if(!captured.containsKey(component)) {
-           captured.put(component, new ArrayList<FixedTuple>());
+        if (!captured.containsKey(component)) {
+            captured.put(component, new ArrayList<FixedTuple>());
         }
         captured.get(component).add(new FixedTuple(input.getSourceStreamId(), input.getValues()));
         _collector.ack(input);
@@ -60,7 +59,7 @@ public class TupleCaptureBolt implements IRichBolt {
 
     public void cleanup() {
     }
-    
+
     public Map<String, List<FixedTuple>> getAndRemoveResults() {
         return emitted_tuples.remove(_name);
     }
@@ -70,7 +69,7 @@ public class TupleCaptureBolt implements IRichBolt {
         emitted_tuples.get(_name).clear();
         return ret;
     }
-    
+
     @Override
     public void declareOutputFields(OutputFieldsDeclarer declarer) {
     }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/topology/BaseConfigurationDeclarer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/topology/BaseConfigurationDeclarer.java b/jstorm-core/src/main/java/backtype/storm/topology/BaseConfigurationDeclarer.java
index 0c67324..a6614fc 100755
--- a/jstorm-core/src/main/java/backtype/storm/topology/BaseConfigurationDeclarer.java
+++ b/jstorm-core/src/main/java/backtype/storm/topology/BaseConfigurationDeclarer.java
@@ -36,19 +36,22 @@ public abstract class BaseConfigurationDeclarer<T extends ComponentConfiguration
 
     @Override
     public T setMaxTaskParallelism(Number val) {
-        if(val!=null) val = val.intValue();
+        if (val != null)
+            val = val.intValue();
         return addConfiguration(Config.TOPOLOGY_MAX_TASK_PARALLELISM, val);
     }
 
     @Override
     public T setMaxSpoutPending(Number val) {
-        if(val!=null) val = val.intValue();
+        if (val != null)
+            val = val.intValue();
         return addConfiguration(Config.TOPOLOGY_MAX_SPOUT_PENDING, val);
     }
-    
+
     @Override
     public T setNumTasks(Number val) {
-        if(val!=null) val = val.intValue();
+        if (val != null)
+            val = val.intValue();
         return addConfiguration(Config.TOPOLOGY_TASKS, val);
     }
 }
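
Because every setter above delegates to addConfiguration, component-level
config chains straight off setSpout/setBolt. A small illustrative sketch using
test components touched elsewhere in this commit (ids and counts are
arbitrary):

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("words", new TestWordSpout(true), 2)
           .setMaxSpoutPending(512);          // cap on in-flight tuples per spout task
    builder.setBolt("count", new TestWordCounter(), 3)
           .shuffleGrouping("words")
           .setNumTasks(6);                   // 6 tasks multiplexed over 3 executors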

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/topology/BasicBoltExecutor.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/topology/BasicBoltExecutor.java b/jstorm-core/src/main/java/backtype/storm/topology/BasicBoltExecutor.java
index 6c9cdc1..ea437c5 100755
--- a/jstorm-core/src/main/java/backtype/storm/topology/BasicBoltExecutor.java
+++ b/jstorm-core/src/main/java/backtype/storm/topology/BasicBoltExecutor.java
@@ -25,11 +25,11 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class BasicBoltExecutor implements IRichBolt {
-    public static Logger LOG = LoggerFactory.getLogger(BasicBoltExecutor.class);    
-    
+    public static Logger LOG = LoggerFactory.getLogger(BasicBoltExecutor.class);
+
     private IBasicBolt _bolt;
     private transient BasicOutputCollector _collector;
-    
+
     public BasicBoltExecutor(IBasicBolt bolt) {
         _bolt = bolt;
     }
@@ -38,7 +38,6 @@ public class BasicBoltExecutor implements IRichBolt {
         _bolt.declareOutputFields(declarer);
     }
 
-    
     public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
         _bolt.prepare(stormConf, context);
         _collector = new BasicOutputCollector(collector);
@@ -49,8 +48,8 @@ public class BasicBoltExecutor implements IRichBolt {
         try {
             _bolt.execute(input, _collector);
             _collector.getOutputter().ack(input);
-        } catch(FailedException e) {
-            if(e instanceof ReportedFailedException) {
+        } catch (FailedException e) {
+            if (e instanceof ReportedFailedException) {
                 _collector.reportError(e);
             }
             _collector.getOutputter().fail(input);

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/topology/BasicOutputCollector.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/topology/BasicOutputCollector.java b/jstorm-core/src/main/java/backtype/storm/topology/BasicOutputCollector.java
index be1c242..e48f159 100755
--- a/jstorm-core/src/main/java/backtype/storm/topology/BasicOutputCollector.java
+++ b/jstorm-core/src/main/java/backtype/storm/topology/BasicOutputCollector.java
@@ -23,7 +23,6 @@ import backtype.storm.tuple.Tuple;
 import backtype.storm.utils.Utils;
 import java.util.List;
 
-
 public class BasicOutputCollector implements IBasicOutputCollector {
     private OutputCollector out;
     private Tuple inputTuple;

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/topology/BoltDeclarer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/topology/BoltDeclarer.java b/jstorm-core/src/main/java/backtype/storm/topology/BoltDeclarer.java
index 0c4b200..8fe05e2 100755
--- a/jstorm-core/src/main/java/backtype/storm/topology/BoltDeclarer.java
+++ b/jstorm-core/src/main/java/backtype/storm/topology/BoltDeclarer.java
@@ -19,8 +19,9 @@ package backtype.storm.topology;
 
 /**
  * BoltDeclarer includes grouping APIs for storm topology.
+ * 
  * @see <a href="https://storm.apache.org/documentation/Concepts.html">Concepts -Stream groupings-</a>
  */
 public interface BoltDeclarer extends InputDeclarer<BoltDeclarer>, ComponentConfigurationDeclarer<BoltDeclarer> {
-    
+
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/topology/ComponentConfigurationDeclarer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/topology/ComponentConfigurationDeclarer.java b/jstorm-core/src/main/java/backtype/storm/topology/ComponentConfigurationDeclarer.java
index d05dda0..49d78e5 100755
--- a/jstorm-core/src/main/java/backtype/storm/topology/ComponentConfigurationDeclarer.java
+++ b/jstorm-core/src/main/java/backtype/storm/topology/ComponentConfigurationDeclarer.java
@@ -21,9 +21,14 @@ import java.util.Map;
 
 public interface ComponentConfigurationDeclarer<T extends ComponentConfigurationDeclarer> {
     T addConfigurations(Map conf);
+
     T addConfiguration(String config, Object value);
+
     T setDebug(boolean debug);
+
     T setMaxTaskParallelism(Number val);
+
     T setMaxSpoutPending(Number val);
+
     T setNumTasks(Number val);
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/topology/FailedException.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/topology/FailedException.java b/jstorm-core/src/main/java/backtype/storm/topology/FailedException.java
index e174b5a..6c26bbf 100755
--- a/jstorm-core/src/main/java/backtype/storm/topology/FailedException.java
+++ b/jstorm-core/src/main/java/backtype/storm/topology/FailedException.java
@@ -21,11 +21,11 @@ public class FailedException extends RuntimeException {
     public FailedException() {
         super();
     }
-    
+
     public FailedException(String msg) {
         super(msg);
     }
-    
+
     public FailedException(String msg, Throwable cause) {
         super(msg, cause);
     }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/topology/IBasicBolt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/topology/IBasicBolt.java b/jstorm-core/src/main/java/backtype/storm/topology/IBasicBolt.java
index 3b24f4e..81741df 100755
--- a/jstorm-core/src/main/java/backtype/storm/topology/IBasicBolt.java
+++ b/jstorm-core/src/main/java/backtype/storm/topology/IBasicBolt.java
@@ -23,11 +23,13 @@ import java.util.Map;
 
 public interface IBasicBolt extends IComponent {
     void prepare(Map stormConf, TopologyContext context);
+
     /**
      * Process the input tuple and optionally emit new tuples based on the input tuple.
      * 
      * All acking is managed for you. Throw a FailedException if you want to fail the tuple.
      */
     void execute(Tuple input, BasicOutputCollector collector);
+
     void cleanup();
 }
\ No newline at end of file
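
The "all acking is managed for you" contract is easiest to see in a sketch. A
minimal hypothetical basic bolt that fails a tuple by throwing rather than by
calling fail() explicitly:

    public class ValidatingBolt extends BaseBasicBolt {
        public void execute(Tuple input, BasicOutputCollector collector) {
            String word = input.getString(0);
            if (word == null || word.isEmpty()) {
                // BasicBoltExecutor catches this and fails the input tuple for us.
                throw new FailedException("empty word");
            }
            collector.emit(new Values(word.toUpperCase()));
            // No explicit ack: the executor acks once execute() returns normally.
        }

        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            declarer.declare(new Fields("word"));
        }
    }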

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/topology/IBasicOutputCollector.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/topology/IBasicOutputCollector.java b/jstorm-core/src/main/java/backtype/storm/topology/IBasicOutputCollector.java
index 92d60d2..85008c2 100755
--- a/jstorm-core/src/main/java/backtype/storm/topology/IBasicOutputCollector.java
+++ b/jstorm-core/src/main/java/backtype/storm/topology/IBasicOutputCollector.java
@@ -21,6 +21,8 @@ import java.util.List;
 
 public interface IBasicOutputCollector {
     List<Integer> emit(String streamId, List<Object> tuple);
+
     void emitDirect(int taskId, String streamId, List<Object> tuple);
+
     void reportError(Throwable t);
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/topology/IComponent.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/topology/IComponent.java b/jstorm-core/src/main/java/backtype/storm/topology/IComponent.java
index 560c96f..1d0865d 100755
--- a/jstorm-core/src/main/java/backtype/storm/topology/IComponent.java
+++ b/jstorm-core/src/main/java/backtype/storm/topology/IComponent.java
@@ -21,23 +21,21 @@ import java.io.Serializable;
 import java.util.Map;
 
 /**
- * Common methods for all possible components in a topology. This interface is used
- * when defining topologies using the Java API. 
+ * Common methods for all possible components in a topology. This interface is used when defining topologies using the Java API.
  */
 public interface IComponent extends Serializable {
 
     /**
      * Declare the output schema for all the streams of this topology.
-     *
+     * 
      * @param declarer this is used to declare output stream ids, output fields, and whether or not each output stream is a direct stream
      */
     void declareOutputFields(OutputFieldsDeclarer declarer);
 
     /**
-     * Declare configuration specific to this component. Only a subset of the "topology.*" configs can
-     * be overridden. The component configuration can be further overridden when constructing the 
-     * topology using {@link TopologyBuilder}
-     *
+     * Declare configuration specific to this component. Only a subset of the "topology.*" configs can be overridden. The component configuration can be further
+     * overridden when constructing the topology using {@link TopologyBuilder}
+     * 
      */
     Map<String, Object> getComponentConfiguration();
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/topology/IConfig.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/topology/IConfig.java b/jstorm-core/src/main/java/backtype/storm/topology/IConfig.java
deleted file mode 100644
index 3ce9da7..0000000
--- a/jstorm-core/src/main/java/backtype/storm/topology/IConfig.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.topology;
-
-import java.util.Map;
-
-/*
- * This interface is used to notify the update of user configuration
- * for bolt and spout 
- */
-public interface IConfig {
-    public void updateConf(Map conf);
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/topology/IDynamicComponent.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/topology/IDynamicComponent.java b/jstorm-core/src/main/java/backtype/storm/topology/IDynamicComponent.java
new file mode 100644
index 0000000..573ca99
--- /dev/null
+++ b/jstorm-core/src/main/java/backtype/storm/topology/IDynamicComponent.java
@@ -0,0 +1,13 @@
+package backtype.storm.topology;
+
+import java.io.Serializable;
+import java.util.Map;
+
+/*
+ * This interface is used to notify bolts and spouts of
+ * updates to the user configuration
+ */
+
+public interface IDynamicComponent extends Serializable {
+    public void update(Map conf);
+}
\ No newline at end of file
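
A hedged sketch of the new callback in use. This patch does not show how or
when the runtime invokes update(), so both the configuration key and the
delivery semantics below are assumptions:

    public class TunableBolt extends BaseBasicBolt implements IDynamicComponent {
        private volatile int threshold = 100;

        // Invoked when the user configuration is updated at runtime.
        public void update(Map conf) {
            Object v = conf.get("mybolt.threshold");   // hypothetical key
            if (v != null) {
                threshold = Integer.parseInt(String.valueOf(v));
            }
        }

        public void execute(Tuple input, BasicOutputCollector collector) {
            if (input.getInteger(0) > threshold) {
                collector.emit(new Values(input.getInteger(0)));
            }
        }

        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            declarer.declare(new Fields("big"));
        }
    }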

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/topology/IRichBolt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/topology/IRichBolt.java b/jstorm-core/src/main/java/backtype/storm/topology/IRichBolt.java
index d35244e..d44619c 100755
--- a/jstorm-core/src/main/java/backtype/storm/topology/IRichBolt.java
+++ b/jstorm-core/src/main/java/backtype/storm/topology/IRichBolt.java
@@ -20,9 +20,8 @@ package backtype.storm.topology;
 import backtype.storm.task.IBolt;
 
 /**
- * When writing topologies using Java, {@link IRichBolt} and {@link IRichSpout} are the main interfaces
- * to use to implement components of the topology.
- *
+ * When writing topologies using Java, {@link IRichBolt} and {@link IRichSpout} are the main interfaces to use to implement components of the topology.
+ * 
  */
 public interface IRichBolt extends IBolt, IComponent {
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/topology/IRichSpout.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/topology/IRichSpout.java b/jstorm-core/src/main/java/backtype/storm/topology/IRichSpout.java
index b088641..e1bdc02 100755
--- a/jstorm-core/src/main/java/backtype/storm/topology/IRichSpout.java
+++ b/jstorm-core/src/main/java/backtype/storm/topology/IRichSpout.java
@@ -20,9 +20,8 @@ package backtype.storm.topology;
 import backtype.storm.spout.ISpout;
 
 /**
- * When writing topologies using Java, {@link IRichBolt} and {@link IRichSpout} are the main interfaces
- * to use to implement components of the topology.
- *
+ * When writing topologies using Java, {@link IRichBolt} and {@link IRichSpout} are the main interfaces to use to implement components of the topology.
+ * 
  */
 public interface IRichSpout extends ISpout, IComponent {
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/topology/IRichStateSpout.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/topology/IRichStateSpout.java b/jstorm-core/src/main/java/backtype/storm/topology/IRichStateSpout.java
index edcc0ff..a22acd4 100755
--- a/jstorm-core/src/main/java/backtype/storm/topology/IRichStateSpout.java
+++ b/jstorm-core/src/main/java/backtype/storm/topology/IRichStateSpout.java
@@ -19,7 +19,6 @@ package backtype.storm.topology;
 
 import backtype.storm.state.IStateSpout;
 
-
 public interface IRichStateSpout extends IStateSpout, IComponent {
 
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/topology/InputDeclarer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/topology/InputDeclarer.java b/jstorm-core/src/main/java/backtype/storm/topology/InputDeclarer.java
index 33540de..54f2702 100755
--- a/jstorm-core/src/main/java/backtype/storm/topology/InputDeclarer.java
+++ b/jstorm-core/src/main/java/backtype/storm/topology/InputDeclarer.java
@@ -22,10 +22,10 @@ import backtype.storm.generated.Grouping;
 import backtype.storm.grouping.CustomStreamGrouping;
 import backtype.storm.tuple.Fields;
 
-
 public interface InputDeclarer<T extends InputDeclarer> {
     /**
      * The stream is partitioned by the fields specified in the grouping.
+     * 
      * @param componentId
      * @param fields
      * @return
@@ -34,6 +34,7 @@ public interface InputDeclarer<T extends InputDeclarer> {
 
     /**
      * The stream is partitioned by the fields specified in the grouping.
+     * 
      * @param componentId
      * @param streamId
      * @param fields
@@ -42,16 +43,16 @@ public interface InputDeclarer<T extends InputDeclarer> {
     public T fieldsGrouping(String componentId, String streamId, Fields fields);
 
     /**
-     * The entire stream goes to a single one of the bolt's tasks.
-     * Specifically, it goes to the task with the lowest id.
+     * The entire stream goes to a single one of the bolt's tasks. Specifically, it goes to the task with the lowest id.
+     * 
      * @param componentId
      * @return
      */
     public T globalGrouping(String componentId);
 
     /**
-     * The entire stream goes to a single one of the bolt's tasks.
-     * Specifically, it goes to the task with the lowest id.
+     * The entire stream goes to a single one of the bolt's tasks. Specifically, it goes to the task with the lowest id.
+     * 
      * @param componentId
      * @param streamId
      * @return
@@ -59,16 +60,16 @@ public interface InputDeclarer<T extends InputDeclarer> {
     public T globalGrouping(String componentId, String streamId);
 
     /**
-     * Tuples are randomly distributed across the bolt's tasks in a way such that
-     * each bolt is guaranteed to get an equal number of tuples.
+     * Tuples are randomly distributed across the bolt's tasks in a way such that each bolt is guaranteed to get an equal number of tuples.
+     * 
      * @param componentId
      * @return
      */
     public T shuffleGrouping(String componentId);
 
     /**
-     * Tuples are randomly distributed across the bolt's tasks in a way such that
-     * each bolt is guaranteed to get an equal number of tuples.
+     * Tuples are randomly distributed across the bolt's tasks in a way such that each bolt is guaranteed to get an equal number of tuples.
+     * 
      * @param componentId
      * @param streamId
      * @return
@@ -76,29 +77,31 @@ public interface InputDeclarer<T extends InputDeclarer> {
     public T shuffleGrouping(String componentId, String streamId);
 
     /**
-     * If the target bolt has one or more tasks in the same worker process,
-     * tuples will be shuffled to just those in-process tasks.
-     * Otherwise, this acts like a normal shuffle grouping.
+     * If the target bolt has one or more tasks in the same worker process, tuples will be shuffled to just those in-process tasks. Otherwise, this acts like a
+     * normal shuffle grouping.
+     * 
      * @param componentId
      * @return
      */
     public T localOrShuffleGrouping(String componentId);
 
     /**
-     * If the target bolt has one or more tasks in the same worker process,
-     * tuples will be shuffled to just those in-process tasks.
-     * Otherwise, this acts like a normal shuffle grouping.
+     * If the target bolt has one or more tasks in the same worker process, tuples will be shuffled to just those in-process tasks. Otherwise, this acts like a
+     * normal shuffle grouping.
+     * 
      * @param componentId
      * @param streamId
      * @return
      */
     public T localOrShuffleGrouping(String componentId, String streamId);
-    
+
     public T localFirstGrouping(String componentId);
-    
+
     public T localFirstGrouping(String componentId, String streamId);
+
     /**
      * This grouping specifies that you don't care how the stream is grouped.
+     * 
      * @param componentId
      * @return
      */
@@ -106,6 +109,7 @@ public interface InputDeclarer<T extends InputDeclarer> {
 
     /**
      * This grouping specifies that you don't care how the stream is grouped.
+     * 
      * @param componentId
      * @param streamId
      * @return
@@ -114,6 +118,7 @@ public interface InputDeclarer<T extends InputDeclarer> {
 
     /**
      * The stream is replicated across all the bolt's tasks. Use this grouping with care.
+     * 
      * @param componentId
      * @return
      */
@@ -121,6 +126,7 @@ public interface InputDeclarer<T extends InputDeclarer> {
 
     /**
      * The stream is replicated across all the bolt's tasks. Use this grouping with care.
+     * 
      * @param componentId
      * @param streamId
      * @return
@@ -128,16 +134,16 @@ public interface InputDeclarer<T extends InputDeclarer> {
     public T allGrouping(String componentId, String streamId);
 
     /**
-     * A stream grouped this way means that the producer of the tuple decides
-     * which task of the consumer will receive this tuple.
+     * A stream grouped this way means that the producer of the tuple decides which task of the consumer will receive this tuple.
+     * 
      * @param componentId
      * @return
      */
     public T directGrouping(String componentId);
 
     /**
-     * A stream grouped this way means that the producer of the tuple decides
-     * which task of the consumer will receive this tuple.
+     * A stream grouped this way means that the producer of the tuple decides which task of the consumer will receive this tuple.
+     * 
      * @param componentId
      * @param streamId
      * @return
@@ -145,9 +151,9 @@ public interface InputDeclarer<T extends InputDeclarer> {
     public T directGrouping(String componentId, String streamId);
 
     /**
-     * Tuples are passed to two hashing functions and each target task is
-     * decided based on the comparison of the state of candidate nodes.
-     * @see   https://melmeric.files.wordpress.com/2014/11/the-power-of-both-choices-practical-load-balancing-for-distributed-stream-processing-engines.pdf
+     * Tuples are passed to two hashing functions and each target task is decided based on the comparison of the state of candidate nodes.
+     * 
+     * @see https://melmeric.files.wordpress.com/2014/11/the-power-of-both-choices-practical-load-balancing-for-distributed-stream-processing-engines.pdf
      * @param componentId
      * @param fields
      * @return
@@ -155,9 +161,9 @@ public interface InputDeclarer<T extends InputDeclarer> {
     public T partialKeyGrouping(String componentId, Fields fields);
 
     /**
-     * Tuples are passed to two hashing functions and each target task is
-     * decided based on the comparison of the state of candidate nodes.
-     * @see   https://melmeric.files.wordpress.com/2014/11/the-power-of-both-choices-practical-load-balancing-for-distributed-stream-processing-engines.pdf
+     * Tuples are passed to two hashing functions and each target task is decided based on the comparison of the state of candidate nodes.
+     * 
+     * @see https://melmeric.files.wordpress.com/2014/11/the-power-of-both-choices-practical-load-balancing-for-distributed-stream-processing-engines.pdf
      * @param componentId
      * @param streamId
      * @param fields
@@ -167,6 +173,7 @@ public interface InputDeclarer<T extends InputDeclarer> {
 
     /**
      * A custom stream grouping by implementing the CustomStreamGrouping interface.
+     * 
      * @param componentId
      * @param grouping
      * @return
@@ -175,13 +182,14 @@ public interface InputDeclarer<T extends InputDeclarer> {
 
     /**
      * A custom stream grouping by implementing the CustomStreamGrouping interface.
+     * 
      * @param componentId
      * @param streamId
      * @param grouping
      * @return
      */
     public T customGrouping(String componentId, String streamId, CustomStreamGrouping grouping);
-    
+
     public T grouping(GlobalStreamId id, Grouping grouping);
-    
+
 }
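
To make the grouping contracts above concrete, a brief sketch of a consumer
declaring them through TopologyBuilder (component ids and parallelism are made
up for illustration):

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("words", new TestWordSpout(true), 4);
    // Same word always reaches the same counting task.
    builder.setBolt("count", new TestWordCounter(), 4)
           .fieldsGrouping("words", new Fields("word"));
    // Key-based load balancing over candidate tasks, per the paper cited above.
    builder.setBolt("pkg-count", new TestWordCounter(), 4)
           .partialKeyGrouping("words", new Fields("word"));
    // The whole stream funnels into the single task with the lowest id.
    builder.setBolt("total", new TestGlobalCount())
           .globalGrouping("count");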

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/topology/OutputFieldsDeclarer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/topology/OutputFieldsDeclarer.java b/jstorm-core/src/main/java/backtype/storm/topology/OutputFieldsDeclarer.java
index 2ac4794..d5ca7ca 100755
--- a/jstorm-core/src/main/java/backtype/storm/topology/OutputFieldsDeclarer.java
+++ b/jstorm-core/src/main/java/backtype/storm/topology/OutputFieldsDeclarer.java
@@ -19,14 +19,15 @@ package backtype.storm.topology;
 
 import backtype.storm.tuple.Fields;
 
-
 public interface OutputFieldsDeclarer {
     /**
      * Uses default stream id.
      */
     public void declare(Fields fields);
+
     public void declare(boolean direct, Fields fields);
-    
+
     public void declareStream(String streamId, Fields fields);
+
     public void declareStream(String streamId, boolean direct, Fields fields);
 }
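
For reference, a small sketch of the declarer inside a component's
declareOutputFields, covering the default stream and a named direct stream
(the stream name is made up for illustration):

    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // Default stream id, regular (non-direct) emission.
        declarer.declare(new Fields("word", "count"));
        // A separate named stream whose tuples must be sent with emitDirect().
        declarer.declareStream("errors", true, new Fields("msg"));
    }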

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/topology/OutputFieldsGetter.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/topology/OutputFieldsGetter.java b/jstorm-core/src/main/java/backtype/storm/topology/OutputFieldsGetter.java
index 0e7fd59..1fdcf86 100755
--- a/jstorm-core/src/main/java/backtype/storm/topology/OutputFieldsGetter.java
+++ b/jstorm-core/src/main/java/backtype/storm/topology/OutputFieldsGetter.java
@@ -39,13 +39,12 @@ public class OutputFieldsGetter implements OutputFieldsDeclarer {
     }
 
     public void declareStream(String streamId, boolean direct, Fields fields) {
-        if(_fields.containsKey(streamId)) {
+        if (_fields.containsKey(streamId)) {
             throw new IllegalArgumentException("Fields for " + streamId + " already set");
         }
         _fields.put(streamId, new StreamInfo(fields.toList(), direct));
     }
 
-
     public Map<String, StreamInfo> getFieldsDeclaration() {
         return _fields;
     }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/topology/ReportedFailedException.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/topology/ReportedFailedException.java b/jstorm-core/src/main/java/backtype/storm/topology/ReportedFailedException.java
index 4e4ebe4..c90a545 100755
--- a/jstorm-core/src/main/java/backtype/storm/topology/ReportedFailedException.java
+++ b/jstorm-core/src/main/java/backtype/storm/topology/ReportedFailedException.java
@@ -21,11 +21,11 @@ public class ReportedFailedException extends FailedException {
     public ReportedFailedException() {
         super();
     }
-    
+
     public ReportedFailedException(String msg) {
         super(msg);
     }
-    
+
     public ReportedFailedException(String msg, Throwable cause) {
         super(msg, cause);
     }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/topology/SpoutDeclarer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/topology/SpoutDeclarer.java b/jstorm-core/src/main/java/backtype/storm/topology/SpoutDeclarer.java
index c0d8254..9c5ec34 100755
--- a/jstorm-core/src/main/java/backtype/storm/topology/SpoutDeclarer.java
+++ b/jstorm-core/src/main/java/backtype/storm/topology/SpoutDeclarer.java
@@ -18,5 +18,5 @@
 package backtype.storm.topology;
 
 public interface SpoutDeclarer extends ComponentConfigurationDeclarer<SpoutDeclarer> {
-    
+
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/topology/TopologyBuilder.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/topology/TopologyBuilder.java b/jstorm-core/src/main/java/backtype/storm/topology/TopologyBuilder.java
index c04e449..2b546e3 100755
--- a/jstorm-core/src/main/java/backtype/storm/topology/TopologyBuilder.java
+++ b/jstorm-core/src/main/java/backtype/storm/topology/TopologyBuilder.java
@@ -18,108 +18,90 @@
 package backtype.storm.topology;
 
 import backtype.storm.Config;
-import backtype.storm.generated.Bolt;
-import backtype.storm.generated.ComponentCommon;
-import backtype.storm.generated.ComponentObject;
-import backtype.storm.generated.GlobalStreamId;
-import backtype.storm.generated.Grouping;
-import backtype.storm.generated.NullStruct;
-import backtype.storm.generated.SpoutSpec;
-import backtype.storm.generated.StateSpoutSpec;
-import backtype.storm.generated.StormTopology;
+import backtype.storm.generated.*;
 import backtype.storm.grouping.CustomStreamGrouping;
 import backtype.storm.grouping.PartialKeyGrouping;
 import backtype.storm.tuple.Fields;
 import backtype.storm.utils.Utils;
+import org.json.simple.JSONValue;
+
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Map;
-import org.json.simple.JSONValue;
 
 /**
- * TopologyBuilder exposes the Java API for specifying a topology for Storm
- * to execute. Topologies are Thrift structures in the end, but since the Thrift API
- * is so verbose, TopologyBuilder greatly eases the process of creating topologies.
- * The template for creating and submitting a topology looks something like:
- *
+ * TopologyBuilder exposes the Java API for specifying a topology for Storm to execute. Topologies are Thrift structures in the end, but since the Thrift API is
+ * so verbose, TopologyBuilder greatly eases the process of creating topologies. The template for creating and submitting a topology looks something like:
+ * 
  * <pre>
  * TopologyBuilder builder = new TopologyBuilder();
- *
- * builder.setSpout("1", new TestWordSpout(true), 5);
- * builder.setSpout("2", new TestWordSpout(true), 3);
- * builder.setBolt("3", new TestWordCounter(), 3)
- *          .fieldsGrouping("1", new Fields("word"))
- *          .fieldsGrouping("2", new Fields("word"));
- * builder.setBolt("4", new TestGlobalCount())
- *          .globalGrouping("1");
- *
+ * 
+ * builder.setSpout(&quot;1&quot;, new TestWordSpout(true), 5);
+ * builder.setSpout(&quot;2&quot;, new TestWordSpout(true), 3);
+ * builder.setBolt(&quot;3&quot;, new TestWordCounter(), 3).fieldsGrouping(&quot;1&quot;, new Fields(&quot;word&quot;)).fieldsGrouping(&quot;2&quot;, new Fields(&quot;word&quot;));
+ * builder.setBolt(&quot;4&quot;, new TestGlobalCount()).globalGrouping(&quot;1&quot;);
+ * 
  * Map conf = new HashMap();
  * conf.put(Config.TOPOLOGY_WORKERS, 4);
  * 
- * StormSubmitter.submitTopology("mytopology", conf, builder.createTopology());
+ * StormSubmitter.submitTopology(&quot;mytopology&quot;, conf, builder.createTopology());
  * </pre>
- *
- * Running the exact same topology in local mode (in process), and configuring it to log all tuples
- * emitted, looks like the following. Note that it lets the topology run for 10 seconds
- * before shutting down the local cluster.
- *
+ * 
+ * Running the exact same topology in local mode (in process), and configuring it to log all tuples emitted, looks like the following. Note that it lets the
+ * topology run for 10 seconds before shutting down the local cluster.
+ * 
  * <pre>
  * TopologyBuilder builder = new TopologyBuilder();
- *
- * builder.setSpout("1", new TestWordSpout(true), 5);
- * builder.setSpout("2", new TestWordSpout(true), 3);
- * builder.setBolt("3", new TestWordCounter(), 3)
- *          .fieldsGrouping("1", new Fields("word"))
- *          .fieldsGrouping("2", new Fields("word"));
- * builder.setBolt("4", new TestGlobalCount())
- *          .globalGrouping("1");
- *
+ * 
+ * builder.setSpout(&quot;1&quot;, new TestWordSpout(true), 5);
+ * builder.setSpout(&quot;2&quot;, new TestWordSpout(true), 3);
+ * builder.setBolt(&quot;3&quot;, new TestWordCounter(), 3).fieldsGrouping(&quot;1&quot;, new Fields(&quot;word&quot;)).fieldsGrouping(&quot;2&quot;, new Fields(&quot;word&quot;));
+ * builder.setBolt(&quot;4&quot;, new TestGlobalCount()).globalGrouping(&quot;1&quot;);
+ * 
  * Map conf = new HashMap();
  * conf.put(Config.TOPOLOGY_WORKERS, 4);
  * conf.put(Config.TOPOLOGY_DEBUG, true);
- *
+ * 
  * LocalCluster cluster = new LocalCluster();
- * cluster.submitTopology("mytopology", conf, builder.createTopology());
+ * cluster.submitTopology(&quot;mytopology&quot;, conf, builder.createTopology());
  * Utils.sleep(10000);
  * cluster.shutdown();
  * </pre>
- *
- * <p>The pattern for TopologyBuilder is to map component ids to components using the setSpout
- * and setBolt methods. Those methods return objects that are then used to declare
- * the inputs for that component.</p>
+ * 
+ * <p>
+ * The pattern for TopologyBuilder is to map component ids to components using the setSpout and setBolt methods. Those methods return objects that are then used
+ * to declare the inputs for that component.
+ * </p>
  */
 public class TopologyBuilder {
     private Map<String, IRichBolt> _bolts = new HashMap<String, IRichBolt>();
     private Map<String, IRichSpout> _spouts = new HashMap<String, IRichSpout>();
     private Map<String, ComponentCommon> _commons = new HashMap<String, ComponentCommon>();
 
-//    private Map<String, Map<GlobalStreamId, Grouping>> _inputs = new HashMap<String, Map<GlobalStreamId, Grouping>>();
+    // private Map<String, Map<GlobalStreamId, Grouping>> _inputs = new HashMap<String, Map<GlobalStreamId, Grouping>>();
 
     private Map<String, StateSpoutSpec> _stateSpouts = new HashMap<String, StateSpoutSpec>();
-    
-    
+
     public StormTopology createTopology() {
         Map<String, Bolt> boltSpecs = new HashMap<String, Bolt>();
         Map<String, SpoutSpec> spoutSpecs = new HashMap<String, SpoutSpec>();
-        for(String boltId: _bolts.keySet()) {
+        for (String boltId : _bolts.keySet()) {
             IRichBolt bolt = _bolts.get(boltId);
             ComponentCommon common = getComponentCommon(boltId, bolt);
             boltSpecs.put(boltId, new Bolt(ComponentObject.serialized_java(Utils.javaSerialize(bolt)), common));
         }
-        for(String spoutId: _spouts.keySet()) {
+        for (String spoutId : _spouts.keySet()) {
             IRichSpout spout = _spouts.get(spoutId);
             ComponentCommon common = getComponentCommon(spoutId, spout);
             spoutSpecs.put(spoutId, new SpoutSpec(ComponentObject.serialized_java(Utils.javaSerialize(spout)), common));
-            
+
         }
-        return new StormTopology(spoutSpecs,
-                                 boltSpecs,
-                                 new HashMap<String, StateSpoutSpec>());
+        return new StormTopology(spoutSpecs, boltSpecs, new HashMap<String, StateSpoutSpec>());
     }
 
     /**
      * Define a new bolt in this topology with parallelism of just one thread.
-     *
+     * 
      * @param id the id of this component. This id is referenced by other components that want to consume this bolt's outputs.
      * @param bolt the bolt
      * @return use the returned object to declare the inputs to this component
@@ -130,10 +112,11 @@ public class TopologyBuilder {
 
     /**
      * Define a new bolt in this topology with the specified amount of parallelism.
-     *
+     * 
      * @param id the id of this component. This id is referenced by other components that want to consume this bolt's outputs.
      * @param bolt the bolt
-     * @param parallelism_hint the number of tasks that should be assigned to execute this bolt. Each task will run on a thread in a process somewhere around the cluster.
+     * @param parallelism_hint the number of tasks that should be assigned to execute this bolt. Each task will run on a thread in a process somewhere around
+     *            the cluster.
      * @return use the returned object to declare the inputs to this component
      */
     public BoltDeclarer setBolt(String id, IRichBolt bolt, Number parallelism_hint) {
@@ -144,11 +127,9 @@ public class TopologyBuilder {
     }
 
     /**
-     * Define a new bolt in this topology. This defines a basic bolt, which is a
-     * simpler to use but more restricted kind of bolt. Basic bolts are intended
-     * for non-aggregation processing and automate the anchoring/acking process to
-     * achieve proper reliability in the topology.
-     *
+     * Define a new bolt in this topology. This defines a basic bolt, which is a simpler-to-use but more restricted kind of bolt. Basic bolts are intended for
+     * non-aggregation processing and automate the anchoring/acking process to achieve proper reliability in the topology.
+     * 
      * @param id the id of this component. This id is referenced by other components that want to consume this bolt's outputs.
      * @param bolt the basic bolt
      * @return use the returned object to declare the inputs to this component
@@ -158,14 +139,13 @@ public class TopologyBuilder {
     }
 
     /**
-     * Define a new bolt in this topology. This defines a basic bolt, which is a
-     * simpler to use but more restricted kind of bolt. Basic bolts are intended
-     * for non-aggregation processing and automate the anchoring/acking process to
-     * achieve proper reliability in the topology.
-     *
+     * Define a new bolt in this topology. This defines a basic bolt, which is a simpler-to-use but more restricted kind of bolt. Basic bolts are intended for
+     * non-aggregation processing and automate the anchoring/acking process to achieve proper reliability in the topology.
+     * 
      * @param id the id of this component. This id is referenced by other components that want to consume this bolt's outputs.
      * @param bolt the basic bolt
-     * @param parallelism_hint the number of tasks that should be assigned to execute this bolt. Each task will run on a thread in a process somwehere around the cluster.
+     * @param parallelism_hint the number of tasks that should be assigned to execute this bolt. Each task will run on a thread in a process somewhere around
+     *            the cluster.
      * @return use the returned object to declare the inputs to this component
      */
     public BoltDeclarer setBolt(String id, IBasicBolt bolt, Number parallelism_hint) {
@@ -174,7 +154,7 @@ public class TopologyBuilder {
 
     /**
      * Define a new spout in this topology.
-     *
+     * 
      * @param id the id of this component. This id is referenced by other components that want to consume this spout's outputs.
      * @param spout the spout
      */
@@ -183,12 +163,12 @@ public class TopologyBuilder {
     }
 
     /**
-     * Define a new spout in this topology with the specified parallelism. If the spout declares
-     * itself as non-distributed, the parallelism_hint will be ignored and only one task
-     * will be allocated to this component.
-     *
+     * Define a new spout in this topology with the specified parallelism. If the spout declares itself as non-distributed, the parallelism_hint will be ignored
+     * and only one task will be allocated to this component.
+     * 
      * @param id the id of this component. This id is referenced by other components that want to consume this spout's outputs.
-     * @param parallelism_hint the number of tasks that should be assigned to execute this spout. Each task will run on a thread in a process somwehere around the cluster.
+     * @param parallelism_hint the number of tasks that should be assigned to execute this spout. Each task will run on a thread in a process somewhere around
+     *            the cluster.
      * @param spout the spout
      */
     public SpoutDeclarer setSpout(String id, IRichSpout spout, Number parallelism_hint) {
@@ -207,51 +187,51 @@ public class TopologyBuilder {
         // TODO: finish
     }
 
-
     private void validateUnusedId(String id) {
-        if(_bolts.containsKey(id)) {
+        if (_bolts.containsKey(id)) {
             throw new IllegalArgumentException("Bolt has already been declared for id " + id);
         }
-        if(_spouts.containsKey(id)) {
+        if (_spouts.containsKey(id)) {
             throw new IllegalArgumentException("Spout has already been declared for id " + id);
         }
-        if(_stateSpouts.containsKey(id)) {
+        if (_stateSpouts.containsKey(id)) {
             throw new IllegalArgumentException("State spout has already been declared for id " + id);
         }
     }
 
     private ComponentCommon getComponentCommon(String id, IComponent component) {
         ComponentCommon ret = new ComponentCommon(_commons.get(id));
-        
+
         OutputFieldsGetter getter = new OutputFieldsGetter();
         component.declareOutputFields(getter);
         ret.set_streams(getter.getFieldsDeclaration());
-        return ret;        
+        return ret;
     }
-    
+
     private void initCommon(String id, IComponent component, Number parallelism) {
         ComponentCommon common = new ComponentCommon();
         common.set_inputs(new HashMap<GlobalStreamId, Grouping>());
-        if(parallelism!=null) {
+        if (parallelism != null) {
             common.set_parallelism_hint(parallelism.intValue());
-        }else {
+        } else {
             common.set_parallelism_hint(1);
         }
         Map conf = component.getComponentConfiguration();
-        if(conf!=null) common.set_json_conf(JSONValue.toJSONString(conf));
+        if (conf != null)
+            common.set_json_conf(JSONValue.toJSONString(conf));
         _commons.put(id, common);
     }
 
     protected class ConfigGetter<T extends ComponentConfigurationDeclarer> extends BaseConfigurationDeclarer<T> {
         String _id;
-        
+
         public ConfigGetter(String id) {
             _id = id;
         }
-        
+
         @Override
         public T addConfigurations(Map conf) {
-            if(conf!=null && conf.containsKey(Config.TOPOLOGY_KRYO_REGISTER)) {
+            if (conf != null && conf.containsKey(Config.TOPOLOGY_KRYO_REGISTER)) {
                 throw new IllegalArgumentException("Cannot set serializations for a component using fluent API");
             }
             String currConf = _commons.get(_id).get_json_conf();
@@ -259,13 +239,13 @@ public class TopologyBuilder {
             return (T) this;
         }
     }
-    
+
     protected class SpoutGetter extends ConfigGetter<SpoutDeclarer> implements SpoutDeclarer {
         public SpoutGetter(String id) {
             super(id);
-        }        
+        }
     }
-    
+
     protected class BoltGetter extends ConfigGetter<BoltDeclarer> implements BoltDeclarer {
         private String _boltId;
 
@@ -305,17 +285,17 @@ public class TopologyBuilder {
         public BoltDeclarer localOrShuffleGrouping(String componentId, String streamId) {
             return grouping(componentId, streamId, Grouping.local_or_shuffle(new NullStruct()));
         }
-        
+
         @Override
         public BoltDeclarer localFirstGrouping(String componentId) {
             return localFirstGrouping(componentId, Utils.DEFAULT_STREAM_ID);
         }
-        
+
         @Override
         public BoltDeclarer localFirstGrouping(String componentId, String streamId) {
             return grouping(componentId, streamId, Grouping.localFirst(new NullStruct()));
         }
-        
+
         public BoltDeclarer noneGrouping(String componentId) {
             return noneGrouping(componentId, Utils.DEFAULT_STREAM_ID);
         }
@@ -368,17 +348,20 @@ public class TopologyBuilder {
         @Override
         public BoltDeclarer grouping(GlobalStreamId id, Grouping grouping) {
             return grouping(id.get_componentId(), id.get_streamId(), grouping);
-        }        
+        }
     }
-    
+
     private static Map parseJson(String json) {
-        if(json==null) return new HashMap();
-        else return (Map) JSONValue.parse(json);
+        if (json == null)
+            return new HashMap();
+        else
+            return (Map) JSONValue.parse(json);
     }
-    
+
     private static String mergeIntoJson(Map into, Map newMap) {
         Map res = new HashMap(into);
-        if(newMap!=null) res.putAll(newMap);
+        if (newMap != null)
+            res.putAll(newMap);
         return JSONValue.toJSONString(res);
     }
 }
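
A quick sketch of the builder pattern the class javadoc describes: setSpout/setBolt map ids to components, and the returned declarer both wires inputs and carries per-component configuration (merged through ConfigGetter.addConfigurations above). MyWordSpout and MyCountBolt are hypothetical stand-ins, not part of this commit:

    import backtype.storm.Config;
    import backtype.storm.generated.StormTopology;
    import backtype.storm.topology.TopologyBuilder;
    import backtype.storm.tuple.Fields;

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("words", new MyWordSpout(), 2);          // id -> spout, 2 tasks
    builder.setBolt("counter", new MyCountBolt(), 3)          // id -> bolt, 3 tasks
           .fieldsGrouping("words", new Fields("word"))       // input wired on the declarer
           .addConfiguration(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 60);
    // Note: putting TOPOLOGY_KRYO_REGISTER into a component conf would throw,
    // per ConfigGetter.addConfigurations above.
    StormTopology topology = builder.createTopology();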

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/topology/base/BaseBasicBolt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/topology/base/BaseBasicBolt.java b/jstorm-core/src/main/java/backtype/storm/topology/base/BaseBasicBolt.java
index e585ee6..eb13e56 100755
--- a/jstorm-core/src/main/java/backtype/storm/topology/base/BaseBasicBolt.java
+++ b/jstorm-core/src/main/java/backtype/storm/topology/base/BaseBasicBolt.java
@@ -29,5 +29,5 @@ public abstract class BaseBasicBolt extends BaseComponent implements IBasicBolt
 
     @Override
     public void cleanup() {
-    }    
+    }
 }
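
BaseBasicBolt above leaves only execute and declareOutputFields to implement; anchoring and acking happen automatically, as the setBolt(IBasicBolt) javadoc earlier notes. A minimal sketch of a subclass (the sentence-splitting logic is purely illustrative):

    import backtype.storm.topology.BasicOutputCollector;
    import backtype.storm.topology.OutputFieldsDeclarer;
    import backtype.storm.topology.base.BaseBasicBolt;
    import backtype.storm.tuple.Fields;
    import backtype.storm.tuple.Tuple;
    import backtype.storm.tuple.Values;

    public class SplitSentenceBolt extends BaseBasicBolt {
        @Override
        public void execute(Tuple input, BasicOutputCollector collector) {
            // Each emit is anchored to the input, and the input is acked for us.
            for (String word : input.getString(0).split(" ")) {
                collector.emit(new Values(word));
            }
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            declarer.declare(new Fields("word"));
        }
    }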

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/topology/base/BaseBatchBolt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/topology/base/BaseBatchBolt.java b/jstorm-core/src/main/java/backtype/storm/topology/base/BaseBatchBolt.java
index 3206941..43d21a3 100755
--- a/jstorm-core/src/main/java/backtype/storm/topology/base/BaseBatchBolt.java
+++ b/jstorm-core/src/main/java/backtype/storm/topology/base/BaseBatchBolt.java
@@ -21,5 +21,5 @@ import backtype.storm.coordination.IBatchBolt;
 import java.util.Map;
 
 public abstract class BaseBatchBolt<T> extends BaseComponent implements IBatchBolt<T> {
- 
+
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/topology/base/BaseComponent.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/topology/base/BaseComponent.java b/jstorm-core/src/main/java/backtype/storm/topology/base/BaseComponent.java
index 8afcdaa..1206abc 100755
--- a/jstorm-core/src/main/java/backtype/storm/topology/base/BaseComponent.java
+++ b/jstorm-core/src/main/java/backtype/storm/topology/base/BaseComponent.java
@@ -24,5 +24,5 @@ public abstract class BaseComponent implements IComponent {
     @Override
     public Map<String, Object> getComponentConfiguration() {
         return null;
-    }    
+    }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/topology/base/BaseOpaquePartitionedTransactionalSpout.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/topology/base/BaseOpaquePartitionedTransactionalSpout.java b/jstorm-core/src/main/java/backtype/storm/topology/base/BaseOpaquePartitionedTransactionalSpout.java
index 2d20a48..64c3887 100755
--- a/jstorm-core/src/main/java/backtype/storm/topology/base/BaseOpaquePartitionedTransactionalSpout.java
+++ b/jstorm-core/src/main/java/backtype/storm/topology/base/BaseOpaquePartitionedTransactionalSpout.java
@@ -19,7 +19,6 @@ package backtype.storm.topology.base;
 
 import backtype.storm.transactional.partitioned.IOpaquePartitionedTransactionalSpout;
 
-
 public abstract class BaseOpaquePartitionedTransactionalSpout<T> extends BaseComponent implements IOpaquePartitionedTransactionalSpout<T> {
-    
+
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/topology/base/BaseRichBolt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/topology/base/BaseRichBolt.java b/jstorm-core/src/main/java/backtype/storm/topology/base/BaseRichBolt.java
index 266736e..ebf31eb 100755
--- a/jstorm-core/src/main/java/backtype/storm/topology/base/BaseRichBolt.java
+++ b/jstorm-core/src/main/java/backtype/storm/topology/base/BaseRichBolt.java
@@ -22,5 +22,5 @@ import backtype.storm.topology.IRichBolt;
 public abstract class BaseRichBolt extends BaseComponent implements IRichBolt {
     @Override
     public void cleanup() {
-    }    
+    }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/topology/base/BaseRichSpout.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/topology/base/BaseRichSpout.java b/jstorm-core/src/main/java/backtype/storm/topology/base/BaseRichSpout.java
index 37513b7..18f1f2c 100755
--- a/jstorm-core/src/main/java/backtype/storm/topology/base/BaseRichSpout.java
+++ b/jstorm-core/src/main/java/backtype/storm/topology/base/BaseRichSpout.java
@@ -24,7 +24,7 @@ package backtype.storm.topology.base;
 import backtype.storm.topology.IRichSpout;
 
 /**
- *
+ * 
  * @author nathan
  */
 public abstract class BaseRichSpout extends BaseComponent implements IRichSpout {

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/topology/base/BaseTransactionalBolt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/topology/base/BaseTransactionalBolt.java b/jstorm-core/src/main/java/backtype/storm/topology/base/BaseTransactionalBolt.java
index b6451e9..246b3a3 100755
--- a/jstorm-core/src/main/java/backtype/storm/topology/base/BaseTransactionalBolt.java
+++ b/jstorm-core/src/main/java/backtype/storm/topology/base/BaseTransactionalBolt.java
@@ -20,5 +20,5 @@ package backtype.storm.topology.base;
 import backtype.storm.transactional.TransactionAttempt;
 
 public abstract class BaseTransactionalBolt extends BaseBatchBolt<TransactionAttempt> {
-    
+
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/transactional/ICommitter.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/transactional/ICommitter.java b/jstorm-core/src/main/java/backtype/storm/transactional/ICommitter.java
index 859bad2..0e91178 100755
--- a/jstorm-core/src/main/java/backtype/storm/transactional/ICommitter.java
+++ b/jstorm-core/src/main/java/backtype/storm/transactional/ICommitter.java
@@ -18,9 +18,8 @@
 package backtype.storm.transactional;
 
 /**
- * This marks an IBatchBolt within a transactional topology as a committer. This causes the 
- * finishBatch method to be called in order of the transactions.
+ * This marks an IBatchBolt within a transactional topology as a committer. This causes the finishBatch method to be called in the order of the transactions.
  */
 public interface ICommitter {
-    
+
 }
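
Since ICommitter is a pure marker interface, a batch bolt opts into ordered finishBatch calls simply by implementing it. A hedged sketch of a committing transactional bolt; the commented-out store call stands in for whatever external commit the bolt would actually perform:

    import java.util.Map;

    import backtype.storm.coordination.BatchOutputCollector;
    import backtype.storm.task.TopologyContext;
    import backtype.storm.topology.OutputFieldsDeclarer;
    import backtype.storm.topology.base.BaseTransactionalBolt;
    import backtype.storm.transactional.ICommitter;
    import backtype.storm.transactional.TransactionAttempt;
    import backtype.storm.tuple.Tuple;

    public class CountCommitterBolt extends BaseTransactionalBolt implements ICommitter {
        TransactionAttempt _attempt;
        int _count = 0;

        @Override
        public void prepare(Map conf, TopologyContext context,
                BatchOutputCollector collector, TransactionAttempt attempt) {
            _attempt = attempt;
        }

        @Override
        public void execute(Tuple tuple) {
            _count++;
        }

        @Override
        public void finishBatch() {
            // Runs in strict transaction order because of the ICommitter marker,
            // e.g.: store.commit(_attempt.getTransactionId(), _count);
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
        }
    }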

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/transactional/ICommitterTransactionalSpout.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/transactional/ICommitterTransactionalSpout.java b/jstorm-core/src/main/java/backtype/storm/transactional/ICommitterTransactionalSpout.java
index 5441ee2..1cd448c 100755
--- a/jstorm-core/src/main/java/backtype/storm/transactional/ICommitterTransactionalSpout.java
+++ b/jstorm-core/src/main/java/backtype/storm/transactional/ICommitterTransactionalSpout.java
@@ -20,12 +20,11 @@ package backtype.storm.transactional;
 import backtype.storm.task.TopologyContext;
 import java.util.Map;
 
-
 public interface ICommitterTransactionalSpout<X> extends ITransactionalSpout<X> {
     public interface Emitter extends ITransactionalSpout.Emitter {
         void commit(TransactionAttempt attempt);
-    } 
-    
+    }
+
     @Override
-    public Emitter getEmitter(Map conf, TopologyContext context);    
+    public Emitter getEmitter(Map conf, TopologyContext context);
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/transactional/ITransactionalSpout.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/transactional/ITransactionalSpout.java b/jstorm-core/src/main/java/backtype/storm/transactional/ITransactionalSpout.java
index 3207493..528eda7 100755
--- a/jstorm-core/src/main/java/backtype/storm/transactional/ITransactionalSpout.java
+++ b/jstorm-core/src/main/java/backtype/storm/transactional/ITransactionalSpout.java
@@ -26,69 +26,62 @@ import java.util.Map;
 public interface ITransactionalSpout<T> extends IComponent {
     public interface Coordinator<X> {
         /**
-         * Create metadata for this particular transaction id which has never
-         * been emitted before. The metadata should contain whatever is necessary
-         * to be able to replay the exact batch for the transaction at a later point.
+         * Create metadata for this particular transaction id which has never been emitted before. The metadata should contain whatever is necessary to be able
+         * to replay the exact batch for the transaction at a later point.
          * 
          * The metadata is stored in Zookeeper.
          * 
-         * Storm uses the Kryo serializations configured in the component configuration 
-         * for this spout to serialize and deserialize the metadata.
+         * Storm uses the Kryo serializations configured in the component configuration for this spout to serialize and deserialize the metadata.
          * 
          * @param txid The id of the transaction.
          * @param prevMetadata The metadata of the previous transaction
          * @return the metadata for this new transaction
          */
         X initializeTransaction(BigInteger txid, X prevMetadata);
-        
+
         /**
          * Returns true if it's ok to start a new transaction, false otherwise (will skip this transaction).
          * 
-         * You should sleep here if you want a delay between asking for the next transaction (this will be called 
-         * repeatedly in a loop).
+         * You should sleep here if you want a delay between asking for the next transaction (this will be called repeatedly in a loop).
          */
         boolean isReady();
-        
+
         /**
          * Release any resources from this coordinator.
          */
         void close();
     }
-    
+
     public interface Emitter<X> {
         /**
-         * Emit a batch for the specified transaction attempt and metadata for the transaction. The metadata
-         * was created by the Coordinator in the initializeTranaction method. This method must always emit
-         * the same batch of tuples across all tasks for the same transaction id.
+         * Emit a batch for the specified transaction attempt and metadata for the transaction. The metadata was created by the Coordinator in the
+         * initializeTransaction method. This method must always emit the same batch of tuples across all tasks for the same transaction id.
          * 
          * The first field of all emitted tuples must contain the provided TransactionAttempt.
          * 
          */
         void emitBatch(TransactionAttempt tx, X coordinatorMeta, BatchOutputCollector collector);
-        
+
         /**
-         * Any state for transactions prior to the provided transaction id can be safely cleaned up, so this
-         * method should clean up that state.
+         * Any state for transactions prior to the provided transaction id can be safely cleaned up, so this method should clean up that state.
          */
         void cleanupBefore(BigInteger txid);
-        
+
         /**
          * Release any resources held by this emitter.
          */
         void close();
     }
-    
+
     /**
-     * The coordinator for a TransactionalSpout runs in a single thread and indicates when batches
-     * of tuples should be emitted and when transactions should commit. The Coordinator that you provide 
-     * in a TransactionalSpout provides metadata for each transaction so that the transactions can be replayed.
+     * The coordinator for a TransactionalSpout runs in a single thread and indicates when batches of tuples should be emitted and when transactions should
+     * commit. The Coordinator that you provide in a TransactionalSpout provides metadata for each transaction so that the transactions can be replayed.
      */
     Coordinator<T> getCoordinator(Map conf, TopologyContext context);
 
     /**
-     * The emitter for a TransactionalSpout runs as many tasks across the cluster. Emitters are responsible for
-     * emitting batches of tuples for a transaction and must ensure that the same batch of tuples is always
-     * emitted for the same transaction id.
-     */    
+     * The emitter for a TransactionalSpout runs as multiple tasks across the cluster. Emitters are responsible for emitting batches of tuples for a transaction and
+     * must ensure that the same batch of tuples is always emitted for the same transaction id.
+     */
     Emitter<T> getEmitter(Map conf, TopologyContext context);
 }
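
The Coordinator contract above is easiest to see with a concrete metadata type: whatever initializeTransaction returns is stored in Zookeeper and later handed to the emitters so the exact batch can be replayed. A sketch where the metadata is simply a start offset into an append-only source (the batch size and names are illustrative, not from this commit):

    import java.math.BigInteger;

    import backtype.storm.transactional.ITransactionalSpout;

    public class OffsetCoordinator implements ITransactionalSpout.Coordinator<Long> {
        private static final long BATCH_SIZE = 100;

        @Override
        public Long initializeTransaction(BigInteger txid, Long prevMetadata) {
            // First transaction starts at offset 0; each later one starts where the
            // previous batch ended. Replaying a txid re-reads exactly the same slice.
            return prevMetadata == null ? 0L : prevMetadata + BATCH_SIZE;
        }

        @Override
        public boolean isReady() {
            return true; // sleep here instead to throttle how fast transactions start
        }

        @Override
        public void close() {
        }
    }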

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/transactional/TransactionAttempt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/transactional/TransactionAttempt.java b/jstorm-core/src/main/java/backtype/storm/transactional/TransactionAttempt.java
index 80bbb0e..e64a2d7 100755
--- a/jstorm-core/src/main/java/backtype/storm/transactional/TransactionAttempt.java
+++ b/jstorm-core/src/main/java/backtype/storm/transactional/TransactionAttempt.java
@@ -22,22 +22,21 @@ import java.math.BigInteger;
 public class TransactionAttempt {
     BigInteger _txid;
     long _attemptId;
-    
-    
+
     // for kryo compatibility
     public TransactionAttempt() {
-        
+
     }
-    
+
     public TransactionAttempt(BigInteger txid, long attemptId) {
         _txid = txid;
         _attemptId = attemptId;
     }
-    
+
     public BigInteger getTransactionId() {
         return _txid;
     }
-    
+
     public long getAttemptId() {
         return _attemptId;
     }
@@ -49,7 +48,8 @@ public class TransactionAttempt {
 
     @Override
     public boolean equals(Object o) {
-        if(!(o instanceof TransactionAttempt)) return false;
+        if (!(o instanceof TransactionAttempt))
+            return false;
         TransactionAttempt other = (TransactionAttempt) o;
         return _txid.equals(other._txid) && _attemptId == other._attemptId;
     }
@@ -57,5 +57,5 @@ public class TransactionAttempt {
     @Override
     public String toString() {
         return "" + _txid + ":" + _attemptId;
-    }    
+    }
 }
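
TransactionAttempt's identity combines both fields: replays of the same transaction share _txid but get a fresh _attemptId, so the equals above distinguishes them. An illustration:

    TransactionAttempt a1 = new TransactionAttempt(BigInteger.valueOf(7), 1);
    TransactionAttempt a2 = new TransactionAttempt(BigInteger.valueOf(7), 2);
    // Same transaction, different attempt: a1.equals(a2) is false, which is how
    // TransactionalSpoutBatchExecutor below detects stale commit requests.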

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/transactional/TransactionalSpoutBatchExecutor.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/transactional/TransactionalSpoutBatchExecutor.java b/jstorm-core/src/main/java/backtype/storm/transactional/TransactionalSpoutBatchExecutor.java
index 53aacae..9bcd75d 100755
--- a/jstorm-core/src/main/java/backtype/storm/transactional/TransactionalSpoutBatchExecutor.java
+++ b/jstorm-core/src/main/java/backtype/storm/transactional/TransactionalSpoutBatchExecutor.java
@@ -31,18 +31,18 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class TransactionalSpoutBatchExecutor implements IRichBolt {
-    public static Logger LOG = LoggerFactory.getLogger(TransactionalSpoutBatchExecutor.class);    
+    public static Logger LOG = LoggerFactory.getLogger(TransactionalSpoutBatchExecutor.class);
 
     BatchOutputCollectorImpl _collector;
     ITransactionalSpout _spout;
     ITransactionalSpout.Emitter _emitter;
-    
+
     TreeMap<BigInteger, TransactionAttempt> _activeTransactions = new TreeMap<BigInteger, TransactionAttempt>();
 
     public TransactionalSpoutBatchExecutor(ITransactionalSpout spout) {
         _spout = spout;
     }
-    
+
     @Override
     public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
         _collector = new BatchOutputCollectorImpl(collector);
@@ -53,27 +53,27 @@ public class TransactionalSpoutBatchExecutor implements IRichBolt {
     public void execute(Tuple input) {
         TransactionAttempt attempt = (TransactionAttempt) input.getValue(0);
         try {
-            if(input.getSourceStreamId().equals(TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID)) {
-                if(attempt.equals(_activeTransactions.get(attempt.getTransactionId()))) {
+            if (input.getSourceStreamId().equals(TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID)) {
+                if (attempt.equals(_activeTransactions.get(attempt.getTransactionId()))) {
                     ((ICommitterTransactionalSpout.Emitter) _emitter).commit(attempt);
                     _activeTransactions.remove(attempt.getTransactionId());
                     _collector.ack(input);
                 } else {
                     _collector.fail(input);
                 }
-            } else { 
+            } else {
                 _emitter.emitBatch(attempt, input.getValue(1), _collector);
                 _activeTransactions.put(attempt.getTransactionId(), attempt);
                 _collector.ack(input);
                 BigInteger committed = (BigInteger) input.getValue(2);
-                if(committed!=null) {
-                    // valid to delete before what's been committed since 
+                if (committed != null) {
+                    // valid to delete before what's been committed since
                     // those batches will never be accessed again
                     _activeTransactions.headMap(committed).clear();
                     _emitter.cleanupBefore(committed);
                 }
             }
-        } catch(FailedException e) {
+        } catch (FailedException e) {
             LOG.warn("Failed to emit batch for transaction", e);
             _collector.fail(input);
         }


http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/ZookeeperAuthInfo.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/ZookeeperAuthInfo.java b/jstorm-core/src/main/java/backtype/storm/utils/ZookeeperAuthInfo.java
index d972135..24166de 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/ZookeeperAuthInfo.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/ZookeeperAuthInfo.java
@@ -21,11 +21,10 @@ import backtype.storm.Config;
 import java.io.UnsupportedEncodingException;
 import java.util.Map;
 
-
 public class ZookeeperAuthInfo {
     public String scheme;
     public byte[] payload = null;
-    
+
     public ZookeeperAuthInfo(Map conf) {
         String scheme = (String) conf.get(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_SCHEME);
         String payload = (String) conf.get(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD);
@@ -34,9 +33,9 @@ public class ZookeeperAuthInfo {
             scheme = (String) conf.get(Config.STORM_ZOOKEEPER_AUTH_SCHEME);
             payload = (String) conf.get(Config.STORM_ZOOKEEPER_AUTH_PAYLOAD);
         }
-        if(scheme!=null) {
+        if (scheme != null) {
             this.scheme = scheme;
-            if(payload != null) {
+            if (payload != null) {
                 try {
                     this.payload = payload.getBytes("UTF-8");
                 } catch (UnsupportedEncodingException ex) {
@@ -45,7 +44,7 @@ public class ZookeeperAuthInfo {
             }
         }
     }
-    
+
     public ZookeeperAuthInfo(String scheme, byte[] payload) {
         this.scheme = scheme;
         this.payload = payload;
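
As the map-based constructor above shows, topology-specific auth settings win and the cluster-wide keys are the fallback, so either pair works in the conf map. A minimal sketch (the digest credentials are illustrative):

    Map conf = new HashMap();
    conf.put(Config.STORM_ZOOKEEPER_AUTH_SCHEME, "digest");
    conf.put(Config.STORM_ZOOKEEPER_AUTH_PAYLOAD, "user:password");

    ZookeeperAuthInfo auth = new ZookeeperAuthInfo(conf);
    // auth.scheme is "digest"; auth.payload is "user:password" as UTF-8 bytes.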

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/ZookeeperServerCnxnFactory.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/ZookeeperServerCnxnFactory.java b/jstorm-core/src/main/java/backtype/storm/utils/ZookeeperServerCnxnFactory.java
index 08a763a..f0e8f9d 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/ZookeeperServerCnxnFactory.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/ZookeeperServerCnxnFactory.java
@@ -27,58 +27,58 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class ZookeeperServerCnxnFactory {
-	private static final Logger LOG = LoggerFactory.getLogger(ZookeeperServerCnxnFactory.class);
-	int _port;
-	NIOServerCnxnFactory _factory;
-	
-	public ZookeeperServerCnxnFactory(int port, int maxClientCnxns)  {
-		//port range
-		int max;
-		if (port <= 0) {
-			_port = 2000;
-			max = 65535;
-		} else {
-			_port = port;
-			max = port;
-		}
+    private static final Logger LOG = LoggerFactory.getLogger(ZookeeperServerCnxnFactory.class);
+    int _port;
+    NIOServerCnxnFactory _factory;
 
-		try {
-			_factory = new NIOServerCnxnFactory();
-		} catch (IOException e) {
-			_port = 0;
-			_factory = null;
-			e.printStackTrace();
-			throw new RuntimeException(e.getMessage());
-		}
-		
-		//look for available port 
-		for (; _port <= max; _port++) {
-			try {
-				_factory.configure(new InetSocketAddress(_port), maxClientCnxns);
-				LOG.debug("Zookeeper server successfully binded at port "+_port);
-				break;
-			} catch (BindException e1) {
-			} catch (IOException e2) {
-				_port = 0;
-				_factory = null;
-				e2.printStackTrace();
-				throw new RuntimeException(e2.getMessage());
-			} 
-		} 		
+    public ZookeeperServerCnxnFactory(int port, int maxClientCnxns) {
+        // port range
+        int max;
+        if (port <= 0) {
+            _port = 2000;
+            max = 65535;
+        } else {
+            _port = port;
+            max = port;
+        }
 
-		if (_port > max) {
-			_port = 0;
-			_factory = null;
-			LOG.error("Failed to find a port for Zookeeper");
-			throw new RuntimeException("No port is available to launch an inprocess zookeeper.");
-		}
-	}
-	
-	public int port() {
-		return _port;
-	}
-		
-	public NIOServerCnxnFactory factory() {
-		return _factory;
-	}
+        try {
+            _factory = new NIOServerCnxnFactory();
+        } catch (IOException e) {
+            _port = 0;
+            _factory = null;
+            e.printStackTrace();
+            throw new RuntimeException(e.getMessage());
+        }
+
+        // look for available port
+        for (; _port <= max; _port++) {
+            try {
+                _factory.configure(new InetSocketAddress(_port), maxClientCnxns);
+                LOG.debug("Zookeeper server successfully bound at port " + _port);
+                break;
+            } catch (BindException e1) {
+            } catch (IOException e2) {
+                _port = 0;
+                _factory = null;
+                e2.printStackTrace();
+                throw new RuntimeException(e2.getMessage());
+            }
+        }
+
+        if (_port > max) {
+            _port = 0;
+            _factory = null;
+            LOG.error("Failed to find a port for Zookeeper");
+            throw new RuntimeException("No port is available to launch an inprocess zookeeper.");
+        }
+    }
+
+    public int port() {
+        return _port;
+    }
+
+    public NIOServerCnxnFactory factory() {
+        return _factory;
+    }
 }
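
The constructor above scans upward from port 2000 when given a non-positive port (or pins itself to the given one) and wraps the resulting NIOServerCnxnFactory. A hedged usage sketch for an in-process ZooKeeper; zkServer stands for a separately configured ZooKeeperServer instance:

    // Port <= 0 asks for any free port via the 2000..65535 scan above.
    ZookeeperServerCnxnFactory factory = new ZookeeperServerCnxnFactory(0, 100);
    int boundPort = factory.port();

    // factory.factory().startup(zkServer);   // hand the wrapped factory a server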

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/disruptor/AbstractSequencerExt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/disruptor/AbstractSequencerExt.java b/jstorm-core/src/main/java/backtype/storm/utils/disruptor/AbstractSequencerExt.java
index c7199c6..7a1e18a 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/disruptor/AbstractSequencerExt.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/disruptor/AbstractSequencerExt.java
@@ -22,17 +22,17 @@ import com.lmax.disruptor.WaitStrategy;
 
 public abstract class AbstractSequencerExt extends AbstractSequencer {
     private static boolean waitSleep = true;
-    
+
     public static boolean isWaitSleep() {
         return waitSleep;
     }
-    
+
     public static void setWaitSleep(boolean waitSleep) {
         AbstractSequencerExt.waitSleep = waitSleep;
     }
-    
+
     public AbstractSequencerExt(int bufferSize, WaitStrategy waitStrategy) {
         super(bufferSize, waitStrategy);
     }
-    
+
 }
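
AbstractSequencerExt adds a process-wide switch, consumed in MultiProducerSequencer.next below, that chooses between sleeping and busy-spinning when the ring is full. An illustrative toggle:

    // Default is sleep-based backoff; switch to busy-spin for latency-sensitive runs.
    AbstractSequencerExt.setWaitSleep(false);
    assert !AbstractSequencerExt.isWaitSleep();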

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/disruptor/MultiProducerSequencer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/disruptor/MultiProducerSequencer.java b/jstorm-core/src/main/java/backtype/storm/utils/disruptor/MultiProducerSequencer.java
index cb5d7f9..2bcfdec 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/disruptor/MultiProducerSequencer.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/disruptor/MultiProducerSequencer.java
@@ -33,19 +33,19 @@ import com.lmax.disruptor.util.Util;
  * Suitable for use for sequencing across multiple publisher threads.
  */
 public class MultiProducerSequencer extends AbstractSequencerExt {
-    
+
     private static final Unsafe UNSAFE = Util.getUnsafe();
     private static final long BASE = UNSAFE.arrayBaseOffset(int[].class);
     private static final long SCALE = UNSAFE.arrayIndexScale(int[].class);
-    
+
     private final Sequence gatingSequenceCache = new Sequence(Sequencer.INITIAL_CURSOR_VALUE);
-    
+
     // availableBuffer tracks the state of each ringbuffer slot
     // see below for more details on the approach
     private final int[] availableBuffer;
     private final int indexMask;
     private final int indexShift;
-    
+
     /**
      * Construct a Sequencer with the selected wait strategy and buffer size.
      * 
@@ -59,7 +59,7 @@ public class MultiProducerSequencer extends AbstractSequencerExt {
         indexShift = Util.log2(bufferSize);
         initialiseAvailableBuffer();
     }
-    
+
     /**
      * @see Sequencer#hasAvailableCapacity(int)
      */
@@ -67,23 +67,23 @@ public class MultiProducerSequencer extends AbstractSequencerExt {
     public boolean hasAvailableCapacity(final int requiredCapacity) {
         return hasAvailableCapacity(gatingSequences, requiredCapacity, cursor.get());
     }
-    
+
     private boolean hasAvailableCapacity(Sequence[] gatingSequences, final int requiredCapacity, long cursorValue) {
         long wrapPoint = (cursorValue + requiredCapacity) - bufferSize;
         long cachedGatingSequence = gatingSequenceCache.get();
-        
+
         if (wrapPoint > cachedGatingSequence || cachedGatingSequence > cursorValue) {
             long minSequence = Util.getMinimumSequence(gatingSequences, cursorValue);
             gatingSequenceCache.set(minSequence);
-            
+
             if (wrapPoint > minSequence) {
                 return false;
             }
         }
-        
+
         return true;
     }
-    
+
     /**
      * @see Sequencer#claim(long)
      */
@@ -91,7 +91,7 @@ public class MultiProducerSequencer extends AbstractSequencerExt {
     public void claim(long sequence) {
         cursor.set(sequence);
     }
-    
+
     /**
      * @see Sequencer#next()
      */
@@ -99,7 +99,7 @@ public class MultiProducerSequencer extends AbstractSequencerExt {
     public long next() {
         return next(1);
     }
-    
+
     /**
      * @see Sequencer#next(int)
      */
@@ -108,20 +108,20 @@ public class MultiProducerSequencer extends AbstractSequencerExt {
         if (n < 1) {
             throw new IllegalArgumentException("n must be > 0");
         }
-        
+
         long current;
         long next;
-        
+
         do {
             current = cursor.get();
             next = current + n;
-            
+
             long wrapPoint = next - bufferSize;
             long cachedGatingSequence = gatingSequenceCache.get();
-            
+
             if (wrapPoint > cachedGatingSequence || cachedGatingSequence > current) {
                 long gatingSequence = Util.getMinimumSequence(gatingSequences, current);
-                
+
                 if (wrapPoint > gatingSequence) {
                     if (AbstractSequencerExt.isWaitSleep()) {
                         try {
@@ -133,16 +133,16 @@ public class MultiProducerSequencer extends AbstractSequencerExt {
                     }
                     continue;
                 }
-                
+
                 gatingSequenceCache.set(gatingSequence);
             } else if (cursor.compareAndSet(current, next)) {
                 break;
             }
         } while (true);
-        
+
         return next;
     }
-    
+
     /**
      * @see Sequencer#tryNext()
      */
@@ -150,7 +150,7 @@ public class MultiProducerSequencer extends AbstractSequencerExt {
     public long tryNext() throws InsufficientCapacityException {
         return tryNext(1);
     }
-    
+
     /**
      * @see Sequencer#tryNext(int)
      */
@@ -159,22 +159,22 @@ public class MultiProducerSequencer extends AbstractSequencerExt {
         if (n < 1) {
             throw new IllegalArgumentException("n must be > 0");
         }
-        
+
         long current;
         long next;
-        
+
         do {
             current = cursor.get();
             next = current + n;
-            
+
             if (!hasAvailableCapacity(gatingSequences, n, current)) {
                 throw InsufficientCapacityException.INSTANCE;
             }
         } while (!cursor.compareAndSet(current, next));
-        
+
         return next;
     }
-    
+
     /**
      * @see Sequencer#remainingCapacity()
      */
@@ -184,15 +184,15 @@ public class MultiProducerSequencer extends AbstractSequencerExt {
         long produced = cursor.get();
         return getBufferSize() - (produced - consumed);
     }
-    
+
     private void initialiseAvailableBuffer() {
         for (int i = availableBuffer.length - 1; i != 0; i--) {
             setAvailableBufferValue(i, -1);
         }
-        
+
         setAvailableBufferValue(0, -1);
     }
-    
+
     /**
      * @see Sequencer#publish(long)
      */
@@ -201,7 +201,7 @@ public class MultiProducerSequencer extends AbstractSequencerExt {
         setAvailable(sequence);
         waitStrategy.signalAllWhenBlocking();
     }
-    
+
     /**
      * @see Sequencer#publish(long, long)
      */
@@ -212,7 +212,7 @@ public class MultiProducerSequencer extends AbstractSequencerExt {
         }
         waitStrategy.signalAllWhenBlocking();
     }
-    
+
     /**
      * The below methods work on the availableBuffer flag.
      * 
@@ -229,12 +229,12 @@ public class MultiProducerSequencer extends AbstractSequencerExt {
     private void setAvailable(final long sequence) {
         setAvailableBufferValue(calculateIndex(sequence), calculateAvailabilityFlag(sequence));
     }
-    
+
     private void setAvailableBufferValue(int index, int flag) {
         long bufferAddress = (index * SCALE) + BASE;
         UNSAFE.putOrderedInt(availableBuffer, bufferAddress, flag);
     }
-    
+
     /**
      * @see Sequencer#isAvailable(long)
      */
@@ -245,7 +245,7 @@ public class MultiProducerSequencer extends AbstractSequencerExt {
         long bufferAddress = (index * SCALE) + BASE;
         return UNSAFE.getIntVolatile(availableBuffer, bufferAddress) == flag;
     }
-    
+
     @Override
     public long getHighestPublishedSequence(long lowerBound, long availableSequence) {
         for (long sequence = lowerBound; sequence <= availableSequence; sequence++) {
@@ -253,14 +253,14 @@ public class MultiProducerSequencer extends AbstractSequencerExt {
                 return sequence - 1;
             }
         }
-        
+
         return availableSequence;
     }
-    
+
     private int calculateAvailabilityFlag(final long sequence) {
         return (int) (sequence >>> indexShift);
     }
-    
+
     private int calculateIndex(final long sequence) {
         return ((int) sequence) & indexMask;
     }
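
The two helpers above carry the whole availableBuffer scheme: the low bits of a sequence pick the slot (indexMask = bufferSize - 1) and the high bits form a lap counter (indexShift = log2(bufferSize)) recording which trip around the ring last wrote that slot. A worked example, assuming bufferSize = 8:

    // bufferSize = 8  =>  indexMask = 7, indexShift = 3
    long sequence = 13;
    int index = ((int) sequence) & 7;    // 13 & 7   = 5 -> slot 5 of the ring
    int flag  = (int) (sequence >>> 3);  // 13 >>> 3 = 1 -> second lap around the ring
    // isAvailable(13) holds once availableBuffer[5] has been set to 1 by publish().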

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/disruptor/RingBuffer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/disruptor/RingBuffer.java b/jstorm-core/src/main/java/backtype/storm/utils/disruptor/RingBuffer.java
index da124f0..763294b 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/disruptor/RingBuffer.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/disruptor/RingBuffer.java
@@ -43,12 +43,12 @@ import backtype.storm.utils.disruptor.SingleProducerSequencer;
  */
 public class RingBuffer<E> implements Cursored, DataProvider<E> {
     public static final long INITIAL_CURSOR_VALUE = -1L;
-    
+
     private final int indexMask;
     private final Object[] entries;
     private final int bufferSize;
     private final Sequencer sequencer;
-    
+
     /**
      * Construct a RingBuffer with the full option set.
      * 
@@ -59,19 +59,19 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
     public RingBuffer(EventFactory<E> eventFactory, Sequencer sequencer) {
         this.sequencer = sequencer;
         this.bufferSize = sequencer.getBufferSize();
-        
+
         if (bufferSize < 1) {
             throw new IllegalArgumentException("bufferSize must not be less than 1");
         }
         if (Integer.bitCount(bufferSize) != 1) {
             throw new IllegalArgumentException("bufferSize must be a power of 2");
         }
-        
+
         this.indexMask = bufferSize - 1;
         this.entries = new Object[sequencer.getBufferSize()];
         fill(eventFactory);
     }
-    
+
     /**
      * Create a new multiple producer RingBuffer with the specified wait strategy.
      * 
@@ -83,10 +83,10 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
      */
     public static <E> RingBuffer<E> createMultiProducer(EventFactory<E> factory, int bufferSize, WaitStrategy waitStrategy) {
         MultiProducerSequencer sequencer = new MultiProducerSequencer(bufferSize, waitStrategy);
-        
+
         return new RingBuffer<E>(factory, sequencer);
     }
-    
+
     /**
      * Create a new multiple producer RingBuffer using the default wait strategy {@link BlockingWaitStrategy}.
      * 
@@ -98,7 +98,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
     public static <E> RingBuffer<E> createMultiProducer(EventFactory<E> factory, int bufferSize) {
         return createMultiProducer(factory, bufferSize, new BlockingWaitStrategy());
     }
-    
+
     /**
      * Create a new single producer RingBuffer with the specified wait strategy.
      * 
@@ -110,10 +110,10 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
      */
     public static <E> RingBuffer<E> createSingleProducer(EventFactory<E> factory, int bufferSize, WaitStrategy waitStrategy) {
         SingleProducerSequencer sequencer = new SingleProducerSequencer(bufferSize, waitStrategy);
-        
+
         return new RingBuffer<E>(factory, sequencer);
     }
-    
+
     /**
      * Create a new single producer RingBuffer using the default wait strategy {@link BlockingWaitStrategy}.
      * 
@@ -125,7 +125,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
     public static <E> RingBuffer<E> createSingleProducer(EventFactory<E> factory, int bufferSize) {
         return createSingleProducer(factory, bufferSize, new BlockingWaitStrategy());
     }
-    
+
     /**
      * Create a new Ring Buffer with the specified producer type (SINGLE or MULTI)
      * 
@@ -145,7 +145,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
             throw new IllegalStateException(producerType.toString());
         }
     }
-    
+
     /**
      * <p>
      * Get the event for a given sequence in the RingBuffer.
@@ -168,7 +168,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
     public E get(long sequence) {
         return (E) entries[(int) sequence & indexMask];
     }
-    
+
     /**
      * @deprecated Use {@link RingBuffer#get(long)}
      */
@@ -176,7 +176,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
     public E getPreallocated(long sequence) {
         return get(sequence);
     }
-    
+
     /**
      * @deprecated Use {@link RingBuffer#get(long)}
      */
@@ -184,7 +184,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
     public E getPublished(long sequence) {
         return get(sequence);
     }
-    
+
     /**
      * Increment and return the next sequence for the ring buffer. Calls of this method should ensure that they always publish the sequence afterward. E.g.
      * 
@@ -205,7 +205,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
     public long next() {
         return sequencer.next();
     }
-    
+
     /**
      * The same functionality as {@link RingBuffer#next()}, but allows the caller to claim the next n sequences.
      * 
@@ -216,7 +216,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
     public long next(int n) {
         return sequencer.next(n);
     }
-    
+
     /**
      * <p>
      * Increment and return the next sequence for the ring buffer. Calls of this method should ensure that they always publish the sequence afterward. E.g.
@@ -242,7 +242,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
     public long tryNext() throws InsufficientCapacityException {
         return sequencer.tryNext();
     }
-    
+
     /**
      * The same functionality as {@link RingBuffer#tryNext()}, but allows the caller to attempt to claim the next n sequences.
      * 
@@ -253,7 +253,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
     public long tryNext(int n) throws InsufficientCapacityException {
         return sequencer.tryNext(n);
     }
-    
+
     /**
      * Resets the cursor to a specific value. This can be applied at any time, but it is worth noting that it is a racy thing to do and should only be used in
      * controlled circumstances. E.g. during initialisation.
@@ -265,7 +265,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
         sequencer.claim(sequence);
         sequencer.publish(sequence);
     }
-    
+
     /**
      * Sets the cursor to a specific sequence and returns the preallocated entry that is stored there. This is another deliberately racy call, that should only
      * be done in controlled circumstances, e.g. initialisation.
@@ -277,7 +277,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
         sequencer.claim(sequence);
         return get(sequence);
     }
-    
+
     /**
      * Determines if a particular entry has been published.
      * 
@@ -287,7 +287,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
     public boolean isPublished(long sequence) {
         return sequencer.isAvailable(sequence);
     }
-    
+
     /**
      * Add the specified gating sequences to this instance of the Disruptor. They will be safely and atomically added to the list of gating sequences.
      * 
@@ -296,7 +296,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
     public void addGatingSequences(Sequence... gatingSequences) {
         sequencer.addGatingSequences(gatingSequences);
     }
-    
+
     /**
      * Get the minimum sequence value from all of the gating sequences added to this ringBuffer.
      * 
@@ -305,7 +305,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
     public long getMinimumGatingSequence() {
         return sequencer.getMinimumSequence();
     }
-    
+
     /**
      * Remove the specified sequence from this ringBuffer.
      * 
@@ -315,7 +315,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
     public boolean removeGatingSequence(Sequence sequence) {
         return sequencer.removeGatingSequence(sequence);
     }
-    
+
     /**
      * Create a new SequenceBarrier to be used by an EventProcessor to track which messages are available to be read from the ring buffer given a list of
      * sequences to track.
@@ -327,7 +327,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
     public SequenceBarrier newBarrier(Sequence... sequencesToTrack) {
         return sequencer.newBarrier(sequencesToTrack);
     }
-    
+
     /**
      * Get the current cursor value for the ring buffer. The cursor value is the last value that was published, or the highest available sequence that can be
      * consumed.
@@ -335,14 +335,14 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
     public long getCursor() {
         return sequencer.getCursor();
     }
-    
+
     /**
      * The size of the buffer.
      */
     public int getBufferSize() {
         return bufferSize;
     }
-    
+
     /**
      * Given specified <tt>requiredCapacity</tt> determines if that amount of space is available. Note, you can not assume that if this method returns
      * <tt>true</tt> that a call to {@link RingBuffer#next()} will not block. Especially true if this ring buffer is set up to handle multiple producers.
@@ -353,7 +353,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
     public boolean hasAvailableCapacity(int requiredCapacity) {
         return sequencer.hasAvailableCapacity(requiredCapacity);
     }
-    
+
     /**
      * Publishes an event to the ring buffer. It handles claiming the next sequence, getting the current (uninitialised) event from the ring buffer and
      * publishing the claimed sequence after translation.
@@ -364,7 +364,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
         final long sequence = sequencer.next();
         translateAndPublish(translator, sequence);
     }
-    
+
     /**
      * Attempts to publish an event to the ring buffer. It handles claiming the next sequence, getting the current (uninitialised) event from the ring buffer
      * and publishing the claimed sequence after translation. Will return false if specified capacity was not available.
@@ -381,7 +381,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
             return false;
         }
     }
-    
+
     /**
      * Allows one user supplied argument.
      * 
@@ -393,7 +393,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
         final long sequence = sequencer.next();
         translateAndPublish(translator, sequence, arg0);
     }
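
Putting the publishEvent variants above together: the translator receives the preallocated slot plus the claimed sequence, so nothing is allocated on the hot path. A minimal single-producer sketch; ValueEvent is a hypothetical event type, not part of this commit:

    import com.lmax.disruptor.EventFactory;
    import com.lmax.disruptor.EventTranslatorOneArg;

    import backtype.storm.utils.disruptor.RingBuffer;

    public class RingBufferSketch {
        static class ValueEvent { long value; }

        public static void main(String[] args) {
            // Buffer size must be a power of 2 so get() can mask instead of mod.
            RingBuffer<ValueEvent> ring = RingBuffer.createSingleProducer(
                    new EventFactory<ValueEvent>() {
                        public ValueEvent newInstance() { return new ValueEvent(); }
                    }, 8);

            // Claim a sequence, translate into the preallocated slot, publish.
            ring.publishEvent(new EventTranslatorOneArg<ValueEvent, Long>() {
                public void translateTo(ValueEvent event, long sequence, Long arg0) {
                    event.value = arg0;
                }
            }, 42L);

            System.out.println(ring.get(0).value); // 42 (consumers would use a barrier)
        }
    }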
-    
+
     /**
      * Allows one user supplied argument.
      * 
@@ -411,7 +411,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
             return false;
         }
     }
-    
+
     /**
      * Allows two user supplied arguments.
      * 
@@ -424,7 +424,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
         final long sequence = sequencer.next();
         translateAndPublish(translator, sequence, arg0, arg1);
     }
-    
+
     /**
      * Allows two user supplied arguments.
      * 
@@ -443,7 +443,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
             return false;
         }
     }
-    
+
     /**
      * Allows three user supplied arguments
      * 
@@ -457,7 +457,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
         final long sequence = sequencer.next();
         translateAndPublish(translator, sequence, arg0, arg1, arg2);
     }
-    
+
     /**
      * Allows three user supplied arguments
      * 
@@ -477,7 +477,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
             return false;
         }
     }
-    
+
     /**
      * Allows a variable number of user supplied arguments
      * 
@@ -489,7 +489,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
         final long sequence = sequencer.next();
         translateAndPublish(translator, sequence, args);
     }
-    
+
     /**
      * Allows a variable number of user supplied arguments
      * 
@@ -507,7 +507,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
             return false;
         }
     }
-    
+
     /**
      * Publishes multiple events to the ring buffer. It handles claiming the next sequence, getting the current (uninitialised) event from the ring buffer and
      * publishing the claimed sequence after translation.
@@ -517,7 +517,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
     public void publishEvents(EventTranslator<E>[] translators) {
         publishEvents(translators, 0, translators.length);
     }
-    
+
     /**
      * Publishes multiple events to the ring buffer. It handles claiming the next sequence, getting the current (uninitialised) event from the ring buffer and
      * publishing the claimed sequence after translation.
@@ -531,7 +531,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
         final long finalSequence = sequencer.next(batchSize);
         translateAndPublishBatch(translators, batchStartsAt, batchSize, finalSequence);
     }
-    
+
     /**
      * Attempts to publish multiple events to the ring buffer. It handles claiming the next sequence, getting the current (uninitialised) event from the ring
      * buffer and publishing the claimed sequence after translation. Will return false if specified capacity was not available.
@@ -542,7 +542,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
     public boolean tryPublishEvents(EventTranslator<E>[] translators) {
         return tryPublishEvents(translators, 0, translators.length);
     }
-    
+
     /**
      * Attempts to publish multiple events to the ring buffer. It handles claiming the next sequence, getting the current (uninitialised) event from the ring
      * buffer and publishing the claimed sequence after translation. Will return false if specified capacity was not available.
@@ -562,18 +562,18 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
             return false;
         }
     }
-    
+
     /**
      * Allows one user supplied argument per event.
      * 
      * @param translator The user specified translation for the event
      * @param arg0 A user supplied argument.
-     * @see #publishEvents(com.lmax.disruptor.EventTranslator[])
+     * @see #publishEvents(EventTranslator[])
      */
     public <A> void publishEvents(EventTranslatorOneArg<E, A> translator, A[] arg0) {
         publishEvents(translator, 0, arg0.length, arg0);
     }
-    
+
     /**
      * Allows one user supplied argument per event.
      * 
@@ -588,19 +588,19 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
         final long finalSequence = sequencer.next(batchSize);
         translateAndPublishBatch(translator, arg0, batchStartsAt, batchSize, finalSequence);
     }
-    
+
     /**
      * Allows one user supplied argument.
      * 
      * @param translator The user specified translation for each event
      * @param arg0 An array of user supplied arguments, one element per event.
      * @return true if the value was published, false if there was insufficient capacity.
-     * @see #tryPublishEvents(com.lmax.disruptor.EventTranslator[])
+     * @see #tryPublishEvents(EventTranslator[])
      */
     public <A> boolean tryPublishEvents(EventTranslatorOneArg<E, A> translator, A[] arg0) {
         return tryPublishEvents(translator, 0, arg0.length, arg0);
     }
-    
+
     /**
      * Allows one user supplied argument.
      * 
@@ -621,19 +621,19 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
             return false;
         }
     }
-    
+
     /**
      * Allows two user supplied arguments per event.
      * 
      * @param translator The user specified translation for the event
      * @param arg0 An array of user supplied arguments, one element per event.
      * @param arg1 An array of user supplied arguments, one element per event.
-     * @see #publishEvents(com.lmax.disruptor.EventTranslator[])
+     * @see #publishEvents(EventTranslator[])
      */
     public <A, B> void publishEvents(EventTranslatorTwoArg<E, A, B> translator, A[] arg0, B[] arg1) {
         publishEvents(translator, 0, arg0.length, arg0, arg1);
     }
-    
+
     /**
      * Allows two user supplied arguments per event.
      * 
@@ -649,7 +649,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
         final long finalSequence = sequencer.next(batchSize);
         translateAndPublishBatch(translator, arg0, arg1, batchStartsAt, batchSize, finalSequence);
     }
-    
+
     /**
      * Allows two user supplied arguments per event.
      * 
@@ -657,12 +657,12 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
      * @param arg0 An array of user supplied arguments, one element per event.
      * @param arg1 An array of user supplied arguments, one element per event.
      * @return true if the value was published, false if there was insufficient capacity.
-     * @see #tryPublishEvents(com.lmax.disruptor.EventTranslator[])
+     * @see #tryPublishEvents(EventTranslator[])
      */
     public <A, B> boolean tryPublishEvents(EventTranslatorTwoArg<E, A, B> translator, A[] arg0, B[] arg1) {
         return tryPublishEvents(translator, 0, arg0.length, arg0, arg1);
     }
-    
+
     /**
      * Allows two user supplied arguments per event.
      * 
@@ -684,7 +684,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
             return false;
         }
     }
-    
+
     /**
      * Allows three user supplied arguments per event.
      * 
@@ -692,12 +692,12 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
      * @param arg0 An array of user supplied arguments, one element per event.
      * @param arg1 An array of user supplied arguments, one element per event.
      * @param arg2 An array of user supplied arguments, one element per event.
-     * @see #publishEvents(com.lmax.disruptor.EventTranslator[])
+     * @see #publishEvents(EventTranslator[])
      */
     public <A, B, C> void publishEvents(EventTranslatorThreeArg<E, A, B, C> translator, A[] arg0, B[] arg1, C[] arg2) {
         publishEvents(translator, 0, arg0.length, arg0, arg1, arg2);
     }
-    
+
     /**
      * Allows three user supplied arguments per event.
      * 
@@ -714,7 +714,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
         final long finalSequence = sequencer.next(batchSize);
         translateAndPublishBatch(translator, arg0, arg1, arg2, batchStartsAt, batchSize, finalSequence);
     }
-    
+
     /**
      * Allows three user supplied arguments per event.
      * 
@@ -723,12 +723,12 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
      * @param arg1 An array of user supplied arguments, one element per event.
      * @param arg2 An array of user supplied arguments, one element per event.
      * @return true if the value was published, false if there was insufficient capacity.
-     * @see #publishEvents(com.lmax.disruptor.EventTranslator[])
+     * @see #publishEvents(EventTranslator[])
      */
     public <A, B, C> boolean tryPublishEvents(EventTranslatorThreeArg<E, A, B, C> translator, A[] arg0, B[] arg1, C[] arg2) {
         return tryPublishEvents(translator, 0, arg0.length, arg0, arg1, arg2);
     }
-    
+
     /**
      * Allows three user supplied arguments per event.
      * 
@@ -751,18 +751,18 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
             return false;
         }
     }
-    
+
     /**
      * Allows a variable number of user supplied arguments per event.
      * 
      * @param translator The user specified translation for the event
      * @param args User supplied arguments, one Object[] per event.
-     * @see #publishEvents(com.lmax.disruptor.EventTranslator[])
+     * @see #publishEvents(EventTranslator[])
      */
     public void publishEvents(EventTranslatorVararg<E> translator, Object[]... args) {
         publishEvents(translator, 0, args.length, args);
     }
-    
+
     /**
      * Allows a variable number of user supplied arguments per event.
      * 
@@ -777,19 +777,19 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
         final long finalSequence = sequencer.next(batchSize);
         translateAndPublishBatch(translator, batchStartsAt, batchSize, finalSequence, args);
     }
-    
+
     /**
      * Allows a variable number of user supplied arguments per event.
      * 
      * @param translator The user specified translation for the event
      * @param args User supplied arguments, one Object[] per event.
      * @return true if the value was published, false if there was insufficient capacity.
-     * @see #publishEvents(com.lmax.disruptor.EventTranslator[])
+     * @see #publishEvents(EventTranslator[])
      */
     public boolean tryPublishEvents(EventTranslatorVararg<E> translator, Object[]... args) {
         return tryPublishEvents(translator, 0, args.length, args);
     }
-    
+
     /**
      * Allows a variable number of user supplied arguments per event.
      * 
@@ -810,7 +810,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
             return false;
         }
     }
-    
+
     /**
      * Publish the specified sequence. This action marks this particular message as being available to be read.
      * 
@@ -819,7 +819,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
     public void publish(long sequence) {
         sequencer.publish(sequence);
     }
-    
+
     /**
      * Publish the specified sequences. This action marks these particular messages as being available to be read.
      * 
@@ -830,7 +830,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
     public void publish(long lo, long hi) {
         sequencer.publish(lo, hi);
     }
-    
+
     /**
      * Get the remaining capacity for this ringBuffer.
      * 
@@ -839,49 +839,51 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
     public long remainingCapacity() {
         return sequencer.remainingCapacity();
     }
-    
+
     private void checkBounds(final EventTranslator<E>[] translators, final int batchStartsAt, final int batchSize) {
         checkBatchSizing(batchStartsAt, batchSize);
         batchOverRuns(translators, batchStartsAt, batchSize);
     }
-    
+
     private void checkBatchSizing(int batchStartsAt, int batchSize) {
         if (batchStartsAt < 0 || batchSize < 0) {
-            throw new IllegalArgumentException("Both batchStartsAt and batchSize must be positive but got: batchStartsAt " + batchStartsAt + " and batchSize " + batchSize);
+            throw new IllegalArgumentException("Both batchStartsAt and batchSize must be positive but got: batchStartsAt " + batchStartsAt + " and batchSize "
+                    + batchSize);
         } else if (batchSize > bufferSize) {
             throw new IllegalArgumentException("The ring buffer cannot accommodate " + batchSize + " it only has space for " + bufferSize + " entities.");
         }
     }
-    
+
     private <A> void checkBounds(final A[] arg0, final int batchStartsAt, final int batchSize) {
         checkBatchSizing(batchStartsAt, batchSize);
         batchOverRuns(arg0, batchStartsAt, batchSize);
     }
-    
+
     private <A, B> void checkBounds(final A[] arg0, final B[] arg1, final int batchStartsAt, final int batchSize) {
         checkBatchSizing(batchStartsAt, batchSize);
         batchOverRuns(arg0, batchStartsAt, batchSize);
         batchOverRuns(arg1, batchStartsAt, batchSize);
     }
-    
+
     private <A, B, C> void checkBounds(final A[] arg0, final B[] arg1, final C[] arg2, final int batchStartsAt, final int batchSize) {
         checkBatchSizing(batchStartsAt, batchSize);
         batchOverRuns(arg0, batchStartsAt, batchSize);
         batchOverRuns(arg1, batchStartsAt, batchSize);
         batchOverRuns(arg2, batchStartsAt, batchSize);
     }
-    
+
     private void checkBounds(final int batchStartsAt, final int batchSize, final Object[][] args) {
         checkBatchSizing(batchStartsAt, batchSize);
         batchOverRuns(args, batchStartsAt, batchSize);
     }
-    
+
     private <A> void batchOverRuns(final A[] arg0, final int batchStartsAt, final int batchSize) {
         if (batchStartsAt + batchSize > arg0.length) {
-            throw new IllegalArgumentException("A batchSize of: " + batchSize + " with batchStatsAt of: " + batchStartsAt + " will overrun the available number of arguments: " + (arg0.length - batchStartsAt));
+            throw new IllegalArgumentException("A batchSize of: " + batchSize + " with batchStatsAt of: " + batchStartsAt
+                    + " will overrun the available number of arguments: " + (arg0.length - batchStartsAt));
         }
     }
-    
+
     private void translateAndPublish(EventTranslator<E> translator, long sequence) {
         try {
             translator.translateTo(get(sequence), sequence);
@@ -889,7 +891,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
             sequencer.publish(sequence);
         }
     }
-    
+
     private <A> void translateAndPublish(EventTranslatorOneArg<E, A> translator, long sequence, A arg0) {
         try {
             translator.translateTo(get(sequence), sequence, arg0);
@@ -897,7 +899,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
             sequencer.publish(sequence);
         }
     }
-    
+
     private <A, B> void translateAndPublish(EventTranslatorTwoArg<E, A, B> translator, long sequence, A arg0, B arg1) {
         try {
             translator.translateTo(get(sequence), sequence, arg0, arg1);
@@ -905,7 +907,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
             sequencer.publish(sequence);
         }
     }
-    
+
     private <A, B, C> void translateAndPublish(EventTranslatorThreeArg<E, A, B, C> translator, long sequence, A arg0, B arg1, C arg2) {
         try {
             translator.translateTo(get(sequence), sequence, arg0, arg1, arg2);
@@ -913,7 +915,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
             sequencer.publish(sequence);
         }
     }
-    
+
     private void translateAndPublish(EventTranslatorVararg<E> translator, long sequence, Object... args) {
         try {
             translator.translateTo(get(sequence), sequence, args);
@@ -921,7 +923,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
             sequencer.publish(sequence);
         }
     }
-    
+
     private void translateAndPublishBatch(final EventTranslator<E>[] translators, int batchStartsAt, final int batchSize, final long finalSequence) {
         final long initialSequence = finalSequence - (batchSize - 1);
         try {
@@ -935,8 +937,9 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
             sequencer.publish(initialSequence, finalSequence);
         }
     }
-    
-    private <A> void translateAndPublishBatch(final EventTranslatorOneArg<E, A> translator, final A[] arg0, int batchStartsAt, final int batchSize, final long finalSequence) {
+
+    private <A> void translateAndPublishBatch(final EventTranslatorOneArg<E, A> translator, final A[] arg0, int batchStartsAt, final int batchSize,
+            final long finalSequence) {
         final long initialSequence = finalSequence - (batchSize - 1);
         try {
             long sequence = initialSequence;
@@ -948,8 +951,9 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
             sequencer.publish(initialSequence, finalSequence);
         }
     }
-    
-    private <A, B> void translateAndPublishBatch(final EventTranslatorTwoArg<E, A, B> translator, final A[] arg0, final B[] arg1, int batchStartsAt, int batchSize, final long finalSequence) {
+
+    private <A, B> void translateAndPublishBatch(final EventTranslatorTwoArg<E, A, B> translator, final A[] arg0, final B[] arg1, int batchStartsAt,
+            int batchSize, final long finalSequence) {
         final long initialSequence = finalSequence - (batchSize - 1);
         try {
             long sequence = initialSequence;
@@ -961,8 +965,9 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
             sequencer.publish(initialSequence, finalSequence);
         }
     }
-    
-    private <A, B, C> void translateAndPublishBatch(final EventTranslatorThreeArg<E, A, B, C> translator, final A[] arg0, final B[] arg1, final C[] arg2, int batchStartsAt, final int batchSize, final long finalSequence) {
+
+    private <A, B, C> void translateAndPublishBatch(final EventTranslatorThreeArg<E, A, B, C> translator, final A[] arg0, final B[] arg1, final C[] arg2,
+            int batchStartsAt, final int batchSize, final long finalSequence) {
         final long initialSequence = finalSequence - (batchSize - 1);
         try {
             long sequence = initialSequence;
@@ -974,8 +979,9 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
             sequencer.publish(initialSequence, finalSequence);
         }
     }
-    
-    private void translateAndPublishBatch(final EventTranslatorVararg<E> translator, int batchStartsAt, final int batchSize, final long finalSequence, final Object[][] args) {
+
+    private void translateAndPublishBatch(final EventTranslatorVararg<E> translator, int batchStartsAt, final int batchSize, final long finalSequence,
+            final Object[][] args) {
         final long initialSequence = finalSequence - (batchSize - 1);
         try {
             long sequence = initialSequence;
@@ -987,7 +993,7 @@ public class RingBuffer<E> implements Cursored, DataProvider<E> {
             sequencer.publish(initialSequence, finalSequence);
         }
     }
-    
+
     private void fill(EventFactory<E> eventFactory) {
         for (int i = 0; i < entries.length; i++) {
             entries[i] = eventFactory.newInstance();
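
The RingBuffer hunk above is pure reformatting (whitespace and shortened
javadoc @see links), but the publishEvents family it touches is the heart of
the API. A minimal usage sketch follows, assuming this fork keeps the upstream
Disruptor 3.x createSingleProducer factory and BlockingWaitStrategy, neither
of which appears in this diff; LongEvent is an illustrative type.

    import com.lmax.disruptor.BlockingWaitStrategy;
    import com.lmax.disruptor.EventTranslatorOneArg;

    public class RingBufferSketch {
        // Illustrative mutable event type, not part of the patch.
        static class LongEvent {
            long value;
        }

        public static void main(String[] args) {
            // Assumed factory; buffer size must be a power of two.
            RingBuffer<LongEvent> rb = RingBuffer.createSingleProducer(
                    LongEvent::new, 1024, new BlockingWaitStrategy());

            // The translator copies the argument into the claimed slot; the
            // buffer publishes the sequence in a finally block, so a slot is
            // never left unpublished after a translator failure.
            EventTranslatorOneArg<LongEvent, Long> translator =
                    (event, sequence, arg0) -> event.value = arg0;

            // Batch publish: claims arg0.length sequences, translates each
            // slot, then publishes the whole lo..hi range at once.
            Long[] batch = {1L, 2L, 3L};
            rb.publishEvents(translator, batch);

            // Non-blocking variant: returns false instead of waiting when
            // the buffer cannot hold the whole batch.
            boolean published = rb.tryPublishEvents(translator, batch);
        }
    }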

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/disruptor/SingleProducerSequencer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/disruptor/SingleProducerSequencer.java b/jstorm-core/src/main/java/backtype/storm/utils/disruptor/SingleProducerSequencer.java
index 5ca2724..ad0843b 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/disruptor/SingleProducerSequencer.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/disruptor/SingleProducerSequencer.java
@@ -40,9 +40,9 @@ public class SingleProducerSequencer extends AbstractSequencerExt {
         /** Set to -1 as sequence starting point */
         public long nextValue = -1L, cachedValue = -1L, p2, p3, p4, p5, p6, p7;
     }
-    
+
     private final Padding pad = new Padding();
-    
+
     /**
      * Construct a Sequencer with the selected wait strategy and buffer size.
      * 
@@ -52,29 +52,29 @@ public class SingleProducerSequencer extends AbstractSequencerExt {
     public SingleProducerSequencer(int bufferSize, final WaitStrategy waitStrategy) {
         super(bufferSize, waitStrategy);
     }
-    
+
     /**
      * @see Sequencer#hasAvailableCapacity(int)
      */
     @Override
     public boolean hasAvailableCapacity(final int requiredCapacity) {
         long nextValue = pad.nextValue;
-        
+
         long wrapPoint = (nextValue + requiredCapacity) - bufferSize;
         long cachedGatingSequence = pad.cachedValue;
-        
+
         if (wrapPoint > cachedGatingSequence || cachedGatingSequence > nextValue) {
             long minSequence = Util.getMinimumSequence(gatingSequences, nextValue);
             pad.cachedValue = minSequence;
-            
+
             if (wrapPoint > minSequence) {
                 return false;
             }
         }
-        
+
         return true;
     }
-    
+
     /**
      * @see Sequencer#next()
      */
@@ -82,7 +82,7 @@ public class SingleProducerSequencer extends AbstractSequencerExt {
     public long next() {
         return next(1);
     }
-    
+
     /**
      * @see Sequencer#next(int)
      */
@@ -91,13 +91,13 @@ public class SingleProducerSequencer extends AbstractSequencerExt {
         if (n < 1) {
             throw new IllegalArgumentException("n must be > 0");
         }
-        
+
         long nextValue = pad.nextValue;
-        
+
         long nextSequence = nextValue + n;
         long wrapPoint = nextSequence - bufferSize;
         long cachedGatingSequence = pad.cachedValue;
-        
+
         if (wrapPoint > cachedGatingSequence || cachedGatingSequence > nextValue) {
             long minSequence;
             while (wrapPoint > (minSequence = Util.getMinimumSequence(gatingSequences, nextValue))) {
@@ -110,15 +110,15 @@ public class SingleProducerSequencer extends AbstractSequencerExt {
                     LockSupport.parkNanos(1);
                 }
             }
-            
+
             pad.cachedValue = minSequence;
         }
-        
+
         pad.nextValue = nextSequence;
-        
+
         return nextSequence;
     }
-    
+
     /**
      * @see Sequencer#tryNext()
      */
@@ -126,7 +126,7 @@ public class SingleProducerSequencer extends AbstractSequencerExt {
     public long tryNext() throws InsufficientCapacityException {
         return tryNext(1);
     }
-    
+
     /**
      * @see Sequencer#tryNext(int)
      */
@@ -135,28 +135,28 @@ public class SingleProducerSequencer extends AbstractSequencerExt {
         if (n < 1) {
             throw new IllegalArgumentException("n must be > 0");
         }
-        
+
         if (!hasAvailableCapacity(n)) {
             throw InsufficientCapacityException.INSTANCE;
         }
-        
+
         long nextSequence = pad.nextValue += n;
-        
+
         return nextSequence;
     }
-    
+
     /**
      * @see Sequencer#remainingCapacity()
      */
     @Override
     public long remainingCapacity() {
         long nextValue = pad.nextValue;
-        
+
         long consumed = Util.getMinimumSequence(gatingSequences, nextValue);
         long produced = nextValue;
         return getBufferSize() - (produced - consumed);
     }
-    
+
     /**
      * @see Sequencer#claim(long)
      */
@@ -164,7 +164,7 @@ public class SingleProducerSequencer extends AbstractSequencerExt {
     public void claim(long sequence) {
         pad.nextValue = sequence;
     }
-    
+
     /**
      * @see Sequencer#publish(long)
      */
@@ -173,7 +173,7 @@ public class SingleProducerSequencer extends AbstractSequencerExt {
         cursor.set(sequence);
         waitStrategy.signalAllWhenBlocking();
     }
-    
+
     /**
      * @see Sequencer#publish(long, long)
      */
@@ -181,7 +181,7 @@ public class SingleProducerSequencer extends AbstractSequencerExt {
     public void publish(long lo, long hi) {
         publish(hi);
     }
-    
+
     /**
      * @see Sequencer#isAvailable(long)
      */
@@ -189,7 +189,7 @@ public class SingleProducerSequencer extends AbstractSequencerExt {
     public boolean isAvailable(long sequence) {
         return sequence <= cursor.get();
     }
-    
+
     @Override
     public long getHighestPublishedSequence(long lowerBound, long availableSequence) {
         return availableSequence;
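
The Padding holder above keeps nextValue/cachedValue on their own cache line
(the unused p2..p7 longs) to avoid false sharing with other hot fields. The
capacity check in next(n) is easiest to see with concrete numbers; this is a
standalone sketch of the wrap-point arithmetic with variable names mirroring
the patch, not the class itself.

    public class WrapPointDemo {
        public static void main(String[] args) {
            int bufferSize = 8;
            long nextValue = 13;          // last sequence claimed by the producer
            long minGatingSequence = 6;   // slowest consumer has finished slot 6

            int n = 2;                    // producer wants 2 more slots
            long wrapPoint = (nextValue + n) - bufferSize;  // = 7

            // wrapPoint (7) > minGatingSequence (6): claiming now would
            // overwrite a slot the consumer has not read, so next(n) parks
            // (LockSupport.parkNanos) until consumers catch up.
            System.out.println(wrapPoint > minGatingSequence
                    ? "must wait for consumers" : "safe to claim");
        }
    }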

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/batch/BatchId.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/batch/BatchId.java b/jstorm-core/src/main/java/com/alibaba/jstorm/batch/BatchId.java
index 807c5ec..4d0b713 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/batch/BatchId.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/batch/BatchId.java
@@ -59,8 +59,7 @@ public class BatchId implements Serializable {
 
     @Override
     public String toString() {
-        return ToStringBuilder.reflectionToString(this,
-                ToStringStyle.SHORT_PREFIX_STYLE);
+        return ToStringBuilder.reflectionToString(this, ToStringStyle.SHORT_PREFIX_STYLE);
     }
 
     private static AtomicLong staticId = new AtomicLong(0);

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/batch/BatchTopologyBuilder.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/batch/BatchTopologyBuilder.java b/jstorm-core/src/main/java/com/alibaba/jstorm/batch/BatchTopologyBuilder.java
index 85dec6c..cff7b34 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/batch/BatchTopologyBuilder.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/batch/BatchTopologyBuilder.java
@@ -30,8 +30,7 @@ import com.alibaba.jstorm.batch.impl.CoordinatedBolt;
 import com.alibaba.jstorm.batch.util.BatchDef;
 
 public class BatchTopologyBuilder {
-    private static final Logger LOG = LoggerFactory
-            .getLogger(BatchTopologyBuilder.class);
+    private static final Logger LOG = LoggerFactory.getLogger(BatchTopologyBuilder.class);
 
     private TopologyBuilder topologyBuilder;
 
@@ -40,17 +39,13 @@ public class BatchTopologyBuilder {
     public BatchTopologyBuilder(String topologyName) {
         topologyBuilder = new TopologyBuilder();
 
-        spoutDeclarer =
-                topologyBuilder.setSpout(BatchDef.SPOUT_TRIGGER,
-                        new BatchSpoutTrigger(), 1);
+        spoutDeclarer = topologyBuilder.setSpout(BatchDef.SPOUT_TRIGGER, new BatchSpoutTrigger(), 1);
     }
 
     public BoltDeclarer setSpout(String id, IBatchSpout spout, int paralel) {
 
-        BoltDeclarer boltDeclarer =
-                this.setBolt(id, (IBatchSpout) spout, paralel);
-        boltDeclarer.allGrouping(BatchDef.SPOUT_TRIGGER,
-                BatchDef.COMPUTING_STREAM_ID);
+        BoltDeclarer boltDeclarer = this.setBolt(id, (IBatchSpout) spout, paralel);
+        boltDeclarer.allGrouping(BatchDef.SPOUT_TRIGGER, BatchDef.COMPUTING_STREAM_ID);
 
         return boltDeclarer;
     }
@@ -58,24 +53,19 @@ public class BatchTopologyBuilder {
     public BoltDeclarer setBolt(String id, IBasicBolt bolt, int paralel) {
         CoordinatedBolt coordinatedBolt = new CoordinatedBolt(bolt);
 
-        BoltDeclarer boltDeclarer =
-                topologyBuilder.setBolt(id, coordinatedBolt, paralel);
+        BoltDeclarer boltDeclarer = topologyBuilder.setBolt(id, coordinatedBolt, paralel);
 
         if (bolt instanceof IPrepareCommit) {
-            boltDeclarer.allGrouping(BatchDef.SPOUT_TRIGGER,
-                    BatchDef.PREPARE_STREAM_ID);
+            boltDeclarer.allGrouping(BatchDef.SPOUT_TRIGGER, BatchDef.PREPARE_STREAM_ID);
         }
 
         if (bolt instanceof ICommitter) {
-            boltDeclarer.allGrouping(BatchDef.SPOUT_TRIGGER,
-                    BatchDef.COMMIT_STREAM_ID);
-            boltDeclarer.allGrouping(BatchDef.SPOUT_TRIGGER,
-                    BatchDef.REVERT_STREAM_ID);
+            boltDeclarer.allGrouping(BatchDef.SPOUT_TRIGGER, BatchDef.COMMIT_STREAM_ID);
+            boltDeclarer.allGrouping(BatchDef.SPOUT_TRIGGER, BatchDef.REVERT_STREAM_ID);
         }
 
         if (bolt instanceof IPostCommit) {
-            boltDeclarer.allGrouping(BatchDef.SPOUT_TRIGGER,
-                    BatchDef.POST_STREAM_ID);
+            boltDeclarer.allGrouping(BatchDef.SPOUT_TRIGGER, BatchDef.POST_STREAM_ID);
         }
 
         return boltDeclarer;
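
BatchTopologyBuilder wires every bolt to the hidden trigger spout's control
streams according to the lifecycle interfaces it implements (IPrepareCommit,
ICommitter, IPostCommit). A hedged wiring sketch: MyBatchSpout and
MyCommitterBolt are hypothetical user classes, and the getTopologyBuilder()
accessor is assumed from the rest of the class rather than shown in this hunk.

    import backtype.storm.Config;
    import backtype.storm.StormSubmitter;
    import com.alibaba.jstorm.batch.BatchTopologyBuilder;

    public class BatchTopologyDemo {
        public static void main(String[] args) throws Exception {
            BatchTopologyBuilder builder = new BatchTopologyBuilder("demo-batch");

            // IBatchSpout implementations are driven by BatchSpoutTrigger
            // through the computing stream.
            builder.setSpout("reader", new MyBatchSpout(), 2);

            // A bolt implementing ICommitter is additionally subscribed to
            // the commit and revert streams, so it sees batch boundaries.
            builder.setBolt("writer", new MyCommitterBolt(), 4)
                   .shuffleGrouping("reader");

            Config conf = new Config();
            conf.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, 3); // bounds in-flight batches
            StormSubmitter.submitTopology("demo-batch", conf,
                    builder.getTopologyBuilder().createTopology());
        }
    }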

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/batch/IBatchSpout.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/batch/IBatchSpout.java b/jstorm-core/src/main/java/com/alibaba/jstorm/batch/IBatchSpout.java
index 591f0f0..ef917b2 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/batch/IBatchSpout.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/batch/IBatchSpout.java
@@ -28,8 +28,7 @@ public interface IBatchSpout extends IBasicBolt, ICommitter, Serializable {
      * 
      * execute only receive trigger message
      * 
-     * do emitBatch operation in execute whose streamID is
-     * "batch/compute-stream"
+     * do emitBatch operation in execute whose streamID is "batch/compute-stream"
      */
     // void execute(Tuple input, IBasicOutputCollector collector);
     /**
@@ -44,8 +43,7 @@ public interface IBatchSpout extends IBasicBolt, ICommitter, Serializable {
     /**
      * begin to revert batchId's data
      * 
-     * If current task fails to commit batchId, it won't call revert(batchId) If
-     * current task fails to revert batchId, JStorm won't call revert again.
+     * If current task fails to commit batchId, it won't call revert(batchId) If current task fails to revert batchId, JStorm won't call revert again.
      * 
      * if not transaction, it can don't care revert
      * 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/batch/ICommitter.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/batch/ICommitter.java b/jstorm-core/src/main/java/com/alibaba/jstorm/batch/ICommitter.java
index 16f10da..83845ae 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/batch/ICommitter.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/batch/ICommitter.java
@@ -29,8 +29,7 @@ import backtype.storm.topology.FailedException;
  */
 public interface ICommitter extends Serializable {
     /**
-     * begin to commit batchId's data, then return the commit result The
-     * commitResult will store into outside storage
+     * begin to commit batchId's data, then return the commit result The commitResult will store into outside storage
      * 
      * if failed to commit, please throw FailedException
      * 
@@ -43,8 +42,7 @@ public interface ICommitter extends Serializable {
     /**
      * begin to revert batchId's data
      * 
-     * If current task fails to commit batchId, it won't call revert(batchId) If
-     * current task fails to revert batchId, JStorm won't call revert again.
+     * If current task fails to commit batchId, it won't call revert(batchId) If current task fails to revert batchId, JStorm won't call revert again.
      * 
      * @param id
      */
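
A hedged sketch of an ICommitter implementation following the contract in the
javadoc above: throw FailedException when the commit fails, and treat revert
as best effort since JStorm will not call it twice. The commit signature
(returning the bytes stored into outside storage) and the MyExternalStore
client are assumptions, not part of this hunk.

    import backtype.storm.topology.FailedException;
    import com.alibaba.jstorm.batch.BatchId;
    import com.alibaba.jstorm.batch.ICommitter;

    public class StoreCommitter implements ICommitter {
        private transient MyExternalStore store; // hypothetical storage client

        @Override
        public byte[] commit(BatchId id) throws FailedException {
            try {
                // Flush this batch; the returned bytes are the commitResult
                // that gets stored into outside storage.
                return store.flush(id);
            } catch (Exception e) {
                throw new FailedException(e); // marks the batch for revert
            }
        }

        @Override
        public void revert(BatchId id, byte[] commitResult) {
            // Best effort: if revert itself fails, it is not retried.
            store.rollback(id);
        }
    }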

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/batch/IPrepareCommit.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/batch/IPrepareCommit.java b/jstorm-core/src/main/java/com/alibaba/jstorm/batch/IPrepareCommit.java
index aa75f9e..e03d58f 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/batch/IPrepareCommit.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/batch/IPrepareCommit.java
@@ -33,6 +33,5 @@ public interface IPrepareCommit {
      * @param id
      * @param collector
      */
-    void prepareCommit(BatchId id, BasicOutputCollector collector)
-            throws FailedException;
+    void prepareCommit(BatchId id, BasicOutputCollector collector) throws FailedException;
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/batch/impl/BatchSpoutMsgId.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/batch/impl/BatchSpoutMsgId.java b/jstorm-core/src/main/java/com/alibaba/jstorm/batch/impl/BatchSpoutMsgId.java
index 99b1915..d76a8d7 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/batch/impl/BatchSpoutMsgId.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/batch/impl/BatchSpoutMsgId.java
@@ -59,7 +59,6 @@ public class BatchSpoutMsgId implements Serializable {
 
     @Override
     public String toString() {
-        return ToStringBuilder.reflectionToString(this,
-                ToStringStyle.SHORT_PREFIX_STYLE);
+        return ToStringBuilder.reflectionToString(this, ToStringStyle.SHORT_PREFIX_STYLE);
     }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/batch/impl/BatchSpoutTrigger.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/batch/impl/BatchSpoutTrigger.java b/jstorm-core/src/main/java/com/alibaba/jstorm/batch/impl/BatchSpoutTrigger.java
index c1cdae4..edb882b 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/batch/impl/BatchSpoutTrigger.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/batch/impl/BatchSpoutTrigger.java
@@ -53,8 +53,7 @@ public class BatchSpoutTrigger implements IRichSpout {
     /**  */
     private static final long serialVersionUID = 7215109169247425954L;
 
-    private static final Logger LOG = LoggerFactory
-            .getLogger(BatchSpoutTrigger.class);
+    private static final Logger LOG = LoggerFactory.getLogger(BatchSpoutTrigger.class);
 
     private LinkedBlockingQueue<BatchSpoutMsgId> batchQueue;
 
@@ -95,9 +94,7 @@ public class BatchSpoutTrigger implements IRichSpout {
             BatchId.updateId(zkMsgId);
         }
 
-        int max_spout_pending =
-                JStormUtils.parseInt(
-                        conf.get(Config.TOPOLOGY_MAX_SPOUT_PENDING), 1);
+        int max_spout_pending = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_MAX_SPOUT_PENDING), 1);
 
         for (int i = 0; i < max_spout_pending; i++) {
             BatchSpoutMsgId msgId = BatchSpoutMsgId.mkInstance();
@@ -111,8 +108,7 @@ public class BatchSpoutTrigger implements IRichSpout {
     }
 
     @Override
-    public void open(Map conf, TopologyContext context,
-            SpoutOutputCollector collector) {
+    public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
         batchQueue = new LinkedBlockingQueue<BatchSpoutMsgId>();
         this.collector = collector;
         this.conf = conf;
@@ -134,7 +130,7 @@ public class BatchSpoutTrigger implements IRichSpout {
 
     @Override
     public void close() {
-    	zkClient.close();
+        zkClient.close();
     }
 
     @Override
@@ -204,19 +200,16 @@ public class BatchSpoutTrigger implements IRichSpout {
 
             batchQueue.offer(msgId);
             if (intervalCheck.check()) {
-                LOG.info("Current msgId " + msgId
-                        + ", but current commit BatchId is " + currentBatchId);
+                LOG.info("Current msgId " + msgId + ", but current commit BatchId is " + currentBatchId);
             } else {
-                LOG.debug("Current msgId " + msgId
-                        + ", but current commit BatchId is " + currentBatchId);
+                LOG.debug("Current msgId " + msgId + ", but current commit BatchId is " + currentBatchId);
             }
 
             return;
         }
 
         String streamId = getStreamId(msgId.getBatchStatus());
-        List<Integer> outTasks =
-                collector.emit(streamId, new Values(msgId.getBatchId()), msgId);
+        List<Integer> outTasks = collector.emit(streamId, new Values(msgId.getBatchId()), msgId);
         if (outTasks.isEmpty()) {
             forward(msgId);
         }
@@ -278,8 +271,7 @@ public class BatchSpoutTrigger implements IRichSpout {
             forward((BatchSpoutMsgId) msgId);
             return;
         } else {
-            LOG.warn("Unknown type msgId " + msgId.getClass().getName() + ":"
-                    + msgId);
+            LOG.warn("Unknown type msgId " + msgId.getClass().getName() + ":" + msgId);
             return;
         }
     }
@@ -306,18 +298,15 @@ public class BatchSpoutTrigger implements IRichSpout {
         if (msgId instanceof BatchSpoutMsgId) {
             handleFail((BatchSpoutMsgId) msgId);
         } else {
-            LOG.warn("Unknown type msgId " + msgId.getClass().getName() + ":"
-                    + msgId);
+            LOG.warn("Unknown type msgId " + msgId.getClass().getName() + ":" + msgId);
             return;
         }
     }
 
     @Override
     public void declareOutputFields(OutputFieldsDeclarer declarer) {
-        declarer.declareStream(BatchDef.COMPUTING_STREAM_ID, new Fields(
-                "BatchId"));
-        declarer.declareStream(BatchDef.PREPARE_STREAM_ID,
-                new Fields("BatchId"));
+        declarer.declareStream(BatchDef.COMPUTING_STREAM_ID, new Fields("BatchId"));
+        declarer.declareStream(BatchDef.PREPARE_STREAM_ID, new Fields("BatchId"));
         declarer.declareStream(BatchDef.COMMIT_STREAM_ID, new Fields("BatchId"));
         declarer.declareStream(BatchDef.REVERT_STREAM_ID, new Fields("BatchId"));
         declarer.declareStream(BatchDef.POST_STREAM_ID, new Fields("BatchId"));
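
forward() routes each batch id onto a stream chosen by its BatchStatus, via
the getStreamId() call above. That method is not part of this hunk, so the
following is a reconstruction sketch; only "batch/compute-stream" and the
POST_COMMIT constant are confirmed elsewhere in this commit, the remaining
enum names and stream values are assumptions.

    public class StreamRoutingSketch {
        enum BatchStatus { COMPUTING, PREPARE_COMMIT, COMMIT, REVERT_COMMIT, POST_COMMIT }

        static String getStreamId(BatchStatus status) {
            switch (status) {
                case COMPUTING:      return "batch/compute-stream"; // BatchDef.COMPUTING_STREAM_ID
                case PREPARE_COMMIT: return "batch/prepare-stream"; // assumed value
                case COMMIT:         return "batch/commit-stream";  // assumed value
                case REVERT_COMMIT:  return "batch/revert-stream";  // assumed value
                case POST_COMMIT:    return "batch/post-stream";    // assumed value
                default: throw new IllegalArgumentException("Unknown status " + status);
            }
        }

        public static void main(String[] args) {
            System.out.println(getStreamId(BatchStatus.COMMIT));
        }
    }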

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/batch/impl/CoordinatedBolt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/batch/impl/CoordinatedBolt.java b/jstorm-core/src/main/java/com/alibaba/jstorm/batch/impl/CoordinatedBolt.java
index c9bf0b5..63ca6a0 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/batch/impl/CoordinatedBolt.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/batch/impl/CoordinatedBolt.java
@@ -75,23 +75,20 @@ public class CoordinatedBolt implements IRichBolt {
         try {
             zkClient = BatchCommon.getZkClient(conf);
 
-            zkCommitPath =
-                    BatchDef.ZK_COMMIT_DIR + BatchDef.ZK_SEPERATOR + taskId;
+            zkCommitPath = BatchDef.ZK_COMMIT_DIR + BatchDef.ZK_SEPERATOR + taskId;
             if (zkClient.node_existed(zkCommitPath, false)) {
                 zkClient.delete_node(zkCommitPath);
             }
             zkClient.mkdirs(zkCommitPath);
 
-            LOG.info(taskName + " successfully create commit path"
-                    + zkCommitPath);
+            LOG.info(taskName + " successfully create commit path" + zkCommitPath);
         } catch (Exception e) {
             LOG.error("Failed to create zk node", e);
             throw new RuntimeException();
         }
     }
 
-    public void prepare(Map conf, TopologyContext context,
-            OutputCollector collector) {
+    public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
 
         taskId = String.valueOf(context.getThisTaskId());
         taskName = context.getThisComponentId() + "_" + context.getThisTaskId();
@@ -101,9 +98,7 @@ public class CoordinatedBolt implements IRichBolt {
 
         if (delegate instanceof ICommitter) {
             isCommiter = true;
-            commited =
-                    new TimeCacheMap<Object, Object>(
-                            context.maxTopologyMessageTimeout());
+            commited = new TimeCacheMap<Object, Object>(context.maxTopologyMessageTimeout());
             mkCommitDir(conf);
         }
 
@@ -130,8 +125,7 @@ public class CoordinatedBolt implements IRichBolt {
         });
 
         for (int index = 0; index < childs.size() - reserveSize; index++) {
-            zkClient.delete_node(path + BatchDef.ZK_SEPERATOR
-                    + childs.get(index));
+            zkClient.delete_node(path + BatchDef.ZK_SEPERATOR + childs.get(index));
         }
     }
 
@@ -263,8 +257,7 @@ public class CoordinatedBolt implements IRichBolt {
         } else if (batchStatus == BatchStatus.POST_COMMIT) {
             handlePostCommit(tuple);
         } else {
-            throw new RuntimeException(
-                    "Receive commit tuple, but not committer");
+            throw new RuntimeException("Receive commit tuple, but not committer");
         }
     }
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/batch/util/BatchCommon.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/batch/util/BatchCommon.java b/jstorm-core/src/main/java/com/alibaba/jstorm/batch/util/BatchCommon.java
index fcc54fa..bee87ef 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/batch/util/BatchCommon.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/batch/util/BatchCommon.java
@@ -31,8 +31,7 @@ import com.alibaba.jstorm.cluster.DistributedClusterState;
 import com.alibaba.jstorm.utils.JStormUtils;
 
 public class BatchCommon {
-    private static final Logger LOG = LoggerFactory
-            .getLogger(BatchCommon.class);
+    private static final Logger LOG = LoggerFactory.getLogger(BatchCommon.class);
 
     private static ClusterState zkClient = null;
 
@@ -44,26 +43,18 @@ public class BatchCommon {
 
             List<String> zkServers = null;
             if (conf.get(Config.TRANSACTIONAL_ZOOKEEPER_SERVERS) != null) {
-                zkServers =
-                        (List<String>) conf
-                                .get(Config.TRANSACTIONAL_ZOOKEEPER_SERVERS);
+                zkServers = (List<String>) conf.get(Config.TRANSACTIONAL_ZOOKEEPER_SERVERS);
             } else if (conf.get(Config.STORM_ZOOKEEPER_SERVERS) != null) {
-                zkServers =
-                        (List<String>) conf.get(Config.STORM_ZOOKEEPER_SERVERS);
+                zkServers = (List<String>) conf.get(Config.STORM_ZOOKEEPER_SERVERS);
             } else {
                 throw new RuntimeException("No setting zk");
             }
 
             int port = 2181;
             if (conf.get(Config.TRANSACTIONAL_ZOOKEEPER_PORT) != null) {
-                port =
-                        JStormUtils.parseInt(
-                                conf.get(Config.TRANSACTIONAL_ZOOKEEPER_PORT),
-                                2181);
+                port = JStormUtils.parseInt(conf.get(Config.TRANSACTIONAL_ZOOKEEPER_PORT), 2181);
             } else if (conf.get(Config.STORM_ZOOKEEPER_PORT) != null) {
-                port =
-                        JStormUtils.parseInt(
-                                conf.get(Config.STORM_ZOOKEEPER_PORT), 2181);
+                port = JStormUtils.parseInt(conf.get(Config.STORM_ZOOKEEPER_PORT), 2181);
             }
 
             String root = BatchDef.BATCH_ZK_ROOT;
@@ -71,9 +62,7 @@ public class BatchCommon {
                 root = (String) conf.get(Config.TRANSACTIONAL_ZOOKEEPER_ROOT);
             }
 
-            root =
-                    root + BatchDef.ZK_SEPERATOR
-                            + conf.get(Config.TOPOLOGY_NAME);
+            root = root + BatchDef.ZK_SEPERATOR + conf.get(Config.TOPOLOGY_NAME);
 
             Map<Object, Object> tmpConf = new HashMap<Object, Object>();
             tmpConf.putAll(conf);
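
getZkClient resolves its connection settings with a clear precedence:
transactional.zookeeper.* first, then storm.zookeeper.*, then defaults
(port 2181, root BatchDef.BATCH_ZK_ROOT), and finally appends the topology
name to the root. A minimal config sketch, assuming the usual static
getZkClient(Map) accessor:

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.Map;

    import backtype.storm.Config;
    import com.alibaba.jstorm.batch.util.BatchCommon;
    import com.alibaba.jstorm.cluster.ClusterState;

    public class BatchZkConfigDemo {
        public static void main(String[] args) throws Exception {
            Map<Object, Object> conf = new HashMap<Object, Object>();
            conf.put(Config.TRANSACTIONAL_ZOOKEEPER_SERVERS,
                    Arrays.asList("zk1.example", "zk2.example"));
            conf.put(Config.TRANSACTIONAL_ZOOKEEPER_PORT, 2181);
            conf.put(Config.TRANSACTIONAL_ZOOKEEPER_ROOT, "/jstorm-batch");
            conf.put(Config.TOPOLOGY_NAME, "demo-batch");

            // Effective root: "/jstorm-batch" + BatchDef.ZK_SEPERATOR + "demo-batch"
            ClusterState zk = BatchCommon.getZkClient(conf);
        }
    }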

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/cache/JStormCache.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/cache/JStormCache.java b/jstorm-core/src/main/java/com/alibaba/jstorm/cache/JStormCache.java
index a5a6835..37653eb 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/cache/JStormCache.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/cache/JStormCache.java
@@ -23,27 +23,26 @@ import java.util.Map;
 
 import com.alibaba.jstorm.client.ConfigExtension;
 
-
-
 public interface JStormCache extends Serializable {
     public static final String TAG_TIMEOUT_LIST = ConfigExtension.CACHE_TIMEOUT_LIST;
-    
-    void init(Map<Object, Object> conf)throws Exception;
+
+    void init(Map<Object, Object> conf) throws Exception;
+
     void cleanup();
-    
-    Object get(String key) ;
-    
+
+    Object get(String key);
+
     void getBatch(Map<String, Object> map);
 
     void remove(String key);
-    
+
     void removeBatch(Collection<String> keys);
 
     void put(String key, Object value, int timeoutSecond);
 
     void put(String key, Object value);
-    
-    void putBatch(Map<String, Object> map) ;
-    
+
+    void putBatch(Map<String, Object> map);
+
     void putBatch(Map<String, Object> map, int timeoutSeconds);
 }
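
A usage sketch of the interface contract above; RocksDBCache in the next hunk
is one concrete implementation. The sample path is made up, and putting
ROCKSDB_RESET as a plain Boolean matches the cast that hunk introduces.

    import java.util.HashMap;
    import java.util.Map;

    import com.alibaba.jstorm.cache.JStormCache;
    import com.alibaba.jstorm.cache.RocksDBCache;

    public class CacheDemo {
        public static void main(String[] args) throws Exception {
            JStormCache cache = new RocksDBCache();

            Map<Object, Object> conf = new HashMap<Object, Object>();
            conf.put(RocksDBCache.ROCKSDB_ROOT_DIR, "/tmp/jstorm-cache"); // sketch path
            conf.put(RocksDBCache.ROCKSDB_RESET, Boolean.TRUE);           // wipe stale state
            cache.init(conf);

            cache.put("topology-1/state", "checkpoint-42"); // values are java-serialized
            Object state = cache.get("topology-1/state");

            cache.remove("topology-1/state");
            cache.cleanup(); // closes the underlying store
        }
    }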

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/cache/RocksDBCache.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/cache/RocksDBCache.java b/jstorm-core/src/main/java/com/alibaba/jstorm/cache/RocksDBCache.java
index e72e3d0..845ed1e 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/cache/RocksDBCache.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/cache/RocksDBCache.java
@@ -17,101 +17,85 @@
  */
 package com.alibaba.jstorm.cache;
 
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-
-import org.apache.commons.lang.StringUtils;
-import org.rocksdb.ColumnFamilyHandle;
-import org.rocksdb.Options;
-import org.rocksdb.RocksDB;
-import org.rocksdb.WriteBatch;
-import org.rocksdb.WriteOptions;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import backtype.storm.utils.Utils;
-
 import com.alibaba.jstorm.client.ConfigExtension;
 import com.alibaba.jstorm.utils.JStormUtils;
 import com.alibaba.jstorm.utils.PathUtils;
+import org.apache.commons.lang.StringUtils;
+import org.rocksdb.*;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.*;
+import java.util.Map.Entry;
 
 public class RocksDBCache implements JStormCache {
     private static final long serialVersionUID = 705938812240167583L;
     private static Logger LOG = LoggerFactory.getLogger(RocksDBCache.class);
-    
+
     static {
         RocksDB.loadLibrary();
     }
-    
+
     public static final String ROCKSDB_ROOT_DIR = "rocksdb.root.dir";
     public static final String ROCKSDB_RESET = "rocksdb.reset";
     protected RocksDB db;
     protected String rootDir;
-    
+
     public void initDir(Map<Object, Object> conf) {
         String confDir = (String) conf.get(ROCKSDB_ROOT_DIR);
         if (StringUtils.isBlank(confDir) == true) {
             throw new RuntimeException("Doesn't set rootDir of rocksDB");
         }
-        
-        boolean clean = ConfigExtension.getNimbusCacheReset(conf);
+
+        boolean clean = (Boolean) conf.get(ROCKSDB_RESET);
         LOG.info("RocksDB reset is " + clean);
         if (clean == true) {
             try {
                 PathUtils.rmr(confDir);
             } catch (IOException e) {
-                // TODO Auto-generated catch block
                 throw new RuntimeException("Failed to cleanup rooDir of rocksDB " + confDir);
             }
         }
-        
+
         File file = new File(confDir);
         if (file.exists() == false) {
             try {
                 PathUtils.local_mkdirs(confDir);
                 file = new File(confDir);
             } catch (IOException e) {
-                // TODO Auto-generated catch block
                 throw new RuntimeException("Failed to mkdir rooDir of rocksDB " + confDir);
             }
         }
-        
+
         rootDir = file.getAbsolutePath();
     }
-    
+
     public void initDb(List<Integer> list) throws Exception {
         LOG.info("Begin to init rocksDB of {}", rootDir);
-        
+
         Options dbOptions = null;
-        
         try {
             dbOptions = new Options().setCreateMissingColumnFamilies(true).setCreateIfMissing(true);
-            
+
             List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<ColumnFamilyHandle>();
-            
+
             db = RocksDB.open(dbOptions, rootDir);
-            
+
             LOG.info("Successfully init rocksDB of {}", rootDir);
         } finally {
-            
             if (dbOptions != null) {
                 dbOptions.dispose();
             }
         }
     }
-    
+
     @Override
     public void init(Map<Object, Object> conf) throws Exception {
-        // TODO Auto-generated method stub
         initDir(conf);
-        
+
         List<Integer> list = new ArrayList<Integer>();
         if (conf.get(TAG_TIMEOUT_LIST) != null) {
             for (Object obj : (List) ConfigExtension.getCacheTimeoutList(conf)) {
@@ -119,11 +103,11 @@ public class RocksDBCache implements JStormCache {
                 if (timeoutSecond == null || timeoutSecond <= 0) {
                     continue;
                 }
-                
+
                 list.add(timeoutSecond);
             }
         }
-        
+
         // Add retry logic
         boolean isSuccess = false;
         for (int i = 0; i < 3; i++) {
@@ -135,32 +119,29 @@ public class RocksDBCache implements JStormCache {
                 LOG.warn("Failed to init rocksDB " + rootDir, e);
                 try {
                     PathUtils.rmr(rootDir);
-                } catch (IOException e1) {
-                    // TODO Auto-generated catch block
-                    
+                } catch (IOException ignored) {
                 }
             }
         }
-        
+
         if (isSuccess == false) {
             throw new RuntimeException("Failed to init rocksDB " + rootDir);
         }
     }
-    
+
     @Override
     public void cleanup() {
         LOG.info("Begin to close rocketDb of {}", rootDir);
-        
+
         if (db != null) {
             db.close();
         }
-        
+
         LOG.info("Successfully closed rocketDb of {}", rootDir);
     }
-    
+
     @Override
     public Object get(String key) {
-        // TODO Auto-generated method stub
         try {
             byte[] data = db.get(key.getBytes());
             if (data != null) {
@@ -172,36 +153,35 @@ public class RocksDBCache implements JStormCache {
                     return null;
                 }
             }
-            
+
         } catch (Exception e) {
-            
         }
-        
+
         return null;
     }
-    
+
     @Override
     public void getBatch(Map<String, Object> map) {
         List<byte[]> lookupKeys = new ArrayList<byte[]>();
         for (String key : map.keySet()) {
             lookupKeys.add(key.getBytes());
         }
-        
+
         try {
             Map<byte[], byte[]> results = db.multiGet(lookupKeys);
             if (results == null || results.size() == 0) {
                 return;
             }
-            
+
             for (Entry<byte[], byte[]> resultEntry : results.entrySet()) {
                 byte[] keyByte = resultEntry.getKey();
                 byte[] valueByte = resultEntry.getValue();
-                
+
                 if (keyByte == null || valueByte == null) {
                     continue;
                 }
-                
-                Object value = null;
+
+                Object value;
                 try {
                     value = Utils.javaDeserialize(valueByte);
                 } catch (Exception e) {
@@ -209,45 +189,37 @@ public class RocksDBCache implements JStormCache {
                     db.remove(keyByte);
                     continue;
                 }
-                
+
                 map.put(new String(keyByte), value);
             }
-            
-            return;
         } catch (Exception e) {
             LOG.error("Failed to query " + map.keySet() + ", in window: ");
         }
-        
-        return;
     }
-    
+
     @Override
     public void remove(String key) {
         try {
             db.remove(key.getBytes());
-            
+
         } catch (Exception e) {
             LOG.error("Failed to remove " + key);
         }
-        
+
     }
-    
+
     @Override
     public void removeBatch(Collection<String> keys) {
-        // TODO Auto-generated method stub
         for (String key : keys) {
             remove(key);
         }
     }
-    
+
     @Override
     public void put(String key, Object value, int timeoutSecond) {
-        // TODO Auto-generated method stub
-        
         put(key, value);
-        
     }
-    
+
     @Override
     public void put(String key, Object value) {
         byte[] data = Utils.javaSerialize(value);
@@ -255,38 +227,36 @@ public class RocksDBCache implements JStormCache {
             db.put(key.getBytes(), data);
         } catch (Exception e) {
             LOG.error("Failed put into cache, " + key, e);
-            return;
         }
     }
-    
+
     @Override
     public void putBatch(Map<String, Object> map) {
-        // TODO Auto-generated method stub
         WriteOptions writeOpts = null;
         WriteBatch writeBatch = null;
-        
+
         Set<byte[]> putKeys = new HashSet<byte[]>();
-        
+
         try {
             writeOpts = new WriteOptions();
             writeBatch = new WriteBatch();
-            
+
             for (Entry<String, Object> entry : map.entrySet()) {
                 String key = entry.getKey();
                 Object value = entry.getValue();
-                
+
                 byte[] data = Utils.javaSerialize(value);
-                
+
                 if (StringUtils.isBlank(key) || data == null || data.length == 0) {
                     continue;
                 }
-                
+
                 byte[] keyByte = key.getBytes();
                 writeBatch.put(keyByte, data);
-                
+
                 putKeys.add(keyByte);
             }
-            
+
             db.write(writeOpts, writeBatch);
         } catch (Exception e) {
             LOG.error("Failed to putBatch into DB, " + map.keySet(), e);
@@ -294,18 +264,16 @@ public class RocksDBCache implements JStormCache {
             if (writeOpts != null) {
                 writeOpts.dispose();
             }
-            
+
             if (writeBatch != null) {
                 writeBatch.dispose();
             }
         }
-        
+
     }
-    
+
     @Override
     public void putBatch(Map<String, Object> map, int timeoutSeconds) {
-        // TODO Auto-generated method stub
         putBatch(map);
     }
-    
 }
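
putBatch above groups all puts into a single atomic WriteBatch and releases
the native handles in finally. The same pattern in isolation; the dispose()
calls match the older RocksDB JNI API this patch uses (newer releases are
AutoCloseable), and the path is made up.

    import org.rocksdb.Options;
    import org.rocksdb.RocksDB;
    import org.rocksdb.WriteBatch;
    import org.rocksdb.WriteOptions;

    public class WriteBatchSketch {
        public static void main(String[] args) throws Exception {
            RocksDB.loadLibrary();

            Options opts = new Options().setCreateIfMissing(true);
            RocksDB db = RocksDB.open(opts, "/tmp/rocksdb-sketch");

            WriteOptions writeOpts = null;
            WriteBatch batch = null;
            try {
                writeOpts = new WriteOptions();
                batch = new WriteBatch();
                batch.put("a".getBytes(), "1".getBytes());
                batch.put("b".getBytes(), "2".getBytes());
                db.write(writeOpts, batch); // both keys become visible atomically
            } finally {
                // Native resources must be released explicitly in this API version.
                if (writeOpts != null) writeOpts.dispose();
                if (batch != null) batch.dispose();
                db.close();
                opts.dispose();
            }
        }
    }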


http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/MetricMetaParser.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/MetricMetaParser.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/MetricMetaParser.java
new file mode 100644
index 0000000..c0a220f
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/MetricMetaParser.java
@@ -0,0 +1,58 @@
+package com.alibaba.jstorm.common.metric;
+
+import com.alibaba.jstorm.daemon.nimbus.TopologyMetricsRunnable;
+import com.alibaba.jstorm.metric.MetaType;
+import com.alibaba.jstorm.metric.MetricType;
+import com.alibaba.jstorm.metric.MetricUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * @author wange
+ * @since 15/7/14
+ */
+public class MetricMetaParser {
+    private static final Logger logger = LoggerFactory.getLogger(TopologyMetricsRunnable.class);
+
+    public static MetricMeta fromMetricName(String name) {
+        try {
+            String[] parts = name.split(MetricUtils.DELIM);
+            char ch = parts[0].charAt(0);
+            if (ch == 'W' || ch == 'N' || ch == 'P') {
+                return parseWorkerMetricMeta(parts);
+            } else {
+                return parseTaskMetricMeta(parts);
+            }
+        } catch (Exception ex) {
+            logger.error("Error parsing metric meta, name:{}", name, ex);
+        }
+        return null;
+    }
+
+    private static MetricMeta parseTaskMetricMeta(String[] parts) {
+        MetricMeta meta = new MetricMeta();
+        meta.setMetaType(MetaType.parse(parts[0].charAt(0)).getT());
+        meta.setMetricType(MetricType.parse(parts[0].charAt(1)).getT());
+        meta.setTopologyId(parts[1]);
+        meta.setComponent(parts[2]);
+        meta.setTaskId(Integer.valueOf(parts[3]));
+        meta.setStreamId(parts[4]);
+        meta.setMetricGroup(parts[5]);
+        meta.setMetricName(parts[6]);
+
+        return meta;
+    }
+
+    private static MetricMeta parseWorkerMetricMeta(String[] parts) {
+        MetricMeta meta = new MetricMeta();
+        meta.setMetaType(MetaType.parse(parts[0].charAt(0)).getT());
+        meta.setMetricType(MetricType.parse(parts[0].charAt(1)).getT());
+        meta.setTopologyId(parts[1]);
+        meta.setHost(parts[2]);
+        meta.setPort(Integer.valueOf(parts[3]));
+        meta.setMetricGroup(parts[4]);
+        meta.setMetricName(parts[5]);
+
+        return meta;
+    }
+}
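
The parser splits on MetricUtils.DELIM and dispatches on the first character:
W/N/P marks worker-level names (host and port in positions 2-3), anything else
is task-level (component, taskId, streamId). A hedged sketch of a task-level
name; the "@" delimiter and the two-letter "TG" prefix (task meta type plus
gauge metric type) are assumptions about codes not shown in this file.

    public class MetricMetaParserDemo {
        public static void main(String[] args) {
            // Assumed layout, one segment per DELIM:
            // <metaType><metricType>@<topologyId>@<component>@<taskId>@<streamId>@<group>@<name>
            String taskMetric = "TG@topo-1@word-count@3@default@sys@TupleLifeCycle";

            MetricMeta meta = MetricMetaParser.fromMetricName(taskMetric);
            if (meta != null) { // the parser logs and returns null on malformed names
                System.out.println(meta.getComponent() + "/" + meta.getTaskId());
            }
        }
    }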

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/MetricRegistry.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/MetricRegistry.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/MetricRegistry.java
deleted file mode 100755
index 982c5f6..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/MetricRegistry.java
+++ /dev/null
@@ -1,316 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric;
-
-import java.util.Collections;
-import java.util.Map;
-import java.util.SortedMap;
-import java.util.SortedSet;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.alibaba.jstorm.common.metric.window.Metric;
-
-public class MetricRegistry implements MetricSet {
-    private static final Logger LOG = LoggerFactory.getLogger(MetricRegistry.class);
-
-    private static final long serialVersionUID = 8184106900230111064L;
-    public static final String NAME_SEPERATOR = ".";
-
-    /**
-     * Concatenates elements to form a dotted name, eliding any null values or
-     * empty strings.
-     *
-     * @param name the first element of the name
-     * @param names the remaining elements of the name
-     * @return {@code name} and {@code names} concatenated by periods
-     */
-    public static String name(String name, String... names) {
-        final StringBuilder builder = new StringBuilder();
-        append(builder, name);
-        if (names != null) {
-            for (String s : names) {
-                append(builder, s);
-            }
-        }
-        return builder.toString();
-    }
-
-    /**
-     * Concatenates a class name and elements to form a dotted name, eliding any
-     * null values or empty strings.
-     *
-     * @param klass the first element of the name
-     * @param names the remaining elements of the name
-     * @return {@code klass} and {@code names} concatenated by periods
-     */
-    public static String name(Class<?> klass, String... names) {
-        return name(klass.getName(), names);
-    }
-
-    private static void append(StringBuilder builder, String part) {
-        if (part != null && !part.isEmpty()) {
-            if (builder.length() > 0) {
-                builder.append(NAME_SEPERATOR);
-            }
-            builder.append(part);
-        }
-    }
-
-    protected final ConcurrentMap<String, Metric> metrics;
-
-    /**
-     * Creates a new {@link MetricRegistry}.
-     */
-    public MetricRegistry() {
-        this.metrics = buildMap();
-    }
-
-    /**
-     * Creates a new {@link ConcurrentMap} implementation for use inside the
-     * registry. Override this to create a {@link MetricRegistry} with space- or
-     * time-bounded metric lifecycles, for example.
-     *
-     * @return a new {@link ConcurrentMap}
-     */
-    protected ConcurrentMap<String, Metric> buildMap() {
-        return new ConcurrentHashMap<String, Metric>();
-    }
-
-    /**
-     * Given a {@link Metric}, registers it under the given name.
-     *
-     * @param name the name of the metric
-     * @param metric the metric
-     * @param <T> the type of the metric
-     * @return {@code metric}
-     * @throws IllegalArgumentException if the name is already registered
-     */
-    @SuppressWarnings("unchecked")
-    public <T extends Metric> T register(String name, T metric)
-            throws IllegalArgumentException {
-        if (metric instanceof MetricSet) {
-            registerAll(name, (MetricSet) metric);
-        } else {
-            final Metric existing = metrics.putIfAbsent(name, metric);
-            if (existing == null) {
-                // add one listener to notify
-                LOG.info("Successfully register metric of {}", name);
-            } else {
-                throw new IllegalArgumentException("A metric named " + name
-                        + " already exists");
-            }
-        }
-        return metric;
-    }
-
-    /**
-     * Given a metric set, registers them.
-     *
-     * @param metrics a set of metrics
-     * @throws IllegalArgumentException if any of the names are already
-     *             registered
-     */
-    public void registerAll(MetricSet metrics) throws IllegalArgumentException {
-        registerAll(null, metrics);
-    }
-
-    /**
-     * Removes the metric with the given name.
-     *
-     * @param name the name of the metric
-     * @return whether or not the metric was removed
-     */
-    public boolean remove(String name) {
-        final Metric metric = metrics.remove(name);
-        if (metric != null) {
-            // call listener to notify remove
-            LOG.info("Successfully unregister metric of {}", name);
-            return true;
-        }
-        return false;
-    }
-
-    /**
-     * Removes all metrics which match the given filter.
-     *
-     * @param filter a filter
-     */
-    public void removeMatching(MetricFilter filter) {
-        for (Map.Entry<String, Metric> entry : metrics.entrySet()) {
-            if (filter.matches(entry.getKey(), entry.getValue())) {
-                remove(entry.getKey());
-            }
-        }
-    }
-
-    /**
-     * Returns a set of the names of all the metrics in the registry.
-     *
-     * @return the names of all the metrics
-     */
-    public SortedSet<String> getNames() {
-        return Collections.unmodifiableSortedSet(new TreeSet<String>(metrics
-                .keySet()));
-    }
-
-    /**
-     * Returns a map of all the gauges in the registry and their names.
-     *
-     * @return all the gauges in the registry
-     */
-    public SortedMap<String, Gauge> getGauges() {
-        return getGauges(MetricFilter.ALL);
-    }
-
-    /**
-     * Returns a map of all the gauges in the registry and their names which
-     * match the given filter.
-     *
-     * @param filter the metric filter to match
-     * @return all the gauges in the registry
-     */
-    public SortedMap<String, Gauge> getGauges(MetricFilter filter) {
-        return getMetrics(Gauge.class, filter);
-    }
-
-    /**
-     * Returns a map of all the counters in the registry and their names.
-     *
-     * @return all the counters in the registry
-     */
-    public SortedMap<String, Counter> getCounters() {
-        return getCounters(MetricFilter.ALL);
-    }
-
-    /**
-     * Returns a map of all the counters in the registry and their names which
-     * match the given filter.
-     *
-     * @param filter the metric filter to match
-     * @return all the counters in the registry
-     */
-    public SortedMap<String, Counter> getCounters(MetricFilter filter) {
-        return getMetrics(Counter.class, filter);
-    }
-
-    /**
-     * Returns a map of all the histograms in the registry and their names.
-     *
-     * @return all the histograms in the registry
-     */
-    public SortedMap<String, Histogram> getHistograms() {
-        return getHistograms(MetricFilter.ALL);
-    }
-
-    /**
-     * Returns a map of all the histograms in the registry and their names which
-     * match the given filter.
-     *
-     * @param filter the metric filter to match
-     * @return all the histograms in the registry
-     */
-    public SortedMap<String, Histogram> getHistograms(MetricFilter filter) {
-        return getMetrics(Histogram.class, filter);
-    }
-
-    /**
-     * Returns a map of all the meters in the registry and their names.
-     *
-     * @return all the meters in the registry
-     */
-    public SortedMap<String, Meter> getMeters() {
-        return getMeters(MetricFilter.ALL);
-    }
-
-    /**
-     * Returns a map of all the meters in the registry and their names which
-     * match the given filter.
-     *
-     * @param filter the metric filter to match
-     * @return all the meters in the registry
-     */
-    public SortedMap<String, Meter> getMeters(MetricFilter filter) {
-        return getMetrics(Meter.class, filter);
-    }
-
-    /**
-     * Returns a map of all the timers in the registry and their names.
-     *
-     * @return all the timers in the registry
-     */
-    public SortedMap<String, Timer> getTimers() {
-        return getTimers(MetricFilter.ALL);
-    }
-
-    /**
-     * Returns a map of all the timers in the registry and their names which
-     * match the given filter.
-     *
-     * @param filter the metric filter to match
-     * @return all the timers in the registry
-     */
-    public SortedMap<String, Timer> getTimers(MetricFilter filter) {
-        return getMetrics(Timer.class, filter);
-    }
-
-    @SuppressWarnings("unchecked")
-    private <T extends Metric> SortedMap<String, T> getMetrics(Class<T> klass,
-            MetricFilter filter) {
-        final TreeMap<String, T> timers = new TreeMap<String, T>();
-        for (Map.Entry<String, Metric> entry : metrics.entrySet()) {
-            if (klass.isInstance(entry.getValue())
-                    && filter.matches(entry.getKey(), entry.getValue())) {
-                timers.put(entry.getKey(), (T) entry.getValue());
-            }
-        }
-        return Collections.unmodifiableSortedMap(timers);
-    }
-
-    private void registerAll(String prefix, MetricSet metrics)
-            throws IllegalArgumentException {
-        for (Map.Entry<String, Metric> entry : metrics.getMetrics().entrySet()) {
-            if (entry.getValue() instanceof MetricSet) {
-                registerAll(name(prefix, entry.getKey()),
-                        (MetricSet) entry.getValue());
-            } else {
-                register(name(prefix, entry.getKey()), entry.getValue());
-            }
-        }
-    }
-
-    @Override
-    public Map<String, Metric> getMetrics() {
-        return Collections.unmodifiableMap(metrics);
-    }
-
-    /**
-     * Expose metrics is to improve performance
-     * 
-     * @return
-     */
-    public Metric getMetric(String name) {
-        return metrics.get(name);
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/MetricSet.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/MetricSet.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/MetricSet.java
deleted file mode 100755
index 243f9b8..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/MetricSet.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric;
-
-import java.io.Serializable;
-import java.util.Map;
-
-import com.alibaba.jstorm.common.metric.window.Metric;
-
-public interface MetricSet extends Serializable {
-    Map<String, Metric> getMetrics();
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/QueueGauge.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/QueueGauge.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/QueueGauge.java
index 0ff964e..114eeb2 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/QueueGauge.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/QueueGauge.java
@@ -17,6 +17,7 @@
  */
 package com.alibaba.jstorm.common.metric;
 
+import com.google.common.base.Joiner;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -34,10 +35,10 @@ public class QueueGauge extends HealthCheck implements Gauge<Double> {
     String name;
     Result healthy;
 
-    public QueueGauge(String name, DisruptorQueue queue) {
+    public QueueGauge(DisruptorQueue queue, String... names) {
         this.queue = queue;
-        this.name = name;
-        this.healthy = HealthCheck.Result.healthy();
+        this.name = Joiner.on("-").join(names);
+        this.healthy = Result.healthy();
     }
 
     @Override
@@ -52,7 +53,7 @@ public class QueueGauge extends HealthCheck implements Gauge<Double> {
         // TODO Auto-generated method stub
         Double ret = (double) queue.pctFull();
         if (ret > 0.9) {
-            return HealthCheck.Result.unhealthy(name + QUEUE_IS_FULL);
+            return Result.unhealthy(name + QUEUE_IS_FULL);
         } else {
             return healthy;
         }
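
The hunk above replaces the single-name constructor with varargs name parts joined by "-". A minimal usage sketch; the queue instance and name parts are illustrative, not from this commit:

    // Hypothetical registration helper; "topo-1", "bolt-2", "exec" are
    // illustrative name parts, joined internally to "topo-1-bolt-2-exec".
    static QueueGauge newExecutorQueueGauge(DisruptorQueue queue) {
        return new QueueGauge(queue, "topo-1", "bolt-2", "exec");
    }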

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/TaskTrack.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/TaskTrack.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/TaskTrack.java
new file mode 100644
index 0000000..0b49ecf
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/TaskTrack.java
@@ -0,0 +1,180 @@
+package com.alibaba.jstorm.common.metric;
+
+import com.alibaba.jstorm.metric.KVSerializable;
+import com.alibaba.jstorm.metric.MetricUtils;
+import com.alibaba.jstorm.utils.JStormUtils;
+
+import java.util.Date;
+
+/**
+ * @author wange
+ * @since 15/7/16
+ */
+public class TaskTrack implements KVSerializable {
+
+    private long id;
+    private String clusterName;
+    private String topologyId;
+    private String component;
+    private int taskId;
+    private String host;
+    private int port;
+    private Date start;
+    private Date end;
+
+    public TaskTrack() {
+    }
+
+    public TaskTrack(String clusterName, String topologyId) {
+        this.clusterName = clusterName;
+        this.topologyId = topologyId;
+    }
+
+    public long getId() {
+        return id;
+    }
+
+    public void setId(long id) {
+        this.id = id;
+    }
+
+    public String getClusterName() {
+        return clusterName;
+    }
+
+    public void setClusterName(String clusterName) {
+        this.clusterName = clusterName;
+    }
+
+    public String getTopologyId() {
+        return topologyId;
+    }
+
+    public void setTopologyId(String topologyId) {
+        this.topologyId = topologyId;
+    }
+
+    public String getComponent() {
+        return component;
+    }
+
+    public void setComponent(String component) {
+        this.component = component;
+    }
+
+    public int getTaskId() {
+        return taskId;
+    }
+
+    public void setTaskId(int taskId) {
+        this.taskId = taskId;
+    }
+
+    public String getHost() {
+        return host;
+    }
+
+    public void setHost(String host) {
+        this.host = host;
+    }
+
+    public int getPort() {
+        return port;
+    }
+
+    public void setPort(int port) {
+        this.port = port;
+    }
+
+    public Date getStart() {
+        return start;
+    }
+
+    public void setStart(Date start) {
+        this.start = start;
+    }
+
+    public Date getEnd() {
+        return end;
+    }
+
+    public void setEnd(Date end) {
+        this.end = end;
+    }
+
+    /**
+     * key: clusterName + topologyId + taskId + time
+     */
+    @Override
+    public byte[] getKey() {
+        StringBuilder sb = new StringBuilder(128);
+        sb.append(clusterName).append(MetricUtils.AT).append(topologyId).append(MetricUtils.AT)
+                .append(taskId).append(MetricUtils.AT);
+        if (start != null) {
+            sb.append(start.getTime());
+        } else {
+            sb.append(end.getTime());
+        }
+        return sb.toString().getBytes();
+    }
+
+    /**
+     * value: type + host + port
+     * type: S/E (start/end)
+     */
+    @Override
+    public byte[] getValue() {
+        StringBuilder sb = new StringBuilder(32);
+        if (start != null) {
+            sb.append(KVSerializable.START);
+        } else {
+            sb.append(KVSerializable.END);
+        }
+        sb.append(MetricUtils.AT).append(host).append(MetricUtils.AT).append(port);
+        return sb.toString().getBytes();
+    }
+
+    @Override
+    public Object fromKV(byte[] key, byte[] value) {
+        String[] keyParts = new String(key).split(MetricUtils.DELIM);
+
+        String[] valueParts = new String(value).split(MetricUtils.DELIM);
+        boolean isStart = false;
+        if (valueParts.length >= 3){
+            if (valueParts[0].equals(KVSerializable.START)) isStart = true;
+            host = valueParts[1];
+            port = JStormUtils.parseInt(valueParts[2]);
+        }
+
+        if (keyParts.length >= 4){
+            clusterName = keyParts[0];
+            topologyId = keyParts[1];
+            taskId = JStormUtils.parseInt(keyParts[2]);
+            long ts = JStormUtils.parseLong(keyParts[3]);
+            if (isStart) start = new Date(ts);
+            else end = new Date(ts);
+        }
+
+        return this;
+    }
+
+    public Date getTime() {
+        return start != null ? start : end;
+    }
+
+    public String getIdentity(){
+        StringBuilder sb = new StringBuilder();
+        sb.append(clusterName).append(MetricUtils.AT).append(topologyId).append(MetricUtils.AT)
+                .append(taskId).append(MetricUtils.AT).append(host).append(MetricUtils.AT).append(port);
+        return sb.toString();
+    }
+
+    public void merge(TaskTrack taskTrack){
+        if (taskTrack.start != null && this.start == null){
+            this.start = taskTrack.start;
+        }
+        if (taskTrack.end != null && this.end == null){
+            this.end = taskTrack.end;
+        }
+    }
+}
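
TaskTrack stores one row per start/end event, and merge() folds a pair back into a single record. A hedged sketch of that flow, with illustrative field values:

    // java.util.Date is already imported by TaskTrack; values are illustrative.
    TaskTrack started = new TaskTrack("cluster-1", "topo-1");
    started.setTaskId(3);
    started.setStart(new Date());

    TaskTrack ended = new TaskTrack("cluster-1", "topo-1");
    ended.setTaskId(3);
    ended.setEnd(new Date());

    started.merge(ended); // keeps its own start, adopts the end from the other row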

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/Timer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/Timer.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/Timer.java
deleted file mode 100755
index daf5633..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/Timer.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric;
-
-import java.io.Closeable;
-import java.util.concurrent.Callable;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Use com.codahale.metrics's interface
- * 
- * @author zhongyan.feng
- * 
- */
-public class Timer extends Histogram {
-    private static final long serialVersionUID = 5915881891513771108L;
-
-    /**
-     * A timing context.
-     * 
-     * @see Timer#time()
-     */
-    public static class Context implements Closeable {
-        private final Timer timer;
-        private final long startTime;
-
-        private Context(Timer timer) {
-            this.timer = timer;
-            this.startTime = System.currentTimeMillis();
-        }
-
-        /**
-         * Stops recording the elapsed time, updates the timer and returns the
-         * elapsed time in nanoseconds.
-         */
-        public long stop() {
-            final long elapsed = System.currentTimeMillis() - startTime;
-            timer.update(elapsed, TimeUnit.MILLISECONDS);
-            return elapsed;
-        }
-
-        @Override
-        public void close() {
-            stop();
-        }
-    }
-
-    public Timer() {
-        init();
-    }
-
-    /**
-     * Adds a recorded duration.
-     * 
-     * @param duration the length of the duration
-     * @param unit the scale unit of {@code duration}
-     */
-    public void update(long duration, TimeUnit unit) {
-        update(unit.toMillis(duration));
-    }
-
-    /**
-     * Times and records the duration of event.
-     * 
-     * @param event a {@link Callable} whose {@link Callable#call()} method
-     *            implements a process whose duration should be timed
-     * @param <T> the type of the value returned by {@code event}
-     * @return the value returned by {@code event}
-     * @throws Exception if {@code event} throws an {@link Exception}
-     */
-    public <T> T time(Callable<T> event) throws Exception {
-        final long startTime = System.currentTimeMillis();
-        try {
-            return event.call();
-        } finally {
-            update(System.currentTimeMillis() - startTime);
-        }
-    }
-
-    /**
-     * Returns a new {@link Context}.
-     * 
-     * @return a new {@link Context}
-     * @see Context
-     */
-    public Context time() {
-        return new Context(this);
-    }
-
-    public long getCount() {
-        return allWindow.getSnapshot().getTimes();
-    }
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/TimerData.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/TimerData.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/TimerData.java
new file mode 100644
index 0000000..3f2be9b
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/TimerData.java
@@ -0,0 +1,169 @@
+package com.alibaba.jstorm.common.metric;
+
+
+import com.alibaba.jstorm.metric.Bytes;
+import com.alibaba.jstorm.metric.KVSerializable;
+
+/**
+ * @author wange
+ * @since 15/6/23
+ */
+public class TimerData extends MetricBaseData implements KVSerializable {
+    private long min;
+    private long max;
+    private double mean;
+    private double p50;
+    private double p75;
+    private double p95;
+    private double p98;
+    private double p99;
+    private double p999;
+    private double stddev;
+
+    private double m1;
+    private double m5;
+    private double m15;
+
+    public long getMin() {
+        return min;
+    }
+
+    public void setMin(long min) {
+        this.min = min;
+    }
+
+    public long getMax() {
+        return max;
+    }
+
+    public void setMax(long max) {
+        this.max = max;
+    }
+
+    public double getMean() {
+        return mean;
+    }
+
+    public void setMean(double mean) {
+        this.mean = mean;
+    }
+
+    public double getP50() {
+        return p50;
+    }
+
+    public void setP50(double p50) {
+        this.p50 = p50;
+    }
+
+    public double getP75() {
+        return p75;
+    }
+
+    public void setP75(double p75) {
+        this.p75 = p75;
+    }
+
+    public double getP95() {
+        return p95;
+    }
+
+    public void setP95(double p95) {
+        this.p95 = p95;
+    }
+
+    public double getP98() {
+        return p98;
+    }
+
+    public void setP98(double p98) {
+        this.p98 = p98;
+    }
+
+    public double getP99() {
+        return p99;
+    }
+
+    public void setP99(double p99) {
+        this.p99 = p99;
+    }
+
+    public double getP999() {
+        return p999;
+    }
+
+    public void setP999(double p999) {
+        this.p999 = p999;
+    }
+
+    public double getStddev() {
+        return stddev;
+    }
+
+    public void setStddev(double stddev) {
+        this.stddev = stddev;
+    }
+
+    public double getM1() {
+        return m1;
+    }
+
+    public void setM1(double m1) {
+        this.m1 = m1;
+    }
+
+    public double getM5() {
+        return m5;
+    }
+
+    public void setM5(double m5) {
+        this.m5 = m5;
+    }
+
+    public double getM15() {
+        return m15;
+    }
+
+    public void setM15(double m15) {
+        this.m15 = m15;
+    }
+
+    @Override
+    public byte[] getValue() {
+        byte[] ret = new byte[8 * 12];
+        Bytes.putLong(ret, 0, min);
+        Bytes.putLong(ret, 8, max);
+        Bytes.putDouble(ret, 16, p50);
+        Bytes.putDouble(ret, 24, p75);
+        Bytes.putDouble(ret, 32, p95);
+        Bytes.putDouble(ret, 40, p98);
+        Bytes.putDouble(ret, 48, p99);
+        Bytes.putDouble(ret, 56, p999);
+        Bytes.putDouble(ret, 64, mean);
+        Bytes.putDouble(ret, 72, m1);
+        Bytes.putDouble(ret, 80, m5);
+        Bytes.putDouble(ret, 88, m15);
+
+        return ret;
+    }
+
+    @Override
+    public Object fromKV(byte[] key, byte[] value) {
+        parseKey(key);
+
+        this.min = Bytes.toLong(value, 0, KVSerializable.LONG_SIZE);
+        this.max = Bytes.toLong(value, 8, KVSerializable.LONG_SIZE);
+        this.p50 = Bytes.toDouble(value, 16);
+        this.p75 = Bytes.toDouble(value, 24);
+        this.p95 = Bytes.toDouble(value, 32);
+        this.p98 = Bytes.toDouble(value, 40);
+        this.p99 = Bytes.toDouble(value, 48);
+        this.p999 = Bytes.toDouble(value, 56);
+        this.mean = Bytes.toDouble(value, 64);
+        this.m1 = Bytes.toDouble(value, 72);
+        this.m5 = Bytes.toDouble(value, 80);
+        this.m15 = Bytes.toDouble(value, 88);
+
+        return this;
+    }
+}
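
getValue() above writes a fixed 96-byte layout (two longs, then ten doubles) and fromKV() reads it back symmetrically. A sketch of the offsets and a partial read-back, using the same Bytes helpers the class itself calls:

    // Offsets in bytes:  0 min(long)   8 max(long)  16 p50  24 p75  32 p95
    //                   40 p98        48 p99        56 p999 64 mean
    //                   72 m1         80 m5         88 m15
    TimerData data = new TimerData();
    data.setMin(1L);
    data.setMax(100L);
    byte[] value = data.getValue(); // 96 bytes, laid out as above
    long min = Bytes.toLong(value, 0, KVSerializable.LONG_SIZE); // 1L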

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/TimerRatio.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/TimerRatio.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/TimerRatio.java
index 0a0e7e2..495ec4f 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/TimerRatio.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/TimerRatio.java
@@ -28,11 +28,7 @@ public class TimerRatio implements Gauge<Double> {
 
     private long lastUpdateTime = 0;
     private long sum = 0;
-    private long lastGaugeTime;
-
-    public void init() {
-        lastGaugeTime = System.nanoTime();
-    }
+    private long lastGaugeTime = 0;
 
     public synchronized void start() {
         if (lastUpdateTime == 0) {

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/Top.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/Top.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/Top.java
deleted file mode 100755
index 00ccc98..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/Top.java
+++ /dev/null
@@ -1,157 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.common.metric;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Comparator;
-import java.util.List;
-import java.util.TreeSet;
-
-import com.alibaba.jstorm.common.metric.operator.convert.Convertor;
-import com.alibaba.jstorm.common.metric.operator.merger.Merger;
-import com.alibaba.jstorm.common.metric.operator.updater.Updater;
-import com.alibaba.jstorm.common.metric.window.Metric;
-
-public class Top<T> extends Metric<List<T>, TreeSet<T>> {
-    private static final long serialVersionUID = 4990212679365713831L;
-
-    final protected Comparator<T> comparator;
-    final protected int n;
-
-    public Top(Comparator<T> comparator, int n) {
-        this.comparator = comparator;
-        this.n = n;
-
-        this.defaultValue = new TreeSet<T>(comparator);
-        this.updater = new Top.TopUpdator<T>(comparator, n);
-        this.merger = new Top.TopMerger<T>(comparator, n);
-        this.convertor = new Top.SetToList<T>();
-
-        init();
-    }
-
-    public static class TopUpdator<T> implements Updater<TreeSet<T>> {
-        private static final long serialVersionUID = -3940041101182079146L;
-
-        final protected Comparator<T> comparator;
-        final protected int n;
-
-        public TopUpdator(Comparator<T> comparator, int n) {
-            this.comparator = comparator;
-            this.n = n;
-        }
-
-        @SuppressWarnings("unchecked")
-        @Override
-        public TreeSet<T> update(Number object, TreeSet<T> cache,
-                Object... others) {
-            // TODO Auto-generated method stub
-            if (cache == null) {
-                cache = new TreeSet<T>(comparator);
-            }
-
-            cache.add((T) object);
-
-            if (cache.size() > n) {
-                cache.remove(cache.last());
-            }
-
-            return cache;
-        }
-
-        @Override
-        public TreeSet<T> updateBatch(TreeSet<T> object, TreeSet<T> cache,
-                Object... objects) {
-            // TODO Auto-generated method stub
-            if (cache == null) {
-                cache = new TreeSet<T>(comparator);
-            }
-
-            cache.addAll(object);
-
-            while (cache.size() > n) {
-                cache.remove(cache.last());
-            }
-
-            return cache;
-        }
-
-    }
-
-    public static class TopMerger<T> implements Merger<TreeSet<T>> {
-
-        private static final long serialVersionUID = 4478867986986581638L;
-        final protected Comparator<T> comparator;
-        final protected int n;
-
-        public TopMerger(Comparator<T> comparator, int n) {
-            this.comparator = comparator;
-            this.n = n;
-        }
-
-        @Override
-        public TreeSet<T> merge(Collection<TreeSet<T>> objs,
-                TreeSet<T> unflushed, Object... others) {
-            // TODO Auto-generated method stub
-            TreeSet<T> temp = new TreeSet<T>(comparator);
-            if (unflushed != null) {
-                temp.addAll(unflushed);
-            }
-
-            for (TreeSet<T> set : objs) {
-                temp.addAll(set);
-            }
-
-            if (temp.size() <= n) {
-                return temp;
-            }
-
-            TreeSet<T> ret = new TreeSet<T>(comparator);
-            int i = 0;
-            for (T item : temp) {
-                if (i < n) {
-                    ret.add(item);
-                    i++;
-                } else {
-                    break;
-                }
-            }
-            return ret;
-        }
-
-    }
-
-    public static class SetToList<T> implements Convertor<TreeSet<T>, List<T>> {
-        private static final long serialVersionUID = 4968816655779625255L;
-
-        @Override
-        public List<T> convert(TreeSet<T> set) {
-            // TODO Auto-generated method stub
-            List<T> ret = new ArrayList<T>();
-            if (set != null) {
-                for (T item : set) {
-                    ret.add(item);
-                }
-            }
-            return ret;
-        }
-
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/TopologyHistory.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/TopologyHistory.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/TopologyHistory.java
new file mode 100644
index 0000000..186e2be
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/TopologyHistory.java
@@ -0,0 +1,153 @@
+package com.alibaba.jstorm.common.metric;
+
+import com.alibaba.jstorm.metric.KVSerializable;
+import com.alibaba.jstorm.metric.MetricUtils;
+import com.alibaba.jstorm.utils.JStormUtils;
+
+import java.util.Date;
+
+/**
+ * @author wange
+ * @since 15/7/16
+ */
+public class TopologyHistory implements KVSerializable {
+
+    private long id;
+    private String clusterName;
+    private String topologyName;
+    private String topologyId;
+    private double sampleRate;
+    private Date start;
+    private Date end;
+
+    public TopologyHistory() {
+    }
+
+    public TopologyHistory(String clusterName, String topologyId) {
+        this.clusterName = clusterName;
+        this.topologyId = topologyId;
+    }
+
+    public long getId() {
+        return id;
+    }
+
+    public void setId(long id) {
+        this.id = id;
+    }
+
+    public String getClusterName() {
+        return clusterName;
+    }
+
+    public void setClusterName(String clusterName) {
+        this.clusterName = clusterName;
+    }
+
+    public String getTopologyName() {
+        return topologyName;
+    }
+
+    public void setTopologyName(String topologyName) {
+        this.topologyName = topologyName;
+    }
+
+    public String getTopologyId() {
+        return topologyId;
+    }
+
+    public void setTopologyId(String topologyId) {
+        this.topologyId = topologyId;
+    }
+
+    public Date getStart() {
+        return start;
+    }
+
+    public void setStart(Date start) {
+        this.start = start;
+    }
+
+    public Date getEnd() {
+        return end;
+    }
+
+    public void setEnd(Date end) {
+        this.end = end;
+    }
+
+    public Date getTime() {
+        return start != null ? start : end;
+    }
+
+    public String getTag() {
+        return start != null ? KVSerializable.START : KVSerializable.END;
+    }
+
+    public double getSampleRate() {
+        return sampleRate;
+    }
+
+    public void setSampleRate(Double sampleRate) {
+        if (sampleRate == null) {
+            this.sampleRate = 1.0d;
+        } else {
+            this.sampleRate = sampleRate;
+        }
+    }
+
+    /**
+     * key: clusterName + topologyName + time
+     */
+    @Override
+    public byte[] getKey() {
+        return MetricUtils.concat2(clusterName, topologyName, getTime().getTime()).getBytes();
+
+    }
+
+    /**
+     * value: topologyId + tag (S/E, start/end) + sampleRate
+     */
+    @Override
+    public byte[] getValue() {
+        return MetricUtils.concat2(topologyId, getTag(), sampleRate).getBytes();
+    }
+
+    @Override
+    public Object fromKV(byte[] key, byte[] value) {
+        String[] keyParts = new String(key).split(MetricUtils.DELIM);
+        long time = 0;
+        if (keyParts.length >= 3) {
+            this.clusterName = keyParts[0];
+            this.topologyName = keyParts[1];
+            time = Long.valueOf(keyParts[2]);
+        }
+
+        String[] valueParts = new String(value).split(MetricUtils.DELIM);
+        if (valueParts.length >= 3) {
+            this.topologyId = valueParts[0];
+            String tag = valueParts[1];
+            if (tag.equals(KVSerializable.START)) {
+                this.start = new Date(time);
+            } else {
+                this.end = new Date(time);
+            }
+            this.sampleRate = JStormUtils.parseDouble(valueParts[2], 0.1d);
+        }
+
+        return this;
+    }
+
+    public String getIdentity(){
+        return MetricUtils.concat2(clusterName, topologyId);
+    }
+
+    public void merge(TopologyHistory history){
+        if (history.start != null && this.start == null){
+            this.start = history.start;
+        }
+        if (history.end != null && this.end == null){
+            this.end = history.end;
+        }
+    }
+}
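
As with TaskTrack, a topology's lifetime is stored as separate start and end rows that merge() recombines. A hedged sketch with illustrative identifiers:

    TopologyHistory started = new TopologyHistory("cluster-1", "topo-1-123");
    started.setTopologyName("topo-1");
    started.setStart(new Date());
    started.setSampleRate(0.1d);

    TopologyHistory ended = new TopologyHistory("cluster-1", "topo-1-123");
    ended.setTopologyName("topo-1");
    ended.setEnd(new Date());

    started.merge(ended); // one record now carries both start and end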

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/Counter.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/Counter.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/Counter.java
new file mode 100644
index 0000000..6745f14
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/Counter.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old;
+
+import com.alibaba.jstorm.common.metric.old.operator.convert.DefaultConvertor;
+import com.alibaba.jstorm.common.metric.old.operator.merger.SumMerger;
+import com.alibaba.jstorm.common.metric.old.operator.updater.AddUpdater;
+import com.alibaba.jstorm.common.metric.old.window.Metric;
+
+/**
+ * This class is similar to com.codahale.metrics.Counter.
+ * 
+ * It sums all values recorded within each window.
+ * 
+ * For usage of Counter, please refer to the Sampling interface.
+ * 
+ * @author zhongyan.feng
+ * 
+ * @param <T>
+ */
+public class Counter<T extends Number> extends Metric<T, T> {
+    private static final long serialVersionUID = -1362345159511508074L;
+
+    public Counter(T zero) {
+        updater = new AddUpdater<T>();
+        merger = new SumMerger<T>();
+        convertor = new DefaultConvertor<T>();
+        defaultValue = zero;
+
+        init();
+    }
+
+    public static void main(String[] args) {
+
+    }
+}
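
A minimal usage sketch of the old-style Counter; update(..) and the windowed snapshots come from the old window.Metric base class, which is not part of this hunk:

    Counter<Long> emitted = new Counter<Long>(0L);
    emitted.update(1L); // AddUpdater adds into the current windows
    emitted.update(4L); // each window then reports the sum produced by SumMerger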

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/Gauge.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/Gauge.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/Gauge.java
new file mode 100644
index 0000000..b323df8
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/Gauge.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old;
+
+import com.alibaba.jstorm.common.metric.old.window.Metric;
+import com.alibaba.jstorm.common.metric.old.window.StatBuckets;
+
+import java.util.Map;
+import java.util.TreeMap;
+
+public class Gauge<T extends Number> extends Metric<Number, Number> {
+    private static final long serialVersionUID = 1985614006717750790L;
+
+    protected com.codahale.metrics.Gauge<T> gauge;
+
+    public Gauge(com.codahale.metrics.Gauge<T> gauge) {
+        this.gauge = gauge;
+
+        init();
+    }
+
+    @Override
+    public void init() {
+
+    }
+
+    @Override
+    public void update(Number obj) {
+        // gauges are read-only here: updates are intentionally ignored
+    }
+
+    @Override
+    public Map<Integer, Number> getSnapshot() {
+        // report the gauge's current value for every configured window
+        Number value = gauge.getValue();
+
+        Map<Integer, Number> ret = new TreeMap<Integer, Number>();
+        for (Integer timeKey : windowSeconds) {
+            ret.put(timeKey, value);
+        }
+        ret.put(StatBuckets.ALL_TIME_WINDOW, value);
+
+        return ret;
+    }
+
+}
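
This wrapper reports a codahale gauge's instantaneous value for every window, as getSnapshot() above shows. A hedged usage sketch:

    // Illustrative codahale gauge; 42 stands in for a real reading.
    com.codahale.metrics.Gauge<Integer> pending = new com.codahale.metrics.Gauge<Integer>() {
        @Override
        public Integer getValue() {
            return 42;
        }
    };
    Gauge<Integer> gauge = new Gauge<Integer>(pending);
    // gauge.getSnapshot() maps every window key, plus ALL_TIME_WINDOW, to 42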

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/Histogram.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/Histogram.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/Histogram.java
new file mode 100644
index 0000000..478de4e
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/Histogram.java
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old;
+
+import com.alibaba.jstorm.common.metric.old.operator.convert.Convertor;
+import com.alibaba.jstorm.common.metric.old.operator.merger.AvgMerger;
+import com.alibaba.jstorm.common.metric.old.operator.updater.AvgUpdater;
+import com.alibaba.jstorm.common.metric.old.window.Metric;
+
+/**
+ * Histogram computes the mean of recorded values: updates accumulate into a
+ * (sum, times) pair and the convertor reports sum / times per window.
+ * 
+ * @author zhongyan.feng
+ * 
+ */
+public class Histogram extends Metric<Double, Histogram.HistorgramPair> {
+    private static final long serialVersionUID = -1362345159511508074L;
+
+    public Histogram() {
+        defaultValue = new HistorgramPair();
+        updater = new AvgUpdater();
+        merger = new AvgMerger();
+        convertor = new HistogramConvertor();
+
+        init();
+    }
+
+    public static class HistogramConvertor implements Convertor<HistorgramPair, Double> {
+        private static final long serialVersionUID = -1569170826785657226L;
+
+        @Override
+        public Double convert(HistorgramPair from) {
+            // mean = sum / times; 0.0 when nothing has been recorded
+            if (from == null) {
+                return 0.0d;
+            }
+
+            if (from.getTimes() == 0) {
+                return 0.0d;
+            } else {
+                return from.getSum() / from.getTimes();
+            }
+        }
+
+    }
+
+    public static class HistorgramPair {
+        private double sum;
+        private long times;
+
+        public HistorgramPair() {
+
+        }
+
+        public HistorgramPair(double sum, long times) {
+            this.sum = sum;
+            this.times = times;
+        }
+
+        public double getSum() {
+            return sum;
+        }
+
+        public void setSum(double sum) {
+            this.sum = sum;
+        }
+
+        public void addValue(double value) {
+            sum += value;
+        }
+
+        public long getTimes() {
+            return times;
+        }
+
+        public void setTimes(long times) {
+            this.times = times;
+        }
+
+        public void addTimes(long time) {
+            times += time;
+        }
+    }
+
+}
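
The convertor above reduces a (sum, times) pair to its mean. A small worked example:

    Histogram.HistorgramPair pair = new Histogram.HistorgramPair(30.0d, 3L);
    Double mean = new Histogram.HistogramConvertor().convert(pair);  // 10.0
    Double empty = new Histogram.HistogramConvertor().convert(null); // 0.0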

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/LongCounter.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/LongCounter.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/LongCounter.java
new file mode 100644
index 0000000..cd64e62
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/LongCounter.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old;
+
+import com.alibaba.jstorm.common.metric.old.operator.convert.AtomicLongToLong;
+import com.alibaba.jstorm.common.metric.old.operator.merger.LongSumMerger;
+import com.alibaba.jstorm.common.metric.old.operator.updater.LongAddUpdater;
+import com.alibaba.jstorm.common.metric.old.window.Metric;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+public class LongCounter extends Metric<Long, AtomicLong> {
+    private static final long serialVersionUID = -1362345159511508074L;
+
+    public LongCounter() {
+        super.defaultValue = new AtomicLong(0);
+        super.updater = new LongAddUpdater();
+        super.merger = new LongSumMerger();
+        super.convertor = new AtomicLongToLong();
+
+        init();
+    }
+
+}
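
LongCounter is the primitive-friendly variant: it accumulates into an AtomicLong per window and converts back to plain Long values. A hedged sketch, with update(..) coming from the base Metric class:

    LongCounter tupleCount = new LongCounter();
    tupleCount.update(1L);
    tupleCount.update(1L); // windows now hold 2, exposed as Long via AtomicLongToLong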

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/Meter.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/Meter.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/Meter.java
new file mode 100644
index 0000000..cde66fd
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/Meter.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old;
+
+import com.alibaba.jstorm.common.metric.old.operator.convert.DefaultConvertor;
+import com.alibaba.jstorm.common.metric.old.operator.merger.TpsMerger;
+import com.alibaba.jstorm.common.metric.old.operator.updater.AddUpdater;
+import com.alibaba.jstorm.common.metric.old.window.Metric;
+
+/**
+ * Meter is used to compute tps: each update marks one or more events and
+ * TpsMerger derives the per-window rate.
+ * 
+ * @author zhongyan.feng
+ * 
+ */
+public class Meter extends Metric<Double, Double> {
+    private static final long serialVersionUID = -1362345159511508074L;
+
+    public Meter() {
+        defaultValue = 0.0d;
+        updater = new AddUpdater<Double>();
+        merger = new TpsMerger();
+        convertor = new DefaultConvertor<Double>();
+
+        init();
+    }
+
+    public void update() {
+        update(Double.valueOf(1));
+    }
+
+}
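
Each update() marks one event and TpsMerger derives per-window rates. A minimal sketch:

    Meter sendTps = new Meter();
    sendTps.update();       // one event
    sendTps.update(3.0d);   // three events recorded at once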

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/MetricFilter.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/MetricFilter.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/MetricFilter.java
new file mode 100644
index 0000000..a91b925
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/MetricFilter.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old;
+
+import com.alibaba.jstorm.common.metric.old.window.Metric;
+
+import java.io.Serializable;
+
+public interface MetricFilter extends Serializable {
+    /**
+     * Matches all metrics, regardless of type or name.
+     */
+    MetricFilter ALL = new MetricFilter() {
+        private static final long serialVersionUID = 7089987006352295530L;
+
+        @Override
+        public boolean matches(String name, Metric metric) {
+            return true;
+        }
+    };
+
+    /**
+     * Returns {@code true} if the metric matches the filter; {@code false} otherwise.
+     * 
+     * @param name the metric's name
+     * @param metric the metric
+     * @return {@code true} if the metric matches the filter
+     */
+    boolean matches(String name, Metric metric);
+}
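
Custom filters follow the same shape as the ALL constant above; a hedged sketch matching metrics by name prefix (the prefix itself is illustrative):

    MetricFilter queueMetricsOnly = new MetricFilter() {
        private static final long serialVersionUID = 1L;

        @Override
        public boolean matches(String name, Metric metric) {
            return name.startsWith("queue.");
        }
    };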

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/MetricSet.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/MetricSet.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/MetricSet.java
new file mode 100644
index 0000000..1cce913
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/MetricSet.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old;
+
+import com.alibaba.jstorm.common.metric.old.window.Metric;
+
+import java.io.Serializable;
+import java.util.Map;
+
+public interface MetricSet extends Serializable {
+    Map<String, Metric> getMetrics();
+
+}
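
A trivial implementation sketch of the interface; the class and field names are illustrative, and Metric is the old window.Metric type imported above:

    public class WorkerMetricSet implements MetricSet {
        private static final long serialVersionUID = 1L;

        private final Map<String, Metric> metrics = new java.util.HashMap<String, Metric>();

        @Override
        public Map<String, Metric> getMetrics() {
            return metrics;
        }
    }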

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/MetricThrift.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/MetricThrift.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/MetricThrift.java
new file mode 100644
index 0000000..8de6f6d
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/MetricThrift.java
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old;
+
+import backtype.storm.generated.MetricInfo;
+import backtype.storm.generated.MetricWindow;
+import com.alibaba.jstorm.utils.JStormUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+public class MetricThrift {
+    private static final Logger LOG = LoggerFactory.getLogger(MetricThrift.class);
+
+    public static MetricInfo mkMetricInfo() {
+        return new MetricInfo();
+    }
+
+    public static void insert(MetricInfo metricInfo, String key, Map<Integer, Double> windowSet) {
+    }
+
+    public static MetricWindow merge(Map<String, MetricWindow> details) {
+        Map<Integer, Double> merge = new HashMap<Integer, Double>();
+
+        for (Entry<String, MetricWindow> entry : details.entrySet()) {
+            MetricWindow metricWindow = entry.getValue();
+            Map<Integer, Double> metric = metricWindow.get_metricWindow();
+
+            for (Entry<Integer, Double> metricEntry : metric.entrySet()) {
+                Integer key = metricEntry.getKey();
+                try {
+                    Double value = ((Number) JStormUtils.add(metricEntry.getValue(), merge.get(key))).doubleValue();
+                    merge.put(key, value);
+                } catch (Exception e) {
+                    LOG.error("Invalid type of " + entry.getKey() + ":" + key, e);
+                }
+            }
+        }
+
+        MetricWindow ret = new MetricWindow();
+
+        ret.set_metricWindow(merge);
+        return ret;
+    }
+
+    public static void merge(MetricInfo metricInfo, Map<String, Map<String, MetricWindow>> extraMap) {
+        for (Entry<String, Map<String, MetricWindow>> entry : extraMap.entrySet()) {
+            String metricName = entry.getKey();
+            // metricInfo.put_to_baseMetric(metricName, merge(entry.getValue()));
+        }
+    }
+
+    public static MetricWindow mergeMetricWindow(MetricWindow fromMetric, MetricWindow toMetric) {
+        if (toMetric == null) {
+            toMetric = new MetricWindow(new HashMap<Integer, Double>());
+        }
+
+        if (fromMetric == null) {
+            return toMetric;
+        }
+
+        List<Map<Integer, Double>> list = new ArrayList<Map<Integer, Double>>();
+        list.add(fromMetric.get_metricWindow());
+        list.add(toMetric.get_metricWindow());
+        Map<Integer, Double> merged = JStormUtils.mergeMapList(list);
+
+        toMetric.set_metricWindow(merged);
+
+        return toMetric;
+    }
+
+    public static MetricInfo mergeMetricInfo(MetricInfo from, MetricInfo to) {
+        if (to == null) {
+            to = mkMetricInfo();
+        }
+
+        if (from == null) {
+            return to;
+        }
+        // to.get_baseMetric().putAll(from.get_baseMetric());
+
+        return to;
+
+    }
+
+}
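
A hedged sketch of mergeMetricWindow(..) above combining two thrift windows; the exact combine semantics live in JStormUtils.mergeMapList, which is not part of this hunk:

    MetricWindow from = new MetricWindow(new HashMap<Integer, Double>());
    from.get_metricWindow().put(60, 5.0d);
    MetricWindow to = new MetricWindow(new HashMap<Integer, Double>());
    to.get_metricWindow().put(60, 7.0d);
    to = MetricThrift.mergeMetricWindow(from, to); // both entries for window 60 merged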

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/RegistryType.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/RegistryType.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/RegistryType.java
new file mode 100644
index 0000000..6e8a020
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/RegistryType.java
@@ -0,0 +1,9 @@
+package com.alibaba.jstorm.common.metric.old;
+
+/**
+ * @author wange
+ * @since 15/6/11
+ */
+public enum RegistryType {
+    STREAM, TASK, COMPONENT, WORKER, SYS
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/StaticsType.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/StaticsType.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/StaticsType.java
new file mode 100644
index 0000000..2094b9a
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/StaticsType.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old;
+
+public enum StaticsType {
+    emitted, send_tps, recv_tps, acked, failed, transferred, process_latencies;
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/Timer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/Timer.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/Timer.java
new file mode 100644
index 0000000..675c237
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/Timer.java
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old;
+
+import java.io.Closeable;
+import java.util.concurrent.Callable;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Follows the com.codahale.metrics Timer interface
+ * 
+ * @author zhongyan.feng
+ * 
+ */
+public class Timer extends Histogram {
+    private static final long serialVersionUID = 5915881891513771108L;
+
+    /**
+     * A timing context.
+     * 
+     * @see Timer#time()
+     */
+    public static class Context implements Closeable {
+        private final Timer timer;
+        private final long startTime;
+
+        private Context(Timer timer) {
+            this.timer = timer;
+            this.startTime = System.currentTimeMillis();
+        }
+
+        /**
+         * Stops recording the elapsed time, updates the timer and returns the elapsed time in milliseconds.
+         */
+        public long stop() {
+            final long elapsed = System.currentTimeMillis() - startTime;
+            timer.update(elapsed, TimeUnit.MILLISECONDS);
+            return elapsed;
+        }
+
+        @Override
+        public void close() {
+            stop();
+        }
+    }
+
+    public Timer() {
+        init();
+    }
+
+    /**
+     * Adds a recorded duration.
+     * 
+     * @param duration the length of the duration
+     * @param unit the scale unit of {@code duration}
+     */
+    public void update(long duration, TimeUnit unit) {
+        update(unit.toMillis(duration));
+    }
+
+    /**
+     * Times and records the duration of an event.
+     * 
+     * @param event a {@link Callable} whose {@link Callable#call()} method implements a process whose duration should be timed
+     * @param <T> the type of the value returned by {@code event}
+     * @return the value returned by {@code event}
+     * @throws Exception if {@code event} throws an {@link Exception}
+     */
+    public <T> T time(Callable<T> event) throws Exception {
+        final long startTime = System.currentTimeMillis();
+        try {
+            return event.call();
+        } finally {
+            update(System.currentTimeMillis() - startTime);
+        }
+    }
+
+    /**
+     * Returns a new {@link Context}.
+     * 
+     * @return a new {@link Context}
+     * @see Context
+     */
+    public Context time() {
+        return new Context(this);
+    }
+
+    public long getCount() {
+        return allWindow.getSnapshot().getTimes();
+    }
+}
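
A usage sketch for the Timer above; doWork() is a hypothetical workload:

    Timer timer = new Timer();

    // explicit context; close() stops the clock and records elapsed milliseconds
    Timer.Context ctx = timer.time();
    try {
        doWork();
    } finally {
        ctx.close();
    }

    // or time a Callable directly (propagates the callable's exception)
    String result = timer.time(new Callable<String>() {
        public String call() {
            return "done";
        }
    });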

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/Top.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/Top.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/Top.java
new file mode 100644
index 0000000..e3fdbdd
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/Top.java
@@ -0,0 +1,154 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.List;
+import java.util.TreeSet;
+
+import com.alibaba.jstorm.common.metric.old.operator.convert.Convertor;
+import com.alibaba.jstorm.common.metric.old.operator.merger.Merger;
+import com.alibaba.jstorm.common.metric.old.operator.updater.Updater;
+import com.alibaba.jstorm.common.metric.old.window.Metric;
+
+public class Top<T> extends Metric<List<T>, TreeSet<T>> {
+    private static final long serialVersionUID = 4990212679365713831L;
+
+    final protected Comparator<T> comparator;
+    final protected int n;
+
+    public Top(Comparator<T> comparator, int n) {
+        this.comparator = comparator;
+        this.n = n;
+
+        this.defaultValue = new TreeSet<T>(comparator);
+        this.updater = new TopUpdator<T>(comparator, n);
+        this.merger = new TopMerger<T>(comparator, n);
+        this.convertor = new SetToList<T>();
+
+        init();
+    }
+
+    public static class TopUpdator<T> implements Updater<TreeSet<T>> {
+        private static final long serialVersionUID = -3940041101182079146L;
+
+        final protected Comparator<T> comparator;
+        final protected int n;
+
+        public TopUpdator(Comparator<T> comparator, int n) {
+            this.comparator = comparator;
+            this.n = n;
+        }
+
+        @SuppressWarnings("unchecked")
+        @Override
+        public TreeSet<T> update(Number object, TreeSet<T> cache, Object... others) {
+            // insert the element; evict the comparator-largest entry once the set exceeds n
+            if (cache == null) {
+                cache = new TreeSet<T>(comparator);
+            }
+
+            cache.add((T) object);
+
+            if (cache.size() > n) {
+                cache.remove(cache.last());
+            }
+
+            return cache;
+        }
+
+        @Override
+        public TreeSet<T> updateBatch(TreeSet<T> object, TreeSet<T> cache, Object... objects) {
+            // merge the batch into the cache, then trim back down to n entries
+            if (cache == null) {
+                cache = new TreeSet<T>(comparator);
+            }
+
+            cache.addAll(object);
+
+            while (cache.size() > n) {
+                cache.remove(cache.last());
+            }
+
+            return cache;
+        }
+
+    }
+
+    public static class TopMerger<T> implements Merger<TreeSet<T>> {
+
+        private static final long serialVersionUID = 4478867986986581638L;
+        final protected Comparator<T> comparator;
+        final protected int n;
+
+        public TopMerger(Comparator<T> comparator, int n) {
+            this.comparator = comparator;
+            this.n = n;
+        }
+
+        @Override
+        public TreeSet<T> merge(Collection<TreeSet<T>> objs, TreeSet<T> unflushed, Object... others) {
+            // combine all sets with any unflushed entries, then keep only the first n elements
+            TreeSet<T> temp = new TreeSet<T>(comparator);
+            if (unflushed != null) {
+                temp.addAll(unflushed);
+            }
+
+            for (TreeSet<T> set : objs) {
+                temp.addAll(set);
+            }
+
+            if (temp.size() <= n) {
+                return temp;
+            }
+
+            TreeSet<T> ret = new TreeSet<T>(comparator);
+            int i = 0;
+            for (T item : temp) {
+                if (i < n) {
+                    ret.add(item);
+                    i++;
+                } else {
+                    break;
+                }
+            }
+            return ret;
+        }
+
+    }
+
+    public static class SetToList<T> implements Convertor<TreeSet<T>, List<T>> {
+        private static final long serialVersionUID = 4968816655779625255L;
+
+        @Override
+        public List<T> convert(TreeSet<T> set) {
+            // copy the sorted set into a list, preserving iteration order
+            List<T> ret = new ArrayList<T>();
+            if (set != null) {
+                for (T item : set) {
+                    ret.add(item);
+                }
+            }
+            return ret;
+        }
+
+    }
+
+}
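
A sketch of the updater's eviction behaviour (update(Number, ...) casts the sample to T, so T must be the boxed type being recorded):

    Comparator<Integer> asc = new Comparator<Integer>() {
        public int compare(Integer a, Integer b) {
            return a.compareTo(b);
        }
    };
    Top.TopUpdator<Integer> updator = new Top.TopUpdator<Integer>(asc, 3);
    TreeSet<Integer> cache = null;
    for (int v : new int[] { 5, 1, 9, 3 }) {
        cache = updator.update(v, cache);   // evicts the comparator-largest entry past n
    }
    // cache now holds [1, 3, 5]; pass a descending comparator to keep the n largest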

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/Sampling.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/Sampling.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/Sampling.java
new file mode 100644
index 0000000..8f3053b
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/Sampling.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old.operator;
+
+import java.io.Serializable;
+
+public interface Sampling<V> extends Serializable {
+
+    /**
+     * Update the metric with a new sample value
+     * 
+     * @param obj the sample to record
+     */
+    void update(Number obj);
+
+    /**
+     * Get a snapshot of the current metric value
+     * 
+     * @return the snapshot
+     */
+    V getSnapshot();
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/StartTime.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/StartTime.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/StartTime.java
new file mode 100644
index 0000000..f87ae5a
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/StartTime.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old.operator;
+
+public interface StartTime {
+    long getStartTime();
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/convert/AtomicLongToLong.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/convert/AtomicLongToLong.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/convert/AtomicLongToLong.java
new file mode 100644
index 0000000..c9a8b24
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/convert/AtomicLongToLong.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old.operator.convert;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+public class AtomicLongToLong implements Convertor<AtomicLong, Long> {
+    private static final long serialVersionUID = -2755066621494409063L;
+
+    @Override
+    public Long convert(AtomicLong obj) {
+        // unwrap the AtomicLong, propagating null
+        if (obj == null) {
+            return null;
+        } else {
+            return obj.get();
+        }
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/convert/Convertor.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/convert/Convertor.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/convert/Convertor.java
new file mode 100644
index 0000000..713c1df
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/convert/Convertor.java
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old.operator.convert;
+
+import java.io.Serializable;
+
+public interface Convertor<From, To> extends Serializable {
+
+    To convert(From obj);
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/convert/DefaultConvertor.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/convert/DefaultConvertor.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/convert/DefaultConvertor.java
new file mode 100644
index 0000000..2cad206
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/convert/DefaultConvertor.java
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old.operator.convert;
+
+public class DefaultConvertor<T> implements Convertor<T, T> {
+    private static final long serialVersionUID = -647209923903679727L;
+
+    @Override
+    public T convert(T obj) {
+        // identity conversion: return the object unchanged
+        return obj;
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/convert/SetToList.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/convert/SetToList.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/convert/SetToList.java
new file mode 100644
index 0000000..2569387
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/convert/SetToList.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old.operator.convert;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+public class SetToList<T> implements Convertor<Set<T>, List<T>> {
+    private static final long serialVersionUID = 4968816655779625255L;
+
+    @Override
+    public List<T> convert(Set<T> set) {
+        // copy the set into a list, preserving iteration order
+        List<T> ret = new ArrayList<T>();
+        if (set != null) {
+            for (T item : set) {
+                ret.add(item);
+            }
+        }
+        return ret;
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/merger/AvgMerger.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/merger/AvgMerger.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/merger/AvgMerger.java
new file mode 100644
index 0000000..815bb33
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/merger/AvgMerger.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old.operator.merger;
+
+import com.alibaba.jstorm.common.metric.old.Histogram;
+
+import java.util.Collection;
+
+public class AvgMerger implements Merger<Histogram.HistorgramPair> {
+    private static final long serialVersionUID = -3892281208959055221L;
+
+    @Override
+    public Histogram.HistorgramPair merge(Collection<Histogram.HistorgramPair> objs, Histogram.HistorgramPair unflushed, Object... others) {
+        // accumulate sum and count across all windows plus any unflushed pair
+        double sum = 0.0d;
+        long times = 0l;
+
+        if (unflushed != null) {
+            sum = sum + unflushed.getSum();
+            times = times + unflushed.getTimes();
+        }
+
+        for (Histogram.HistorgramPair item : objs) {
+            if (item == null) {
+                continue;
+            }
+            sum = sum + item.getSum();
+            times = times + item.getTimes();
+        }
+
+        return new Histogram.HistorgramPair(sum, times);
+    }
+
+}
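
The merged HistorgramPair carries a running sum and count; the average itself is derived at read time. A sketch, where "windows" is a Collection<Histogram.HistorgramPair> of flushed buckets and "unflushed" may be null:

    Histogram.HistorgramPair merged = new AvgMerger().merge(windows, unflushed);
    double avg = merged.getTimes() == 0 ? 0.0 : merged.getSum() / merged.getTimes();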

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/merger/LongSumMerger.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/merger/LongSumMerger.java b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/merger/LongSumMerger.java
new file mode 100644
index 0000000..1151718
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/common/metric/old/operator/merger/LongSumMerger.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.common.metric.old.operator.merger;
+
+import java.util.Collection;
+import java.util.concurrent.atomic.AtomicLong;
+
+public class LongSumMerger implements Merger<AtomicLong> {
+    private static final long serialVersionUID = -3500779273677666691L;
+
+    @Override
+    public AtomicLong merge(Collection<AtomicLong> objs, AtomicLong unflushed, Object... others) {
+        AtomicLong ret = new AtomicLong(0);
+        if (unflushed != null) {
+            ret.addAndGet(unflushed.get());
+        }
+
+        for (AtomicLong item : objs) {
+            if (item == null) {
+                continue;
+            }
+            ret.addAndGet(item.get());
+        }
+        return ret;
+    }
+
+}
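
The merge is null-tolerant on both the collection elements and the unflushed value, so a sketch like this sums to 8:

    Collection<AtomicLong> counters = Arrays.asList(new AtomicLong(3), null, new AtomicLong(4));
    long total = new LongSumMerger().merge(counters, new AtomicLong(1)).get();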


[47/51] [partial] storm git commit: Update JStorm to latest release 2.1.0

Posted by pt...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/coordination/BatchSubtopologyBuilder.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/coordination/BatchSubtopologyBuilder.java b/jstorm-core/src/main/java/backtype/storm/coordination/BatchSubtopologyBuilder.java
index 2a77f3b..ce4c955 100755
--- a/jstorm-core/src/main/java/backtype/storm/coordination/BatchSubtopologyBuilder.java
+++ b/jstorm-core/src/main/java/backtype/storm/coordination/BatchSubtopologyBuilder.java
@@ -42,87 +42,83 @@ public class BatchSubtopologyBuilder {
     Map<String, Component> _bolts = new HashMap<String, Component>();
     Component _masterBolt;
     String _masterId;
-    
+
     public BatchSubtopologyBuilder(String masterBoltId, IBasicBolt masterBolt, Number boltParallelism) {
         Integer p = boltParallelism == null ? null : boltParallelism.intValue();
         _masterBolt = new Component(new BasicBoltExecutor(masterBolt), p);
         _masterId = masterBoltId;
     }
-    
+
     public BatchSubtopologyBuilder(String masterBoltId, IBasicBolt masterBolt) {
         this(masterBoltId, masterBolt, null);
     }
-    
+
     public BoltDeclarer getMasterDeclarer() {
         return new BoltDeclarerImpl(_masterBolt);
     }
-        
+
     public BoltDeclarer setBolt(String id, IBatchBolt bolt) {
         return setBolt(id, bolt, null);
     }
-    
+
     public BoltDeclarer setBolt(String id, IBatchBolt bolt, Number parallelism) {
         return setBolt(id, new BatchBoltExecutor(bolt), parallelism);
-    }     
-    
+    }
+
     public BoltDeclarer setBolt(String id, IBasicBolt bolt) {
         return setBolt(id, bolt, null);
-    }    
-    
+    }
+
     public BoltDeclarer setBolt(String id, IBasicBolt bolt, Number parallelism) {
         return setBolt(id, new BasicBoltExecutor(bolt), parallelism);
     }
-    
+
     private BoltDeclarer setBolt(String id, IRichBolt bolt, Number parallelism) {
         Integer p = null;
-        if(parallelism!=null) p = parallelism.intValue();
+        if (parallelism != null)
+            p = parallelism.intValue();
         Component component = new Component(bolt, p);
         _bolts.put(id, component);
         return new BoltDeclarerImpl(component);
     }
-    
+
     public void extendTopology(TopologyBuilder builder) {
         BoltDeclarer declarer = builder.setBolt(_masterId, new CoordinatedBolt(_masterBolt.bolt), _masterBolt.parallelism);
-        for(InputDeclaration decl: _masterBolt.declarations) {
+        for (InputDeclaration decl : _masterBolt.declarations) {
             decl.declare(declarer);
         }
-        for(Map conf: _masterBolt.componentConfs) {
+        for (Map conf : _masterBolt.componentConfs) {
             declarer.addConfigurations(conf);
         }
-        for(String id: _bolts.keySet()) {
+        for (String id : _bolts.keySet()) {
             Component component = _bolts.get(id);
             Map<String, SourceArgs> coordinatedArgs = new HashMap<String, SourceArgs>();
-            for(String c: componentBoltSubscriptions(component)) {
+            for (String c : componentBoltSubscriptions(component)) {
                 SourceArgs source;
-                if(c.equals(_masterId)) {
+                if (c.equals(_masterId)) {
                     source = SourceArgs.single();
                 } else {
                     source = SourceArgs.all();
                 }
-                coordinatedArgs.put(c, source);                    
+                coordinatedArgs.put(c, source);
             }
-            
-
-            BoltDeclarer input = builder.setBolt(id,
-                                                  new CoordinatedBolt(component.bolt,
-                                                                      coordinatedArgs,
-                                                                      null),
-                                                  component.parallelism);
-            for(Map conf: component.componentConfs) {
+
+            BoltDeclarer input = builder.setBolt(id, new CoordinatedBolt(component.bolt, coordinatedArgs, null), component.parallelism);
+            for (Map conf : component.componentConfs) {
                 input.addConfigurations(conf);
             }
-            for(String c: componentBoltSubscriptions(component)) {
+            for (String c : componentBoltSubscriptions(component)) {
                 input.directGrouping(c, Constants.COORDINATED_STREAM_ID);
             }
-            for(InputDeclaration d: component.declarations) {
+            for (InputDeclaration d : component.declarations) {
                 d.declare(input);
             }
-        }        
+        }
     }
-        
+
     private Set<String> componentBoltSubscriptions(Component component) {
         Set<String> ret = new HashSet<String>();
-        for(InputDeclaration d: component.declarations) {
+        for (InputDeclaration d : component.declarations) {
             ret.add(d.getComponent());
         }
         return ret;
@@ -133,25 +129,26 @@ public class BatchSubtopologyBuilder {
         public Integer parallelism;
         public List<InputDeclaration> declarations = new ArrayList<InputDeclaration>();
         public List<Map> componentConfs = new ArrayList<Map>();
-        
+
         public Component(IRichBolt bolt, Integer parallelism) {
             this.bolt = bolt;
             this.parallelism = parallelism;
         }
     }
-    
+
     private static interface InputDeclaration {
         void declare(InputDeclarer declarer);
+
         String getComponent();
     }
-        
+
     private class BoltDeclarerImpl extends BaseConfigurationDeclarer<BoltDeclarer> implements BoltDeclarer {
         Component _component;
-        
+
         public BoltDeclarerImpl(Component component) {
             _component = component;
         }
-        
+
         @Override
         public BoltDeclarer fieldsGrouping(final String component, final Fields fields) {
             addDeclaration(new InputDeclaration() {
@@ -163,7 +160,7 @@ public class BatchSubtopologyBuilder {
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
             return this;
         }
@@ -174,12 +171,12 @@ public class BatchSubtopologyBuilder {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.fieldsGrouping(component, streamId, fields);
-                }                
+                }
 
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
             return this;
         }
@@ -190,12 +187,12 @@ public class BatchSubtopologyBuilder {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.globalGrouping(component);
-                }                
+                }
 
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
             return this;
         }
@@ -206,12 +203,12 @@ public class BatchSubtopologyBuilder {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.globalGrouping(component, streamId);
-                }                
+                }
 
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
             return this;
         }
@@ -222,12 +219,12 @@ public class BatchSubtopologyBuilder {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.shuffleGrouping(component);
-                }                
+                }
 
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
             return this;
         }
@@ -238,12 +235,12 @@ public class BatchSubtopologyBuilder {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.shuffleGrouping(component, streamId);
-                }                
+                }
 
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
             return this;
         }
@@ -254,12 +251,12 @@ public class BatchSubtopologyBuilder {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.localOrShuffleGrouping(component);
-                }                
+                }
 
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
             return this;
         }
@@ -270,8 +267,8 @@ public class BatchSubtopologyBuilder {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.localOrShuffleGrouping(component, streamId);
-                }                
-                
+                }
+
                 @Override
                 public String getComponent() {
                     return component;
@@ -279,7 +276,7 @@ public class BatchSubtopologyBuilder {
             });
             return this;
         }
-        
+
         @Override
         public BoltDeclarer localFirstGrouping(final String componentId) {
             addDeclaration(new InputDeclaration() {
@@ -287,7 +284,7 @@ public class BatchSubtopologyBuilder {
                 public void declare(InputDeclarer declarer) {
                     declarer.localFirstGrouping(componentId);
                 }
-                
+
                 @Override
                 public String getComponent() {
                     return componentId;
@@ -295,7 +292,7 @@ public class BatchSubtopologyBuilder {
             });
             return this;
         }
-        
+
         @Override
         public BoltDeclarer localFirstGrouping(final String component, final String streamId) {
             addDeclaration(new InputDeclaration() {
@@ -303,27 +300,27 @@ public class BatchSubtopologyBuilder {
                 public void declare(InputDeclarer declarer) {
                     declarer.localFirstGrouping(component, streamId);
                 }
-                
+
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
             return this;
         }
-        
+
         @Override
         public BoltDeclarer noneGrouping(final String component) {
             addDeclaration(new InputDeclaration() {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.noneGrouping(component);
-                }                
+                }
 
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
             return this;
         }
@@ -334,12 +331,12 @@ public class BatchSubtopologyBuilder {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.noneGrouping(component, streamId);
-                }                
+                }
 
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
             return this;
         }
@@ -350,12 +347,12 @@ public class BatchSubtopologyBuilder {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.allGrouping(component);
-                }                
+                }
 
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
             return this;
         }
@@ -366,12 +363,12 @@ public class BatchSubtopologyBuilder {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.allGrouping(component, streamId);
-                }                
+                }
 
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
             return this;
         }
@@ -382,12 +379,12 @@ public class BatchSubtopologyBuilder {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.directGrouping(component);
-                }                
+                }
 
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
             return this;
         }
@@ -398,12 +395,12 @@ public class BatchSubtopologyBuilder {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.directGrouping(component, streamId);
-                }                
+                }
 
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
             return this;
         }
@@ -417,21 +414,21 @@ public class BatchSubtopologyBuilder {
         public BoltDeclarer partialKeyGrouping(String componentId, String streamId, Fields fields) {
             return customGrouping(componentId, streamId, new PartialKeyGrouping(fields));
         }
-        
+
         @Override
         public BoltDeclarer customGrouping(final String component, final CustomStreamGrouping grouping) {
             addDeclaration(new InputDeclaration() {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.customGrouping(component, grouping);
-                }                
+                }
 
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
-            return this;        
+            return this;
         }
 
         @Override
@@ -440,12 +437,12 @@ public class BatchSubtopologyBuilder {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.customGrouping(component, streamId, grouping);
-                }                
+                }
 
                 @Override
                 public String getComponent() {
                     return component;
-                }                
+                }
             });
             return this;
         }
@@ -456,16 +453,16 @@ public class BatchSubtopologyBuilder {
                 @Override
                 public void declare(InputDeclarer declarer) {
                     declarer.grouping(stream, grouping);
-                }                
+                }
 
                 @Override
                 public String getComponent() {
                     return stream.get_componentId();
-                }                
+                }
             });
             return this;
         }
-        
+
         private void addDeclaration(InputDeclaration declaration) {
             _component.declarations.add(declaration);
         }

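A usage sketch for the builder; MyMasterBolt (an IBasicBolt) and MyBatchBolt (an IBatchBolt) are hypothetical:

    BatchSubtopologyBuilder batch = new BatchSubtopologyBuilder("master", new MyMasterBolt());
    batch.setBolt("aggregate", new MyBatchBolt(), 4)
         .shuffleGrouping("master");

    TopologyBuilder builder = new TopologyBuilder();
    // wraps each bolt in a CoordinatedBolt and wires the coordination stream
    batch.extendTopology(builder);
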
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/coordination/CoordinatedBolt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/coordination/CoordinatedBolt.java b/jstorm-core/src/main/java/backtype/storm/coordination/CoordinatedBolt.java
index 6f337a6..39a158d 100755
--- a/jstorm-core/src/main/java/backtype/storm/coordination/CoordinatedBolt.java
+++ b/jstorm-core/src/main/java/backtype/storm/coordination/CoordinatedBolt.java
@@ -45,8 +45,7 @@ import org.slf4j.LoggerFactory;
 import static backtype.storm.utils.Utils.get;
 
 /**
- * Coordination requires the request ids to be globally unique for awhile. This is so it doesn't get confused
- * in the case of retries.
+ * Coordination requires the request ids to be globally unique for a while, so that retried requests are not confused with the originals.
  */
 public class CoordinatedBolt implements IRichBolt {
     public static Logger LOG = LoggerFactory.getLogger(CoordinatedBolt.class);
@@ -58,8 +57,7 @@ public class CoordinatedBolt implements IRichBolt {
     public static interface TimeoutCallback {
         void timeoutId(Object id);
     }
-    
-    
+
     public static class SourceArgs implements Serializable {
         public boolean singleCount;
 
@@ -74,7 +72,7 @@ public class CoordinatedBolt implements IRichBolt {
         public static SourceArgs all() {
             return new SourceArgs(false);
         }
-        
+
         @Override
         public String toString() {
             return "<Single: " + singleCount + ">";
@@ -101,14 +99,14 @@ public class CoordinatedBolt implements IRichBolt {
 
         public void ack(Tuple tuple) {
             Object id = tuple.getValue(0);
-            synchronized(_tracked) {
+            synchronized (_tracked) {
                 TrackingInfo track = _tracked.get(id);
                 if (track != null)
                     track.receivedTuples++;
             }
             boolean failed = checkFinishId(tuple, TupleType.REGULAR);
-            if(failed) {
-                _delegate.fail(tuple);                
+            if (failed) {
+                _delegate.fail(tuple);
             } else {
                 _delegate.ack(tuple);
             }
@@ -116,7 +114,7 @@ public class CoordinatedBolt implements IRichBolt {
 
         public void fail(Tuple tuple) {
             Object id = tuple.getValue(0);
-            synchronized(_tracked) {
+            synchronized (_tracked) {
                 TrackingInfo track = _tracked.get(id);
                 if (track != null)
                     track.failed = true;
@@ -124,18 +122,17 @@ public class CoordinatedBolt implements IRichBolt {
             checkFinishId(tuple, TupleType.REGULAR);
             _delegate.fail(tuple);
         }
-        
+
         public void reportError(Throwable error) {
             _delegate.reportError(error);
         }
 
-
         private void updateTaskCounts(Object id, List<Integer> tasks) {
-            synchronized(_tracked) {
+            synchronized (_tracked) {
                 TrackingInfo track = _tracked.get(id);
                 if (track != null) {
                     Map<Integer, Integer> taskEmittedTuples = track.taskEmittedTuples;
-                    for(Integer task: tasks) {
+                    for (Integer task : tasks) {
                         int newCount = get(taskEmittedTuples, task, 0) + 1;
                         taskEmittedTuples.put(task, newCount);
                     }
@@ -161,34 +158,30 @@ public class CoordinatedBolt implements IRichBolt {
         boolean receivedId = false;
         boolean finished = false;
         List<Tuple> ackTuples = new ArrayList<Tuple>();
-        
+
         @Override
         public String toString() {
-            return "reportCount: " + reportCount + "\n" +
-                   "expectedTupleCount: " + expectedTupleCount + "\n" +
-                   "receivedTuples: " + receivedTuples + "\n" +
-                   "failed: " + failed + "\n" +
-                   taskEmittedTuples.toString();
+            return "reportCount: " + reportCount + "\n" + "expectedTupleCount: " + expectedTupleCount + "\n" + "receivedTuples: " + receivedTuples + "\n"
+                    + "failed: " + failed + "\n" + taskEmittedTuples.toString();
         }
     }
 
-    
     public static class IdStreamSpec implements Serializable {
         GlobalStreamId _id;
-        
+
         public GlobalStreamId getGlobalStreamId() {
             return _id;
         }
 
         public static IdStreamSpec makeDetectSpec(String component, String stream) {
             return new IdStreamSpec(component, stream);
-        }        
-        
+        }
+
         protected IdStreamSpec(String component, String stream) {
             _id = new GlobalStreamId(component, stream);
         }
     }
-    
+
     public CoordinatedBolt(IRichBolt delegate) {
         this(delegate, null, null);
     }
@@ -196,37 +189,35 @@ public class CoordinatedBolt implements IRichBolt {
     public CoordinatedBolt(IRichBolt delegate, String sourceComponent, SourceArgs sourceArgs, IdStreamSpec idStreamSpec) {
         this(delegate, singleSourceArgs(sourceComponent, sourceArgs), idStreamSpec);
     }
-    
+
     public CoordinatedBolt(IRichBolt delegate, Map<String, SourceArgs> sourceArgs, IdStreamSpec idStreamSpec) {
         _sourceArgs = sourceArgs;
-        if(_sourceArgs==null) _sourceArgs = new HashMap<String, SourceArgs>();
+        if (_sourceArgs == null)
+            _sourceArgs = new HashMap<String, SourceArgs>();
         _delegate = delegate;
         _idStreamSpec = idStreamSpec;
     }
-    
+
     public void prepare(Map config, TopologyContext context, OutputCollector collector) {
         TimeCacheMap.ExpiredCallback<Object, TrackingInfo> callback = null;
-        if(_delegate instanceof TimeoutCallback) {
+        if (_delegate instanceof TimeoutCallback) {
             callback = new TimeoutItems();
         }
         _tracked = new TimeCacheMap<Object, TrackingInfo>(context.maxTopologyMessageTimeout(), callback);
         _collector = collector;
         _delegate.prepare(config, context, new OutputCollector(new CoordinatedOutputCollector(collector)));
-        for(String component: Utils.get(context.getThisTargets(),
-                                        Constants.COORDINATED_STREAM_ID,
-                                        new HashMap<String, Grouping>())
-                                        .keySet()) {
-            for(Integer task: context.getComponentTasks(component)) {
+        for (String component : Utils.get(context.getThisTargets(), Constants.COORDINATED_STREAM_ID, new HashMap<String, Grouping>()).keySet()) {
+            for (Integer task : context.getComponentTasks(component)) {
                 _countOutTasks.add(task);
             }
         }
-        if(!_sourceArgs.isEmpty()) {
+        if (!_sourceArgs.isEmpty()) {
             _numSourceReports = 0;
-            for(Entry<String, SourceArgs> entry: _sourceArgs.entrySet()) {
-                if(entry.getValue().singleCount) {
-                    _numSourceReports+=1;
+            for (Entry<String, SourceArgs> entry : _sourceArgs.entrySet()) {
+                if (entry.getValue().singleCount) {
+                    _numSourceReports += 1;
                 } else {
-                    _numSourceReports+=context.getComponentTasks(entry.getKey()).size();
+                    _numSourceReports += context.getComponentTasks(entry.getKey()).size();
                 }
             }
         }
@@ -235,57 +226,56 @@ public class CoordinatedBolt implements IRichBolt {
     private boolean checkFinishId(Tuple tup, TupleType type) {
         Object id = tup.getValue(0);
         boolean failed = false;
-        
-        synchronized(_tracked) {
+
+        synchronized (_tracked) {
             TrackingInfo track = _tracked.get(id);
             try {
-                if(track!=null) {
+                if (track != null) {
                     boolean delayed = false;
-                    if(_idStreamSpec==null && type == TupleType.COORD || _idStreamSpec!=null && type==TupleType.ID) {
+                    if (_idStreamSpec == null && type == TupleType.COORD || _idStreamSpec != null && type == TupleType.ID) {
                         track.ackTuples.add(tup);
                         delayed = true;
                     }
-                    if(track.failed) {
+                    if (track.failed) {
                         failed = true;
-                        for(Tuple t: track.ackTuples) {
+                        for (Tuple t : track.ackTuples) {
                             _collector.fail(t);
                         }
                         _tracked.remove(id);
-                    } else if(track.receivedId
-                             && (_sourceArgs.isEmpty() ||
-                                  track.reportCount==_numSourceReports &&
-                                  track.expectedTupleCount == track.receivedTuples)){
-                        if(_delegate instanceof FinishedCallback) {
-                            ((FinishedCallback)_delegate).finishedId(id);
+                    } else if (track.receivedId
+                            && (_sourceArgs.isEmpty() || track.reportCount == _numSourceReports && track.expectedTupleCount == track.receivedTuples)) {
+                        if (_delegate instanceof FinishedCallback) {
+                            ((FinishedCallback) _delegate).finishedId(id);
                         }
-                        if(!(_sourceArgs.isEmpty() || type!=TupleType.REGULAR)) {
+                        if (!(_sourceArgs.isEmpty() || type != TupleType.REGULAR)) {
                             throw new IllegalStateException("Coordination condition met on a non-coordinating tuple. Should be impossible");
                         }
                         Iterator<Integer> outTasks = _countOutTasks.iterator();
-                        while(outTasks.hasNext()) {
+                        while (outTasks.hasNext()) {
                             int task = outTasks.next();
                             int numTuples = get(track.taskEmittedTuples, task, 0);
                             _collector.emitDirect(task, Constants.COORDINATED_STREAM_ID, tup, new Values(id, numTuples));
                         }
-                        for(Tuple t: track.ackTuples) {
+                        for (Tuple t : track.ackTuples) {
                             _collector.ack(t);
                         }
                         track.finished = true;
                         _tracked.remove(id);
                     }
-                    if(!delayed && type!=TupleType.REGULAR) {
-                        if(track.failed) {
+                    if (!delayed && type != TupleType.REGULAR) {
+                        if (track.failed) {
                             _collector.fail(tup);
                         } else {
-                            _collector.ack(tup);                            
+                            _collector.ack(tup);
                         }
                     }
                 } else {
-                    if(type!=TupleType.REGULAR) _collector.fail(tup);
+                    if (type != TupleType.REGULAR)
+                        _collector.fail(tup);
                 }
-            } catch(FailedException e) {
+            } catch (FailedException e) {
                 LOG.error("Failed to finish batch", e);
-                for(Tuple t: track.ackTuples) {
+                for (Tuple t : track.ackTuples) {
                     _collector.fail(t);
                 }
                 _tracked.remove(id);
@@ -299,29 +289,30 @@ public class CoordinatedBolt implements IRichBolt {
         Object id = tuple.getValue(0);
         TrackingInfo track;
         TupleType type = getTupleType(tuple);
-        synchronized(_tracked) {
+        synchronized (_tracked) {
             track = _tracked.get(id);
-            if(track==null) {
+            if (track == null) {
                 track = new TrackingInfo();
-                if(_idStreamSpec==null) track.receivedId = true;
+                if (_idStreamSpec == null)
+                    track.receivedId = true;
                 _tracked.put(id, track);
             }
         }
-        
-        if(type==TupleType.ID) {
-            synchronized(_tracked) {
+
+        if (type == TupleType.ID) {
+            synchronized (_tracked) {
                 track.receivedId = true;
             }
-            checkFinishId(tuple, type);            
-        } else if(type==TupleType.COORD) {
+            checkFinishId(tuple, type);
+        } else if (type == TupleType.COORD) {
             int count = (Integer) tuple.getValue(1);
-            synchronized(_tracked) {
+            synchronized (_tracked) {
                 track.reportCount++;
-                track.expectedTupleCount+=count;
+                track.expectedTupleCount += count;
             }
             checkFinishId(tuple, type);
-        } else {            
-            synchronized(_tracked) {
+        } else {
+            synchronized (_tracked) {
                 _delegate.execute(tuple);
             }
         }
@@ -341,42 +332,38 @@ public class CoordinatedBolt implements IRichBolt {
     public Map<String, Object> getComponentConfiguration() {
         return _delegate.getComponentConfiguration();
     }
-    
+
     private static Map<String, SourceArgs> singleSourceArgs(String sourceComponent, SourceArgs sourceArgs) {
         Map<String, SourceArgs> ret = new HashMap<String, SourceArgs>();
         ret.put(sourceComponent, sourceArgs);
         return ret;
     }
-    
+
     private class TimeoutItems implements TimeCacheMap.ExpiredCallback<Object, TrackingInfo> {
         @Override
         public void expire(Object id, TrackingInfo val) {
-            synchronized(_tracked) {
+            synchronized (_tracked) {
                 // the combination of the lock and the finished flag ensure that
                 // an id is never timed out if it has been finished
                 val.failed = true;
-                if(!val.finished) {
+                if (!val.finished) {
                     ((TimeoutCallback) _delegate).timeoutId(id);
                 }
             }
         }
     }
-    
+
     private TupleType getTupleType(Tuple tuple) {
-        if(_idStreamSpec!=null
-                && tuple.getSourceGlobalStreamid().equals(_idStreamSpec._id)) {
+        if (_idStreamSpec != null && tuple.getSourceGlobalStreamid().equals(_idStreamSpec._id)) {
             return TupleType.ID;
-        } else if(!_sourceArgs.isEmpty()
-                && tuple.getSourceStreamId().equals(Constants.COORDINATED_STREAM_ID)) {
+        } else if (!_sourceArgs.isEmpty() && tuple.getSourceStreamId().equals(Constants.COORDINATED_STREAM_ID)) {
             return TupleType.COORD;
         } else {
             return TupleType.REGULAR;
         }
     }
-    
+
     static enum TupleType {
-        REGULAR,
-        ID,
-        COORD
+        REGULAR, ID, COORD
     }
 }
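
For reference, the expire() callback above relies on a lock-plus-flag idiom: the finishing path and the timeout path synchronize on the same tracking map, so a batch id that has already been marked finished can never be timed out afterwards. A minimal sketch of that idiom, with BatchTracker and its TrackingInfo as simplified stand-ins for the classes in this commit:

    import java.util.HashMap;
    import java.util.Map;

    class BatchTracker {
        static class TrackingInfo {
            boolean finished = false;
            boolean failed = false;
        }

        private final Map<Object, TrackingInfo> tracked = new HashMap<Object, TrackingInfo>();

        void finish(Object id) {
            synchronized (tracked) {
                TrackingInfo t = tracked.get(id);
                if (t != null && !t.failed) {
                    t.finished = true; // any later expire() call now sees finished == true
                    tracked.remove(id);
                }
            }
        }

        void expire(Object id) {
            synchronized (tracked) {
                TrackingInfo t = tracked.get(id);
                if (t != null && !t.finished) {
                    t.failed = true; // safe: finish() cannot interleave with this block
                }
            }
        }
    }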

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/coordination/IBatchBolt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/coordination/IBatchBolt.java b/jstorm-core/src/main/java/backtype/storm/coordination/IBatchBolt.java
index ee5d9bd..9a1abfa 100755
--- a/jstorm-core/src/main/java/backtype/storm/coordination/IBatchBolt.java
+++ b/jstorm-core/src/main/java/backtype/storm/coordination/IBatchBolt.java
@@ -25,6 +25,8 @@ import java.util.Map;
 
 public interface IBatchBolt<T> extends Serializable, IComponent {
     void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, T id);
+
     void execute(Tuple tuple);
+
     void finishBatch();
 }
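
The three methods above define the batch lifecycle: prepare() hands the bolt the id of its batch, execute() is called once per tuple of that batch, and finishBatch() fires when the coordination layer decides the batch is complete. A hedged sketch of a trivial implementation that counts tuples per batch (CountBolt is an illustrative name in the spirit of storm-starter, not part of this commit):

    import java.util.Map;
    import backtype.storm.coordination.BatchOutputCollector;
    import backtype.storm.coordination.IBatchBolt;
    import backtype.storm.task.TopologyContext;
    import backtype.storm.topology.OutputFieldsDeclarer;
    import backtype.storm.tuple.Fields;
    import backtype.storm.tuple.Tuple;
    import backtype.storm.tuple.Values;

    public class CountBolt implements IBatchBolt<Object> {
        private BatchOutputCollector _collector;
        private Object _id;
        private int _count = 0;

        @Override
        public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, Object id) {
            _collector = collector;
            _id = id; // the batch this instance is bound to
        }

        @Override
        public void execute(Tuple tuple) {
            _count++; // called once per tuple in the batch
        }

        @Override
        public void finishBatch() {
            _collector.emit(new Values(_id, _count)); // called when the batch completes
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            declarer.declare(new Fields("id", "count"));
        }

        @Override
        public Map<String, Object> getComponentConfiguration() {
            return null;
        }
    }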

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/drpc/DRPCInvocationsClient.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/drpc/DRPCInvocationsClient.java b/jstorm-core/src/main/java/backtype/storm/drpc/DRPCInvocationsClient.java
index 624db3e..d10872f 100755
--- a/jstorm-core/src/main/java/backtype/storm/drpc/DRPCInvocationsClient.java
+++ b/jstorm-core/src/main/java/backtype/storm/drpc/DRPCInvocationsClient.java
@@ -17,23 +17,22 @@
  */
 package backtype.storm.drpc;
 
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicReference;
-
+import backtype.storm.generated.AuthorizationException;
 import backtype.storm.generated.DRPCRequest;
 import backtype.storm.generated.DistributedRPCInvocations;
-import backtype.storm.generated.AuthorizationException;
 import backtype.storm.security.auth.ThriftClient;
 import backtype.storm.security.auth.ThriftConnectionType;
-import org.apache.thrift.transport.TTransportException;
 import org.apache.thrift.TException;
+import org.apache.thrift.transport.TTransportException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicReference;
+
 public class DRPCInvocationsClient extends ThriftClient implements DistributedRPCInvocations.Iface {
     public static Logger LOG = LoggerFactory.getLogger(DRPCInvocationsClient.class);
-    private final AtomicReference<DistributedRPCInvocations.Client> client =
-       new AtomicReference<DistributedRPCInvocations.Client>();
+    private final AtomicReference<DistributedRPCInvocations.Client> client = new AtomicReference<DistributedRPCInvocations.Client>();
     private String host;
     private int port;
 
@@ -43,14 +42,14 @@ public class DRPCInvocationsClient extends ThriftClient implements DistributedRP
         this.port = port;
         client.set(new DistributedRPCInvocations.Client(_protocol));
     }
-        
+
     public String getHost() {
         return host;
     }
-    
+
     public int getPort() {
         return port;
-    }       
+    }
 
     public void reconnectClient() throws TException {
         if (client.get() == null) {
@@ -70,9 +69,9 @@ public class DRPCInvocationsClient extends ThriftClient implements DistributedRP
                 throw new TException("Client is not connected...");
             }
             c.result(id, result);
-        } catch(AuthorizationException aze) {
+        } catch (AuthorizationException aze) {
             throw aze;
-        } catch(TException e) {
+        } catch (TException e) {
             client.compareAndSet(c, null);
             throw e;
         }
@@ -85,24 +84,24 @@ public class DRPCInvocationsClient extends ThriftClient implements DistributedRP
                 throw new TException("Client is not connected...");
             }
             return c.fetchRequest(func);
-        } catch(AuthorizationException aze) {
+        } catch (AuthorizationException aze) {
             throw aze;
-        } catch(TException e) {
+        } catch (TException e) {
             client.compareAndSet(c, null);
             throw e;
         }
-    }    
+    }
 
-    public void failRequest(String id) throws TException, AuthorizationException {
+    public void failRequest(String id) throws TException {
         DistributedRPCInvocations.Client c = client.get();
         try {
             if (c == null) {
                 throw new TException("Client is not connected...");
             }
             c.failRequest(id);
-        } catch(AuthorizationException aze) {
+        } catch (AuthorizationException aze) {
             throw aze;
-        } catch(TException e) {
+        } catch (TException e) {
             client.compareAndSet(c, null);
             throw e;
         }
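
The pattern above is worth noting: the live Thrift client sits in an AtomicReference, and a failed call does compareAndSet(c, null) so that exactly the instance that failed is discarded and a later reconnectClient() can install a fresh one, without clobbering a replacement installed by a racing caller. A minimal sketch of the same invalidate-on-failure idiom (ReconnectingCaller and FlakyService are illustrative, not from this commit):

    import java.util.concurrent.atomic.AtomicReference;

    class ReconnectingCaller {
        interface FlakyService {
            String call(String arg) throws Exception;
        }

        private final AtomicReference<FlakyService> ref = new AtomicReference<FlakyService>();

        String invoke(String arg) throws Exception {
            FlakyService c = ref.get();
            if (c == null) {
                throw new IllegalStateException("Client is not connected...");
            }
            try {
                return c.call(arg);
            } catch (Exception e) {
                // Drop the reference only if nobody has replaced it in the meantime.
                ref.compareAndSet(c, null);
                throw e;
            }
        }
    }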

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/drpc/DRPCSpout.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/drpc/DRPCSpout.java b/jstorm-core/src/main/java/backtype/storm/drpc/DRPCSpout.java
index 4ed24d4..c490efd 100644
--- a/jstorm-core/src/main/java/backtype/storm/drpc/DRPCSpout.java
+++ b/jstorm-core/src/main/java/backtype/storm/drpc/DRPCSpout.java
@@ -17,25 +17,6 @@
  */
 package backtype.storm.drpc;
 
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.thrift.TException;
-import org.json.simple.JSONValue;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.alibaba.jstorm.utils.NetWorkUtils;
-
 import backtype.storm.Config;
 import backtype.storm.ILocalDRPC;
 import backtype.storm.generated.AuthorizationException;
@@ -50,31 +31,38 @@ import backtype.storm.tuple.Values;
 import backtype.storm.utils.ExtendedThreadPoolExecutor;
 import backtype.storm.utils.ServiceRegistry;
 import backtype.storm.utils.Utils;
+import com.alibaba.jstorm.utils.NetWorkUtils;
+import org.apache.thrift.TException;
+import org.json.simple.JSONValue;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.*;
+import java.util.concurrent.*;
 
 public class DRPCSpout extends BaseRichSpout {
-    //ANY CHANGE TO THIS CODE MUST BE SERIALIZABLE COMPATIBLE OR THERE WILL BE PROBLEMS
+    // ANY CHANGE TO THIS CODE MUST BE SERIALIZABLE COMPATIBLE OR THERE WILL BE PROBLEMS
     static final long serialVersionUID = 2387848310969237877L;
 
     public static Logger LOG = LoggerFactory.getLogger(DRPCSpout.class);
-    
+
     SpoutOutputCollector _collector;
     List<DRPCInvocationsClient> _clients = new ArrayList<DRPCInvocationsClient>();
     transient LinkedList<Future<Void>> _futures = null;
     transient ExecutorService _backround = null;
     String _function;
     String _local_drpc_id = null;
-    
+
     private static class DRPCMessageId {
         String id;
         int index;
-        
+
         public DRPCMessageId(String id, int index) {
             this.id = id;
             this.index = index;
         }
     }
-    
-    
+
     public DRPCSpout(String function) {
         _function = function;
     }
@@ -83,7 +71,7 @@ public class DRPCSpout extends BaseRichSpout {
         _function = function;
         _local_drpc_id = drpc.getServiceId();
     }
-   
+
     private class Adder implements Callable<Void> {
         private String server;
         private int port;
@@ -129,16 +117,12 @@ public class DRPCSpout extends BaseRichSpout {
             }
         }
     }
-    
-    
- 
+
     @Override
     public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
         _collector = collector;
-        if(_local_drpc_id==null) {
-            _backround = new ExtendedThreadPoolExecutor(0, Integer.MAX_VALUE,
-                60L, TimeUnit.SECONDS,
-                new SynchronousQueue<Runnable>());
+        if (_local_drpc_id == null) {
+            _backround = new ExtendedThreadPoolExecutor(0, Integer.MAX_VALUE, 60L, TimeUnit.SECONDS, new SynchronousQueue<Runnable>());
             _futures = new LinkedList<Future<Void>>();
 
             int numTasks = context.getComponentTasks(context.getThisComponentId()).size();
@@ -146,26 +130,26 @@ public class DRPCSpout extends BaseRichSpout {
 
             int port = Utils.getInt(conf.get(Config.DRPC_INVOCATIONS_PORT));
             List<String> servers = NetWorkUtils.host2Ip((List<String>) conf.get(Config.DRPC_SERVERS));
-            
-            if(servers == null || servers.isEmpty()) {
-                throw new RuntimeException("No DRPC servers configured for topology");   
+
+            if (servers == null || servers.isEmpty()) {
+                throw new RuntimeException("No DRPC servers configured for topology");
             }
-            
+
             if (numTasks < servers.size()) {
-                for (String s: servers) {
+                for (String s : servers) {
                     _futures.add(_backround.submit(new Adder(s, port, conf)));
                 }
-            } else {        
+            } else {
                 int i = index % servers.size();
                 _futures.add(_backround.submit(new Adder(servers.get(i), port, conf)));
             }
         }
-        
+
     }
 
     @Override
     public void close() {
-        for(DRPCInvocationsClient client: _clients) {
+        for (DRPCInvocationsClient client : _clients) {
             client.close();
         }
     }
@@ -173,12 +157,12 @@ public class DRPCSpout extends BaseRichSpout {
     @Override
     public void nextTuple() {
         boolean gotRequest = false;
-        if(_local_drpc_id==null) {
+        if (_local_drpc_id == null) {
             int size = 0;
             synchronized (_clients) {
-                size = _clients.size(); //This will only ever grow, so no need to worry about falling off the end
+                size = _clients.size(); // This will only ever grow, so no need to worry about falling off the end
             }
-            for(int i=0; i<size; i++) {
+            for (int i = 0; i < size; i++) {
                 DRPCInvocationsClient client;
                 synchronized (_clients) {
                     client = _clients.get(i);
@@ -188,7 +172,7 @@ public class DRPCSpout extends BaseRichSpout {
                 }
                 try {
                     DRPCRequest req = client.fetchRequest(_function);
-                    if(req.get_request_id().length() > 0) {
+                    if (req.get_request_id().length() > 0) {
                         Map returnInfo = new HashMap();
                         returnInfo.put("id", req.get_request_id());
                         returnInfo.put("host", client.getHost());
@@ -210,10 +194,10 @@ public class DRPCSpout extends BaseRichSpout {
             checkFutures();
         } else {
             DistributedRPCInvocations.Iface drpc = (DistributedRPCInvocations.Iface) ServiceRegistry.getService(_local_drpc_id);
-            if(drpc!=null) { // can happen during shutdown of drpc while topology is still up
+            if (drpc != null) { // can happen during shutdown of drpc while topology is still up
                 try {
                     DRPCRequest req = drpc.fetchRequest(_function);
-                    if(req.get_request_id().length() > 0) {
+                    if (req.get_request_id().length() > 0) {
                         Map returnInfo = new HashMap();
                         returnInfo.put("id", req.get_request_id());
                         returnInfo.put("host", _local_drpc_id);
@@ -228,7 +212,7 @@ public class DRPCSpout extends BaseRichSpout {
                 }
             }
         }
-        if(!gotRequest) {
+        if (!gotRequest) {
             Utils.sleep(1);
         }
     }
@@ -241,8 +225,8 @@ public class DRPCSpout extends BaseRichSpout {
     public void fail(Object msgId) {
         DRPCMessageId did = (DRPCMessageId) msgId;
         DistributedRPCInvocations.Iface client;
-        
-        if(_local_drpc_id == null) {
+
+        if (_local_drpc_id == null) {
             client = _clients.get(did.index);
         } else {
             client = (DistributedRPCInvocations.Iface) ServiceRegistry.getService(_local_drpc_id);
@@ -259,5 +243,5 @@ public class DRPCSpout extends BaseRichSpout {
     @Override
     public void declareOutputFields(OutputFieldsDeclarer declarer) {
         declarer.declare(new Fields("args", "return-info"));
-    }    
+    }
 }
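
A detail visible above: alongside the request arguments, the spout emits a "return-info" JSON blob carrying the request id plus the host and port of the DRPC server that issued it, which is what lets ReturnResults route the answer back later. A small sketch of building that blob with json-simple (the values are illustrative):

    import java.util.HashMap;
    import java.util.Map;
    import org.json.simple.JSONValue;

    public class ReturnInfoDemo {
        public static void main(String[] args) {
            Map<String, Object> returnInfo = new HashMap<String, Object>();
            returnInfo.put("id", "request-42");    // DRPC request id (illustrative)
            returnInfo.put("host", "drpc-host-1"); // server that issued the request
            returnInfo.put("port", 3773);          // its invocations port
            // ReturnResults later recovers this map with JSONValue.parse(...)
            System.out.println(JSONValue.toJSONString(returnInfo));
        }
    }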

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/drpc/JoinResult.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/drpc/JoinResult.java b/jstorm-core/src/main/java/backtype/storm/drpc/JoinResult.java
index b74b97e..e9195e7 100755
--- a/jstorm-core/src/main/java/backtype/storm/drpc/JoinResult.java
+++ b/jstorm-core/src/main/java/backtype/storm/drpc/JoinResult.java
@@ -31,7 +31,6 @@ import java.util.Map;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-
 public class JoinResult extends BaseRichBolt {
     public static Logger LOG = LoggerFactory.getLogger(JoinResult.class);
 
@@ -43,27 +42,27 @@ public class JoinResult extends BaseRichBolt {
     public JoinResult(String returnComponent) {
         this.returnComponent = returnComponent;
     }
- 
+
     public void prepare(Map map, TopologyContext context, OutputCollector collector) {
         _collector = collector;
     }
 
     public void execute(Tuple tuple) {
         Object requestId = tuple.getValue(0);
-        if(tuple.getSourceComponent().equals(returnComponent)) {
+        if (tuple.getSourceComponent().equals(returnComponent)) {
             returns.put(requestId, tuple);
         } else {
             results.put(requestId, tuple);
         }
 
-        if(returns.containsKey(requestId) && results.containsKey(requestId)) {
+        if (returns.containsKey(requestId) && results.containsKey(requestId)) {
             Tuple result = results.remove(requestId);
             Tuple returner = returns.remove(requestId);
             LOG.debug(result.getValue(1).toString());
             List<Tuple> anchors = new ArrayList<Tuple>();
             anchors.add(result);
-            anchors.add(returner);            
-            _collector.emit(anchors, new Values(""+result.getValue(1), returner.getValue(1)));
+            anchors.add(returner);
+            _collector.emit(anchors, new Values("" + result.getValue(1), returner.getValue(1)));
             _collector.ack(result);
             _collector.ack(returner);
         }
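
JoinResult above is a symmetric hash join keyed by request id: each side is buffered until its counterpart arrives, then both entries are removed and emitted together. A stripped-down sketch of that buffering logic, leaving out Storm's anchoring and acking (PairJoiner is an illustrative name):

    import java.util.HashMap;
    import java.util.Map;

    class PairJoiner<K, V> {
        private final Map<K, V> results = new HashMap<K, V>();
        private final Map<K, V> returns = new HashMap<K, V>();

        // Buffer one side; when the opposite side is already present, consume both.
        void offer(K key, V value, boolean isReturnSide) {
            (isReturnSide ? returns : results).put(key, value);
            if (results.containsKey(key) && returns.containsKey(key)) {
                V result = results.remove(key);
                V returner = returns.remove(key);
                onMatch(key, result, returner);
            }
        }

        protected void onMatch(K key, V result, V returner) {
            System.out.println(key + " -> " + result + " / " + returner);
        }
    }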

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/drpc/KeyedFairBolt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/drpc/KeyedFairBolt.java b/jstorm-core/src/main/java/backtype/storm/drpc/KeyedFairBolt.java
index 113163d..2294c54 100755
--- a/jstorm-core/src/main/java/backtype/storm/drpc/KeyedFairBolt.java
+++ b/jstorm-core/src/main/java/backtype/storm/drpc/KeyedFairBolt.java
@@ -29,7 +29,6 @@ import backtype.storm.utils.KeyedRoundRobinQueue;
 import java.util.HashMap;
 import java.util.Map;
 
-
 public class KeyedFairBolt implements IRichBolt, FinishedCallback {
     IRichBolt _delegate;
     KeyedRoundRobinQueue<Tuple> _rrQueue;
@@ -39,14 +38,13 @@ public class KeyedFairBolt implements IRichBolt, FinishedCallback {
     public KeyedFairBolt(IRichBolt delegate) {
         _delegate = delegate;
     }
-    
+
     public KeyedFairBolt(IBasicBolt delegate) {
         this(new BasicBoltExecutor(delegate));
     }
-    
-    
+
     public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
-        if(_delegate instanceof FinishedCallback) {
+        if (_delegate instanceof FinishedCallback) {
             _callback = (FinishedCallback) _delegate;
         }
         _delegate.prepare(stormConf, context, collector);
@@ -54,7 +52,7 @@ public class KeyedFairBolt implements IRichBolt, FinishedCallback {
         _executor = new Thread(new Runnable() {
             public void run() {
                 try {
-                    while(true) {
+                    while (true) {
                         _delegate.execute(_rrQueue.take());
                     }
                 } catch (InterruptedException e) {
@@ -81,7 +79,7 @@ public class KeyedFairBolt implements IRichBolt, FinishedCallback {
     }
 
     public void finishedId(Object id) {
-        if(_callback!=null) {
+        if (_callback != null) {
             _callback.finishedId(id);
         }
     }
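
KeyedFairBolt above decouples tuple arrival from execution: execute() only enqueues into a KeyedRoundRobinQueue, while a dedicated thread drains the queue and drives the delegate bolt, so no single key can starve the others. A hedged sketch of the same hand-off, with a plain LinkedBlockingQueue standing in for the keyed round-robin queue (QueueDrivenWorker is an illustrative name):

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    class QueueDrivenWorker {
        private final BlockingQueue<String> queue = new LinkedBlockingQueue<String>();
        private final Thread consumer;

        QueueDrivenWorker() {
            consumer = new Thread(new Runnable() {
                public void run() {
                    try {
                        while (true) {
                            handle(queue.take()); // blocks until work arrives
                        }
                    } catch (InterruptedException e) {
                        // mirror of KeyedFairBolt: exit quietly on interrupt
                    }
                }
            });
            consumer.setDaemon(true);
            consumer.start();
        }

        void submit(String work) {
            queue.add(work);
        }

        void handle(String work) {
            System.out.println("processing " + work);
        }
    }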

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/drpc/LinearDRPCInputDeclarer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/drpc/LinearDRPCInputDeclarer.java b/jstorm-core/src/main/java/backtype/storm/drpc/LinearDRPCInputDeclarer.java
index d03075e..ddcac35 100755
--- a/jstorm-core/src/main/java/backtype/storm/drpc/LinearDRPCInputDeclarer.java
+++ b/jstorm-core/src/main/java/backtype/storm/drpc/LinearDRPCInputDeclarer.java
@@ -23,30 +23,39 @@ import backtype.storm.tuple.Fields;
 
 public interface LinearDRPCInputDeclarer extends ComponentConfigurationDeclarer<LinearDRPCInputDeclarer> {
     public LinearDRPCInputDeclarer fieldsGrouping(Fields fields);
+
     public LinearDRPCInputDeclarer fieldsGrouping(String streamId, Fields fields);
 
     public LinearDRPCInputDeclarer globalGrouping();
+
     public LinearDRPCInputDeclarer globalGrouping(String streamId);
 
     public LinearDRPCInputDeclarer shuffleGrouping();
+
     public LinearDRPCInputDeclarer shuffleGrouping(String streamId);
 
     public LinearDRPCInputDeclarer localOrShuffleGrouping();
+
     public LinearDRPCInputDeclarer localOrShuffleGrouping(String streamId);
-    
+
     public LinearDRPCInputDeclarer noneGrouping();
+
     public LinearDRPCInputDeclarer noneGrouping(String streamId);
 
     public LinearDRPCInputDeclarer allGrouping();
+
     public LinearDRPCInputDeclarer allGrouping(String streamId);
 
     public LinearDRPCInputDeclarer directGrouping();
+
     public LinearDRPCInputDeclarer directGrouping(String streamId);
 
     public LinearDRPCInputDeclarer partialKeyGrouping(Fields fields);
+
     public LinearDRPCInputDeclarer partialKeyGrouping(String streamId, Fields fields);
 
     public LinearDRPCInputDeclarer customGrouping(CustomStreamGrouping grouping);
+
     public LinearDRPCInputDeclarer customGrouping(String streamId, CustomStreamGrouping grouping);
-    
+
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/drpc/LinearDRPCTopologyBuilder.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/drpc/LinearDRPCTopologyBuilder.java b/jstorm-core/src/main/java/backtype/storm/drpc/LinearDRPCTopologyBuilder.java
index ebbbc6d..e8c202e 100755
--- a/jstorm-core/src/main/java/backtype/storm/drpc/LinearDRPCTopologyBuilder.java
+++ b/jstorm-core/src/main/java/backtype/storm/drpc/LinearDRPCTopologyBuilder.java
@@ -43,39 +43,38 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-
 // Trident subsumes the functionality provided by this class, so it's deprecated
 @Deprecated
-public class LinearDRPCTopologyBuilder {    
+public class LinearDRPCTopologyBuilder {
     String _function;
     List<Component> _components = new ArrayList<Component>();
-    
-    
+
     public LinearDRPCTopologyBuilder(String function) {
         _function = function;
     }
-        
+
     public LinearDRPCInputDeclarer addBolt(IBatchBolt bolt, Number parallelism) {
         return addBolt(new BatchBoltExecutor(bolt), parallelism);
     }
-    
+
     public LinearDRPCInputDeclarer addBolt(IBatchBolt bolt) {
         return addBolt(bolt, 1);
     }
-    
+
     @Deprecated
     public LinearDRPCInputDeclarer addBolt(IRichBolt bolt, Number parallelism) {
-        if(parallelism==null) parallelism = 1; 
+        if (parallelism == null)
+            parallelism = 1;
         Component component = new Component(bolt, parallelism.intValue());
         _components.add(component);
         return new InputDeclarerImpl(component);
     }
-    
+
     @Deprecated
     public LinearDRPCInputDeclarer addBolt(IRichBolt bolt) {
         return addBolt(bolt, null);
     }
-    
+
     public LinearDRPCInputDeclarer addBolt(IBasicBolt bolt, Number parallelism) {
         return addBolt(new BasicBoltExecutor(bolt), parallelism);
     }
@@ -83,125 +82,119 @@ public class LinearDRPCTopologyBuilder {
     public LinearDRPCInputDeclarer addBolt(IBasicBolt bolt) {
         return addBolt(bolt, null);
     }
-        
+
     public StormTopology createLocalTopology(ILocalDRPC drpc) {
         return createTopology(new DRPCSpout(_function, drpc));
     }
-    
+
     public StormTopology createRemoteTopology() {
         return createTopology(new DRPCSpout(_function));
     }
-    
-    
+
     private StormTopology createTopology(DRPCSpout spout) {
         final String SPOUT_ID = "spout";
         final String PREPARE_ID = "prepare-request";
-        
+
         TopologyBuilder builder = new TopologyBuilder();
         builder.setSpout(SPOUT_ID, spout);
-        builder.setBolt(PREPARE_ID, new PrepareRequest())
-                .noneGrouping(SPOUT_ID);
-        int i=0;
-        for(; i<_components.size();i++) {
+        builder.setBolt(PREPARE_ID, new PrepareRequest()).noneGrouping(SPOUT_ID);
+        int i = 0;
+        for (; i < _components.size(); i++) {
             Component component = _components.get(i);
-            
+
             Map<String, SourceArgs> source = new HashMap<String, SourceArgs>();
-            if (i==1) {
-                source.put(boltId(i-1), SourceArgs.single());
-            } else if (i>=2) {
-                source.put(boltId(i-1), SourceArgs.all());
+            if (i == 1) {
+                source.put(boltId(i - 1), SourceArgs.single());
+            } else if (i >= 2) {
+                source.put(boltId(i - 1), SourceArgs.all());
             }
             IdStreamSpec idSpec = null;
-            if(i==_components.size()-1 && component.bolt instanceof FinishedCallback) {
+            if (i == _components.size() - 1 && component.bolt instanceof FinishedCallback) {
                 idSpec = IdStreamSpec.makeDetectSpec(PREPARE_ID, PrepareRequest.ID_STREAM);
             }
-            BoltDeclarer declarer = builder.setBolt(
-                    boltId(i),
-                    new CoordinatedBolt(component.bolt, source, idSpec),
-                    component.parallelism);
-            
-            for(Map conf: component.componentConfs) {
+            BoltDeclarer declarer = builder.setBolt(boltId(i), new CoordinatedBolt(component.bolt, source, idSpec), component.parallelism);
+
+            for (Map conf : component.componentConfs) {
                 declarer.addConfigurations(conf);
             }
-            
-            if(idSpec!=null) {
+
+            if (idSpec != null) {
                 declarer.fieldsGrouping(idSpec.getGlobalStreamId().get_componentId(), PrepareRequest.ID_STREAM, new Fields("request"));
             }
-            if(i==0 && component.declarations.isEmpty()) {
+            if (i == 0 && component.declarations.isEmpty()) {
                 declarer.noneGrouping(PREPARE_ID, PrepareRequest.ARGS_STREAM);
             } else {
                 String prevId;
-                if(i==0) {
+                if (i == 0) {
                     prevId = PREPARE_ID;
                 } else {
-                    prevId = boltId(i-1);
+                    prevId = boltId(i - 1);
                 }
-                for(InputDeclaration declaration: component.declarations) {
+                for (InputDeclaration declaration : component.declarations) {
                     declaration.declare(prevId, declarer);
                 }
             }
-            if(i>0) {
-                declarer.directGrouping(boltId(i-1), Constants.COORDINATED_STREAM_ID); 
+            if (i > 0) {
+                declarer.directGrouping(boltId(i - 1), Constants.COORDINATED_STREAM_ID);
             }
         }
-        
-        IRichBolt lastBolt = _components.get(_components.size()-1).bolt;
+
+        IRichBolt lastBolt = _components.get(_components.size() - 1).bolt;
         OutputFieldsGetter getter = new OutputFieldsGetter();
         lastBolt.declareOutputFields(getter);
         Map<String, StreamInfo> streams = getter.getFieldsDeclaration();
-        if(streams.size()!=1) {
+        if (streams.size() != 1) {
             throw new RuntimeException("Must declare exactly one stream from last bolt in LinearDRPCTopology");
         }
         String outputStream = streams.keySet().iterator().next();
         List<String> fields = streams.get(outputStream).get_output_fields();
-        if(fields.size()!=2) {
-            throw new RuntimeException("Output stream of last component in LinearDRPCTopology must contain exactly two fields. The first should be the request id, and the second should be the result.");
+        if (fields.size() != 2) {
+            throw new RuntimeException(
+                    "Output stream of last component in LinearDRPCTopology must contain exactly two fields. The first should be the request id, and the second should be the result.");
         }
 
-        builder.setBolt("JoinResult", new JoinResult(PREPARE_ID))
-                .fieldsGrouping(boltId(i-1), outputStream, new Fields(fields.get(0)))
+        builder.setBolt("JoinResult", new JoinResult(PREPARE_ID)).fieldsGrouping(boltId(i - 1), outputStream, new Fields(fields.get(0)))
                 .fieldsGrouping(PREPARE_ID, PrepareRequest.RETURN_STREAM, new Fields("request"));
         i++;
-        builder.setBolt("ReturnResults", new ReturnResults())
-                .noneGrouping("JoinResult");
+        builder.setBolt("ReturnResults", new ReturnResults()).noneGrouping("JoinResult");
         return builder.createTopology();
     }
-    
+
     private static String boltId(int index) {
         return "bolt" + index;
     }
-    
+
     private static class Component {
         public IRichBolt bolt;
         public int parallelism;
         public List<Map> componentConfs;
         public List<InputDeclaration> declarations = new ArrayList<InputDeclaration>();
-        
+
         public Component(IRichBolt bolt, int parallelism) {
             this.bolt = bolt;
             this.parallelism = parallelism;
             this.componentConfs = new ArrayList();
         }
     }
-    
+
     private static interface InputDeclaration {
         public void declare(String prevComponent, InputDeclarer declarer);
     }
-    
+
     private class InputDeclarerImpl extends BaseConfigurationDeclarer<LinearDRPCInputDeclarer> implements LinearDRPCInputDeclarer {
         Component _component;
-        
+
         public InputDeclarerImpl(Component component) {
             _component = component;
         }
-        
+
         @Override
         public LinearDRPCInputDeclarer fieldsGrouping(final Fields fields) {
             addDeclaration(new InputDeclaration() {
                 @Override
                 public void declare(String prevComponent, InputDeclarer declarer) {
                     declarer.fieldsGrouping(prevComponent, fields);
-                }                
+                }
             });
             return this;
         }
@@ -212,7 +205,7 @@ public class LinearDRPCTopologyBuilder {
                 @Override
                 public void declare(String prevComponent, InputDeclarer declarer) {
                     declarer.fieldsGrouping(prevComponent, streamId, fields);
-                }                
+                }
             });
             return this;
         }
@@ -223,7 +216,7 @@ public class LinearDRPCTopologyBuilder {
                 @Override
                 public void declare(String prevComponent, InputDeclarer declarer) {
                     declarer.globalGrouping(prevComponent);
-                }                
+                }
             });
             return this;
         }
@@ -234,7 +227,7 @@ public class LinearDRPCTopologyBuilder {
                 @Override
                 public void declare(String prevComponent, InputDeclarer declarer) {
                     declarer.globalGrouping(prevComponent, streamId);
-                }                
+                }
             });
             return this;
         }
@@ -245,7 +238,7 @@ public class LinearDRPCTopologyBuilder {
                 @Override
                 public void declare(String prevComponent, InputDeclarer declarer) {
                     declarer.shuffleGrouping(prevComponent);
-                }                
+                }
             });
             return this;
         }
@@ -256,7 +249,7 @@ public class LinearDRPCTopologyBuilder {
                 @Override
                 public void declare(String prevComponent, InputDeclarer declarer) {
                     declarer.shuffleGrouping(prevComponent, streamId);
-                }                
+                }
             });
             return this;
         }
@@ -267,7 +260,7 @@ public class LinearDRPCTopologyBuilder {
                 @Override
                 public void declare(String prevComponent, InputDeclarer declarer) {
                     declarer.localOrShuffleGrouping(prevComponent);
-                }                
+                }
             });
             return this;
         }
@@ -278,18 +271,18 @@ public class LinearDRPCTopologyBuilder {
                 @Override
                 public void declare(String prevComponent, InputDeclarer declarer) {
                     declarer.localOrShuffleGrouping(prevComponent, streamId);
-                }                
+                }
             });
             return this;
         }
-        
+
         @Override
         public LinearDRPCInputDeclarer noneGrouping() {
             addDeclaration(new InputDeclaration() {
                 @Override
                 public void declare(String prevComponent, InputDeclarer declarer) {
                     declarer.noneGrouping(prevComponent);
-                }                
+                }
             });
             return this;
         }
@@ -300,7 +293,7 @@ public class LinearDRPCTopologyBuilder {
                 @Override
                 public void declare(String prevComponent, InputDeclarer declarer) {
                     declarer.noneGrouping(prevComponent, streamId);
-                }                
+                }
             });
             return this;
         }
@@ -311,7 +304,7 @@ public class LinearDRPCTopologyBuilder {
                 @Override
                 public void declare(String prevComponent, InputDeclarer declarer) {
                     declarer.allGrouping(prevComponent);
-                }                
+                }
             });
             return this;
         }
@@ -322,7 +315,7 @@ public class LinearDRPCTopologyBuilder {
                 @Override
                 public void declare(String prevComponent, InputDeclarer declarer) {
                     declarer.allGrouping(prevComponent, streamId);
-                }                
+                }
             });
             return this;
         }
@@ -333,7 +326,7 @@ public class LinearDRPCTopologyBuilder {
                 @Override
                 public void declare(String prevComponent, InputDeclarer declarer) {
                     declarer.directGrouping(prevComponent);
-                }                
+                }
             });
             return this;
         }
@@ -344,7 +337,7 @@ public class LinearDRPCTopologyBuilder {
                 @Override
                 public void declare(String prevComponent, InputDeclarer declarer) {
                     declarer.directGrouping(prevComponent, streamId);
-                }                
+                }
             });
             return this;
         }
@@ -365,7 +358,7 @@ public class LinearDRPCTopologyBuilder {
                 @Override
                 public void declare(String prevComponent, InputDeclarer declarer) {
                     declarer.customGrouping(prevComponent, grouping);
-                }                
+                }
             });
             return this;
         }
@@ -376,11 +369,11 @@ public class LinearDRPCTopologyBuilder {
                 @Override
                 public void declare(String prevComponent, InputDeclarer declarer) {
                     declarer.customGrouping(prevComponent, streamId, grouping);
-                }                
+                }
             });
             return this;
         }
-        
+
         private void addDeclaration(InputDeclaration declaration) {
             _component.declarations.add(declaration);
         }
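
For orientation, a hedged usage sketch of the (deprecated) builder above: a one-bolt linear DRPC topology whose final bolt emits exactly two fields, request id and result, as the RuntimeException checks in createTopology() require. ExclamationDRPC and ExclaimBolt are illustrative names modeled on the storm-starter example, not part of this commit:

    import backtype.storm.drpc.LinearDRPCTopologyBuilder;
    import backtype.storm.generated.StormTopology;
    import backtype.storm.topology.BasicOutputCollector;
    import backtype.storm.topology.OutputFieldsDeclarer;
    import backtype.storm.topology.base.BaseBasicBolt;
    import backtype.storm.tuple.Fields;
    import backtype.storm.tuple.Tuple;
    import backtype.storm.tuple.Values;

    public class ExclamationDRPC {
        // Last bolt in the chain: emits exactly (request id, result), as required.
        public static class ExclaimBolt extends BaseBasicBolt {
            @Override
            public void execute(Tuple tuple, BasicOutputCollector collector) {
                Object id = tuple.getValue(0);     // request id, passed through
                String input = tuple.getString(1); // the DRPC arguments
                collector.emit(new Values(id, input + "!"));
            }

            @Override
            public void declareOutputFields(OutputFieldsDeclarer declarer) {
                declarer.declare(new Fields("id", "result"));
            }
        }

        public static StormTopology build() {
            LinearDRPCTopologyBuilder builder = new LinearDRPCTopologyBuilder("exclamation");
            builder.addBolt(new ExclaimBolt(), 3);
            return builder.createRemoteTopology();
        }
    }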

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/drpc/PrepareRequest.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/drpc/PrepareRequest.java b/jstorm-core/src/main/java/backtype/storm/drpc/PrepareRequest.java
index bd32169..fea8b36 100755
--- a/jstorm-core/src/main/java/backtype/storm/drpc/PrepareRequest.java
+++ b/jstorm-core/src/main/java/backtype/storm/drpc/PrepareRequest.java
@@ -28,7 +28,6 @@ import java.util.Map;
 import java.util.Random;
 import backtype.storm.utils.Utils;
 
-
 public class PrepareRequest extends BaseBasicBolt {
     public static final String ARGS_STREAM = Utils.DEFAULT_STREAM_ID;
     public static final String RETURN_STREAM = "ret";

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/drpc/ReturnResults.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/drpc/ReturnResults.java b/jstorm-core/src/main/java/backtype/storm/drpc/ReturnResults.java
index 2ca517e..129e2b3 100644
--- a/jstorm-core/src/main/java/backtype/storm/drpc/ReturnResults.java
+++ b/jstorm-core/src/main/java/backtype/storm/drpc/ReturnResults.java
@@ -37,15 +37,14 @@ import org.apache.thrift.TException;
 import org.apache.thrift.transport.TTransportException;
 import org.json.simple.JSONValue;
 
-
 public class ReturnResults extends BaseRichBolt {
-    //ANY CHANGE TO THIS CODE MUST BE SERIALIZABLE COMPATIBLE OR THERE WILL BE PROBLEMS
+    // ANY CHANGE TO THIS CODE MUST BE SERIALIZABLE COMPATIBLE OR THERE WILL BE PROBLEMS
     static final long serialVersionUID = -774882142710631591L;
 
     public static final Logger LOG = LoggerFactory.getLogger(ReturnResults.class);
     OutputCollector _collector;
     boolean local;
-    Map _conf; 
+    Map _conf;
     Map<List, DRPCInvocationsClient> _clients = new HashMap<List, DRPCInvocationsClient>();
 
     @Override
@@ -59,22 +58,24 @@ public class ReturnResults extends BaseRichBolt {
     public void execute(Tuple input) {
         String result = (String) input.getValue(0);
         String returnInfo = (String) input.getValue(1);
-        //LOG.info("Receive one message, resultInfo:{}, result:{}", returnInfo, result);
-        if(returnInfo!=null) {
+        // LOG.info("Receive one message, resultInfo:{}, result:{}", returnInfo, result);
+        if (returnInfo != null) {
             Map retMap = (Map) JSONValue.parse(returnInfo);
             final String host = (String) retMap.get("host");
             final int port = Utils.getInt(retMap.get("port"));
             String id = (String) retMap.get("id");
             DistributedRPCInvocations.Iface client;
-            if(local) {
+            if (local) {
                 client = (DistributedRPCInvocations.Iface) ServiceRegistry.getService(host);
             } else {
-                List server = new ArrayList() {{
-                    add(host);
-                    add(port);
-                }};
-            
-                if(!_clients.containsKey(server)) {
+                List server = new ArrayList() {
+                    {
+                        add(host);
+                        add(port);
+                    }
+                };
+
+                if (!_clients.containsKey(server)) {
                     try {
                         _clients.put(server, new DRPCInvocationsClient(_conf, host, port));
                     } catch (TTransportException ex) {
@@ -83,7 +84,7 @@ public class ReturnResults extends BaseRichBolt {
                 }
                 client = _clients.get(server);
             }
- 
+
             try {
                 client.result(id, result);
                 _collector.ack(input);
@@ -93,29 +94,29 @@ public class ReturnResults extends BaseRichBolt {
                 if (client instanceof DRPCInvocationsClient) {
                     try {
                         LOG.info("reconnecting... ");
-                        ((DRPCInvocationsClient)client).reconnectClient(); //Blocking call
+                        ((DRPCInvocationsClient) client).reconnectClient(); // Blocking call
                     } catch (TException e2) {
                         throw new RuntimeException(e2);
                     }
                 }
-            } catch(TException e) {
+            } catch (TException e) {
                 LOG.error("Failed to return results to DRPC server", e);
                 _collector.fail(input);
                 if (client instanceof DRPCInvocationsClient) {
                     try {
                         LOG.info("reconnecting... ");
-                        ((DRPCInvocationsClient)client).reconnectClient(); //Blocking call
+                        ((DRPCInvocationsClient) client).reconnectClient(); // Blocking call
                     } catch (TException e2) {
                         throw new RuntimeException(e2);
                     }
                 }
             }
         }
-    }    
+    }
 
     @Override
     public void cleanup() {
-        for(DRPCInvocationsClient c: _clients.values()) {
+        for (DRPCInvocationsClient c : _clients.values()) {
             c.close();
         }
     }
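
One more idiom from the code above: clients are cached per DRPC endpoint, with a double-brace-initialized ArrayList of (host, port) serving as the composite map key, which works because ArrayList implements equals() and hashCode() element-wise. A sketch of the same caching scheme, with a String standing in for the real client (ClientCache is an illustrative name):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    class ClientCache {
        private final Map<List, String> clients = new HashMap<List, String>();

        String clientFor(final String host, final int port) {
            List key = new ArrayList() {
                {
                    add(host);
                    add(port);
                }
            };
            if (!clients.containsKey(key)) {
                clients.put(key, "client-for-" + host + ":" + port);
            }
            return clients.get(key);
        }
    }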

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/AlreadyAliveException.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/AlreadyAliveException.java b/jstorm-core/src/main/java/backtype/storm/generated/AlreadyAliveException.java
index 06eadaf..533b112 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/AlreadyAliveException.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/AlreadyAliveException.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class AlreadyAliveException extends TException implements org.apache.thrift.TBase<AlreadyAliveException, AlreadyAliveException._Fields>, java.io.Serializable, Cloneable, Comparable<AlreadyAliveException> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AlreadyAliveException");
 
@@ -264,11 +264,11 @@ public class AlreadyAliveException extends TException implements org.apache.thri
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -288,10 +288,10 @@ public class AlreadyAliveException extends TException implements org.apache.thri
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_msg()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'msg' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'msg' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -300,7 +300,7 @@ public class AlreadyAliveException extends TException implements org.apache.thri
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -308,7 +308,7 @@ public class AlreadyAliveException extends TException implements org.apache.thri
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -321,7 +321,7 @@ public class AlreadyAliveException extends TException implements org.apache.thri
 
   private static class AlreadyAliveExceptionStandardScheme extends StandardScheme<AlreadyAliveException> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, AlreadyAliveException struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, AlreadyAliveException struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -348,7 +348,7 @@ public class AlreadyAliveException extends TException implements org.apache.thri
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, AlreadyAliveException struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, AlreadyAliveException struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -372,13 +372,13 @@ public class AlreadyAliveException extends TException implements org.apache.thri
   private static class AlreadyAliveExceptionTupleScheme extends TupleScheme<AlreadyAliveException> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, AlreadyAliveException struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, AlreadyAliveException struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       oprot.writeString(struct.msg);
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, AlreadyAliveException struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, AlreadyAliveException struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       struct.msg = iprot.readString();
       struct.set_msg_isSet(true);

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/AuthorizationException.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/AuthorizationException.java b/jstorm-core/src/main/java/backtype/storm/generated/AuthorizationException.java
index 02f72f0..0822f50 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/AuthorizationException.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/AuthorizationException.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class AuthorizationException extends TException implements org.apache.thrift.TBase<AuthorizationException, AuthorizationException._Fields>, java.io.Serializable, Cloneable, Comparable<AuthorizationException> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AuthorizationException");
 
@@ -264,11 +264,11 @@ public class AuthorizationException extends TException implements org.apache.thr
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -288,10 +288,10 @@ public class AuthorizationException extends TException implements org.apache.thr
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_msg()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'msg' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'msg' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -300,7 +300,7 @@ public class AuthorizationException extends TException implements org.apache.thr
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -308,7 +308,7 @@ public class AuthorizationException extends TException implements org.apache.thr
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -321,7 +321,7 @@ public class AuthorizationException extends TException implements org.apache.thr
 
   private static class AuthorizationExceptionStandardScheme extends StandardScheme<AuthorizationException> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, AuthorizationException struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, AuthorizationException struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -348,7 +348,7 @@ public class AuthorizationException extends TException implements org.apache.thr
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, AuthorizationException struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, AuthorizationException struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -372,13 +372,13 @@ public class AuthorizationException extends TException implements org.apache.thr
   private static class AuthorizationExceptionTupleScheme extends TupleScheme<AuthorizationException> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, AuthorizationException struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, AuthorizationException struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       oprot.writeString(struct.msg);
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, AuthorizationException struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, AuthorizationException struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       struct.msg = iprot.readString();
       struct.set_msg_isSet(true);

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/Bolt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/Bolt.java b/jstorm-core/src/main/java/backtype/storm/generated/Bolt.java
index e3d0a07..9241322 100644
--- a/jstorm-core/src/main/java/backtype/storm/generated/Bolt.java
+++ b/jstorm-core/src/main/java/backtype/storm/generated/Bolt.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-10-20")
 public class Bolt implements org.apache.thrift.TBase<Bolt, Bolt._Fields>, java.io.Serializable, Cloneable, Comparable<Bolt> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Bolt");
 
@@ -337,11 +337,11 @@ public class Bolt implements org.apache.thrift.TBase<Bolt, Bolt._Fields>, java.i
     return _Fields.findByThriftId(fieldId);
   }
 
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws TException {
     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
   }
 
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws TException {
     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
   }
 
@@ -369,14 +369,14 @@ public class Bolt implements org.apache.thrift.TBase<Bolt, Bolt._Fields>, java.i
     return sb.toString();
   }
 
-  public void validate() throws org.apache.thrift.TException {
+  public void validate() throws TException {
     // check for required fields
     if (!is_set_bolt_object()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'bolt_object' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'bolt_object' is unset! Struct:" + toString());
     }
 
     if (!is_set_common()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'common' is unset! Struct:" + toString());
+      throw new TProtocolException("Required field 'common' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -388,7 +388,7 @@ public class Bolt implements org.apache.thrift.TBase<Bolt, Bolt._Fields>, java.i
   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
     try {
       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -396,7 +396,7 @@ public class Bolt implements org.apache.thrift.TBase<Bolt, Bolt._Fields>, java.i
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
+    } catch (TException te) {
       throw new java.io.IOException(te);
     }
   }
@@ -409,7 +409,7 @@ public class Bolt implements org.apache.thrift.TBase<Bolt, Bolt._Fields>, java.i
 
   private static class BoltStandardScheme extends StandardScheme<Bolt> {
 
-    public void read(org.apache.thrift.protocol.TProtocol iprot, Bolt struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, Bolt struct) throws TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -446,7 +446,7 @@ public class Bolt implements org.apache.thrift.TBase<Bolt, Bolt._Fields>, java.i
       struct.validate();
     }
 
-    public void write(org.apache.thrift.protocol.TProtocol oprot, Bolt struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, Bolt struct) throws TException {
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
@@ -475,14 +475,14 @@ public class Bolt implements org.apache.thrift.TBase<Bolt, Bolt._Fields>, java.i
   private static class BoltTupleScheme extends TupleScheme<Bolt> {
 
     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, Bolt struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, Bolt struct) throws TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       struct.bolt_object.write(oprot);
       struct.common.write(oprot);
     }
 
     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, Bolt struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, Bolt struct) throws TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       struct.bolt_object = new ComponentObject();
       struct.bolt_object.read(iprot);


http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/cache/RocksTTLDBCache.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/cache/RocksTTLDBCache.java b/jstorm-core/src/main/java/com/alibaba/jstorm/cache/RocksTTLDBCache.java
index 20b5f46..426e074 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/cache/RocksTTLDBCache.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/cache/RocksTTLDBCache.java
@@ -48,23 +48,23 @@ import com.alibaba.jstorm.utils.PathUtils;
 public class RocksTTLDBCache implements JStormCache {
     private static final long serialVersionUID = 705938812240167583L;
     private static Logger LOG = LoggerFactory.getLogger(RocksTTLDBCache.class);
-    
+
     static {
         RocksDB.loadLibrary();
     }
-    
+
     public static final String ROCKSDB_ROOT_DIR = "rocksdb.root.dir";
     public static final String ROCKSDB_RESET = "rocksdb.reset";
     protected TtlDB ttlDB;
     protected String rootDir;
     protected TreeMap<Integer, ColumnFamilyHandle> windowHandlers = new TreeMap<Integer, ColumnFamilyHandle>();
-    
+
     public void initDir(Map<Object, Object> conf) {
         String confDir = (String) conf.get(ROCKSDB_ROOT_DIR);
         if (StringUtils.isBlank(confDir) == true) {
             throw new RuntimeException("Doesn't set rootDir of rocksDB");
         }
-        
+
         boolean clean = ConfigExtension.getNimbusCacheReset(conf);
         LOG.info("RocksDB reset is " + clean);
         if (clean == true) {
@@ -75,7 +75,7 @@ public class RocksTTLDBCache implements JStormCache {
                 throw new RuntimeException("Failed to cleanup rooDir of rocksDB " + confDir);
             }
         }
-        
+
         File file = new File(confDir);
         if (file.exists() == false) {
             try {
@@ -86,53 +86,53 @@ public class RocksTTLDBCache implements JStormCache {
                 throw new RuntimeException("Failed to mkdir rooDir of rocksDB " + confDir);
             }
         }
-        
+
         rootDir = file.getAbsolutePath();
     }
-    
-    public void initDb(List<Integer> list) throws Exception{
+
+    public void initDb(List<Integer> list) throws Exception {
         LOG.info("Begin to init rocksDB of {}", rootDir);
-        
+
         DBOptions dbOptions = null;
-        
+
         List<ColumnFamilyDescriptor> columnFamilyNames = new ArrayList<ColumnFamilyDescriptor>();
         columnFamilyNames.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
         for (Integer timeout : list) {
             columnFamilyNames.add(new ColumnFamilyDescriptor(String.valueOf(timeout).getBytes()));
         }
-        
+
         List<Integer> ttlValues = new ArrayList<Integer>();
         // Default column family with infinite lifetime
        // ATTENTION: the first entry must be 0; the RocksDB Java API has this limitation
         ttlValues.add(0);
        // one new column family per TTL (in seconds) from the list
         ttlValues.addAll(list);
-        
+
         try {
             dbOptions = new DBOptions().setCreateMissingColumnFamilies(true).setCreateIfMissing(true);
-            
+
             List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<ColumnFamilyHandle>();
-            
+
             ttlDB = TtlDB.open(dbOptions, rootDir, columnFamilyNames, columnFamilyHandleList, ttlValues, false);
-            
+
             for (int i = 0; i < ttlValues.size(); i++) {
                 windowHandlers.put(ttlValues.get(i), columnFamilyHandleList.get(i));
             }
-            
+
             LOG.info("Successfully init rocksDB of {}", rootDir);
-        }  finally {
-            
+        } finally {
+
             if (dbOptions != null) {
                 dbOptions.dispose();
             }
         }
     }
-    
+
     @Override
-    public void init(Map<Object, Object> conf) throws Exception{
+    public void init(Map<Object, Object> conf) throws Exception {
         // TODO Auto-generated method stub
         initDir(conf);
-        
+
         List<Integer> list = new ArrayList<Integer>();
         if (conf.get(TAG_TIMEOUT_LIST) != null) {
             for (Object obj : (List) ConfigExtension.getCacheTimeoutList(conf)) {
@@ -140,11 +140,11 @@ public class RocksTTLDBCache implements JStormCache {
                 if (timeoutSecond == null || timeoutSecond <= 0) {
                     continue;
                 }
-                
+
                 list.add(timeoutSecond);
             }
         }
-        
+
         // Add retry logic
         boolean isSuccess = false;
         for (int i = 0; i < 3; i++) {
@@ -152,64 +152,61 @@ public class RocksTTLDBCache implements JStormCache {
                 initDb(list);
                 isSuccess = true;
                 break;
-            }catch(Exception e) {
+            } catch (Exception e) {
                 LOG.warn("Failed to init rocksDB " + rootDir, e);
                 try {
                     PathUtils.rmr(rootDir);
                 } catch (IOException e1) {
                     // TODO Auto-generated catch block
-                    
+
                 }
             }
         }
-        
+
         if (isSuccess == false) {
             throw new RuntimeException("Failed to init rocksDB " + rootDir);
         }
     }
-    
+
     @Override
     public void cleanup() {
         LOG.info("Begin to close rocketDb of {}", rootDir);
-        
+
         for (ColumnFamilyHandle columnFamilyHandle : windowHandlers.values()) {
             columnFamilyHandle.dispose();
         }
-        
+
         if (ttlDB != null) {
             ttlDB.close();
         }
-        
+
         LOG.info("Successfully closed rocketDb of {}", rootDir);
     }
-    
+
     @Override
-    public Object get(String key)  {
+    public Object get(String key) {
         // TODO Auto-generated method stub
         for (Entry<Integer, ColumnFamilyHandle> entry : windowHandlers.entrySet()) {
             try {
-                byte[] data = ttlDB.get(entry.getValue(),
-                            key.getBytes());
+                byte[] data = ttlDB.get(entry.getValue(), key.getBytes());
                 if (data != null) {
                     try {
                         return Utils.javaDeserialize(data);
-                    }catch(Exception e) {
+                    } catch (Exception e) {
                         LOG.error("Failed to deserialize obj of " + key);
-                        ttlDB.remove(entry.getValue(),
-                                        key.getBytes());
+                        ttlDB.remove(entry.getValue(), key.getBytes());
                         return null;
                     }
                 }
-                
-                
-            }catch(Exception e) {
-                
+
+            } catch (Exception e) {
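+                // ignored: fall through and try the next TTL window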
+
             }
         }
-        
+
         return null;
     }
-    
+
     @Override
     public void getBatch(Map<String, Object> map) {
         List<byte[]> lookupKeys = new ArrayList<byte[]>();
@@ -217,26 +214,26 @@ public class RocksTTLDBCache implements JStormCache {
             lookupKeys.add(key.getBytes());
         }
         for (Entry<Integer, ColumnFamilyHandle> entry : windowHandlers.entrySet()) {
-            
+
             List<ColumnFamilyHandle> cfHandlers = new ArrayList<ColumnFamilyHandle>();
             for (String key : map.keySet()) {
                 cfHandlers.add(entry.getValue());
             }
-            
+
             try {
                 Map<byte[], byte[]> results = ttlDB.multiGet(cfHandlers, lookupKeys);
                 if (results == null || results.size() == 0) {
                     continue;
                 }
-                
+
                 for (Entry<byte[], byte[]> resultEntry : results.entrySet()) {
                     byte[] keyByte = resultEntry.getKey();
                     byte[] valueByte = resultEntry.getValue();
-                    
+
                     if (keyByte == null || valueByte == null) {
                         continue;
                     }
-                    
+
                     Object value = null;
                     try {
                         value = Utils.javaDeserialize(valueByte);
@@ -245,35 +242,31 @@ public class RocksTTLDBCache implements JStormCache {
                         ttlDB.remove(entry.getValue(), keyByte);
                         continue;
                     }
-                    
+
                     map.put(new String(keyByte), value);
                 }
-                
-                return ;
+
+                return;
             } catch (Exception e) {
                 LOG.error("Failed to query " + map.keySet() + ", in window: " + entry.getKey());
             }
         }
-        
+
         return;
     }
-    
-    
+
     @Override
     public void remove(String key) {
         for (Entry<Integer, ColumnFamilyHandle> entry : windowHandlers.entrySet()) {
             try {
-                ttlDB.remove(entry.getValue(),
-                            key.getBytes());
-                
-                
-                
-            }catch(Exception e) {
+                ttlDB.remove(entry.getValue(), key.getBytes());
+
+            } catch (Exception e) {
                 LOG.error("Failed to remove " + key);
             }
         }
     }
-    
+
     @Override
     public void removeBatch(Collection<String> keys) {
         // TODO Auto-generated method stub
@@ -281,22 +274,22 @@ public class RocksTTLDBCache implements JStormCache {
             remove(key);
         }
     }
-    
+
     protected void put(String key, Object value, Entry<Integer, ColumnFamilyHandle> entry) {
-        
+
         byte[] data = Utils.javaSerialize(value);
         try {
             ttlDB.put(entry.getValue(), key.getBytes(), data);
-        }catch(Exception e) {
+        } catch (Exception e) {
             LOG.error("Failed put into cache, " + key, e);
-            return ;
+            return;
         }
-        
+
         for (Entry<Integer, ColumnFamilyHandle> removeEntry : windowHandlers.entrySet()) {
             if (removeEntry.getKey().equals(entry.getKey())) {
                 continue;
             }
-            
+
             try {
                 ttlDB.remove(removeEntry.getValue(), key.getBytes());
             } catch (Exception e) {
@@ -305,72 +298,70 @@ public class RocksTTLDBCache implements JStormCache {
             }
         }
     }
-    
+
     protected Entry<Integer, ColumnFamilyHandle> getHandler(int timeoutSecond) {
         ColumnFamilyHandle cfHandler = null;
         Entry<Integer, ColumnFamilyHandle> ceilingEntry = windowHandlers.ceilingEntry(timeoutSecond);
         if (ceilingEntry != null) {
             return ceilingEntry;
-        }else {
+        } else {
             return windowHandlers.firstEntry();
         }
     }
-    
+
     @Override
     public void put(String key, Object value, int timeoutSecond) {
         // TODO Auto-generated method stub
-        
-        
+
         put(key, value, getHandler(timeoutSecond));
-        
+
     }
-    
+
     @Override
-    public void put(String key, Object value)  {
+    public void put(String key, Object value) {
         put(key, value, windowHandlers.firstEntry());
     }
-    
-    protected void putBatch(Map<String, Object> map, Entry<Integer, ColumnFamilyHandle> putEntry )  {
+
+    protected void putBatch(Map<String, Object> map, Entry<Integer, ColumnFamilyHandle> putEntry) {
         // TODO Auto-generated method stub
         WriteOptions writeOpts = null;
         WriteBatch writeBatch = null;
-        
+
         Set<byte[]> putKeys = new HashSet<byte[]>();
-        
+
         try {
             writeOpts = new WriteOptions();
             writeBatch = new WriteBatch();
-            
+
             for (Entry<String, Object> entry : map.entrySet()) {
                 String key = entry.getKey();
                 Object value = entry.getValue();
-                
-                
+
                 byte[] data = Utils.javaSerialize(value);
-                
+
                 if (StringUtils.isBlank(key) || data == null || data.length == 0) {
                     continue;
                 }
-                
+
                 byte[] keyByte = key.getBytes();
                 writeBatch.put(putEntry.getValue(), keyByte, data);
-                
+
                 putKeys.add(keyByte);
             }
-            
+
             ttlDB.write(writeOpts, writeBatch);
-        }catch(Exception e) {
+        } catch (Exception e) {
             LOG.error("Failed to putBatch into DB, " + map.keySet(), e);
-        }finally {
+        } finally {
             if (writeOpts != null) {
                 writeOpts.dispose();
             }
-            
+
             if (writeBatch != null) {
                 writeBatch.dispose();
             }
         }
-        
+
         for (Entry<Integer, ColumnFamilyHandle> entry : windowHandlers.entrySet()) {
             if (entry.getKey().equals(putEntry.getKey())) {
                 continue;
@@ -385,85 +376,85 @@ public class RocksTTLDBCache implements JStormCache {
             }
         }
     }
-    
+
     @Override
-    public void putBatch(Map<String, Object> map)  {
+    public void putBatch(Map<String, Object> map) {
         // TODO Auto-generated method stub
         putBatch(map, windowHandlers.firstEntry());
     }
-    
+
     @Override
     public void putBatch(Map<String, Object> map, int timeoutSeconds) {
         // TODO Auto-generated method stub
         putBatch(map, getHandler(timeoutSeconds));
     }
-    
-//    public void put() throws Exception {
-
-//    }
-//    
-//    public void write() throws Exception {
-//        Options options = null;
-//        WriteBatch wb1 = null;
-//        WriteBatch wb2 = null;
-//        WriteOptions opts = null;
-//        try {
-//            options = new Options().setMergeOperator(new StringAppendOperator()).setCreateIfMissing(true);
-//            db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath());
-//            opts = new WriteOptions();
-//            wb1 = new WriteBatch();
-//            wb1.put("key1".getBytes(), "aa".getBytes());
-//            wb1.merge("key1".getBytes(), "bb".getBytes());
-//            wb2 = new WriteBatch();
-//            wb2.put("key2".getBytes(), "xx".getBytes());
-//            wb2.merge("key2".getBytes(), "yy".getBytes());
-//            db.write(opts, wb1);
-//            db.write(opts, wb2);
-//            assertThat(db.get("key1".getBytes())).isEqualTo("aa,bb".getBytes());
-//            assertThat(db.get("key2".getBytes())).isEqualTo("xx,yy".getBytes());
-//        } finally {
-//            if (db != null) {
-//                db.close();
-//            }
-//            if (wb1 != null) {
-//                wb1.dispose();
-//            }
-//            if (wb2 != null) {
-//                wb2.dispose();
-//            }
-//            if (options != null) {
-//                options.dispose();
-//            }
-//            if (opts != null) {
-//                opts.dispose();
-//            }
-//        }
-//    }
-//    
-
-//    
-//    public void remove() throws Exception {
-//        RocksDB db = null;
-//        WriteOptions wOpt;
-//        try {
-//            wOpt = new WriteOptions();
-//            db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
-//            db.put("key1".getBytes(), "value".getBytes());
-//            db.put("key2".getBytes(), "12345678".getBytes());
-//            assertThat(db.get("key1".getBytes())).isEqualTo("value".getBytes());
-//            assertThat(db.get("key2".getBytes())).isEqualTo("12345678".getBytes());
-//            db.remove("key1".getBytes());
-//            db.remove(wOpt, "key2".getBytes());
-//            assertThat(db.get("key1".getBytes())).isNull();
-//            assertThat(db.get("key2".getBytes())).isNull();
-//        } finally {
-//            if (db != null) {
-//                db.close();
-//            }
-//        }
-//    }
-//    
-//    public void ttlDbOpenWithColumnFamilies() throws Exception, InterruptedException {
-//        
-//    }
+
+    // public void put() throws Exception {
+
+    // }
+    //
+    // public void write() throws Exception {
+    // Options options = null;
+    // WriteBatch wb1 = null;
+    // WriteBatch wb2 = null;
+    // WriteOptions opts = null;
+    // try {
+    // options = new Options().setMergeOperator(new StringAppendOperator()).setCreateIfMissing(true);
+    // db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath());
+    // opts = new WriteOptions();
+    // wb1 = new WriteBatch();
+    // wb1.put("key1".getBytes(), "aa".getBytes());
+    // wb1.merge("key1".getBytes(), "bb".getBytes());
+    // wb2 = new WriteBatch();
+    // wb2.put("key2".getBytes(), "xx".getBytes());
+    // wb2.merge("key2".getBytes(), "yy".getBytes());
+    // db.write(opts, wb1);
+    // db.write(opts, wb2);
+    // assertThat(db.get("key1".getBytes())).isEqualTo("aa,bb".getBytes());
+    // assertThat(db.get("key2".getBytes())).isEqualTo("xx,yy".getBytes());
+    // } finally {
+    // if (db != null) {
+    // db.close();
+    // }
+    // if (wb1 != null) {
+    // wb1.dispose();
+    // }
+    // if (wb2 != null) {
+    // wb2.dispose();
+    // }
+    // if (options != null) {
+    // options.dispose();
+    // }
+    // if (opts != null) {
+    // opts.dispose();
+    // }
+    // }
+    // }
+    //
+
+    //
+    // public void remove() throws Exception {
+    // RocksDB db = null;
+    // WriteOptions wOpt;
+    // try {
+    // wOpt = new WriteOptions();
+    // db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
+    // db.put("key1".getBytes(), "value".getBytes());
+    // db.put("key2".getBytes(), "12345678".getBytes());
+    // assertThat(db.get("key1".getBytes())).isEqualTo("value".getBytes());
+    // assertThat(db.get("key2".getBytes())).isEqualTo("12345678".getBytes());
+    // db.remove("key1".getBytes());
+    // db.remove(wOpt, "key2".getBytes());
+    // assertThat(db.get("key1".getBytes())).isNull();
+    // assertThat(db.get("key2".getBytes())).isNull();
+    // } finally {
+    // if (db != null) {
+    // db.close();
+    // }
+    // }
+    // }
+    //
+    // public void ttlDbOpenWithColumnFamilies() throws Exception, InterruptedException {
+    //
+    // }
 }
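
RocksTTLDBCache keeps one column-family handle per TTL window in a TreeMap, and getHandler() serves a put with the smallest window whose timeout is at least the requested one, falling back to the first (default, non-expiring) entry. A minimal, JDK-only sketch of that selection rule, with hypothetical window names:

import java.util.Map.Entry;
import java.util.TreeMap;

public class TtlWindowSelector {
    // timeout in seconds -> window name; 0 is the default, non-expiring window
    private final TreeMap<Integer, String> windows = new TreeMap<Integer, String>();

    public TtlWindowSelector() {
        windows.put(0, "default-cf");
        windows.put(60, "cf-60s");
        windows.put(300, "cf-300s");
    }

    // Mirrors getHandler(): smallest window >= requested timeout, else the first entry.
    public Entry<Integer, String> select(int timeoutSecond) {
        Entry<Integer, String> ceiling = windows.ceilingEntry(timeoutSecond);
        return ceiling != null ? ceiling : windows.firstEntry();
    }

    public static void main(String[] args) {
        TtlWindowSelector s = new TtlWindowSelector();
        System.out.println(s.select(45).getKey());  // 60
        System.out.println(s.select(600).getKey()); // 0 (falls back to the default window)
    }
}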

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/cache/TimeoutMemCache.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/cache/TimeoutMemCache.java b/jstorm-core/src/main/java/com/alibaba/jstorm/cache/TimeoutMemCache.java
index d4d9905..8924a81 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/cache/TimeoutMemCache.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/cache/TimeoutMemCache.java
@@ -17,50 +17,46 @@
  */
 package com.alibaba.jstorm.cache;
 
+import com.alibaba.jstorm.client.ConfigExtension;
+import com.alibaba.jstorm.utils.JStormUtils;
+import com.alibaba.jstorm.utils.TimeCacheMap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.TreeMap;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.alibaba.jstorm.client.ConfigExtension;
-import com.alibaba.jstorm.utils.JStormUtils;
-import com.alibaba.jstorm.utils.TimeCacheMap;
-
 public class TimeoutMemCache implements JStormCache {
     private static final long serialVersionUID = 705938812240167583L;
     private static Logger LOG = LoggerFactory.getLogger(TimeoutMemCache.class);
-    
-   
+
     protected int defaultTimeout;
     protected final TreeMap<Integer, TimeCacheMap<String, Object>> cacheWindows = new TreeMap<Integer, TimeCacheMap<String, Object>>();
-    
+
     public TimeoutMemCache() {
-        
     }
-    
+
     protected void registerCacheWindow(int timeoutSecond) {
         synchronized (this) {
             if (cacheWindows.get(timeoutSecond) == null) {
                 TimeCacheMap<String, Object> cacheWindow = new TimeCacheMap<String, Object>(timeoutSecond);
                 cacheWindows.put(timeoutSecond, cacheWindow);
-                
+
                 LOG.info("Successfully register CacheWindow: " + timeoutSecond);
             } else {
                 LOG.info("CacheWindow: " + timeoutSecond + " has been registered");
             }
         }
     }
-    
+
     @Override
     public void init(Map<Object, Object> conf) {
-        // TODO Auto-generated method stub
         this.defaultTimeout = ConfigExtension.getDefaultCacheTimeout(conf);
         registerCacheWindow(defaultTimeout);
-        
+
         List<Object> list = (List) ConfigExtension.getCacheTimeoutList(conf);
         if (list != null) {
             for (Object obj : list) {
@@ -68,21 +64,17 @@ public class TimeoutMemCache implements JStormCache {
                 if (timeoutSecond == null) {
                     continue;
                 }
-                
                 registerCacheWindow(timeoutSecond);
             }
         }
     }
-    
+
     @Override
     public void cleanup() {
-        // TODO Auto-generated method stub
-        
     }
-    
+
     @Override
     public Object get(String key) {
-        // TODO Auto-generated method stub
         // @@@ TODO
        // to improve performance, this could query the default window first, then the others
         for (TimeCacheMap<String, Object> cacheWindow : cacheWindows.values()) {
@@ -93,21 +85,17 @@ public class TimeoutMemCache implements JStormCache {
         }
         return null;
     }
-    
+
     @Override
     public void getBatch(Map<String, Object> map) {
-        // TODO Auto-generated method stub
         for (String key : map.keySet()) {
             Object obj = get(key);
             map.put(key, obj);
         }
-        
-        return;
     }
-    
+
     @Override
     public void remove(String key) {
-        // TODO Auto-generated method stub
         for (TimeCacheMap<String, Object> cacheWindow : cacheWindows.values()) {
             Object ret = cacheWindow.remove(key);
             if (ret != null) {
@@ -115,64 +103,52 @@ public class TimeoutMemCache implements JStormCache {
             }
         }
     }
-    
+
     @Override
     public void removeBatch(Collection<String> keys) {
-        // TODO Auto-generated method stub
         for (String key : keys) {
             remove(key);
         }
-        
-        return;
     }
-    
+
     @Override
     public void put(String key, Object value, int timeoutSecond) {
-        
-        // TODO Auto-generated method stub
         Entry<Integer, TimeCacheMap<String, Object>> ceilingEntry = cacheWindows.ceilingEntry(timeoutSecond);
         if (ceilingEntry == null) {
             put(key, value);
-            return ;
-        }else {
+        } else {
             remove(key);
             ceilingEntry.getValue().put(key, value);
         }
-        
     }
-    
+
     @Override
     public void put(String key, Object value) {
         remove(key);
         TimeCacheMap<String, Object> bestWindow = cacheWindows.get(defaultTimeout);
         bestWindow.put(key, value);
     }
-    
+
     @Override
-    public void putBatch(Map<String, Object> map)  {
-        // TODO Auto-generated method stub
+    public void putBatch(Map<String, Object> map) {
         for (Entry<String, Object> entry : map.entrySet()) {
             put(entry.getKey(), entry.getValue());
         }
-        
     }
-    
+
     @Override
     public void putBatch(Map<String, Object> map, int timeoutSeconds) {
-        // TODO Auto-generated method stub
         for (Entry<String, Object> entry : map.entrySet()) {
             put(entry.getKey(), entry.getValue(), timeoutSeconds);
         }
-        
     }
 
-	public int getDefaultTimeout() {
-		return defaultTimeout;
-	}
+    public int getDefaultTimeout() {
+        return defaultTimeout;
+    }
+
+    public void setDefaultTimeout(int defaultTimeout) {
+        this.defaultTimeout = defaultTimeout;
+    }
 
-	public void setDefaultTimeout(int defaultTimeout) {
-		this.defaultTimeout = defaultTimeout;
-	}
-    
-    
 }
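
TimeoutMemCache layers one TimeCacheMap per registered timeout and always removes a key from every window before re-inserting it, so a key lives in at most one window. A rough, JDK-only stand-in for a single window (illustrative only, not the JStorm TimeCacheMap API):

import java.util.HashMap;
import java.util.Map;

class ExpiringMap<K, V> {
    private static final class Holder<V> {
        final V value;
        final long expireAtMillis;

        Holder(V value, long expireAtMillis) {
            this.value = value;
            this.expireAtMillis = expireAtMillis;
        }
    }

    private final Map<K, Holder<V>> inner = new HashMap<K, Holder<V>>();
    private final long ttlMillis;

    ExpiringMap(int timeoutSecond) {
        this.ttlMillis = timeoutSecond * 1000L;
    }

    synchronized void put(K key, V value) {
        inner.put(key, new Holder<V>(value, System.currentTimeMillis() + ttlMillis));
    }

    synchronized V get(K key) {
        Holder<V> h = inner.get(key);
        if (h == null) {
            return null;
        }
        if (h.expireAtMillis < System.currentTimeMillis()) {
            inner.remove(key); // expired: evict lazily on read
            return null;
        }
        return h.value;
    }
}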

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/callback/AsyncLoopRunnable.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/callback/AsyncLoopRunnable.java b/jstorm-core/src/main/java/com/alibaba/jstorm/callback/AsyncLoopRunnable.java
index d21cc4a..e4466e7 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/callback/AsyncLoopRunnable.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/callback/AsyncLoopRunnable.java
@@ -33,8 +33,7 @@ import com.alibaba.jstorm.utils.JStormUtils;
  * 
  */
 public class AsyncLoopRunnable implements Runnable {
-    private static Logger LOG = LoggerFactory
-            .getLogger(AsyncLoopRunnable.class);
+    private static Logger LOG = LoggerFactory.getLogger(AsyncLoopRunnable.class);
 
    // shutdown starts as false so the loop keeps running until it is stopped explicitly
     private static AtomicBoolean shutdown = new AtomicBoolean(false);

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/callback/AsyncLoopThread.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/callback/AsyncLoopThread.java b/jstorm-core/src/main/java/com/alibaba/jstorm/callback/AsyncLoopThread.java
index ce49c51..2f722b9 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/callback/AsyncLoopThread.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/callback/AsyncLoopThread.java
@@ -28,16 +28,14 @@ import com.alibaba.jstorm.utils.JStormUtils;
 import com.alibaba.jstorm.utils.SmartThread;
 
 /**
- * Wrapper Timer thread Every several seconds execute afn, if something is run,
- * run kill_fn
+ * Wrapper timer thread: executes afn every several seconds; if something goes wrong, runs kill_fn
  * 
  * 
  * @author yannian
  * 
  */
 public class AsyncLoopThread implements SmartThread {
-    private static final Logger LOG = LoggerFactory
-            .getLogger(AsyncLoopThread.class);
+    private static final Logger LOG = LoggerFactory.getLogger(AsyncLoopThread.class);
 
     private Thread thread;
 
@@ -47,18 +45,15 @@ public class AsyncLoopThread implements SmartThread {
         this.init(afn, false, Thread.NORM_PRIORITY, true);
     }
 
-    public AsyncLoopThread(RunnableCallback afn, boolean daemon, int priority,
-            boolean start) {
+    public AsyncLoopThread(RunnableCallback afn, boolean daemon, int priority, boolean start) {
         this.init(afn, daemon, priority, start);
     }
 
-    public AsyncLoopThread(RunnableCallback afn, boolean daemon,
-            RunnableCallback kill_fn, int priority, boolean start) {
+    public AsyncLoopThread(RunnableCallback afn, boolean daemon, RunnableCallback kill_fn, int priority, boolean start) {
         this.init(afn, daemon, kill_fn, priority, start);
     }
 
-    public void init(RunnableCallback afn, boolean daemon, int priority,
-            boolean start) {
+    public void init(RunnableCallback afn, boolean daemon, int priority, boolean start) {
         RunnableCallback kill_fn = new AsyncLoopDefaultKill();
         this.init(afn, daemon, kill_fn, priority, start);
     }
@@ -72,8 +67,7 @@ public class AsyncLoopThread implements SmartThread {
      * @param args_fn
      * @param start
      */
-    private void init(RunnableCallback afn, boolean daemon,
-            RunnableCallback kill_fn, int priority, boolean start) {
+    private void init(RunnableCallback afn, boolean daemon, RunnableCallback kill_fn, int priority, boolean start) {
         if (kill_fn == null) {
             kill_fn = new AsyncLoopDefaultKill();
         }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/callback/DefaultWatcherCallBack.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/callback/DefaultWatcherCallBack.java b/jstorm-core/src/main/java/com/alibaba/jstorm/callback/DefaultWatcherCallBack.java
index 4f1764b..132418f 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/callback/DefaultWatcherCallBack.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/callback/DefaultWatcherCallBack.java
@@ -33,13 +33,11 @@ import com.alibaba.jstorm.zk.ZkKeeperStates;
  */
 public class DefaultWatcherCallBack implements WatcherCallBack {
 
-    private static Logger LOG = LoggerFactory
-            .getLogger(DefaultWatcherCallBack.class);
+    private static Logger LOG = LoggerFactory.getLogger(DefaultWatcherCallBack.class);
 
     @Override
     public void execute(KeeperState state, EventType type, String path) {
-        LOG.info("Zookeeper state update:" + ZkKeeperStates.getStateName(state)
-                + "," + ZkEventTypes.getStateName(type) + "," + path);
+        LOG.info("Zookeeper state update:" + ZkKeeperStates.getStateName(state) + "," + ZkEventTypes.getStateName(type) + "," + path);
     }
 
 }
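
DefaultWatcherCallBack only logs each ZooKeeper state transition; any other policy can be dropped in behind the same WatcherCallBack interface (assumed here to live in com.alibaba.jstorm.callback, as the class above does). For instance, a sketch that escalates on session expiry, illustrative and not part of the patch:

import org.apache.zookeeper.Watcher.Event.EventType;
import org.apache.zookeeper.Watcher.Event.KeeperState;

import com.alibaba.jstorm.callback.WatcherCallBack;

public class AlertingWatcherCallBack implements WatcherCallBack {
    @Override
    public void execute(KeeperState state, EventType type, String path) {
        // Illustrative policy: escalate on session expiry instead of just logging.
        if (state == KeeperState.Expired) {
            System.err.println("ZooKeeper session expired, last path: " + path);
        }
    }
}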

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/DelayStatusTransitionCallback.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/DelayStatusTransitionCallback.java b/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/DelayStatusTransitionCallback.java
index 79ef633..5670220 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/DelayStatusTransitionCallback.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/DelayStatusTransitionCallback.java
@@ -36,15 +36,13 @@ import com.alibaba.jstorm.utils.JStormUtils;
 /**
  * 
  * 
- * The action when nimbus receive kill command 1. set the topology status as
- * target 2. wait 2 * Timeout seconds later, do removing topology from ZK
+ * The action when nimbus receives a kill command: 1. set the topology status to the target status 2. wait 2 * timeout seconds, then remove the topology from ZK
  * 
  * @author Longda
  */
 public class DelayStatusTransitionCallback extends BaseCallback {
 
-    private static Logger LOG = LoggerFactory
-            .getLogger(DelayStatusTransitionCallback.class);
+    private static Logger LOG = LoggerFactory.getLogger(DelayStatusTransitionCallback.class);
 
     public static final int DEFAULT_DELAY_SECONDS = 30;
 
@@ -54,8 +52,7 @@ public class DelayStatusTransitionCallback extends BaseCallback {
     protected StatusType newType;
     protected StatusType nextAction;
 
-    public DelayStatusTransitionCallback(NimbusData data, String topologyid,
-            StormStatus oldStatus, StatusType newType, StatusType nextAction) {
+    public DelayStatusTransitionCallback(NimbusData data, String topologyid, StormStatus oldStatus, StatusType newType, StatusType nextAction) {
         this.data = data;
         this.topologyid = topologyid;
         this.oldStatus = oldStatus;
@@ -73,13 +70,8 @@ public class DelayStatusTransitionCallback extends BaseCallback {
             Map<?, ?> map = null;
             try {
 
-                map =
-                        StormConfig.read_nimbus_topology_conf(data.getConf(),
-                                topologyid);
-                delaySecs =
-                        JStormUtils.parseInt(
-                                map.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS),
-                                DEFAULT_DELAY_SECONDS);
+                map = StormConfig.read_nimbus_topology_conf(data.getConf(), topologyid);
+                delaySecs = JStormUtils.parseInt(map.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS), DEFAULT_DELAY_SECONDS);
             } catch (Exception e) {
                 LOG.info("Failed to get topology configuration " + topologyid);
             }
@@ -98,12 +90,9 @@ public class DelayStatusTransitionCallback extends BaseCallback {
     @Override
     public <T> Object execute(T... args) {
         int delaySecs = getDelaySeconds(args);
-        LOG.info("Delaying event " + newType + " for " + delaySecs
-                + " secs for " + topologyid);
+        LOG.info("Delaying event " + newType + " for " + delaySecs + " secs for " + topologyid);
 
-        data.getScheduExec().schedule(
-                new DelayEventRunnable(data, topologyid, nextAction, args),
-                delaySecs, TimeUnit.SECONDS);
+        data.getScheduExec().schedule(new DelayEventRunnable(data, topologyid, nextAction, args), delaySecs, TimeUnit.SECONDS);
 
         return new StormStatus(delaySecs, newType);
     }
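
The callback reads the per-topology delay (falling back to DEFAULT_DELAY_SECONDS) and hands the follow-up transition to a scheduled executor rather than blocking. The same delay-then-fire shape with a plain ScheduledExecutorService; names are illustrative, not JStorm API:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class DelayDemo {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService exec = Executors.newSingleThreadScheduledExecutor();
        int delaySecs = 2; // would come from TOPOLOGY_MESSAGE_TIMEOUT_SECS in the real callback

        // Schedule the follow-up action instead of blocking the caller.
        exec.schedule(new Runnable() {
            @Override
            public void run() {
                System.out.println("next transition fires");
            }
        }, delaySecs, TimeUnit.SECONDS);

        exec.shutdown(); // already-scheduled one-shot tasks still run by default
        exec.awaitTermination(5, TimeUnit.SECONDS);
    }
}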

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/DoRebalanceTransitionCallback.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/DoRebalanceTransitionCallback.java b/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/DoRebalanceTransitionCallback.java
index 41706b3..5060aec 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/DoRebalanceTransitionCallback.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/DoRebalanceTransitionCallback.java
@@ -17,54 +17,44 @@
  */
 package com.alibaba.jstorm.callback.impl;
 
-import java.util.*;
-import java.util.Map.Entry;
-
-import org.apache.log4j.Logger;
-
 import backtype.storm.Config;
 import backtype.storm.generated.Bolt;
 import backtype.storm.generated.SpoutSpec;
 import backtype.storm.generated.StormTopology;
 import backtype.storm.utils.Utils;
-
 import com.alibaba.jstorm.callback.BaseCallback;
-import com.alibaba.jstorm.client.ConfigExtension;
 import com.alibaba.jstorm.cluster.Common;
 import com.alibaba.jstorm.cluster.StormClusterState;
 import com.alibaba.jstorm.cluster.StormConfig;
 import com.alibaba.jstorm.cluster.StormStatus;
-import com.alibaba.jstorm.daemon.nimbus.NimbusData;
-import com.alibaba.jstorm.daemon.nimbus.NimbusUtils;
-import com.alibaba.jstorm.daemon.nimbus.StatusType;
-import com.alibaba.jstorm.daemon.nimbus.TopologyAssign;
-import com.alibaba.jstorm.daemon.nimbus.TopologyAssignEvent;
+import com.alibaba.jstorm.daemon.nimbus.*;
 import com.alibaba.jstorm.task.TaskInfo;
 import com.alibaba.jstorm.task.TkHbCacheTime;
 import com.alibaba.jstorm.utils.JStormUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.*;
+import java.util.Map.Entry;
 
 /**
  * Do real rebalance action.
  * 
- * After nimbus receive one rebalance command, it will do as following: 1. set
- * topology status as rebalancing 2. delay 2 * timeout seconds 3. do this
- * callback
+ * After nimbus receives a rebalance command, it does the following: 1. set topology status to rebalancing 2. delay 2 * timeout seconds 3. run this callback
  * 
  * @author Xin.Li/Longda
  * 
  */
 public class DoRebalanceTransitionCallback extends BaseCallback {
 
-    private static Logger LOG = Logger
-            .getLogger(DoRebalanceTransitionCallback.class);
+    private static Logger LOG = LoggerFactory.getLogger(DoRebalanceTransitionCallback.class);
 
     private NimbusData data;
     private String topologyid;
     private StormStatus oldStatus;
     private Set<Integer> newTasks;
 
-    public DoRebalanceTransitionCallback(NimbusData data, String topologyid,
-            StormStatus status) {
+    public DoRebalanceTransitionCallback(NimbusData data, String topologyid, StormStatus status) {
         this.data = data;
         this.topologyid = topologyid;
         this.oldStatus = status;
@@ -87,28 +77,17 @@ public class DoRebalanceTransitionCallback extends BaseCallback {
                 Map stormConf = data.getConf();
 
                 // Update topology code
-                Map topoConf =
-                        StormConfig.read_nimbus_topology_conf(stormConf,
-                                topologyid);
-                StormTopology rawOldTopology =
-                        StormConfig.read_nimbus_topology_code(stormConf,
-                                topologyid);
-                StormTopology rawNewTopology =
-                        NimbusUtils.normalizeTopology(conf, rawOldTopology,
-                                true);
+                Map topoConf = StormConfig.read_nimbus_topology_conf(stormConf, topologyid);
+                StormTopology rawOldTopology = StormConfig.read_nimbus_topology_code(stormConf, topologyid);
+                StormTopology rawNewTopology = NimbusUtils.normalizeTopology(conf, rawOldTopology, true);
                 StormTopology sysOldTopology = rawOldTopology.deepCopy();
                 StormTopology sysNewTopology = rawNewTopology.deepCopy();
                 if (conf.get(Config.TOPOLOGY_ACKER_EXECUTORS) != null) {
                     Common.add_acker(topoConf, sysOldTopology);
                     Common.add_acker(conf, sysNewTopology);
-                    int ackerNum =
-                            JStormUtils.parseInt(conf
-                                    .get(Config.TOPOLOGY_ACKER_EXECUTORS));
-                    int oldAckerNum =
-                            JStormUtils.parseInt(topoConf
-                                    .get(Config.TOPOLOGY_ACKER_EXECUTORS));
-                    LOG.info("Update acker from oldAckerNum=" + oldAckerNum
-                            + " to ackerNum=" + ackerNum);
+                    int ackerNum = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_ACKER_EXECUTORS));
+                    int oldAckerNum = JStormUtils.parseInt(topoConf.get(Config.TOPOLOGY_ACKER_EXECUTORS));
+                    LOG.info("Update acker from oldAckerNum=" + oldAckerNum + " to ackerNum=" + ackerNum);
                     topoConf.put(Config.TOPOLOGY_ACKER_EXECUTORS, ackerNum);
                     isConfUpdate = true;
                 }
@@ -118,32 +97,25 @@ public class DoRebalanceTransitionCallback extends BaseCallback {
                 isSetTaskInfo = true;
 
                 // If everything is OK, write topology code into disk
-                StormConfig.write_nimbus_topology_code(stormConf, topologyid,
-                        Utils.serialize(rawNewTopology));
+                StormConfig.write_nimbus_topology_code(stormConf, topologyid, Utils.serialize(rawNewTopology));
 
                 // Update topology conf if worker num has been updated
                 Set<Object> keys = conf.keySet();
-                Integer workerNum =
-                        JStormUtils.parseInt(conf.get(Config.TOPOLOGY_WORKERS));
+                Integer workerNum = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_WORKERS));
                 if (workerNum != null) {
-                    Integer oldWorkerNum =
-                            JStormUtils.parseInt(topoConf
-                                    .get(Config.TOPOLOGY_WORKERS));
+                    Integer oldWorkerNum = JStormUtils.parseInt(topoConf.get(Config.TOPOLOGY_WORKERS));
                     topoConf.put(Config.TOPOLOGY_WORKERS, workerNum);
                     isConfUpdate = true;
 
-                    LOG.info("Update worker num from " + oldWorkerNum + " to "
-                            + workerNum);
+                    LOG.info("Update worker num from " + oldWorkerNum + " to " + workerNum);
                 }
 
                 if (keys.contains(Config.ISOLATION_SCHEDULER_MACHINES)) {
-                    topoConf.put(Config.ISOLATION_SCHEDULER_MACHINES,
-                            conf.get(Config.ISOLATION_SCHEDULER_MACHINES));
+                    topoConf.put(Config.ISOLATION_SCHEDULER_MACHINES, conf.get(Config.ISOLATION_SCHEDULER_MACHINES));
                 }
 
                 if (isConfUpdate) {
-                    StormConfig.write_nimbus_topology_conf(stormConf,
-                            topologyid, topoConf);
+                    StormConfig.write_nimbus_topology_conf(stormConf, topologyid, topoConf);
                 }
             }
 
@@ -153,85 +125,66 @@ public class DoRebalanceTransitionCallback extends BaseCallback {
             event.setScratch(true);
             event.setOldStatus(oldStatus);
             event.setReassign(reassign);
-
+            if (conf != null)
+                event.setScaleTopology(true);
             TopologyAssign.push(event);
+            event.waitFinish();
         } catch (Exception e) {
             LOG.error("do-rebalance error!", e);
             // Rollback the changes on ZK
             if (isSetTaskInfo) {
-                    try {
-                        StormClusterState clusterState =
-                                data.getStormClusterState();
-                        clusterState.remove_task(topologyid, newTasks);
-                    } catch (Exception e1) {
-                        LOG.error(
-                                "Failed to rollback the changes on ZK for task-"
-                                        + newTasks, e);
-                    }
+                try {
+                    StormClusterState clusterState = data.getStormClusterState();
+                    clusterState.remove_task(topologyid, newTasks);
+                } catch (Exception e1) {
+                    LOG.error("Failed to rollback the changes on ZK for task-" + newTasks, e);
                 }
             }
+        }
 
         DelayStatusTransitionCallback delayCallback =
-                new DelayStatusTransitionCallback(data, topologyid, oldStatus,
-                        StatusType.rebalancing, StatusType.done_rebalance);
+                new DelayStatusTransitionCallback(data, topologyid, oldStatus, StatusType.rebalancing, StatusType.done_rebalance);
         return delayCallback.execute();
     }
 
-    private void setTaskInfo(StormTopology oldTopology,
-            StormTopology newTopology) throws Exception {
+    private void setTaskInfo(StormTopology oldTopology, StormTopology newTopology) throws Exception {
         StormClusterState clusterState = data.getStormClusterState();
         // Retrieve the max task ID
-        TreeSet<Integer> taskIds =
-                new TreeSet<Integer>(clusterState.task_ids(topologyid));
+        TreeSet<Integer> taskIds = new TreeSet<Integer>(clusterState.task_ids(topologyid));
         int cnt = taskIds.descendingIterator().next();
 
         cnt = setBoltInfo(oldTopology, newTopology, cnt, clusterState);
         cnt = setSpoutInfo(oldTopology, newTopology, cnt, clusterState);
     }
 
-    private int setBoltInfo(StormTopology oldTopology,
-            StormTopology newTopology, int cnt, StormClusterState clusterState)
-            throws Exception {
+    private int setBoltInfo(StormTopology oldTopology, StormTopology newTopology, int cnt, StormClusterState clusterState) throws Exception {
         Map<String, Bolt> oldBolts = oldTopology.get_bolts();
         Map<String, Bolt> bolts = newTopology.get_bolts();
         for (Entry<String, Bolt> entry : oldBolts.entrySet()) {
             String boltName = entry.getKey();
             Bolt oldBolt = entry.getValue();
             Bolt bolt = bolts.get(boltName);
-            if (oldBolt.get_common().get_parallelism_hint() > bolt.get_common()
-                    .get_parallelism_hint()) {
-                int removedTaskNum =
-                        oldBolt.get_common().get_parallelism_hint()
-                                - bolt.get_common().get_parallelism_hint();
-                TreeSet<Integer> taskIds =
-                        new TreeSet<Integer>(
-                                clusterState.task_ids_by_componentId(
-                                        topologyid, boltName));
-                Iterator<Integer> descendIterator =
-                        taskIds.descendingIterator();
+            if (oldBolt.get_common().get_parallelism_hint() > bolt.get_common().get_parallelism_hint()) {
+                int removedTaskNum = oldBolt.get_common().get_parallelism_hint() - bolt.get_common().get_parallelism_hint();
+                TreeSet<Integer> taskIds = new TreeSet<Integer>(clusterState.task_ids_by_componentId(topologyid, boltName));
+                Iterator<Integer> descendIterator = taskIds.descendingIterator();
                 while (--removedTaskNum >= 0) {
                     int taskId = descendIterator.next();
                     removeTask(topologyid, taskId, clusterState);
-                    LOG.info("Remove bolt task, taskId=" + taskId + " for "
-                            + boltName);
+                    LOG.info("Remove bolt task, taskId=" + taskId + " for " + boltName);
                 }
-            } else if (oldBolt.get_common().get_parallelism_hint() == bolt
-                    .get_common().get_parallelism_hint()) {
+            } else if (oldBolt.get_common().get_parallelism_hint() == bolt.get_common().get_parallelism_hint()) {
                 continue;
             } else {
-                int delta =
-                        bolt.get_common().get_parallelism_hint()
-                                - oldBolt.get_common().get_parallelism_hint();
+                int delta = bolt.get_common().get_parallelism_hint() - oldBolt.get_common().get_parallelism_hint();
                 Map<Integer, TaskInfo> taskInfoMap = new HashMap<Integer, TaskInfo>();
 
                 for (int i = 1; i <= delta; i++) {
                     cnt++;
-                    TaskInfo taskInfo =
-                            new TaskInfo((String) entry.getKey(), "bolt");
+                    TaskInfo taskInfo = new TaskInfo((String) entry.getKey(), "bolt");
                     taskInfoMap.put(cnt, taskInfo);
                     newTasks.add(cnt);
-                    LOG.info("Setup new bolt task, taskId=" + cnt + " for "
-                            + boltName);
+                    LOG.info("Setup new bolt task, taskId=" + cnt + " for " + boltName);
                 }
                 clusterState.add_task(topologyid, taskInfoMap);
             }
@@ -240,52 +193,35 @@ public class DoRebalanceTransitionCallback extends BaseCallback {
         return cnt;
     }
 
-    private int setSpoutInfo(StormTopology oldTopology,
-            StormTopology newTopology, int cnt, StormClusterState clusterState)
-            throws Exception {
+    private int setSpoutInfo(StormTopology oldTopology, StormTopology newTopology, int cnt, StormClusterState clusterState) throws Exception {
         Map<String, SpoutSpec> oldSpouts = oldTopology.get_spouts();
         Map<String, SpoutSpec> spouts = newTopology.get_spouts();
         for (Entry<String, SpoutSpec> entry : oldSpouts.entrySet()) {
             String spoutName = entry.getKey();
             SpoutSpec oldSpout = entry.getValue();
             SpoutSpec spout = spouts.get(spoutName);
-            if (oldSpout.get_common().get_parallelism_hint() > spout
-                    .get_common().get_parallelism_hint()) {
-                int removedTaskNum =
-                        oldSpout.get_common().get_parallelism_hint()
-                                - spout.get_common().get_parallelism_hint();
-                TreeSet<Integer> taskIds =
-                        new TreeSet<Integer>(
-                                clusterState.task_ids_by_componentId(
-                                        topologyid, spoutName));
-                Iterator<Integer> descendIterator =
-                        taskIds.descendingIterator();
+            if (oldSpout.get_common().get_parallelism_hint() > spout.get_common().get_parallelism_hint()) {
+                int removedTaskNum = oldSpout.get_common().get_parallelism_hint() - spout.get_common().get_parallelism_hint();
+                TreeSet<Integer> taskIds = new TreeSet<Integer>(clusterState.task_ids_by_componentId(topologyid, spoutName));
+                Iterator<Integer> descendIterator = taskIds.descendingIterator();
                 while (--removedTaskNum >= 0) {
                     int taskId = descendIterator.next();
                     removeTask(topologyid, taskId, clusterState);
-                    LOG.info("Remove spout task, taskId=" + taskId + " for "
-                            + spoutName);
+                    LOG.info("Remove spout task, taskId=" + taskId + " for " + spoutName);
                 }
 
-
-
-            } else if (oldSpout.get_common().get_parallelism_hint() == spout
-                    .get_common().get_parallelism_hint()) {
+            } else if (oldSpout.get_common().get_parallelism_hint() == spout.get_common().get_parallelism_hint()) {
                 continue;
             } else {
-                int delta =
-                        spout.get_common().get_parallelism_hint()
-                                - oldSpout.get_common().get_parallelism_hint();
+                int delta = spout.get_common().get_parallelism_hint() - oldSpout.get_common().get_parallelism_hint();
                 Map<Integer, TaskInfo> taskInfoMap = new HashMap<Integer, TaskInfo>();
 
                 for (int i = 1; i <= delta; i++) {
                     cnt++;
-                    TaskInfo taskInfo =
-                            new TaskInfo((String) entry.getKey(), "spout");
+                    TaskInfo taskInfo = new TaskInfo((String) entry.getKey(), "spout");
                     taskInfoMap.put(cnt, taskInfo);
                     newTasks.add(cnt);
-                    LOG.info("Setup new spout task, taskId=" + cnt + " for "
-                            + spoutName);
+                    LOG.info("Setup new spout task, taskId=" + cnt + " for " + spoutName);
                 }
                 clusterState.add_task(topologyid, taskInfoMap);
             }
@@ -294,12 +230,10 @@ public class DoRebalanceTransitionCallback extends BaseCallback {
         return cnt;
     }
 
-    private void removeTask(String topologyId, int taskId,
-            StormClusterState clusterState) throws Exception {
+    private void removeTask(String topologyId, int taskId, StormClusterState clusterState) throws Exception {
        Set<Integer> taskIds = new HashSet<Integer>();
        taskIds.add(taskId); // note: new HashSet<Integer>(taskId) would only set the initial capacity
         clusterState.remove_task(topologyid, taskIds);
-        Map<Integer, TkHbCacheTime> TkHbs =
-                data.getTaskHeartbeatsCache(topologyid, false);
+        Map<Integer, TkHbCacheTime> TkHbs = data.getTaskHeartbeatsCache(topologyid, false);
         if (TkHbs != null) {
             TkHbs.remove(taskId);
         }
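
For scale-down, setBoltInfo and setSpoutInfo walk the component's task ids from the highest down and remove exactly the parallelism delta. That loop shape in isolation, with hypothetical numbers:

import java.util.Iterator;
import java.util.TreeSet;

public class ScaleDownDemo {
    public static void main(String[] args) {
        TreeSet<Integer> taskIds = new TreeSet<Integer>();
        for (int i = 1; i <= 5; i++) {
            taskIds.add(i);
        }

        int oldParallelism = 5, newParallelism = 3;
        int removedTaskNum = oldParallelism - newParallelism;

        // Drop the highest task ids first, exactly removedTaskNum of them.
        Iterator<Integer> descend = taskIds.descendingIterator();
        while (--removedTaskNum >= 0) {
            System.out.println("remove task " + descend.next()); // prints 5, then 4
        }
    }
}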

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/KillTransitionCallback.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/KillTransitionCallback.java b/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/KillTransitionCallback.java
index 4dad890..e169fab 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/KillTransitionCallback.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/KillTransitionCallback.java
@@ -19,15 +19,14 @@ package com.alibaba.jstorm.callback.impl;
 
 import com.alibaba.jstorm.daemon.nimbus.NimbusData;
 import com.alibaba.jstorm.daemon.nimbus.StatusType;
+import com.alibaba.jstorm.daemon.nimbus.TopologyMetricsRunnable;
 
 /**
  * The action when nimbus receive killed command.
- * 
- * 1. change current topology status as killed 2. one TIMEOUT seconds later, do
- * remove action, which remove topology from ZK
+ * <p/>
+ * 1. change the current topology status to killed 2. TIMEOUT seconds later, run the remove action, which removes the topology from ZK
  * 
  * @author Longda
- * 
  */
 public class KillTransitionCallback extends DelayStatusTransitionCallback {
 
@@ -35,4 +34,15 @@ public class KillTransitionCallback extends DelayStatusTransitionCallback {
         super(data, topologyid, null, StatusType.killed, StatusType.remove);
     }
 
+    @Override
+    public <T> Object execute(T... args) {
+        TopologyMetricsRunnable.KillTopologyEvent event = new TopologyMetricsRunnable.KillTopologyEvent();
+        event.clusterName = this.data.getClusterName();
+        event.topologyId = this.topologyid;
+        event.timestamp = System.currentTimeMillis();
+
+        this.data.getMetricRunnable().pushEvent(event);
+        return super.execute(args);
+    }
+
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/ReassignTransitionCallback.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/ReassignTransitionCallback.java b/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/ReassignTransitionCallback.java
index 1b4841c..ffa2ec8 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/ReassignTransitionCallback.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/ReassignTransitionCallback.java
@@ -24,9 +24,8 @@ import com.alibaba.jstorm.daemon.nimbus.TopologyAssign;
 import com.alibaba.jstorm.daemon.nimbus.TopologyAssignEvent;
 
 /**
- * 1. every Config.NIMBUS_MONITOR_FREQ_SECS will call MonitorRunnable 2.
- * MonitorRunnable will call NimbusData.transition 3. NimbusData.transition will
- * this callback
+ * 1. every Config.NIMBUS_MONITOR_FREQ_SECS, MonitorRunnable is called 2. MonitorRunnable calls NimbusData.transition 3. NimbusData.transition calls this
+ * callback
  * 
  * 
  */
@@ -42,8 +41,7 @@ public class ReassignTransitionCallback extends BaseCallback {
         this.oldStatus = null;
     }
 
-    public ReassignTransitionCallback(NimbusData data, String topologyid,
-            StormStatus oldStatus) {
+    public ReassignTransitionCallback(NimbusData data, String topologyid, StormStatus oldStatus) {
         this.data = data;
         this.topologyid = topologyid;
         this.oldStatus = oldStatus;
@@ -59,6 +57,7 @@ public class ReassignTransitionCallback extends BaseCallback {
         assignEvent.setOldStatus(oldStatus);
 
         TopologyAssign.push(assignEvent);
+        assignEvent.waitFinish();
 
         return null;
     }
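
The added assignEvent.waitFinish() turns the push into a synchronous handshake: the caller blocks until the assignment thread completes the event. A sketch of that handshake using CountDownLatch, which is an assumption about (not a copy of) what TopologyAssignEvent does internally:

import java.util.concurrent.CountDownLatch;

public class BlockingEvent {
    private final CountDownLatch done = new CountDownLatch(1);

    public void finish() {
        done.countDown(); // called by the worker when processing ends
    }

    public void waitFinish() throws InterruptedException {
        done.await(); // caller blocks here until finish() is called
    }

    public static void main(String[] args) throws InterruptedException {
        final BlockingEvent event = new BlockingEvent();
        new Thread(new Runnable() {
            @Override
            public void run() {
                // ... process the event ...
                event.finish();
            }
        }).start();
        event.waitFinish(); // resumes only after the worker calls finish()
        System.out.println("event handled");
    }
}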

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/RebalanceTransitionCallback.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/RebalanceTransitionCallback.java b/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/RebalanceTransitionCallback.java
index f65f542..476f4f5 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/RebalanceTransitionCallback.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/RebalanceTransitionCallback.java
@@ -22,21 +22,17 @@ import com.alibaba.jstorm.daemon.nimbus.NimbusData;
 import com.alibaba.jstorm.daemon.nimbus.StatusType;
 
 /**
- * The action when nimbus receive rebalance command. Rebalance command is only
- * valid when current status is active
+ * The action when nimbus receives a rebalance command. A rebalance command is only valid when the current status is active
  * 
- * 1. change current topology status as rebalancing 2. do_rebalance action after
- * 2 * TIMEOUT seconds
+ * 1. change the current topology status to rebalancing 2. run the do_rebalance action after 2 * TIMEOUT seconds
  * 
  * @author Lixin/Longda
  * 
  */
 public class RebalanceTransitionCallback extends DelayStatusTransitionCallback {
 
-    public RebalanceTransitionCallback(NimbusData data, String topologyid,
-            StormStatus status) {
-        super(data, topologyid, status, StatusType.rebalancing,
-                StatusType.do_rebalance);
+    public RebalanceTransitionCallback(NimbusData data, String topologyid, StormStatus status) {
+        super(data, topologyid, status, StatusType.rebalancing, StatusType.do_rebalance);
     }
 
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/RemoveTransitionCallback.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/RemoveTransitionCallback.java b/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/RemoveTransitionCallback.java
index 231d8e9..8052c40 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/RemoveTransitionCallback.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/RemoveTransitionCallback.java
@@ -35,8 +35,7 @@ import com.alibaba.jstorm.daemon.nimbus.NimbusUtils;
  */
 public class RemoveTransitionCallback extends BaseCallback {
 
-    private static Logger LOG = LoggerFactory
-            .getLogger(RemoveTransitionCallback.class);
+    private static Logger LOG = LoggerFactory.getLogger(RemoveTransitionCallback.class);
 
     private NimbusData data;
     private String topologyid;
@@ -51,13 +50,13 @@ public class RemoveTransitionCallback extends BaseCallback {
         LOG.info("Begin to remove topology: " + topologyid);
         try {
 
-            StormBase stormBase =
-                    data.getStormClusterState().storm_base(topologyid, null);
+            StormBase stormBase = data.getStormClusterState().storm_base(topologyid, null);
             if (stormBase == null) {
                 LOG.info("Topology " + topologyid + " has been removed ");
                 return null;
             }
             data.getStormClusterState().remove_storm(topologyid);
+            data.getTasksHeartbeat().remove(topologyid);
             NimbusUtils.removeTopologyTaskTimeout(data, topologyid);
             LOG.info("Successfully removed ZK items topology: " + topologyid);
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/UpdateConfTransitionCallback.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/UpdateConfTransitionCallback.java b/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/UpdateConfTransitionCallback.java
deleted file mode 100644
index ca4e0ee..0000000
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/UpdateConfTransitionCallback.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.alibaba.jstorm.callback.impl;
-
-import java.util.Map;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.alibaba.jstorm.callback.BaseCallback;
-import com.alibaba.jstorm.cluster.StormClusterState;
-import com.alibaba.jstorm.cluster.StormConfig;
-import com.alibaba.jstorm.cluster.StormStatus;
-import com.alibaba.jstorm.daemon.nimbus.NimbusData;
-import com.alibaba.jstorm.schedule.Assignment;
-import com.alibaba.jstorm.schedule.Assignment.AssignmentType;
-
-/**
- * Update user configuration
- * 
- * @author Basti.lj
- */
-public class UpdateConfTransitionCallback extends BaseCallback {
-
-    private static Logger LOG = LoggerFactory
-            .getLogger(DelayStatusTransitionCallback.class);
-
-    public static final int DEFAULT_DELAY_SECONDS = 30;
-
-    private NimbusData data;
-    private String topologyId;
-    private StormStatus currentStatus;
-
-    public UpdateConfTransitionCallback(NimbusData data, String topologyId,
-            StormStatus currentStatus) {
-        this.data = data;
-        this.topologyId = topologyId;
-        this.currentStatus = currentStatus;
-    }
-
-    @Override
-    public <T> Object execute(T... args) {
-        StormClusterState clusterState = data.getStormClusterState();
-        try {
-            Map userConf = (Map) args[0];
-            Map topoConf =
-                    StormConfig.read_nimbus_topology_conf(data.getConf(),
-                            topologyId);
-            topoConf.putAll(userConf);
-            StormConfig.write_nimbus_topology_conf(data.getConf(), topologyId, topoConf);
-            
-            Assignment assignment =
-                    clusterState.assignment_info(topologyId, null);
-            assignment.setAssignmentType(AssignmentType.Config);
-            assignment.updateTimeStamp();
-            clusterState.set_assignment(topologyId, assignment);
-            LOG.info("Successfully update new config to ZK for " + topologyId);
-        } catch (Exception e) {
-            LOG.error("Failed to update user configuartion.", e);
-        }
-        return currentStatus;
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/UpdateTopologyTransitionCallback.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/UpdateTopologyTransitionCallback.java b/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/UpdateTopologyTransitionCallback.java
new file mode 100644
index 0000000..706b98b
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/callback/impl/UpdateTopologyTransitionCallback.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.callback.impl;
+
+import java.util.Map;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.alibaba.jstorm.callback.BaseCallback;
+import com.alibaba.jstorm.cluster.StormClusterState;
+import com.alibaba.jstorm.cluster.StormConfig;
+import com.alibaba.jstorm.cluster.StormStatus;
+import com.alibaba.jstorm.daemon.nimbus.NimbusData;
+import com.alibaba.jstorm.schedule.Assignment;
+import com.alibaba.jstorm.schedule.Assignment.AssignmentType;
+
+/**
+ * Update user configuration
+ * 
+ * @author Basti.lj
+ */
+public class UpdateTopologyTransitionCallback extends BaseCallback {
+
+    private static Logger LOG = LoggerFactory.getLogger(UpdateTopologyTransitionCallback.class);
+
+    public static final int DEFAULT_DELAY_SECONDS = 30;
+
+    private NimbusData data;
+    private String topologyId;
+    private StormStatus currentStatus;
+
+    public UpdateTopologyTransitionCallback(NimbusData data, String topologyId, StormStatus currentStatus) {
+        this.data = data;
+        this.topologyId = topologyId;
+        this.currentStatus = currentStatus;
+    }
+
+    @Override
+    public <T> Object execute(T... args) {
+        StormClusterState clusterState = data.getStormClusterState();
+        try {
+            Map userConf = (Map) args[0];
+            Map topoConf = StormConfig.read_nimbus_topology_conf(data.getConf(), topologyId);
+            topoConf.putAll(userConf);
+            StormConfig.write_nimbus_topology_conf(data.getConf(), topologyId, topoConf);
+
+            Assignment assignment = clusterState.assignment_info(topologyId, null);
+            assignment.setAssignmentType(AssignmentType.UpdateTopology);
+            assignment.updateTimeStamp();
+            clusterState.set_assignment(topologyId, assignment);
+            LOG.info("Successfully update topology information to ZK for " + topologyId);
+        } catch (Exception e) {
+            LOG.error("Failed to update topology.", e);
+        }
+        return currentStatus;
+    }
+
+}


http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/IAuthorizer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/IAuthorizer.java b/jstorm-core/src/main/java/backtype/storm/security/auth/IAuthorizer.java
index d592bb7..7ed498b 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/IAuthorizer.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/IAuthorizer.java
@@ -20,27 +20,27 @@ package backtype.storm.security.auth;
 import java.util.Map;
 
 /**
- * Nimbus could be configured with an authorization plugin.
- * If not specified, all requests are authorized.
+ * Nimbus could be configured with an authorization plugin. If not specified, all requests are authorized.
  * 
- * You could specify the authorization plugin via storm parameter. For example:
- *  storm -c nimbus.authorization.class=backtype.storm.security.auth.NoopAuthorizer ...
- *  
- * You could also specify it via storm.yaml:
- *   nimbus.authorization.class: backtype.storm.security.auth.NoopAuthorizer
+ * You could specify the authorization plugin via storm parameter. For example: storm -c nimbus.authorization.class=backtype.storm.security.auth.NoopAuthorizer
+ * ...
+ * 
+ * You could also specify it via storm.yaml: nimbus.authorization.class: backtype.storm.security.auth.NoopAuthorizer
  */
 public interface IAuthorizer {
     /**
      * Invoked once immediately after construction
-     * @param conf Storm configuration 
+     * 
+     * @param storm_conf Storm configuration
      */
     void prepare(Map storm_conf);
-    
+
     /**
      * permit() method is invoked for each incoming Thrift request.
-     * @param context request context includes info about 
+     * 
+     * @param context request context (includes remote address, remote subject, and request ID)
      * @param operation operation name
-     * @param topology_storm configuration of targeted topology 
+     * @param topology_conf configuration of the targeted topology
     * @return true if the request is authorized, false if rejected
      */
     public boolean permit(ReqContext context, String operation, Map topology_conf);
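
For reference, a minimal sketch of an authorizer that permits every request while logging the caller; the class name LoggingPermitAllAuthorizer is hypothetical (NoopAuthorizer in the source tree plays the permit-all role), and it would be wired in via nimbus.authorization.class as the javadoc above describes:

    import java.util.Map;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    import backtype.storm.security.auth.IAuthorizer;
    import backtype.storm.security.auth.ReqContext;

    public class LoggingPermitAllAuthorizer implements IAuthorizer {
        private static final Logger LOG = LoggerFactory.getLogger(LoggingPermitAllAuthorizer.class);

        public void prepare(Map storm_conf) {
            // nothing to configure in this sketch
        }

        public boolean permit(ReqContext context, String operation, Map topology_conf) {
            // a real plugin would check context.principal() against the operation
            LOG.info("permitting " + operation + " from " + context.remoteAddress());
            return true;
        }
    }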

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/IAutoCredentials.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/IAutoCredentials.java b/jstorm-core/src/main/java/backtype/storm/security/auth/IAutoCredentials.java
index b3886da..16841d5 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/IAutoCredentials.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/IAutoCredentials.java
@@ -23,8 +23,7 @@ import java.util.Map;
 import javax.security.auth.Subject;
 
 /**
- * Provides a way to automatically push credentials to a topology and to
- * retreave them in the worker.
+ * Provides a way to automatically push credentials to a topology and to retrieve them in the worker.
  */
 public interface IAutoCredentials {
 
@@ -32,24 +31,26 @@ public interface IAutoCredentials {
 
     /**
      * Called to populate the credentials on the client side.
+     * 
      * @param credentials the credentials to be populated.
      */
     public void populateCredentials(Map<String, String> credentials);
 
     /**
      * Called to initially populate the subject on the worker side with credentials passed in.
+     * 
      * @param subject the subject to optionally put credentials in.
      * @param credentials the credentials to be used.
-     */ 
+     */
     public void populateSubject(Subject subject, Map<String, String> credentials);
 
-
     /**
-     * Called to update the subject on the worker side when new credentials are recieved.
-     * This means that populateSubject has already been called on this subject.  
+     * Called to update the subject on the worker side when new credentials are received. This means that populateSubject has already been called on this
+     * subject.
+     * 
      * @param subject the subject to optionally put credentials in.
      * @param credentials the credentials to be used.
-     */ 
+     */
     public void updateSubject(Subject subject, Map<String, String> credentials);
 
 }
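
A minimal sketch of the client/worker life cycle described above, assuming a made-up credential key "sketch.token" and the interface's prepare(Map) method, which this hunk does not show:

    import java.util.Map;

    import javax.security.auth.Subject;

    import backtype.storm.security.auth.IAutoCredentials;

    public class SketchTokenCredentials implements IAutoCredentials {
        private static final String KEY = "sketch.token"; // hypothetical key

        public void prepare(Map conf) {
            // nothing to configure in this sketch
        }

        public void populateCredentials(Map<String, String> credentials) {
            // client side: push the token at topology submission
            credentials.put(KEY, "initial-token");
        }

        public void populateSubject(Subject subject, Map<String, String> credentials) {
            // worker side: first load of the credentials into the subject
            subject.getPrivateCredentials().add(credentials.get(KEY));
        }

        public void updateSubject(Subject subject, Map<String, String> credentials) {
            // worker side: called after a renewal, so re-add the fresh token
            subject.getPrivateCredentials().add(credentials.get(KEY));
        }
    }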

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/ICredentialsRenewer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/ICredentialsRenewer.java b/jstorm-core/src/main/java/backtype/storm/security/auth/ICredentialsRenewer.java
index 3eaf6c4..34358f4 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/ICredentialsRenewer.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/ICredentialsRenewer.java
@@ -26,16 +26,18 @@ import java.util.Map;
  */
 public interface ICredentialsRenewer {
 
-   /**
-    * Called when initializing the service.
-    * @param conf the storm cluster configuration.
-    */ 
-   public void prepare(Map conf);
+    /**
+     * Called when initializing the service.
+     * 
+     * @param conf the storm cluster configuration.
+     */
+    public void prepare(Map conf);
 
     /**
      * Renew any credentials that need to be renewed. (Update the credentials if needed)
+     * 
      * @param credentials the credentials that may have something to renew.
      * @param topologyConf topology configuration.
-     */ 
+     */
     public void renew(Map<String, String> credentials, Map topologyConf);
 }
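
A matching renewer sketch; the "sketch.token" key and token format are assumptions carried over from the IAutoCredentials example above:

    import java.util.Map;

    import backtype.storm.security.auth.ICredentialsRenewer;

    public class SketchTokenRenewer implements ICredentialsRenewer {
        public void prepare(Map conf) {
            // cluster-wide settings (e.g. a renewal endpoint) would be read here
        }

        public void renew(Map<String, String> credentials, Map topologyConf) {
            // only touch the entry this renewer owns
            if (credentials.containsKey("sketch.token")) {
                credentials.put("sketch.token", "renewed-" + System.currentTimeMillis());
            }
        }
    }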

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/IGroupMappingServiceProvider.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/IGroupMappingServiceProvider.java b/jstorm-core/src/main/java/backtype/storm/security/auth/IGroupMappingServiceProvider.java
index 5590b81..865e950 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/IGroupMappingServiceProvider.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/IGroupMappingServiceProvider.java
@@ -26,13 +26,14 @@ public interface IGroupMappingServiceProvider {
 
     /**
      * Invoked once immediately after construction
+     * 
      * @param storm_conf Storm configuration
      */
     void prepare(Map storm_conf);
 
     /**
-     * Get all various group memberships of a given user.
-     * Returns EMPTY list in case of non-existing user
+     * Get all group memberships of a given user. Returns an EMPTY list for a non-existing user
+     * 
      * @param user User's name
      * @return group memberships of user
      * @throws IOException

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/IHttpCredentialsPlugin.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/IHttpCredentialsPlugin.java b/jstorm-core/src/main/java/backtype/storm/security/auth/IHttpCredentialsPlugin.java
index a012ce4..66dfcee 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/IHttpCredentialsPlugin.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/IHttpCredentialsPlugin.java
@@ -29,20 +29,22 @@ import backtype.storm.security.auth.ReqContext;
 public interface IHttpCredentialsPlugin {
     /**
      * Invoked once immediately after construction
+     * 
      * @param storm_conf Storm configuration
      */
     void prepare(Map storm_conf);
 
     /**
      * Gets the user name from the request.
+     * 
      * @param req the servlet request
      * @return the authenticated user, or null if none is authenticated.
      */
     String getUserName(HttpServletRequest req);
 
     /**
-     * Populates a given context with credentials information from an HTTP
-     * request.
+     * Populates a given context with credentials information from an HTTP request.
+     * 
      * @param req the servlet request
      * @return the context
      */

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/IPrincipalToLocal.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/IPrincipalToLocal.java b/jstorm-core/src/main/java/backtype/storm/security/auth/IPrincipalToLocal.java
index fca3d37..32b4564 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/IPrincipalToLocal.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/IPrincipalToLocal.java
@@ -22,18 +22,19 @@ import java.util.Map;
 import java.security.Principal;
 
 /**
- * Storm can be configured to launch worker processed as a given user.
- * Some transports need to map the Principal to a local user name.
+ * Storm can be configured to launch worker processes as a given user. Some transports need to map the Principal to a local user name.
  */
 public interface IPrincipalToLocal {
     /**
      * Invoked once immediately after construction
-     * @param conf Storm configuration 
+     * 
+     * @param storm_conf Storm configuration
      */
     void prepare(Map storm_conf);
-    
+
     /**
      * Convert a Principal to a local user name.
+     * 
      * @param principal the principal to convert
      * @return The local user name.
      */

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/ITransportPlugin.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/ITransportPlugin.java b/jstorm-core/src/main/java/backtype/storm/security/auth/ITransportPlugin.java
index 5ba2557..c3c657f 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/ITransportPlugin.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/ITransportPlugin.java
@@ -37,25 +37,28 @@ import backtype.storm.security.auth.ThriftConnectionType;
 public interface ITransportPlugin {
     /**
      * Invoked once immediately after construction
+     * 
      * @param type the type of connection this will process.
-     * @param storm_conf Storm configuration 
+     * @param storm_conf Storm configuration
      * @param login_conf login configuration
      */
     void prepare(ThriftConnectionType type, Map storm_conf, Configuration login_conf);
-    
+
     /**
      * Create a server associated with a given port, service handler, and purpose
+     * 
      * @param processor service handler
      * @return server
      */
     public TServer getServer(TProcessor processor) throws IOException, TTransportException;
 
     /**
-     * Connect to the specified server via framed transport 
+     * Connect to the specified server via framed transport
+     * 
      * @param transport The underlying Thrift transport.
      * @param serverHost server host
-     * @param asUser the user as which the connection should be established, and all the subsequent actions should be executed.
-     *               Only applicable when using secure storm cluster. A null/blank value here will just indicate to use the logged in user.
+     * @param asUser the user as which the connection should be established, and all the subsequent actions should be executed. Only applicable when using
+     *            secure storm cluster. A null/blank value here will just indicate to use the logged in user.
      */
     public TTransport connect(TTransport transport, String serverHost, String asUser) throws IOException, TTransportException;
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/KerberosPrincipalToLocal.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/KerberosPrincipalToLocal.java b/jstorm-core/src/main/java/backtype/storm/security/auth/KerberosPrincipalToLocal.java
index 35c7788..7ac6a6d 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/KerberosPrincipalToLocal.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/KerberosPrincipalToLocal.java
@@ -28,18 +28,21 @@ public class KerberosPrincipalToLocal implements IPrincipalToLocal {
 
     /**
      * Invoked once immediately after construction
-     * @param conf Storm configuration 
+     * 
+     * @param storm_conf Storm configuration
      */
-    public void prepare(Map storm_conf) {}
-    
+    public void prepare(Map storm_conf) {
+    }
+
     /**
      * Convert a Principal to a local user name.
+     * 
      * @param principal the principal to convert
      * @return The local user name.
      */
     public String toLocal(Principal principal) {
-      //This technically does not conform with rfc1964, but should work so
-      // long as you don't have any really odd names in your KDC.
-      return principal == null ? null : principal.getName().split("[/@]")[0];
+        // This technically does not conform with rfc1964, but should work so
+        // long as you don't have any really odd names in your KDC.
+        return principal == null ? null : principal.getName().split("[/@]")[0];
     }
 }
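
The split on "[/@]" keeps only the first component of the Kerberos name, dropping both the host part and the realm; a short usage sketch with made-up principal names:

    import javax.security.auth.kerberos.KerberosPrincipal;

    IPrincipalToLocal ptol = new KerberosPrincipalToLocal();
    // "storm/nimbus1.example.com@EXAMPLE.COM" -> "storm"
    String svc = ptol.toLocal(new KerberosPrincipal("storm/nimbus1.example.com@EXAMPLE.COM"));
    // "alice@EXAMPLE.COM" -> "alice"
    String user = ptol.toLocal(new KerberosPrincipal("alice@EXAMPLE.COM"));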

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/ReqContext.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/ReqContext.java b/jstorm-core/src/main/java/backtype/storm/security/auth/ReqContext.java
index a252f85..47f317c 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/ReqContext.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/ReqContext.java
@@ -31,10 +31,7 @@ import java.security.Principal;
 import javax.security.auth.Subject;
 
 /**
- * context request context includes info about 
- *      	   (1) remote address, 
- *             (2) remote subject and primary principal
- *             (3) request ID 
+ * Request context; includes info about (1) the remote address, (2) the remote subject and primary principal, and (3) the request ID
  */
 public class ReqContext {
     private static final AtomicInteger uniqueId = new AtomicInteger(0);
@@ -46,39 +43,37 @@ public class ReqContext {
 
     private static final Logger LOG = LoggerFactory.getLogger(ReqContext.class);
 
-
     /**
      * Get a request context associated with current thread
+     * 
      * @return
      */
     public static ReqContext context() {
         return ctxt.get();
     }
 
-    //each thread will have its own request context
-    private static final ThreadLocal < ReqContext > ctxt = 
-            new ThreadLocal < ReqContext > () {
-        @Override 
+    // each thread will have its own request context
+    private static final ThreadLocal<ReqContext> ctxt = new ThreadLocal<ReqContext>() {
+        @Override
         protected ReqContext initialValue() {
             return new ReqContext(AccessController.getContext());
         }
     };
 
-    //private constructor
+    // private constructor
     @VisibleForTesting
     public ReqContext(AccessControlContext acl_ctxt) {
         _subject = Subject.getSubject(acl_ctxt);
         _reqID = uniqueId.incrementAndGet();
     }
 
-    //private constructor
+    // private constructor
     @VisibleForTesting
     public ReqContext(Subject sub) {
         _subject = sub;
         _reqID = uniqueId.incrementAndGet();
     }
 
-
     /**
      * client address
      */
@@ -108,15 +103,18 @@ public class ReqContext {
      * The primary principal associated current subject
      */
     public Principal principal() {
-        if (_subject == null) return null;
+        if (_subject == null)
+            return null;
         Set<Principal> princs = _subject.getPrincipals();
-        if (princs.size()==0) return null;
+        if (princs.size() == 0)
+            return null;
         return (Principal) (princs.toArray()[0]);
     }
 
     public void setRealPrincipal(Principal realPrincipal) {
         this.realPrincipal = realPrincipal;
     }
+
     /**
      * The real principal associated with the subject.
      */
@@ -126,12 +124,13 @@ public class ReqContext {
 
     /**
      * Returns true if this request is an impersonation request.
+     * 
      * @return
      */
     public boolean isImpersonating() {
         return this.realPrincipal != null;
     }
-    
+
     /**
      * request ID of this request
      */
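
Because the context lives in a ThreadLocal, any code on the request thread can pick it up without it being passed around; a sketch of typical read-side usage:

    import java.security.Principal;

    ReqContext ctx = ReqContext.context();  // per-thread instance, created lazily
    Principal who = ctx.principal();        // null when the request is unauthenticated
    String name = (who == null) ? "anonymous" : who.getName();
    if (ctx.isImpersonating()) {
        name += " (impersonated)";          // a real principal was recorded behind the proxy
    }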

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/SaslTransportPlugin.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/SaslTransportPlugin.java b/jstorm-core/src/main/java/backtype/storm/security/auth/SaslTransportPlugin.java
index 7208a17..9c8780d 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/SaslTransportPlugin.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/SaslTransportPlugin.java
@@ -73,11 +73,9 @@ public abstract class SaslTransportPlugin implements ITransportPlugin {
         int numWorkerThreads = type.getNumThreads(storm_conf);
         Integer queueSize = type.getQueueSize(storm_conf);
 
-        TThreadPoolServer.Args server_args = new TThreadPoolServer.Args(serverTransport).
-                processor(new TUGIWrapProcessor(processor)).
-                minWorkerThreads(numWorkerThreads).
-                maxWorkerThreads(numWorkerThreads).
-                protocolFactory(new TBinaryProtocol.Factory(false, true));
+        TThreadPoolServer.Args server_args =
+                new TThreadPoolServer.Args(serverTransport).processor(new TUGIWrapProcessor(processor)).minWorkerThreads(numWorkerThreads)
+                        .maxWorkerThreads(numWorkerThreads).protocolFactory(new TBinaryProtocol.Factory(false, true));
 
         if (serverTransportFactory != null) {
             server_args.transportFactory(serverTransportFactory);
@@ -86,26 +84,23 @@ public abstract class SaslTransportPlugin implements ITransportPlugin {
         if (queueSize != null) {
             workQueue = new ArrayBlockingQueue(queueSize);
         }
-        ThreadPoolExecutor executorService = new ExtendedThreadPoolExecutor(numWorkerThreads, numWorkerThreads,
-            60, TimeUnit.SECONDS, workQueue);
+        ThreadPoolExecutor executorService = new ExtendedThreadPoolExecutor(numWorkerThreads, numWorkerThreads, 60, TimeUnit.SECONDS, workQueue);
         server_args.executorService(executorService);
         return new TThreadPoolServer(server_args);
     }
 
     /**
      * All subclass must implement this method
+     * 
      * @return
      * @throws IOException
      */
     protected abstract TTransportFactory getServerTransportFactory() throws IOException;
 
-
-    /**                                                                                                                                                                             
-     * Processor that pulls the SaslServer object out of the transport, and                                                                                                         
-     * assumes the remote user's UGI before calling through to the original                                                                                                         
-     * processor.                                                                                                                                                                   
-     *                                                                                                                                                                              
-     * This is used on the server side to set the UGI for each specific call.                                                                                                       
+    /**
+     * Processor that pulls the SaslServer object out of the transport, and assumes the remote user's UGI before calling through to the original processor.
+     * 
+     * This is used on the server side to set the UGI for each specific call.
      */
     private class TUGIWrapProcessor implements TProcessor {
         final TProcessor wrapped;
@@ -115,25 +110,25 @@ public abstract class SaslTransportPlugin implements ITransportPlugin {
         }
 
         public boolean process(final TProtocol inProt, final TProtocol outProt) throws TException {
-            //populating request context 
+            // populating request context
             ReqContext req_context = ReqContext.context();
 
             TTransport trans = inProt.getTransport();
-            //Sasl transport
-            TSaslServerTransport saslTrans = (TSaslServerTransport)trans;
-            //remote address
-            TSocket tsocket = (TSocket)saslTrans.getUnderlyingTransport();
+            // Sasl transport
+            TSaslServerTransport saslTrans = (TSaslServerTransport) trans;
+            // remote address
+            TSocket tsocket = (TSocket) saslTrans.getUnderlyingTransport();
             Socket socket = tsocket.getSocket();
             req_context.setRemoteAddress(socket.getInetAddress());
 
-            //remote subject 
+            // remote subject
             SaslServer saslServer = saslTrans.getSaslServer();
             String authId = saslServer.getAuthorizationID();
             Subject remoteUser = new Subject();
             remoteUser.getPrincipals().add(new User(authId));
             req_context.setSubject(remoteUser);
 
-            //invoke service handler
+            // invoke service handler
             return wrapped.process(inProt, outProt);
         }
     }
@@ -142,11 +137,11 @@ public abstract class SaslTransportPlugin implements ITransportPlugin {
         private final String name;
 
         public User(String name) {
-            this.name =  name;
+            this.name = name;
         }
 
-        /**                                                                                                                                                                                
-         * Get the full name of the user.                                                                                                                                                  
+        /**
+         * Get the full name of the user.
          */
         public String getName() {
             return name;

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/ShellBasedGroupsMapping.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/ShellBasedGroupsMapping.java b/jstorm-core/src/main/java/backtype/storm/security/auth/ShellBasedGroupsMapping.java
index 62a4c7e..16f2fe4 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/ShellBasedGroupsMapping.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/ShellBasedGroupsMapping.java
@@ -31,15 +31,14 @@ import backtype.storm.utils.ShellUtils.ExitCodeException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-
-public class ShellBasedGroupsMapping implements
-                                             IGroupMappingServiceProvider {
+public class ShellBasedGroupsMapping implements IGroupMappingServiceProvider {
 
     public static Logger LOG = LoggerFactory.getLogger(ShellBasedGroupsMapping.class);
     public TimeCacheMap<String, Set<String>> cachedGroups;
 
     /**
      * Invoked once immediately after construction
+     * 
      * @param storm_conf Storm configuration
      */
     @Override
@@ -50,24 +49,24 @@ public class ShellBasedGroupsMapping implements
 
     /**
      * Returns list of groups for a user
-     *
+     * 
      * @param user get groups for this user
      * @return list of groups for a given user
      */
     @Override
     public Set<String> getGroups(String user) throws IOException {
-        if(cachedGroups.containsKey(user)) {
+        if (cachedGroups.containsKey(user)) {
             return cachedGroups.get(user);
         }
         Set<String> groups = getUnixGroups(user);
-        if(!groups.isEmpty())
-            cachedGroups.put(user,groups);
+        if (!groups.isEmpty())
+            cachedGroups.put(user, groups);
         return groups;
     }
 
     /**
-     * Get the current user's group list from Unix by running the command 'groups'
-     * NOTE. For non-existing user it will return EMPTY list
+     * Get the current user's group list from Unix by running the command 'groups'. NOTE: for a non-existing user it will return an EMPTY list
+     * 
      * @param user user name
      * @return the groups set that the <code>user</code> belongs to
      * @throws IOException if encounter any error when running the command
@@ -82,8 +81,7 @@ public class ShellBasedGroupsMapping implements
             return new HashSet<String>();
         }
 
-        StringTokenizer tokenizer =
-            new StringTokenizer(result, ShellUtils.TOKEN_SEPARATOR_REGEX);
+        StringTokenizer tokenizer = new StringTokenizer(result, ShellUtils.TOKEN_SEPARATOR_REGEX);
         Set<String> groups = new HashSet<String>();
         while (tokenizer.hasMoreTokens()) {
             groups.add(tokenizer.nextToken());
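
Usage is two calls; the first lookup for a user shells out to 'groups', and later lookups inside the cache window are served from the TimeCacheMap (stormConf is assumed to be the populated cluster config Map; getGroups declares IOException):

    import java.util.Set;

    IGroupMappingServiceProvider provider = new ShellBasedGroupsMapping();
    provider.prepare(stormConf);
    Set<String> first = provider.getGroups("alice");  // runs the shell command
    Set<String> again = provider.getGroups("alice");  // served from cachedGroups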

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/SimpleTransportPlugin.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/SimpleTransportPlugin.java b/jstorm-core/src/main/java/backtype/storm/security/auth/SimpleTransportPlugin.java
index 2abcdae..c7e816f 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/SimpleTransportPlugin.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/SimpleTransportPlugin.java
@@ -73,22 +73,21 @@ public class SimpleTransportPlugin implements ITransportPlugin {
         int maxBufferSize = type.getMaxBufferSize(storm_conf);
         Integer queueSize = type.getQueueSize(storm_conf);
 
-        THsHaServer.Args server_args = new THsHaServer.Args(serverTransport).
-                processor(new SimpleWrapProcessor(processor)).
-                workerThreads(numWorkerThreads).
-                protocolFactory(new TBinaryProtocol.Factory(false, true, maxBufferSize, -1));
+        THsHaServer.Args server_args =
+                new THsHaServer.Args(serverTransport).processor(new SimpleWrapProcessor(processor)).workerThreads(numWorkerThreads)
+                        .protocolFactory(new TBinaryProtocol.Factory(false, true, maxBufferSize, -1));
 
         if (queueSize != null) {
-            server_args.executorService(new ThreadPoolExecutor(numWorkerThreads, numWorkerThreads, 
-                                   60, TimeUnit.SECONDS, new ArrayBlockingQueue(queueSize)));
+            server_args.executorService(new ThreadPoolExecutor(numWorkerThreads, numWorkerThreads, 60, TimeUnit.SECONDS, new ArrayBlockingQueue(queueSize)));
         }
 
-        //construct THsHaServer
+        // construct THsHaServer
         return new THsHaServer(server_args);
     }
 
     /**
-     * Connect to the specified server via framed transport 
+     * Connect to the specified server via framed transport
+     * 
      * @param transport The underlying Thrift transport.
      * @param serverHost unused.
      * @param asUser unused.
@@ -96,10 +95,10 @@ public class SimpleTransportPlugin implements ITransportPlugin {
     @Override
     public TTransport connect(TTransport transport, String serverHost, String asUser) throws TTransportException {
         int maxBufferSize = type.getMaxBufferSize(storm_conf);
-        //create a framed transport
+        // create a framed transport
         TTransport conn = new TFramedTransport(transport, maxBufferSize);
 
-        //connect
+        // connect
         conn.open();
         LOG.debug("Simple client transport has been established");
 
@@ -108,13 +107,13 @@ public class SimpleTransportPlugin implements ITransportPlugin {
 
     /**
      * @return the subject that will be used for all connections
-     */  
+     */
     protected Subject getDefaultSubject() {
         return null;
     }
 
-    /**                                                                                                                                                                             
-     * Processor that populate simple transport info into ReqContext, and then invoke a service handler                                                                              
+    /**
+     * Processor that populates simple transport info into ReqContext, and then invokes a service handler
      */
     private class SimpleWrapProcessor implements TProcessor {
         final TProcessor wrapped;
@@ -124,7 +123,7 @@ public class SimpleTransportPlugin implements ITransportPlugin {
         }
 
         public boolean process(final TProtocol inProt, final TProtocol outProt) throws TException {
-            //populating request context 
+            // populating request context
             ReqContext req_context = ReqContext.context();
 
             TTransport trans = inProt.getTransport();
@@ -133,31 +132,36 @@ public class SimpleTransportPlugin implements ITransportPlugin {
                     req_context.setRemoteAddress(InetAddress.getLocalHost());
                 } catch (UnknownHostException e) {
                     throw new RuntimeException(e);
-                }                                
+                }
             } else if (trans instanceof TSocket) {
-                TSocket tsocket = (TSocket)trans;
-                //remote address
+                TSocket tsocket = (TSocket) trans;
+                // remote address
                 Socket socket = tsocket.getSocket();
-                req_context.setRemoteAddress(socket.getInetAddress());                
-            } 
+                req_context.setRemoteAddress(socket.getInetAddress());
+            }
 
-            //anonymous user
+            // anonymous user
             Subject s = getDefaultSubject();
             if (s == null) {
-              final String user = (String)storm_conf.get("debug.simple.transport.user");
-              if (user != null) {
-                HashSet<Principal> principals = new HashSet<Principal>();
-                principals.add(new Principal() {
-                  public String getName() { return user; }
-                  public String toString() { return user; }
-                });
-                s = new Subject(true, principals, new HashSet<Object>(), new HashSet<Object>());
-              }
+                final String user = (String) storm_conf.get("debug.simple.transport.user");
+                if (user != null) {
+                    HashSet<Principal> principals = new HashSet<Principal>();
+                    principals.add(new Principal() {
+                        public String getName() {
+                            return user;
+                        }
+
+                        public String toString() {
+                            return user;
+                        }
+                    });
+                    s = new Subject(true, principals, new HashSet<Object>(), new HashSet<Object>());
+                }
             }
             req_context.setSubject(s);
 
-            //invoke service handler
+            // invoke service handler
             return wrapped.process(inProt, outProt);
         }
-    } 
+    }
 }
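
A sketch of the debug hook read above: putting "debug.simple.transport.user" into the config gives otherwise-anonymous simple-transport requests a fixed identity (the key is taken verbatim from SimpleWrapProcessor; Utils.readStormConfig() is an assumed helper returning the merged config Map):

    Map conf = Utils.readStormConfig();
    conf.put("debug.simple.transport.user", "alice");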

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/SingleUserPrincipal.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/SingleUserPrincipal.java b/jstorm-core/src/main/java/backtype/storm/security/auth/SingleUserPrincipal.java
index 6af17fa..fd9e694 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/SingleUserPrincipal.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/SingleUserPrincipal.java
@@ -34,7 +34,7 @@ public class SingleUserPrincipal implements Principal {
     @Override
     public boolean equals(Object another) {
         if (another instanceof SingleUserPrincipal) {
-            return _userName.equals(((SingleUserPrincipal)another)._userName);
+            return _userName.equals(((SingleUserPrincipal) another)._userName);
         }
         return false;
     }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/TBackoffConnect.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/TBackoffConnect.java b/jstorm-core/src/main/java/backtype/storm/security/auth/TBackoffConnect.java
index f547868..b699bc4 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/TBackoffConnect.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/TBackoffConnect.java
@@ -35,15 +35,13 @@ public class TBackoffConnect {
     public TBackoffConnect(int retryTimes, int retryInterval, int retryIntervalCeiling) {
 
         _retryTimes = retryTimes;
-        waitGrabber = new StormBoundedExponentialBackoffRetry(retryInterval,
-                                                              retryIntervalCeiling,
-                                                              retryTimes);
+        waitGrabber = new StormBoundedExponentialBackoffRetry(retryInterval, retryIntervalCeiling, retryTimes);
     }
 
     public TTransport doConnectWithRetry(ITransportPlugin transportPlugin, TTransport underlyingTransport, String host, String asUser) throws IOException {
         boolean connected = false;
         TTransport transportResult = null;
-        while(!connected) {
+        while (!connected) {
             try {
                 transportResult = transportPlugin.connect(underlyingTransport, host, asUser);
                 connected = true;
@@ -55,13 +53,13 @@ public class TBackoffConnect {
     }
 
     private void retryNext(TTransportException ex) {
-        if(!canRetry()) {
+        if (!canRetry()) {
             throw new RuntimeException(ex);
         }
         try {
             int sleeptime = waitGrabber.getSleepTimeMs(_completedRetries, 0);
 
-            LOG.debug("Failed to connect. Retrying... (" + Integer.toString( _completedRetries) + ") in " + Integer.toString(sleeptime) + "ms");
+            LOG.debug("Failed to connect. Retrying... (" + Integer.toString(_completedRetries) + ") in " + Integer.toString(sleeptime) + "ms");
 
             Thread.sleep(sleeptime);
         } catch (InterruptedException e) {
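
The same retry knobs that ThriftClient wires in (see its reconnect() below) can be used directly; a sketch, assuming conf is a populated Storm config Map and transportPlugin an already-prepared ITransportPlugin:

    TBackoffConnect retrier = new TBackoffConnect(
            Utils.getInt(conf.get(Config.STORM_NIMBUS_RETRY_TIMES)),
            Utils.getInt(conf.get(Config.STORM_NIMBUS_RETRY_INTERVAL)),
            Utils.getInt(conf.get(Config.STORM_NIMBUS_RETRY_INTERVAL_CEILING)));
    TTransport t = retrier.doConnectWithRetry(transportPlugin, new TSocket(host, port), host, null);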

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/ThriftClient.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/ThriftClient.java b/jstorm-core/src/main/java/backtype/storm/security/auth/ThriftClient.java
index 8d2136a..954b4f8 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/ThriftClient.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/ThriftClient.java
@@ -45,17 +45,17 @@ public class ThriftClient {
     private String hostPort;
     private String host;
     private Integer port;
-    
+
     private Map<Object, Object> conf;
-    
+
     private Integer timeout;
     private ThriftConnectionType type;
     private String asUser;
-    
+
     public ThriftClient(Map conf, ThriftConnectionType type) throws Exception {
         this(conf, type, null, null, null, null);
     }
-    
+
     @SuppressWarnings("unchecked")
     public ThriftClient(Map conf, ThriftConnectionType type, Integer timeout) throws Exception {
         this(conf, type, null, null, timeout, null);
@@ -63,6 +63,7 @@ public class ThriftClient {
 
     /**
      * This is only to be compatible with Storm
+     * 
      * @param conf
      * @param type
      * @param host
@@ -71,45 +72,39 @@ public class ThriftClient {
         this(conf, type, host, null, null, null);
     }
 
-    public ThriftClient(Map conf, ThriftConnectionType type, String host, Integer port, Integer timeout){
+    public ThriftClient(Map conf, ThriftConnectionType type, String host, Integer port, Integer timeout) {
         this(conf, type, host, port, timeout, null);
     }
 
     public ThriftClient(Map conf, ThriftConnectionType type, String host, Integer port, Integer timeout, String asUser) {
-        //create a socket with server
-        
+        // create a socket with server
+
         this.timeout = timeout;
         this.conf = conf;
         this.type = type;
         this.asUser = asUser;
-        
+
         getMaster(conf, host, port);
         reconnect();
     }
-    
-    
-    
+
     public static String getMasterByZk(Map conf) throws Exception {
 
-        
         CuratorFramework zkobj = null;
         String masterHost = null;
-        
+
         try {
             String root = String.valueOf(conf.get(Config.STORM_ZOOKEEPER_ROOT));
             String zkMasterDir = root + Cluster.MASTER_SUBTREE;
-            
-            zkobj = Utils.newCurator(conf, 
-                            (List<String>) conf.get(Config.STORM_ZOOKEEPER_SERVERS), 
-                            conf.get(Config.STORM_ZOOKEEPER_PORT), 
-                            zkMasterDir);
+
+            zkobj = Utils.newCurator(conf, (List<String>) conf.get(Config.STORM_ZOOKEEPER_SERVERS), conf.get(Config.STORM_ZOOKEEPER_PORT), zkMasterDir);
             zkobj.start();
             if (zkobj.checkExists().forPath("/") == null) {
                 throw new RuntimeException("No alive nimbus ");
             }
-            
+
             masterHost = new String(zkobj.getData().forPath("/"));
-            
+
             LOG.info("masterHost:" + masterHost);
             return masterHost;
         } finally {
@@ -119,8 +114,8 @@ public class ThriftClient {
             }
         }
     }
-    
-    public void getMaster(Map conf, String host, Integer port){
+
+    public void getMaster(Map conf, String host, Integer port) {
         if (StringUtils.isBlank(host) == false) {
             this.host = host;
             if (port == null) {
@@ -128,7 +123,7 @@ public class ThriftClient {
             }
             this.port = port;
             this.hostPort = host + ":" + port;
-        }else {
+        } else {
             try {
                 hostPort = ThriftClient.getMasterByZk(conf);
             } catch (Exception e) {
@@ -142,7 +137,7 @@ public class ThriftClient {
             this.host = host_port[0];
             this.port = Integer.parseInt(host_port[1]);
         }
-        
+
         // create a socket with server
         if (this.host == null) {
             throw new IllegalArgumentException("host is not set");
@@ -151,45 +146,43 @@ public class ThriftClient {
             throw new IllegalArgumentException("invalid port: " + port);
         }
     }
-    
+
     public synchronized TTransport transport() {
         return _transport;
     }
-    
+
     public synchronized void reconnect() {
-        close();    
+        close();
         try {
             TSocket socket = new TSocket(host, port);
-            if(timeout!=null) {
+            if (timeout != null) {
                 socket.setTimeout(timeout);
-            }else {
-                //@@@ Todo
+            } else {
+                // @@@ Todo
                 // set the socket default Timeout as xxxx
             }
 
-            //locate login configuration 
+            // locate login configuration
             Configuration login_conf = AuthUtils.GetConfiguration(conf);
 
-            //construct a transport plugin
+            // construct a transport plugin
             ITransportPlugin transportPlugin = AuthUtils.GetTransportPlugin(type, conf, login_conf);
 
             final TTransport underlyingTransport = socket;
 
-            //TODO get this from type instead of hardcoding to Nimbus.
-            //establish client-server transport via plugin
-            //do retries if the connect fails
-            TBackoffConnect connectionRetry 
-                = new TBackoffConnect(
-                                      Utils.getInt(conf.get(Config.STORM_NIMBUS_RETRY_TIMES)),
-                                      Utils.getInt(conf.get(Config.STORM_NIMBUS_RETRY_INTERVAL)),
-                                      Utils.getInt(conf.get(Config.STORM_NIMBUS_RETRY_INTERVAL_CEILING)));
+            // TODO get this from type instead of hardcoding to Nimbus.
+            // establish client-server transport via plugin
+            // do retries if the connect fails
+            TBackoffConnect connectionRetry =
+                    new TBackoffConnect(Utils.getInt(conf.get(Config.STORM_NIMBUS_RETRY_TIMES)), Utils.getInt(conf.get(Config.STORM_NIMBUS_RETRY_INTERVAL)),
+                            Utils.getInt(conf.get(Config.STORM_NIMBUS_RETRY_INTERVAL_CEILING)));
             _transport = connectionRetry.doConnectWithRetry(transportPlugin, underlyingTransport, host, asUser);
         } catch (IOException ex) {
             throw new RuntimeException(ex);
         }
         _protocol = null;
         if (_transport != null) {
-            _protocol = new  TBinaryProtocol(_transport);
+            _protocol = new TBinaryProtocol(_transport);
         }
     }
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/ThriftConnectionType.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/ThriftConnectionType.java b/jstorm-core/src/main/java/backtype/storm/security/auth/ThriftConnectionType.java
index f9be7ae..e248df8 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/ThriftConnectionType.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/ThriftConnectionType.java
@@ -26,12 +26,9 @@ import java.util.Map;
  * The purpose for which the Thrift server is created.
  */
 public enum ThriftConnectionType {
-    NIMBUS(Config.NIMBUS_THRIFT_TRANSPORT_PLUGIN, Config.NIMBUS_THRIFT_PORT, null,
-         Config.NIMBUS_THRIFT_THREADS, Config.NIMBUS_THRIFT_MAX_BUFFER_SIZE),
-    DRPC(Config.DRPC_THRIFT_TRANSPORT_PLUGIN, Config.DRPC_PORT, Config.DRPC_QUEUE_SIZE,
-         Config.DRPC_WORKER_THREADS, Config.DRPC_MAX_BUFFER_SIZE),
-    DRPC_INVOCATIONS(Config.DRPC_INVOCATIONS_THRIFT_TRANSPORT_PLUGIN, Config.DRPC_INVOCATIONS_PORT, null,
-         Config.DRPC_INVOCATIONS_THREADS, Config.DRPC_MAX_BUFFER_SIZE);
+    NIMBUS(Config.NIMBUS_THRIFT_TRANSPORT_PLUGIN, Config.NIMBUS_THRIFT_PORT, null, Config.NIMBUS_THRIFT_THREADS, Config.NIMBUS_THRIFT_MAX_BUFFER_SIZE),
+    DRPC(Config.DRPC_THRIFT_TRANSPORT_PLUGIN, Config.DRPC_PORT, Config.DRPC_QUEUE_SIZE, Config.DRPC_WORKER_THREADS, Config.DRPC_MAX_BUFFER_SIZE),
+    DRPC_INVOCATIONS(Config.DRPC_INVOCATIONS_THRIFT_TRANSPORT_PLUGIN, Config.DRPC_INVOCATIONS_PORT, null, Config.DRPC_INVOCATIONS_THREADS, Config.DRPC_MAX_BUFFER_SIZE);
 
     private final String _transConf;
     private final String _portConf;
@@ -39,8 +36,7 @@ public enum ThriftConnectionType {
     private final String _threadsConf;
     private final String _buffConf;
 
-    ThriftConnectionType(String transConf, String portConf, String qConf,
-                         String threadsConf, String buffConf) {
+    ThriftConnectionType(String transConf, String portConf, String qConf, String threadsConf, String buffConf) {
         _transConf = transConf;
         _portConf = portConf;
         _qConf = qConf;
@@ -49,9 +45,9 @@ public enum ThriftConnectionType {
     }
 
     public String getTransportPlugin(Map conf) {
-        String ret = (String)conf.get(_transConf);
+        String ret = (String) conf.get(_transConf);
         if (ret == null) {
-            ret = (String)conf.get(Config.STORM_THRIFT_TRANSPORT_PLUGIN);
+            ret = (String) conf.get(Config.STORM_THRIFT_TRANSPORT_PLUGIN);
         }
         return ret;
     }
@@ -64,10 +60,10 @@ public enum ThriftConnectionType {
         if (_qConf == null) {
             return null;
         }
-        return (Integer)conf.get(_qConf);
+        return (Integer) conf.get(_qConf);
     }
 
-    public int getNumThreads(Map conf) { 
+    public int getNumThreads(Map conf) {
         return Utils.getInt(conf.get(_threadsConf));
     }
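
Each connection type reads its own keys and, for the transport plugin, falls back to the cluster-wide default, as getTransportPlugin() shows; a lookup sketch (Utils.readStormConfig() is assumed to return the merged config Map):

    Map conf = Utils.readStormConfig();
    // nimbus.thrift.transport if set, otherwise storm.thrift.transport
    String plugin = ThriftConnectionType.NIMBUS.getTransportPlugin(conf);
    Integer queue = ThriftConnectionType.DRPC.getQueueSize(conf);  // null for types with no queue key
    int threads = ThriftConnectionType.NIMBUS.getNumThreads(conf);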
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/ThriftServer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/ThriftServer.java b/jstorm-core/src/main/java/backtype/storm/security/auth/ThriftServer.java
index 64243ce..410f1ce 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/ThriftServer.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/ThriftServer.java
@@ -28,19 +28,19 @@ import org.slf4j.LoggerFactory;
 
 public class ThriftServer {
     private static final Logger LOG = LoggerFactory.getLogger(ThriftServer.class);
-    private Map _storm_conf; //storm configuration
+    private Map _storm_conf; // storm configuration
     protected TProcessor _processor = null;
     private final ThriftConnectionType _type;
     private TServer _server = null;
     private Configuration _login_conf;
-    
+
     public ThriftServer(Map storm_conf, TProcessor processor, ThriftConnectionType type) {
         _storm_conf = storm_conf;
         _processor = processor;
         _type = type;
 
         try {
-            //retrieve authentication configuration 
+            // retrieve authentication configuration
             _login_conf = AuthUtils.GetConfiguration(_storm_conf);
         } catch (Exception x) {
             LOG.error(x.getMessage(), x);
@@ -54,27 +54,30 @@ public class ThriftServer {
 
     /**
      * Is ThriftServer listening to requests?
+     * 
      * @return
      */
     public boolean isServing() {
-        if (_server == null) return false;
+        if (_server == null)
+            return false;
         return _server.isServing();
     }
-    
-    public void serve()  {
+
+    public void serve() {
         try {
-            //locate our thrift transport plugin
-            ITransportPlugin  transportPlugin = AuthUtils.GetTransportPlugin(_type, _storm_conf, _login_conf);
+            // locate our thrift transport plugin
+            ITransportPlugin transportPlugin = AuthUtils.GetTransportPlugin(_type, _storm_conf, _login_conf);
 
-            //server
+            // server
             _server = transportPlugin.getServer(_processor);
 
-            //start accepting requests
+            // start accepting requests
             _server.serve();
         } catch (Exception ex) {
             LOG.error("ThriftServer is being stopped due to: " + ex, ex);
-            if (_server != null) _server.stop();
-            Runtime.getRuntime().halt(1); //shutdown server process since we could not handle Thrift requests any more
+            if (_server != null)
+                _server.stop();
+            Runtime.getRuntime().halt(1); // shutdown server process since we could not handle Thrift requests any more
         }
     }
 }
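
Daemon start-up then reduces to wrapping a Thrift processor and calling serve(), which blocks and halts the process on unrecoverable transport errors; a sketch where processor stands for any generated TProcessor (e.g. the nimbus service handler):

    ThriftServer server = new ThriftServer(stormConf, processor, ThriftConnectionType.NIMBUS);
    server.serve();  // blocks the calling thread until the server stops or fails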

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/DRPCAuthorizerBase.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/DRPCAuthorizerBase.java b/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/DRPCAuthorizerBase.java
index 8951edd..11c4a0f 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/DRPCAuthorizerBase.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/DRPCAuthorizerBase.java
@@ -22,9 +22,10 @@ public abstract class DRPCAuthorizerBase implements IAuthorizer {
     abstract protected boolean permitClientRequest(ReqContext context, String operation, Map params);
 
     abstract protected boolean permitInvocationRequest(ReqContext context, String operation, Map params);
-    
+
     /**
      * Authorizes request from to the DRPC server.
+     * 
      * @param context the client request context
      * @param operation the operation requested by the DRPC server
      * @param params a Map with any key-value entries of use to the authorization implementation
@@ -33,14 +34,11 @@ public abstract class DRPCAuthorizerBase implements IAuthorizer {
     public boolean permit(ReqContext context, String operation, Map params) {
         if ("execute".equals(operation)) {
             return permitClientRequest(context, operation, params);
-        } else if ("failRequest".equals(operation) || 
-                "fetchRequest".equals(operation) || 
-                "result".equals(operation)) {
+        } else if ("failRequest".equals(operation) || "fetchRequest".equals(operation) || "result".equals(operation)) {
             return permitInvocationRequest(context, operation, params);
         }
         // Deny unsupported operations.
-        LOG.warn("Denying unsupported operation \""+operation+"\" from "+
-                context.remoteAddress());
+        LOG.warn("Denying unsupported operation \"" + operation + "\" from " + context.remoteAddress());
         return false;
     }
 }
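
For reference, permit() above dispatches "execute" to the client-side hook and the invocation-side operations to the other; a concrete authorizer only has to implement the two abstract methods. A minimal sketch (class and user names are hypothetical, not part of this commit):

    public class AllowAliceDRPCAuthorizer extends DRPCAuthorizerBase {
        @Override
        protected boolean permitClientRequest(ReqContext context, String operation, Map params) {
            // "execute" requests from DRPC clients land here
            return context.principal() != null && "alice".equals(context.principal().getName());
        }

        @Override
        protected boolean permitInvocationRequest(ReqContext context, String operation, Map params) {
            // "failRequest", "fetchRequest" and "result" land here
            return true;
        }
    }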

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/DRPCSimpleACLAuthorizer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/DRPCSimpleACLAuthorizer.java b/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/DRPCSimpleACLAuthorizer.java
index 45eaea5..8aa7243 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/DRPCSimpleACLAuthorizer.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/DRPCSimpleACLAuthorizer.java
@@ -19,8 +19,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class DRPCSimpleACLAuthorizer extends DRPCAuthorizerBase {
-    public static Logger LOG =
-        LoggerFactory.getLogger(DRPCSimpleACLAuthorizer.class);
+    public static Logger LOG = LoggerFactory.getLogger(DRPCSimpleACLAuthorizer.class);
 
     public static final String CLIENT_USERS_KEY = "client.users";
     public static final String INVOCATION_USER_KEY = "invocation.user";
@@ -33,44 +32,35 @@ public class DRPCSimpleACLAuthorizer extends DRPCAuthorizerBase {
     protected class AclFunctionEntry {
         final public Set<String> clientUsers;
         final public String invocationUser;
-        public AclFunctionEntry(Collection<String> clientUsers,
-                String invocationUser) {
-            this.clientUsers = (clientUsers != null) ?
-                new HashSet<String>(clientUsers) : new HashSet<String>();
+
+        public AclFunctionEntry(Collection<String> clientUsers, String invocationUser) {
+            this.clientUsers = (clientUsers != null) ? new HashSet<String>(clientUsers) : new HashSet<String>();
             this.invocationUser = invocationUser;
         }
     }
 
-    private volatile Map<String,AclFunctionEntry> _acl = null;
+    private volatile Map<String, AclFunctionEntry> _acl = null;
     private volatile long _lastUpdate = 0;
 
-    protected Map<String,AclFunctionEntry> readAclFromConfig() {
-        //Thread safety is mostly around _acl.  If _acl needs to be updated it is changed atomically
-        //More then one thread may be trying to update it at a time, but that is OK, because the
-        //change is atomic
+    protected Map<String, AclFunctionEntry> readAclFromConfig() {
+        // Thread safety is mostly around _acl. If _acl needs to be updated it is changed atomically
+        // More than one thread may be trying to update it at a time, but that is OK, because the
+        // change is atomic
         long now = System.currentTimeMillis();
         if ((now - 5000) > _lastUpdate || _acl == null) {
-            Map<String,AclFunctionEntry> acl = new HashMap<String,AclFunctionEntry>();
+            Map<String, AclFunctionEntry> acl = new HashMap<String, AclFunctionEntry>();
             Map conf = Utils.findAndReadConfigFile(_aclFileName);
             if (conf.containsKey(Config.DRPC_AUTHORIZER_ACL)) {
-                Map<String,Map<String,?>> confAcl =
-                    (Map<String,Map<String,?>>)
-                    conf.get(Config.DRPC_AUTHORIZER_ACL);
+                Map<String, Map<String, ?>> confAcl = (Map<String, Map<String, ?>>) conf.get(Config.DRPC_AUTHORIZER_ACL);
 
                 for (String function : confAcl.keySet()) {
-                    Map<String,?> val = confAcl.get(function);
-                    Collection<String> clientUsers =
-                        val.containsKey(CLIENT_USERS_KEY) ?
-                        (Collection<String>) val.get(CLIENT_USERS_KEY) : null;
-                    String invocationUser =
-                        val.containsKey(INVOCATION_USER_KEY) ?
-                        (String) val.get(INVOCATION_USER_KEY) : null;
-                    acl.put(function,
-                            new AclFunctionEntry(clientUsers, invocationUser));
+                    Map<String, ?> val = confAcl.get(function);
+                    Collection<String> clientUsers = val.containsKey(CLIENT_USERS_KEY) ? (Collection<String>) val.get(CLIENT_USERS_KEY) : null;
+                    String invocationUser = val.containsKey(INVOCATION_USER_KEY) ? (String) val.get(INVOCATION_USER_KEY) : null;
+                    acl.put(function, new AclFunctionEntry(clientUsers, invocationUser));
                 }
             } else if (!_permitWhenMissingFunctionEntry) {
-                LOG.warn("Requiring explicit ACL entries, but none given. " +
-                        "Therefore, all operiations will be denied.");
+                LOG.warn("Requiring explicit ACL entries, but none given. " + "Therefore, all operiations will be denied.");
             }
             _acl = acl;
             _lastUpdate = System.currentTimeMillis();
@@ -80,10 +70,8 @@ public class DRPCSimpleACLAuthorizer extends DRPCAuthorizerBase {
 
     @Override
     public void prepare(Map conf) {
-        Boolean isStrict = 
-                (Boolean) conf.get(Config.DRPC_AUTHORIZER_ACL_STRICT);
-        _permitWhenMissingFunctionEntry = 
-                (isStrict != null && !isStrict) ? true : false;
+        Boolean isStrict = (Boolean) conf.get(Config.DRPC_AUTHORIZER_ACL_STRICT);
+        _permitWhenMissingFunctionEntry = isStrict != null && !isStrict;
         _aclFileName = (String) conf.get(Config.DRPC_AUTHORIZER_ACL_FILENAME);
         _ptol = AuthUtils.GetPrincipalToLocalPlugin(conf);
     }
@@ -105,11 +93,10 @@ public class DRPCSimpleACLAuthorizer extends DRPCAuthorizerBase {
         return null;
     }
 
-    protected boolean permitClientOrInvocationRequest(ReqContext context, Map params,
-            String fieldName) {
-        Map<String,AclFunctionEntry> acl = readAclFromConfig();
+    protected boolean permitClientOrInvocationRequest(ReqContext context, Map params, String fieldName) {
+        Map<String, AclFunctionEntry> acl = readAclFromConfig();
         String function = (String) params.get(FUNCTION_KEY);
-        if (function != null && ! function.isEmpty()) {
+        if (function != null && !function.isEmpty()) {
             AclFunctionEntry entry = acl.get(function);
             if (entry == null && _permitWhenMissingFunctionEntry) {
                 return true;
@@ -126,16 +113,11 @@ public class DRPCSimpleACLAuthorizer extends DRPCAuthorizerBase {
                 String principal = getUserFromContext(context);
                 String user = getLocalUserFromContext(context);
                 if (value == null) {
-                    LOG.warn("Configuration for function '"+function+"' is "+
-                            "invalid: it should have both an invocation user "+
-                            "and a list of client users defined.");
-                } else if (value instanceof Set && 
-                        (((Set<String>)value).contains(principal) ||
-                        ((Set<String>)value).contains(user))) {
+                    LOG.warn("Configuration for function '" + function + "' is " + "invalid: it should have both an invocation user "
+                            + "and a list of client users defined.");
+                } else if (value instanceof Set && (((Set<String>) value).contains(principal) || ((Set<String>) value).contains(user))) {
                     return true;
-                } else if (value instanceof String && 
-                        (value.equals(principal) ||
-                         value.equals(user))) {
+                } else if (value instanceof String && (value.equals(principal) || value.equals(user))) {
                     return true;
                 }
             }
@@ -144,14 +126,12 @@ public class DRPCSimpleACLAuthorizer extends DRPCAuthorizerBase {
     }
 
     @Override
-    protected boolean permitClientRequest(ReqContext context, String operation,
-            Map params) {
+    protected boolean permitClientRequest(ReqContext context, String operation, Map params) {
         return permitClientOrInvocationRequest(context, params, "clientUsers");
     }
 
     @Override
-    protected boolean permitInvocationRequest(ReqContext context, String operation,
-            Map params) {
+    protected boolean permitInvocationRequest(ReqContext context, String operation, Map params) {
         return permitClientOrInvocationRequest(context, params, "invocationUser");
     }
 }
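
The map shape readAclFromConfig() expects under Config.DRPC_AUTHORIZER_ACL is one entry per DRPC function, each carrying the "client.users" list and the "invocation.user" string keyed as above. A sketch of building an equivalent conf in code (function and user names are made up; normally the ACL comes from the file named by Config.DRPC_AUTHORIZER_ACL_FILENAME):

    Map conf = new HashMap();
    Map<String, Map<String, Object>> acl = new HashMap<String, Map<String, Object>>();
    Map<String, Object> entry = new HashMap<String, Object>();
    entry.put(DRPCSimpleACLAuthorizer.CLIENT_USERS_KEY, Arrays.asList("alice", "bob")); // "client.users"
    entry.put(DRPCSimpleACLAuthorizer.INVOCATION_USER_KEY, "drpc-invoker");             // "invocation.user"
    acl.put("reach", entry); // keyed by the DRPC function name
    conf.put(Config.DRPC_AUTHORIZER_ACL, acl);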

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/DenyAuthorizer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/DenyAuthorizer.java b/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/DenyAuthorizer.java
index 5e84b38..32f809a 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/DenyAuthorizer.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/DenyAuthorizer.java
@@ -31,27 +31,27 @@ import org.slf4j.LoggerFactory;
  */
 public class DenyAuthorizer implements IAuthorizer {
     private static final Logger LOG = LoggerFactory.getLogger(DenyAuthorizer.class);
-    
+
     /**
      * Invoked once immediately after construction
-     * @param conf Storm configuration 
+     * 
+     * @param conf Storm configuration
      */
-    public void prepare(Map conf) {        
+    public void prepare(Map conf) {
     }
 
     /**
      * permit() method is invoked for each incoming Thrift request
-     * @param contrext request context 
+     * 
+     * @param context request context
      * @param operation operation name
-     * @param topology_storm configuration of targeted topology 
+     * @param topology_conf configuration of the targeted topology
      * @return true if the request is authorized, false if reject
      */
     public boolean permit(ReqContext context, String operation, Map topology_conf) {
-        LOG.info("[req "+ context.requestID()+ "] Access "
-                + " from: " + (context.remoteAddress() == null? "null" : context.remoteAddress().toString())
-                + (context.principal() == null? "" : (" principal:"+ context.principal()))
-                +" op:"+operation
-                + (topology_conf == null? "" : (" topoology:"+topology_conf.get(Config.TOPOLOGY_NAME))));
+        LOG.info("[req " + context.requestID() + "] Access " + " from: " + (context.remoteAddress() == null ? "null" : context.remoteAddress().toString())
+                + (context.principal() == null ? "" : (" principal:" + context.principal())) + " op:" + operation
+                + (topology_conf == null ? "" : (" topoology:" + topology_conf.get(Config.TOPOLOGY_NAME))));
         return false;
     }
 }
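
DenyAuthorizer (and its permissive twin NoopAuthorizer further down) is mainly useful for exercising the authorization wiring. Installing it is a one-line conf change; treating Config.NIMBUS_AUTHORIZER as the selector is an assumption here, not something this diff shows:

    Map conf = Utils.readStormConfig();
    // assumption: Config.NIMBUS_AUTHORIZER names the IAuthorizer implementation nimbus loads
    conf.put(Config.NIMBUS_AUTHORIZER, DenyAuthorizer.class.getName());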

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/ImpersonationAuthorizer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/ImpersonationAuthorizer.java b/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/ImpersonationAuthorizer.java
index d6431be..e1a037f 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/ImpersonationAuthorizer.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/ImpersonationAuthorizer.java
@@ -10,7 +10,6 @@ import java.io.IOException;
 import java.net.InetAddress;
 import java.util.*;
 
-
 public class ImpersonationAuthorizer implements IAuthorizer {
     private static final Logger LOG = LoggerFactory.getLogger(ImpersonationAuthorizer.class);
     protected static final String WILD_CARD = "*";
@@ -49,16 +48,16 @@ public class ImpersonationAuthorizer implements IAuthorizer {
         String userBeingImpersonated = _ptol.toLocal(context.principal());
         InetAddress remoteAddress = context.remoteAddress();
 
-        LOG.info("user = {}, principal = {} is attmepting to impersonate user = {} for operation = {} from host = {}",
-                impersonatingUser, impersonatingPrincipal, userBeingImpersonated, operation, remoteAddress);
+        LOG.info("user = {}, principal = {} is attmepting to impersonate user = {} for operation = {} from host = {}", impersonatingUser,
+                impersonatingPrincipal, userBeingImpersonated, operation, remoteAddress);
 
         /**
          * no config is present for impersonating principal or user, do not permit impersonation.
          */
         if (!userImpersonationACL.containsKey(impersonatingPrincipal) && !userImpersonationACL.containsKey(impersonatingUser)) {
-            LOG.info("user = {}, principal = {} is trying to impersonate user {}, but config {} does not have entry for impersonating user or principal." +
-                    "Please see SECURITY.MD to learn how to configure users for impersonation."
-                    , impersonatingUser, impersonatingPrincipal, userBeingImpersonated, Config.NIMBUS_IMPERSONATION_ACL);
+            LOG.info("user = {}, principal = {} is trying to impersonate user {}, but config {} does not have entry for impersonating user or principal."
+                    + "Please see SECURITY.MD to learn how to configure users for impersonation.", impersonatingUser, impersonatingPrincipal,
+                    userBeingImpersonated, Config.NIMBUS_IMPERSONATION_ACL);
             return false;
         }
 
@@ -78,18 +77,17 @@ public class ImpersonationAuthorizer implements IAuthorizer {
             authorizedGroups.addAll(userACL.authorizedGroups);
         }
 
-        LOG.debug("user = {}, principal = {} is allowed to impersonate groups = {} from hosts = {} ",
-                impersonatingUser, impersonatingPrincipal, authorizedGroups, authorizedHosts);
+        LOG.debug("user = {}, principal = {} is allowed to impersonate groups = {} from hosts = {} ", impersonatingUser, impersonatingPrincipal,
+                authorizedGroups, authorizedHosts);
 
         if (!isAllowedToImpersonateFromHost(authorizedHosts, remoteAddress)) {
-            LOG.info("user = {}, principal = {} is not allowed to impersonate from host {} ",
-                    impersonatingUser, impersonatingPrincipal, remoteAddress);
+            LOG.info("user = {}, principal = {} is not allowed to impersonate from host {} ", impersonatingUser, impersonatingPrincipal, remoteAddress);
             return false;
         }
 
         if (!isAllowedToImpersonateUser(authorizedGroups, userBeingImpersonated)) {
-            LOG.info("user = {}, principal = {} is not allowed to impersonate any group that user {} is part of.",
-                    impersonatingUser, impersonatingPrincipal, userBeingImpersonated);
+            LOG.info("user = {}, principal = {} is not allowed to impersonate any group that user {} is part of.", impersonatingUser, impersonatingPrincipal,
+                    userBeingImpersonated);
             return false;
         }
 
@@ -98,14 +96,12 @@ public class ImpersonationAuthorizer implements IAuthorizer {
     }
 
     private boolean isAllowedToImpersonateFromHost(Set<String> authorizedHosts, InetAddress remoteAddress) {
-        return authorizedHosts.contains(WILD_CARD) ||
-                authorizedHosts.contains(remoteAddress.getCanonicalHostName()) ||
-                authorizedHosts.contains(remoteAddress.getHostName()) ||
-                authorizedHosts.contains(remoteAddress.getHostAddress());
+        return authorizedHosts.contains(WILD_CARD) || authorizedHosts.contains(remoteAddress.getCanonicalHostName())
+                || authorizedHosts.contains(remoteAddress.getHostName()) || authorizedHosts.contains(remoteAddress.getHostAddress());
     }
 
     private boolean isAllowedToImpersonateUser(Set<String> authorizedGroups, String userBeingImpersonated) {
-        if(authorizedGroups.contains(WILD_CARD)) {
+        if (authorizedGroups.contains(WILD_CARD)) {
             return true;
         }
 
@@ -131,9 +127,9 @@ public class ImpersonationAuthorizer implements IAuthorizer {
 
     protected class ImpersonationACL {
         public String impersonatingUser;
-        //Groups this user is authorized to impersonate.
+        // Groups this user is authorized to impersonate.
         public Set<String> authorizedGroups;
-        //Hosts this user is authorized to impersonate from.
+        // Hosts this user is authorized to impersonate from.
         public Set<String> authorizedHosts;
 
         private ImpersonationACL(String impersonatingUser, Set<String> authorizedGroups, Set<String> authorizedHosts) {
@@ -144,11 +140,8 @@ public class ImpersonationAuthorizer implements IAuthorizer {
 
         @Override
         public String toString() {
-            return "ImpersonationACL{" +
-                    "impersonatingUser='" + impersonatingUser + '\'' +
-                    ", authorizedGroups=" + authorizedGroups +
-                    ", authorizedHosts=" + authorizedHosts +
-                    '}';
+            return "ImpersonationACL{" + "impersonatingUser='" + impersonatingUser + '\'' + ", authorizedGroups=" + authorizedGroups + ", authorizedHosts="
+                    + authorizedHosts + '}';
         }
     }
 }
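
The hunks above read Config.NIMBUS_IMPERSONATION_ACL into per-user entries of authorized groups and hosts, with "*" as the wildcard; the nested key names are not visible in this diff, so the "hosts"/"groups" layout below follows SECURITY.MD and should be treated as an assumption:

    Map conf = new HashMap();
    Map<String, Map<String, List<String>>> impersonationAcl = new HashMap<String, Map<String, List<String>>>();
    Map<String, List<String>> aclEntry = new HashMap<String, List<String>>();
    aclEntry.put("hosts", Arrays.asList("gateway.example.com")); // assumed key: hosts the proxy may call from
    aclEntry.put("groups", Arrays.asList("analysts"));           // assumed key: groups it may impersonate ("*" = any)
    impersonationAcl.put("proxy-user", aclEntry);                // hypothetical impersonating user
    conf.put(Config.NIMBUS_IMPERSONATION_ACL, impersonationAcl);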

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/NoopAuthorizer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/NoopAuthorizer.java b/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/NoopAuthorizer.java
index 9af44d3..1d88202 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/NoopAuthorizer.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/NoopAuthorizer.java
@@ -34,24 +34,24 @@ public class NoopAuthorizer implements IAuthorizer {
 
     /**
      * Invoked once immediately after construction
-     * @param conf Storm configuration 
+     * 
+     * @param conf Storm configuration
      */
-    public void prepare(Map conf) {        
+    public void prepare(Map conf) {
     }
 
     /**
      * permit() method is invoked for each incoming Thrift request
-     * @param context request context includes info about 
+     * 
+     * @param context request context, including info about the requester
      * @param operation operation name
-     * @param topology_storm configuration of targeted topology 
+     * @param topology_conf configuration of the targeted topology
      * @return true if the request is authorized, false if reject
      */
     public boolean permit(ReqContext context, String operation, Map topology_conf) {
-        LOG.info("[req "+ context.requestID()+ "] Access "
-                + " from: " + (context.remoteAddress() == null? "null" : context.remoteAddress().toString())
-                + (context.principal() == null? "" : (" principal:"+ context.principal()))
-                +" op:"+operation
-                + (topology_conf == null? "" : (" topoology:"+topology_conf.get(Config.TOPOLOGY_NAME))));
+        LOG.info("[req " + context.requestID() + "] Access " + " from: " + (context.remoteAddress() == null ? "null" : context.remoteAddress().toString())
+                + (context.principal() == null ? "" : (" principal:" + context.principal())) + " op:" + operation
+                + (topology_conf == null ? "" : (" topoology:" + topology_conf.get(Config.TOPOLOGY_NAME))));
         return true;
     }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/SimpleACLAuthorizer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/SimpleACLAuthorizer.java b/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/SimpleACLAuthorizer.java
index e50a587..40d7a5d 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/SimpleACLAuthorizer.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/SimpleACLAuthorizer.java
@@ -36,15 +36,15 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * An authorization implementation that simply checks if a user is allowed to perform specific
- * operations.
+ * An authorization implementation that simply checks if a user is allowed to perform specific operations.
  */
 public class SimpleACLAuthorizer implements IAuthorizer {
     private static final Logger LOG = LoggerFactory.getLogger(SimpleACLAuthorizer.class);
 
     protected Set<String> _userCommands = new HashSet<String>(Arrays.asList("submitTopology", "fileUpload", "getNimbusConf", "getClusterInfo"));
     protected Set<String> _supervisorCommands = new HashSet<String>(Arrays.asList("fileDownload"));
-    protected Set<String> _topoCommands = new HashSet<String>(Arrays.asList("killTopology","rebalance","activate","deactivate","getTopologyConf","getTopology","getUserTopology","getTopologyInfo","uploadNewCredentials"));
+    protected Set<String> _topoCommands = new HashSet<String>(Arrays.asList("killTopology", "rebalance", "activate", "deactivate", "getTopologyConf",
+            "getTopology", "getUserTopology", "getTopologyInfo", "uploadNewCredentials"));
 
     protected Set<String> _admins;
     protected Set<String> _supervisors;
@@ -52,8 +52,10 @@ public class SimpleACLAuthorizer implements IAuthorizer {
     protected Set<String> _nimbusGroups;
     protected IPrincipalToLocal _ptol;
     protected IGroupMappingServiceProvider _groupMappingProvider;
+
     /**
      * Invoked once immediately after construction
+     * 
      * @param conf Storm configuration
      */
     @Override
@@ -64,17 +66,17 @@ public class SimpleACLAuthorizer implements IAuthorizer {
         _nimbusGroups = new HashSet<String>();
 
         if (conf.containsKey(Config.NIMBUS_ADMINS)) {
-            _admins.addAll((Collection<String>)conf.get(Config.NIMBUS_ADMINS));
+            _admins.addAll((Collection<String>) conf.get(Config.NIMBUS_ADMINS));
         }
         if (conf.containsKey(Config.NIMBUS_SUPERVISOR_USERS)) {
-            _supervisors.addAll((Collection<String>)conf.get(Config.NIMBUS_SUPERVISOR_USERS));
+            _supervisors.addAll((Collection<String>) conf.get(Config.NIMBUS_SUPERVISOR_USERS));
         }
         if (conf.containsKey(Config.NIMBUS_USERS)) {
-            _nimbusUsers.addAll((Collection<String>)conf.get(Config.NIMBUS_USERS));
+            _nimbusUsers.addAll((Collection<String>) conf.get(Config.NIMBUS_USERS));
         }
 
         if (conf.containsKey(Config.NIMBUS_GROUPS)) {
-            _nimbusGroups.addAll((Collection<String>)conf.get(Config.NIMBUS_GROUPS));
+            _nimbusGroups.addAll((Collection<String>) conf.get(Config.NIMBUS_GROUPS));
         }
 
         _ptol = AuthUtils.GetPrincipalToLocalPlugin(conf);
@@ -83,6 +85,7 @@ public class SimpleACLAuthorizer implements IAuthorizer {
 
     /**
      * permit() method is invoked for each incoming Thrift request
+     * 
      * @param context request context, including info about the requester
      * @param operation operation name
      * @param topology_conf configuration of targeted topology
@@ -90,10 +93,8 @@ public class SimpleACLAuthorizer implements IAuthorizer {
      */
     @Override
     public boolean permit(ReqContext context, String operation, Map topology_conf) {
-        LOG.info("[req " + context.requestID() + "] Access "
-                + " from: " + (context.remoteAddress() == null ? "null" : context.remoteAddress().toString())
-                + (context.principal() == null ? "" : (" principal:" + context.principal()))
-                + " op:" + operation
+        LOG.info("[req " + context.requestID() + "] Access " + " from: " + (context.remoteAddress() == null ? "null" : context.remoteAddress().toString())
+                + (context.principal() == null ? "" : (" principal:" + context.principal())) + " op:" + operation
                 + (topology_conf == null ? "" : (" topoology:" + topology_conf.get(Config.TOPOLOGY_NAME))));
 
         String principal = context.principal().getName();
@@ -103,8 +104,8 @@ public class SimpleACLAuthorizer implements IAuthorizer {
         if (_groupMappingProvider != null) {
             try {
                 userGroups = _groupMappingProvider.getGroups(user);
-            } catch(IOException e) {
-                LOG.warn("Error while trying to fetch user groups",e);
+            } catch (IOException e) {
+                LOG.warn("Error while trying to fetch user groups", e);
             }
         }
 
@@ -123,7 +124,7 @@ public class SimpleACLAuthorizer implements IAuthorizer {
         if (_topoCommands.contains(operation)) {
             Set<String> topoUsers = new HashSet<String>();
             if (topology_conf.containsKey(Config.TOPOLOGY_USERS)) {
-                topoUsers.addAll((Collection<String>)topology_conf.get(Config.TOPOLOGY_USERS));
+                topoUsers.addAll((Collection<String>) topology_conf.get(Config.TOPOLOGY_USERS));
             }
 
             if (topoUsers.contains(principal) || topoUsers.contains(user)) {
@@ -132,18 +133,19 @@ public class SimpleACLAuthorizer implements IAuthorizer {
 
             Set<String> topoGroups = new HashSet<String>();
             if (topology_conf.containsKey(Config.TOPOLOGY_GROUPS) && topology_conf.get(Config.TOPOLOGY_GROUPS) != null) {
-                topoGroups.addAll((Collection<String>)topology_conf.get(Config.TOPOLOGY_GROUPS));
+                topoGroups.addAll((Collection<String>) topology_conf.get(Config.TOPOLOGY_GROUPS));
             }
 
-            if (checkUserGroupAllowed(userGroups, topoGroups)) return true;
+            if (checkUserGroupAllowed(userGroups, topoGroups))
+                return true;
         }
         return false;
     }
 
     private Boolean checkUserGroupAllowed(Set<String> userGroups, Set<String> configuredGroups) {
-        if(userGroups.size() > 0 && configuredGroups.size() > 0) {
+        if (userGroups.size() > 0 && configuredGroups.size() > 0) {
             for (String tgroup : configuredGroups) {
-                if(userGroups.contains(tgroup))
+                if (userGroups.contains(tgroup))
                     return true;
             }
         }
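
prepare() above pulls four optional principal lists out of the conf; roughly, admins pass every check, supervisor users pass "fileDownload", and nimbus users/groups pass the general _userCommands set. A sketch of populating them (user and group names hypothetical):

    Map conf = new HashMap();
    conf.put(Config.NIMBUS_ADMINS, Arrays.asList("admin"));
    conf.put(Config.NIMBUS_SUPERVISOR_USERS, Arrays.asList("storm-supervisor"));
    conf.put(Config.NIMBUS_USERS, Arrays.asList("alice", "bob"));
    conf.put(Config.NIMBUS_GROUPS, Arrays.asList("storm-users"));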

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/SimpleWhitelistAuthorizer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/SimpleWhitelistAuthorizer.java b/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/SimpleWhitelistAuthorizer.java
index 55109f9..dbbc945 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/SimpleWhitelistAuthorizer.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/authorizer/SimpleWhitelistAuthorizer.java
@@ -31,8 +31,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * An authorization implementation that simply checks a whitelist of users that
- * are allowed to use the cluster.
+ * An authorization implementation that simply checks a whitelist of users that are allowed to use the cluster.
  */
 public class SimpleWhitelistAuthorizer implements IAuthorizer {
     private static final Logger LOG = LoggerFactory.getLogger(SimpleWhitelistAuthorizer.class);
@@ -41,30 +40,30 @@ public class SimpleWhitelistAuthorizer implements IAuthorizer {
 
     /**
      * Invoked once immediately after construction
-     * @param conf Storm configuration 
+     * 
+     * @param conf Storm configuration
      */
     @Override
     public void prepare(Map conf) {
         users = new HashSet<String>();
         if (conf.containsKey(WHITELIST_USERS_CONF)) {
-            users.addAll((Collection<String>)conf.get(WHITELIST_USERS_CONF));
+            users.addAll((Collection<String>) conf.get(WHITELIST_USERS_CONF));
         }
     }
 
     /**
      * permit() method is invoked for each incoming Thrift request
-     * @param context request context includes info about 
+     * 
+     * @param context request context, including info about the requester
      * @param operation operation name
-     * @param topology_storm configuration of targeted topology 
+     * @param topology_conf configuration of the targeted topology
      * @return true if the request is authorized, false if reject
      */
     @Override
     public boolean permit(ReqContext context, String operation, Map topology_conf) {
-        LOG.info("[req "+ context.requestID()+ "] Access "
-                 + " from: " + (context.remoteAddress() == null? "null" : context.remoteAddress().toString())
-                 + (context.principal() == null? "" : (" principal:"+ context.principal()))
-                 +" op:"+operation
-                 + (topology_conf == null? "" : (" topoology:"+topology_conf.get(Config.TOPOLOGY_NAME))));
+        LOG.info("[req " + context.requestID() + "] Access " + " from: " + (context.remoteAddress() == null ? "null" : context.remoteAddress().toString())
+                + (context.principal() == null ? "" : (" principal:" + context.principal())) + " op:" + operation
+                + (topology_conf == null ? "" : (" topoology:" + topology_conf.get(Config.TOPOLOGY_NAME))));
         return context.principal() != null && users.contains(context.principal().getName());
     }
 }
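
The whitelist above is just a set of names in the conf; everything else is denied. A self-contained sketch (user names hypothetical):

    Map conf = new HashMap();
    conf.put(SimpleWhitelistAuthorizer.WHITELIST_USERS_CONF, Arrays.asList("alice", "bob"));

    SimpleWhitelistAuthorizer auth = new SimpleWhitelistAuthorizer();
    auth.prepare(conf);
    // permit(...) now returns true only when the request principal's name is on the list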

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/digest/ClientCallbackHandler.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/digest/ClientCallbackHandler.java b/jstorm-core/src/main/java/backtype/storm/security/auth/digest/ClientCallbackHandler.java
index 3caacaa..0e3f626 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/digest/ClientCallbackHandler.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/digest/ClientCallbackHandler.java
@@ -33,7 +33,7 @@ import org.slf4j.LoggerFactory;
 import backtype.storm.security.auth.AuthUtils;
 
 /**
- *  client side callback handler.
+ * client side callback handler.
  */
 public class ClientCallbackHandler implements CallbackHandler {
     private static final String USERNAME = "username";
@@ -51,28 +51,29 @@ public class ClientCallbackHandler implements CallbackHandler {
      * @throws IOException
      */
     public ClientCallbackHandler(Configuration configuration) throws IOException {
-        if (configuration == null) return;
+        if (configuration == null)
+            return;
         AppConfigurationEntry configurationEntries[] = configuration.getAppConfigurationEntry(AuthUtils.LOGIN_CONTEXT_CLIENT);
         if (configurationEntries == null) {
-            String errorMessage = "Could not find a '"+AuthUtils.LOGIN_CONTEXT_CLIENT
-                    + "' entry in this configuration: Client cannot start.";
+            String errorMessage = "Could not find a '" + AuthUtils.LOGIN_CONTEXT_CLIENT + "' entry in this configuration: Client cannot start.";
             throw new IOException(errorMessage);
         }
 
         _password = "";
-        for(AppConfigurationEntry entry: configurationEntries) {
+        for (AppConfigurationEntry entry : configurationEntries) {
             if (entry.getOptions().get(USERNAME) != null) {
-                _username = (String)entry.getOptions().get(USERNAME);
+                _username = (String) entry.getOptions().get(USERNAME);
             }
             if (entry.getOptions().get(PASSWORD) != null) {
-                _password = (String)entry.getOptions().get(PASSWORD);
+                _password = (String) entry.getOptions().get(PASSWORD);
             }
         }
     }
 
     /**
      * This method is invoked by SASL for authentication challenges
-     * @param callbacks a collection of challenge callbacks 
+     * 
+     * @param callbacks a collection of challenge callbacks
      */
     public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
         for (Callback c : callbacks) {
@@ -82,10 +83,10 @@ public class ClientCallbackHandler implements CallbackHandler {
                 nc.setName(_username);
             } else if (c instanceof PasswordCallback) {
                 LOG.debug("password callback");
-                PasswordCallback pc = (PasswordCallback)c;
+                PasswordCallback pc = (PasswordCallback) c;
                 if (_password != null) {
                     pc.setPassword(_password.toCharArray());
-                } 
+                }
             } else if (c instanceof AuthorizeCallback) {
                 LOG.debug("authorization callback");
                 AuthorizeCallback ac = (AuthorizeCallback) c;
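
The constructor above only consumes the "username" and "password" options of the client login context. A hedged way to hand them over programmatically, for instance in a test (the login module class name is an assumption; the handler never instantiates it, it only reads the options):

    Map<String, String> opts = new HashMap<String, String>();
    opts.put("username", "storm-client"); // the USERNAME option key above
    opts.put("password", "secret");       // the PASSWORD option key above

    final AppConfigurationEntry entry = new AppConfigurationEntry(
            "org.apache.zookeeper.server.auth.DigestLoginModule", // assumed module class
            AppConfigurationEntry.LoginModuleControlFlag.REQUIRED, opts);

    Configuration login_conf = new Configuration() {
        @Override
        public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
            return new AppConfigurationEntry[] { entry };
        }

        public void refresh() { } // no-op; nothing to reload
    };
    CallbackHandler handler = new ClientCallbackHandler(login_conf); // throws IOException if the client entry is missing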

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/security/auth/digest/DigestSaslTransportPlugin.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/security/auth/digest/DigestSaslTransportPlugin.java b/jstorm-core/src/main/java/backtype/storm/security/auth/digest/DigestSaslTransportPlugin.java
index ad642d8..7b497c6 100755
--- a/jstorm-core/src/main/java/backtype/storm/security/auth/digest/DigestSaslTransportPlugin.java
+++ b/jstorm-core/src/main/java/backtype/storm/security/auth/digest/DigestSaslTransportPlugin.java
@@ -38,11 +38,11 @@ public class DigestSaslTransportPlugin extends SaslTransportPlugin {
     public static final String DIGEST = "DIGEST-MD5";
     private static final Logger LOG = LoggerFactory.getLogger(DigestSaslTransportPlugin.class);
 
-    protected TTransportFactory getServerTransportFactory() throws IOException {        
-        //create an authentication callback handler
+    protected TTransportFactory getServerTransportFactory() throws IOException {
+        // create an authentication callback handler
         CallbackHandler server_callback_handler = new ServerCallbackHandler(login_conf);
 
-        //create a transport factory that will invoke our auth callback for digest
+        // create a transport factory that will invoke our auth callback for digest
         TSaslServerTransport.Factory factory = new TSaslServerTransport.Factory();
         factory.addServerDefinition(DIGEST, AuthUtils.SERVICE, "localhost", null, server_callback_handler);
 
@@ -53,13 +53,8 @@ public class DigestSaslTransportPlugin extends SaslTransportPlugin {
     @Override
     public TTransport connect(TTransport transport, String serverHost, String asUser) throws TTransportException, IOException {
         ClientCallbackHandler client_callback_handler = new ClientCallbackHandler(login_conf);
-        TSaslClientTransport wrapper_transport = new TSaslClientTransport(DIGEST,
-                null,
-                AuthUtils.SERVICE, 
-                serverHost,
-                null,
-                client_callback_handler, 
-                transport);
+        TSaslClientTransport wrapper_transport =
+                new TSaslClientTransport(DIGEST, null, AuthUtils.SERVICE, serverHost, null, client_callback_handler, transport);
 
         wrapper_transport.open();
         LOG.debug("SASL DIGEST-MD5 client transport has been established");


http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/Time.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/Time.java b/jstorm-core/src/main/java/backtype/storm/utils/Time.java
index 50a79fd..8732008 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/Time.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/Time.java
@@ -24,86 +24,87 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-
 public class Time {
-    public static Logger LOG = LoggerFactory.getLogger(Time.class);    
-    
+    public static Logger LOG = LoggerFactory.getLogger(Time.class);
+
     private static AtomicBoolean simulating = new AtomicBoolean(false);
-    //TODO: should probably use weak references here or something
+    // TODO: should probably use weak references here or something
     private static volatile Map<Thread, AtomicLong> threadSleepTimes;
     private static final Object sleepTimesLock = new Object();
-    
-    private static AtomicLong simulatedCurrTimeMs; //should this be a thread local that's allowed to keep advancing?
-    
+
+    private static AtomicLong simulatedCurrTimeMs; // should this be a thread local that's allowed to keep advancing?
+
     public static void startSimulating() {
-        synchronized(sleepTimesLock) {
+        synchronized (sleepTimesLock) {
             simulating.set(true);
             simulatedCurrTimeMs = new AtomicLong(0);
             threadSleepTimes = new ConcurrentHashMap<Thread, AtomicLong>();
         }
     }
-    
+
     public static void stopSimulating() {
-        synchronized(sleepTimesLock) {
-            simulating.set(false);             
-            threadSleepTimes = null;  
+        synchronized (sleepTimesLock) {
+            simulating.set(false);
+            threadSleepTimes = null;
         }
     }
-    
+
     public static boolean isSimulating() {
         return simulating.get();
     }
-    
+
     public static void sleepUntil(long targetTimeMs) throws InterruptedException {
-        if(simulating.get()) {
+        if (simulating.get()) {
             try {
-                synchronized(sleepTimesLock) {
+                synchronized (sleepTimesLock) {
                     threadSleepTimes.put(Thread.currentThread(), new AtomicLong(targetTimeMs));
                 }
-                while(simulatedCurrTimeMs.get() < targetTimeMs) {
+                while (simulatedCurrTimeMs.get() < targetTimeMs) {
                     Thread.sleep(10);
                 }
             } finally {
-                synchronized(sleepTimesLock) {
+                synchronized (sleepTimesLock) {
                     if (simulating.get()) {
                         threadSleepTimes.remove(Thread.currentThread());
                     }
                 }
             }
         } else {
-            long sleepTime = targetTimeMs-currentTimeMillis();
-            if(sleepTime>0) 
+            long sleepTime = targetTimeMs - currentTimeMillis();
+            if (sleepTime > 0)
                 Thread.sleep(sleepTime);
         }
     }
-    
+
     public static void sleep(long ms) throws InterruptedException {
-        sleepUntil(currentTimeMillis()+ms);
+        sleepUntil(currentTimeMillis() + ms);
     }
-    
+
     public static long currentTimeMillis() {
-        if(simulating.get()) {
+        if (simulating.get()) {
             return simulatedCurrTimeMs.get();
         } else {
             return System.currentTimeMillis();
         }
     }
-    
+
     public static int currentTimeSecs() {
         return (int) (currentTimeMillis() / 1000);
     }
-    
+
     public static void advanceTime(long ms) {
-        if(!simulating.get()) throw new IllegalStateException("Cannot simulate time unless in simulation mode");
+        if (!simulating.get())
+            throw new IllegalStateException("Cannot simulate time unless in simulation mode");
         simulatedCurrTimeMs.set(simulatedCurrTimeMs.get() + ms);
     }
-    
+
     public static boolean isThreadWaiting(Thread t) {
-        if(!simulating.get()) throw new IllegalStateException("Must be in simulation mode");
+        if (!simulating.get())
+            throw new IllegalStateException("Must be in simulation mode");
         AtomicLong time;
-        synchronized(sleepTimesLock) {
+        synchronized (sleepTimesLock) {
             time = threadSleepTimes.get(t);
         }
-        return !t.isAlive() || time!=null && currentTimeMillis() < time.longValue();
-    }    
+        return !t.isAlive() || time != null && currentTimeMillis() < time.longValue();
+    }
 }
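
Time above is Storm's test clock: in simulation mode the sleeps spin on a simulated counter that only advanceTime() moves. A small sketch:

    Time.startSimulating();
    try {
        long t0 = Time.currentTimeMillis(); // reads the simulated clock, which starts at 0
        Time.advanceTime(5000);             // jump the simulated clock forward five seconds
        assert Time.currentTimeMillis() == t0 + 5000;
    } finally {
        Time.stopSimulating();              // back to System.currentTimeMillis()
    }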

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/TimeCacheMap.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/TimeCacheMap.java b/jstorm-core/src/main/java/backtype/storm/utils/TimeCacheMap.java
index f0a194f..a29a954 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/TimeCacheMap.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/TimeCacheMap.java
@@ -37,18 +37,18 @@ import java.util.Set;
 public class TimeCacheMap<K, V> {
     // this default ensures things expire at most 50% past the expiration time
     private static final int DEFAULT_NUM_BUCKETS = 3;
-    
+
     @Deprecated
     public static interface ExpiredCallback<K, V> {
         public void expire(K key, V val);
     }
-    
+
     private LinkedList<HashMap<K, V>> _buckets;
-    
+
     private final Object _lock = new Object();
     private Thread _cleaner;
     private ExpiredCallback _callback;
-    
+
     public TimeCacheMap(int expirationSecs, int numBuckets, ExpiredCallback<K, V> callback) {
         if (numBuckets < 2) {
             throw new IllegalArgumentException("numBuckets must be >= 2");
@@ -57,7 +57,7 @@ public class TimeCacheMap<K, V> {
         for (int i = 0; i < numBuckets; i++) {
             _buckets.add(new HashMap<K, V>());
         }
-        
+
         _callback = callback;
         final long expirationMillis = expirationSecs * 1000L;
         final long sleepTime = expirationMillis / (numBuckets - 1);
@@ -78,26 +78,26 @@ public class TimeCacheMap<K, V> {
                         }
                     }
                 } catch (InterruptedException ex) {
-                    
+
                 }
             }
         });
         _cleaner.setDaemon(true);
         _cleaner.start();
     }
-    
+
     public TimeCacheMap(int expirationSecs, ExpiredCallback<K, V> callback) {
         this(expirationSecs, DEFAULT_NUM_BUCKETS, callback);
     }
-    
+
     public TimeCacheMap(int expirationSecs) {
         this(expirationSecs, DEFAULT_NUM_BUCKETS);
     }
-    
+
     public TimeCacheMap(int expirationSecs, int numBuckets) {
         this(expirationSecs, numBuckets, null);
     }
-    
+
     public boolean containsKey(K key) {
         synchronized (_lock) {
             for (HashMap<K, V> bucket : _buckets) {
@@ -108,7 +108,7 @@ public class TimeCacheMap<K, V> {
             return false;
         }
     }
-    
+
     public V get(K key) {
         synchronized (_lock) {
             for (HashMap<K, V> bucket : _buckets) {
@@ -119,7 +119,7 @@ public class TimeCacheMap<K, V> {
             return null;
         }
     }
-    
+
     public void put(K key, V value) {
         synchronized (_lock) {
             Iterator<HashMap<K, V>> it = _buckets.iterator();
@@ -131,7 +131,7 @@ public class TimeCacheMap<K, V> {
             }
         }
     }
-    
+
     public Object remove(K key) {
         synchronized (_lock) {
             for (HashMap<K, V> bucket : _buckets) {
@@ -142,7 +142,7 @@ public class TimeCacheMap<K, V> {
             return null;
         }
     }
-    
+
     public int size() {
         synchronized (_lock) {
             int size = 0;
@@ -152,11 +152,11 @@ public class TimeCacheMap<K, V> {
             return size;
         }
     }
-    
+
     public void cleanup() {
         _cleaner.interrupt();
     }
-    
+
     public Set<K> keySet() {
         Set<K> ret = new HashSet<K>();
         synchronized (_lock) {
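
For reference, typical use of the class above; with the default 3 buckets, the comment at the top means an entry lives between expirationSecs and 1.5 * expirationSecs (key/value strings hypothetical):

    TimeCacheMap<String, String> pending = new TimeCacheMap<String, String>(30,
            new TimeCacheMap.ExpiredCallback<String, String>() {
                public void expire(String key, String val) {
                    System.out.println("request " + key + " timed out");
                }
            });
    pending.put("req-1", "in-flight");
    String state = pending.get("req-1"); // null once the bucket holding it rotates out
    pending.cleanup();                   // interrupts the daemon cleaner thread when done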

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/TransferDrainer.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/TransferDrainer.java b/jstorm-core/src/main/java/backtype/storm/utils/TransferDrainer.java
index 4638117..48b39b7 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/TransferDrainer.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/TransferDrainer.java
@@ -26,88 +26,88 @@ import backtype.storm.messaging.TaskMessage;
 
 public class TransferDrainer {
 
-  private HashMap<String, ArrayList<ArrayList<TaskMessage>>> bundles = new HashMap();
-  
-  public void add(HashMap<String, ArrayList<TaskMessage>> workerTupleSetMap) {
-    for (String key : workerTupleSetMap.keySet()) {
-      
-      ArrayList<ArrayList<TaskMessage>> bundle = bundles.get(key);
-      if (null == bundle) {
-        bundle = new ArrayList<ArrayList<TaskMessage>>();
-        bundles.put(key, bundle);
-      }
-      
-      ArrayList tupleSet = workerTupleSetMap.get(key);
-      if (null != tupleSet && tupleSet.size() > 0) {
-        bundle.add(tupleSet);
-      }
-    } 
-  }
-  
-  public void send(HashMap<String, IConnection> connections) {
-    for (String hostPort : bundles.keySet()) {
-      IConnection connection = connections.get(hostPort);
-      if (null != connection) { 
-        ArrayList<ArrayList<TaskMessage>> bundle = bundles.get(hostPort);
-        for (ArrayList<TaskMessage> list : bundle) {
-            connection.send(list);
+    private HashMap<String, ArrayList<ArrayList<TaskMessage>>> bundles = new HashMap<String, ArrayList<ArrayList<TaskMessage>>>();
+
+    public void add(HashMap<String, ArrayList<TaskMessage>> workerTupleSetMap) {
+        for (String key : workerTupleSetMap.keySet()) {
+
+            ArrayList<ArrayList<TaskMessage>> bundle = bundles.get(key);
+            if (null == bundle) {
+                bundle = new ArrayList<ArrayList<TaskMessage>>();
+                bundles.put(key, bundle);
+            }
+
+            ArrayList<TaskMessage> tupleSet = workerTupleSetMap.get(key);
+            if (null != tupleSet && tupleSet.size() > 0) {
+                bundle.add(tupleSet);
+            }
         }
-        
-      }
-    } 
-  }
-  
-  private Iterator<TaskMessage> getBundleIterator(final ArrayList<ArrayList<TaskMessage>> bundle) {
-    
-    if (null == bundle) {
-      return null;
     }
-    
-    return new Iterator<TaskMessage> () {
-      
-      private int offset = 0;
-      private int size = 0;
-      {
-        for (ArrayList<TaskMessage> list : bundle) {
-            size += list.size();
-        }
-      }
-      
-      private int bundleOffset = 0;
-      private Iterator<TaskMessage> iter = bundle.get(bundleOffset).iterator();
-      
-      @Override
-      public boolean hasNext() {
-        if (offset < size) {
-          return true;
-        }
-        return false;
-      }
-
-      @Override
-      public TaskMessage next() {
-        TaskMessage msg = null;
-        if (iter.hasNext()) {
-          msg = iter.next(); 
-        } else {
-          bundleOffset++;
-          iter = bundle.get(bundleOffset).iterator();
-          msg = iter.next();
+
+    public void send(HashMap<String, IConnection> connections) {
+        for (String hostPort : bundles.keySet()) {
+            IConnection connection = connections.get(hostPort);
+            if (null != connection) {
+                ArrayList<ArrayList<TaskMessage>> bundle = bundles.get(hostPort);
+                for (ArrayList<TaskMessage> list : bundle) {
+                    connection.send(list);
+                }
+
+            }
         }
-        if (null != msg) {
-          offset++;
+    }
+
+    private Iterator<TaskMessage> getBundleIterator(final ArrayList<ArrayList<TaskMessage>> bundle) {
+
+        if (null == bundle) {
+            return null;
         }
-        return msg;
-      }
-
-      @Override
-      public void remove() {
-        throw new RuntimeException("not supported");
-      }
-    };
-  }
-  
-  public void clear() {
-    bundles.clear();
-  }
+
+        return new Iterator<TaskMessage>() {
+
+            private int offset = 0;
+            private int size = 0;
+            {
+                for (ArrayList<TaskMessage> list : bundle) {
+                    size += list.size();
+                }
+            }
+
+            private int bundleOffset = 0;
+            private Iterator<TaskMessage> iter = bundle.get(bundleOffset).iterator();
+
+            @Override
+            public boolean hasNext() {
+                if (offset < size) {
+                    return true;
+                }
+                return false;
+            }
+
+            @Override
+            public TaskMessage next() {
+                TaskMessage msg = null;
+                if (iter.hasNext()) {
+                    msg = iter.next();
+                } else {
+                    bundleOffset++;
+                    iter = bundle.get(bundleOffset).iterator();
+                    msg = iter.next();
+                }
+                if (null != msg) {
+                    offset++;
+                }
+                return msg;
+            }
+
+            @Override
+            public void remove() {
+                throw new RuntimeException("not supported");
+            }
+        };
+    }
+
+    public void clear() {
+        bundles.clear();
+    }
 }
\ No newline at end of file
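
A sketch of the drain cycle for the re-indented class above (the TaskMessage constructor and the host:port key format are assumptions inferred from how add() and send() pair up):

    void drainTo(HashMap<String, IConnection> connections) {
        byte[] payload = "hello".getBytes();
        HashMap<String, ArrayList<TaskMessage>> batch = new HashMap<String, ArrayList<TaskMessage>>();
        ArrayList<TaskMessage> msgs = new ArrayList<TaskMessage>();
        msgs.add(new TaskMessage(7, payload)); // assumed: TaskMessage(taskId, byte[] payload)
        batch.put("worker-host:6700", msgs);   // keyed by host:port, the same key send() looks up

        TransferDrainer drainer = new TransferDrainer();
        drainer.add(batch);        // buffers the batch per destination
        drainer.send(connections); // looks up each destination's IConnection and ships its bundles
        drainer.clear();
    }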

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/TupleHelpers.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/TupleHelpers.java b/jstorm-core/src/main/java/backtype/storm/utils/TupleHelpers.java
index 45725c9..ce2a0b3 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/TupleHelpers.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/TupleHelpers.java
@@ -22,9 +22,9 @@ import backtype.storm.tuple.Tuple;
 
 public class TupleHelpers {
     private TupleHelpers() {
-        
+
     }
-    
+
     public static boolean isTickTuple(Tuple tuple) {
         return tuple.getSourceComponent().equals(Constants.SYSTEM_COMPONENT_ID) && tuple.getSourceStreamId().equals(Constants.SYSTEM_TICK_STREAM_ID);
     }
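
isTickTuple() above is the usual guard in a bolt's execute() for splitting the periodic tick stream off the data path (TupleUtils.isTick in the next file is the same check plus a null guard). A sketch with hypothetical flush()/process() helpers:

    @Override
    public void execute(Tuple tuple) {
        if (TupleHelpers.isTickTuple(tuple)) {
            flush();        // hypothetical periodic work
        } else {
            process(tuple); // hypothetical normal-path handling
        }
    }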

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/TupleUtils.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/TupleUtils.java b/jstorm-core/src/main/java/backtype/storm/utils/TupleUtils.java
index f9fb2c0..80b78d8 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/TupleUtils.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/TupleUtils.java
@@ -22,14 +22,13 @@ import backtype.storm.tuple.Tuple;
 
 public final class TupleUtils {
 
-  private TupleUtils() {
-    // No instantiation
-  }
+    private TupleUtils() {
+        // No instantiation
+    }
 
-  public static boolean isTick(Tuple tuple) {
-    return tuple != null
-           && Constants.SYSTEM_COMPONENT_ID  .equals(tuple.getSourceComponent())
-           && Constants.SYSTEM_TICK_STREAM_ID.equals(tuple.getSourceStreamId());
-  }
+    public static boolean isTick(Tuple tuple) {
+        return tuple != null && Constants.SYSTEM_COMPONENT_ID.equals(tuple.getSourceComponent())
+                && Constants.SYSTEM_TICK_STREAM_ID.equals(tuple.getSourceStreamId());
+    }
 
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/Utils.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/Utils.java b/jstorm-core/src/main/java/backtype/storm/utils/Utils.java
index 0669cfb..9194d07 100644
--- a/jstorm-core/src/main/java/backtype/storm/utils/Utils.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/Utils.java
@@ -17,45 +17,21 @@
  */
 package backtype.storm.utils;
 
-import java.io.BufferedReader;
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.FileReader;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.ObjectInputStream;
-import java.io.ObjectOutputStream;
-import java.lang.reflect.Constructor;
-import java.net.URL;
-import java.net.URLClassLoader;
-import java.net.URLDecoder;
-import java.nio.ByteBuffer;
-import java.nio.channels.Channels;
-import java.nio.channels.WritableByteChannel;
-import java.util.ArrayList;
-import java.util.Enumeration;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Properties;
-import java.util.TreeMap;
-import java.util.UUID;
-import java.util.zip.GZIPInputStream;
-import java.util.zip.GZIPOutputStream;
-
+import backtype.storm.Config;
+import backtype.storm.generated.ComponentCommon;
+import backtype.storm.generated.ComponentObject;
+import backtype.storm.generated.StormTopology;
+import backtype.storm.serialization.DefaultSerializationDelegate;
+import backtype.storm.serialization.SerializationDelegate;
+import clojure.lang.IFn;
+import clojure.lang.RT;
+import com.alibaba.jstorm.utils.LoadConf;
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
 import org.apache.commons.io.input.ClassLoaderObjectInputStream;
 import org.apache.commons.lang.StringUtils;
 import org.apache.curator.framework.CuratorFramework;
 import org.apache.curator.framework.CuratorFrameworkFactory;
-import org.apache.curator.retry.ExponentialBackoffRetry;
 import org.apache.thrift.TException;
 import org.apache.zookeeper.ZooDefs;
 import org.apache.zookeeper.data.ACL;
@@ -64,20 +40,19 @@ import org.json.simple.JSONValue;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.yaml.snakeyaml.Yaml;
-import org.yaml.snakeyaml.constructor.SafeConstructor;
 
-import backtype.storm.Config;
-import backtype.storm.generated.ComponentCommon;
-import backtype.storm.generated.ComponentObject;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.serialization.DefaultSerializationDelegate;
-import backtype.storm.serialization.SerializationDelegate;
-import clojure.lang.IFn;
-import clojure.lang.RT;
-
-import com.alibaba.jstorm.utils.LoadConf;
-import com.google.gson.Gson;
-import com.google.gson.GsonBuilder;
+import java.io.*;
+import java.lang.reflect.Constructor;
+import java.net.URL;
+import java.net.URLClassLoader;
+import java.net.URLDecoder;
+import java.nio.ByteBuffer;
+import java.nio.channels.Channels;
+import java.nio.channels.WritableByteChannel;
+import java.util.*;
+import java.util.Map.Entry;
+import java.util.zip.GZIPInputStream;
+import java.util.zip.GZIPOutputStream;
 
 public class Utils {
     private static final Logger LOG = LoggerFactory.getLogger(Utils.class);
@@ -99,7 +74,7 @@ public class Utils {
         }
     }
 
-    public static Object newInstance(String klass, Object ...params) {
+    public static Object newInstance(String klass, Object... params) {
         try {
             Class c = Class.forName(klass);
             Constructor[] constructors = c.getConstructors();
@@ -111,7 +86,7 @@ public class Utils {
                     break;
                 }
             }
-            
+
             if (con == null) {
                 throw new RuntimeException("Cound not found the corresponding constructor, params=" + params.toString());
             } else {
@@ -128,35 +103,34 @@ public class Utils {
 
     /**
      * Go thrift gzip serializer
+     * 
      * @param obj
      * @return
      */
     public static byte[] serialize(Object obj) {
         /**
-         * @@@
-         * JStorm disable the thrift.gz.serializer
+         * @@@ JStorm disables the thrift.gz.serializer
          */
-        //return serializationDelegate.serialize(obj);
+        // return serializationDelegate.serialize(obj);
         return javaSerialize(obj);
     }
 
     /**
      * Go thrift gzip deserializer
-     * @param obj
+     * 
      * @return
      */
     public static <T> T deserialize(byte[] serialized, Class<T> clazz) {
         /**
-         * @@@
-         * JStorm disable the thrift.gz.serializer 
+         * @@@ JStorm disables the thrift.gz.serializer
          */
-        //return serializationDelegate.deserialize(serialized, clazz);
-        return (T)javaDeserialize(serialized);
+        // return serializationDelegate.deserialize(serialized, clazz);
+        return (T) javaDeserialize(serialized);
     }
 
     public static byte[] javaSerialize(Object obj) {
         if (obj instanceof byte[]) {
-            return (byte[])obj;
+            return (byte[]) obj;
         }
         try {
             ByteArrayOutputStream bos = new ByteArrayOutputStream();
@@ -168,7 +142,7 @@ public class Utils {
             throw new RuntimeException(e);
         }
     }
-    
+
     public static Object maybe_deserialize(byte[] data) {
         if (data == null || data.length == 0) {
             return null;
@@ -179,9 +153,10 @@ public class Utils {
             return null;
         }
     }
-    
+
     /**
      * Deserialize with the given ClassLoader
+     * 
      * @param serialized
      * @param loader
      * @return
@@ -206,20 +181,20 @@ public class Utils {
             throw new RuntimeException(e);
         }
     }
-    
+
     public static Object javaDeserialize(byte[] serialized) {
         return javaDeserializeWithCL(serialized, WorkerClassLoader.getInstance());
     }
-    
+
     public static <T> T javaDeserialize(byte[] serialized, Class<T> clazz) {
-        return (T)javaDeserializeWithCL(serialized, WorkerClassLoader.getInstance());
+        return (T) javaDeserializeWithCL(serialized, WorkerClassLoader.getInstance());
     }
-    
+
     public static String to_json(Object m) {
         // return JSON.toJSONString(m);
         return JSONValue.toJSONString(m);
     }
-    
+
     public static Object from_json(String json) {
         if (json == null) {
             return null;
@@ -228,14 +203,14 @@ public class Utils {
             return JSONValue.parse(json);
         }
     }
-    
+
     public static String toPrettyJsonString(Object obj) {
         Gson gson2 = new GsonBuilder().setPrettyPrinting().create();
         String ret = gson2.toJson(obj);
-        
+
         return ret;
     }
-    
+
     public static byte[] gzip(byte[] data) {
         try {
             ByteArrayOutputStream bos = new ByteArrayOutputStream();
@@ -269,9 +244,9 @@ public class Utils {
     public static <T> String join(Iterable<T> coll, String sep) {
         Iterator<T> it = coll.iterator();
         String ret = "";
-        while(it.hasNext()) {
+        while (it.hasNext()) {
             ret = ret + it.next();
-            if(it.hasNext()) {
+            if (it.hasNext()) {
                 ret = ret + sep;
             }
         }
@@ -281,13 +256,14 @@ public class Utils {
     public static void sleep(long millis) {
         try {
             Time.sleep(millis);
-        } catch(InterruptedException e) {
+        } catch (InterruptedException e) {
             throw new RuntimeException(e);
         }
     }
 
     /**
      * Please use LoadConf.findResources(name) directly;
+     * 
      * @param name
      * @return
      */
@@ -298,6 +274,7 @@ public class Utils {
 
     /**
      * Please use LoadConf.findAndReadYaml(name) directly;
+     * 
      * @param name
      * @return
      */
@@ -306,9 +283,8 @@ public class Utils {
         return LoadConf.findAndReadYaml(name, mustExist, false);
     }
 
-
     public static Map findAndReadConfigFile(String name) {
-    	return LoadConf.findAndReadYaml(name, true, false);
+        return LoadConf.findAndReadYaml(name, true, false);
     }
 
     public static Map readDefaultConfig() {
@@ -318,7 +294,7 @@ public class Utils {
     public static Map readCommandLineOpts() {
         Map ret = new HashMap();
         String commandOptions = System.getProperty("storm.options");
-        if(commandOptions != null) {
+        if (commandOptions != null) {
             String[] configs = commandOptions.split(",");
             for (String config : configs) {
                 config = URLDecoder.decode(config);
@@ -335,22 +311,21 @@ public class Utils {
         return ret;
     }
 
-    
     public static void replaceLocalDir(Map<Object, Object> conf) {
         String stormHome = System.getProperty("jstorm.home");
         boolean isEmpty = StringUtils.isBlank(stormHome);
-        
+
         Map<Object, Object> replaceMap = new HashMap<Object, Object>();
-        
+
         for (Entry entry : conf.entrySet()) {
             Object key = entry.getKey();
             Object value = entry.getValue();
-            
+
             if (value instanceof String) {
                 if (StringUtils.isBlank((String) value) == true) {
                     continue;
                 }
-                
+
                 String str = (String) value;
                 if (isEmpty == true) {
                     // replace %JSTORM_HOME% as current directory
@@ -358,20 +333,20 @@ public class Utils {
                 } else {
                     str = str.replace("%JSTORM_HOME%", stormHome);
                 }
-                
+
                 replaceMap.put(key, str);
             }
         }
-        
+
         conf.putAll(replaceMap);
     }
-    
+
     public static Map loadDefinedConf(String confFile) {
         File file = new File(confFile);
         if (file.exists() == false) {
             return findAndReadConfigFile(confFile, true);
         }
-        
+
         Yaml yaml = new Yaml();
         Map ret;
         try {
@@ -381,10 +356,10 @@ public class Utils {
         }
         if (ret == null)
             ret = new HashMap();
-        
+
         return new HashMap(ret);
     }
-    
+
     public static Map readStormConfig() {
         Map ret = readDefaultConfig();
         String confFile = System.getProperty("storm.conf.file");
@@ -396,40 +371,41 @@ public class Utils {
         }
         ret.putAll(storm);
         ret.putAll(readCommandLineOpts());
-        
+
         replaceLocalDir(ret);
         return ret;
     }
 
     private static Object normalizeConf(Object conf) {
-        if(conf==null) return new HashMap();
-        if(conf instanceof Map) {
+        if (conf == null)
+            return new HashMap();
+        if (conf instanceof Map) {
             Map confMap = new HashMap((Map) conf);
-            for(Object key: confMap.keySet()) {
+            for (Object key : confMap.keySet()) {
                 Object val = confMap.get(key);
                 confMap.put(key, normalizeConf(val));
             }
             return confMap;
-        } else if(conf instanceof List) {
-            List confList =  new ArrayList((List) conf);
-            for(int i=0; i<confList.size(); i++) {
+        } else if (conf instanceof List) {
+            List confList = new ArrayList((List) conf);
+            for (int i = 0; i < confList.size(); i++) {
                 Object val = confList.get(i);
                 confList.set(i, normalizeConf(val));
             }
             return confList;
         } else if (conf instanceof Integer) {
             return ((Integer) conf).longValue();
-        } else if(conf instanceof Float) {
+        } else if (conf instanceof Float) {
             return ((Float) conf).doubleValue();
         } else {
             return conf;
         }
     }
-    
+
     public static boolean isValidConf(Map<String, Object> stormConf) {
         return normalizeConf(stormConf).equals(normalizeConf(Utils.from_json(Utils.to_json(stormConf))));
     }
-    
+
     public static Object getSetComponentObject(ComponentObject obj, URLClassLoader loader) {
         if (obj.getSetField() == ComponentObject._Fields.SERIALIZED_JAVA) {
             return javaDeserializeWithCL(obj.get_serialized_java(), loader);
@@ -439,7 +415,7 @@ public class Utils {
             return obj.get_shell();
         }
     }
-    
+
     public static <S, T> T get(Map<S, T> m, S key, T def) {
         T ret = m.get(key);
         if (ret == null) {
@@ -447,7 +423,7 @@ public class Utils {
         }
         return ret;
     }
-    
+
     public static List<Object> tuple(Object... values) {
         List<Object> ret = new ArrayList<Object>();
         for (Object v : values) {
@@ -455,7 +431,7 @@ public class Utils {
         }
         return ret;
     }
-    
+
     public static void downloadFromMaster(Map conf, String file, String localFile) throws IOException, TException {
         WritableByteChannel out = null;
         NimbusClient client = null;
@@ -478,12 +454,12 @@ public class Utils {
                 client.close();
         }
     }
-	
+
     public static IFn loadClojureFn(String namespace, String name) {
         try {
-          clojure.lang.Compiler.eval(RT.readString("(require '" + namespace + ")"));
+            clojure.lang.Compiler.eval(RT.readString("(require '" + namespace + ")"));
         } catch (Exception e) {
-          //if playing from the repl and defining functions, file won't exist
+            // if playing from the repl and defining functions, file won't exist
         }
         return (IFn) RT.var(namespace, name).deref();
     }
@@ -494,38 +470,38 @@ public class Utils {
 
     public static <K, V> Map<V, K> reverseMap(Map<K, V> map) {
         Map<V, K> ret = new HashMap<V, K>();
-        for(K key: map.keySet()) {
+        for (K key : map.keySet()) {
             ret.put(map.get(key), key);
         }
         return ret;
     }
 
     public static ComponentCommon getComponentCommon(StormTopology topology, String id) {
-        if(topology.get_spouts().containsKey(id)) {
+        if (topology.get_spouts().containsKey(id)) {
             return topology.get_spouts().get(id).get_common();
         }
-        if(topology.get_bolts().containsKey(id)) {
+        if (topology.get_bolts().containsKey(id)) {
             return topology.get_bolts().get(id).get_common();
         }
-        if(topology.get_state_spouts().containsKey(id)) {
+        if (topology.get_state_spouts().containsKey(id)) {
             return topology.get_state_spouts().get(id).get_common();
         }
         throw new IllegalArgumentException("Could not find component with id " + id);
     }
 
     public static Integer getInt(Object o) {
-      Integer result = getInt(o, null);
-      if (null == result) {
-        throw new IllegalArgumentException("Don't know how to convert null to int");
-      }
-      return result;
+        Integer result = getInt(o, null);
+        if (null == result) {
+            throw new IllegalArgumentException("Don't know how to convert null to int");
+        }
+        return result;
     }
-    
+
     public static Integer getInt(Object o, Integer defaultValue) {
         if (null == o) {
             return defaultValue;
         }
-        
+
         if (o instanceof Number) {
             return ((Number) o).intValue();
         } else if (o instanceof String) {
@@ -534,38 +510,18 @@ public class Utils {
             throw new IllegalArgumentException("Don't know how to convert " + o + " to int");
         }
     }
-    
+
     public static long secureRandomLong() {
         return UUID.randomUUID().getLeastSignificantBits();
     }
-	
-    public static class BoundedExponentialBackoffRetry extends ExponentialBackoffRetry {
-        
-        protected final int maxRetryInterval;
-        
-        public BoundedExponentialBackoffRetry(int baseSleepTimeMs, int maxRetries, int maxSleepTimeMs) {
-            super(baseSleepTimeMs, maxRetries);
-            this.maxRetryInterval = maxSleepTimeMs;
-        }
-        
-        public int getMaxRetryInterval() {
-            return this.maxRetryInterval;
-        }
-        
-        @Override
-        public int getSleepTimeMs(int count, long elapsedMs) {
-            return Math.min(maxRetryInterval, super.getSleepTimeMs(count, elapsedMs));
-        }
-        
-    }
-    
+
     public static CuratorFramework newCurator(Map conf, List<String> servers, Object port, String root) {
         return newCurator(conf, servers, port, root, null);
     }
 
     public static CuratorFramework newCurator(Map conf, List<String> servers, Object port, String root, ZookeeperAuthInfo auth) {
         List<String> serverPorts = new ArrayList<String>();
-        for(String zkServer: (List<String>) servers) {
+        for (String zkServer : (List<String>) servers) {
             serverPorts.add(zkServer + ":" + Utils.getInt(port));
         }
         String zkStr = StringUtils.join(serverPorts, ",") + root;
@@ -576,17 +532,15 @@ public class Utils {
         return builder.build();
     }
 
-    protected static void setupBuilder(CuratorFrameworkFactory.Builder builder, String zkStr, Map conf, ZookeeperAuthInfo auth)
-    {
+    protected static void setupBuilder(CuratorFrameworkFactory.Builder builder, String zkStr, Map conf, ZookeeperAuthInfo auth) {
         builder.connectString(zkStr)
-            .connectionTimeoutMs(Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_CONNECTION_TIMEOUT)))
-            .sessionTimeoutMs(Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT)))
-            .retryPolicy(new StormBoundedExponentialBackoffRetry(
-                        Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL)),
-                        Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL_CEILING)),
-                        Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_TIMES))));
-
-        if(auth!=null && auth.scheme!=null && auth.payload!=null) {
+                .connectionTimeoutMs(Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_CONNECTION_TIMEOUT)))
+                .sessionTimeoutMs(Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT)))
+                .retryPolicy(
+                        new StormBoundedExponentialBackoffRetry(Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL)), Utils.getInt(conf
+                                .get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL_CEILING)), Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_TIMES))));
+
+        if (auth != null && auth.scheme != null && auth.payload != null) {
             builder = builder.authorization(auth.scheme, auth.payload);
         }
     }
@@ -608,15 +562,10 @@ public class Utils {
     }
 
     /**
-     *
-(defn integer-divided [sum num-pieces]
-  (let [base (int (/ sum num-pieces))
-        num-inc (mod sum num-pieces)
-        num-bases (- num-pieces num-inc)]
-    (if (= num-inc 0)
-      {base num-bases}
-      {base num-bases (inc base) num-inc}
-      )))
+     * 
+     * (defn integer-divided [sum num-pieces]
+     *   (let [base (int (/ sum num-pieces))
+     *         num-inc (mod sum num-pieces)
+     *         num-bases (- num-pieces num-inc)]
+     *     (if (= num-inc 0)
+     *       {base num-bases}
+     *       {base num-bases (inc base) num-inc})))
+     * 
      * @param sum
      * @param numPieces
      * @return
@@ -628,8 +577,8 @@ public class Utils {
         int numBases = numPieces - numInc;
         TreeMap<Integer, Integer> ret = new TreeMap<Integer, Integer>();
         ret.put(base, numBases);
-        if(numInc!=0) {
-            ret.put(base+1, numInc);
+        if (numInc != 0) {
+            ret.put(base + 1, numInc);
         }
         return ret;
     }
@@ -644,7 +593,7 @@ public class Utils {
         try {
             BufferedReader r = new BufferedReader(new InputStreamReader(in));
             String line = null;
-            while ((line = r.readLine())!= null) {
+            while ((line = r.readLine()) != null) {
                 LOG.info("{}:{}", prefix, line);
             }
         } catch (IOException e) {
@@ -654,8 +603,8 @@ public class Utils {
 
     public static boolean exceptionCauseIsInstanceOf(Class klass, Throwable throwable) {
         Throwable t = throwable;
-        while(t != null) {
-            if(klass.isInstance(t)) {
+        while (t != null) {
+            if (klass.isInstance(t)) {
                 return true;
             }
             t = t.getCause();
@@ -664,71 +613,70 @@ public class Utils {
     }
 
     /**
-     * Is the cluster configured to interact with ZooKeeper in a secure way?
-     * This only works when called from within Nimbus or a Supervisor process.
+     * Is the cluster configured to interact with ZooKeeper in a secure way? This only works when called from within Nimbus or a Supervisor process.
+     * 
      * @param conf the storm configuration, not the topology configuration
      * @return true if it is configured else false.
      */
     public static boolean isZkAuthenticationConfiguredStormServer(Map conf) {
         return null != System.getProperty("java.security.auth.login.config")
-            || (conf != null
-                && conf.get(Config.STORM_ZOOKEEPER_AUTH_SCHEME) != null
-                && ! ((String)conf.get(Config.STORM_ZOOKEEPER_AUTH_SCHEME)).isEmpty());
+                || (conf != null && conf.get(Config.STORM_ZOOKEEPER_AUTH_SCHEME) != null && !((String) conf.get(Config.STORM_ZOOKEEPER_AUTH_SCHEME)).isEmpty());
     }
 
     /**
      * Is the topology configured to have ZooKeeper authentication.
+     * 
      * @param conf the topology configuration
      * @return true if ZK is configured else false
      */
     public static boolean isZkAuthenticationConfiguredTopology(Map conf) {
-        return (conf != null
-                && conf.get(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_SCHEME) != null
-                && ! ((String)conf.get(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_SCHEME)).isEmpty());
+        return (conf != null && conf.get(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_SCHEME) != null && !((String) conf
+                .get(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_SCHEME)).isEmpty());
     }
 
     public static List<ACL> getWorkerACL(Map conf) {
-        //This is a work around to an issue with ZK where a sasl super user is not super unless there is an open SASL ACL so we are trying to give the correct perms
+        // This is a workaround for a ZK issue: a SASL super user is not "super" unless there is an open SASL ACL,
+        // so we explicitly grant the correct perms.
         if (!isZkAuthenticationConfiguredTopology(conf)) {
             return null;
         }
-        String stormZKUser = (String)conf.get(Config.STORM_ZOOKEEPER_SUPERACL);
+        String stormZKUser = (String) conf.get(Config.STORM_ZOOKEEPER_SUPERACL);
         if (stormZKUser == null) {
-           throw new IllegalArgumentException("Authentication is enabled but "+Config.STORM_ZOOKEEPER_SUPERACL+" is not set");
+            throw new IllegalArgumentException("Authentication is enabled but " + Config.STORM_ZOOKEEPER_SUPERACL + " is not set");
         }
-        String[] split = stormZKUser.split(":",2);
+        String[] split = stormZKUser.split(":", 2);
         if (split.length != 2) {
-          throw new IllegalArgumentException(Config.STORM_ZOOKEEPER_SUPERACL+" does not appear to be in the form scheme:acl, i.e. sasl:storm-user");
+            throw new IllegalArgumentException(Config.STORM_ZOOKEEPER_SUPERACL + " does not appear to be in the form scheme:acl, i.e. sasl:storm-user");
         }
         ArrayList<ACL> ret = new ArrayList<ACL>(ZooDefs.Ids.CREATOR_ALL_ACL);
         ret.add(new ACL(ZooDefs.Perms.ALL, new Id(split[0], split[1])));
         return ret;
     }
 
-   public static String threadDump() {
-       final StringBuilder dump = new StringBuilder();
-       final java.lang.management.ThreadMXBean threadMXBean =  java.lang.management.ManagementFactory.getThreadMXBean();
-       final java.lang.management.ThreadInfo[] threadInfos = threadMXBean.getThreadInfo(threadMXBean.getAllThreadIds(), 100);
-       for (java.lang.management.ThreadInfo threadInfo : threadInfos) {
-           dump.append('"');
-           dump.append(threadInfo.getThreadName());
-           dump.append("\" ");
-           final Thread.State state = threadInfo.getThreadState();
-           dump.append("\n   java.lang.Thread.State: ");
-           dump.append(state);
-           final StackTraceElement[] stackTraceElements = threadInfo.getStackTrace();
-           for (final StackTraceElement stackTraceElement : stackTraceElements) {
-               dump.append("\n        at ");
-               dump.append(stackTraceElement);
-           }
-           dump.append("\n\n");
-       }
-       return dump.toString();
-   }
+    public static String threadDump() {
+        final StringBuilder dump = new StringBuilder();
+        final java.lang.management.ThreadMXBean threadMXBean = java.lang.management.ManagementFactory.getThreadMXBean();
+        final java.lang.management.ThreadInfo[] threadInfos = threadMXBean.getThreadInfo(threadMXBean.getAllThreadIds(), 100);
+        for (java.lang.management.ThreadInfo threadInfo : threadInfos) {
+            dump.append('"');
+            dump.append(threadInfo.getThreadName());
+            dump.append("\" ");
+            final Thread.State state = threadInfo.getThreadState();
+            dump.append("\n   java.lang.Thread.State: ");
+            dump.append(state);
+            final StackTraceElement[] stackTraceElements = threadInfo.getStackTrace();
+            for (final StackTraceElement stackTraceElement : stackTraceElements) {
+                dump.append("\n        at ");
+                dump.append(stackTraceElement);
+            }
+            dump.append("\n\n");
+        }
+        return dump.toString();
+    }
 
     // Assumes caller is synchronizing
     private static SerializationDelegate getSerializationDelegate(Map stormConf) {
-        String delegateClassName = (String)stormConf.get(Config.STORM_META_SERIALIZATION_DELEGATE);
+        String delegateClassName = (String) stormConf.get(Config.STORM_META_SERIALIZATION_DELEGATE);
         SerializationDelegate delegate;
         try {
             Class delegateClass = Class.forName(delegateClassName);
@@ -747,27 +695,25 @@ public class Utils {
         return delegate;
     }
 
-  public static void handleUncaughtException(Throwable t) {
-    if (t != null && t instanceof Error) {
-      if (t instanceof OutOfMemoryError) {
-        try {
-          System.err.println("Halting due to Out Of Memory Error..." + Thread.currentThread().getName());
-        } catch (Throwable err) {
-          //Again we don't want to exit because of logging issues.
+    public static void handleUncaughtException(Throwable t) {
+        if (t != null && t instanceof Error) {
+            if (t instanceof OutOfMemoryError) {
+                try {
+                    System.err.println("Halting due to Out Of Memory Error..." + Thread.currentThread().getName());
+                } catch (Throwable err) {
+                    // Again we don't want to exit because of logging issues.
+                }
+                Runtime.getRuntime().halt(-1);
+            } else {
+                // Running in daemon mode, we would pass Error to calling thread.
+                throw (Error) t;
+            }
         }
-        Runtime.getRuntime().halt(-1);
-      } else {
-        //Running in daemon mode, we would pass Error to calling thread.
-        throw (Error) t;
-      }
     }
-  }
-
 
-    
     public static List<String> tokenize_path(String path) {
         String[] toks = path.split("/");
-        java.util.ArrayList<String> rtn = new ArrayList<String>();
+        ArrayList<String> rtn = new ArrayList<String>();
         for (String str : toks) {
             if (!str.isEmpty()) {
                 rtn.add(str);
@@ -775,7 +721,7 @@ public class Utils {
         }
         return rtn;
     }
-    
+
     public static String toks_to_path(List<String> toks) {
         StringBuffer buff = new StringBuffer();
         buff.append("/");
@@ -785,16 +731,16 @@ public class Utils {
             if (i < (size - 1)) {
                 buff.append("/");
             }
-            
+
         }
         return buff.toString();
     }
-    
+
     public static String normalize_path(String path) {
         String rtn = toks_to_path(tokenize_path(path));
         return rtn;
     }
-    
+
     public static String printStack() {
         StringBuilder sb = new StringBuilder();
         sb.append("\nCurrent call stack:\n");
@@ -802,14 +748,14 @@ public class Utils {
         for (int i = 2; i < stackElements.length; i++) {
             sb.append("\t").append(stackElements[i]).append("\n");
         }
-        
+
         return sb.toString();
     }
-    
+
     private static Map loadProperty(String prop) {
         Map ret = new HashMap<Object, Object>();
         Properties properties = new Properties();
-        
+
         try {
             InputStream stream = new FileInputStream(prop);
             properties.load(stream);
@@ -826,14 +772,14 @@ public class Utils {
             e1.printStackTrace();
             throw new RuntimeException(e1.getMessage());
         }
-        
+
         return ret;
     }
-    
+
     private static Map loadYaml(String confPath) {
         Map ret = new HashMap<Object, Object>();
         Yaml yaml = new Yaml();
-        
+
         try {
             InputStream stream = new FileInputStream(confPath);
             ret = (Map) yaml.load(stream);
@@ -848,10 +794,10 @@ public class Utils {
             e1.printStackTrace();
             throw new RuntimeException("Failed to read config file");
         }
-        
+
         return ret;
     }
-    
+
     public static Map loadConf(String arg) {
         Map ret = null;
         if (arg.endsWith("yaml")) {
@@ -866,13 +812,11 @@ public class Utils {
         String ret = "";
         InputStream input = null;
         try {
-            input =
-                    Utils.class.getClassLoader().getResourceAsStream("version");
+            input = Utils.class.getClassLoader().getResourceAsStream("version");
             BufferedReader in = new BufferedReader(new InputStreamReader(input));
-			String s = in.readLine();
-			ret = s.trim();
-			
-			
+            String s = in.readLine();
+            ret = s.trim();
+
         } catch (Exception e) {
             LOG.warn("Failed to get version", e);
         } finally {
@@ -892,7 +836,7 @@ public class Utils {
         bytes[offset++] = (byte) (value & 0x000000FF);
         bytes[offset++] = (byte) ((value & 0x0000FF00) >> 8);
         bytes[offset++] = (byte) ((value & 0x00FF0000) >> 16);
-        bytes[offset]   = (byte) ((value & 0xFF000000) >> 24);        
+        bytes[offset] = (byte) ((value & 0xFF000000) >> 24);
     }
 
     public static int readIntFromByteArray(byte[] bytes, int offset) {
@@ -900,7 +844,7 @@ public class Utils {
         ret = ret | (bytes[offset++] & 0x000000FF);
         ret = ret | ((bytes[offset++] << 8) & 0x0000FF00);
         ret = ret | ((bytes[offset++] << 16) & 0x00FF0000);
-        ret = ret | ((bytes[offset]   << 24) & 0xFF000000);
+        ret = ret | ((bytes[offset] << 24) & 0xFF000000);
         return ret;
     }
 }

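For readers skimming this hunk, a minimal standalone sketch of two of the
reformatted Utils helpers: integerDivided (the Java port of the Clojure
integer-divided shown in the javadoc above) and the little-endian layout
used by writeIntToByteArray/readIntFromByteArray. This is an illustrative
example, not part of the commit:

    import java.util.TreeMap;

    public class UtilsSketch {
        // Split `sum` into `numPieces` near-equal integers, as Utils.integerDivided does:
        // the result maps piece-size -> how many pieces get that size.
        static TreeMap<Integer, Integer> integerDivided(int sum, int numPieces) {
            int base = sum / numPieces;
            int numInc = sum % numPieces;
            TreeMap<Integer, Integer> ret = new TreeMap<Integer, Integer>();
            ret.put(base, numPieces - numInc);
            if (numInc != 0) {
                ret.put(base + 1, numInc);
            }
            return ret;
        }

        public static void main(String[] args) {
            // 10 tasks over 3 workers -> {3=2, 4=1}: two workers get 3 tasks, one gets 4.
            System.out.println(integerDivided(10, 3));

            // Round-trip an int through the same little-endian byte layout as
            // writeIntToByteArray/readIntFromByteArray.
            int value = 0x12345678;
            byte[] bytes = new byte[4];
            bytes[0] = (byte) (value & 0xFF);
            bytes[1] = (byte) ((value >> 8) & 0xFF);
            bytes[2] = (byte) ((value >> 16) & 0xFF);
            bytes[3] = (byte) ((value >> 24) & 0xFF);
            int back = (bytes[0] & 0xFF) | ((bytes[1] & 0xFF) << 8)
                    | ((bytes[2] & 0xFF) << 16) | ((bytes[3] & 0xFF) << 24);
            System.out.println(back == value); // prints true
        }
    }
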
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/VersionInfo.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/VersionInfo.java b/jstorm-core/src/main/java/backtype/storm/utils/VersionInfo.java
index 1740e18..456dfd0 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/VersionInfo.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/VersionInfo.java
@@ -24,108 +24,102 @@ import java.util.Properties;
 
 public class VersionInfo {
 
-  private Properties info;
-
-  protected VersionInfo(String component) {
-    info = new Properties();
-    String versionInfoFile = component + "-version-info.properties";
-    InputStream is = null;
-    try {
-      is = Thread.currentThread().getContextClassLoader()
-        .getResourceAsStream(versionInfoFile);
-      if (is == null) {
-        throw new IOException("Resource not found");
-      }
-      info.load(is);
-    } catch (IOException ex) {
-    } finally {
-      if (is != null) {
-      try {
-
-         is.close();
-     } catch (IOException ioex) {
-     }
-
-     }
-    }
-  }
-
-  protected String _getVersion() {
-    return info.getProperty("version", "Unknown");
-  }
-
-  protected String _getRevision() {
-    return info.getProperty("revision", "Unknown");
-  }
-
-  protected String _getBranch() {
-    return info.getProperty("branch", "Unknown");
-  }
-
-  protected String _getDate() {
-    return info.getProperty("date", "Unknown");
-  }
-
-  protected String _getUser() {
-    return info.getProperty("user", "Unknown");
-  }
-
-  protected String _getUrl() {
-    return info.getProperty("url", "Unknown");
-  }
-
-  protected String _getSrcChecksum() {
-    return info.getProperty("srcChecksum", "Unknown");
-  }
-
-  protected String _getBuildVersion(){
-    return getVersion() +
-      " from " + _getRevision() +
-      " by " + _getUser() +
-      " source checksum " + _getSrcChecksum();
-  }
-
-
-  private static VersionInfo COMMON_VERSION_INFO = new VersionInfo("storm-core");
-
-  public static String getVersion() {
-    return COMMON_VERSION_INFO._getVersion();
-  }
-  
-  public static String getRevision() {
-    return COMMON_VERSION_INFO._getRevision();
-  }
-
-  public static String getBranch() {
-    return COMMON_VERSION_INFO._getBranch();
-  }
-
-  public static String getDate() {
-    return COMMON_VERSION_INFO._getDate();
-  }
-  
-  public static String getUser() {
-    return COMMON_VERSION_INFO._getUser();
-  }
-  
-  public static String getUrl() {
-    return COMMON_VERSION_INFO._getUrl();
-  }
-
-  public static String getSrcChecksum() {
-    return COMMON_VERSION_INFO._getSrcChecksum();
-  }
-
-  public static String getBuildVersion(){
-    return COMMON_VERSION_INFO._getBuildVersion();
-  }
-
-
-  public static void main(String[] args) {
-    System.out.println("Storm " + getVersion());
-    System.out.println("URL " + getUrl() + " -r " + getRevision());
-    System.out.println("Branch " + getBranch());
-    System.out.println("Compiled by " + getUser() + " on " + getDate());
-    System.out.println("From source with checksum " + getSrcChecksum());
-  }
+    private Properties info;
+
+    protected VersionInfo(String component) {
+        info = new Properties();
+        String versionInfoFile = component + "-version-info.properties";
+        InputStream is = null;
+        try {
+            is = Thread.currentThread().getContextClassLoader().getResourceAsStream(versionInfoFile);
+            if (is == null) {
+                throw new IOException("Resource not found");
+            }
+            info.load(is);
+        } catch (IOException ex) {
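+            // the version-info file is optional; missing or unreadable data falls back to the "Unknown" defaults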
+        } finally {
+            if (is != null) {
+                try {
+
+                    is.close();
+                } catch (IOException ioex) {
+                }
+
+            }
+        }
+    }
+
+    protected String _getVersion() {
+        return info.getProperty("version", "Unknown");
+    }
+
+    protected String _getRevision() {
+        return info.getProperty("revision", "Unknown");
+    }
+
+    protected String _getBranch() {
+        return info.getProperty("branch", "Unknown");
+    }
+
+    protected String _getDate() {
+        return info.getProperty("date", "Unknown");
+    }
+
+    protected String _getUser() {
+        return info.getProperty("user", "Unknown");
+    }
+
+    protected String _getUrl() {
+        return info.getProperty("url", "Unknown");
+    }
+
+    protected String _getSrcChecksum() {
+        return info.getProperty("srcChecksum", "Unknown");
+    }
+
+    protected String _getBuildVersion() {
+        return getVersion() + " from " + _getRevision() + " by " + _getUser() + " source checksum " + _getSrcChecksum();
+    }
+
+    private static VersionInfo COMMON_VERSION_INFO = new VersionInfo("storm-core");
+
+    public static String getVersion() {
+        return COMMON_VERSION_INFO._getVersion();
+    }
+
+    public static String getRevision() {
+        return COMMON_VERSION_INFO._getRevision();
+    }
+
+    public static String getBranch() {
+        return COMMON_VERSION_INFO._getBranch();
+    }
+
+    public static String getDate() {
+        return COMMON_VERSION_INFO._getDate();
+    }
+
+    public static String getUser() {
+        return COMMON_VERSION_INFO._getUser();
+    }
+
+    public static String getUrl() {
+        return COMMON_VERSION_INFO._getUrl();
+    }
+
+    public static String getSrcChecksum() {
+        return COMMON_VERSION_INFO._getSrcChecksum();
+    }
+
+    public static String getBuildVersion() {
+        return COMMON_VERSION_INFO._getBuildVersion();
+    }
+
+    public static void main(String[] args) {
+        System.out.println("Storm " + getVersion());
+        System.out.println("URL " + getUrl() + " -r " + getRevision());
+        System.out.println("Branch " + getBranch());
+        System.out.println("Compiled by " + getUser() + " on " + getDate());
+        System.out.println("From source with checksum " + getSrcChecksum());
+    }
 }

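VersionInfo above resolves a <component>-version-info.properties resource
from the thread context classpath; any missing key defaults to "Unknown".
A hypothetical storm-core-version-info.properties illustrating the keys it
reads (all values are placeholders, not real build output):

    # Hypothetical storm-core-version-info.properties (placeholder values)
    version=2.1.0
    revision=deadbeef
    branch=master
    date=2015-12-01
    user=builder
    url=http://example.org/repo
    srcChecksum=0123456789abcdef
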
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/VersionedStore.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/VersionedStore.java b/jstorm-core/src/main/java/backtype/storm/utils/VersionedStore.java
index 07ce5a8..0852292 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/VersionedStore.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/VersionedStore.java
@@ -30,10 +30,10 @@ public class VersionedStore {
     private static final String FINISHED_VERSION_SUFFIX = ".version";
 
     private String _root;
-    
+
     public VersionedStore(String path) throws IOException {
-      _root = path;
-      mkdirs(_root);
+        _root = path;
+        mkdirs(_root);
     }
 
     public String getRoot() {
@@ -46,26 +46,30 @@ public class VersionedStore {
 
     public String mostRecentVersionPath() throws IOException {
         Long v = mostRecentVersion();
-        if(v==null) return null;
+        if (v == null)
+            return null;
         return versionPath(v);
     }
 
     public String mostRecentVersionPath(long maxVersion) throws IOException {
         Long v = mostRecentVersion(maxVersion);
-        if(v==null) return null;
+        if (v == null)
+            return null;
         return versionPath(v);
     }
 
     public Long mostRecentVersion() throws IOException {
         List<Long> all = getAllVersions();
-        if(all.size()==0) return null;
+        if (all.size() == 0)
+            return null;
         return all.get(0);
     }
 
     public Long mostRecentVersion(long maxVersion) throws IOException {
         List<Long> all = getAllVersions();
-        for(Long v: all) {
-            if(v <= maxVersion) return v;
+        for (Long v : all) {
+            if (v <= maxVersion)
+                return v;
         }
         return null;
     }
@@ -73,7 +77,7 @@ public class VersionedStore {
     public String createVersion() throws IOException {
         Long mostRecent = mostRecentVersion();
         long version = Time.currentTimeMillis();
-        if(mostRecent!=null && version <= mostRecent) {
+        if (mostRecent != null && version <= mostRecent) {
             version = mostRecent + 1;
         }
         return createVersion(version);
@@ -81,7 +85,7 @@ public class VersionedStore {
 
     public String createVersion(long version) throws IOException {
         String ret = versionPath(version);
-        if(getAllVersions().contains(version))
+        if (getAllVersions().contains(version))
             throw new RuntimeException("Version already exists or data already exists");
         else
             return ret;
@@ -95,11 +99,11 @@ public class VersionedStore {
         File versionFile = new File(versionPath(version));
         File tokenFile = new File(tokenPath(version));
 
-        if(tokenFile.exists()) {
+        if (tokenFile.exists()) {
             FileUtils.forceDelete(tokenFile);
         }
 
-        if(versionFile.exists()) {
+        if (versionFile.exists()) {
             FileUtils.forceDelete(versionFile);
         }
     }
@@ -116,14 +120,14 @@ public class VersionedStore {
 
     public void cleanup(int versionsToKeep) throws IOException {
         List<Long> versions = getAllVersions();
-        if(versionsToKeep >= 0) {
+        if (versionsToKeep >= 0) {
             versions = versions.subList(0, Math.min(versions.size(), versionsToKeep));
         }
         HashSet<Long> keepers = new HashSet<Long>(versions);
 
-        for(String p: listDir(_root)) {
+        for (String p : listDir(_root)) {
             Long v = parseVersion(p);
-            if(v!=null && !keepers.contains(v)) {
+            if (v != null && !keepers.contains(v)) {
                 deleteVersion(v);
             }
         }
@@ -134,8 +138,8 @@ public class VersionedStore {
      */
     public List<Long> getAllVersions() throws IOException {
         List<Long> ret = new ArrayList<Long>();
-        for(String s: listDir(_root)) {
-            if(s.endsWith(FINISHED_VERSION_SUFFIX)) {
+        for (String s : listDir(_root)) {
+            if (s.endsWith(FINISHED_VERSION_SUFFIX)) {
                 ret.add(validateAndGetVersion(s));
             }
         }
@@ -150,18 +154,19 @@ public class VersionedStore {
 
     private long validateAndGetVersion(String path) {
         Long v = parseVersion(path);
-        if(v==null) throw new RuntimeException(path + " is not a valid version");
+        if (v == null)
+            throw new RuntimeException(path + " is not a valid version");
         return v;
     }
 
     private Long parseVersion(String path) {
         String name = new File(path).getName();
-        if(name.endsWith(FINISHED_VERSION_SUFFIX)) {
-            name = name.substring(0, name.length()-FINISHED_VERSION_SUFFIX.length());
+        if (name.endsWith(FINISHED_VERSION_SUFFIX)) {
+            name = name.substring(0, name.length() - FINISHED_VERSION_SUFFIX.length());
         }
         try {
             return Long.parseLong(name);
-        } catch(NumberFormatException e) {
+        } catch (NumberFormatException e) {
             return null;
         }
     }
@@ -173,12 +178,12 @@ public class VersionedStore {
     private void mkdirs(String path) throws IOException {
         new File(path).mkdirs();
     }
-    
+
     private List<String> listDir(String dir) throws IOException {
         List<String> ret = new ArrayList<String>();
         File[] contents = new File(dir).listFiles();
-        if(contents!=null) {
-            for(File f: contents) {
+        if (contents != null) {
+            for (File f : contents) {
                 ret.add(f.getAbsolutePath());
             }
         }

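A minimal usage sketch of the VersionedStore API reformatted above. The
succeedVersion call (which writes the ".version" token file marking a
version as finished) is outside this excerpt, so it is assumed here from
the rest of the class; the root path is illustrative:

    import java.io.File;
    import java.io.IOException;

    import backtype.storm.utils.VersionedStore;

    public class VersionedStoreDemo {
        public static void main(String[] args) throws IOException {
            // Each version is a directory under the root, named by a millisecond timestamp.
            VersionedStore store = new VersionedStore("/tmp/vstore-demo");

            String path = store.createVersion();   // reserve the next version path
            new File(path).mkdirs();               // caller writes its data under the path
            store.succeedVersion(path);            // assumed: marks the version as finished

            System.out.println(store.mostRecentVersionPath()); // newest finished version
            store.cleanup(2);                      // keep only the two most recent versions
        }
    }
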
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/WindowedTimeThrottler.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/WindowedTimeThrottler.java b/jstorm-core/src/main/java/backtype/storm/utils/WindowedTimeThrottler.java
index 5a288a0..6290f5a 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/WindowedTimeThrottler.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/WindowedTimeThrottler.java
@@ -22,28 +22,28 @@ public class WindowedTimeThrottler {
     int _maxAmt;
     long _windowStartTime;
     int _windowEvents = 0;
-    
+
     public WindowedTimeThrottler(Number windowMillis, Number maxAmt) {
         _windowMillis = windowMillis.longValue();
         _maxAmt = maxAmt.intValue();
         _windowStartTime = System.currentTimeMillis();
     }
-    
+
     public boolean isThrottled() {
         resetIfNecessary();
         return _windowEvents >= _maxAmt;
     }
-    
-    //returns void if the event should continue, false if the event should not be done
+
+    // marks an event in the current window; callers should check isThrottled() first to decide whether the event may proceed
     public void markEvent() {
         resetIfNecessary();
         _windowEvents++;
-        
+
     }
-    
+
     private void resetIfNecessary() {
         long now = System.currentTimeMillis();
-        if(now - _windowStartTime > _windowMillis) {
+        if (now - _windowStartTime > _windowMillis) {
             _windowStartTime = now;
             _windowEvents = 0;
         }

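A minimal sketch of the throttler's intended call pattern (window size and
limit are illustrative): at most maxAmt events are admitted per window of
windowMillis milliseconds, and the window resets lazily on the next call.

    import backtype.storm.utils.WindowedTimeThrottler;

    public class ThrottlerDemo {
        public static void main(String[] args) {
            // Admit at most 3 events per 1000 ms window.
            WindowedTimeThrottler throttler = new WindowedTimeThrottler(1000, 3);

            for (int i = 0; i < 5; i++) {
                if (!throttler.isThrottled()) {
                    throttler.markEvent();          // count this event in the window
                    System.out.println("event " + i + " admitted");
                } else {
                    System.out.println("event " + i + " throttled");
                }
            }
        }
    }
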
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/WorkerClassLoader.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/WorkerClassLoader.java b/jstorm-core/src/main/java/backtype/storm/utils/WorkerClassLoader.java
index f3526b1..4c2f35c 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/WorkerClassLoader.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/WorkerClassLoader.java
@@ -28,30 +28,30 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class WorkerClassLoader extends URLClassLoader {
-    
+
     public static Logger LOG = LoggerFactory.getLogger(WorkerClassLoader.class);
-    
+
     private ClassLoader defaultClassLoader;
-    
+
     private ClassLoader JDKClassLoader;
-    
+
     private boolean isDebug;
-    
+
     protected static WorkerClassLoader instance;
-    
+
     protected static boolean enable;
-    
+
     protected static Map<Thread, ClassLoader> threadContextCache;
-    
+
     protected WorkerClassLoader(URL[] urls, ClassLoader defaultClassLoader, ClassLoader JDKClassLoader, boolean isDebug) {
         super(urls, JDKClassLoader);
         this.defaultClassLoader = defaultClassLoader;
         this.JDKClassLoader = JDKClassLoader;
         this.isDebug = isDebug;
-        
     }
-    
+
     // for all log go through logback when enable classloader
     protected boolean isLogByDefault(String name) {
         if (name.startsWith("org.apache.log4j")) {
@@ -59,11 +59,11 @@ public class WorkerClassLoader extends URLClassLoader {
         } else if (name.startsWith("org.slf4j")) {
             return true;
         }
-        
+
         return false;
-        
+
     }
-    
+
     protected boolean isLoadByDefault(String name) {
         if (name.startsWith("backtype.storm") == true) {
             return true;
@@ -75,100 +75,101 @@ public class WorkerClassLoader extends URLClassLoader {
             return false;
         }
     }
-    
+
     @Override
     public Class<?> loadClass(String name) throws ClassNotFoundException {
         Class<?> result = null;
         try {
             result = this.findLoadedClass(name);
-            
+
             if (result != null) {
                 return result;
             }
-            
+
             try {
                 result = JDKClassLoader.loadClass(name);
                 if (result != null)
                     return result;
             } catch (Exception e) {
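+                // ignored: the class is not visible to the JDK classloader; fall through and try this classloader's URLs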
-                
+
             }
-            
+
             try {
                 if (isLoadByDefault(name) == false) {
                     result = findClass(name);
-                    
+
                     if (result != null) {
                         return result;
                     }
                 }
-                
+
             } catch (Exception e) {
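+                // ignored: not found on this classloader's URLs; fall back to the default classloader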
-                
+
             }
-            
+
             result = defaultClassLoader.loadClass(name);
             return result;
-            
+
         } finally {
             if (result != null) {
                 ClassLoader resultClassLoader = result.getClassLoader();
-                LOG.info("Successfully load class " + name + " by " + resultClassLoader + ",threadContextLoader:" + Thread.currentThread().getContextClassLoader());
+                LOG.info("Successfully load class " + name + " by " + resultClassLoader + ",threadContextLoader:"
+                        + Thread.currentThread().getContextClassLoader());
             } else {
                 LOG.warn("Failed to load class " + name + ",threadContextLoader:" + Thread.currentThread().getContextClassLoader());
             }
-            
+
             if (isDebug) {
                 LOG.info(Utils.printStack());
             }
         }
-        
+
     }
-    
+
     public static WorkerClassLoader mkInstance(URL[] urls, ClassLoader DefaultClassLoader, ClassLoader JDKClassLoader, boolean enable, boolean isDebug) {
         WorkerClassLoader.enable = enable;
         if (enable == false) {
             LOG.info("Don't enable UserDefine ClassLoader");
             return null;
         }
-        
+
         synchronized (WorkerClassLoader.class) {
             if (instance == null) {
                 instance = new WorkerClassLoader(urls, DefaultClassLoader, JDKClassLoader, isDebug);
-                
+
                 threadContextCache = new ConcurrentHashMap<Thread, ClassLoader>();
             }
-            
+
         }
-        
+
         LOG.info("Successfully create classloader " + mk_list(urls));
         return instance;
     }
-    
+
     public static WorkerClassLoader getInstance() {
         return instance;
     }
-    
+
     public static boolean isEnable() {
         return enable;
     }
-    
+
     public static void switchThreadContext() {
         if (enable == false) {
             return;
         }
-        
+
         Thread thread = Thread.currentThread();
         ClassLoader oldClassLoader = thread.getContextClassLoader();
         threadContextCache.put(thread, oldClassLoader);
         thread.setContextClassLoader(instance);
     }
-    
+
     public static void restoreThreadContext() {
         if (enable == false) {
             return;
         }
-        
+
         Thread thread = Thread.currentThread();
         ClassLoader oldClassLoader = threadContextCache.get(thread);
         if (oldClassLoader != null) {
@@ -177,7 +178,7 @@ public class WorkerClassLoader extends URLClassLoader {
             LOG.info("No context classloader of " + thread.getName());
         }
     }
-    
+
     private static <V> List<V> mk_list(V... args) {
         ArrayList<V> rtn = new ArrayList<V>();
         for (V o : args) {

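The switchThreadContext/restoreThreadContext pair above is meant to bracket
user code so the thread context classloader is always put back, even on
exceptions. A minimal sketch of that pattern (mkInstance is assumed to have
been called during worker setup):

    import backtype.storm.utils.WorkerClassLoader;

    public class ClassLoaderDemo {
        public static void runUserCode(Runnable userCode) {
            // Point the thread context classloader at the worker classloader...
            WorkerClassLoader.switchThreadContext();
            try {
                userCode.run();
            } finally {
                // ...and always restore the previous one, even if user code throws.
                WorkerClassLoader.restoreThreadContext();
            }
        }
    }
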
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/utils/WritableUtils.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/utils/WritableUtils.java b/jstorm-core/src/main/java/backtype/storm/utils/WritableUtils.java
index 8516f97..2c0a2a3 100755
--- a/jstorm-core/src/main/java/backtype/storm/utils/WritableUtils.java
+++ b/jstorm-core/src/main/java/backtype/storm/utils/WritableUtils.java
@@ -42,334 +42,314 @@ package backtype.storm.utils;
 
 import java.io.*;
 
-
 import java.util.zip.GZIPInputStream;
 import java.util.zip.GZIPOutputStream;
 
-public final class WritableUtils  {
-
-  public static byte[] readCompressedByteArray(DataInput in) throws IOException {
-    int length = in.readInt();
-    if (length == -1) return null;
-    byte[] buffer = new byte[length];
-    in.readFully(buffer);      // could/should use readFully(buffer,0,length)?
-    GZIPInputStream gzi = new GZIPInputStream(new ByteArrayInputStream(buffer, 0, buffer.length));
-    byte[] outbuf = new byte[length];
-    ByteArrayOutputStream bos =  new ByteArrayOutputStream();
-    int len;
-    while((len=gzi.read(outbuf, 0, outbuf.length)) != -1){
-      bos.write(outbuf, 0, len);
-    }
-    byte[] decompressed =  bos.toByteArray();
-    bos.close();
-    gzi.close();
-    return decompressed;
-  }
-
-  public static void skipCompressedByteArray(DataInput in) throws IOException {
-    int length = in.readInt();
-    if (length != -1) {
-      skipFully(in, length);
-    }
-  }
-
-  public static int  writeCompressedByteArray(DataOutput out, byte[] bytes) throws IOException {
-    if (bytes != null) {
-      ByteArrayOutputStream bos =  new ByteArrayOutputStream();
-      GZIPOutputStream gzout = new GZIPOutputStream(bos);
-      gzout.write(bytes, 0, bytes.length);
-      gzout.close();
-      byte[] buffer = bos.toByteArray();
-      int len = buffer.length;
-      out.writeInt(len);
-      out.write(buffer, 0, len);
-      /* debug only! Once we have confidence, can lose this. */
-      return ((bytes.length != 0) ? (100*buffer.length)/bytes.length : 0);
-    } else {
-      out.writeInt(-1);
-      return -1;
+public final class WritableUtils {
+
+    public static byte[] readCompressedByteArray(DataInput in) throws IOException {
+        int length = in.readInt();
+        if (length == -1)
+            return null;
+        byte[] buffer = new byte[length];
+        in.readFully(buffer); // could/should use readFully(buffer,0,length)?
+        GZIPInputStream gzi = new GZIPInputStream(new ByteArrayInputStream(buffer, 0, buffer.length));
+        byte[] outbuf = new byte[length];
+        ByteArrayOutputStream bos = new ByteArrayOutputStream();
+        int len;
+        while ((len = gzi.read(outbuf, 0, outbuf.length)) != -1) {
+            bos.write(outbuf, 0, len);
+        }
+        byte[] decompressed = bos.toByteArray();
+        bos.close();
+        gzi.close();
+        return decompressed;
     }
-  }
-
-
-  /* Ugly utility, maybe someone else can do this better  */
-  public static String readCompressedString(DataInput in) throws IOException {
-    byte[] bytes = readCompressedByteArray(in);
-    if (bytes == null) return null;
-    return new String(bytes, "UTF-8");
-  }
-
-
-  public static int  writeCompressedString(DataOutput out, String s) throws IOException {
-    return writeCompressedByteArray(out, (s != null) ? s.getBytes("UTF-8") : null);
-  }
-
-  /*
-   *
-   * Write a String as a Network Int n, followed by n Bytes
-   * Alternative to 16 bit read/writeUTF.
-   * Encoding standard is... ?
-   *
-   */
-  public static void writeString(DataOutput out, String s) throws IOException {
-    if (s != null) {
-      byte[] buffer = s.getBytes("UTF-8");
-      int len = buffer.length;
-      out.writeInt(len);
-      out.write(buffer, 0, len);
-    } else {
-      out.writeInt(-1);
-    }
-  }
-
-  /*
-   * Read a String as a Network Int n, followed by n Bytes
-   * Alternative to 16 bit read/writeUTF.
-   * Encoding standard is... ?
-   *
-   */
-  public static String readString(DataInput in) throws IOException{
-    int length = in.readInt();
-    if (length == -1) return null;
-    byte[] buffer = new byte[length];
-    in.readFully(buffer);      // could/should use readFully(buffer,0,length)?
-    return new String(buffer,"UTF-8");
-  }
-
-
-  /*
-   * Write a String array as a Nework Int N, followed by Int N Byte Array Strings.
-   * Could be generalised using introspection.
-   *
-   */
-  public static void writeStringArray(DataOutput out, String[] s) throws IOException{
-    out.writeInt(s.length);
-    for(int i = 0; i < s.length; i++) {
-      writeString(out, s[i]);
+
+    public static void skipCompressedByteArray(DataInput in) throws IOException {
+        int length = in.readInt();
+        if (length != -1) {
+            skipFully(in, length);
+        }
     }
-  }
-
-  /*
-   * Write a String array as a Nework Int N, followed by Int N Byte Array of
-   * compressed Strings. Handles also null arrays and null values.
-   * Could be generalised using introspection.
-   *
-   */
-  public static void writeCompressedStringArray(DataOutput out, String[] s) throws IOException{
-    if (s == null) {
-      out.writeInt(-1);
-      return;
+
+    public static int writeCompressedByteArray(DataOutput out, byte[] bytes) throws IOException {
+        if (bytes != null) {
+            ByteArrayOutputStream bos = new ByteArrayOutputStream();
+            GZIPOutputStream gzout = new GZIPOutputStream(bos);
+            gzout.write(bytes, 0, bytes.length);
+            gzout.close();
+            byte[] buffer = bos.toByteArray();
+            int len = buffer.length;
+            out.writeInt(len);
+            out.write(buffer, 0, len);
+            /* debug only! Once we have confidence, can lose this. */
+            return ((bytes.length != 0) ? (100 * buffer.length) / bytes.length : 0);
+        } else {
+            out.writeInt(-1);
+            return -1;
+        }
     }
-    out.writeInt(s.length);
-    for(int i = 0; i < s.length; i++) {
-      writeCompressedString(out, s[i]);
+
+    /* Ugly utility, maybe someone else can do this better */
+    public static String readCompressedString(DataInput in) throws IOException {
+        byte[] bytes = readCompressedByteArray(in);
+        if (bytes == null)
+            return null;
+        return new String(bytes, "UTF-8");
     }
-  }
-
-  /*
-   * Write a String array as a Nework Int N, followed by Int N Byte Array Strings.
-   * Could be generalised using introspection. Actually this bit couldn't...
-   *
-   */
-  public static String[] readStringArray(DataInput in) throws IOException {
-    int len = in.readInt();
-    if (len == -1) return null;
-    String[] s = new String[len];
-    for(int i = 0; i < len; i++) {
-      s[i] = readString(in);
+
+    public static int writeCompressedString(DataOutput out, String s) throws IOException {
+        return writeCompressedByteArray(out, (s != null) ? s.getBytes("UTF-8") : null);
     }
-    return s;
-  }
-
-
-  /*
-   * Write a String array as a Nework Int N, followed by Int N Byte Array Strings.
-   * Could be generalised using introspection. Handles null arrays and null values.
-   *
-   */
-  public static  String[] readCompressedStringArray(DataInput in) throws IOException {
-    int len = in.readInt();
-    if (len == -1) return null;
-    String[] s = new String[len];
-    for(int i = 0; i < len; i++) {
-      s[i] = readCompressedString(in);
+
+    /*
+     * 
+     * Write a String as a Network Int n, followed by n Bytes. Alternative to 16-bit read/writeUTF. Encoding standard is... ?
+     */
+    public static void writeString(DataOutput out, String s) throws IOException {
+        if (s != null) {
+            byte[] buffer = s.getBytes("UTF-8");
+            int len = buffer.length;
+            out.writeInt(len);
+            out.write(buffer, 0, len);
+        } else {
+            out.writeInt(-1);
+        }
     }
-    return s;
-  }
-
-
-  /*
-   *
-   * Test Utility Method Display Byte Array.
-   *
-   */
-  public static void displayByteArray(byte[] record){
-    int i;
-    for(i=0;i < record.length -1; i++){
-      if (i % 16 == 0) { System.out.println(); }
-      System.out.print(Integer.toHexString(record[i]  >> 4 & 0x0F));
-      System.out.print(Integer.toHexString(record[i] & 0x0F));
-      System.out.print(",");
+
+    /*
+     * Read a String as a Network Int n, followed by n Bytes. Alternative to 16-bit read/writeUTF. Encoding standard is... ?
+     */
+    public static String readString(DataInput in) throws IOException {
+        int length = in.readInt();
+        if (length == -1)
+            return null;
+        byte[] buffer = new byte[length];
+        in.readFully(buffer); // could/should use readFully(buffer,0,length)?
+        return new String(buffer, "UTF-8");
     }
-    System.out.print(Integer.toHexString(record[i]  >> 4 & 0x0F));
-    System.out.print(Integer.toHexString(record[i] & 0x0F));
-    System.out.println();
-  }
-
-
-  /**
-   * Serializes an integer to a binary stream with zero-compressed encoding.
-   * For -120 <= i <= 127, only one byte is used with the actual value.
-   * For other values of i, the first byte value indicates whether the
-   * integer is positive or negative, and the number of bytes that follow.
-   * If the first byte value v is between -121 and -124, the following integer
-   * is positive, with number of bytes that follow are -(v+120).
-   * If the first byte value v is between -125 and -128, the following integer
-   * is negative, with number of bytes that follow are -(v+124). Bytes are
-   * stored in the high-non-zero-byte-first order.
-   *
-   * @param stream Binary output stream
-   * @param i Integer to be serialized
-   * @throws java.io.IOException
-   */
-  public static void writeVInt(DataOutput stream, int i) throws IOException {
-    writeVLong(stream, i);
-  }
-
-  /**
-   * Serializes a long to a binary stream with zero-compressed encoding.
-   * For -112 <= i <= 127, only one byte is used with the actual value.
-   * For other values of i, the first byte value indicates whether the
-   * long is positive or negative, and the number of bytes that follow.
-   * If the first byte value v is between -113 and -120, the following long
-   * is positive, with number of bytes that follow are -(v+112).
-   * If the first byte value v is between -121 and -128, the following long
-   * is negative, with number of bytes that follow are -(v+120). Bytes are
-   * stored in the high-non-zero-byte-first order.
-   *
-   * @param stream Binary output stream
-   * @param i Long to be serialized
-   * @throws java.io.IOException
-   */
-  public static void writeVLong(DataOutput stream, long i) throws IOException {
-    if (i >= -112 && i <= 127) {
-      stream.writeByte((byte)i);
-      return;
+
+    /*
+     * Write a String array as a network int N, followed by N length-prefixed strings. Could be generalised using introspection.
+     */
+    public static void writeStringArray(DataOutput out, String[] s) throws IOException {
+        out.writeInt(s.length);
+        for (int i = 0; i < s.length; i++) {
+            writeString(out, s[i]);
+        }
     }
 
-    int len = -112;
-    if (i < 0) {
-      i ^= -1L; // take one's complement'
-      len = -120;
+    /*
+     * Write a String array as a network int N, followed by N compressed strings. Also handles null arrays and null values. Could be generalised using
+     * introspection.
+     */
+    public static void writeCompressedStringArray(DataOutput out, String[] s) throws IOException {
+        if (s == null) {
+            out.writeInt(-1);
+            return;
+        }
+        out.writeInt(s.length);
+        for (int i = 0; i < s.length; i++) {
+            writeCompressedString(out, s[i]);
+        }
     }
 
-    long tmp = i;
-    while (tmp != 0) {
-      tmp = tmp >> 8;
-      len--;
+    /*
+     * Read a String array written as a network int N, followed by N length-prefixed strings. (Unlike the writers, this could not easily be generalised.)
+     */
+    public static String[] readStringArray(DataInput in) throws IOException {
+        int len = in.readInt();
+        if (len == -1)
+            return null;
+        String[] s = new String[len];
+        for (int i = 0; i < len; i++) {
+            s[i] = readString(in);
+        }
+        return s;
     }
 
-    stream.writeByte((byte)len);
+    /*
+     * Read a compressed String array written as a network int N, followed by N compressed strings. Handles null arrays and null
+     * values.
+     */
+    public static String[] readCompressedStringArray(DataInput in) throws IOException {
+        int len = in.readInt();
+        if (len == -1)
+            return null;
+        String[] s = new String[len];
+        for (int i = 0; i < len; i++) {
+            s[i] = readCompressedString(in);
+        }
+        return s;
+    }
 
-    len = (len < -120) ? -(len + 120) : -(len + 112);
+    /*
+     * Test utility method: display a byte array as comma-separated hex, 16 bytes per line.
+     */
+    public static void displayByteArray(byte[] record) {
+        int i;
+        for (i = 0; i < record.length - 1; i++) {
+            if (i % 16 == 0) {
+                System.out.println();
+            }
+            System.out.print(Integer.toHexString(record[i] >> 4 & 0x0F));
+            System.out.print(Integer.toHexString(record[i] & 0x0F));
+            System.out.print(",");
+        }
+        System.out.print(Integer.toHexString(record[i] >> 4 & 0x0F));
+        System.out.print(Integer.toHexString(record[i] & 0x0F));
+        System.out.println();
+    }
 
-    for (int idx = len; idx != 0; idx--) {
-      int shiftbits = (idx - 1) * 8;
-      long mask = 0xFFL << shiftbits;
-      stream.writeByte((byte)((i & mask) >> shiftbits));
+    /**
+     * Serializes an integer to a binary stream with zero-compressed encoding. For -112 <= i <= 127, only one byte is used with the actual value. For other
+     * values of i, the first byte value indicates whether the integer is positive or negative, and the number of bytes that follow. If the first byte value v
+     * is between -113 and -120, the following integer is positive, with the number of bytes that follow being -(v+112). If the first byte value v is between
+     * -121 and -128, the following integer is negative, with the number of bytes that follow being -(v+120). Bytes are stored in the high-non-zero-byte-first
+     * order.
+     * 
+     * @param stream Binary output stream
+     * @param i Integer to be serialized
+     * @throws IOException
+     */
+    public static void writeVInt(DataOutput stream, int i) throws IOException {
+        writeVLong(stream, i);
     }
-  }
-
-
-  /**
-   * Reads a zero-compressed encoded long from input stream and returns it.
-   * @param stream Binary input stream
-   * @throws java.io.IOException
-   * @return deserialized long from stream.
-   */
-  public static long readVLong(DataInput stream) throws IOException {
-    byte firstByte = stream.readByte();
-    int len = decodeVIntSize(firstByte);
-    if (len == 1) {
-      return firstByte;
+
+    /**
+     * Serializes a long to a binary stream with zero-compressed encoding. For -112 <= i <= 127, only one byte is used with the actual value. For other values
+     * of i, the first byte value indicates whether the long is positive or negative, and the number of bytes that follow. If the first byte value v is between
+     * -113 and -120, the following long is positive, with the number of bytes that follow being -(v+112). If the first byte value v is between -121 and -128,
+     * the following long is negative, with the number of bytes that follow being -(v+120). Bytes are stored in the high-non-zero-byte-first order.
+     * 
+     * @param stream Binary output stream
+     * @param i Long to be serialized
+     * @throws IOException
+     */
+    public static void writeVLong(DataOutput stream, long i) throws IOException {
+        if (i >= -112 && i <= 127) {
+            stream.writeByte((byte) i);
+            return;
+        }
+
+        int len = -112;
+        if (i < 0) {
+            i ^= -1L; // take one's complement
+            len = -120;
+        }
+
+        long tmp = i;
+        while (tmp != 0) {
+            tmp = tmp >> 8;
+            len--;
+        }
+
+        stream.writeByte((byte) len);
+
+        len = (len < -120) ? -(len + 120) : -(len + 112);
+
+        for (int idx = len; idx != 0; idx--) {
+            int shiftbits = (idx - 1) * 8;
+            long mask = 0xFFL << shiftbits;
+            stream.writeByte((byte) ((i & mask) >> shiftbits));
+        }
     }
-    long i = 0;
-    for (int idx = 0; idx < len-1; idx++) {
-      byte b = stream.readByte();
-      i = i << 8;
-      i = i | (b & 0xFF);
+
+    /**
+     * Reads a zero-compressed encoded long from input stream and returns it.
+     * 
+     * @param stream Binary input stream
+     * @throws IOException
+     * @return deserialized long from stream.
+     */
+    public static long readVLong(DataInput stream) throws IOException {
+        byte firstByte = stream.readByte();
+        int len = decodeVIntSize(firstByte);
+        if (len == 1) {
+            return firstByte;
+        }
+        long i = 0;
+        for (int idx = 0; idx < len - 1; idx++) {
+            byte b = stream.readByte();
+            i = i << 8;
+            i = i | (b & 0xFF);
+        }
+        return (isNegativeVInt(firstByte) ? (i ^ -1L) : i);
     }
-    return (isNegativeVInt(firstByte) ? (i ^ -1L) : i);
-  }
-
-  /**
-   * Reads a zero-compressed encoded integer from input stream and returns it.
-   * @param stream Binary input stream
-   * @throws java.io.IOException
-   * @return deserialized integer from stream.
-   */
-  public static int readVInt(DataInput stream) throws IOException {
-    return (int) readVLong(stream);
-  }
-
-  /**
-   * Given the first byte of a vint/vlong, determine the sign
-   * @param value the first byte
-   * @return is the value negative
-   */
-  public static boolean isNegativeVInt(byte value) {
-    return value < -120 || (value >= -112 && value < 0);
-  }
-
-  /**
-   * Parse the first byte of a vint/vlong to determine the number of bytes
-   * @param value the first byte of the vint/vlong
-   * @return the total number of bytes (1 to 9)
-   */
-  public static int decodeVIntSize(byte value) {
-    if (value >= -112) {
-      return 1;
-    } else if (value < -120) {
-      return -119 - value;
+
+    /**
+     * Reads a zero-compressed encoded integer from input stream and returns it.
+     * 
+     * @param stream Binary input stream
+     * @throws IOException
+     * @return deserialized integer from stream.
+     */
+    public static int readVInt(DataInput stream) throws IOException {
+        return (int) readVLong(stream);
     }
-    return -111 - value;
-  }
-
-  /**
-   * Get the encoded length if an integer is stored in a variable-length format
-   * @return the encoded length
-   */
-  public static int getVIntSize(long i) {
-    if (i >= -112 && i <= 127) {
-      return 1;
+
+    /**
+     * Given the first byte of a vint/vlong, determine the sign
+     * 
+     * @param value the first byte
+     * @return is the value negative
+     */
+    public static boolean isNegativeVInt(byte value) {
+        return value < -120 || (value >= -112 && value < 0);
     }
 
-    if (i < 0) {
-      i ^= -1L; // take one's complement'
+    /**
+     * Parse the first byte of a vint/vlong to determine the number of bytes
+     * 
+     * @param value the first byte of the vint/vlong
+     * @return the total number of bytes (1 to 9)
+     */
+    public static int decodeVIntSize(byte value) {
+        if (value >= -112) {
+            return 1;
+        } else if (value < -120) {
+            return -119 - value;
+        }
+        return -111 - value;
     }
-    // find the number of bytes with non-leading zeros
-    int dataBits = Long.SIZE - Long.numberOfLeadingZeros(i);
-    // find the number of data bytes + length byte
-    return (dataBits + 7) / 8 + 1;
-  }
-
-  /**
-   * Skip <i>len</i> number of bytes in input stream<i>in</i>
-   * @param in input stream
-   * @param len number of bytes to skip
-   * @throws IOException when skipped less number of bytes
-   */
-  public static void skipFully(DataInput in, int len) throws IOException {
-    int total = 0;
-    int cur = 0;
-
-    while ((total<len) && ((cur = in.skipBytes(len-total)) > 0)) {
-        total += cur;
+
+    /**
+     * Get the encoded length when a value is stored in the variable-length format
+     * 
+     * @return the encoded length
+     */
+    public static int getVIntSize(long i) {
+        if (i >= -112 && i <= 127) {
+            return 1;
+        }
+
+        if (i < 0) {
+            i ^= -1L; // take one's complement
+        }
+        // find the number of bytes with non-leading zeros
+        int dataBits = Long.SIZE - Long.numberOfLeadingZeros(i);
+        // find the number of data bytes + length byte
+        return (dataBits + 7) / 8 + 1;
     }
 
-    if (total<len) {
-      throw new IOException("Not able to skip " + len + " bytes, possibly " +
-                            "due to end of input.");
+    /**
+     * Skip <i>len</i> bytes in the input stream <i>in</i>.
+     * 
+     * @param in input stream
+     * @param len number of bytes to skip
+     * @throws IOException if fewer than <i>len</i> bytes could be skipped
+     */
+    public static void skipFully(DataInput in, int len) throws IOException {
+        int total = 0;
+        int cur = 0;
+
+        while ((total < len) && ((cur = in.skipBytes(len - total)) > 0)) {
+            total += cur;
+        }
+
+        if (total < len) {
+            throw new IOException("Not able to skip " + len + " bytes, possibly " + "due to end of input.");
+        }
     }
-  }
 }
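
A quick round-trip sketch of the zero-compressed vint/vlong encoding documented
above. It assumes the helpers live in a class named WritableUtils, matching the
file being patched; the value 300 is arbitrary:

    import java.io.*;

    public class VLongRoundTrip {
        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            // 300 > 127, so the multi-byte form is used.
            WritableUtils.writeVLong(new DataOutputStream(bos), 300L);
            byte[] bytes = bos.toByteArray();
            // bytes == {-114, 0x01, 0x2C}: the marker byte -114 means "positive,
            // -(-114 + 112) = 2 data bytes follow", then 300 high-byte-first.
            DataInput in = new DataInputStream(new ByteArrayInputStream(bytes));
            System.out.println(WritableUtils.readVLong(in)); // prints 300
        }
    }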


[49/51] [partial] storm git commit: Update JStorm to latest release 2.1.0

Posted by pt...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/Config.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/Config.java b/jstorm-core/src/main/java/backtype/storm/Config.java
index 4273908..0e04b95 100644
--- a/jstorm-core/src/main/java/backtype/storm/Config.java
+++ b/jstorm-core/src/main/java/backtype/storm/Config.java
@@ -17,10 +17,8 @@
  */
 package backtype.storm;
 
-import backtype.storm.ConfigValidation;
 import backtype.storm.serialization.IKryoDecorator;
 import backtype.storm.serialization.IKryoFactory;
-
 import com.esotericsoftware.kryo.Serializer;
 
 import java.util.ArrayList;
@@ -29,28 +27,25 @@ import java.util.List;
 import java.util.Map;
 
 /**
- * Topology configs are specified as a plain old map. This class provides a
- * convenient way to create a topology config map by providing setter methods for
- * all the configs that can be set. It also makes it easier to do things like add
- * serializations.
- *
- * <p>This class also provides constants for all the configurations possible on
- * a Storm cluster and Storm topology. Each constant is paired with a schema
- * that defines the validity criterion of the corresponding field. Default
- * values for these configs can be found in defaults.yaml.</p>
- *
- * <p>Note that you may put other configurations in any of the configs. Storm
- * will ignore anything it doesn't recognize, but your topologies are free to make
- * use of them by reading them in the prepare method of Bolts or the open method of
- * Spouts.</p>
+ * Topology configs are specified as a plain old map. This class provides a convenient way to create a topology config map by providing setter methods for all
+ * the configs that can be set. It also makes it easier to do things like add serializations.
+ * <p/>
+ * <p>
+ * This class also provides constants for all the configurations possible on a Storm cluster and Storm topology. Each constant is paired with a schema that
+ * defines the validity criterion of the corresponding field. Default values for these configs can be found in defaults.yaml.
+ * </p>
+ * <p/>
+ * <p>
+ * Note that you may put other configurations in any of the configs. Storm will ignore anything it doesn't recognize, but your topologies are free to make use
+ * of them by reading them in the prepare method of Bolts or the open method of Spouts.
+ * </p>
  */
 public class Config extends HashMap<String, Object> {
-    //DO NOT CHANGE UNLESS WE ADD IN STATE NOT STORED IN THE PARENT CLASS
+    // DO NOT CHANGE UNLESS WE ADD IN STATE NOT STORED IN THE PARENT CLASS
     private static final long serialVersionUID = -1550278723792864455L;
 
     /**
-     * This is part of a temporary workaround to a ZK bug, it is the 'scheme:acl' for
-     * the user Nimbus and Supervisors use to authenticate with ZK.
+     * This is part of a temporary workaround to a ZK bug; it is the 'scheme:acl' for the user Nimbus and Supervisors use to authenticate with ZK.
      */
     public static final String STORM_ZOOKEEPER_SUPERACL = "storm.zookeeper.superACL";
     public static final Object STORM_ZOOKEEPER_SUPERACL_SCHEMA = String.class;
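
A short sketch of the "plain old map" usage described in the class javadoc
above; the custom key and its value are invented for illustration, and any key
Storm does not recognize is passed through untouched to the topology:

    import backtype.storm.Config;

    Config conf = new Config();
    conf.setNumWorkers(4);            // typed setter for topology.workers
    conf.setDebug(false);             // typed setter for topology.debug
    conf.put("myapp.threshold", 42);  // custom key: ignored by Storm itself, but
                                      // readable in a bolt's prepare() or a spout's open()
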
@@ -104,7 +99,8 @@ public class Config extends HashMap<String, Object> {
     public static final Object STORM_MESSAGING_NETTY_CLIENT_WORKER_THREADS_SCHEMA = ConfigValidation.IntegerValidator;
 
     /**
-     * If the Netty messaging layer is busy, the Netty client will try to batch message as more as possible up to the size of STORM_NETTY_MESSAGE_BATCH_SIZE bytes
+     * If the Netty messaging layer is busy, the Netty client will try to batch messages as much as possible, up to STORM_NETTY_MESSAGE_BATCH_SIZE
+     * bytes.
      */
     public static final String STORM_NETTY_MESSAGE_BATCH_SIZE = "storm.messaging.netty.transfer.batch.size";
     public static final Object STORM_NETTY_MESSAGE_BATCH_SIZE_SCHEMA = ConfigValidation.IntegerValidator;
@@ -122,8 +118,8 @@ public class Config extends HashMap<String, Object> {
     public static final Object STORM_MESSAGING_NETTY_AUTHENTICATION_SCHEMA = Boolean.class;
 
     /**
-     * The delegate for serializing metadata, should be used for serialized objects stored in zookeeper and on disk.
-     * This is NOT used for compressing serialized tuples sent between topologies.
+     * The delegate for serializing metadata; it should be used for serialized objects stored in zookeeper and on disk. This is NOT used for compressing
+     * serialized tuples sent between topologies.
      */
     public static final String STORM_META_SERIALIZATION_DELEGATE = "storm.meta.serialization.delegate";
     public static final Object STORM_META_SERIALIZATION_DELEGATE_SCHEMA = String.class;
@@ -141,16 +137,15 @@ public class Config extends HashMap<String, Object> {
     public static final Object STORM_ZOOKEEPER_PORT_SCHEMA = ConfigValidation.IntegerValidator;
 
     /**
-     * A directory on the local filesystem used by Storm for any local
-     * filesystem usage it needs. The directory must exist and the Storm daemons must
-     * have permission to read/write from this location.
+     * A directory on the local filesystem used by Storm for any local filesystem usage it needs. The directory must exist and the Storm daemons must have
+     * permission to read/write from this location.
      */
     public static final String STORM_LOCAL_DIR = "storm.local.dir";
     public static final Object STORM_LOCAL_DIR_SCHEMA = String.class;
 
     /**
      * A global task scheduler used to assign topologies' tasks to supervisors' workers.
-     *
+     * <p/>
      * If this is not set, a default system scheduler will be used.
      */
     public static final String STORM_SCHEDULER = "storm.scheduler";
@@ -163,11 +158,10 @@ public class Config extends HashMap<String, Object> {
     public static final Object STORM_CLUSTER_MODE_SCHEMA = String.class;
 
     /**
-     * The hostname the supervisors/workers should report to nimbus. If unset, Storm will
-     * get the hostname to report by calling <code>InetAddress.getLocalHost().getCanonicalHostName()</code>.
-     *
-     * You should set this config when you dont have a DNS which supervisors/workers
-     * can utilize to find each other based on hostname got from calls to
+     * The hostname the supervisors/workers should report to nimbus. If unset, Storm will get the hostname to report by calling
+     * <code>InetAddress.getLocalHost().getCanonicalHostName()</code>.
+     * <p/>
+     * You should set this config when you don't have a DNS that supervisors/workers can use to find each other based on the hostname obtained from calls to
      * <code>InetAddress.getLocalHost().getCanonicalHostName()</code>.
      */
     public static final String STORM_LOCAL_HOSTNAME = "storm.local.hostname";
@@ -198,25 +192,22 @@ public class Config extends HashMap<String, Object> {
     public static final Object STORM_THRIFT_TRANSPORT_PLUGIN_SCHEMA = String.class;
 
     /**
-     * The serializer class for ListDelegate (tuple payload).
-     * The default serializer will be ListDelegateSerializer
+     * The serializer class for ListDelegate (tuple payload). The default serializer will be ListDelegateSerializer
      */
     public static final String TOPOLOGY_TUPLE_SERIALIZER = "topology.tuple.serializer";
     public static final Object TOPOLOGY_TUPLE_SERIALIZER_SCHEMA = String.class;
 
     /**
-     * Try to serialize all tuples, even for local transfers.  This should only be used
-     * for testing, as a sanity check that all of your tuples are setup properly.
+     * Try to serialize all tuples, even for local transfers. This should only be used for testing, as a sanity check that all of your tuples are set up
+     * properly.
      */
     public static final String TOPOLOGY_TESTING_ALWAYS_TRY_SERIALIZE = "topology.testing.always.try.serialize";
     public static final Object TOPOLOGY_TESTING_ALWAYS_TRY_SERIALIZE_SCHEMA = Boolean.class;
 
     /**
-     * Whether or not to use ZeroMQ for messaging in local mode. If this is set
-     * to false, then Storm will use a pure-Java messaging system. The purpose
-     * of this flag is to make it easy to run Storm in local mode by eliminating
-     * the need for native dependencies, which can be difficult to install.
-     *
+     * Whether or not to use ZeroMQ for messaging in local mode. If this is set to false, then Storm will use a pure-Java messaging system. The purpose of this
+     * flag is to make it easy to run Storm in local mode by eliminating the need for native dependencies, which can be difficult to install.
+     * <p/>
      * Defaults to false.
      */
     public static final String STORM_LOCAL_MODE_ZMQ = "storm.local.mode.zmq";
@@ -243,49 +234,45 @@ public class Config extends HashMap<String, Object> {
     /**
      * The number of times to retry a Zookeeper operation.
      */
-    public static final String STORM_ZOOKEEPER_RETRY_TIMES="storm.zookeeper.retry.times";
+    public static final String STORM_ZOOKEEPER_RETRY_TIMES = "storm.zookeeper.retry.times";
     public static final Object STORM_ZOOKEEPER_RETRY_TIMES_SCHEMA = ConfigValidation.IntegerValidator;
 
     /**
      * The interval between retries of a Zookeeper operation.
      */
-    public static final String STORM_ZOOKEEPER_RETRY_INTERVAL="storm.zookeeper.retry.interval";
+    public static final String STORM_ZOOKEEPER_RETRY_INTERVAL = "storm.zookeeper.retry.interval";
     public static final Object STORM_ZOOKEEPER_RETRY_INTERVAL_SCHEMA = ConfigValidation.IntegerValidator;
 
     /**
      * The ceiling of the interval between retries of a Zookeeper operation.
      */
-    public static final String STORM_ZOOKEEPER_RETRY_INTERVAL_CEILING="storm.zookeeper.retry.intervalceiling.millis";
+    public static final String STORM_ZOOKEEPER_RETRY_INTERVAL_CEILING = "storm.zookeeper.retry.intervalceiling.millis";
     public static final Object STORM_ZOOKEEPER_RETRY_INTERVAL_CEILING_SCHEMA = ConfigValidation.IntegerValidator;
 
     /**
      * The cluster Zookeeper authentication scheme to use, e.g. "digest". Defaults to no authentication.
      */
-    public static final String STORM_ZOOKEEPER_AUTH_SCHEME="storm.zookeeper.auth.scheme";
+    public static final String STORM_ZOOKEEPER_AUTH_SCHEME = "storm.zookeeper.auth.scheme";
     public static final Object STORM_ZOOKEEPER_AUTH_SCHEME_SCHEMA = String.class;
 
     /**
-     * A string representing the payload for cluster Zookeeper authentication.
-     * It gets serialized using UTF-8 encoding during authentication.
-     * Note that if this is set to something with a secret (as when using
-     * digest authentication) then it should only be set in the
-     * storm-cluster-auth.yaml file.
-     * This file storm-cluster-auth.yaml should then be protected with
-     * appropriate permissions that deny access from workers.
+     * A string representing the payload for cluster Zookeeper authentication. It gets serialized using UTF-8 encoding during authentication. Note that if this
+     * is set to something with a secret (as when using digest authentication) then it should only be set in the storm-cluster-auth.yaml file. That file
+     * should then be protected with appropriate permissions that deny access from workers.
      */
-    public static final String STORM_ZOOKEEPER_AUTH_PAYLOAD="storm.zookeeper.auth.payload";
+    public static final String STORM_ZOOKEEPER_AUTH_PAYLOAD = "storm.zookeeper.auth.payload";
     public static final Object STORM_ZOOKEEPER_AUTH_PAYLOAD_SCHEMA = String.class;
 
     /**
      * The topology Zookeeper authentication scheme to use, e.g. "digest". Defaults to no authentication.
      */
-    public static final String STORM_ZOOKEEPER_TOPOLOGY_AUTH_SCHEME="storm.zookeeper.topology.auth.scheme";
+    public static final String STORM_ZOOKEEPER_TOPOLOGY_AUTH_SCHEME = "storm.zookeeper.topology.auth.scheme";
     public static final Object STORM_ZOOKEEPER_TOPOLOGY_AUTH_SCHEME_SCHEMA = String.class;
 
     /**
      * A string representing the payload for topology Zookeeper authentication. It gets serialized using UTF-8 encoding during authentication.
      */
-    public static final String STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD="storm.zookeeper.topology.auth.payload";
+    public static final String STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD = "storm.zookeeper.topology.auth.payload";
     public static final Object STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD_SCHEMA = String.class;
 
     /**
@@ -298,19 +285,19 @@ public class Config extends HashMap<String, Object> {
     /**
      * The number of times to retry a Nimbus operation.
      */
-    public static final String STORM_NIMBUS_RETRY_TIMES="storm.nimbus.retry.times";
+    public static final String STORM_NIMBUS_RETRY_TIMES = "storm.nimbus.retry.times";
     public static final Object STORM_NIMBUS_RETRY_TIMES_SCHEMA = Number.class;
 
     /**
      * The starting interval between exponential backoff retries of a Nimbus operation.
      */
-    public static final String STORM_NIMBUS_RETRY_INTERVAL="storm.nimbus.retry.interval.millis";
+    public static final String STORM_NIMBUS_RETRY_INTERVAL = "storm.nimbus.retry.interval.millis";
     public static final Object STORM_NIMBUS_RETRY_INTERVAL_SCHEMA = Number.class;
 
     /**
      * The ceiling of the interval between retries of a client connect to Nimbus operation.
      */
-    public static final String STORM_NIMBUS_RETRY_INTERVAL_CEILING="storm.nimbus.retry.intervalceiling.millis";
+    public static final String STORM_NIMBUS_RETRY_INTERVAL_CEILING = "storm.nimbus.retry.intervalceiling.millis";
     public static final Object STORM_NIMBUS_RETRY_INTERVAL_CEILING_SCHEMA = Number.class;
 
     /**
@@ -326,8 +313,7 @@ public class Config extends HashMap<String, Object> {
     public static final Object NIMBUS_THRIFT_TRANSPORT_PLUGIN_SCHEMA = String.class;
 
     /**
-     * Which port the Thrift interface of Nimbus should run on. Clients should
-     * connect to this port to upload jars and submit topologies.
+     * Which port the Thrift interface of Nimbus should run on. Clients should connect to this port to upload jars and submit topologies.
      */
     public static final String NIMBUS_THRIFT_PORT = "nimbus.thrift.port";
     public static final Object NIMBUS_THRIFT_PORT_SCHEMA = ConfigValidation.IntegerValidator;
@@ -339,30 +325,29 @@ public class Config extends HashMap<String, Object> {
     public static final Object NIMBUS_THRIFT_THREADS_SCHEMA = Number.class;
 
     /**
-     * A list of users that are cluster admins and can run any command.  To use this set
-     * nimbus.authorizer to backtype.storm.security.auth.authorizer.SimpleACLAuthorizer
+     * A list of users that are cluster admins and can run any command. To use this, set nimbus.authorizer to
+     * backtype.storm.security.auth.authorizer.SimpleACLAuthorizer
      */
     public static final String NIMBUS_ADMINS = "nimbus.admins";
     public static final Object NIMBUS_ADMINS_SCHEMA = ConfigValidation.StringsValidator;
 
     /**
-     * A list of users that are the only ones allowed to run user operation on storm cluster.
-     * To use this set nimbus.authorizer to backtype.storm.security.auth.authorizer.SimpleACLAuthorizer
+     * A list of users that are the only ones allowed to run user operations on the storm cluster. To use this, set nimbus.authorizer to
+     * backtype.storm.security.auth.authorizer.SimpleACLAuthorizer
      */
     public static final String NIMBUS_USERS = "nimbus.users";
     public static final Object NIMBUS_USERS_SCHEMA = ConfigValidation.StringsValidator;
 
     /**
-     * A list of groups , users belong to these groups are the only ones allowed to run user operation on storm cluster.
-     * To use this set nimbus.authorizer to backtype.storm.security.auth.authorizer.SimpleACLAuthorizer
+     * A list of groups; users belonging to these groups are the only ones allowed to run user operations on the storm cluster. To use this, set nimbus.authorizer to
+     * backtype.storm.security.auth.authorizer.SimpleACLAuthorizer
      */
     public static final String NIMBUS_GROUPS = "nimbus.groups";
     public static final Object NIMBUS_GROUPS_SCHEMA = ConfigValidation.StringsValidator;
 
     /**
-     * A list of users that run the supervisors and should be authorized to interact with
-     * nimbus as a supervisor would.  To use this set
-     * nimbus.authorizer to backtype.storm.security.auth.authorizer.SimpleACLAuthorizer
+     * A list of users that run the supervisors and should be authorized to interact with nimbus as a supervisor would. To use this, set nimbus.authorizer to
+     * backtype.storm.security.auth.authorizer.SimpleACLAuthorizer
      */
     public static final String NIMBUS_SUPERVISOR_USERS = "nimbus.supervisor.users";
     public static final Object NIMBUS_SUPERVISOR_USERS_SCHEMA = ConfigValidation.StringsValidator;
@@ -374,83 +359,70 @@ public class Config extends HashMap<String, Object> {
     public static final Object NIMBUS_THRIFT_MAX_BUFFER_SIZE_SCHEMA = ConfigValidation.IntegerValidator;
 
     /**
-     * This parameter is used by the storm-deploy project to configure the
-     * jvm options for the nimbus daemon.
+     * This parameter is used by the storm-deploy project to configure the jvm options for the nimbus daemon.
      */
     public static final String NIMBUS_CHILDOPTS = "nimbus.childopts";
     public static final Object NIMBUS_CHILDOPTS_SCHEMA = String.class;
 
-
     /**
-     * How long without heartbeating a task can go before nimbus will consider the
-     * task dead and reassign it to another location.
+     * How long without heartbeating a task can go before nimbus will consider the task dead and reassign it to another location.
      */
     public static final String NIMBUS_TASK_TIMEOUT_SECS = "nimbus.task.timeout.secs";
     public static final Object NIMBUS_TASK_TIMEOUT_SECS_SCHEMA = ConfigValidation.IntegerValidator;
 
-
     /**
-     * How often nimbus should wake up to check heartbeats and do reassignments. Note
-     * that if a machine ever goes down Nimbus will immediately wake up and take action.
-     * This parameter is for checking for failures when there's no explicit event like that
-     * occuring.
+     * How often nimbus should wake up to check heartbeats and do reassignments. Note that if a machine ever goes down Nimbus will immediately wake up and take
+     * action. This parameter is for checking for failures when there's no explicit event like that occurring.
      */
     public static final String NIMBUS_MONITOR_FREQ_SECS = "nimbus.monitor.freq.secs";
     public static final Object NIMBUS_MONITOR_FREQ_SECS_SCHEMA = ConfigValidation.IntegerValidator;
 
     /**
      * How often nimbus should wake the cleanup thread to clean the inbox.
-     * @see NIMBUS_INBOX_JAR_EXPIRATION_SECS
      */
     public static final String NIMBUS_CLEANUP_INBOX_FREQ_SECS = "nimbus.cleanup.inbox.freq.secs";
     public static final Object NIMBUS_CLEANUP_INBOX_FREQ_SECS_SCHEMA = ConfigValidation.IntegerValidator;
 
     /**
      * The length of time a jar file lives in the inbox before being deleted by the cleanup thread.
-     *
-     * Probably keep this value greater than or equal to NIMBUS_CLEANUP_INBOX_JAR_EXPIRATION_SECS.
-     * Note that the time it takes to delete an inbox jar file is going to be somewhat more than
-     * NIMBUS_CLEANUP_INBOX_JAR_EXPIRATION_SECS (depending on how often NIMBUS_CLEANUP_FREQ_SECS
-     * is set to).
-     * @see NIMBUS_CLEANUP_FREQ_SECS
+     * <p/>
+     * Probably keep this value greater than or equal to NIMBUS_CLEANUP_INBOX_JAR_EXPIRATION_SECS. Note that the time it takes to delete an inbox jar file is
+     * going to be somewhat more than NIMBUS_CLEANUP_INBOX_JAR_EXPIRATION_SECS (depending on how often NIMBUS_CLEANUP_FREQ_SECS is set to).
      */
     public static final String NIMBUS_INBOX_JAR_EXPIRATION_SECS = "nimbus.inbox.jar.expiration.secs";
     public static final Object NIMBUS_INBOX_JAR_EXPIRATION_SECS_SCHEMA = ConfigValidation.IntegerValidator;
 
     /**
-     * How long before a supervisor can go without heartbeating before nimbus considers it dead
-     * and stops assigning new work to it.
+     * How long a supervisor can go without heartbeating before nimbus considers it dead and stops assigning new work to it.
      */
     public static final String NIMBUS_SUPERVISOR_TIMEOUT_SECS = "nimbus.supervisor.timeout.secs";
     public static final Object NIMBUS_SUPERVISOR_TIMEOUT_SECS_SCHEMA = ConfigValidation.IntegerValidator;
 
     /**
-     * A special timeout used when a task is initially launched. During launch, this is the timeout
-     * used until the first heartbeat, overriding nimbus.task.timeout.secs.
-     *
-     * <p>A separate timeout exists for launch because there can be quite a bit of overhead
-     * to launching new JVM's and configuring them.</p>
+     * A special timeout used when a task is initially launched. During launch, this is the timeout used until the first heartbeat, overriding
+     * nimbus.task.timeout.secs.
+     * <p/>
+     * <p>
+     * A separate timeout exists for launch because there can be quite a bit of overhead to launching new JVM's and configuring them.
+     * </p>
      */
     public static final String NIMBUS_TASK_LAUNCH_SECS = "nimbus.task.launch.secs";
     public static final Object NIMBUS_TASK_LAUNCH_SECS_SCHEMA = ConfigValidation.IntegerValidator;
 
     /**
-     * Whether or not nimbus should reassign tasks if it detects that a task goes down.
-     * Defaults to true, and it's not recommended to change this value.
+     * Whether or not nimbus should reassign tasks if it detects that a task goes down. Defaults to true, and it's not recommended to change this value.
      */
     public static final String NIMBUS_REASSIGN = "nimbus.reassign";
     public static final Object NIMBUS_REASSIGN_SCHEMA = Boolean.class;
 
     /**
-     * During upload/download with the master, how long an upload or download connection is idle
-     * before nimbus considers it dead and drops the connection.
+     * During upload/download with the master, how long an upload or download connection is idle before nimbus considers it dead and drops the connection.
      */
     public static final String NIMBUS_FILE_COPY_EXPIRATION_SECS = "nimbus.file.copy.expiration.secs";
     public static final Object NIMBUS_FILE_COPY_EXPIRATION_SECS_SCHEMA = ConfigValidation.IntegerValidator;
 
     /**
-     * A custom class that implements ITopologyValidator that is run whenever a
-     * topology is submitted. Can be used to provide business-specific logic for
+     * A custom class implementing ITopologyValidator that is run whenever a topology is submitted. Can be used to provide business-specific logic for
      * whether topologies are allowed to run or not.
      */
     public static final String NIMBUS_TOPOLOGY_VALIDATOR = "nimbus.topology.validator";
@@ -462,14 +434,12 @@ public class Config extends HashMap<String, Object> {
     public static final String NIMBUS_AUTHORIZER = "nimbus.authorizer";
     public static final Object NIMBUS_AUTHORIZER_SCHEMA = String.class;
 
-
     /**
      * Impersonation user ACL config entries.
      */
     public static final String NIMBUS_IMPERSONATION_AUTHORIZER = "nimbus.impersonation.authorizer";
     public static final Object NIMBUS_IMPERSONATION_AUTHORIZER_SCHEMA = String.class;
 
-
     /**
      * Impersonation user ACL config entries.
      */
@@ -489,8 +459,7 @@ public class Config extends HashMap<String, Object> {
     public static final Object NIMBUS_CREDENTIAL_RENEWERS_SCHEMA = ConfigValidation.StringsValidator;
 
     /**
-     * A list of plugins that nimbus should load during submit topology to populate
-     * credentials on user's behalf.
+     * A list of plugins that nimbus should load during submit topology to populate credentials on user's behalf.
      */
     public static final String NIMBUS_AUTO_CRED_PLUGINS = "nimbus.autocredential.plugins.classes";
     public static final Object NIMBUS_AUTO_CRED_PLUGINS_SCHEMA = ConfigValidation.StringsValidator;
@@ -592,8 +561,7 @@ public class Config extends HashMap<String, Object> {
     public static final Object UI_HTTPS_KEYSTORE_PASSWORD_SCHEMA = String.class;
 
     /**
-     * Type of keystore used by Storm UI for setting up HTTPS (SSL).
-     * see http://docs.oracle.com/javase/7/docs/api/java/security/KeyStore.html for more details.
+     * Type of keystore used by Storm UI for setting up HTTPS (SSL). see http://docs.oracle.com/javase/7/docs/api/java/security/KeyStore.html for more details.
      */
     public static final String UI_HTTPS_KEYSTORE_TYPE = "ui.https.keystore.type";
     public static final Object UI_HTTPS_KEYSTORE_TYPE_SCHEMA = String.class;
@@ -617,8 +585,8 @@ public class Config extends HashMap<String, Object> {
     public static final Object UI_HTTPS_TRUSTSTORE_PASSWORD_SCHEMA = String.class;
 
     /**
-     * Type of truststore used by Storm UI for setting up HTTPS (SSL).
-     * see http://docs.oracle.com/javase/7/docs/api/java/security/KeyStore.html for more details.
+     * Type of truststore used by Storm UI for setting up HTTPS (SSL). see http://docs.oracle.com/javase/7/docs/api/java/security/KeyStore.html for more
+     * details.
      */
     public static final String UI_HTTPS_TRUSTSTORE_TYPE = "ui.https.truststore.type";
     public static final Object UI_HTTPS_TRUSTSTORE_TYPE_SCHEMA = String.class;
@@ -632,7 +600,6 @@ public class Config extends HashMap<String, Object> {
     public static final String UI_HTTPS_NEED_CLIENT_AUTH = "ui.https.need.client.auth";
     public static final Object UI_HTTPS_NEED_CLIENT_AUTH_SCHEMA = Boolean.class;
 
-
     /**
      * List of DRPC servers so that the DRPCSpout knows who to talk to.
      */
@@ -664,8 +631,8 @@ public class Config extends HashMap<String, Object> {
     public static final Object DRPC_HTTPS_KEYSTORE_PASSWORD_SCHEMA = String.class;
 
     /**
-     * Type of keystore used by Storm DRPC for setting up HTTPS (SSL).
-     * see http://docs.oracle.com/javase/7/docs/api/java/security/KeyStore.html for more details.
+     * Type of keystore used by Storm DRPC for setting up HTTPS (SSL). see http://docs.oracle.com/javase/7/docs/api/java/security/KeyStore.html for more
+     * details.
      */
     public static final String DRPC_HTTPS_KEYSTORE_TYPE = "drpc.https.keystore.type";
     public static final Object DRPC_HTTPS_KEYSTORE_TYPE_SCHEMA = String.class;
@@ -689,8 +656,8 @@ public class Config extends HashMap<String, Object> {
     public static final Object DRPC_HTTPS_TRUSTSTORE_PASSWORD_SCHEMA = String.class;
 
     /**
-     * Type of truststore used by Storm DRPC for setting up HTTPS (SSL).
-     * see http://docs.oracle.com/javase/7/docs/api/java/security/KeyStore.html for more details.
+     * Type of truststore used by Storm DRPC for setting up HTTPS (SSL). see http://docs.oracle.com/javase/7/docs/api/java/security/KeyStore.html for more
+     * details.
      */
     public static final String DRPC_HTTPS_TRUSTSTORE_TYPE = "drpc.https.truststore.type";
     public static final Object DRPC_HTTPS_TRUSTSTORE_TYPE_SCHEMA = String.class;
@@ -724,26 +691,20 @@ public class Config extends HashMap<String, Object> {
 
     /**
      * The Access Control List for the DRPC Authorizer.
-     * @see DRPCSimpleAclAuthorizer
      */
     public static final String DRPC_AUTHORIZER_ACL = "drpc.authorizer.acl";
     public static final Object DRPC_AUTHORIZER_ACL_SCHEMA = Map.class;
 
     /**
      * File name of the DRPC Authorizer ACL.
-     * @see DRPCSimpleAclAuthorizer
      */
     public static final String DRPC_AUTHORIZER_ACL_FILENAME = "drpc.authorizer.acl.filename";
     public static final Object DRPC_AUTHORIZER_ACL_FILENAME_SCHEMA = String.class;
 
     /**
-     * Whether the DRPCSimpleAclAuthorizer should deny requests for operations
-     * involving functions that have no explicit ACL entry. When set to false
-     * (the default) DRPC functions that have no entry in the ACL will be
-     * permitted, which is appropriate for a development environment. When set
-     * to true, explicit ACL entries are required for every DRPC function, and
-     * any request for functions will be denied.
-     * @see DRPCSimpleAclAuthorizer
+     * Whether the DRPCSimpleAclAuthorizer should deny requests for operations involving functions that have no explicit ACL entry. When set to false (the
+     * default) DRPC functions that have no entry in the ACL will be permitted, which is appropriate for a development environment. When set to true, explicit
+     * ACL entries are required for every DRPC function, and any request for functions will be denied.
      */
     public static final String DRPC_AUTHORIZER_ACL_STRICT = "drpc.authorizer.acl.strict";
     public static final Object DRPC_AUTHORIZER_ACL_STRICT_SCHEMA = Boolean.class;
@@ -785,11 +746,10 @@ public class Config extends HashMap<String, Object> {
     public static final Object DRPC_INVOCATIONS_THREADS_SCHEMA = Number.class;
 
     /**
-     * The timeout on DRPC requests within the DRPC server. Defaults to 10 minutes. Note that requests can also
-     * timeout based on the socket timeout on the DRPC client, and separately based on the topology message
-     * timeout for the topology implementing the DRPC function.
+     * The timeout on DRPC requests within the DRPC server. Defaults to 10 minutes. Note that requests can also time out based on the socket timeout on the DRPC
+     * client, and separately based on the topology message timeout for the topology implementing the DRPC function.
      */
-    public static final String DRPC_REQUEST_TIMEOUT_SECS  = "drpc.request.timeout.secs";
+    public static final String DRPC_REQUEST_TIMEOUT_SECS = "drpc.request.timeout.secs";
     public static final Object DRPC_REQUEST_TIMEOUT_SECS_SCHEMA = ConfigValidation.IntegerValidator;
 
     /**
@@ -816,9 +776,8 @@ public class Config extends HashMap<String, Object> {
     public static final String SUPERVISOR_SCHEDULER_META = "supervisor.scheduler.meta";
     public static final Object SUPERVISOR_SCHEDULER_META_SCHEMA = Map.class;
     /**
-     * A list of ports that can run workers on this supervisor. Each worker uses one port, and
-     * the supervisor will only run one worker per port. Use this configuration to tune
-     * how many workers run on each machine.
+     * A list of ports that can run workers on this supervisor. Each worker uses one port, and the supervisor will only run one worker per port. Use this
+     * configuration to tune how many workers run on each machine.
      */
     public static final String SUPERVISOR_SLOTS_PORTS = "supervisor.slots.ports";
     public static final Object SUPERVISOR_SLOTS_PORTS_SCHEMA = ConfigValidation.IntegersValidator;
@@ -836,8 +795,7 @@ public class Config extends HashMap<String, Object> {
     public static final Object DRPC_HTTP_FILTER_SCHEMA = String.class;
 
     /**
-     * Initialization parameters for the javax.servlet.Filter of the DRPC HTTP
-     * service
+     * Initialization parameters for the javax.servlet.Filter of the DRPC HTTP service
      */
     public static final String DRPC_HTTP_FILTER_PARAMS = "drpc.http.filter.params";
     public static final Object DRPC_HTTP_FILTER_PARAMS_SCHEMA = Map.class;
@@ -849,15 +807,13 @@ public class Config extends HashMap<String, Object> {
     public static final Object NIMBUS_EXECUTORS_PER_TOPOLOGY_SCHEMA = Number.class;
 
     /**
-     * This parameter is used by the storm-deploy project to configure the
-     * jvm options for the supervisor daemon.
+     * This parameter is used by the storm-deploy project to configure the jvm options for the supervisor daemon.
      */
     public static final String SUPERVISOR_CHILDOPTS = "supervisor.childopts";
     public static final Object SUPERVISOR_CHILDOPTS_SCHEMA = String.class;
 
     /**
-     * How long a worker can go without heartbeating before the supervisor tries to
-     * restart the worker process.
+     * How long a worker can go without heartbeating before the supervisor tries to restart the worker process.
      */
     public static final String SUPERVISOR_WORKER_TIMEOUT_SECS = "supervisor.worker.timeout.secs";
     public static final Object SUPERVISOR_WORKER_TIMEOUT_SECS_SCHEMA = ConfigValidation.IntegerValidator;
@@ -869,18 +825,15 @@ public class Config extends HashMap<String, Object> {
     public static final Object SUPERVISOR_WORKER_SHUTDOWN_SLEEP_SECS_SCHEMA = ConfigValidation.IntegerValidator;
 
     /**
-     * How long a worker can go without heartbeating during the initial launch before
-     * the supervisor tries to restart the worker process. This value override
-     * supervisor.worker.timeout.secs during launch because there is additional
-     * overhead to starting and configuring the JVM on launch.
+     * How long a worker can go without heartbeating during the initial launch before the supervisor tries to restart the worker process. This value overrides
+     * supervisor.worker.timeout.secs during launch because there is additional overhead to starting and configuring the JVM on launch.
      */
     public static final String SUPERVISOR_WORKER_START_TIMEOUT_SECS = "supervisor.worker.start.timeout.secs";
     public static final Object SUPERVISOR_WORKER_START_TIMEOUT_SECS_SCHEMA = ConfigValidation.IntegerValidator;
 
     /**
-     * Whether or not the supervisor should launch workers assigned to it. Defaults
-     * to true -- and you should probably never change this value. This configuration
-     * is used in the Storm unit tests.
+     * Whether or not the supervisor should launch workers assigned to it. Defaults to true -- and you should probably never change this value. This
+     * configuration is used in the Storm unit tests.
      */
     public static final String SUPERVISOR_ENABLE = "supervisor.enable";
     public static final Object SUPERVISOR_ENABLE_SCHEMA = Boolean.class;
@@ -891,42 +844,34 @@ public class Config extends HashMap<String, Object> {
     public static final String SUPERVISOR_HEARTBEAT_FREQUENCY_SECS = "supervisor.heartbeat.frequency.secs";
     public static final Object SUPERVISOR_HEARTBEAT_FREQUENCY_SECS_SCHEMA = ConfigValidation.IntegerValidator;
 
-
     /**
-     * How often the supervisor checks the worker heartbeats to see if any of them
-     * need to be restarted.
+     * How often the supervisor checks the worker heartbeats to see if any of them need to be restarted.
      */
     public static final String SUPERVISOR_MONITOR_FREQUENCY_SECS = "supervisor.monitor.frequency.secs";
     public static final Object SUPERVISOR_MONITOR_FREQUENCY_SECS_SCHEMA = ConfigValidation.IntegerValidator;
 
     /**
-     * Should the supervior try to run the worker as the lauching user or not.  Defaults to false.
+     * Should the supervisor try to run the worker as the launching user or not. Defaults to false.
      */
     public static final String SUPERVISOR_RUN_WORKER_AS_USER = "supervisor.run.worker.as.user";
     public static final Object SUPERVISOR_RUN_WORKER_AS_USER_SCHEMA = Boolean.class;
 
     /**
-     * Full path to the worker-laucher executable that will be used to lauch workers when
-     * SUPERVISOR_RUN_WORKER_AS_USER is set to true.
+     * Full path to the worker-launcher executable that will be used to launch workers when SUPERVISOR_RUN_WORKER_AS_USER is set to true.
      */
     public static final String SUPERVISOR_WORKER_LAUNCHER = "supervisor.worker.launcher";
     public static final Object SUPERVISOR_WORKER_LAUNCHER_SCHEMA = String.class;
 
     /**
-     * The jvm opts provided to workers launched by this supervisor. All "%ID%", "%WORKER-ID%", "%TOPOLOGY-ID%"
-     * and "%WORKER-PORT%" substrings are replaced with:
-     * %ID%          -> port (for backward compatibility),
-     * %WORKER-ID%   -> worker-id,
-     * %TOPOLOGY-ID%    -> topology-id,
-     * %WORKER-PORT% -> port.
+     * The jvm opts provided to workers launched by this supervisor. All "%ID%", "%WORKER-ID%", "%TOPOLOGY-ID%" and "%WORKER-PORT%" substrings are replaced
+     * with: %ID% -> port (for backward compatibility), %WORKER-ID% -> worker-id, %TOPOLOGY-ID% -> topology-id, %WORKER-PORT% -> port.
      */
     public static final String WORKER_CHILDOPTS = "worker.childopts";
     public static final Object WORKER_CHILDOPTS_SCHEMA = ConfigValidation.StringOrStringListValidator;
 
     /**
-     * The jvm opts provided to workers launched by this supervisor for GC. All "%ID%" substrings are replaced
-     * with an identifier for this worker.  Because the JVM complains about multiple GC opts the topology
-     * can override this default value by setting topology.worker.gc.childopts.
+     * The jvm opts provided to workers launched by this supervisor for GC. All "%ID%" substrings are replaced with an identifier for this worker. Because the
+     * JVM complains about multiple GC opts the topology can override this default value by setting topology.worker.gc.childopts.
      */
     public static final String WORKER_GC_CHILDOPTS = "worker.gc.childopts";
     public static final Object WORKER_GC_CHILDOPTS_SCHEMA = ConfigValidation.StringOrStringListValidator;
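
To make the placeholder substitution described for worker.childopts concrete,
a small sketch (the worker id, port, and topology id are invented values):

    // worker.childopts as configured:
    String opts = "-Xmx768m -Dworker.id=%WORKER-ID% -Dlogfile.name=worker-%WORKER-PORT%.log";
    // What the supervisor effectively produces for a worker on port 6701:
    String expanded = opts.replace("%ID%", "6701")
                          .replace("%WORKER-ID%", "ab12cd34")
                          .replace("%TOPOLOGY-ID%", "mytopo-1-1448928000")
                          .replace("%WORKER-PORT%", "6701");
    // expanded: "-Xmx768m -Dworker.id=ab12cd34 -Dlogfile.name=worker-6701.log"
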
@@ -949,42 +894,37 @@ public class Config extends HashMap<String, Object> {
     public static final String TASK_HEARTBEAT_FREQUENCY_SECS = "task.heartbeat.frequency.secs";
     public static final Object TASK_HEARTBEAT_FREQUENCY_SECS_SCHEMA = ConfigValidation.IntegerValidator;
 
-
     /**
-     * How often a task should sync its connections with other tasks (if a task is
-     * reassigned, the other tasks sending messages to it need to refresh their connections).
-     * In general though, when a reassignment happens other tasks will be notified
-     * almost immediately. This configuration is here just in case that notification doesn't
-     * come through.
+     * How often a task should sync its connections with other tasks (if a task is reassigned, the other tasks sending messages to it need to refresh their
+     * connections). In general though, when a reassignment happens other tasks will be notified almost immediately. This configuration is here just in case
+     * that notification doesn't come through.
      */
     public static final String TASK_REFRESH_POLL_SECS = "task.refresh.poll.secs";
     public static final Object TASK_REFRESH_POLL_SECS_SCHEMA = ConfigValidation.IntegerValidator;
 
-
     /**
      * How often a task should sync credentials, worst case.
      */
     public static final String TASK_CREDENTIALS_POLL_SECS = "task.credentials.poll.secs";
     public static final Object TASK_CREDENTIALS_POLL_SECS_SCHEMA = Number.class;
 
-
     /**
-     * A list of users that are allowed to interact with the topology.  To use this set
-     * nimbus.authorizer to backtype.storm.security.auth.authorizer.SimpleACLAuthorizer
+     * A list of users that are allowed to interact with the topology. To use this, set nimbus.authorizer to
+     * backtype.storm.security.auth.authorizer.SimpleACLAuthorizer
      */
     public static final String TOPOLOGY_USERS = "topology.users";
     public static final Object TOPOLOGY_USERS_SCHEMA = ConfigValidation.StringsValidator;
 
     /**
-     * A list of groups that are allowed to interact with the topology.  To use this set
-     * nimbus.authorizer to backtype.storm.security.auth.authorizer.SimpleACLAuthorizer
+     * A list of groups that are allowed to interact with the topology. To use this set nimbus.authorizer to
+     * backtype.storm.security.auth.authorizer.SimpleACLAuthorizer
      */
     public static final String TOPOLOGY_GROUPS = "topology.groups";
     public static final Object TOPOLOGY_GROUPS_SCHEMA = ConfigValidation.StringsValidator;
 
     /**
-     * True if Storm should timeout messages or not. Defaults to true. This is meant to be used
-     * in unit tests to prevent tuples from being accidentally timed out during the test.
+     * Whether Storm should time out messages. Defaults to true. Setting this to false is meant for unit tests, to prevent tuples from being accidentally timed
+     * out during the test.
      */
     public static final String TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS = "topology.enable.message.timeouts";
     public static final Object TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS_SCHEMA = Boolean.class;
@@ -996,27 +936,22 @@ public class Config extends HashMap<String, Object> {
     public static final Object TOPOLOGY_DEBUG_SCHEMA = Boolean.class;
 
     /**
-     * The serializer for communication between shell components and non-JVM
-     * processes
+     * The serializer for communication between shell components and non-JVM processes
      */
     public static final String TOPOLOGY_MULTILANG_SERIALIZER = "topology.multilang.serializer";
     public static final Object TOPOLOGY_MULTILANG_SERIALIZER_SCHEMA = String.class;
 
     /**
-     * How many processes should be spawned around the cluster to execute this
-     * topology. Each process will execute some number of tasks as threads within
-     * them. This parameter should be used in conjunction with the parallelism hints
-     * on each component in the topology to tune the performance of a topology.
+     * How many processes should be spawned around the cluster to execute this topology. Each process will execute some number of tasks as threads within it.
+     * This parameter should be used in conjunction with the parallelism hints on each component in the topology to tune the performance of a topology.
      */
     public static final String TOPOLOGY_WORKERS = "topology.workers";
     public static final Object TOPOLOGY_WORKERS_SCHEMA = ConfigValidation.IntegerValidator;
 
     /**
-     * How many instances to create for a spout/bolt. A task runs on a thread with zero or more
-     * other tasks for the same spout/bolt. The number of tasks for a spout/bolt is always
-     * the same throughout the lifetime of a topology, but the number of executors (threads) for
-     * a spout/bolt can change over time. This allows a topology to scale to more or less resources
-     * without redeploying the topology or violating the constraints of Storm (such as a fields grouping
+     * How many instances to create for a spout/bolt. A task runs on a thread with zero or more other tasks for the same spout/bolt. The number of tasks for a
+     * spout/bolt is always the same throughout the lifetime of a topology, but the number of executors (threads) for a spout/bolt can change over time. This
+     * allows a topology to scale to more or less resources without redeploying the topology or violating the constraints of Storm (such as a fields grouping
      * guaranteeing that the same value goes to the same task).
      */
     public static final String TOPOLOGY_TASKS = "topology.tasks";
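
The executors-versus-tasks distinction above, in one line of topology code; the
component id "split" and SplitSentenceBolt are illustrative names, not taken
from this diff:

    import backtype.storm.topology.TopologyBuilder;

    TopologyBuilder builder = new TopologyBuilder();
    // 2 executors (threads) initially, 4 tasks for the topology's lifetime:
    // parallelism can later be rebalanced up to 4 without redeploying.
    builder.setBolt("split", new SplitSentenceBolt(), 2).setNumTasks(4);
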
@@ -1024,266 +959,242 @@ public class Config extends HashMap<String, Object> {
 
     /**
      * How many executors to spawn for ackers.
-     *
-     * <p>If this is set to 0, then Storm will immediately ack tuples as soon
-     * as they come off the spout, effectively disabling reliability.</p>
+     * <p/>
+     * <p>
+     * If this is set to 0, then Storm will immediately ack tuples as soon as they come off the spout, effectively disabling reliability.
+     * </p>
      */
     public static final String TOPOLOGY_ACKER_EXECUTORS = "topology.acker.executors";
     public static final Object TOPOLOGY_ACKER_EXECUTORS_SCHEMA = ConfigValidation.IntegerValidator;
 
-
     /**
-     * The maximum amount of time given to the topology to fully process a message
-     * emitted by a spout. If the message is not acked within this time frame, Storm
-     * will fail the message on the spout. Some spouts implementations will then replay
-     * the message at a later time.
+     * The maximum amount of time given to the topology to fully process a message emitted by a spout. If the message is not acked within this time frame, Storm
+     * will fail the message on the spout. Some spout implementations will then replay the message at a later time.
      */
     public static final String TOPOLOGY_MESSAGE_TIMEOUT_SECS = "topology.message.timeout.secs";
     public static final Object TOPOLOGY_MESSAGE_TIMEOUT_SECS_SCHEMA = ConfigValidation.IntegerValidator;
 
     /**
-     * A list of serialization registrations for Kryo ( http://code.google.com/p/kryo/ ),
-     * the underlying serialization framework for Storm. A serialization can either
-     * be the name of a class (in which case Kryo will automatically create a serializer for the class
-     * that saves all the object's fields), or an implementation of com.esotericsoftware.kryo.Serializer.
-     *
+     * A list of serialization registrations for Kryo ( http://code.google.com/p/kryo/ ), the underlying serialization framework for Storm. A serialization can
+     * either be the name of a class (in which case Kryo will automatically create a serializer for the class that saves all the object's fields), or an
+     * implementation of com.esotericsoftware.kryo.Serializer.
+     * <p/>
      * See Kryo's documentation for more information about writing custom serializers.
      */
     public static final String TOPOLOGY_KRYO_REGISTER = "topology.kryo.register";
     public static final Object TOPOLOGY_KRYO_REGISTER_SCHEMA = ConfigValidation.KryoRegValidator;
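
A hedged sketch of registering Kryo serializations via the Config helpers;
MyRecord, MyEvent, and MyRecordSerializer are invented names:

    import backtype.storm.Config;

    Config conf = new Config();
    conf.registerSerialization(MyRecord.class);   // Kryo auto-generates a field serializer
    conf.registerSerialization(MyEvent.class, MyRecordSerializer.class); // custom Serializer impl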
 
     /**
-     * A list of classes that customize storm's kryo instance during start-up.
-     * Each listed class name must implement IKryoDecorator. During start-up the
-     * listed class is instantiated with 0 arguments, then its 'decorate' method
-     * is called with storm's kryo instance as the only argument.
+     * A list of classes that customize storm's kryo instance during start-up. Each listed class name must implement IKryoDecorator. During start-up the listed
+     * class is instantiated with 0 arguments, then its 'decorate' method is called with storm's kryo instance as the only argument.
      */
     public static final String TOPOLOGY_KRYO_DECORATORS = "topology.kryo.decorators";
     public static final Object TOPOLOGY_KRYO_DECORATORS_SCHEMA = ConfigValidation.StringsValidator;
 
     /**
-     * Class that specifies how to create a Kryo instance for serialization. Storm will then apply
-     * topology.kryo.register and topology.kryo.decorators on top of this. The default implementation
-     * implements topology.fall.back.on.java.serialization and turns references off.
+     * Class that specifies how to create a Kryo instance for serialization. Storm will then apply topology.kryo.register and topology.kryo.decorators on top of
+     * this. The default implementation implements topology.fall.back.on.java.serialization and turns references off.
      */
     public static final String TOPOLOGY_KRYO_FACTORY = "topology.kryo.factory";
     public static final Object TOPOLOGY_KRYO_FACTORY_SCHEMA = String.class;
 
-
     /**
-     * Whether or not Storm should skip the loading of kryo registrations for which it
-     * does not know the class or have the serializer implementation. Otherwise, the task will
-     * fail to load and will throw an error at runtime. The use case of this is if you want to
-     * declare your serializations on the storm.yaml files on the cluster rather than every single
-     * time you submit a topology. Different applications may use different serializations and so
-     * a single application may not have the code for the other serializers used by other apps.
-     * By setting this config to true, Storm will ignore that it doesn't have those other serializations
-     * rather than throw an error.
+     * Whether or not Storm should skip the loading of kryo registrations for which it does not know the class or have the serializer implementation. Otherwise,
+     * the task will fail to load and will throw an error at runtime. The use case of this is if you want to declare your serializations on the storm.yaml files
+     * on the cluster rather than every single time you submit a topology. Different applications may use different serializations and so a single application
+     * may not have the code for the other serializers used by other apps. By setting this config to true, Storm will ignore that it doesn't have those other
+     * serializations rather than throw an error.
      */
-    public static final String TOPOLOGY_SKIP_MISSING_KRYO_REGISTRATIONS= "topology.skip.missing.kryo.registrations";
+    public static final String TOPOLOGY_SKIP_MISSING_KRYO_REGISTRATIONS = "topology.skip.missing.kryo.registrations";
     public static final Object TOPOLOGY_SKIP_MISSING_KRYO_REGISTRATIONS_SCHEMA = Boolean.class;
 
     /*
-     * A list of classes implementing IMetricsConsumer (See storm.yaml.example for exact config format).
-     * Each listed class will be routed all the metrics data generated by the storm metrics API.
-     * Each listed class maps 1:1 to a system bolt named __metrics_ClassName#N, and it's parallelism is configurable.
+     * A list of classes implementing IMetricsConsumer (See storm.yaml.example for exact config format). Each listed class will be routed all the metrics data
+     * generated by the storm metrics API. Each listed class maps 1:1 to a system bolt named __metrics_ClassName#N, and its parallelism is configurable.
      */
     public static final String TOPOLOGY_METRICS_CONSUMER_REGISTER = "topology.metrics.consumer.register";
     public static final Object TOPOLOGY_METRICS_CONSUMER_REGISTER_SCHEMA = ConfigValidation.MapsValidator;
 
-
     /**
-     * The maximum parallelism allowed for a component in this topology. This configuration is
-     * typically used in testing to limit the number of threads spawned in local mode.
+     * The maximum parallelism allowed for a component in this topology. This configuration is typically used in testing to limit the number of threads spawned
+     * in local mode.
      */
-    public static final String TOPOLOGY_MAX_TASK_PARALLELISM="topology.max.task.parallelism";
+    public static final String TOPOLOGY_MAX_TASK_PARALLELISM = "topology.max.task.parallelism";
     public static final Object TOPOLOGY_MAX_TASK_PARALLELISM_SCHEMA = ConfigValidation.IntegerValidator;
 
-
     /**
-     * The maximum number of tuples that can be pending on a spout task at any given time.
-     * This config applies to individual tasks, not to spouts or topologies as a whole.
-     *
-     * A pending tuple is one that has been emitted from a spout but has not been acked or failed yet.
-     * Note that this config parameter has no effect for unreliable spouts that don't tag
-     * their tuples with a message id.
+     * The maximum number of tuples that can be pending on a spout task at any given time. This config applies to individual tasks, not to spouts or topologies
+     * as a whole.
+     * <p/>
+     * A pending tuple is one that has been emitted from a spout but has not been acked or failed yet. Note that this config parameter has no effect for
+     * unreliable spouts that don't tag their tuples with a message id.
      */
-    public static final String TOPOLOGY_MAX_SPOUT_PENDING="topology.max.spout.pending";
+    public static final String TOPOLOGY_MAX_SPOUT_PENDING = "topology.max.spout.pending";
     public static final Object TOPOLOGY_MAX_SPOUT_PENDING_SCHEMA = ConfigValidation.IntegerValidator;
 
     /**
-     * A class that implements a strategy for what to do when a spout needs to wait. Waiting is
-     * triggered in one of two conditions:
-     *
-     * 1. nextTuple emits no tuples
-     * 2. The spout has hit maxSpoutPending and can't emit any more tuples
+     * A class that implements a strategy for what to do when a spout needs to wait. Waiting is triggered in one of two conditions:
+     * <p/>
+     * 1. nextTuple emits no tuples
+     * 2. The spout has hit maxSpoutPending and can't emit any more tuples
      */
-    public static final String TOPOLOGY_SPOUT_WAIT_STRATEGY="topology.spout.wait.strategy";
+    public static final String TOPOLOGY_SPOUT_WAIT_STRATEGY = "topology.spout.wait.strategy";
     public static final Object TOPOLOGY_SPOUT_WAIT_STRATEGY_SCHEMA = String.class;
 
     /**
+     * Configure the wait timeout used for the timeout blocking wait strategy.
+     */
+    public static final String TOPOLOGY_DISRUPTOR_WAIT_TIMEOUT = "topology.disruptor.wait.timeout";
+    public static final Object TOPOLOGY_DISRUPTOR_WAIT_TIMEOUT_SCHEMA = Number.class;
+
+    /**
      * The amount of milliseconds the SleepEmptyEmitStrategy should sleep for.
      */
-    public static final String TOPOLOGY_SLEEP_SPOUT_WAIT_STRATEGY_TIME_MS="topology.sleep.spout.wait.strategy.time.ms";
+    public static final String TOPOLOGY_SLEEP_SPOUT_WAIT_STRATEGY_TIME_MS = "topology.sleep.spout.wait.strategy.time.ms";
     public static final Object TOPOLOGY_SLEEP_SPOUT_WAIT_STRATEGY_TIME_MS_SCHEMA = ConfigValidation.IntegerValidator;
 
     /**
-     * The maximum amount of time a component gives a source of state to synchronize before it requests
-     * synchronization again.
+     * The maximum amount of time a component gives a source of state to synchronize before it requests synchronization again.
      */
-    public static final String TOPOLOGY_STATE_SYNCHRONIZATION_TIMEOUT_SECS="topology.state.synchronization.timeout.secs";
+    public static final String TOPOLOGY_STATE_SYNCHRONIZATION_TIMEOUT_SECS = "topology.state.synchronization.timeout.secs";
     public static final Object TOPOLOGY_STATE_SYNCHRONIZATION_TIMEOUT_SECS_SCHEMA = ConfigValidation.IntegerValidator;
 
     /**
      * The percentage of tuples to sample to produce stats for a task.
      */
-    public static final String TOPOLOGY_STATS_SAMPLE_RATE="topology.stats.sample.rate";
+    public static final String TOPOLOGY_STATS_SAMPLE_RATE = "topology.stats.sample.rate";
     public static final Object TOPOLOGY_STATS_SAMPLE_RATE_SCHEMA = ConfigValidation.DoubleValidator;
 
     /**
      * The time period that builtin metrics data is bucketed into.
      */
-    public static final String TOPOLOGY_BUILTIN_METRICS_BUCKET_SIZE_SECS="topology.builtin.metrics.bucket.size.secs";
+    public static final String TOPOLOGY_BUILTIN_METRICS_BUCKET_SIZE_SECS = "topology.builtin.metrics.bucket.size.secs";
     public static final Object TOPOLOGY_BUILTIN_METRICS_BUCKET_SIZE_SECS_SCHEMA = ConfigValidation.IntegerValidator;
 
     /**
      * Whether or not to use Java serialization in a topology.
      */
-    public static final String TOPOLOGY_FALL_BACK_ON_JAVA_SERIALIZATION="topology.fall.back.on.java.serialization";
+    public static final String TOPOLOGY_FALL_BACK_ON_JAVA_SERIALIZATION = "topology.fall.back.on.java.serialization";
     public static final Object TOPOLOGY_FALL_BACK_ON_JAVA_SERIALIZATION_SCHEMA = Boolean.class;
 
     /**
+     * Whether or not classes must be registered for Kryo serialization in a topology.
+     */
+    public static final String TOPOLOGY_KRYO_REGISTER_REQUIRED = "topology.kryo.register.required";
+    public static final Object TOPOLOGY_KRYO_REGISTER_REQUIRED_SCHEMA = Boolean.class;
+
+    /**
      * Topology-specific options for the worker child process. This is used in addition to WORKER_CHILDOPTS.
      */
-    public static final String TOPOLOGY_WORKER_CHILDOPTS="topology.worker.childopts";
+    public static final String TOPOLOGY_WORKER_CHILDOPTS = "topology.worker.childopts";
     public static final Object TOPOLOGY_WORKER_CHILDOPTS_SCHEMA = ConfigValidation.StringOrStringListValidator;
 
     /**
      * Topology-specific options GC for the worker child process. This overrides WORKER_GC_CHILDOPTS.
      */
-    public static final String TOPOLOGY_WORKER_GC_CHILDOPTS="topology.worker.gc.childopts";
+    public static final String TOPOLOGY_WORKER_GC_CHILDOPTS = "topology.worker.gc.childopts";
     public static final Object TOPOLOGY_WORKER_GC_CHILDOPTS_SCHEMA = ConfigValidation.StringOrStringListValidator;
 
     /**
      * Topology-specific classpath for the worker child process. This is combined to the usual classpath.
      */
-    public static final String TOPOLOGY_CLASSPATH="topology.classpath";
+    public static final String TOPOLOGY_CLASSPATH = "topology.classpath";
     public static final Object TOPOLOGY_CLASSPATH_SCHEMA = ConfigValidation.StringOrStringListValidator;
 
     /**
-     * Topology-specific environment variables for the worker child process.
-     * This is added to the existing environment (that of the supervisor)
+     * Topology-specific environment variables for the worker child process. This is added to the existing environment (that of the supervisor)
      */
-     public static final String TOPOLOGY_ENVIRONMENT="topology.environment";
-     public static final Object TOPOLOGY_ENVIRONMENT_SCHEMA = Map.class;
+    public static final String TOPOLOGY_ENVIRONMENT = "topology.environment";
+    public static final Object TOPOLOGY_ENVIRONMENT_SCHEMA = Map.class;
 
     /*
-     * Topology-specific option to disable/enable bolt's outgoing overflow buffer.
-     * Enabling this option ensures that the bolt can always clear the incoming messages,
-     * preventing live-lock for the topology with cyclic flow.
-     * The overflow buffer can fill degrading the performance gradually,
-     * eventually running out of memory.
+     * Topology-specific option to disable/enable bolt's outgoing overflow buffer. Enabling this option ensures that the bolt can always clear the incoming
+     * messages, preventing live-lock for a topology with cyclic flow. The overflow buffer can fill up, gradually degrading performance and eventually
+     * running out of memory.
      */
-    public static final String TOPOLOGY_BOLTS_OUTGOING_OVERFLOW_BUFFER_ENABLE="topology.bolts.outgoing.overflow.buffer.enable";
+    public static final String TOPOLOGY_BOLTS_OUTGOING_OVERFLOW_BUFFER_ENABLE = "topology.bolts.outgoing.overflow.buffer.enable";
     public static final Object TOPOLOGY_BOLTS_OUTGOING_OVERFLOW_BUFFER_ENABLE_SCHEMA = Boolean.class;
 
     /**
-     * This config is available for TransactionalSpouts, and contains the id ( a String) for
-     * the transactional topology. This id is used to store the state of the transactional
-     * topology in Zookeeper.
+     * This config is available for TransactionalSpouts, and contains the id (a String) for the transactional topology. This id is used to store the state of
+     * the transactional topology in Zookeeper.
      */
-    public static final String TOPOLOGY_TRANSACTIONAL_ID="topology.transactional.id";
+    public static final String TOPOLOGY_TRANSACTIONAL_ID = "topology.transactional.id";
     public static final Object TOPOLOGY_TRANSACTIONAL_ID_SCHEMA = String.class;
 
     /**
-     * A list of task hooks that are automatically added to every spout and bolt in the topology. An example
-     * of when you'd do this is to add a hook that integrates with your internal
-     * monitoring system. These hooks are instantiated using the zero-arg constructor.
+     * A list of task hooks that are automatically added to every spout and bolt in the topology. An example of when you'd do this is to add a hook that
+     * integrates with your internal monitoring system. These hooks are instantiated using the zero-arg constructor.
      */
-    public static final String TOPOLOGY_AUTO_TASK_HOOKS="topology.auto.task.hooks";
+    public static final String TOPOLOGY_AUTO_TASK_HOOKS = "topology.auto.task.hooks";
     public static final Object TOPOLOGY_AUTO_TASK_HOOKS_SCHEMA = ConfigValidation.StringsValidator;
 
-
     /**
      * The size of the Disruptor receive queue for each executor. Must be a power of 2.
      */
-    public static final String TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE="topology.executor.receive.buffer.size";
+    public static final String TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE = "topology.executor.receive.buffer.size";
     public static final Object TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE_SCHEMA = ConfigValidation.PowerOf2Validator;
 
     /**
-     * The maximum number of messages to batch from the thread receiving off the network to the
-     * executor queues. Must be a power of 2.
+     * The maximum number of messages to batch from the thread receiving off the network to the executor queues. Must be a power of 2.
      */
-    public static final String TOPOLOGY_RECEIVER_BUFFER_SIZE="topology.receiver.buffer.size";
+    public static final String TOPOLOGY_RECEIVER_BUFFER_SIZE = "topology.receiver.buffer.size";
     public static final Object TOPOLOGY_RECEIVER_BUFFER_SIZE_SCHEMA = ConfigValidation.PowerOf2Validator;
 
     /**
      * The size of the Disruptor send queue for each executor. Must be a power of 2.
      */
-    public static final String TOPOLOGY_EXECUTOR_SEND_BUFFER_SIZE="topology.executor.send.buffer.size";
+    public static final String TOPOLOGY_EXECUTOR_SEND_BUFFER_SIZE = "topology.executor.send.buffer.size";
     public static final Object TOPOLOGY_EXECUTOR_SEND_BUFFER_SIZE_SCHEMA = ConfigValidation.PowerOf2Validator;
 
     /**
      * The size of the Disruptor transfer queue for each worker.
      */
-    public static final String TOPOLOGY_TRANSFER_BUFFER_SIZE="topology.transfer.buffer.size";
+    public static final String TOPOLOGY_TRANSFER_BUFFER_SIZE = "topology.transfer.buffer.size";
     public static final Object TOPOLOGY_TRANSFER_BUFFER_SIZE_SCHEMA = ConfigValidation.IntegerValidator;
 
-   /**
-    * How often a tick tuple from the "__system" component and "__tick" stream should be sent
-    * to tasks. Meant to be used as a component-specific configuration.
-    */
-    public static final String TOPOLOGY_TICK_TUPLE_FREQ_SECS="topology.tick.tuple.freq.secs";
+    /**
+     * How often a tick tuple from the "__system" component and "__tick" stream should be sent to tasks. Meant to be used as a component-specific configuration.
+     */
+    public static final String TOPOLOGY_TICK_TUPLE_FREQ_SECS = "topology.tick.tuple.freq.secs";
     public static final Object TOPOLOGY_TICK_TUPLE_FREQ_SECS_SCHEMA = ConfigValidation.IntegerValidator;
 
-
-   /**
-    * Configure the wait strategy used for internal queuing. Can be used to tradeoff latency
-    * vs. throughput
-    */
-    public static final String TOPOLOGY_DISRUPTOR_WAIT_STRATEGY="topology.disruptor.wait.strategy";
+    /**
+     * Configure the wait strategy used for internal queuing. Can be used to tradeoff latency vs. throughput
+     */
+    public static final String TOPOLOGY_DISRUPTOR_WAIT_STRATEGY = "topology.disruptor.wait.strategy";
     public static final Object TOPOLOGY_DISRUPTOR_WAIT_STRATEGY_SCHEMA = String.class;
 
     /**
-     * Configure the wait timeout used for timeout blocking wait strategy.
+     * The size of the shared thread pool for worker tasks to make use of. The thread pool can be accessed via the TopologyContext.
      */
-    public static final String TOPOLOGY_DISRUPTOR_WAIT_TIMEOUT =
-            "topology.disruptor.wait.timeout";
-    public static final Object TOPOLOGY_DISRUPTOR_WAIT_TIMEOUT_SCHEMA =
-            Number.class;
-    
-    /*
-    * The size of the shared thread pool for worker tasks to make use of. The thread pool can be accessed
-    * via the TopologyContext.
-    */
-    public static final String TOPOLOGY_WORKER_SHARED_THREAD_POOL_SIZE="topology.worker.shared.thread.pool.size";
+    public static final String TOPOLOGY_WORKER_SHARED_THREAD_POOL_SIZE = "topology.worker.shared.thread.pool.size";
     public static final Object TOPOLOGY_WORKER_SHARED_THREAD_POOL_SIZE_SCHEMA = ConfigValidation.IntegerValidator;
 
     /**
-     * The interval in seconds to use for determining whether to throttle error reported to Zookeeper. For example,
-     * an interval of 10 seconds with topology.max.error.report.per.interval set to 5 will only allow 5 errors to be
-     * reported to Zookeeper per task for every 10 second interval of time.
+     * The interval in seconds to use for determining whether to throttle errors reported to Zookeeper. For example, an interval of 10 seconds with
+     * topology.max.error.report.per.interval set to 5 will only allow 5 errors to be reported to Zookeeper per task for every 10 second interval of time.
      */
-    public static final String TOPOLOGY_ERROR_THROTTLE_INTERVAL_SECS="topology.error.throttle.interval.secs";
+    public static final String TOPOLOGY_ERROR_THROTTLE_INTERVAL_SECS = "topology.error.throttle.interval.secs";
     public static final Object TOPOLOGY_ERROR_THROTTLE_INTERVAL_SECS_SCHEMA = ConfigValidation.IntegerValidator;
 
     /**
      * See doc for TOPOLOGY_ERROR_THROTTLE_INTERVAL_SECS
      */
-    public static final String TOPOLOGY_MAX_ERROR_REPORT_PER_INTERVAL="topology.max.error.report.per.interval";
+    public static final String TOPOLOGY_MAX_ERROR_REPORT_PER_INTERVAL = "topology.max.error.report.per.interval";
     public static final Object TOPOLOGY_MAX_ERROR_REPORT_PER_INTERVAL_SCHEMA = ConfigValidation.IntegerValidator;
 
-
     /**
      * How often a batch can be emitted in a Trident topology.
      */
-    public static final String TOPOLOGY_TRIDENT_BATCH_EMIT_INTERVAL_MILLIS="topology.trident.batch.emit.interval.millis";
+    public static final String TOPOLOGY_TRIDENT_BATCH_EMIT_INTERVAL_MILLIS = "topology.trident.batch.emit.interval.millis";
     public static final Object TOPOLOGY_TRIDENT_BATCH_EMIT_INTERVAL_MILLIS_SCHEMA = ConfigValidation.IntegerValidator;
 
     /**
      * Name of the topology. This config is automatically set by Storm when the topology is submitted.
      */
-    public final static String TOPOLOGY_NAME="topology.name";
+    public final static String TOPOLOGY_NAME = "topology.name";
     public static final Object TOPOLOGY_NAME_SCHEMA = String.class;
 
     /**
@@ -1313,33 +1224,31 @@ public class Config extends HashMap<String, Object> {
     /**
      * Max pending tuples in one ShellBolt
      */
-    public static final String TOPOLOGY_SHELLBOLT_MAX_PENDING="topology.shellbolt.max.pending";
+    public static final String TOPOLOGY_SHELLBOLT_MAX_PENDING = "topology.shellbolt.max.pending";
     public static final Object TOPOLOGY_SHELLBOLT_MAX_PENDING_SCHEMA = ConfigValidation.IntegerValidator;
 
     /**
      * The root directory in ZooKeeper for metadata about TransactionalSpouts.
      */
-    public static final String TRANSACTIONAL_ZOOKEEPER_ROOT="transactional.zookeeper.root";
+    public static final String TRANSACTIONAL_ZOOKEEPER_ROOT = "transactional.zookeeper.root";
     public static final Object TRANSACTIONAL_ZOOKEEPER_ROOT_SCHEMA = String.class;
 
     /**
-     * The list of zookeeper servers in which to keep the transactional state. If null (which is default),
-     * will use storm.zookeeper.servers
+     * The list of zookeeper servers in which to keep the transactional state. If null (which is the default), storm.zookeeper.servers is used.
      */
-    public static final String TRANSACTIONAL_ZOOKEEPER_SERVERS="transactional.zookeeper.servers";
+    public static final String TRANSACTIONAL_ZOOKEEPER_SERVERS = "transactional.zookeeper.servers";
     public static final Object TRANSACTIONAL_ZOOKEEPER_SERVERS_SCHEMA = ConfigValidation.StringsValidator;
 
     /**
-     * The port to use to connect to the transactional zookeeper servers. If null (which is default),
-     * will use storm.zookeeper.port
+     * The port to use to connect to the transactional zookeeper servers. If null (which is the default), storm.zookeeper.port is used.
      */
-    public static final String TRANSACTIONAL_ZOOKEEPER_PORT="transactional.zookeeper.port";
+    public static final String TRANSACTIONAL_ZOOKEEPER_PORT = "transactional.zookeeper.port";
     public static final Object TRANSACTIONAL_ZOOKEEPER_PORT_SCHEMA = ConfigValidation.IntegerValidator;
 
     /**
      * The user as which the nimbus client should be acquired to perform the operation.
      */
-    public static final String STORM_DO_AS_USER="storm.doAsUser";
+    public static final String STORM_DO_AS_USER = "storm.doAsUser";
     public static final Object STORM_DO_AS_USER_SCHEMA = String.class;
 
     /**
@@ -1349,58 +1258,54 @@ public class Config extends HashMap<String, Object> {
     public static final Object ZMQ_THREADS_SCHEMA = ConfigValidation.IntegerValidator;
 
     /**
-     * How long a connection should retry sending messages to a target host when
-     * the connection is closed. This is an advanced configuration and can almost
+     * How long a connection should retry sending messages to a target host when the connection is closed. This is an advanced configuration and can almost
      * certainly be ignored.
      */
     public static final String ZMQ_LINGER_MILLIS = "zmq.linger.millis";
     public static final Object ZMQ_LINGER_MILLIS_SCHEMA = ConfigValidation.IntegerValidator;
 
     /**
-     * The high water for the ZeroMQ push sockets used for networking. Use this config to prevent buffer explosion
-     * on the networking layer.
+     * The high water mark for the ZeroMQ push sockets used for networking. Use this config to prevent buffer explosion on the networking layer.
      */
     public static final String ZMQ_HWM = "zmq.hwm";
     public static final Object ZMQ_HWM_SCHEMA = ConfigValidation.IntegerValidator;
 
     /**
-     * This value is passed to spawned JVMs (e.g., Nimbus, Supervisor, and Workers)
-     * for the java.library.path value. java.library.path tells the JVM where
-     * to look for native libraries. It is necessary to set this config correctly since
-     * Storm uses the ZeroMQ and JZMQ native libs.
+     * This value is passed to spawned JVMs (e.g., Nimbus, Supervisor, and Workers) for the java.library.path value. java.library.path tells the JVM where to
+     * look for native libraries. It is necessary to set this config correctly since Storm uses the ZeroMQ and JZMQ native libs.
      */
     public static final String JAVA_LIBRARY_PATH = "java.library.path";
     public static final Object JAVA_LIBRARY_PATH_SCHEMA = String.class;
 
     /**
-     * The path to use as the zookeeper dir when running a zookeeper server via
-     * "storm dev-zookeeper". This zookeeper instance is only intended for development;
+     * The path to use as the zookeeper dir when running a zookeeper server via "storm dev-zookeeper". This zookeeper instance is only intended for development;
      * it is not a production grade zookeeper setup.
      */
     public static final String DEV_ZOOKEEPER_PATH = "dev.zookeeper.path";
     public static final Object DEV_ZOOKEEPER_PATH_SCHEMA = String.class;
 
     /**
-     * A map from topology name to the number of machines that should be dedicated for that topology. Set storm.scheduler
-     * to backtype.storm.scheduler.IsolationScheduler to make use of the isolation scheduler.
+     * A map from topology name to the number of machines that should be dedicated to that topology. Set storm.scheduler to
+     * backtype.storm.scheduler.IsolationScheduler to make use of the isolation scheduler.
      */
     public static final String ISOLATION_SCHEDULER_MACHINES = "isolation.scheduler.machines";
     public static final Object ISOLATION_SCHEDULER_MACHINES_SCHEMA = ConfigValidation.MapOfStringToNumberValidator;
 
     /**
-     * A map from the user name to the number of machines that should that user is allowed to use. Set storm.scheduler
-     * to backtype.storm.scheduler.multitenant.MultitenantScheduler
+     * A map from user name to the number of machines that user is allowed to use. Set storm.scheduler to
+     * backtype.storm.scheduler.multitenant.MultitenantScheduler
      */
     public static final String MULTITENANT_SCHEDULER_USER_POOLS = "multitenant.scheduler.user.pools";
     public static final Object MULTITENANT_SCHEDULER_USER_POOLS_SCHEMA = ConfigValidation.MapOfStringToNumberValidator;
 
     /**
-     * The number of machines that should be used by this topology to isolate it from all others. Set storm.scheduler
-     * to backtype.storm.scheduler.multitenant.MultitenantScheduler
+     * The number of machines that should be used by this topology to isolate it from all others. Set storm.scheduler to
+     * backtype.storm.scheduler.multitenant.MultitenantScheduler
      */
     public static final String TOPOLOGY_ISOLATED_MACHINES = "topology.isolate.machines";
     public static final Object TOPOLOGY_ISOLATED_MACHINES_SCHEMA = Number.class;
 
+
     public static void setClasspath(Map conf, String cp) {
         conf.put(Config.TOPOLOGY_CLASSPATH, cp);
     }
@@ -1473,14 +1378,16 @@ public class Config extends HashMap<String, Object> {
         m.put("parallelism.hint", parallelismHint);
         m.put("argument", argument);
 
-        List l = (List)conf.get(TOPOLOGY_METRICS_CONSUMER_REGISTER);
-        if (l == null) { l = new ArrayList(); }
+        List l = (List) conf.get(TOPOLOGY_METRICS_CONSUMER_REGISTER);
+        if (l == null) {
+            l = new ArrayList();
+        }
         l.add(m);
         conf.put(TOPOLOGY_METRICS_CONSUMER_REGISTER, l);
     }
 
     public void registerMetricsConsumer(Class klass, Object argument, long parallelismHint) {
-       registerMetricsConsumer(this, klass, argument, parallelismHint);
+        registerMetricsConsumer(this, klass, argument, parallelismHint);
     }
 
     public static void registerMetricsConsumer(Map conf, Class klass, long parallelismHint) {
@@ -1520,7 +1427,7 @@ public class Config extends HashMap<String, Object> {
     }
 
     public void setSkipMissingKryoRegistrations(boolean skip) {
-       setSkipMissingKryoRegistrations(this, skip);
+        setSkipMissingKryoRegistrations(this, skip);
     }
 
     public static void setMaxTaskParallelism(Map conf, int max) {
@@ -1557,7 +1464,7 @@ public class Config extends HashMap<String, Object> {
 
     private static List getRegisteredSerializations(Map conf) {
         List ret;
-        if(!conf.containsKey(Config.TOPOLOGY_KRYO_REGISTER)) {
+        if (!conf.containsKey(Config.TOPOLOGY_KRYO_REGISTER)) {
             ret = new ArrayList();
         } else {
             ret = new ArrayList((List) conf.get(Config.TOPOLOGY_KRYO_REGISTER));
@@ -1568,7 +1475,7 @@ public class Config extends HashMap<String, Object> {
 
     private static List getRegisteredDecorators(Map conf) {
         List ret;
-        if(!conf.containsKey(Config.TOPOLOGY_KRYO_DECORATORS)) {
+        if (!conf.containsKey(Config.TOPOLOGY_KRYO_DECORATORS)) {
             ret = new ArrayList();
         } else {
             ret = new ArrayList((List) conf.get(Config.TOPOLOGY_KRYO_DECORATORS));
@@ -1576,4 +1483,12 @@ public class Config extends HashMap<String, Object> {
         conf.put(Config.TOPOLOGY_KRYO_DECORATORS, ret);
         return ret;
     }
+
+    public static void setKryoRegisterRequired(Map conf, boolean required) {
+        conf.put(Config.TOPOLOGY_KRYO_REGISTER_REQUIRED, required);
+    }
+
+    public void setKryoRegisterRequired(boolean required) {
+        setKryoRegisterRequired(this, required);
+    }
 }

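As a usage sketch against the setters above (MyEvent and MyMetricsConsumer are hypothetical classes, and the values are purely illustrative), a topology might combine the Kryo and metrics options like this before submission:

    Config conf = new Config();
    conf.registerSerialization(MyEvent.class);         // register app classes with Kryo
    conf.setKryoRegisterRequired(true);                // new in this commit: require registration
    conf.setSkipMissingKryoRegistrations(false);       // fail if a registration cannot be loaded
    conf.setMaxSpoutPending(1000);                     // cap in-flight tuples per spout task
    conf.registerMetricsConsumer(MyMetricsConsumer.class, null, 2);  // parallelism hint of 2
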
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/ConfigValidation.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/ConfigValidation.java b/jstorm-core/src/main/java/backtype/storm/ConfigValidation.java
index 24991d7..9fe2f69 100755
--- a/jstorm-core/src/main/java/backtype/storm/ConfigValidation.java
+++ b/jstorm-core/src/main/java/backtype/storm/ConfigValidation.java
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 package backtype.storm;
+
 import java.util.Map;
 
 import java.util.Map;
@@ -31,13 +32,14 @@ public class ConfigValidation {
     public static interface FieldValidator {
         /**
          * Validates the given field.
+         * 
          * @param name the name of the field.
          * @param field The field to be validated.
          * @throws IllegalArgumentException if the field fails validation.
          */
         public void validateField(String name, Object field) throws IllegalArgumentException;
     }
-    
+
     /**
      * Declares a method for validating configuration values that is nestable.
      */
@@ -46,9 +48,10 @@ public class ConfigValidation {
         public void validateField(String name, Object field) throws IllegalArgumentException {
             validateField(null, name, field);
         }
-        
+
         /**
          * Validates the given field.
+         * 
          * @param pd describes the parent wrapping this validator.
          * @param name the name of the field.
          * @param field The field to be validated.
@@ -59,6 +62,7 @@ public class ConfigValidation {
 
     /**
      * Returns a new NestableFieldValidator for a given class.
+     * 
      * @param cls the Class the field should be a type of
      * @param nullAllowed whether or not a value of null is valid
      * @return a NestableFieldValidator for that class
@@ -66,99 +70,93 @@ public class ConfigValidation {
     public static NestableFieldValidator fv(final Class cls, final boolean nullAllowed) {
         return new NestableFieldValidator() {
             @Override
-            public void validateField(String pd, String name, Object field)
-                    throws IllegalArgumentException {
+            public void validateField(String pd, String name, Object field) throws IllegalArgumentException {
                 if (nullAllowed && field == null) {
                     return;
                 }
-                if (! cls.isInstance(field)) {
-                    throw new IllegalArgumentException(
-                        pd + name + " must be a " + cls.getName() + ". ("+field+")");
+                if (!cls.isInstance(field)) {
+                    throw new IllegalArgumentException(pd + name + " must be a " + cls.getName() + ". (" + field + ")");
                 }
             }
         };
     }
-    
+
     /**
      * Returns a new NestableFieldValidator for a List of the given Class.
+     * 
      * @param cls the Class of elements composing the list
      * @param nullAllowed whether or not a value of null is valid
      * @return a NestableFieldValidator for a list of the given class
      */
     public static NestableFieldValidator listFv(Class cls, boolean nullAllowed) {
-      return listFv(fv(cls, false), nullAllowed);
+        return listFv(fv(cls, false), nullAllowed);
     }
-    
+
     /**
      * Returns a new NestableFieldValidator for a List where each item is validated by validator.
+     * 
      * @param validator used to validate each item in the list
      * @param nullAllowed whether or not a value of null is valid
      * @return a NestableFieldValidator for a list with each item validated by a different validator.
      */
-    public static NestableFieldValidator listFv(final NestableFieldValidator validator, 
-            final boolean nullAllowed) {
+    public static NestableFieldValidator listFv(final NestableFieldValidator validator, final boolean nullAllowed) {
         return new NestableFieldValidator() {
             @Override
-            public void validateField(String pd, String name, Object field)
-                    throws IllegalArgumentException {
+            public void validateField(String pd, String name, Object field) throws IllegalArgumentException {
                 if (nullAllowed && field == null) {
                     return;
                 }
                 if (field instanceof Iterable) {
-                    for (Object e : (Iterable)field) {
+                    for (Object e : (Iterable) field) {
                         validator.validateField(pd + "Each element of the list ", name, e);
                     }
                     return;
                 }
-                throw new IllegalArgumentException(
-                        "Field " + name + " must be an Iterable but was " +
-                        ((field == null) ? "null" :  ("a " + field.getClass())));
+                throw new IllegalArgumentException("Field " + name + " must be an Iterable but was " + ((field == null) ? "null" : ("a " + field.getClass())));
             }
         };
     }
 
     /**
      * Returns a new NestableFieldValidator for a Map of key to val.
+     * 
      * @param key the Class of keys in the map
      * @param val the Class of values in the map
      * @param nullAllowed whether or not a value of null is valid
      * @return a NestableFieldValidator for a Map of key to val
      */
-    public static NestableFieldValidator mapFv(Class key, Class val, 
-            boolean nullAllowed) {
+    public static NestableFieldValidator mapFv(Class key, Class val, boolean nullAllowed) {
         return mapFv(fv(key, false), fv(val, false), nullAllowed);
     }
- 
+
     /**
      * Returns a new NestableFieldValidator for a Map.
+     * 
      * @param key a validator for the keys in the map
      * @param val a validator for the values in the map
      * @param nullAllowed whether or not a value of null is valid
      * @return a NestableFieldValidator for a Map
-     */   
-    public static NestableFieldValidator mapFv(final NestableFieldValidator key, 
-            final NestableFieldValidator val, final boolean nullAllowed) {
+     */
+    public static NestableFieldValidator mapFv(final NestableFieldValidator key, final NestableFieldValidator val, final boolean nullAllowed) {
         return new NestableFieldValidator() {
             @SuppressWarnings("unchecked")
             @Override
-            public void validateField(String pd, String name, Object field)
-                    throws IllegalArgumentException {
+            public void validateField(String pd, String name, Object field) throws IllegalArgumentException {
                 if (nullAllowed && field == null) {
                     return;
                 }
                 if (field instanceof Map) {
-                    for (Map.Entry<Object, Object> entry: ((Map<Object, Object>)field).entrySet()) {
-                      key.validateField("Each key of the map ", name, entry.getKey());
-                      val.validateField("Each value in the map ", name, entry.getValue());
+                    for (Map.Entry<Object, Object> entry : ((Map<Object, Object>) field).entrySet()) {
+                        key.validateField("Each key of the map ", name, entry.getKey());
+                        val.validateField("Each value in the map ", name, entry.getValue());
                     }
                     return;
                 }
-                throw new IllegalArgumentException(
-                        "Field " + name + " must be a Map");
+                throw new IllegalArgumentException("Field " + name + " must be a Map");
             }
         };
     }
-    
+
     /**
      * Validates a list of Numbers.
      */
@@ -175,8 +173,7 @@ public class ConfigValidation {
     public static Object MapOfStringToNumberValidator = mapFv(String.class, Number.class, true);
 
     /**
-     * Validates a map of Strings to a map of Strings to a list.
-     * {str -> {str -> [str,str]}
+     * Validates a map of Strings to a map of Strings to a list: {str -> {str -> [str, str]}}
      */
     public static Object MapOfStringToMapValidator = mapFv(fv(String.class, false), mapFv(fv(String.class, false), listFv(String.class, false), false), true);
 
@@ -196,8 +193,7 @@ public class ConfigValidation {
                 return;
             }
             final long i;
-            if (o instanceof Number &&
-                    (i = ((Number)o).longValue()) == ((Number)o).doubleValue()) {
+            if (o instanceof Number && (i = ((Number) o).longValue()) == ((Number) o).doubleValue()) {
                 if (i <= Integer.MAX_VALUE && i >= Integer.MIN_VALUE) {
                     return;
                 }
@@ -212,22 +208,19 @@ public class ConfigValidation {
      */
     public static Object IntegersValidator = new FieldValidator() {
         @Override
-        public void validateField(String name, Object field)
-                throws IllegalArgumentException {
+        public void validateField(String name, Object field) throws IllegalArgumentException {
             if (field == null) {
                 // A null value is acceptable.
                 return;
             }
             if (field instanceof Iterable) {
-                for (Object o : (Iterable)field) {
+                for (Object o : (Iterable) field) {
                     final long i;
-                    if (o instanceof Number &&
-                            ((i = ((Number)o).longValue()) == ((Number)o).doubleValue()) &&
-                            (i <= Integer.MAX_VALUE && i >= Integer.MIN_VALUE)) {
+                    if (o instanceof Number && ((i = ((Number) o).longValue()) == ((Number) o).doubleValue())
+                            && (i <= Integer.MAX_VALUE && i >= Integer.MIN_VALUE)) {
                         // pass the test
                     } else {
-                        throw new IllegalArgumentException(
-                                "Each element of the list " + name + " must be an Integer within type range.");
+                        throw new IllegalArgumentException("Each element of the list " + name + " must be an Integer within type range.");
                     }
                 }
                 return;
@@ -266,11 +259,9 @@ public class ConfigValidation {
                 return;
             }
             final long i;
-            if (o instanceof Number &&
-                    (i = ((Number)o).longValue()) == ((Number)o).doubleValue())
-            {
+            if (o instanceof Number && (i = ((Number) o).longValue()) == ((Number) o).doubleValue()) {
                 // Test whether the integer is a power of 2.
-                if (i > 0 && (i & (i-1)) == 0) {
+                if (i > 0 && (i & (i - 1)) == 0) {
                     return;
                 }
             }
@@ -289,9 +280,7 @@ public class ConfigValidation {
                 return;
             }
             final long i;
-            if (o instanceof Number &&
-                    (i = ((Number)o).longValue()) == ((Number)o).doubleValue())
-            {
+            if (o instanceof Number && (i = ((Number) o).longValue()) == ((Number) o).doubleValue()) {
                 if (i > 0) {
                     return;
                 }
@@ -311,24 +300,20 @@ public class ConfigValidation {
                 return;
             }
             if (o instanceof Iterable) {
-                for (Object e : (Iterable)o) {
+                for (Object e : (Iterable) o) {
                     if (e instanceof Map) {
-                        for (Map.Entry<Object,Object> entry: ((Map<Object,Object>)e).entrySet()) {
-                            if (!(entry.getKey() instanceof String) ||
-                                !(entry.getValue() instanceof String)) {
-                                throw new IllegalArgumentException(
-                                    "Each element of the list " + name + " must be a String or a Map of Strings");
+                        for (Map.Entry<Object, Object> entry : ((Map<Object, Object>) e).entrySet()) {
+                            if (!(entry.getKey() instanceof String) || !(entry.getValue() instanceof String)) {
+                                throw new IllegalArgumentException("Each element of the list " + name + " must be a String or a Map of Strings");
                             }
                         }
                     } else if (!(e instanceof String)) {
-                        throw new IllegalArgumentException(
-                                "Each element of the list " + name + " must be a String or a Map of Strings");
+                        throw new IllegalArgumentException("Each element of the list " + name + " must be a String or a Map of Strings");
                     }
                 }
                 return;
             }
-            throw new IllegalArgumentException(
-                    "Field " + name + " must be an Iterable containing only Strings or Maps of Strings");
+            throw new IllegalArgumentException("Field " + name + " must be an Iterable containing only Strings or Maps of Strings");
         }
     };
 


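The fv/listFv/mapFv helpers above compose into nested validators; for instance, the MapOfStringToNumberValidator pattern can be rebuilt by hand. A minimal sketch (the config key and value are hypothetical):

    // Accepts null, or a Map whose keys are Strings and whose values are Lists of Strings.
    ConfigValidation.NestableFieldValidator v =
            ConfigValidation.mapFv(ConfigValidation.fv(String.class, false),
                    ConfigValidation.listFv(String.class, false), true);
    v.validateField("my.plugin.whitelist", value);  // throws IllegalArgumentException on mismatch
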
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/Constants.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/Constants.java b/jstorm-core/src/main/java/backtype/storm/Constants.java
index 2797b69..da2d5b7 100755
--- a/jstorm-core/src/main/java/backtype/storm/Constants.java
+++ b/jstorm-core/src/main/java/backtype/storm/Constants.java
@@ -20,9 +20,8 @@ package backtype.storm;
 import backtype.storm.coordination.CoordinatedBolt;
 import clojure.lang.RT;
 
-
 public class Constants {
-    public static final String COORDINATED_STREAM_ID = CoordinatedBolt.class.getName() + "/coord-stream"; 
+    public static final String COORDINATED_STREAM_ID = CoordinatedBolt.class.getName() + "/coord-stream";
 
     public static final long SYSTEM_TASK_ID = -1;
     public static final Object SYSTEM_EXECUTOR_ID = RT.readString("[-1 -1]");
@@ -32,6 +31,6 @@ public class Constants {
     public static final String METRICS_STREAM_ID = "__metrics";
     public static final String METRICS_TICK_STREAM_ID = "__metrics_tick";
     public static final String CREDENTIALS_CHANGED_STREAM_ID = "__credentials";
-    
+
     public static final String JSTORM_CONF_DIR = "JSTORM_CONF_DIR";
 }

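These IDs are how components recognize system streams. For instance, the usual tick-tuple check (assuming Constants also defines SYSTEM_COMPONENT_ID and SYSTEM_TICK_STREAM_ID, which fall outside this hunk) looks like:

    private static boolean isTickTuple(Tuple tuple) {
        return Constants.SYSTEM_COMPONENT_ID.equals(tuple.getSourceComponent())
                && Constants.SYSTEM_TICK_STREAM_ID.equals(tuple.getSourceStreamId());
    }
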
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/GenericOptionsParser.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/GenericOptionsParser.java b/jstorm-core/src/main/java/backtype/storm/GenericOptionsParser.java
index 9319ce1..88adfb4 100755
--- a/jstorm-core/src/main/java/backtype/storm/GenericOptionsParser.java
+++ b/jstorm-core/src/main/java/backtype/storm/GenericOptionsParser.java
@@ -108,94 +108,92 @@ import org.yaml.snakeyaml.Yaml;
 
 public class GenericOptionsParser {
     static final Logger LOG = LoggerFactory.getLogger(GenericOptionsParser.class);
-    
+
     static final Charset UTF8 = Charset.forName("UTF-8");
-    
+
     public static final String TOPOLOGY_LIB_PATH = "topology.lib.path";
-    
+
     public static final String TOPOLOGY_LIB_NAME = "topology.lib.name";
-    
+
     Config conf;
-    
+
     CommandLine commandLine;
-    
+
     // Order in this map is important for these purposes:
     // - configuration priority
     static final LinkedHashMap<String, OptionProcessor> optionProcessors = new LinkedHashMap<String, OptionProcessor>();
-    
+
     public GenericOptionsParser(Config conf, String[] args) throws ParseException {
         this(conf, new Options(), args);
     }
-    
+
     public GenericOptionsParser(Config conf, Options options, String[] args) throws ParseException {
         this.conf = conf;
         parseGeneralOptions(options, conf, args);
     }
-    
+
     public String[] getRemainingArgs() {
         return commandLine.getArgs();
     }
-    
+
     public Config getConfiguration() {
         return conf;
     }
-    
+
     static Options buildGeneralOptions(Options opts) {
         Options r = new Options();
-        
+
         for (Object o : opts.getOptions())
             r.addOption((Option) o);
-        
-        Option libjars = OptionBuilder.withArgName("paths").hasArg().withDescription("comma separated jars to be used by the submitted topology").create("libjars");
+
+        Option libjars =
+                OptionBuilder.withArgName("paths").hasArg().withDescription("comma separated jars to be used by the submitted topology").create("libjars");
         r.addOption(libjars);
         optionProcessors.put("libjars", new LibjarsProcessor());
-        
+
         Option conf = OptionBuilder.withArgName("configuration file").hasArg().withDescription("an application configuration file").create("conf");
         r.addOption(conf);
         optionProcessors.put("conf", new ConfFileProcessor());
-        
+
         // Must come after `conf': this option is of higher priority
         Option extraConfig = OptionBuilder.withArgName("D").hasArg().withDescription("extra configurations (preserving types)").create("D");
         r.addOption(extraConfig);
         optionProcessors.put("D", new ExtraConfigProcessor());
-        
+
         return r;
     }
-    
+
     void parseGeneralOptions(Options opts, Config conf, String[] args) throws ParseException {
         opts = buildGeneralOptions(opts);
         CommandLineParser parser = new GnuParser();
         commandLine = parser.parse(opts, args, true);
         processGeneralOptions(conf, commandLine);
     }
-    
+
     void processGeneralOptions(Config conf, CommandLine commandLine) throws ParseException {
         for (Map.Entry<String, OptionProcessor> e : optionProcessors.entrySet())
             if (commandLine.hasOption(e.getKey()))
                 e.getValue().process(conf, commandLine);
     }
-    
+
     static List<File> validateFiles(String pathList) throws IOException {
         List<File> l = new ArrayList<File>();
-        
+
         for (String s : pathList.split(",")) {
             File file = new File(s);
             if (!file.exists())
                 throw new FileNotFoundException("File `" + file.getAbsolutePath() + "' does not exist");
-            
+
             l.add(file);
         }
-        
+
         return l;
     }
-    
+
     public static void printGenericCommandUsage(PrintStream out) {
         String[] strs =
-                new String[] {
-                        "Generic options supported are",
-                        "  -conf <conf.xml>                            load configurations from",
-                        "                                              <conf.xml>",
-                        "  -conf <conf.yaml>                           load configurations from",
+                new String[] { "Generic options supported are", "  -conf <conf.xml>                            load configurations from",
+                        "                                              <conf.xml>", "  -conf <conf.yaml>                           load configurations from",
                         "                                              <conf.yaml>",
                         "  -D <key>=<value>                            set <key> in configuration",
                         "                                              to <value> (preserve value's type)",
@@ -205,11 +203,11 @@ public class GenericOptionsParser {
         for (String s : strs)
             out.println(s);
     }
-    
+
     static interface OptionProcessor {
         public void process(Config conf, CommandLine commandLine) throws ParseException;
     }
-    
+
     static class LibjarsProcessor implements OptionProcessor {
         @Override
         public void process(Config conf, CommandLine commandLine) throws ParseException {
@@ -223,31 +221,31 @@ public class GenericOptionsParser {
                 }
                 conf.put(TOPOLOGY_LIB_PATH, jars);
                 conf.put(TOPOLOGY_LIB_NAME, names);
-                
+
             } catch (IOException e) {
                 throw new ParseException(e.getMessage());
             }
         }
     }
-    
+
     static class ExtraConfigProcessor implements OptionProcessor {
         static final Yaml yaml = new Yaml();
-        
+
         @Override
         public void process(Config conf, CommandLine commandLine) throws ParseException {
             for (String s : commandLine.getOptionValues("D")) {
                 String[] keyval = s.split("=", 2);
                 if (keyval.length != 2)
                     throw new ParseException("Invalid option value `" + s + "'");
-                
+
                 conf.putAll((Map) yaml.load(keyval[0] + ": " + keyval[1]));
             }
         }
     }
-    
+
     static class ConfFileProcessor implements OptionProcessor {
         static final Yaml yaml = new Yaml();
-        
+
         static Map loadYamlConf(String f) throws IOException {
             InputStreamReader reader = null;
             try {
@@ -259,13 +257,13 @@ public class GenericOptionsParser {
                     reader.close();
             }
         }
-        
+
         static Map loadConf(String f) throws IOException {
             if (f.endsWith(".yaml"))
                 return loadYamlConf(f);
             throw new IOException("Unknown configuration file type: " + f + " does not end with .yaml");
         }
-        
+
         @Override
         public void process(Config conf, CommandLine commandLine) throws ParseException {
             try {

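A sketch of how a topology's main class might consume these generic options (the class, file, and jar names are hypothetical):

    public static void main(String[] args) throws Exception {
        Config conf = new Config();
        GenericOptionsParser parser = new GenericOptionsParser(conf, args);
        String[] remaining = parser.getRemainingArgs();  // everything after -conf/-D/-libjars is stripped
        // conf now holds the values loaded from "-conf topo.yaml" plus any "-D key=value" overrides
    }

invoked, for example, as: storm jar app.jar com.example.Main -conf topo.yaml -D topology.workers=4 -libjars extra.jar
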
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/ICredentialsListener.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/ICredentialsListener.java b/jstorm-core/src/main/java/backtype/storm/ICredentialsListener.java
index 1a7bc1b..f8f9e9b 100755
--- a/jstorm-core/src/main/java/backtype/storm/ICredentialsListener.java
+++ b/jstorm-core/src/main/java/backtype/storm/ICredentialsListener.java
@@ -26,7 +26,8 @@ import java.util.Map;
 public interface ICredentialsListener {
     /**
      * Called when the credentials of a topology have changed.
+     * 
      * @param credentials the new credentials, could be null.
      */
-    public void setCredentials(Map<String,String> credentials);
+    public void setCredentials(Map<String, String> credentials);
 }

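A minimal sketch of a component reacting to credential pushes (the class and the "auth.token" key are hypothetical):

    import java.util.Map;

    import backtype.storm.ICredentialsListener;
    import backtype.storm.task.OutputCollector;
    import backtype.storm.task.TopologyContext;
    import backtype.storm.topology.OutputFieldsDeclarer;
    import backtype.storm.topology.base.BaseRichBolt;
    import backtype.storm.tuple.Tuple;

    public class TokenAwareBolt extends BaseRichBolt implements ICredentialsListener {
        private OutputCollector collector;
        private volatile String token;  // refreshed whenever the topology's credentials change

        @Override
        public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
            this.collector = collector;
        }

        @Override
        public void setCredentials(Map<String, String> credentials) {
            if (credentials != null) {
                token = credentials.get("auth.token");  // may be null if the key is absent
            }
        }

        @Override
        public void execute(Tuple input) {
            // ... use `token` when calling the secured downstream service ...
            collector.ack(input);
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            // this sketch emits nothing
        }
    }
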
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/ILocalCluster.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/ILocalCluster.java b/jstorm-core/src/main/java/backtype/storm/ILocalCluster.java
index 7d5aa35..7d31f07 100755
--- a/jstorm-core/src/main/java/backtype/storm/ILocalCluster.java
+++ b/jstorm-core/src/main/java/backtype/storm/ILocalCluster.java
@@ -30,20 +30,33 @@ import backtype.storm.generated.Credentials;
 
 import java.util.Map;
 
-
 public interface ILocalCluster {
     void submitTopology(String topologyName, Map conf, StormTopology topology) throws AlreadyAliveException, InvalidTopologyException;
-    void submitTopologyWithOpts(String topologyName, Map conf, StormTopology topology, SubmitOptions submitOpts) throws AlreadyAliveException, InvalidTopologyException;
+
+    void submitTopologyWithOpts(String topologyName, Map conf, StormTopology topology, SubmitOptions submitOpts) throws AlreadyAliveException,
+            InvalidTopologyException;
+
     void uploadNewCredentials(String topologyName, Credentials creds);
+
     void killTopology(String topologyName) throws NotAliveException;
+
     void killTopologyWithOpts(String name, KillOptions options) throws NotAliveException;
+
     void activate(String topologyName) throws NotAliveException;
+
     void deactivate(String topologyName) throws NotAliveException;
+
     void rebalance(String name, RebalanceOptions options) throws NotAliveException;
+
     void shutdown();
+
     String getTopologyConf(String id);
+
     StormTopology getTopology(String id);
+
     ClusterSummary getClusterInfo();
+
     TopologyInfo getTopologyInfo(String id);
+
     Map getState();
 }

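Driving this interface in a test might look like the following (topology construction omitted; the topology name is arbitrary):

    void runLocally(Map conf, StormTopology topology) throws Exception {
        ILocalCluster cluster = new LocalCluster();
        cluster.submitTopology("demo", conf, topology);
        Thread.sleep(10 * 1000);       // let the topology run briefly
        cluster.killTopology("demo");
        cluster.shutdown();
    }
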
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/ILocalDRPC.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/ILocalDRPC.java b/jstorm-core/src/main/java/backtype/storm/ILocalDRPC.java
index e478dca..4482ecd 100755
--- a/jstorm-core/src/main/java/backtype/storm/ILocalDRPC.java
+++ b/jstorm-core/src/main/java/backtype/storm/ILocalDRPC.java
@@ -21,7 +21,6 @@ import backtype.storm.daemon.Shutdownable;
 import backtype.storm.generated.DistributedRPC;
 import backtype.storm.generated.DistributedRPCInvocations;
 
-
 public interface ILocalDRPC extends DistributedRPC.Iface, DistributedRPCInvocations.Iface, Shutdownable {
-    public String getServiceId();    
+    public String getServiceId();
 }

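In tests this interface is usually backed by LocalDRPC from the same package; a sketch (the DRPC function name is hypothetical, and the checked thrift exceptions are left to the caller):

    ILocalDRPC drpc = new LocalDRPC();
    // ... build and submit a topology that serves the "reach" function, handing drpc to the builder ...
    String result = drpc.execute("reach", "http://example.com");
    drpc.shutdown();
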
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/LocalCluster.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/LocalCluster.java b/jstorm-core/src/main/java/backtype/storm/LocalCluster.java
index b55bac4..c25c260 100755
--- a/jstorm-core/src/main/java/backtype/storm/LocalCluster.java
+++ b/jstorm-core/src/main/java/backtype/storm/LocalCluster.java
@@ -17,30 +17,21 @@
  */
 package backtype.storm;
 
-import java.util.Map;
-
+import backtype.storm.generated.*;
+import backtype.storm.utils.Utils;
+import com.alibaba.jstorm.utils.JStormUtils;
 import org.apache.thrift.TException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import backtype.storm.generated.ClusterSummary;
-import backtype.storm.generated.Credentials;
-import backtype.storm.generated.KillOptions;
-import backtype.storm.generated.NotAliveException;
-import backtype.storm.generated.RebalanceOptions;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.generated.SubmitOptions;
-import backtype.storm.generated.TopologyInfo;
-import backtype.storm.utils.Utils;
-
-import com.alibaba.jstorm.utils.JStormUtils;
+import java.util.Map;
 
 public class LocalCluster implements ILocalCluster {
-    
+
     public static Logger LOG = LoggerFactory.getLogger(LocalCluster.class);
-    
+
     private LocalClusterMap state;
-    
+
     protected void setLogger() {
         // the code is for log4j
         // boolean needReset = true;
@@ -56,61 +47,62 @@ public class LocalCluster implements ILocalCluster {
         // BasicConfigurator.configure();
         // rootLogger.setLevel(Level.INFO);
         // }
-        
+
     }
-    
+
     // keep a static singleton; this makes debugging in local mode easier
     protected static LocalCluster instance = null;
-    
+
     public static LocalCluster getInstance() {
         return instance;
     }
-    
+
     public LocalCluster() {
         synchronized (LocalCluster.class) {
             if (instance != null) {
                 throw new RuntimeException("LocalCluster should be single");
             }
             setLogger();
-            
+
             // fix for the zk error "Address family not supported by protocol
             // family: connect"
             System.setProperty("java.net.preferIPv4Stack", "true");
-            
+
             this.state = LocalUtils.prepareLocalCluster();
             if (this.state == null)
                 throw new RuntimeException("prepareLocalCluster error");
-            
+
             instance = this;
         }
     }
-    
+
     @Override
     public void submitTopology(String topologyName, Map conf, StormTopology topology) {
         submitTopologyWithOpts(topologyName, conf, topology, null);
     }
-    
+
     @Override
     public void submitTopologyWithOpts(String topologyName, Map conf, StormTopology topology, SubmitOptions submitOpts) {
         // TODO Auto-generated method stub
         if (!Utils.isValidConf(conf))
             throw new RuntimeException("Topology conf is not json-serializable");
         JStormUtils.setLocalMode(true);
-        
+        conf.put(Config.STORM_CLUSTER_MODE, "local");
+
         try {
             if (submitOpts == null) {
                 state.getNimbus().submitTopology(topologyName, null, Utils.to_json(conf), topology);
             } else {
                 state.getNimbus().submitTopologyWithOpts(topologyName, null, Utils.to_json(conf), topology, submitOpts);
             }
-            
+
         } catch (Exception e) {
             // TODO Auto-generated catch block
             LOG.error("Failed to submit topology " + topologyName, e);
             throw new RuntimeException(e);
         }
     }
-    
+
     @Override
     public void killTopology(String topologyName) {
         // TODO Auto-generated method stub
@@ -124,7 +116,7 @@ public class LocalCluster implements ILocalCluster {
             LOG.error("fail to kill Topology " + topologyName, e);
         }
     }
-    
+
     @Override
     public void killTopologyWithOpts(String name, KillOptions options) throws NotAliveException {
         // TODO Auto-generated method stub
@@ -136,7 +128,7 @@ public class LocalCluster implements ILocalCluster {
             throw new RuntimeException(e);
         }
     }
-    
+
     @Override
     public void activate(String topologyName) {
         // TODO Auto-generated method stub
@@ -148,7 +140,7 @@ public class LocalCluster implements ILocalCluster {
             throw new RuntimeException(e);
         }
     }
-    
+
     @Override
     public void deactivate(String topologyName) {
         // TODO Auto-generated method stub
@@ -160,7 +152,7 @@ public class LocalCluster implements ILocalCluster {
             throw new RuntimeException(e);
         }
     }
-    
+
     @Override
     public void rebalance(String name, RebalanceOptions options) {
         // TODO Auto-generated method stub
@@ -172,7 +164,7 @@ public class LocalCluster implements ILocalCluster {
             throw new RuntimeException(e);
         }
     }
-    
+
     @Override
     public void shutdown() {
         // TODO Auto-generated method stub
@@ -180,8 +172,9 @@ public class LocalCluster implements ILocalCluster {
         // it takes 10 seconds to remove the topology's node
         JStormUtils.sleepMs(10 * 1000);
         this.state.clean();
+        instance = null;
     }
-    
+
     @Override
     public String getTopologyConf(String id) {
         // TODO Auto-generated method stub
@@ -193,7 +186,7 @@ public class LocalCluster implements ILocalCluster {
         }
         return null;
     }
-    
+
     @Override
     public StormTopology getTopology(String id) {
         // TODO Auto-generated method stub
@@ -208,7 +201,7 @@ public class LocalCluster implements ILocalCluster {
         }
         return null;
     }
-    
+
     @Override
     public ClusterSummary getClusterInfo() {
         // TODO Auto-generated method stub
@@ -220,7 +213,7 @@ public class LocalCluster implements ILocalCluster {
         }
         return null;
     }
-    
+
     @Override
     public TopologyInfo getTopologyInfo(String id) {
         // TODO Auto-generated method stub
@@ -235,7 +228,7 @@ public class LocalCluster implements ILocalCluster {
         }
         return null;
     }
-    
+
     /***
      * You should use getLocalClusterMap() instead. This function will always return null.
      * */
@@ -245,11 +238,11 @@ public class LocalCluster implements ILocalCluster {
         // TODO Auto-generated method stub
         return null;
     }
-    
+
     public LocalClusterMap getLocalClusterMap() {
         return state;
     }
-    
+
     public static void main(String[] args) throws Exception {
         LocalCluster localCluster = null;
         try {
@@ -269,7 +262,7 @@ public class LocalCluster implements ILocalCluster {
         } catch (Exception e) {
             // TODO Auto-generated catch block
             LOG.error("fail to uploadNewCredentials of topologyId: " + topologyName, e);
-        } 
+        }
     }
-    
+
 }
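
Two behavior changes ride along with the whitespace cleanup above: submitTopologyWithOpts now stamps Config.STORM_CLUSTER_MODE to "local" into the caller's conf, and shutdown() clears the static singleton. A short sketch of the lifecycle the second change enables:

    LocalCluster first = new LocalCluster();
    // ... submit, test, kill ...
    first.shutdown();                          // sleeps ~10s, cleans state, nulls the singleton

    LocalCluster second = new LocalCluster();  // now legal; previously threw "LocalCluster should be single"
    second.shutdown();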

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/LocalClusterMap.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/LocalClusterMap.java b/jstorm-core/src/main/java/backtype/storm/LocalClusterMap.java
index bd99c76..39f23be 100755
--- a/jstorm-core/src/main/java/backtype/storm/LocalClusterMap.java
+++ b/jstorm-core/src/main/java/backtype/storm/LocalClusterMap.java
@@ -31,83 +31,83 @@ import com.alibaba.jstorm.utils.PathUtils;
 import com.alibaba.jstorm.zk.Factory;
 
 public class LocalClusterMap {
-    
+
     public static Logger LOG = LoggerFactory.getLogger(LocalClusterMap.class);
-    
+
     private NimbusServer nimbusServer;
-    
+
     private ServiceHandler nimbus;
-    
+
     private Factory zookeeper;
-    
+
     private Map conf;
-    
+
     private List<String> tmpDir;
-    
+
     private SupervisorManger supervisor;
-    
+
     public ServiceHandler getNimbus() {
         return nimbus;
     }
-    
+
     public void setNimbus(ServiceHandler nimbus) {
         this.nimbus = nimbus;
     }
-    
+
     public Factory getZookeeper() {
         return zookeeper;
     }
-    
+
     public void setZookeeper(Factory zookeeper) {
         this.zookeeper = zookeeper;
     }
-    
+
     public Map getConf() {
         return conf;
     }
-    
+
     public void setConf(Map conf) {
         this.conf = conf;
     }
-    
+
     public NimbusServer getNimbusServer() {
         return nimbusServer;
     }
-    
+
     public void setNimbusServer(NimbusServer nimbusServer) {
         this.nimbusServer = nimbusServer;
     }
-    
+
     public SupervisorManger getSupervisor() {
         return supervisor;
     }
-    
+
     public void setSupervisor(SupervisorManger supervisor) {
         this.supervisor = supervisor;
     }
-    
+
     public List<String> getTmpDir() {
         return tmpDir;
     }
-    
+
     public void setTmpDir(List<String> tmpDir) {
         this.tmpDir = tmpDir;
     }
-    
+
     public void clean() {
-        
+
         if (supervisor != null) {
             supervisor.ShutdownAllWorkers();
             supervisor.shutdown();
         }
-        
+
         if (nimbusServer != null) {
             nimbusServer.cleanup();
         }
-        
+
         if (zookeeper != null)
             zookeeper.shutdown();
-        
+
         // it will have a problem:
         // java.io.IOException: Unable to delete file:
         // {TmpPath}\{UUID}\version-2\log.1
@@ -122,5 +122,5 @@ public class LocalClusterMap {
             }
         }
     }
-    
+
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/LocalDRPC.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/LocalDRPC.java b/jstorm-core/src/main/java/backtype/storm/LocalDRPC.java
index 4113bf4..a838026 100755
--- a/jstorm-core/src/main/java/backtype/storm/LocalDRPC.java
+++ b/jstorm-core/src/main/java/backtype/storm/LocalDRPC.java
@@ -28,16 +28,16 @@ import com.alibaba.jstorm.drpc.Drpc;
 
 public class LocalDRPC implements ILocalDRPC {
     private static final Logger LOG = LoggerFactory.getLogger(LocalDRPC.class);
-    
+
     private Drpc handler = new Drpc();
     private Thread thread;
-    
+
     private final String serviceId;
-    
+
     public LocalDRPC() {
-        
+
         thread = new Thread(new Runnable() {
-            
+
             @Override
             public void run() {
                 LOG.info("Begin to init local Drpc");
@@ -51,10 +51,10 @@ public class LocalDRPC implements ILocalDRPC {
             }
         });
         thread.start();
-        
+
         serviceId = ServiceRegistry.registerService(handler);
     }
-    
+
     @Override
     public String execute(String functionName, String funcArgs) {
         // TODO Auto-generated method stub
@@ -65,36 +65,36 @@ public class LocalDRPC implements ILocalDRPC {
             throw new RuntimeException(e);
         }
     }
-    
+
     @Override
     public void result(String id, String result) throws TException {
         // TODO Auto-generated method stub
         handler.result(id, result);
     }
-    
+
     @Override
     public DRPCRequest fetchRequest(String functionName) throws TException {
         // TODO Auto-generated method stub
         return handler.fetchRequest(functionName);
     }
-    
+
     @Override
     public void failRequest(String id) throws TException {
         // TODO Auto-generated method stub
         handler.failRequest(id);
     }
-    
+
     @Override
     public void shutdown() {
         // TODO Auto-generated method stub
         ServiceRegistry.unregisterService(this.serviceId);
         this.handler.shutdown();
     }
-    
+
     @Override
     public String getServiceId() {
         // TODO Auto-generated method stub
         return serviceId;
     }
-    
+
 }
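
LocalDRPC starts an in-process Drpc handler on a background thread and registers it with ServiceRegistry under a service id. A sketch of typical local DRPC usage, assuming a caller-supplied bolt (hypothetical) behind a LinearDRPCTopologyBuilder:

    LocalDRPC drpc = new LocalDRPC();
    LinearDRPCTopologyBuilder builder = new LinearDRPCTopologyBuilder("exclaim");
    // builder.addBolt(new ExclaimBolt(), 3);              // hypothetical bolt appending "!"

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("drpc-demo", new Config(), builder.createLocalTopology(drpc));

    System.out.println(drpc.execute("exclaim", "hello"));  // blocks until the topology replies

    cluster.shutdown();
    drpc.shutdown();                                       // unregisters the service id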

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/LocalUtils.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/LocalUtils.java b/jstorm-core/src/main/java/backtype/storm/LocalUtils.java
index e32c07e..6e5023b 100755
--- a/jstorm-core/src/main/java/backtype/storm/LocalUtils.java
+++ b/jstorm-core/src/main/java/backtype/storm/LocalUtils.java
@@ -39,32 +39,32 @@ import com.alibaba.jstorm.zk.Factory;
 import com.alibaba.jstorm.zk.Zookeeper;
 
 public class LocalUtils {
-    
+
     public static Logger LOG = LoggerFactory.getLogger(LocalUtils.class);
-    
+
     public static LocalClusterMap prepareLocalCluster() {
         LocalClusterMap state = new LocalClusterMap();
         try {
             List<String> tmpDirs = new ArrayList();
-            
+
             String zkDir = getTmpDir();
             tmpDirs.add(zkDir);
             Factory zookeeper = startLocalZookeeper(zkDir);
             Map conf = getLocalConf(zookeeper.getZooKeeperServer().getClientPort());
-            
+
             String nimbusDir = getTmpDir();
             tmpDirs.add(nimbusDir);
             Map nimbusConf = deepCopyMap(conf);
             nimbusConf.put(Config.STORM_LOCAL_DIR, nimbusDir);
             NimbusServer instance = new NimbusServer();
-            
+
             Map supervisorConf = deepCopyMap(conf);
             String supervisorDir = getTmpDir();
             tmpDirs.add(supervisorDir);
             supervisorConf.put(Config.STORM_LOCAL_DIR, supervisorDir);
             Supervisor supervisor = new Supervisor();
             IContext context = getLocalContext(supervisorConf);
-            
+
             state.setNimbusServer(instance);
             state.setNimbus(instance.launcherLocalServer(nimbusConf, new DefaultInimbus()));
             state.setZookeeper(zookeeper);
@@ -75,11 +75,11 @@ public class LocalUtils {
         } catch (Exception e) {
             LOG.error("prepare cluster error!", e);
             state.clean();
-            
+
         }
         return null;
     }
-    
+
     private static Factory startLocalZookeeper(String tmpDir) {
         for (int i = 2000; i < 65535; i++) {
             try {
@@ -90,11 +90,11 @@ public class LocalUtils {
         }
         throw new RuntimeException("No port is available to launch an inprocess zookeeper.");
     }
-    
+
     private static String getTmpDir() {
         return System.getProperty("java.io.tmpdir") + File.separator + UUID.randomUUID();
     }
-    
+
     private static Map getLocalConf(int port) {
         List<String> zkServers = new ArrayList<String>(1);
         zkServers.add("localhost");
@@ -110,7 +110,7 @@ public class LocalUtils {
         ConfigExtension.setTaskCleanupTimeoutSec(conf, 0);
         return conf;
     }
-    
+
     private static IContext getLocalContext(Map conf) {
         if (!(Boolean) conf.get(Config.STORM_LOCAL_MODE_ZMQ)) {
             IContext result = new NettyContext();
@@ -120,7 +120,7 @@ public class LocalUtils {
         }
         return null;
     }
-    
+
     private static Map deepCopyMap(Map map) {
         return new HashMap(map);
     }
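
Two details in this file deserve a flag: startLocalZookeeper probes ports upward from 2000 until a bind succeeds, and deepCopyMap is, despite its name, a shallow copy. A sketch of what the latter means for nested values (hypothetical keys):

    Map<String, Object> base = new HashMap<String, Object>();
    base.put("lib", new ArrayList<String>());

    Map<String, Object> copy = new HashMap<String, Object>(base);  // what deepCopyMap does
    ((List<String>) copy.get("lib")).add("x");                     // visible through base too -- same inner list

That is adequate here, since prepareLocalCluster only overwrites top-level keys such as Config.STORM_LOCAL_DIR.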

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/StormSubmitter.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/StormSubmitter.java b/jstorm-core/src/main/java/backtype/storm/StormSubmitter.java
index 400875e..1666b29 100644
--- a/jstorm-core/src/main/java/backtype/storm/StormSubmitter.java
+++ b/jstorm-core/src/main/java/backtype/storm/StormSubmitter.java
@@ -17,6 +17,14 @@
  */
 package backtype.storm;
 
+import backtype.storm.generated.*;
+import backtype.storm.utils.BufferFileInputStream;
+import backtype.storm.utils.NimbusClient;
+import backtype.storm.utils.Utils;
+import org.apache.thrift.TException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import java.io.File;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
@@ -24,26 +32,9 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.commons.lang.StringUtils;
-import org.apache.thrift.TException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import backtype.storm.generated.AlreadyAliveException;
-import backtype.storm.generated.InvalidTopologyException;
-import backtype.storm.generated.Nimbus;
-import backtype.storm.generated.NotAliveException;
-import backtype.storm.generated.StormTopology;
-import backtype.storm.generated.SubmitOptions;
-import backtype.storm.generated.TopologyAssignException;
-import backtype.storm.utils.BufferFileInputStream;
-import backtype.storm.utils.NimbusClient;
-import backtype.storm.utils.Utils;
-
 /**
- * Use this class to submit topologies to run on the Storm cluster. You should
- * run your program with the "storm jar" command from the command-line, and then
- * use this class to submit your topologies.
+ * Use this class to submit topologies to run on the Storm cluster. You should run your program with the "storm jar" command from the command-line, and then use
+ * this class to submit your topologies.
  */
 public class StormSubmitter {
     public static Logger LOG = LoggerFactory.getLogger(StormSubmitter.class);
@@ -55,25 +46,20 @@ public class StormSubmitter {
     }
 
     /**
-     * Submits a topology to run on the cluster. A topology runs forever or
-     * until explicitly killed.
+     * Submits a topology to run on the cluster. A topology runs forever or until explicitly killed.
      * 
      * 
      * @param name the name of the storm.
      * @param stormConf the topology-specific configuration. See {@link Config}.
      * @param topology the processing to execute.
-     * @throws AlreadyAliveException if a topology with this name is already
-     *             running
+     * @throws AlreadyAliveException if a topology with this name is already running
      * @throws InvalidTopologyException if an invalid topology was submitted
      */
-    public static void submitTopology(String name, Map stormConf,
-            StormTopology topology) throws AlreadyAliveException,
-            InvalidTopologyException {
+    public static void submitTopology(String name, Map stormConf, StormTopology topology) throws AlreadyAliveException, InvalidTopologyException {
         submitTopology(name, stormConf, topology, null);
     }
 
-    public static void submitTopology(String name, Map stormConf,
-            StormTopology topology, SubmitOptions opts, List<File> jarFiles)
+    public static void submitTopology(String name, Map stormConf, StormTopology topology, SubmitOptions opts, List<File> jarFiles)
             throws AlreadyAliveException, InvalidTopologyException {
         if (jarFiles == null) {
             jarFiles = new ArrayList<File>();
@@ -83,8 +69,7 @@ public class StormSubmitter {
 
         for (File f : jarFiles) {
             if (!f.exists()) {
-                LOG.info(f.getName() + " is not existed: "
-                        + f.getAbsolutePath());
+                LOG.info(f.getName() + " does not exist: " + f.getAbsolutePath());
                 continue;
             }
             jars.put(f.getName(), f.getAbsolutePath());
@@ -96,32 +81,25 @@ public class StormSubmitter {
         submitTopology(name, stormConf, topology, opts);
     }
 
-    public static void submitTopology(String name, Map stormConf,
-            StormTopology topology, SubmitOptions opts,
-            ProgressListener listener) throws AlreadyAliveException,
-            InvalidTopologyException {
+    public static void submitTopology(String name, Map stormConf, StormTopology topology, SubmitOptions opts, ProgressListener listener)
+            throws AlreadyAliveException, InvalidTopologyException {
         submitTopology(name, stormConf, topology, opts);
     }
 
     /**
-     * Submits a topology to run on the cluster. A topology runs forever or
-     * until explicitly killed.
+     * Submits a topology to run on the cluster. A topology runs forever or until explicitly killed.
      * 
      * 
      * @param name the name of the storm.
      * @param stormConf the topology-specific configuration. See {@link Config}.
      * @param topology the processing to execute.
-     * @param options to manipulate the starting of the topology
-     * @throws AlreadyAliveException if a topology with this name is already
-     *             running
+     * @throws AlreadyAliveException if a topology with this name is already running
      * @throws InvalidTopologyException if an invalid topology was submitted
      */
-    public static void submitTopology(String name, Map stormConf,
-            StormTopology topology, SubmitOptions opts)
-            throws AlreadyAliveException, InvalidTopologyException {
+    public static void submitTopology(String name, Map stormConf, StormTopology topology, SubmitOptions opts) throws AlreadyAliveException,
+            InvalidTopologyException {
         if (!Utils.isValidConf(stormConf)) {
-            throw new IllegalArgumentException(
-                    "Storm conf is not valid. Must be json-serializable");
+            throw new IllegalArgumentException("Storm conf is not valid. Must be json-serializable");
         }
         stormConf = new HashMap(stormConf);
         stormConf.putAll(Utils.readCommandLineOpts());
@@ -137,20 +115,16 @@ public class StormSubmitter {
                 NimbusClient client = NimbusClient.getConfiguredClient(conf);
                 try {
                     if (topologyNameExists(client, conf, name)) {
-                        throw new RuntimeException("Topology with name `" + name
-                                + "` already exists on cluster");
+                        throw new RuntimeException("Topology with name `" + name + "` already exists on cluster");
                     }
 
                     submitJar(client, conf);
-                    LOG.info("Submitting topology " + name
-                            + " in distributed mode with conf " + serConf);
+                    LOG.info("Submitting topology " + name + " in distributed mode with conf " + serConf);
                     if (opts != null) {
-                        client.getClient().submitTopologyWithOpts(name, path,
-                                serConf, topology, opts);
+                        client.getClient().submitTopologyWithOpts(name, path, serConf, topology, opts);
                     } else {
                         // this is for backwards compatibility
-                        client.getClient().submitTopology(name, path, serConf,
-                                topology);
+                        client.getClient().submitTopology(name, path, serConf, topology);
                     }
                 } finally {
                     client.close();
@@ -173,43 +147,36 @@ public class StormSubmitter {
     }
 
     /**
-     * Submits a topology to run on the cluster with a progress bar. A topology
-     * runs forever or until explicitly killed.
+     * Submits a topology to run on the cluster with a progress bar. A topology runs forever or until explicitly killed.
      * 
      * 
      * @param name the name of the storm.
      * @param stormConf the topology-specific configuration. See {@link Config}.
      * @param topology the processing to execute.
-     * @throws AlreadyAliveException if a topology with this name is already
-     *             running
+     * @throws AlreadyAliveException if a topology with this name is already running
      * @throws InvalidTopologyException if an invalid topology was submitted
      * @throws TopologyAssignException
      */
 
-    public static void submitTopologyWithProgressBar(String name,
-            Map stormConf, StormTopology topology)
-            throws AlreadyAliveException, InvalidTopologyException {
+    public static void submitTopologyWithProgressBar(String name, Map stormConf, StormTopology topology) throws AlreadyAliveException, InvalidTopologyException {
         submitTopologyWithProgressBar(name, stormConf, topology, null);
     }
 
     /**
-     * Submits a topology to run on the cluster with a progress bar. A topology
-     * runs forever or until explicitly killed.
+     * Submits a topology to run on the cluster with a progress bar. A topology runs forever or until explicitly killed.
      * 
      * 
      * @param name the name of the storm.
      * @param stormConf the topology-specific configuration. See {@link Config}.
      * @param topology the processing to execute.
      * @param opts to manipulate the starting of the topology
-     * @throws AlreadyAliveException if a topology with this name is already
-     *             running
+     * @throws AlreadyAliveException if a topology with this name is already running
      * @throws InvalidTopologyException if an invalid topology was submitted
      * @throws TopologyAssignException
      */
 
-    public static void submitTopologyWithProgressBar(String name,
-            Map stormConf, StormTopology topology, SubmitOptions opts)
-            throws AlreadyAliveException, InvalidTopologyException {
+    public static void submitTopologyWithProgressBar(String name, Map stormConf, StormTopology topology, SubmitOptions opts) throws AlreadyAliveException,
+            InvalidTopologyException {
 
         /**
          * remove progress bar in jstorm
@@ -218,21 +185,11 @@ public class StormSubmitter {
     }
 
     public static boolean topologyNameExists(NimbusClient client, Map conf, String name) {
-    	if (StringUtils.isBlank(name)) {
-    		throw new RuntimeException("TopologyName is empty");
-    	}
-    	
         try {
-            String topologyId = client.getClient().getTopologyId(name);
-            if (StringUtils.isBlank(topologyId) == false) {
-                return true;
-            }
-            return false;
-
-        } catch (NotAliveException e) {
-            return false;
+            client.getClient().getTopologyInfoByName(name);
+            return true;
         } catch (Exception e) {
-            throw new RuntimeException(e);
+            return false;
         }
     }
 
@@ -246,15 +203,9 @@ public class StormSubmitter {
                 String localJar = System.getProperty("storm.jar");
                 path = client.getClient().beginFileUpload();
                 String[] pathCache = path.split("/");
-                String uploadLocation =
-                        path + "/stormjar-" + pathCache[pathCache.length - 1]
-                                + ".jar";
-                List<String> lib =
-                        (List<String>) conf
-                                .get(GenericOptionsParser.TOPOLOGY_LIB_NAME);
-                Map<String, String> libPath =
-                        (Map<String, String>) conf
-                                .get(GenericOptionsParser.TOPOLOGY_LIB_PATH);
+                String uploadLocation = path + "/stormjar-" + pathCache[pathCache.length - 1] + ".jar";
+                List<String> lib = (List<String>) conf.get(GenericOptionsParser.TOPOLOGY_LIB_NAME);
+                Map<String, String> libPath = (Map<String, String>) conf.get(GenericOptionsParser.TOPOLOGY_LIB_PATH);
                 if (lib != null && lib.size() != 0) {
                     for (String libName : lib) {
                         String jarPath = path + "/lib/" + libName;
@@ -265,14 +216,12 @@ public class StormSubmitter {
                 } else {
                     if (localJar == null) {
                         // no lib, no client jar
-                        throw new RuntimeException(
-                                "No client app jar, please upload it");
+                        throw new RuntimeException("No client app jar, please upload it");
                     }
                 }
 
                 if (localJar != null) {
-                    submittedJar =
-                            submitJar(conf, localJar, uploadLocation, client);
+                    submittedJar = submitJar(conf, localJar, uploadLocation, client);
                 } else {
                     // no client jar, but with lib jar
                     client.getClient().finishFileUpload(uploadLocation);
@@ -285,36 +234,29 @@ public class StormSubmitter {
         }
     }
 
-    public static String submitJar(Map conf, String localJar,
-            String uploadLocation, NimbusClient client) {
+    public static String submitJar(Map conf, String localJar, String uploadLocation, NimbusClient client) {
         if (localJar == null) {
-            throw new RuntimeException(
-                    "Must submit topologies using the 'storm' client script so that StormSubmitter knows which jar to upload.");
+            throw new RuntimeException("Must submit topologies using the 'storm' client script so that StormSubmitter knows which jar to upload.");
         }
 
         try {
 
-            LOG.info("Uploading topology jar " + localJar
-                    + " to assigned location: " + uploadLocation);
+            LOG.info("Uploading topology jar " + localJar + " to assigned location: " + uploadLocation);
             int bufferSize = 512 * 1024;
-            Object maxBufSizeObject =
-                    conf.get(Config.NIMBUS_THRIFT_MAX_BUFFER_SIZE);
+            Object maxBufSizeObject = conf.get(Config.NIMBUS_THRIFT_MAX_BUFFER_SIZE);
             if (maxBufSizeObject != null) {
                 bufferSize = Utils.getInt(maxBufSizeObject) / 2;
             }
 
-            BufferFileInputStream is =
-                    new BufferFileInputStream(localJar, bufferSize);
+            BufferFileInputStream is = new BufferFileInputStream(localJar, bufferSize);
             while (true) {
                 byte[] toSubmit = is.read();
                 if (toSubmit.length == 0)
                     break;
-                client.getClient().uploadChunk(uploadLocation,
-                        ByteBuffer.wrap(toSubmit));
+                client.getClient().uploadChunk(uploadLocation, ByteBuffer.wrap(toSubmit));
             }
             client.getClient().finishFileUpload(uploadLocation);
-            LOG.info("Successfully uploaded topology jar to assigned location: "
-                    + uploadLocation);
+            LOG.info("Successfully uploaded topology jar to assigned location: " + uploadLocation);
             return uploadLocation;
         } catch (Exception e) {
             throw new RuntimeException(e);
@@ -350,8 +292,7 @@ public class StormSubmitter {
          * @param bytesUploaded - number of bytes transferred so far
          * @param totalBytes - total number of bytes of the file
          */
-        public void onProgress(String srcFile, String targetFile,
-                long bytesUploaded, long totalBytes);
+        public void onProgress(String srcFile, String targetFile, long bytesUploaded, long totalBytes);
 
         /**
          * called when the file is uploaded
@@ -360,7 +301,6 @@ public class StormSubmitter {
          * @param targetFile - destination file
          * @param totalBytes - total number of bytes of the file
          */
-        public void onCompleted(String srcFile, String targetFile,
-                long totalBytes);
+        public void onCompleted(String srcFile, String targetFile, long totalBytes);
     }
 }
\ No newline at end of file
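
Note the semantic shift in topologyNameExists above: it now calls getTopologyInfoByName and maps any exception to false, where the old code distinguished NotAliveException from transport failures (and rejected blank names). For reference, cluster-mode submission as the class javadoc describes, sketched with a hypothetical builder:

    Config conf = new Config();
    conf.setNumWorkers(4);

    TopologyBuilder builder = new TopologyBuilder();
    // ... wire spouts and bolts here ...

    // launch via `storm jar my-topology.jar my.Main` so the storm.jar property is set for upload
    StormSubmitter.submitTopology("my-topology", conf, builder.createTopology());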

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/Tool.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/Tool.java b/jstorm-core/src/main/java/backtype/storm/Tool.java
index 6722b24..0dc5e32 100755
--- a/jstorm-core/src/main/java/backtype/storm/Tool.java
+++ b/jstorm-core/src/main/java/backtype/storm/Tool.java
@@ -58,13 +58,13 @@ package backtype.storm;
 
 public abstract class Tool {
     Config config;
-    
+
     public abstract int run(String[] args) throws Exception;
-    
+
     public Config getConf() {
         return config;
     }
-    
+
     public void setConf(Config config) {
         this.config = config;
     }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/ToolRunner.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/ToolRunner.java b/jstorm-core/src/main/java/backtype/storm/ToolRunner.java
index 33f5034..d70da41 100755
--- a/jstorm-core/src/main/java/backtype/storm/ToolRunner.java
+++ b/jstorm-core/src/main/java/backtype/storm/ToolRunner.java
@@ -32,6 +32,8 @@ import backtype.storm.utils.Utils;
  * href="{@docRoot} to parse the <a href="{@docRoot} to parse the <a href="{@docRoot} to parse the <a
  * href="{@docRoot}
  * to parse the <a href="{@docRoot} to parse the <a href="{@docRoot} to parse the <a href="{@docRoot} to parse the <a href="{@docRoot}
+ * to parse the <a href="{@docRoot} to parse the <a href="{@docRoot} to parse the <a href="{@docRoot} to parse the <a href="{@docRoot} to parse the <a
+ * href="{@docRoot} to parse the <a href="{@docRoot} to parse the <a href="{@docRoot} to parse the <a href="{@docRoot}
  * /backtype/storm/GenericOptionsParser.html#GenericOptions"> generic storm command line arguments</a> and modifies the <code>Config</code> of the
  * <code>Tool</code>. The application-specific options are passed along without being modified.
  * 
@@ -41,21 +43,22 @@ import backtype.storm.utils.Utils;
 
 public class ToolRunner {
     static final Logger LOG = LoggerFactory.getLogger(ToolRunner.class);
-    
+
     public static void run(Tool tool, String[] args) {
         run(tool.getConf(), tool, args);
     }
-    
+
     public static void run(Config conf, Tool tool, String[] args) {
         try {
             if (conf == null) {
                 conf = new Config();
                 conf.putAll(Utils.readStormConfig());
             }
-            
+
             GenericOptionsParser parser = new GenericOptionsParser(conf, args);
+            LOG.info(conf.toString());
             tool.setConf(conf);
-            
+
             System.exit(tool.run(parser.getRemainingArgs()));
         } catch (ParseException e) {
             LOG.error("Error parsing generic options: {}", e.getMessage());
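
A sketch of the Tool/ToolRunner contract documented above, with a hypothetical subclass: ToolRunner loads the storm config if none is set, lets GenericOptionsParser consume the generic flags, then exits with the value run() returns on the remaining arguments.

    public class SubmitTool extends Tool {
        @Override
        public int run(String[] args) throws Exception {
            TopologyBuilder builder = new TopologyBuilder();
            // ... wire spouts and bolts here ...
            StormSubmitter.submitTopology(args[0], getConf(), builder.createTopology());
            return 0;
        }

        public static void main(String[] args) {
            ToolRunner.run(new SubmitTool(), args);  // parses generic options, calls run(), then System.exit
        }
    }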

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/clojure/ClojureBolt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/clojure/ClojureBolt.java b/jstorm-core/src/main/java/backtype/storm/clojure/ClojureBolt.java
index 5de9bde..d3d1d37 100755
--- a/jstorm-core/src/main/java/backtype/storm/clojure/ClojureBolt.java
+++ b/jstorm-core/src/main/java/backtype/storm/clojure/ClojureBolt.java
@@ -36,15 +36,14 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
-
 public class ClojureBolt implements IRichBolt, FinishedCallback {
     Map<String, StreamInfo> _fields;
     List<String> _fnSpec;
     List<String> _confSpec;
     List<Object> _params;
-    
+
     IBolt _bolt;
-    
+
     public ClojureBolt(List fnSpec, List confSpec, List<Object> params, Map<String, StreamInfo> fields) {
         _fnSpec = fnSpec;
         _confSpec = confSpec;
@@ -57,21 +56,23 @@ public class ClojureBolt implements IRichBolt, FinishedCallback {
         IFn hof = Utils.loadClojureFn(_fnSpec.get(0), _fnSpec.get(1));
         try {
             IFn preparer = (IFn) hof.applyTo(RT.seq(_params));
-            final Map<Keyword,Object> collectorMap = new PersistentArrayMap( new Object[] {
-                Keyword.intern(Symbol.create("output-collector")), collector,
-                Keyword.intern(Symbol.create("context")), context});
-            List<Object> args = new ArrayList<Object>() {{
-                add(stormConf);
-                add(context);
-                add(collectorMap);
-            }};
-            
+            final Map<Keyword, Object> collectorMap =
+                    new PersistentArrayMap(new Object[] { Keyword.intern(Symbol.create("output-collector")), collector,
+                            Keyword.intern(Symbol.create("context")), context });
+            List<Object> args = new ArrayList<Object>() {
+                {
+                    add(stormConf);
+                    add(context);
+                    add(collectorMap);
+                }
+            };
+
             _bolt = (IBolt) preparer.applyTo(RT.seq(args));
-            //this is kind of unnecessary for clojure
+            // this is kind of unnecessary for clojure
             try {
                 _bolt.prepare(stormConf, context, collector);
-            } catch(AbstractMethodError ame) {
-                
+            } catch (AbstractMethodError ame) {
+
             }
         } catch (Exception e) {
             throw new RuntimeException(e);
@@ -85,16 +86,16 @@ public class ClojureBolt implements IRichBolt, FinishedCallback {
 
     @Override
     public void cleanup() {
-            try {
-                _bolt.cleanup();
-            } catch(AbstractMethodError ame) {
-                
-            }
+        try {
+            _bolt.cleanup();
+        } catch (AbstractMethodError ame) {
+
+        }
     }
 
     @Override
     public void declareOutputFields(OutputFieldsDeclarer declarer) {
-        for(String stream: _fields.keySet()) {
+        for (String stream : _fields.keySet()) {
             StreamInfo info = _fields.get(stream);
             declarer.declareStream(stream, info.is_direct(), new Fields(info.get_output_fields()));
         }
@@ -102,7 +103,7 @@ public class ClojureBolt implements IRichBolt, FinishedCallback {
 
     @Override
     public void finishedId(Object id) {
-        if(_bolt instanceof FinishedCallback) {
+        if (_bolt instanceof FinishedCallback) {
             ((FinishedCallback) _bolt).finishedId(id);
         }
     }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/clojure/ClojureSpout.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/clojure/ClojureSpout.java b/jstorm-core/src/main/java/backtype/storm/clojure/ClojureSpout.java
index f6422e3..fc231ce 100755
--- a/jstorm-core/src/main/java/backtype/storm/clojure/ClojureSpout.java
+++ b/jstorm-core/src/main/java/backtype/storm/clojure/ClojureSpout.java
@@ -39,37 +39,38 @@ public class ClojureSpout implements IRichSpout {
     List<String> _fnSpec;
     List<String> _confSpec;
     List<Object> _params;
-    
+
     ISpout _spout;
-    
+
     public ClojureSpout(List fnSpec, List confSpec, List<Object> params, Map<String, StreamInfo> fields) {
         _fnSpec = fnSpec;
         _confSpec = confSpec;
         _params = params;
         _fields = fields;
     }
-    
 
     @Override
     public void open(final Map conf, final TopologyContext context, final SpoutOutputCollector collector) {
         IFn hof = Utils.loadClojureFn(_fnSpec.get(0), _fnSpec.get(1));
         try {
             IFn preparer = (IFn) hof.applyTo(RT.seq(_params));
-            final Map<Keyword,Object> collectorMap = new PersistentArrayMap( new Object[] {
-                Keyword.intern(Symbol.create("output-collector")), collector,
-                Keyword.intern(Symbol.create("context")), context});
-            List<Object> args = new ArrayList<Object>() {{
-                add(conf);
-                add(context);
-                add(collectorMap);
-            }};
-            
+            final Map<Keyword, Object> collectorMap =
+                    new PersistentArrayMap(new Object[] { Keyword.intern(Symbol.create("output-collector")), collector,
+                            Keyword.intern(Symbol.create("context")), context });
+            List<Object> args = new ArrayList<Object>() {
+                {
+                    add(conf);
+                    add(context);
+                    add(collectorMap);
+                }
+            };
+
             _spout = (ISpout) preparer.applyTo(RT.seq(args));
-            //this is kind of unnecessary for clojure
+            // this is kind of unnecessary for clojure
             try {
                 _spout.open(conf, context, collector);
-            } catch(AbstractMethodError ame) {
-                
+            } catch (AbstractMethodError ame) {
+
             }
         } catch (Exception e) {
             throw new RuntimeException(e);
@@ -80,8 +81,8 @@ public class ClojureSpout implements IRichSpout {
     public void close() {
         try {
             _spout.close();
-        } catch(AbstractMethodError ame) {
-                
+        } catch (AbstractMethodError ame) {
+
         }
     }
 
@@ -89,8 +90,8 @@ public class ClojureSpout implements IRichSpout {
     public void nextTuple() {
         try {
             _spout.nextTuple();
-        } catch(AbstractMethodError ame) {
-                
+        } catch (AbstractMethodError ame) {
+
         }
 
     }
@@ -99,8 +100,8 @@ public class ClojureSpout implements IRichSpout {
     public void ack(Object msgId) {
         try {
             _spout.ack(msgId);
-        } catch(AbstractMethodError ame) {
-                
+        } catch (AbstractMethodError ame) {
+
         }
 
     }
@@ -109,20 +110,20 @@ public class ClojureSpout implements IRichSpout {
     public void fail(Object msgId) {
         try {
             _spout.fail(msgId);
-        } catch(AbstractMethodError ame) {
-                
+        } catch (AbstractMethodError ame) {
+
         }
 
     }
 
     @Override
     public void declareOutputFields(OutputFieldsDeclarer declarer) {
-        for(String stream: _fields.keySet()) {
+        for (String stream : _fields.keySet()) {
             StreamInfo info = _fields.get(stream);
             declarer.declareStream(stream, info.is_direct(), new Fields(info.get_output_fields()));
         }
     }
-    
+
     @Override
     public Map<String, Object> getComponentConfiguration() {
         IFn hof = Utils.loadClojureFn(_confSpec.get(0), _confSpec.get(1));
@@ -137,8 +138,8 @@ public class ClojureSpout implements IRichSpout {
     public void activate() {
         try {
             _spout.activate();
-        } catch(AbstractMethodError ame) {
-                
+        } catch (AbstractMethodError ame) {
+
         }
     }
 
@@ -146,8 +147,8 @@ public class ClojureSpout implements IRichSpout {
     public void deactivate() {
         try {
             _spout.deactivate();
-        } catch(AbstractMethodError ame) {
-                
+        } catch (AbstractMethodError ame) {
+
         }
     }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/clojure/RichShellBolt.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/clojure/RichShellBolt.java b/jstorm-core/src/main/java/backtype/storm/clojure/RichShellBolt.java
index a155008..53136c7 100755
--- a/jstorm-core/src/main/java/backtype/storm/clojure/RichShellBolt.java
+++ b/jstorm-core/src/main/java/backtype/storm/clojure/RichShellBolt.java
@@ -26,20 +26,20 @@ import java.util.Map;
 
 public class RichShellBolt extends ShellBolt implements IRichBolt {
     private Map<String, StreamInfo> _outputs;
-    
+
     public RichShellBolt(String[] command, Map<String, StreamInfo> outputs) {
         super(command);
         _outputs = outputs;
     }
-    
+
     @Override
     public void declareOutputFields(OutputFieldsDeclarer declarer) {
-        for(String stream: _outputs.keySet()) {
+        for (String stream : _outputs.keySet()) {
             StreamInfo def = _outputs.get(stream);
-            if(def.is_direct()) {
+            if (def.is_direct()) {
                 declarer.declareStream(stream, true, new Fields(def.get_output_fields()));
             } else {
-                declarer.declareStream(stream, new Fields(def.get_output_fields()));                
+                declarer.declareStream(stream, new Fields(def.get_output_fields()));
             }
         }
     }
@@ -47,5 +47,5 @@ public class RichShellBolt extends ShellBolt implements IRichBolt {
     @Override
     public Map<String, Object> getComponentConfiguration() {
         return null;
-    }    
+    }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/clojure/RichShellSpout.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/clojure/RichShellSpout.java b/jstorm-core/src/main/java/backtype/storm/clojure/RichShellSpout.java
index b49fbef..2f7a134 100755
--- a/jstorm-core/src/main/java/backtype/storm/clojure/RichShellSpout.java
+++ b/jstorm-core/src/main/java/backtype/storm/clojure/RichShellSpout.java
@@ -34,9 +34,9 @@ public class RichShellSpout extends ShellSpout implements IRichSpout {
 
     @Override
     public void declareOutputFields(OutputFieldsDeclarer declarer) {
-        for(String stream: _outputs.keySet()) {
+        for (String stream : _outputs.keySet()) {
             StreamInfo def = _outputs.get(stream);
-            if(def.is_direct()) {
+            if (def.is_direct()) {
                 declarer.declareStream(stream, true, new Fields(def.get_output_fields()));
             } else {
                 declarer.declareStream(stream, new Fields(def.get_output_fields()));

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/command/activate.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/command/activate.java b/jstorm-core/src/main/java/backtype/storm/command/activate.java
index ed12e09..11a0db5 100755
--- a/jstorm-core/src/main/java/backtype/storm/command/activate.java
+++ b/jstorm-core/src/main/java/backtype/storm/command/activate.java
@@ -30,7 +30,7 @@ import backtype.storm.utils.Utils;
  * 
  */
 public class activate {
-    
+
     /**
      * @param args
      */
@@ -39,17 +39,17 @@ public class activate {
         if (args == null || args.length == 0) {
             throw new InvalidParameterException("Should input topology name");
         }
-        
+
         String topologyName = args[0];
-        
+
         NimbusClient client = null;
         try {
-            
+
             Map conf = Utils.readStormConfig();
             client = NimbusClient.getConfiguredClient(conf);
-            
+
             client.getClient().activate(topologyName);
-            
+
             System.out.println("Successfully submit command activate " + topologyName);
         } catch (Exception e) {
             System.out.println(e.getMessage());
@@ -61,5 +61,5 @@ public class activate {
             }
         }
     }
-    
+
 }
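
activate and its siblings below (deactivate, kill_topology, list, metrics_monitor, rebalance, restart) all share one thrift-client shape, condensed here as a sketch:

    Map conf = Utils.readStormConfig();
    NimbusClient client = NimbusClient.getConfiguredClient(conf);
    try {
        client.getClient().activate("my-topology");  // any Nimbus.Iface call slots in here
    } finally {
        client.close();                              // always release the connection
    }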

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/command/config_value.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/command/config_value.java b/jstorm-core/src/main/java/backtype/storm/command/config_value.java
index 868ffdc..dd8812a 100755
--- a/jstorm-core/src/main/java/backtype/storm/command/config_value.java
+++ b/jstorm-core/src/main/java/backtype/storm/command/config_value.java
@@ -30,7 +30,7 @@ import backtype.storm.utils.Utils;
  * 
  */
 public class config_value {
-    
+
     /**
      * @param args
      */
@@ -39,12 +39,12 @@ public class config_value {
         if (args == null || args.length == 0) {
             throw new InvalidParameterException("Should input key name");
         }
-        
+
         String key = args[0];
-        
+
         Map conf = Utils.readStormConfig();
-        
+
         System.out.print("VALUE: " + String.valueOf(conf.get(key)));
     }
-    
+
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/command/deactivate.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/command/deactivate.java b/jstorm-core/src/main/java/backtype/storm/command/deactivate.java
index 22ac20d..59e97d6 100755
--- a/jstorm-core/src/main/java/backtype/storm/command/deactivate.java
+++ b/jstorm-core/src/main/java/backtype/storm/command/deactivate.java
@@ -30,7 +30,7 @@ import backtype.storm.utils.Utils;
  * 
  */
 public class deactivate {
-    
+
     /**
      * @param args
      */
@@ -39,17 +39,17 @@ public class deactivate {
         if (args == null || args.length == 0) {
             throw new InvalidParameterException("Should input topology name");
         }
-        
+
         String topologyName = args[0];
-        
+
         NimbusClient client = null;
         try {
-            
+
             Map conf = Utils.readStormConfig();
             client = NimbusClient.getConfiguredClient(conf);
-            
+
             client.getClient().deactivate(topologyName);
-            
+
             System.out.println("Successfully submit command deactivate " + topologyName);
         } catch (Exception e) {
             System.out.println(e.getMessage());
@@ -61,5 +61,5 @@ public class deactivate {
             }
         }
     }
-    
+
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/command/kill_topology.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/command/kill_topology.java b/jstorm-core/src/main/java/backtype/storm/command/kill_topology.java
index 4ab3893..bda20e1 100755
--- a/jstorm-core/src/main/java/backtype/storm/command/kill_topology.java
+++ b/jstorm-core/src/main/java/backtype/storm/command/kill_topology.java
@@ -17,13 +17,13 @@
  */
 package backtype.storm.command;
 
-import java.security.InvalidParameterException;
-import java.util.Map;
-
 import backtype.storm.generated.KillOptions;
 import backtype.storm.utils.NimbusClient;
 import backtype.storm.utils.Utils;
 
+import java.security.InvalidParameterException;
+import java.util.Map;
+
 /**
  * Kill topology
  * 
@@ -31,7 +31,7 @@ import backtype.storm.utils.Utils;
  * 
  */
 public class kill_topology {
-    
+
     /**
      * @param args
      */
@@ -40,28 +40,28 @@ public class kill_topology {
         if (args == null || args.length == 0) {
             throw new InvalidParameterException("Should input topology name");
         }
-        
+
         String topologyName = args[0];
-        
+
         NimbusClient client = null;
         try {
-            
+
             Map conf = Utils.readStormConfig();
             client = NimbusClient.getConfiguredClient(conf);
-            
+
             if (args.length == 1) {
-                
+
                 client.getClient().killTopology(topologyName);
             } else {
                 int delaySeconds = Integer.parseInt(args[1]);
-                
+
                 KillOptions options = new KillOptions();
                 options.set_wait_secs(delaySeconds);
-                
+
                 client.getClient().killTopologyWithOpts(topologyName, options);
-                
+
             }
-            
+
             System.out.println("Successfully submit command kill " + topologyName);
         } catch (Exception e) {
             System.out.println(e.getMessage());
@@ -73,5 +73,5 @@ public class kill_topology {
             }
         }
     }
-    
+
 }
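
The two-argument branch above is the delayed kill; wait_secs gives workers time to drain in-flight tuples before teardown. Reusing the client sketch from activate:

    KillOptions options = new KillOptions();
    options.set_wait_secs(30);                       // drain for 30 seconds first
    client.getClient().killTopologyWithOpts("my-topology", options);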

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/command/list.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/command/list.java b/jstorm-core/src/main/java/backtype/storm/command/list.java
index 3b4efdb..0c6930d 100755
--- a/jstorm-core/src/main/java/backtype/storm/command/list.java
+++ b/jstorm-core/src/main/java/backtype/storm/command/list.java
@@ -33,29 +33,29 @@ import backtype.storm.utils.Utils;
  * 
  */
 public class list {
-    
+
     /**
      * @param args
      */
     public static void main(String[] args) {
-        
+
         NimbusClient client = null;
         try {
-            
+
             Map conf = Utils.readStormConfig();
             client = NimbusClient.getConfiguredClient(conf);
-            
+
             if (args.length > 0 && StringUtils.isBlank(args[0]) == false) {
                 String topologyName = args[0];
                 TopologyInfo info = client.getClient().getTopologyInfoByName(topologyName);
-                
+
                 System.out.println("Successfully get topology info \n" + Utils.toPrettyJsonString(info));
             } else {
                 ClusterSummary clusterSummary = client.getClient().getClusterInfo();
-                
+
                 System.out.println("Successfully get cluster info \n" + Utils.toPrettyJsonString(clusterSummary));
             }
-            
+
         } catch (Exception e) {
             System.out.println(e.getMessage());
             e.printStackTrace();
@@ -66,5 +66,5 @@ public class list {
             }
         }
     }
-    
+
 }
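
The two query paths list exercises, again reusing the client sketch from activate:

    // whole-cluster view
    ClusterSummary summary = client.getClient().getClusterInfo();
    System.out.println(Utils.toPrettyJsonString(summary));

    // single-topology view
    TopologyInfo info = client.getClient().getTopologyInfoByName("my-topology");
    System.out.println(Utils.toPrettyJsonString(info));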

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/command/metrics_monitor.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/command/metrics_monitor.java b/jstorm-core/src/main/java/backtype/storm/command/metrics_monitor.java
index 6607445..a673fcf 100755
--- a/jstorm-core/src/main/java/backtype/storm/command/metrics_monitor.java
+++ b/jstorm-core/src/main/java/backtype/storm/command/metrics_monitor.java
@@ -17,13 +17,13 @@
  */
 package backtype.storm.command;
 
-import java.util.Map;
-import java.security.InvalidParameterException;
-
 import backtype.storm.generated.MonitorOptions;
 import backtype.storm.utils.NimbusClient;
 import backtype.storm.utils.Utils;
 
+import java.security.InvalidParameterException;
+import java.util.Map;
+
 /**
  * Monitor topology
  * 
@@ -31,7 +31,7 @@ import backtype.storm.utils.Utils;
  * 
  */
 public class metrics_monitor {
-    
+
     /**
      * @param args
      */
@@ -40,22 +40,22 @@ public class metrics_monitor {
         if (args == null || args.length <= 1) {
             throw new InvalidParameterException("Should input topology name and enable flag");
         }
-        
+
         String topologyName = args[0];
-        
+
         NimbusClient client = null;
         try {
-            
+
             Map conf = Utils.readStormConfig();
             client = NimbusClient.getConfiguredClient(conf);
-            
+
             boolean isEnable = Boolean.valueOf(args[1]).booleanValue();
-            
+
             MonitorOptions options = new MonitorOptions();
             options.set_isEnable(isEnable);
-            
+
             client.getClient().metricMonitor(topologyName, options);
-            
+
             String str = (isEnable) ? "enable" : "disable";
             System.out.println("Successfully submit command to " + str + " the monitor of " + topologyName);
         } catch (Exception e) {
@@ -68,5 +68,5 @@ public class metrics_monitor {
             }
         }
     }
-    
+
 }
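
The same client pattern with MonitorOptions toggles per-topology metric collection:

    MonitorOptions options = new MonitorOptions();
    options.set_isEnable(true);                      // false disables monitoring again
    client.getClient().metricMonitor("my-topology", options);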

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/command/rebalance.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/command/rebalance.java b/jstorm-core/src/main/java/backtype/storm/command/rebalance.java
index f0cf69f..6d08934 100755
--- a/jstorm-core/src/main/java/backtype/storm/command/rebalance.java
+++ b/jstorm-core/src/main/java/backtype/storm/command/rebalance.java
@@ -17,13 +17,12 @@
  */
 package backtype.storm.command;
 
-import java.security.InvalidParameterException;
-import java.util.Map;
-
 import backtype.storm.generated.RebalanceOptions;
 import backtype.storm.utils.NimbusClient;
 import backtype.storm.utils.Utils;
 
+import java.util.Map;
+
 /**
  * Rebalance topology
  * 
@@ -32,7 +31,7 @@ import backtype.storm.utils.Utils;
  */
 public class rebalance {
     static final String REASSIGN_FLAG = "-r";
-    
+
     /**
      * @param args
      */
@@ -42,15 +41,15 @@ public class rebalance {
             printErrorInfo();
             return;
         }
-        
+
         int argsIndex = 0;
         String topologyName = null;
-        
+
         try {
             RebalanceOptions options = new RebalanceOptions();
             options.set_reassign(false);
             options.set_conf(null);
-            
+
             if (args[argsIndex].equalsIgnoreCase(REASSIGN_FLAG)) {
                 options.set_reassign(true);
                 argsIndex++;
@@ -64,7 +63,7 @@ public class rebalance {
             } else {
                 topologyName = args[argsIndex];
             }
-            
+
             argsIndex++;
             if (args.length > argsIndex) {
                 for (int i = argsIndex; i < args.length; i++) {
@@ -85,32 +84,34 @@ public class rebalance {
                     }
                 }
             }
-            
+
             submitRebalance(topologyName, options);
-            
-            System.out.println("Successfully submit command rebalance " + topologyName + ", delaySecs=" + options.get_wait_secs() + ", reassignFlag=" + options.is_reassign() + ", newConfiguration=" + options.get_conf());
+
+            System.out.println("Successfully submitted command rebalance " + topologyName
+                    + ", delaySecs=" + options.get_wait_secs() + ", reassignFlag=" + options.is_reassign()
+                    + ", newConfiguration=" + options.get_conf());
         } catch (Exception e) {
             System.out.println(e.getMessage());
             e.printStackTrace();
             throw new RuntimeException(e);
         }
     }
-    
+
     private static void printErrorInfo() {
         System.out.println("Error: Invalid parameters!");
         System.out.println("USAGE: jstorm rebalance [-r] TopologyName [DelayTime] [NewConfig]");
     }
-    
+
     public static void submitRebalance(String topologyName, RebalanceOptions options) throws Exception {
         submitRebalance(topologyName, options, null);
     }
-    
+
     public static void submitRebalance(String topologyName, RebalanceOptions options, Map conf) throws Exception {
         Map stormConf = Utils.readStormConfig();
         if (conf != null) {
             stormConf.putAll(conf);
         }
-        
+
         NimbusClient client = null;
         try {
             client = NimbusClient.getConfiguredClient(stormConf);
@@ -123,5 +124,5 @@ public class rebalance {
             }
         }
     }
-    
+
 }
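
As a usage note (a sketch, not part of the patch): the same rebalance can be submitted programmatically through submitRebalance; the topology name and delay below are placeholders.

    import backtype.storm.generated.RebalanceOptions;

    public class RebalanceExample {
        public static void main(String[] args) throws Exception {
            RebalanceOptions options = new RebalanceOptions();
            options.set_reassign(true);   // equivalent to the -r flag
            options.set_wait_secs(30);    // equivalent to the DelayTime argument
            backtype.storm.command.rebalance.submitRebalance("my-topology", options);
        }
    }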

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/command/restart.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/command/restart.java b/jstorm-core/src/main/java/backtype/storm/command/restart.java
index ecec9a3..5a216ea 100755
--- a/jstorm-core/src/main/java/backtype/storm/command/restart.java
+++ b/jstorm-core/src/main/java/backtype/storm/command/restart.java
@@ -45,26 +45,26 @@ public class restart {
         if (args == null || args.length == 0) {
             throw new InvalidParameterException("Should input topology name");
         }
-        
+
         String topologyName = args[0];
-        
+
         NimbusClient client = null;
         try {
             Map conf = Utils.readStormConfig();
             client = NimbusClient.getConfiguredClient(conf);
-            
+
             System.out.println("It will take 15 ~ 100 seconds to restart, please wait patiently\n");
-            
+
             if (args.length == 1) {
                 client.getClient().restart(topologyName, null);
             } else {
                 Map loadConf = Utils.loadConf(args[1]);
                 String jsonConf = Utils.to_json(loadConf);
                 System.out.println("New configuration:\n" + jsonConf);
-                
+
                 client.getClient().restart(topologyName, jsonConf);
             }
-            
+
             System.out.println("Successfully submit command restart " + topologyName);
         } catch (Exception e) {
             System.out.println(e.getMessage());
@@ -76,5 +76,5 @@ public class restart {
             }
         }
     }
-    
+
 }
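
For reference, a hedged sketch of driving this command from code; the config file name is a placeholder for whatever Utils.loadConf accepts:

    // Hypothetical invocation: restart "my-topology" with a new config file.
    public class RestartExample {
        public static void main(String[] args) {
            backtype.storm.command.restart.main(
                    new String[] { "my-topology", "new-conf.yaml" });
        }
    }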

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/command/update_config.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/command/update_config.java b/jstorm-core/src/main/java/backtype/storm/command/update_config.java
deleted file mode 100644
index be78f19..0000000
--- a/jstorm-core/src/main/java/backtype/storm/command/update_config.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.command;
-
-import java.security.InvalidParameterException;
-import java.util.Map;
-
-import backtype.storm.utils.NimbusClient;
-import backtype.storm.utils.Utils;
-
-/**
- * Update user configuration
- * 
- * @author basti
- * 
- */
-public class update_config {
-    /**
-     * @param args
-     */
-    public static void main(String[] args) {
-        // TODO Auto-generated method stub
-        if (args == null || args.length < 2) {
-            throw new InvalidParameterException(
-                    "[USAGE] update_config topologyName config");
-        }
-
-        String topologyName = args[0];
-
-        NimbusClient client = null;
-        try {
-            Map conf = Utils.readStormConfig();
-            client = NimbusClient.getConfiguredClient(conf);
-
-            Map loadConf = Utils.loadConf(args[1]);
-            String jsonConf = Utils.to_json(loadConf);
-            System.out.println("New configuration:\n" + jsonConf);
-
-            client.getClient().updateConf(topologyName, jsonConf);
-
-            System.out.println("Successfully submit command update_conf "
-                    + topologyName);
-        } catch (Exception e) {
-            System.out.println(e.getMessage());
-            e.printStackTrace();
-            throw new RuntimeException(e);
-        } finally {
-            if (client != null) {
-                client.close();
-            }
-        }
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/command/update_topology.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/command/update_topology.java b/jstorm-core/src/main/java/backtype/storm/command/update_topology.java
new file mode 100644
index 0000000..85172a7
--- /dev/null
+++ b/jstorm-core/src/main/java/backtype/storm/command/update_topology.java
@@ -0,0 +1,164 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package backtype.storm.command;
+
+import backtype.storm.GenericOptionsParser;
+import backtype.storm.StormSubmitter;
+import backtype.storm.utils.NimbusClient;
+import backtype.storm.utils.Utils;
+import org.apache.commons.cli.*;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Created by xiaojian.fxj on 2015/10/13.
+ */
+public class update_topology {
+    public static final String UPDATE_CONF = "-conf";
+
+    public static final String UPDATE_JAR = "-jar";
+
+    public static void usage() {
+        System.out.println("update topology config, please do as following:");
+        System.out.println("update_topology topologyName -conf configFile");
+
+        System.out.println("update topology jar, please do as following:");
+        System.out.println("update_topology topologyName -jar jarFile");
+
+        System.out.println("update topology jar and conf, please do as following:");
+        System.out.println("update_topology topologyName -jar jarFile -conf configFile");
+    }
+
+    private static Options buildGeneralOptions(Options opts) {
+        Options r = new Options();
+
+        for (Object o : opts.getOptions())
+            r.addOption((Option) o);
+
+        Option jar = OptionBuilder.withArgName("path").hasArg()
+                .withDescription("comma  jar of the submitted topology")
+                .create("jar");
+        r.addOption(jar);
+
+        Option conf = OptionBuilder.withArgName("configuration file").hasArg()
+                .withDescription("an application configuration file")
+                .create("conf");
+        r.addOption(conf);
+        return r;
+    }
+
+    private static void updateTopology(String topologyName, String pathJar,
+            String pathConf) {
+        NimbusClient client = null;
+        Map loadMap = null;
+        if (pathConf != null) {
+            loadMap = Utils.loadConf(pathConf);
+        } else {
+            loadMap = new HashMap();
+        }
+
+        Map conf = Utils.readStormConfig();
+
+        conf.putAll(loadMap);
+        client = NimbusClient.getConfiguredClient(conf);
+        try {
+            // update jar
+            String uploadLocation = null;
+            if (pathJar != null) {
+                System.out.println("Jar update to master yet. Submitting jar of " + pathJar);
+                String path = client.getClient().beginFileUpload();
+                String[] pathCache = path.split("/");
+                uploadLocation = path + "/stormjar-" + pathCache[pathCache.length - 1] + ".jar";
+                List<String> lib = (List<String>) conf .get(GenericOptionsParser.TOPOLOGY_LIB_NAME);
+                Map<String, String> libPath = (Map<String, String>) conf .get(GenericOptionsParser.TOPOLOGY_LIB_PATH);
+                if (lib != null && lib.size() != 0) {
+                    for (String libName : lib) {
+                        String jarPath = path + "/lib/" + libName;
+                        client.getClient().beginLibUpload(jarPath);
+                        StormSubmitter.submitJar(conf, libPath.get(libName), jarPath, client);
+                    }
+
+                } else {
+                    if (pathJar == null) {
+                        // no lib, no client jar
+                        throw new RuntimeException("No client app jar, please upload it");
+                    }
+                }
+
+                if (pathJar != null) {
+                    StormSubmitter.submitJar(conf, pathJar, uploadLocation, client);
+                } else {
+                    // no client jar, but with lib jar
+                    client.getClient().finishFileUpload(uploadLocation);
+                }
+            }
+
+            // update topology
+            String jsonConf = Utils.to_json(loadMap);
+            System.out.println("New configuration:\n" + jsonConf);
+
+            client.getClient().updateTopology(topologyName, uploadLocation,
+                    jsonConf);
+
+            System.out.println("Successfully submit command update " + topologyName);
+
+        } catch (Exception e) {
+            e.printStackTrace();
+            throw new RuntimeException(e);
+        } finally {
+            if (client != null) {
+                client.close();
+            }
+        }
+
+    }
+
+    /**
+     * @param args
+     */
+    public static void main(String[] args) {
+        if (args == null || args.length < 3) {
+            System.out.println("Invalid parameter");
+            usage();
+            return;
+        }
+        String topologyName = args[0];
+        try {
+            String[] str2 = Arrays.copyOfRange(args, 1, args.length);
+            CommandLineParser parser = new GnuParser();
+            Options r = buildGeneralOptions(new Options());
+            CommandLine commandLine = parser.parse(r, str2, true);
+
+            String pathConf = null;
+            String pathJar = null;
+            if (commandLine.hasOption("conf")) {
+                pathConf = (commandLine.getOptionValues("conf"))[0];
+            }
+            if (commandLine.hasOption("jar")) {
+                pathJar = (commandLine.getOptionValues("jar"))[0];
+            }
+            if (pathConf != null || pathJar != null)
+                updateTopology(topologyName, pathJar, pathConf);
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
+    }
+}
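
A usage sketch matching usage() above; both file paths are placeholders:

    // Hypothetical invocation: update both the jar and the config of "my-topology".
    public class UpdateTopologyExample {
        public static void main(String[] args) {
            backtype.storm.command.update_topology.main(new String[] {
                    "my-topology",
                    "-jar", "/path/to/new-topology.jar",
                    "-conf", "/path/to/new-conf.yaml" });
        }
    }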

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/coordination/BatchBoltExecutor.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/coordination/BatchBoltExecutor.java b/jstorm-core/src/main/java/backtype/storm/coordination/BatchBoltExecutor.java
index 8653010..d9163e5 100755
--- a/jstorm-core/src/main/java/backtype/storm/coordination/BatchBoltExecutor.java
+++ b/jstorm-core/src/main/java/backtype/storm/coordination/BatchBoltExecutor.java
@@ -32,18 +32,18 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class BatchBoltExecutor implements IRichBolt, FinishedCallback, TimeoutCallback {
-    public static Logger LOG = LoggerFactory.getLogger(BatchBoltExecutor.class);    
+    public static Logger LOG = LoggerFactory.getLogger(BatchBoltExecutor.class);
 
     byte[] _boltSer;
     Map<Object, IBatchBolt> _openTransactions;
     Map _conf;
     TopologyContext _context;
     BatchOutputCollectorImpl _collector;
-    
+
     public BatchBoltExecutor(IBatchBolt bolt) {
         _boltSer = Utils.javaSerialize(bolt);
     }
-    
+
     @Override
     public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
         _conf = conf;
@@ -57,11 +57,11 @@ public class BatchBoltExecutor implements IRichBolt, FinishedCallback, TimeoutCa
         Object id = input.getValue(0);
         IBatchBolt bolt = getBatchBolt(id);
         try {
-             bolt.execute(input);
+            bolt.execute(input);
             _collector.ack(input);
-        } catch(FailedException e) {
+        } catch (FailedException e) {
             LOG.error("Failed to process tuple in batch", e);
-            _collector.fail(input);                
+            _collector.fail(input);
         }
     }
 
@@ -78,30 +78,29 @@ public class BatchBoltExecutor implements IRichBolt, FinishedCallback, TimeoutCa
 
     @Override
     public void timeoutId(Object attempt) {
-        _openTransactions.remove(attempt);        
-    }    
-    
+        _openTransactions.remove(attempt);
+    }
 
     @Override
     public void declareOutputFields(OutputFieldsDeclarer declarer) {
         newTransactionalBolt().declareOutputFields(declarer);
     }
-    
+
     @Override
     public Map<String, Object> getComponentConfiguration() {
         return newTransactionalBolt().getComponentConfiguration();
     }
-    
+
     private IBatchBolt getBatchBolt(Object id) {
         IBatchBolt bolt = _openTransactions.get(id);
-        if(bolt==null) {
+        if (bolt == null) {
             bolt = newTransactionalBolt();
             bolt.prepare(_conf, _context, _collector, id);
-            _openTransactions.put(id, bolt);            
+            _openTransactions.put(id, bolt);
         }
         return bolt;
     }
-    
+
     private IBatchBolt newTransactionalBolt() {
         return Utils.javaDeserialize(_boltSer, IBatchBolt.class);
     }
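
BatchBoltExecutor deserializes a fresh IBatchBolt per batch id and drives finishBatch() through the FinishedCallback path. A minimal bolt it could wrap might look like this sketch (an illustration of the standard IBatchBolt contract, not code from this patch; names are placeholders):

    import java.util.Map;

    import backtype.storm.coordination.BatchOutputCollector;
    import backtype.storm.coordination.IBatchBolt;
    import backtype.storm.task.TopologyContext;
    import backtype.storm.topology.OutputFieldsDeclarer;
    import backtype.storm.tuple.Fields;
    import backtype.storm.tuple.Tuple;
    import backtype.storm.tuple.Values;

    public class CountBatchBolt implements IBatchBolt<Object> {
        private BatchOutputCollector collector;
        private Object batchId;
        private int count = 0;

        @Override
        public void prepare(Map conf, TopologyContext context,
                BatchOutputCollector collector, Object id) {
            this.collector = collector;
            this.batchId = id;
        }

        @Override
        public void execute(Tuple tuple) {
            count++; // one increment per tuple seen in this batch
        }

        @Override
        public void finishBatch() {
            // Emit the per-batch result once the coordinator closes the batch.
            collector.emit(new Values(batchId, count));
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            declarer.declare(new Fields("id", "count"));
        }

        @Override
        public Map<String, Object> getComponentConfiguration() {
            return null;
        }
    }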

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/coordination/BatchOutputCollector.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/coordination/BatchOutputCollector.java b/jstorm-core/src/main/java/backtype/storm/coordination/BatchOutputCollector.java
index f5f3457..0b99339 100755
--- a/jstorm-core/src/main/java/backtype/storm/coordination/BatchOutputCollector.java
+++ b/jstorm-core/src/main/java/backtype/storm/coordination/BatchOutputCollector.java
@@ -30,17 +30,16 @@ public abstract class BatchOutputCollector {
     }
 
     public abstract List<Integer> emit(String streamId, List<Object> tuple);
-    
+
     /**
-     * Emits a tuple to the specified task on the default output stream. This output
-     * stream must have been declared as a direct stream, and the specified task must
-     * use a direct grouping on this stream to receive the message.
+     * Emits a tuple to the specified task on the default output stream. This output stream must have been declared as a direct stream, and the specified task
+     * must use a direct grouping on this stream to receive the message.
      */
     public void emitDirect(int taskId, List<Object> tuple) {
         emitDirect(taskId, Utils.DEFAULT_STREAM_ID, tuple);
     }
-    
-    public abstract void emitDirect(int taskId, String streamId, List<Object> tuple); 
-    
+
+    public abstract void emitDirect(int taskId, String streamId, List<Object> tuple);
+
     public abstract void reportError(Throwable error);
 }
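
To make the emitDirect contract concrete: the stream must be declared direct, and the consumer must subscribe with directGrouping. A hedged fragment, with the stream id and task id as placeholders:

    import backtype.storm.coordination.BatchOutputCollector;
    import backtype.storm.topology.OutputFieldsDeclarer;
    import backtype.storm.tuple.Fields;
    import backtype.storm.tuple.Values;

    public class DirectEmitSketch {
        // Declare "direct-stream" as a direct stream (second argument = true).
        public static void declare(OutputFieldsDeclarer declarer) {
            declarer.declareStream("direct-stream", true, new Fields("value"));
        }

        // Send one tuple straight to the given consumer task on that stream.
        public static void emitTo(BatchOutputCollector collector, int taskId) {
            collector.emitDirect(taskId, "direct-stream", new Values(42));
        }
    }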

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/coordination/BatchOutputCollectorImpl.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/coordination/BatchOutputCollectorImpl.java b/jstorm-core/src/main/java/backtype/storm/coordination/BatchOutputCollectorImpl.java
index cae7560..44a1f01 100755
--- a/jstorm-core/src/main/java/backtype/storm/coordination/BatchOutputCollectorImpl.java
+++ b/jstorm-core/src/main/java/backtype/storm/coordination/BatchOutputCollectorImpl.java
@@ -23,11 +23,11 @@ import java.util.List;
 
 public class BatchOutputCollectorImpl extends BatchOutputCollector {
     OutputCollector _collector;
-    
+
     public BatchOutputCollectorImpl(OutputCollector collector) {
         _collector = collector;
     }
-    
+
     @Override
     public List<Integer> emit(String streamId, List<Object> tuple) {
         return _collector.emit(streamId, tuple);
@@ -42,11 +42,11 @@ public class BatchOutputCollectorImpl extends BatchOutputCollector {
     public void reportError(Throwable error) {
         _collector.reportError(error);
     }
-    
+
     public void ack(Tuple tup) {
         _collector.ack(tup);
     }
-    
+
     public void fail(Tuple tup) {
         _collector.fail(tup);
     }


http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerData.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerData.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerData.java
index da69070..0cc16e4 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerData.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerData.java
@@ -1,3 +1,4 @@
+
 /**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -17,27 +18,6 @@
  */
 package com.alibaba.jstorm.daemon.worker;
 
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.URL;
-import java.security.InvalidParameterException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentSkipListSet;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.commons.lang.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import backtype.storm.Config;
 import backtype.storm.generated.InvalidTopologyException;
 import backtype.storm.generated.StormTopology;
@@ -48,28 +28,38 @@ import backtype.storm.scheduler.WorkerSlot;
 import backtype.storm.utils.DisruptorQueue;
 import backtype.storm.utils.Utils;
 import backtype.storm.utils.WorkerClassLoader;
-
 import com.alibaba.jstorm.callback.AsyncLoopDefaultKill;
+import com.alibaba.jstorm.callback.AsyncLoopThread;
 import com.alibaba.jstorm.client.ConfigExtension;
-import com.alibaba.jstorm.cluster.Cluster;
-import com.alibaba.jstorm.cluster.ClusterState;
-import com.alibaba.jstorm.cluster.Common;
-import com.alibaba.jstorm.cluster.StormClusterState;
-import com.alibaba.jstorm.cluster.StormConfig;
-import com.alibaba.jstorm.common.metric.window.Metric;
+import com.alibaba.jstorm.cluster.*;
+import com.alibaba.jstorm.common.metric.AsmGauge;
+import com.alibaba.jstorm.common.metric.AsmMetric;
 import com.alibaba.jstorm.daemon.nimbus.StatusType;
 import com.alibaba.jstorm.daemon.worker.timer.TimerTrigger;
 import com.alibaba.jstorm.message.netty.ControlMessage;
+import com.alibaba.jstorm.metric.*;
 import com.alibaba.jstorm.schedule.Assignment;
-import com.alibaba.jstorm.schedule.Assignment.AssignmentType;
 import com.alibaba.jstorm.schedule.default_assign.ResourceWorkerSlot;
 import com.alibaba.jstorm.task.TaskShutdownDameon;
 import com.alibaba.jstorm.utils.JStormServerUtils;
 import com.alibaba.jstorm.utils.JStormUtils;
 import com.alibaba.jstorm.zk.ZkTool;
+import com.codahale.metrics.Gauge;
 import com.lmax.disruptor.WaitStrategy;
 import com.lmax.disruptor.dsl.ProducerType;
+import org.apache.commons.lang.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URL;
+import java.security.InvalidParameterException;
+import java.util.*;
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static com.alibaba.jstorm.schedule.Assignment.AssignmentType;
 
 public class WorkerData {
 
@@ -112,6 +102,8 @@ public class WorkerData {
 
     private volatile Set<Integer> outboundTasks;
     private Set<Integer> localTasks;
+    private Set<Integer> localNodeTasks;
+
 
     private ConcurrentHashMap<Integer, DisruptorQueue> innerTaskTransfer;
     private ConcurrentHashMap<Integer, DisruptorQueue> deserializeQueues;
@@ -152,15 +144,18 @@ public class WorkerData {
     private ScheduledExecutorService threadPool;
 
     private volatile Long assignmentTS; // Assignment timeStamp. The time of
-                                        // last update of assignment
+    // last update of assignment
+
     private volatile AssignmentType assignmentType;
-    
+
     private IConnection recvConnection;
 
-    @SuppressWarnings({ "rawtypes", "unchecked" })
-    public WorkerData(Map conf, IContext context, String topology_id,
-            String supervisor_id, int port, String worker_id, String jar_path)
-            throws Exception {
+    private JStormMetricsReporter metricReporter;
+
+    private AsyncLoopThread healthReporterThread;
+
+    @SuppressWarnings({"rawtypes", "unchecked"})
+    public WorkerData(Map conf, IContext context, String topology_id, String supervisor_id, int port, String worker_id, String jar_path) throws Exception {
 
         this.conf = conf;
         this.context = context;
@@ -170,7 +165,7 @@ public class WorkerData {
         this.workerId = worker_id;
 
         this.shutdown = new AtomicBoolean(false);
-        
+
         this.monitorEnable = new AtomicBoolean(true);
         this.topologyStatus = StatusType.active;
 
@@ -183,29 +178,54 @@ public class WorkerData {
         this.zkClusterstate = ZkTool.mk_distributed_cluster_state(conf);
         this.zkCluster = Cluster.mk_storm_cluster_state(zkClusterstate);
 
-        Map rawConf =
-                StormConfig.read_supervisor_topology_conf(conf, topology_id);
+        Map rawConf = StormConfig.read_supervisor_topology_conf(conf, topology_id);
         this.stormConf = new HashMap<Object, Object>();
         this.stormConf.putAll(conf);
         this.stormConf.putAll(rawConf);
-        
+
+        JStormMetrics.setTopologyId(topology_id);
+        JStormMetrics.setPort(port);
+        JStormMetrics.setDebug(ConfigExtension.isEnableMetricDebug(stormConf));
+        JStormMetrics.setEnabled(ConfigExtension.isEnableMetrics(stormConf));
+        JStormMetrics.addDebugMetrics(ConfigExtension.getDebugMetricNames(stormConf));
+        AsmMetric.setSampleRate(ConfigExtension.getMetricSampleRate(stormConf));
+
         ConfigExtension.setLocalSupervisorId(stormConf, supervisorId);
         ConfigExtension.setLocalWorkerId(stormConf, workerId);
         ConfigExtension.setLocalWorkerPort(stormConf, port);
         ControlMessage.setPort(port);
-        Metric.setEnable(ConfigExtension.isEnablePerformanceMetrics(stormConf));
+
+        JStormMetrics.registerWorkerTopologyMetric(
+                JStormMetrics.workerMetricName(MetricDef.CPU_USED_RATIO, MetricType.GAUGE),
+                new AsmGauge(new Gauge<Double>() {
+                    @Override
+                    public Double getValue() {
+                        return JStormUtils.getCpuUsage();
+                    }
+                }));
+
+        JStormMetrics.registerWorkerTopologyMetric(JStormMetrics.workerMetricName(MetricDef.MEMORY_USED, MetricType.GAUGE),
+                new AsmGauge(new Gauge<Double>() {
+                    @Override
+                    public Double getValue() {
+                        return JStormUtils.getMemUsage();
+                    }
+                }));
+
+        JStormMetrics.registerWorkerMetric(JStormMetrics.workerMetricName(MetricDef.DISK_USAGE, MetricType.GAUGE), new AsmGauge(new Gauge<Double>() {
+            @Override
+            public Double getValue() {
+                return JStormUtils.getDiskUsage();
+            }
+        }));
 
         LOG.info("Worker Configuration " + stormConf);
 
         try {
+            boolean enableClassloader = ConfigExtension.isEnableTopologyClassLoader(stormConf);
+            boolean enableDebugClassloader = ConfigExtension.isEnableClassloaderDebug(stormConf);
 
-            boolean enableClassloader =
-                    ConfigExtension.isEnableTopologyClassLoader(stormConf);
-            boolean enableDebugClassloader =
-                    ConfigExtension.isEnableClassloaderDebug(stormConf);
-
-            if (jar_path == null && enableClassloader == true
-                    && !conf.get(Config.STORM_CLUSTER_MODE).equals("local")) {
+            if (jar_path == null && enableClassloader == true && !conf.get(Config.STORM_CLUSTER_MODE).equals("local")) {
                 LOG.error("enable classloader, but not app jar");
                 throw new InvalidParameterException();
             }
@@ -221,14 +241,11 @@ public class WorkerData {
                     urls.add(url);
                 }
                 urlArray = urls.toArray(new URL[0]);
-
             }
 
-            WorkerClassLoader.mkInstance(urlArray, ClassLoader
-                    .getSystemClassLoader(), ClassLoader.getSystemClassLoader()
-                    .getParent(), enableClassloader, enableDebugClassloader);
+            WorkerClassLoader.mkInstance(urlArray, ClassLoader.getSystemClassLoader(), ClassLoader.getSystemClassLoader().getParent(), enableClassloader,
+                    enableDebugClassloader);
         } catch (Exception e) {
-            // TODO Auto-generated catch block
             LOG.error("init jarClassLoader error!", e);
             throw new InvalidParameterException();
         }
@@ -237,39 +254,27 @@ public class WorkerData {
             this.context = TransportFactory.makeContext(stormConf);
         }
 
-        boolean disruptorUseSleep =
-                ConfigExtension.isDisruptorUseSleep(stormConf);
+        boolean disruptorUseSleep = ConfigExtension.isDisruptorUseSleep(stormConf);
         DisruptorQueue.setUseSleep(disruptorUseSleep);
-        boolean isLimited =
-                ConfigExtension.getTopologyBufferSizeLimited(stormConf);
+        boolean isLimited = ConfigExtension.getTopologyBufferSizeLimited(stormConf);
         DisruptorQueue.setLimited(isLimited);
-        LOG.info("Disruptor use sleep:" + disruptorUseSleep + ", limited size:"
-                + isLimited);
+        LOG.info("Disruptor use sleep:" + disruptorUseSleep + ", limited size:" + isLimited);
 
         // this.transferQueue = new LinkedBlockingQueue<TransferData>();
-        int buffer_size =
-                Utils.getInt(conf.get(Config.TOPOLOGY_TRANSFER_BUFFER_SIZE));
-        WaitStrategy waitStrategy =
-                (WaitStrategy) JStormUtils.createDisruptorWaitStrategy(conf);
-        this.transferQueue =
-                DisruptorQueue.mkInstance("TotalTransfer", ProducerType.MULTI,
-                        buffer_size, waitStrategy);
+        int buffer_size = Utils.getInt(stormConf.get(Config.TOPOLOGY_TRANSFER_BUFFER_SIZE));
+        WaitStrategy waitStrategy = (WaitStrategy) JStormUtils.createDisruptorWaitStrategy(stormConf);
+        this.transferQueue = DisruptorQueue.mkInstance("TotalTransfer", ProducerType.MULTI, buffer_size, waitStrategy);
         this.transferQueue.consumerStarted();
-        this.sendingQueue =
-                DisruptorQueue.mkInstance("TotalSending", ProducerType.MULTI,
-                        buffer_size, waitStrategy);
+        this.sendingQueue = DisruptorQueue.mkInstance("TotalSending", ProducerType.MULTI, buffer_size, waitStrategy);
         this.sendingQueue.consumerStarted();
 
         this.nodeportSocket = new ConcurrentHashMap<WorkerSlot, IConnection>();
         this.taskNodeport = new ConcurrentHashMap<Integer, WorkerSlot>();
         this.workerToResource = new ConcurrentSkipListSet<ResourceWorkerSlot>();
-        this.innerTaskTransfer =
-                new ConcurrentHashMap<Integer, DisruptorQueue>();
-        this.deserializeQueues =
-                new ConcurrentHashMap<Integer, DisruptorQueue>();
+        this.innerTaskTransfer = new ConcurrentHashMap<Integer, DisruptorQueue>();
+        this.deserializeQueues = new ConcurrentHashMap<Integer, DisruptorQueue>();
         this.tasksToComponent = new ConcurrentHashMap<Integer, String>();
-        this.componentToSortedTasks =
-                new ConcurrentHashMap<String, List<Integer>>();
+        this.componentToSortedTasks = new ConcurrentHashMap<String, List<Integer>>();
 
         Assignment assignment = zkCluster.assignment_info(topologyId, null);
         if (assignment == null) {
@@ -288,8 +293,7 @@ public class WorkerData {
         LOG.info("Current worker taskList:" + taskids);
 
         // deserialize topology code from local dir
-        rawTopology =
-                StormConfig.read_supervisor_topology_code(conf, topology_id);
+        rawTopology = StormConfig.read_supervisor_topology_code(conf, topology_id);
         sysTopology = Common.system_topology(stormConf, rawTopology);
 
         generateMaps();
@@ -301,15 +305,17 @@ public class WorkerData {
         threadPool = Executors.newScheduledThreadPool(THREAD_POOL_NUM);
         TimerTrigger.setScheduledExecutorService(threadPool);
 
+        if (!StormConfig.local_mode(stormConf)) {
+            healthReporterThread = new AsyncLoopThread(new JStormHealthReporter(this));
+        }
+
         try {
-            Long tmp =
-                    StormConfig.read_supervisor_topology_timestamp(conf,
-                            topology_id);
+            Long tmp = StormConfig.read_supervisor_topology_timestamp(conf, topology_id);
             assignmentTS = (tmp == null ? System.currentTimeMillis() : tmp);
         } catch (FileNotFoundException e) {
             assignmentTS = System.currentTimeMillis();
         }
-        
+
         outboundTasks = new HashSet<Integer>();
 
         LOG.info("Successfully create WorkerData");
@@ -317,13 +323,10 @@ public class WorkerData {
     }
 
     /**
-     * private ConcurrentHashMap<Integer, WorkerSlot> taskNodeport; private
-     * HashMap<Integer, String> tasksToComponent; private Map<String,
-     * List<Integer>> componentToSortedTasks; private Map<String, Map<String,
-     * Fields>> componentToStreamToFields; private Map<String, Object>
-     * defaultResources; private Map<String, Object> userResources; private
-     * Map<String, Object> executorData; private Map registeredMetrics;
-     * 
+     * private ConcurrentHashMap<Integer, WorkerSlot> taskNodeport; private HashMap<Integer, String> tasksToComponent; private Map<String, List<Integer>>
+     * componentToSortedTasks; private Map<String, Map<String, Fields>> componentToStreamToFields; private Map<String, Object> defaultResources; private
+     * Map<String, Object> userResources; private Map<String, Object> executorData; private Map registeredMetrics;
+     *
      * @throws Exception
      */
     private void generateMaps() throws Exception {
@@ -335,7 +338,7 @@ public class WorkerData {
         this.registeredMetrics = new HashMap();
     }
 
-    public Map<Object, Object> getConf() {
+    public Map<Object, Object> getRawConf() {
         return conf;
     }
 
@@ -351,6 +354,10 @@ public class WorkerData {
         this.topologyStatus = topologyStatus;
     }
 
+    public Map<Object, Object> getConf() {
+        return stormConf;
+    }
+
     public Map<Object, Object> getStormConf() {
         return stormConf;
     }
@@ -396,7 +403,19 @@ public class WorkerData {
     }
 
     public ConcurrentSkipListSet<ResourceWorkerSlot> getWorkerToResource() {
-        return workerToResource;
+        synchronized (workerToResource) {
+            return workerToResource;
+        }
+    }
+
+    public void updateWorkerToResource(Set<ResourceWorkerSlot> workers) {
+        synchronized (workerToResource) {
+            Set<ResourceWorkerSlot> oldWorkers = workerToResource.clone();
+            oldWorkers.removeAll(workers);
+            if (oldWorkers.size() > 0)
+                workerToResource.removeAll(workers);
+            workerToResource.addAll(workers);
+        }
     }
 
     public ConcurrentHashMap<Integer, DisruptorQueue> getInnerTaskTransfer() {
@@ -471,8 +490,7 @@ public class WorkerData {
         this.shutdownTasks.add(shutdownTask);
     }
 
-    public List<TaskShutdownDameon> getShutdownDaemonbyTaskIds(
-            Set<Integer> taskIds) {
+    public List<TaskShutdownDameon> getShutdownDaemonbyTaskIds(Set<Integer> taskIds) {
         List<TaskShutdownDameon> ret = new ArrayList<TaskShutdownDameon>();
         for (TaskShutdownDameon shutdown : shutdownTasks) {
             if (taskIds.contains(shutdown.getTaskId()))
@@ -494,7 +512,7 @@ public class WorkerData {
             outTaskStatus.put(taskId, false);
         }
     }
-    
+
     public Map<Integer, Boolean> getOutboundTaskStatus() {
         return outTaskStatus;
     }
@@ -502,7 +520,7 @@ public class WorkerData {
     public void addOutboundTaskStatusIfAbsent(Integer taskId) {
         outTaskStatus.putIfAbsent(taskId, false);
     }
-    
+
     public void removeOutboundTaskStatus(Integer taskId) {
         outTaskStatus.remove(taskId);
     }
@@ -512,8 +530,7 @@ public class WorkerData {
     }
 
     public boolean isOutboundTaskActive(Integer taskId) {
-        return outTaskStatus.get(taskId) != null ? outTaskStatus.get(taskId)
-                : false;
+        return outTaskStatus.get(taskId) != null ? outTaskStatus.get(taskId) : false;
     }
 
     public ScheduledExecutorService getThreadPool() {
@@ -527,11 +544,11 @@ public class WorkerData {
     public Long getAssignmentTs() {
         return assignmentTS;
     }
-    
+
     public void setAssignmentType(AssignmentType type) {
         this.assignmentType = type;
     }
-    
+
     public AssignmentType getAssignmentType() {
         return assignmentType;
     }
@@ -544,28 +561,33 @@ public class WorkerData {
 
     public void updateTaskIds(Assignment assignment) {
         this.taskids.clear();
-        this.taskids.addAll(assignment
-                .getCurrentWorkerTasks(supervisorId, port));
+        this.taskids.addAll(assignment.getCurrentWorkerTasks(supervisorId, port));
+    }
+
+    public Set<Integer> getLocalNodeTasks() {
+        return localNodeTasks;
+    }
+
+    public void setLocalNodeTasks(Set<Integer> localNodeTasks) {
+        this.localNodeTasks = localNodeTasks;
     }
 
     public void setOutboundTasks(Set<Integer> outboundTasks) {
         this.outboundTasks = outboundTasks;
     }
-    
+
     public Set<Integer> getOutboundTasks() {
         return outboundTasks;
     }
 
     private void updateTaskComponentMap() throws Exception {
-        Map<Integer, String> tmp = Common.getTaskToComponent(
-                        Cluster.get_all_taskInfo(zkCluster, topologyId));
+        Map<Integer, String> tmp = Common.getTaskToComponent(Cluster.get_all_taskInfo(zkCluster, topologyId));
 
         this.tasksToComponent.putAll(tmp);
         LOG.info("Updated tasksToComponentMap:" + tasksToComponent);
 
         this.componentToSortedTasks.putAll(JStormUtils.reverse_map(tmp));
-        for (java.util.Map.Entry<String, List<Integer>> entry : componentToSortedTasks
-                .entrySet()) {
+        for (Map.Entry<String, List<Integer>> entry : componentToSortedTasks.entrySet()) {
             List<Integer> tasks = entry.getValue();
 
             Collections.sort(tasks);
@@ -573,16 +595,13 @@ public class WorkerData {
     }
 
     private void updateStormTopology() {
-        StormTopology rawTmp = null;
-        StormTopology sysTmp = null;
-
+        StormTopology rawTmp;
+        StormTopology sysTmp;
         try {
-            rawTmp =
-                    StormConfig.read_supervisor_topology_code(conf, topologyId);
+            rawTmp = StormConfig.read_supervisor_topology_code(conf, topologyId);
             sysTmp = Common.system_topology(stormConf, rawTopology);
         } catch (IOException e) {
-            LOG.error("Failed to read supervisor topology code for "
-                    + topologyId, e);
+            LOG.error("Failed to read supervisor topology code for " + topologyId, e);
             return;
         } catch (InvalidTopologyException e) {
             LOG.error("Failed to update sysTopology for " + topologyId, e);
@@ -593,8 +612,7 @@ public class WorkerData {
         updateTopology(sysTopology, sysTmp);
     }
 
-    private void updateTopology(StormTopology oldTopology,
-            StormTopology newTopology) {
+    private void updateTopology(StormTopology oldTopology, StormTopology newTopology) {
         oldTopology.set_bolts(newTopology.get_bolts());
         oldTopology.set_spouts(newTopology.get_spouts());
         oldTopology.set_state_spouts(newTopology.get_state_spouts());
@@ -604,12 +622,19 @@ public class WorkerData {
         return monitorEnable;
     }
 
-	public IConnection getRecvConnection() {
-		return recvConnection;
-	}
+    public IConnection getRecvConnection() {
+        return recvConnection;
+    }
 
-	public void setRecvConnection(IConnection recvConnection) {
-		this.recvConnection = recvConnection;
-	}
+    public void setRecvConnection(IConnection recvConnection) {
+        this.recvConnection = recvConnection;
+    }
 
+    public JStormMetricsReporter getMetricsReporter() {
+        return metricReporter;
+    }
+
+    public void setMetricsReporter(JStormMetricsReporter metricReporter) {
+        this.metricReporter = metricReporter;
+    }
 }
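
The gauge registrations added to this constructor all follow one pattern; for reference, registering a further worker gauge the same way might look like the sketch below ("my.gauge" and its value are placeholders, assuming workerMetricName accepts an arbitrary metric-name string):

    import com.alibaba.jstorm.common.metric.AsmGauge;
    import com.alibaba.jstorm.metric.JStormMetrics;
    import com.alibaba.jstorm.metric.MetricType;
    import com.codahale.metrics.Gauge;

    public class GaugeRegistrationExample {
        public static void register() {
            JStormMetrics.registerWorkerMetric(
                    JStormMetrics.workerMetricName("my.gauge", MetricType.GAUGE),
                    new AsmGauge(new Gauge<Double>() {
                        @Override
                        public Double getValue() {
                            return 1.0; // replace with a real measurement
                        }
                    }));
        }
    }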

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerHeartbeat.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerHeartbeat.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerHeartbeat.java
index d8ec622..9d1cca7 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerHeartbeat.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerHeartbeat.java
@@ -25,8 +25,7 @@ import org.apache.commons.lang.builder.ToStringBuilder;
 import org.apache.commons.lang.builder.ToStringStyle;
 
 /**
- * Worker's Heartbeat data woker will update the object to
- * /LOCAL-DIR/workers/${woker-id}/heartbeats
+ * Worker's heartbeat data; the worker will update the object to /LOCAL-DIR/workers/${worker-id}/heartbeats
  * 
  * @author yannian/Longda
  * 
@@ -39,8 +38,7 @@ public class WorkerHeartbeat implements Serializable {
     private Set<Integer> taskIds;
     private Integer port;
 
-    public WorkerHeartbeat(int timeSecs, String topologyId,
-            Set<Integer> taskIds, Integer port) {
+    public WorkerHeartbeat(int timeSecs, String topologyId, Set<Integer> taskIds, Integer port) {
 
         this.timeSecs = timeSecs;
         this.topologyId = topologyId;
@@ -83,7 +81,6 @@ public class WorkerHeartbeat implements Serializable {
 
     @Override
     public String toString() {
-        return ToStringBuilder.reflectionToString(this,
-                ToStringStyle.SHORT_PREFIX_STYLE);
+        return ToStringBuilder.reflectionToString(this, ToStringStyle.SHORT_PREFIX_STYLE);
     }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerReportError.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerReportError.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerReportError.java
new file mode 100644
index 0000000..79075bc
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerReportError.java
@@ -0,0 +1,38 @@
+package com.alibaba.jstorm.daemon.worker;
+
+import com.alibaba.jstorm.cluster.StormClusterState;
+import com.alibaba.jstorm.utils.TimeFormat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Date;
+import java.util.Set;
+
+/**
+ * Created by xiaojian.fxj on 2015/8/12.
+ */
+public class WorkerReportError {
+    private static Logger LOG = LoggerFactory.getLogger(WorkerReportError.class);
+    private StormClusterState zkCluster;
+    private String hostName;
+
+    public WorkerReportError(StormClusterState _storm_cluster_state,
+                             String _hostName) {
+        this.zkCluster = _storm_cluster_state;
+        this.hostName = _hostName;
+    }
+    public void report(String topology_id, Integer worker_port,
+                       Set<Integer> tasks, String error) {
+        // Report worker's error to zk
+        try {
+            Date now = new Date();
+            String nowStr = TimeFormat.getSecond(now);
+            String errorInfo = error + " on " + this.hostName + ":" + worker_port + ", " + nowStr;
+            for (Integer task : tasks){
+                zkCluster.report_task_error(topology_id, task, errorInfo, null);
+            }
+        } catch (Exception e) {
+            LOG.error("Failed update "+worker_port+ "errors to ZK" + "\n", e);
+        }
+    }
+}
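
Illustrative usage of the new class, not taken from the patch; the cluster-state handle, host, port, and task ids are placeholders:

    import com.alibaba.jstorm.cluster.StormClusterState;

    import java.util.Arrays;
    import java.util.HashSet;

    public class ReportErrorExample {
        // zkCluster must be an already-connected cluster-state handle.
        public static void reportOom(StormClusterState zkCluster) {
            WorkerReportError reporter = new WorkerReportError(zkCluster, "host-1");
            reporter.report("topology-1", 6800,
                    new HashSet<Integer>(Arrays.asList(1, 2)), "out of memory");
        }
    }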

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerShutdown.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerShutdown.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerShutdown.java
index 0691fee..403c8cf 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerShutdown.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/WorkerShutdown.java
@@ -86,14 +86,13 @@ public class WorkerShutdown implements ShutdownableDameon {
             LOG.info("Worker has been shutdown already");
             return;
         }
-        
-        if(recvConnection != null) {
-        	recvConnection.close();
+
+        if (recvConnection != null) {
+            recvConnection.close();
         }
 
         AsyncLoopRunnable.getShutdown().set(true);
         threadPool.shutdown();
-        
 
         // shutdown tasks
         for (ShutdownableDameon task : shutdowntasks) {

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/hearbeat/SyncContainerHb.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/hearbeat/SyncContainerHb.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/hearbeat/SyncContainerHb.java
index 783f584..ab4213f 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/hearbeat/SyncContainerHb.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/hearbeat/SyncContainerHb.java
@@ -35,8 +35,7 @@ import com.alibaba.jstorm.utils.JStormUtils;
 import com.alibaba.jstorm.utils.PathUtils;
 
 public class SyncContainerHb extends RunnableCallback {
-    private final static Logger LOG = LoggerFactory
-            .getLogger(SyncContainerHb.class);
+    private final static Logger LOG = LoggerFactory.getLogger(SyncContainerHb.class);
 
     private String readDir;
     private String writeDir;
@@ -113,8 +112,7 @@ public class SyncContainerHb extends RunnableCallback {
         try {
             hb = Long.valueOf(biggest);
         } catch (Exception e) {
-            LOG.info("Heartbeat file " + biggest
-                    + " isn't a valid file, remove it");
+            LOG.info("Heartbeat file " + biggest + " isn't a valid file, remove it");
 
             String path = readDir + File.separator + biggest;
             try {
@@ -151,8 +149,7 @@ public class SyncContainerHb extends RunnableCallback {
             return;
         }
 
-        String seconds =
-                String.valueOf(System.currentTimeMillis() / SECOND_MILLISCOND);
+        String seconds = String.valueOf(System.currentTimeMillis() / SECOND_MILLISCOND);
 
         String path = writeDir + File.separator + seconds;
 
@@ -289,8 +286,7 @@ public class SyncContainerHb extends RunnableCallback {
         this.reserverNum = reserverNum;
     }
 
-    public static AsyncLoopThread mkInstance(String containerHbDir,
-            String hbDir, int timeout, int frequence) {
+    public static AsyncLoopThread mkInstance(String containerHbDir, String hbDir, int timeout, int frequence) {
         SyncContainerHb syncContainerHbThread = new SyncContainerHb();
 
         syncContainerHbThread.setReadDir(containerHbDir);
@@ -306,9 +302,7 @@ public class SyncContainerHb extends RunnableCallback {
         sb.append(",frequence:").append(frequence);
         LOG.info(sb.toString());
 
-        AsyncLoopThread thread =
-                new AsyncLoopThread(syncContainerHbThread, true,
-                        Thread.NORM_PRIORITY, true);
+        AsyncLoopThread thread = new AsyncLoopThread(syncContainerHbThread, true, Thread.NORM_PRIORITY, true);
 
         return thread;
     }
@@ -329,31 +323,23 @@ public class SyncContainerHb extends RunnableCallback {
 
     }
 
-    public static AsyncLoopThread mkSupervisorInstance(Map conf)
-            throws IOException {
-        boolean isEnableContainer =
-                ConfigExtension.isEnableContainerSupervisor();
+    public static AsyncLoopThread mkSupervisorInstance(Map conf) throws IOException {
+        boolean isEnableContainer = ConfigExtension.isEnableContainerSupervisor();
         if (isEnableContainer) {
-            String containerHbDir =
-                    ConfigExtension.getContainerSupervisorHearbeat();
+            String containerHbDir = ConfigExtension.getContainerSupervisorHearbeat();
             String hbDir = StormConfig.supervisorHearbeatForContainer(conf);
-            int timeout =
-                    ConfigExtension.getContainerHeartbeatTimeoutSeconds(conf);
-            int frequence =
-                    ConfigExtension.getContainerHeartbeatFrequence(conf);
+            int timeout = ConfigExtension.getContainerHeartbeatTimeoutSeconds(conf);
+            int frequence = ConfigExtension.getContainerHeartbeatFrequence(conf);
 
             return mkInstance(containerHbDir, hbDir, timeout, frequence);
         }
 
-        boolean isWorkerAutomaticStop =
-                ConfigExtension.isWorkerStopWithoutSupervisor(conf);
+        boolean isWorkerAutomaticStop = ConfigExtension.isWorkerStopWithoutSupervisor(conf);
         if (isWorkerAutomaticStop) {
             String containerHbDir = null;
             String hbDir = StormConfig.supervisorHearbeatForContainer(conf);
-            int timeout =
-                    ConfigExtension.getContainerHeartbeatTimeoutSeconds(conf);
-            int frequence =
-                    ConfigExtension.getContainerHeartbeatFrequence(conf);
+            int timeout = ConfigExtension.getContainerHeartbeatTimeoutSeconds(conf);
+            int frequence = ConfigExtension.getContainerHeartbeatFrequence(conf);
 
             return mkInstance(containerHbDir, hbDir, timeout, frequence);
         }
@@ -364,17 +350,14 @@ public class SyncContainerHb extends RunnableCallback {
     }
 
     public static AsyncLoopThread mkWorkerInstance(Map conf) throws IOException {
-        boolean isEnableContainer =
-                ConfigExtension.isEnableContainerSupervisor();
-        boolean isWorkerAutomaticStop =
-                ConfigExtension.isWorkerStopWithoutSupervisor(conf);
+        boolean isEnableContainer = ConfigExtension.isEnableContainerSupervisor();
+        boolean isWorkerAutomaticStop = ConfigExtension.isWorkerStopWithoutSupervisor(conf);
         if (isEnableContainer == false && isWorkerAutomaticStop == false) {
             LOG.info("Run worker without Apsara/Yarn container");
             return null;
         }
 
-        String containerHbDir =
-                StormConfig.supervisorHearbeatForContainer(conf);
+        String containerHbDir = StormConfig.supervisorHearbeatForContainer(conf);
         String hbDir = null;
         int timeout = ConfigExtension.getContainerHeartbeatTimeoutSeconds(conf);
         int frequence = ConfigExtension.getContainerHeartbeatFrequence(conf);

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/hearbeat/WorkerHeartbeatRunable.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/hearbeat/WorkerHeartbeatRunable.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/hearbeat/WorkerHeartbeatRunable.java
index 74aca05..51e74f8 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/hearbeat/WorkerHeartbeatRunable.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/hearbeat/WorkerHeartbeatRunable.java
@@ -44,8 +44,7 @@ import com.alibaba.jstorm.utils.TimeUtils;
  * 
  */
 public class WorkerHeartbeatRunable extends RunnableCallback {
-    private static Logger LOG = LoggerFactory
-            .getLogger(WorkerHeartbeatRunable.class);
+    private static Logger LOG = LoggerFactory.getLogger(WorkerHeartbeatRunable.class);
 
     private WorkerData workerData;
 
@@ -69,8 +68,7 @@ public class WorkerHeartbeatRunable extends RunnableCallback {
         this.worker_id = workerData.getWorkerId();
         this.port = workerData.getPort();
         this.topologyId = workerData.getTopologyId();
-        this.task_ids =
-                new CopyOnWriteArraySet<Integer>(workerData.getTaskids());
+        this.task_ids = new CopyOnWriteArraySet<Integer>(workerData.getTaskids());
         this.shutdown = workerData.getShutdown();
 
         String key = Config.WORKER_HEARTBEAT_FREQUENCY_SECS;
@@ -97,11 +95,9 @@ public class WorkerHeartbeatRunable extends RunnableCallback {
     public void doHeartbeat() throws IOException {
 
         int currtime = TimeUtils.current_time_secs();
-        WorkerHeartbeat hb =
-                new WorkerHeartbeat(currtime, topologyId, task_ids, port);
+        WorkerHeartbeat hb = new WorkerHeartbeat(currtime, topologyId, task_ids, port);
 
-        LOG.debug("Doing heartbeat:" + worker_id + ",port:" + port + ",hb"
-                + hb.toString());
+        LOG.debug("Doing heartbeat:" + worker_id + ",port:" + port + ",hb" + hb.toString());
 
         LocalState state = getWorkerState();
         state.put(Common.LS_WORKER_HEARTBEAT, hb);

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/BackpressureCheckTrigger.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/BackpressureCheckTrigger.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/BackpressureCheckTrigger.java
new file mode 100644
index 0000000..19ba2c9
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/BackpressureCheckTrigger.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.alibaba.jstorm.daemon.worker.timer;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.alibaba.jstorm.task.backpressure.BackpressureTrigger;
+
+
+public class BackpressureCheckTrigger extends TimerTrigger {
+    private static final Logger LOG = LoggerFactory.getLogger(BackpressureCheckTrigger.class);
+
+    private BackpressureTrigger trigger;
+
+    public BackpressureCheckTrigger(int initDelay, int frequence, String name, BackpressureTrigger trigger) {
+        if (frequence <= 0) {
+            LOG.warn(" The frequence of " + name + " is invalid");
+            frequence = 1;
+        }
+        this.firstTime = initDelay;
+        this.frequence = frequence;
+        this.trigger = trigger;
+    }
+
+    @Override
+    public void run() {
+        try {
+            trigger.checkAndTrigger();
+        } catch (Exception e) {
+            LOG.warn("Failed to publish timer event to " + name, e);
+            return;
+        }
+    }
+
+}
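
For intuition only: the initDelay/frequence pair behaves like plain fixed-rate scheduling, as in the stand-in sketch below (this is not TimerTrigger's actual implementation):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class TriggerSchedulingSketch {
        public static void main(String[] args) {
            ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
            int initDelay = 60;  // corresponds to firstTime, in seconds
            int frequence = 15;  // corresponds to frequence, in seconds
            pool.scheduleAtFixedRate(new Runnable() {
                @Override
                public void run() {
                    System.out.println("checkAndTrigger() would run here");
                }
            }, initDelay, frequence, TimeUnit.SECONDS);
        }
    }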

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/RotatingMapTrigger.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/RotatingMapTrigger.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/RotatingMapTrigger.java
index 5a59e6f..a1a0990 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/RotatingMapTrigger.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/RotatingMapTrigger.java
@@ -29,26 +29,20 @@ import com.alibaba.jstorm.task.acker.Acker;
 import com.alibaba.jstorm.utils.JStormUtils;
 
 public class RotatingMapTrigger extends TimerTrigger {
-    private static final Logger LOG = LoggerFactory
-            .getLogger(RotatingMapTrigger.class);
+    private static final Logger LOG = LoggerFactory.getLogger(RotatingMapTrigger.class);
 
     public RotatingMapTrigger(Map conf, String name, DisruptorQueue queue) {
         this.name = name;
         this.queue = queue;
         this.opCode = TimerConstants.ROTATING_MAP;
 
-        int msgTimeOut =
-                JStormUtils.parseInt(
-                        conf.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS), 30);
+        int msgTimeOut = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS), 30);
         frequence = (msgTimeOut) / (Acker.TIMEOUT_BUCKET_NUM - 1);
         if (frequence <= 0) {
             frequence = 1;
         }
 
-        firstTime =
-                JStormUtils.parseInt(
-                        conf.get(Config.SUPERVISOR_WORKER_START_TIMEOUT_SECS),
-                        120);
+        firstTime = JStormUtils.parseInt(conf.get(Config.SUPERVISOR_WORKER_START_TIMEOUT_SECS), 120);
 
         firstTime += frequence;
     }
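
A worked example of the arithmetic above, assuming Acker.TIMEOUT_BUCKET_NUM is 3 and the default 30-second message timeout with a 120-second worker start timeout:

    public class RotationMathExample {
        public static void main(String[] args) {
            int msgTimeOut = 30;                               // TOPOLOGY_MESSAGE_TIMEOUT_SECS
            int timeoutBuckets = 3;                            // assumed Acker.TIMEOUT_BUCKET_NUM
            int frequence = msgTimeOut / (timeoutBuckets - 1); // 15s between rotations
            int firstTime = 120 + frequence;                   // first rotation ~135s after start
            System.out.println(frequence + "s period, first fire at " + firstTime + "s");
        }
    }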

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/TaskBatchCheckTrigger.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/TaskBatchCheckTrigger.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/TaskBatchCheckTrigger.java
new file mode 100644
index 0000000..0e00f66
--- /dev/null
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/TaskBatchCheckTrigger.java
@@ -0,0 +1,32 @@
+package com.alibaba.jstorm.daemon.worker.timer;
+
+import org.apache.log4j.Logger;
+
+import com.alibaba.jstorm.task.TaskBatchTransfer;
+
+public class TaskBatchCheckTrigger extends TimerTrigger {
+    private static final Logger LOG = Logger.getLogger(TaskBatchCheckTrigger.class);
+
+    private TaskBatchTransfer batchTransfer;
+
+    public TaskBatchCheckTrigger(int frequence, String name, TaskBatchTransfer transfer) {
+        if (frequence <= 0) {
+            LOG.warn(" The frequence of " + name + " is invalid");
+            frequence = 1;
+        }
+        this.firstTime = frequence;
+        this.frequence = frequence;
+        this.batchTransfer = transfer;
+    }
+
+    @Override
+    public void run() {
+        try {
+            batchTransfer.startCheck();
+        } catch (Exception e) {
+            LOG.warn("Failed to public timer event to " + name, e);
+            return;
+        }
+    }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/TaskBatchFlushTrigger.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/TaskBatchFlushTrigger.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/TaskBatchFlushTrigger.java
index 3a5353f..96165d0 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/TaskBatchFlushTrigger.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/TaskBatchFlushTrigger.java
@@ -22,9 +22,9 @@ import org.slf4j.LoggerFactory;
 
 import com.alibaba.jstorm.task.TaskBatchTransfer;
 
-public class TaskBatchFlushTrigger extends TimerTrigger{
+public class TaskBatchFlushTrigger extends TimerTrigger {
     private static final Logger LOG = LoggerFactory.getLogger(TaskBatchFlushTrigger.class);
-    
+
     private TaskBatchTransfer batchTransfer;
 
     public TaskBatchFlushTrigger(int frequence, String name, TaskBatchTransfer transfer) {

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/TaskHeartbeatTrigger.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/TaskHeartbeatTrigger.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/TaskHeartbeatTrigger.java
index 0b67776..ad81a2b 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/TaskHeartbeatTrigger.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/TaskHeartbeatTrigger.java
@@ -17,6 +17,7 @@
  */
 package com.alibaba.jstorm.daemon.worker.timer;
 
+import java.util.List;
 import java.util.Map;
 import java.util.concurrent.BlockingQueue;
 
@@ -24,33 +25,66 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import backtype.storm.Config;
+import backtype.storm.spout.SpoutOutputCollector;
+import backtype.storm.task.OutputCollector;
+import backtype.storm.task.TopologyContext;
+import backtype.storm.tuple.Tuple;
+import backtype.storm.tuple.TupleExt;
+import backtype.storm.tuple.TupleImplExt;
+import backtype.storm.tuple.Values;
 import backtype.storm.utils.DisruptorQueue;
 
-import com.alibaba.jstorm.daemon.worker.timer.TimerTrigger.TimerEvent;
+import com.alibaba.jstorm.cluster.Common;
+import com.alibaba.jstorm.task.error.ITaskReportErr;
+import com.alibaba.jstorm.task.UptimeComputer;
+import com.alibaba.jstorm.utils.IntervalCheck;
 import com.alibaba.jstorm.utils.JStormUtils;
+import com.alibaba.jstorm.utils.TimeUtils;
 
 public class TaskHeartbeatTrigger extends TimerTrigger {
-    private static final Logger LOG = LoggerFactory
-            .getLogger(TaskHeartbeatTrigger.class);
+    private static final Logger LOG = LoggerFactory.getLogger(TaskHeartbeatTrigger.class);
 
     private int taskId;
-    
-    private BlockingQueue<Object> controlQueue;
+    private String componentId;
+    private TopologyContext sysTopologyCtx;
 
-    public TaskHeartbeatTrigger(Map conf, String name, DisruptorQueue queue,
-            BlockingQueue<Object> controlQueue, int taskId) {
+    private BlockingQueue<Object> controlQueue = null;
+
+    private OutputCollector boltOutputCollector = null;
+    private SpoutOutputCollector spoutOutputCollector = null;
+
+    private long executeThreadHbTime;
+    private int taskHbTimeout;
+
+    private ITaskReportErr reportError;
+
+    private IntervalCheck intervalCheck;
+
+    private UptimeComputer uptime;
+
+    public TaskHeartbeatTrigger(Map conf, String name, DisruptorQueue queue, BlockingQueue<Object> controlQueue, int taskId, String componentId,
+            TopologyContext sysTopologyCtx, ITaskReportErr reportError) {
         this.name = name;
         this.queue = queue;
         this.controlQueue = controlQueue;
         this.opCode = TimerConstants.TASK_HEARTBEAT;
 
         this.taskId = taskId;
+        this.componentId = componentId;
+        this.sysTopologyCtx = sysTopologyCtx;
+
+        this.frequence = JStormUtils.parseInt(conf.get(Config.TASK_HEARTBEAT_FREQUENCY_SECS), 10);
+        this.firstTime = frequence;
 
-        frequence =
-                JStormUtils.parseInt(
-                        conf.get(Config.TASK_HEARTBEAT_FREQUENCY_SECS), 10);
+        this.executeThreadHbTime = TimeUtils.current_time_secs();
+        this.taskHbTimeout = JStormUtils.parseInt(conf.get(Config.NIMBUS_TASK_TIMEOUT_SECS), 180);
+        this.intervalCheck = new IntervalCheck();
+        this.intervalCheck.setInterval(taskHbTimeout);
+        this.intervalCheck.start();
 
-        firstTime = frequence;
+        this.reportError = reportError;
+
+        this.uptime = new UptimeComputer();
     }
 
     @Override
@@ -60,7 +94,6 @@ public class TaskHeartbeatTrigger extends TimerTrigger {
 
     @Override
     public void run() {
-
         try {
             updateObject();
 
@@ -69,16 +102,63 @@ public class TaskHeartbeatTrigger extends TimerTrigger {
                 return;
             }
 
+            if (intervalCheck.check()) {
+                checkExecuteThreadHb();
+            }
+
+            if (componentId.equals(Common.TOPOLOGY_MASTER_COMPONENT_ID)) {
+                Values values = new Values(uptime.uptime());
+                TupleExt tuple = new TupleImplExt(sysTopologyCtx, values, taskId, Common.TOPOLOGY_MASTER_HB_STREAM_ID);
+                queue.publish(tuple);
+            } else {
+                // Send task heartbeat to topology master
+                sendHbMsg();
+            }
+
+            // Send a message used to monitor the execute thread
             TimerEvent event = new TimerEvent(opCode, object);
-            
-            controlQueue.offer(event);
-            LOG.debug("Offer task HB event to controlQueue, taskId=" + taskId);
+            boolean ret = controlQueue.offer(event);
+            if (ret)
+                LOG.debug("Offer task HB event to controlQueue, taskId=" + taskId);
+            else
+                LOG.debug("Failed to offer task HB event to controlQueue, taskId=" + taskId);
         } catch (Exception e) {
-            LOG.warn("Failed to public timer event to " + name, e);
+            LOG.warn("Failed to publish timer event to " + name, e);
             return;
         }
 
         LOG.debug(" Trigger timer event to " + name);
 
     }
+
+    public void setSpoutOutputCollector(SpoutOutputCollector spoutOutputCollector) {
+        this.spoutOutputCollector = spoutOutputCollector;
+    }
+
+    public void setBoltOutputCollector(OutputCollector boltOutputCollector) {
+        this.boltOutputCollector = boltOutputCollector;
+    }
+
+    public void setExeThreadHbTime(long hbTime) {
+        this.executeThreadHbTime = hbTime;
+    }
+
+    private void sendHbMsg() {
+        List values = JStormUtils.mk_list(uptime.uptime());
+        if (spoutOutputCollector != null) {
+            spoutOutputCollector.emit(Common.TOPOLOGY_MASTER_HB_STREAM_ID, values);
+        } else if (boltOutputCollector != null) {
+            boltOutputCollector.emit(Common.TOPOLOGY_MASTER_HB_STREAM_ID, values);
+        } else {
+            LOG.warn("Failed to send hearbeat msg. OutputCollector has not been initialized!");
+        }
+    }
+
+    private void checkExecuteThreadHb() {
+        long currentTime = TimeUtils.current_time_secs();
+        if (currentTime - executeThreadHbTime > taskHbTimeout) {
+            String error = "No response from Task-" + taskId + ", last report time(sec) is " + executeThreadHbTime;
+            reportError.report(error);
+        }
+    }
 }
\ No newline at end of file
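
The execute-thread liveness check added here reduces to a timestamp comparison gated by IntervalCheck. A standalone paraphrase in plain Java (IntervalCheck and ITaskReportErr are project classes, so System.err stands in for the error report):

    long executeThreadHbTime = System.currentTimeMillis() / 1000; // refreshed via setExeThreadHbTime()
    int taskHbTimeout = 180;                                      // NIMBUS_TASK_TIMEOUT_SECS default

    long now = System.currentTimeMillis() / 1000;
    if (now - executeThreadHbTime > taskHbTimeout) {
        // the real trigger routes this through ITaskReportErr.report(...)
        System.err.println("No response from task, last report time(sec) is " + executeThreadHbTime);
    }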

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/TickTupleTrigger.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/TickTupleTrigger.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/TickTupleTrigger.java
index ecf01c5..a70d8ab 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/TickTupleTrigger.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/TickTupleTrigger.java
@@ -29,13 +29,11 @@ import backtype.storm.utils.DisruptorQueue;
 import com.alibaba.jstorm.utils.TimeUtils;
 
 public class TickTupleTrigger extends TimerTrigger {
-    private static final Logger LOG = LoggerFactory
-            .getLogger(TickTupleTrigger.class);
+    private static final Logger LOG = LoggerFactory.getLogger(TickTupleTrigger.class);
 
     TopologyContext topologyContext;
 
-    public TickTupleTrigger(TopologyContext topologyContext, int frequence,
-            String name, DisruptorQueue queue) {
+    public TickTupleTrigger(TopologyContext topologyContext, int frequence, String name, DisruptorQueue queue) {
         this.name = name;
         this.queue = queue;
         this.opCode = TimerConstants.TICK_TUPLE;
@@ -53,10 +51,7 @@ public class TickTupleTrigger extends TimerTrigger {
     @Override
     public void updateObject() {
         this.object =
-                new TupleImplExt(topologyContext, new Values(
-                        TimeUtils.current_time_secs()),
-                        (int) Constants.SYSTEM_TASK_ID,
-                        Constants.SYSTEM_TICK_STREAM_ID);
+                new TupleImplExt(topologyContext, new Values(TimeUtils.current_time_secs()), (int) Constants.SYSTEM_TASK_ID, Constants.SYSTEM_TICK_STREAM_ID);
     }
 
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/TimerTrigger.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/TimerTrigger.java b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/TimerTrigger.java
index 4cecbea..2c2c39c 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/TimerTrigger.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/daemon/worker/timer/TimerTrigger.java
@@ -30,13 +30,11 @@ import backtype.storm.utils.DisruptorQueue;
 import com.lmax.disruptor.InsufficientCapacityException;
 
 public class TimerTrigger implements Runnable {
-    private static final Logger LOG = LoggerFactory
-            .getLogger(TimerTrigger.class);
+    private static final Logger LOG = LoggerFactory.getLogger(TimerTrigger.class);
 
     private static ScheduledExecutorService threadPool;
 
-    public static void setScheduledExecutorService(
-            ScheduledExecutorService scheduledExecutorService) {
+    public static void setScheduledExecutorService(ScheduledExecutorService scheduledExecutorService) {
         threadPool = scheduledExecutorService;
     }
 
@@ -44,7 +42,7 @@ public class TimerTrigger implements Runnable {
     protected int opCode;
     protected int firstTime;
     protected int frequence;
-    protected DisruptorQueue queue;
+    protected DisruptorQueue queue = null;
     protected Object object;
     protected boolean block = true;
 
@@ -53,8 +51,7 @@ public class TimerTrigger implements Runnable {
     }
 
     public void register(TimeUnit timeUnit) {
-        threadPool.scheduleAtFixedRate(this, firstTime, frequence,
-                timeUnit);
+        threadPool.scheduleAtFixedRate(this, firstTime, frequence, timeUnit);
         LOG.info("Successfully register timer " + this);
     }
 
@@ -145,8 +142,7 @@ public class TimerTrigger implements Runnable {
 
     @Override
     public String toString() {
-        return ToStringBuilder.reflectionToString(this,
-                ToStringStyle.SHORT_PREFIX_STYLE);
+        return ToStringBuilder.reflectionToString(this, ToStringStyle.SHORT_PREFIX_STYLE);
     }
 
     public class TimerEvent {

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/drpc/ClearThread.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/drpc/ClearThread.java b/jstorm-core/src/main/java/com/alibaba/jstorm/drpc/ClearThread.java
index 5f16b0b..4d7d32c 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/drpc/ClearThread.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/drpc/ClearThread.java
@@ -31,8 +31,7 @@ import com.alibaba.jstorm.utils.JStormUtils;
 import com.alibaba.jstorm.utils.TimeUtils;
 
 public class ClearThread extends RunnableCallback {
-    private static final Logger LOG = LoggerFactory
-            .getLogger(ClearThread.class);
+    private static final Logger LOG = LoggerFactory.getLogger(ClearThread.class);
 
     private final int REQUEST_TIMEOUT_SECS;
     private static final int TIMEOUT_CHECK_SECS = 5;
@@ -42,10 +41,7 @@ public class ClearThread extends RunnableCallback {
     public ClearThread(Drpc drpc) {
         drpcService = drpc;
 
-        REQUEST_TIMEOUT_SECS =
-                JStormUtils.parseInt(
-                        drpcService.getConf().get(
-                                Config.DRPC_REQUEST_TIMEOUT_SECS), 60);
+        REQUEST_TIMEOUT_SECS = JStormUtils.parseInt(drpcService.getConf().get(Config.DRPC_REQUEST_TIMEOUT_SECS), 60);
         LOG.info("Drpc timeout seconds is " + REQUEST_TIMEOUT_SECS);
     }
 
@@ -56,8 +52,7 @@ public class ClearThread extends RunnableCallback {
             if (TimeUtils.time_delta(e.getValue()) > REQUEST_TIMEOUT_SECS) {
                 String id = e.getKey();
 
-                drpcService.getIdtoResult().put(id,
-                        new DRPCExecutionException("Request timed out"));
+                drpcService.getIdtoResult().put(id, new DRPCExecutionException("Request timed out"));
                 Semaphore s = drpcService.getIdtoSem().get(id);
                 if (s != null) {
                     s.release();

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/drpc/Drpc.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/drpc/Drpc.java b/jstorm-core/src/main/java/com/alibaba/jstorm/drpc/Drpc.java
index 09b4885..adbec06 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/drpc/Drpc.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/drpc/Drpc.java
@@ -27,6 +27,7 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import com.alibaba.jstorm.utils.JStormServerUtils;
 import org.apache.thrift.TException;
 import org.apache.thrift.protocol.TBinaryProtocol;
 import org.apache.thrift.server.THsHaServer;
@@ -53,8 +54,7 @@ import com.alibaba.jstorm.utils.TimeUtils;
  * @author yannian
  * 
  */
-public class Drpc implements DistributedRPC.Iface,
-        DistributedRPCInvocations.Iface, Shutdownable {
+public class Drpc implements DistributedRPC.Iface, DistributedRPCInvocations.Iface, Shutdownable {
 
     private static final Logger LOG = LoggerFactory.getLogger(Drpc.class);
 
@@ -76,23 +76,18 @@ public class Drpc implements DistributedRPC.Iface,
 
     private AtomicBoolean shutdown = new AtomicBoolean(false);
 
-    private THsHaServer initHandlerServer(Map conf, final Drpc service)
-            throws Exception {
+    private THsHaServer initHandlerServer(Map conf, final Drpc service) throws Exception {
         int port = JStormUtils.parseInt(conf.get(Config.DRPC_PORT));
-        int workerThreadNum =
-                JStormUtils.parseInt(conf.get(Config.DRPC_WORKER_THREADS));
+        int workerThreadNum = JStormUtils.parseInt(conf.get(Config.DRPC_WORKER_THREADS));
         int queueSize = JStormUtils.parseInt(conf.get(Config.DRPC_QUEUE_SIZE));
 
         TNonblockingServerSocket socket = new TNonblockingServerSocket(port);
         THsHaServer.Args targs = new THsHaServer.Args(socket);
         targs.workerThreads(64);
         targs.protocolFactory(new TBinaryProtocol.Factory());
-        targs.processor(new DistributedRPC.Processor<DistributedRPC.Iface>(
-                service));
+        targs.processor(new DistributedRPC.Processor<DistributedRPC.Iface>(service));
 
-        ThreadPoolExecutor executor =
-                new ThreadPoolExecutor(workerThreadNum, workerThreadNum, 60,
-                        TimeUnit.SECONDS, new ArrayBlockingQueue(queueSize));
+        ThreadPoolExecutor executor = new ThreadPoolExecutor(workerThreadNum, workerThreadNum, 60, TimeUnit.SECONDS, new ArrayBlockingQueue(queueSize));
         targs.executorService(executor);
 
         THsHaServer handlerServer = new THsHaServer(targs);
@@ -101,17 +96,14 @@ public class Drpc implements DistributedRPC.Iface,
         return handlerServer;
     }
 
-    private THsHaServer initInvokeServer(Map conf, final Drpc service)
-            throws Exception {
+    private THsHaServer initInvokeServer(Map conf, final Drpc service) throws Exception {
         int port = JStormUtils.parseInt(conf.get(Config.DRPC_INVOCATIONS_PORT));
 
         TNonblockingServerSocket socket = new TNonblockingServerSocket(port);
         THsHaServer.Args targsInvoke = new THsHaServer.Args(socket);
         targsInvoke.workerThreads(64);
         targsInvoke.protocolFactory(new TBinaryProtocol.Factory());
-        targsInvoke
-                .processor(new DistributedRPCInvocations.Processor<DistributedRPCInvocations.Iface>(
-                        service));
+        targsInvoke.processor(new DistributedRPCInvocations.Processor<DistributedRPCInvocations.Iface>(service));
 
         THsHaServer invokeServer = new THsHaServer(targsInvoke);
 
@@ -136,6 +128,7 @@ public class Drpc implements DistributedRPC.Iface,
 
         LOG.info("Starting Distributed RPC servers...");
         new Thread(new Runnable() {
+
             @Override
             public void run() {
                 invokeServer.serve();
@@ -148,11 +141,18 @@ public class Drpc implements DistributedRPC.Iface,
         clearThread = new AsyncLoopThread(new ClearThread(this));
         LOG.info("Successfully start clear thread");
     }
+    private void createPid(Map conf) throws Exception {
+        String pidDir = StormConfig.drpcPids(conf);
+
+        JStormServerUtils.createPid(pidDir);
+    }
 
     public void init() throws Exception {
         conf = StormConfig.read_storm_config();
         LOG.info("Configuration is \n" + conf);
 
+        createPid(conf);
+
         initClearThread();
 
         initThrift();
@@ -188,14 +188,10 @@ public class Drpc implements DistributedRPC.Iface,
     }
 
     private AtomicInteger ctr = new AtomicInteger(0);
-    private ConcurrentHashMap<String, Semaphore> idtoSem =
-            new ConcurrentHashMap<String, Semaphore>();
-    private ConcurrentHashMap<String, Object> idtoResult =
-            new ConcurrentHashMap<String, Object>();
-    private ConcurrentHashMap<String, Integer> idtoStart =
-            new ConcurrentHashMap<String, Integer>();
-    private ConcurrentHashMap<String, ConcurrentLinkedQueue<DRPCRequest>> requestQueues =
-            new ConcurrentHashMap<String, ConcurrentLinkedQueue<DRPCRequest>>();
+    private ConcurrentHashMap<String, Semaphore> idtoSem = new ConcurrentHashMap<String, Semaphore>();
+    private ConcurrentHashMap<String, Object> idtoResult = new ConcurrentHashMap<String, Object>();
+    private ConcurrentHashMap<String, Integer> idtoStart = new ConcurrentHashMap<String, Integer>();
+    private ConcurrentHashMap<String, ConcurrentLinkedQueue<DRPCRequest>> requestQueues = new ConcurrentHashMap<String, ConcurrentLinkedQueue<DRPCRequest>>();
 
     public void cleanup(String id) {
         LOG.info("clean id " + id + " @ " + (System.currentTimeMillis()));
@@ -206,10 +202,8 @@ public class Drpc implements DistributedRPC.Iface,
     }
 
     @Override
-    public String execute(String function, String args)
-            throws DRPCExecutionException, TException {
-        LOG.info("Received DRPC request for " + function + " " + args + " at "
-                + (System.currentTimeMillis()));
+    public String execute(String function, String args) throws DRPCExecutionException, TException {
+        LOG.info("Received DRPC request for " + function + " " + args + " at " + (System.currentTimeMillis()));
         int idinc = this.ctr.incrementAndGet();
         int maxvalue = 1000000000;
         int newid = idinc % maxvalue;
@@ -225,19 +219,16 @@ public class Drpc implements DistributedRPC.Iface,
         this.idtoSem.put(strid, sem);
         ConcurrentLinkedQueue<DRPCRequest> queue = acquireQueue(function);
         queue.add(req);
-        LOG.info("Waiting for DRPC request for " + function + " " + args
-                + " at " + (System.currentTimeMillis()));
+        LOG.info("Waiting for DRPC request for " + function + " " + args + " at " + (System.currentTimeMillis()));
         try {
             sem.acquire();
         } catch (InterruptedException e) {
             LOG.error("acquire fail ", e);
         }
-        LOG.info("Acquired for DRPC request for " + function + " " + args
-                + " at " + (System.currentTimeMillis()));
+        LOG.info("Acquired for DRPC request for " + function + " " + args + " at " + (System.currentTimeMillis()));
 
         Object result = this.idtoResult.get(strid);
-        LOG.info("Returning for DRPC request for " + function + " " + args
-                + " at " + (System.currentTimeMillis()));
+        LOG.info("Returning for DRPC request for " + function + " " + args + " at " + (System.currentTimeMillis()));
 
         this.cleanup(strid);
 
@@ -250,8 +241,7 @@ public class Drpc implements DistributedRPC.Iface,
     @Override
     public void result(String id, String result) throws TException {
         Semaphore sem = this.idtoSem.get(id);
-        LOG.info("Received result " + result + " for id " + id + " at "
-                + (System.currentTimeMillis()));
+        LOG.info("Received result " + result + " for id " + id + " at " + (System.currentTimeMillis()));
         if (sem != null) {
             this.idtoResult.put(id, result);
             sem.release();
@@ -265,8 +255,7 @@ public class Drpc implements DistributedRPC.Iface,
         ConcurrentLinkedQueue<DRPCRequest> queue = acquireQueue(functionName);
         DRPCRequest req = queue.poll();
         if (req != null) {
-            LOG.info("Fetched request for " + functionName + " at "
-                    + (System.currentTimeMillis()));
+            LOG.info("Fetched request for " + functionName + " at " + (System.currentTimeMillis()));
             return req;
         } else {
             return new DRPCRequest("", "");
@@ -277,18 +266,15 @@ public class Drpc implements DistributedRPC.Iface,
     @Override
     public void failRequest(String id) throws TException {
         Semaphore sem = this.idtoSem.get(id);
-        LOG.info("failRequest result  for id " + id + " at "
-                + (System.currentTimeMillis()));
+        LOG.info("failRequest result  for id " + id + " at " + (System.currentTimeMillis()));
         if (sem != null) {
-            this.idtoResult.put(id,
-                    new DRPCExecutionException("Request failed"));
+            this.idtoResult.put(id, new DRPCExecutionException("Request failed"));
             sem.release();
         }
     }
 
     private ConcurrentLinkedQueue<DRPCRequest> acquireQueue(String function) {
-        ConcurrentLinkedQueue<DRPCRequest> reqQueue =
-                requestQueues.get(function);
+        ConcurrentLinkedQueue<DRPCRequest> reqQueue = requestQueues.get(function);
         if (reqQueue == null) {
             reqQueue = new ConcurrentLinkedQueue<DRPCRequest>();
             requestQueues.put(function, reqQueue);
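
The execute()/result() pair above is a semaphore rendezvous: execute() parks on a per-request Semaphore, and result() stores the answer and releases it. A self-contained sketch of the pattern with one hard-coded request id (an illustration, not the Drpc API itself):

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.Semaphore;

    public class Rendezvous {
        public static void main(String[] args) throws InterruptedException {
            final ConcurrentHashMap<String, Object> idtoResult = new ConcurrentHashMap<String, Object>();
            final Semaphore sem = new Semaphore(0);

            // result() side: normally invoked by the topology on another thread
            new Thread(new Runnable() {
                public void run() {
                    idtoResult.put("1", "some result");
                    sem.release(); // wakes the blocked execute() call
                }
            }).start();

            // execute() side: enqueue the request, then block until result() fires
            sem.acquire();
            System.out.println(idtoResult.get("1"));
        }
    }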

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/event/EventManagerImp.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/event/EventManagerImp.java b/jstorm-core/src/main/java/com/alibaba/jstorm/event/EventManagerImp.java
index cdfa1cc..2ec7e20 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/event/EventManagerImp.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/event/EventManagerImp.java
@@ -29,14 +29,12 @@ import com.alibaba.jstorm.callback.RunnableCallback;
  * Event Manager, drop one event from queue, then execute the event.
  */
 public class EventManagerImp extends RunnableCallback implements EventManager {
-    private static final Logger LOG = LoggerFactory
-            .getLogger(EventManagerImp.class);
+    private static final Logger LOG = LoggerFactory.getLogger(EventManagerImp.class);
 
     private AtomicInteger added = new AtomicInteger();
     private AtomicInteger processed = new AtomicInteger();
 
-    private LinkedBlockingQueue<RunnableCallback> queue =
-            new LinkedBlockingQueue<RunnableCallback>();
+    private LinkedBlockingQueue<RunnableCallback> queue = new LinkedBlockingQueue<RunnableCallback>();
 
     private Exception e;
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/event/EventManagerPusher.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/event/EventManagerPusher.java b/jstorm-core/src/main/java/com/alibaba/jstorm/event/EventManagerPusher.java
index 7f5e9ef..e4bf9e2 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/event/EventManagerPusher.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/event/EventManagerPusher.java
@@ -30,8 +30,7 @@ public class EventManagerPusher extends RunnableCallback {
 
     private int frequence;
 
-    public EventManagerPusher(EventManager eventManager,
-            RunnableCallback event, int frequence) {
+    public EventManagerPusher(EventManager eventManager, RunnableCallback event, int frequence) {
         this.eventManager = eventManager;
         this.event = event;
         this.frequence = frequence;

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/ControlMessage.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/ControlMessage.java b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/ControlMessage.java
index d68347a..a83f07f 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/ControlMessage.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/ControlMessage.java
@@ -24,11 +24,10 @@ import org.jboss.netty.buffer.ChannelBuffers;
 public enum ControlMessage {
     EOB_MESSAGE((short) -201), OK_RESPONSE((short) -200);
 
-
     private short code;
     private long timeStamp;
     protected static int port;
-    
+
     static public void setPort(int port) {
         ControlMessage.port = port;
     }
@@ -62,9 +61,7 @@ public enum ControlMessage {
      * @throws Exception
      */
     ChannelBuffer buffer() throws Exception {
-        ChannelBufferOutputStream bout =
-                new ChannelBufferOutputStream(
-                        ChannelBuffers.directBuffer(encodeLength()));
+        ChannelBufferOutputStream bout = new ChannelBufferOutputStream(ChannelBuffers.directBuffer(encodeLength()));
         write(bout);
         bout.close();
         return bout.buffer();

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/MessageBatch.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/MessageBatch.java b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/MessageBatch.java
index 172822d..45d6600 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/MessageBatch.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/MessageBatch.java
@@ -28,8 +28,7 @@ import org.slf4j.LoggerFactory;
 import backtype.storm.messaging.TaskMessage;
 
 class MessageBatch {
-    private static final Logger LOG = LoggerFactory
-            .getLogger(MessageBatch.class);
+    private static final Logger LOG = LoggerFactory.getLogger(MessageBatch.class);
     private int buffer_size;
     private ArrayList<Object> msgs;
     private int encoded_length;
@@ -58,8 +57,7 @@ class MessageBatch {
             return;
         }
 
-        throw new RuntimeException("Unsuppoted object type "
-                + obj.getClass().getName());
+        throw new RuntimeException("Unsuppoted object type " + obj.getClass().getName());
     }
 
     void remove(Object obj) {
@@ -89,8 +87,7 @@ class MessageBatch {
      * try to add a TaskMessage to a batch
      * 
      * @param taskMsg
-     * @return false if the msg could not be added due to buffer size limit;
-     *         true otherwise
+     * @return false if the msg could not be added due to buffer size limit; true otherwise
      */
     boolean tryAdd(TaskMessage taskMsg) {
         if ((encoded_length + msgEncodeLength(taskMsg)) > buffer_size)
@@ -144,9 +141,7 @@ class MessageBatch {
      * create a buffer containing the encoding of this batch
      */
     ChannelBuffer buffer() throws Exception {
-        ChannelBufferOutputStream bout =
-                new ChannelBufferOutputStream(
-                        ChannelBuffers.directBuffer(encoded_length));
+        ChannelBufferOutputStream bout = new ChannelBufferOutputStream(ChannelBuffers.directBuffer(encoded_length));
 
         for (Object msg : msgs)
             if (msg instanceof TaskMessage)
@@ -168,19 +163,16 @@ class MessageBatch {
     /**
      * write a TaskMessage into a stream
      * 
-     * Each TaskMessage is encoded as: task ... short(2) len ... int(4) payload
-     * ... byte[] *
+     * Each TaskMessage is encoded as: task ... short(2) len ... int(4) payload ... byte[] *
      */
-    private void writeTaskMessage(ChannelBufferOutputStream bout,
-            TaskMessage message) throws Exception {
+    private void writeTaskMessage(ChannelBufferOutputStream bout, TaskMessage message) throws Exception {
         int payload_len = 0;
         if (message.message() != null)
             payload_len = message.message().length;
 
         int task_id = message.task();
         if (task_id > Short.MAX_VALUE)
-            throw new RuntimeException("Task ID should not exceed "
-                    + Short.MAX_VALUE);
+            throw new RuntimeException("Task ID should not exceed " + Short.MAX_VALUE);
 
         bout.writeShort((short) task_id);
         bout.writeInt(payload_len);
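
Each TaskMessage frame described above carries 6 bytes of header before the payload: a short task id (2 bytes) plus an int length (4 bytes). That fixed overhead is why the MessageDecoder diff below accounts recvSpeed.update(rawBytes.length + 6). A quick java.nio illustration of the layout (not the MessageBatch code itself):

    byte[] payload = new byte[100];
    java.nio.ByteBuffer frame = java.nio.ByteBuffer.allocate(2 + 4 + payload.length);
    frame.putShort((short) 7);     // task id; writeTaskMessage rejects ids above Short.MAX_VALUE
    frame.putInt(payload.length);  // payload length
    frame.put(payload);            // 106 bytes total on the wire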

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/MessageDecoder.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/MessageDecoder.java b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/MessageDecoder.java
index b147092..38e7930 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/MessageDecoder.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/MessageDecoder.java
@@ -17,11 +17,17 @@
  */
 package com.alibaba.jstorm.message.netty;
 
-import java.net.InetSocketAddress;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-
+import backtype.storm.messaging.TaskMessage;
+import com.alibaba.jstorm.client.ConfigExtension;
+import com.alibaba.jstorm.common.metric.AsmHistogram;
+import com.alibaba.jstorm.common.metric.AsmMeter;
+import com.alibaba.jstorm.common.metric.AsmMetric;
+import com.alibaba.jstorm.metric.JStormMetrics;
+import com.alibaba.jstorm.metric.MetricType;
+import com.alibaba.jstorm.metric.MetricUtils;
+import com.alibaba.jstorm.metric.MetricDef;
+import com.alibaba.jstorm.utils.NetWorkUtils;
+import com.alibaba.jstorm.utils.TimeUtils;
 import org.jboss.netty.buffer.ChannelBuffer;
 import org.jboss.netty.channel.Channel;
 import org.jboss.netty.channel.ChannelHandlerContext;
@@ -29,46 +35,40 @@ import org.jboss.netty.handler.codec.frame.FrameDecoder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.alibaba.jstorm.client.ConfigExtension;
-import com.alibaba.jstorm.common.metric.Histogram;
-import com.alibaba.jstorm.common.metric.Meter;
-import com.alibaba.jstorm.metric.JStormMetrics;
-import com.alibaba.jstorm.metric.MetricDef;
-import com.alibaba.jstorm.utils.NetWorkUtils;
-
-import backtype.storm.messaging.TaskMessage;
+import java.net.InetSocketAddress;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
 
 public class MessageDecoder extends FrameDecoder {
-    private static final Logger LOG = LoggerFactory
-            .getLogger(MessageDecoder.class);
+    private static final Logger LOG = LoggerFactory.getLogger(MessageDecoder.class);
 
     // here doesn't use Timer due to competition
-    private static Histogram timer = JStormMetrics
-            .registerWorkerHistogram(MetricDef.NETWORK_MSG_DECODE_TIME);
-    private static Meter recvSpeed = JStormMetrics
-            .registerWorkerMeter(MetricDef.NETTY_SRV_RECV_SPEED);
-    private static Map<Channel, Histogram> networkTransmitTimeMap =
-            new HashMap<Channel, Histogram>();
-    private static Map<Channel, String> transmitNameMap =
-            new HashMap<Channel, String>();
+
+    private static AsmHistogram msgDecodeTime = (AsmHistogram) JStormMetrics.registerWorkerMetric(
+            MetricUtils.workerMetricName(MetricDef.NETWORK_MSG_DECODE_TIME, MetricType.HISTOGRAM), new AsmHistogram());
+    private static AsmMeter recvSpeed = (AsmMeter) JStormMetrics.registerWorkerMetric(
+            MetricUtils.workerMetricName(MetricDef.NETTY_SRV_RECV_SPEED, MetricType.METER), new AsmMeter());
+
+    private static Map<Channel, AsmHistogram> networkTransmitTimeMap = new HashMap<Channel, AsmHistogram>();
+    private static Map<Channel, String> transmitNameMap = new HashMap<Channel, String>();
     private boolean isServer;
     private String localIp;
     private int localPort;
 
+    private boolean enableTransitTimeMetrics;
+
     public MessageDecoder(boolean isServer, Map conf) {
         this.isServer = isServer;
         this.localPort = ConfigExtension.getLocalWorkerPort(conf);
         this.localIp = NetWorkUtils.ip();
-
+        this.enableTransitTimeMetrics = MetricUtils.isEnableNettyMetrics(conf);
     }
 
     /*
-     * Each ControlMessage is encoded as: code (<0) ... short(2) Each
-     * TaskMessage is encoded as: task (>=0) ... short(2) len ... int(4) payload
-     * ... byte[] *
+     * Each ControlMessage is encoded as: code (<0) ... short(2) Each TaskMessage is encoded as: task (>=0) ... short(2) len ... int(4) payload ... byte[] *
      */
-    protected Object decode(ChannelHandlerContext ctx, Channel channel,
-            ChannelBuffer buf) throws Exception {
+    protected Object decode(ChannelHandlerContext ctx, Channel channel, ChannelBuffer buf) throws Exception {
         // Make sure that we have received at least a short
         long available = buf.readableBytes();
         // Length of control message is 10.
@@ -106,21 +106,20 @@ public class MessageDecoder extends FrameDecoder {
                 available -= 12;
                 if (ctrl_msg == ControlMessage.EOB_MESSAGE) {
 
+
                     long interval = System.currentTimeMillis() - timeStamp;
-                    if (interval > 0) {
-
-	                    Histogram netTransTime =
-	                            getTransmitHistogram(channel, clientPort);
-	                    if (netTransTime != null) {
-	                        netTransTime.update(interval );
-	
-	                    }
+                    if (interval < 0)
+                        interval = 0;
+
+                    if (enableTransitTimeMetrics) {
+                        AsmHistogram netTransTime = getTransmitHistogram(channel, clientPort);
+                        if (netTransTime != null) {
+                            netTransTime.update(interval * TimeUtils.NS_PER_US);
+                        }
                     }
-
-                    recvSpeed.update(Double.valueOf(ControlMessage
-                            .encodeLength()));
                 }
 
+                recvSpeed.update(ControlMessage.encodeLength());
                 return ctrl_msg;
             }
 
@@ -138,9 +137,7 @@ public class MessageDecoder extends FrameDecoder {
             // Read the length field.
             int length = buf.readInt();
             if (length <= 0) {
-                LOG.info(
-                        "Receive one message whose TaskMessage's message length is {}",
-                        length);
+                LOG.info("Receive one message whose TaskMessage's message length is {}", length);
                 return new TaskMessage(task, null);
             }
 
@@ -165,72 +162,55 @@ public class MessageDecoder extends FrameDecoder {
             // task, length, JStormUtils.toPrintableString(rawBytes));
 
             TaskMessage ret = new TaskMessage(task, rawBytes);
-            recvSpeed.update(Double.valueOf(rawBytes.length + 6));
+            recvSpeed.update(rawBytes.length + 6);
             return ret;
         } finally {
             if (isServer) {
                 Long endTime = System.nanoTime();
-                timer.update((endTime - startTime) / 1000000.0d);
+                msgDecodeTime.update((endTime - startTime) / TimeUtils.NS_PER_US);
             }
         }
 
     }
 
-    public Histogram getTransmitHistogram(Channel channel, int clientPort) {
-        Histogram netTransTime = networkTransmitTimeMap.get(channel);
+    public AsmHistogram getTransmitHistogram(Channel channel, int clientPort) {
+        AsmHistogram netTransTime = networkTransmitTimeMap.get(channel);
         if (netTransTime == null) {
+            InetSocketAddress sockAddr = (InetSocketAddress) (channel.getRemoteAddress());
+
+            String nettyConnection = NettyConnection.mkString(sockAddr.getAddress().getHostAddress(), clientPort, localIp, localPort);
+            netTransTime =
+                    (AsmHistogram) JStormMetrics.registerNettyMetric(
+                            MetricUtils.nettyMetricName(AsmMetric.mkName(MetricDef.NETTY_SRV_MSG_TRANS_TIME, nettyConnection), MetricType.HISTOGRAM),
+                            new AsmHistogram());
 
-            InetSocketAddress sockAddr =
-                    (InetSocketAddress) (channel.getRemoteAddress());
-
-            String nettyConnection =
-                    NettyConnection.mkString(sockAddr.getAddress()
-                            .getHostAddress(), clientPort, localIp, localPort);
-            try {
-                netTransTime =
-                        JStormMetrics.registerWorkerHistogram(
-                                MetricDef.NETTY_SRV_MSG_TRANS_TIME,
-                                nettyConnection);
-            } catch (Exception e) {
-                LOG.error("{}.{} has been register",
-                        MetricDef.NETTY_SRV_MSG_TRANS_TIME, nettyConnection);
-                removeTransmitHistogram(nettyConnection);
-                return null;
-            }
             networkTransmitTimeMap.put(channel, netTransTime);
             transmitNameMap.put(channel, nettyConnection);
-            LOG.info("Register Transmit Histogram of {}, channel {}",
-                    nettyConnection, channel);
+            LOG.info("Register Transmit Histogram of {}, channel {}", nettyConnection, channel);
         }
 
         return netTransTime;
     }
 
     public static void removeTransmitHistogram(Channel channel) {
-        Histogram netTransTime = networkTransmitTimeMap.remove(channel);
+        AsmHistogram netTransTime = networkTransmitTimeMap.remove(channel);
         if (netTransTime != null) {
-
             String nettyConnection = transmitNameMap.remove(channel);
-
-            JStormMetrics.unregisterWorkerMetric(
-                    MetricDef.NETTY_SRV_MSG_TRANS_TIME, nettyConnection);
-
-            LOG.info("Remove Transmit Histogram of {}, channel {}",
-                    nettyConnection, channel);
+            JStormMetrics.unregisterNettyMetric(MetricUtils.nettyMetricName(AsmMetric.mkName(MetricDef.NETTY_SRV_MSG_TRANS_TIME, nettyConnection),
+                    MetricType.HISTOGRAM));
+            LOG.info("Remove Transmit Histogram of {}, channel {}", nettyConnection, channel);
         }
     }
-    
+
     public static void removeTransmitHistogram(String nettyConnection) {
         Channel oldChannel = null;
-        
-        for (Entry<Channel, String> entry: transmitNameMap.entrySet()) {
+
+        for (Entry<Channel, String> entry : transmitNameMap.entrySet()) {
             if (nettyConnection.equals(entry.getValue())) {
                 oldChannel = entry.getKey();
             }
         }
-        
+
         removeTransmitHistogram(oldChannel);
     }
-    
-    
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/MessageEncoder.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/MessageEncoder.java b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/MessageEncoder.java
index 61e9187..0a750e0 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/MessageEncoder.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/message/netty/MessageEncoder.java
@@ -23,8 +23,7 @@ import org.jboss.netty.handler.codec.oneone.OneToOneEncoder;
 
 public class MessageEncoder extends OneToOneEncoder {
     @Override
-    protected Object encode(ChannelHandlerContext ctx, Channel channel,
-            Object obj) throws Exception {
+    protected Object encode(ChannelHandlerContext ctx, Channel channel, Object obj) throws Exception {
         if (obj instanceof ControlMessage) {
             return ((ControlMessage) obj).buffer();
         }
@@ -33,8 +32,7 @@ public class MessageEncoder extends OneToOneEncoder {
             return ((MessageBatch) obj).buffer();
         }
 
-        throw new RuntimeException("Unsupported encoding of object of class "
-                + obj.getClass().getName());
+        throw new RuntimeException("Unsupported encoding of object of class " + obj.getClass().getName());
     }
 
 }


[02/51] [partial] storm git commit: Update JStorm to latest release 2.1.0

Posted by pt...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/utils/JStormServerUtils.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/JStormServerUtils.java b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/JStormServerUtils.java
index 6000688..59e14d9 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/JStormServerUtils.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/JStormServerUtils.java
@@ -43,12 +43,10 @@ import com.alibaba.jstorm.cluster.StormConfig;
  */
 public class JStormServerUtils {
 
-    private static final Logger LOG = LoggerFactory
-            .getLogger(JStormServerUtils.class);
+    private static final Logger LOG = LoggerFactory.getLogger(JStormServerUtils.class);
 
-    public static void downloadCodeFromMaster(Map conf, String localRoot,
-            String masterCodeDir, String topologyId, boolean isSupervisor)
-            throws IOException, TException {
+    public static void downloadCodeFromMaster(Map conf, String localRoot, String masterCodeDir, String topologyId, boolean isSupervisor) throws IOException,
+            TException {
         FileUtils.forceMkdir(new File(localRoot));
         FileUtils.forceMkdir(new File(StormConfig.stormlib_path(localRoot)));
 
@@ -64,25 +62,18 @@ public class JStormServerUtils {
         String masterStormConfPath = StormConfig.stormconf_path(masterCodeDir);
         Utils.downloadFromMaster(conf, masterStormConfPath, localStormConfPath);
 
-        Map stormConf =
-                (Map) StormConfig.readLocalObject(topologyId,
-                        localStormConfPath);
+        Map stormConf = (Map) StormConfig.readLocalObject(topologyId, localStormConfPath);
 
         if (stormConf == null)
             throw new IOException("Get topology conf error: " + topologyId);
 
-        List<String> libs =
-                (List<String>) stormConf
-                        .get(GenericOptionsParser.TOPOLOGY_LIB_NAME);
+        List<String> libs = (List<String>) stormConf.get(GenericOptionsParser.TOPOLOGY_LIB_NAME);
         if (libs == null)
             return;
         for (String libName : libs) {
-            String localStromLibPath =
-                    StormConfig.stormlib_path(localRoot, libName);
-            String masterStormLibPath =
-                    StormConfig.stormlib_path(masterCodeDir, libName);
-            Utils.downloadFromMaster(conf, masterStormLibPath,
-                    localStromLibPath);
+            String localStromLibPath = StormConfig.stormlib_path(localRoot, libName);
+            String masterStormLibPath = StormConfig.stormlib_path(masterCodeDir, libName);
+            Utils.downloadFromMaster(conf, masterStormLibPath, localStromLibPath);
         }
     }
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/utils/JStormUtils.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/JStormUtils.java b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/JStormUtils.java
index 983f579..ad56815 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/JStormUtils.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/JStormUtils.java
@@ -53,6 +53,7 @@ import org.apache.commons.exec.ExecuteException;
 import org.apache.commons.exec.ExecuteResultHandler;
 import org.apache.commons.exec.PumpStreamHandler;
 import org.apache.commons.lang.StringUtils;
+import org.apache.thrift.TFieldIdEnum;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -67,11 +68,9 @@ import com.alibaba.jstorm.client.ConfigExtension;
  * JStorm utility
  * 
  * @author yannian/Longda/Xin.Zhou/Xin.Li
- * 
  */
 public class JStormUtils {
-    private static final Logger LOG = LoggerFactory
-            .getLogger(JStormUtils.class);
+    private static final Logger LOG = LoggerFactory.getLogger(JStormUtils.class);
 
     public static long SIZE_1_K = 1024;
     public static long SIZE_1_M = SIZE_1_K * 1024;
@@ -223,8 +222,7 @@ public class JStormUtils {
     }
 
     /**
-     * Gets the pid of this JVM, because Java doesn't provide a real way to do
-     * this.
+     * Gets the pid of this JVM, because Java doesn't provide a real way to do this.
      * 
      * @return
      */
@@ -238,8 +236,7 @@ public class JStormUtils {
         return split[0];
     }
 
-    public static void exec_command(String command) throws ExecuteException,
-            IOException {
+    public static void exec_command(String command) throws ExecuteException, IOException {
         String[] cmdlist = command.split(" ");
         CommandLine cmd = new CommandLine(cmdlist[0]);
         for (int i = 1; i < cmdlist.length; i++) {
@@ -257,14 +254,12 @@ public class JStormUtils {
      * @param dir
      * @param destdir
      */
-    public static void extract_dir_from_jar(String jarpath, String dir,
-            String destdir) {
+    public static void extract_dir_from_jar(String jarpath, String dir, String destdir) {
         String cmd = "unzip -qq " + jarpath + " " + dir + "/** -d " + destdir;
         try {
             exec_command(cmd);
         } catch (Exception e) {
-            LOG.warn("No " + dir + " from " + jarpath + " by cmd:" + cmd
-                    + "!\n" + e.getMessage());
+            LOG.warn("No " + dir + " from " + jarpath + " by cmd:" + cmd + "!\n" + e.getMessage());
         }
 
     }
@@ -278,8 +273,7 @@ public class JStormUtils {
                 LOG.info("kill -9 process " + pid);
                 sleepMs(100);
             } catch (ExecuteException e) {
-                LOG.info("Error when trying to kill " + pid
-                        + ". Process has been killed");
+                LOG.info("Error when trying to kill " + pid + ". Process has been killed");
             } catch (Exception e) {
                 LOG.info("Error when trying to kill " + pid + ".Exception ", e);
             }
@@ -291,8 +285,7 @@ public class JStormUtils {
             exec_command("kill " + pid);
             LOG.info("kill process " + pid);
         } catch (ExecuteException e) {
-            LOG.info("Error when trying to kill " + pid
-                    + ". Process has been killed. ");
+            LOG.info("Error when trying to kill " + pid + ". Process has been killed. ");
         } catch (Exception e) {
             LOG.info("Error when trying to kill " + pid + ".Exception ", e);
         }
@@ -349,7 +342,8 @@ public class JStormUtils {
         String output = null;
         try {
             String pid = JStormUtils.process_pid();
-            output = SystemOperation.exec("top -b -n 1 | grep " + pid);
+            String command = String.format("top -b -n 1 -p %s | grep -w %s", pid, pid);
+            output = SystemOperation.exec(command);
             String subStr = output.substring(output.indexOf("S") + 1);
             for (int i = 0; i < subStr.length(); i++) {
                 char ch = subStr.charAt(i);
@@ -369,64 +363,89 @@ public class JStormUtils {
 
         return value;
     }
-    
+
+    public static Double getDiskUsage() {
+        if (!OSInfo.isLinux() && !OSInfo.isMac()) {
+            return 0.0;
+        }
+        try {
+            String output = SystemOperation.exec("df -h /");
+            if (output != null) {
+                String[] lines = output.split("[\\r\\n]+");
+                if (lines.length >= 2) {
+                    String[] parts = lines[1].split("\\s+");
+                    if (parts.length >= 5) {
+                        String pct = parts[4];
+                        if (pct.endsWith("%")) {
+                            return Integer.valueOf(pct.substring(0, pct.length() - 1)) / 100.0;
+                        }
+                    }
+                }
+            }
+        } catch (Exception e) {
+            LOG.warn("failed to get disk usage:", e);
+        }
+        return 0.0;
+    }
+
     public static Double getMemUsage() {
-    	if (OSInfo.isLinux() == true) {
-    		try {
-    			Double value = 0.0;
+        if (OSInfo.isLinux() == true) {
+            try {
+                Double value = 0.0;
                 String pid = JStormUtils.process_pid();
-                String output = SystemOperation.exec("top -b -n 1 | grep " + pid);
-                
-                int m = 0;  
-                String[] strArray = output.split(" ");  
-                for (int i = 0; i < strArray.length; i++) {  
-                    String info = strArray[i];  
-                    if (info.trim().length() == 0){  
-                        continue;  
-                    }  
-                    if(m == 5) {
-                    	// memory
-                        String unit = info.substring(info.length() - 1); 
-                        
-                        if(unit.equalsIgnoreCase("g")) {  
-                            value =  Double.parseDouble(info.substring(0, info.length() - 1));
+                String command = String.format("top -b -n 1 -p %s | grep -w %s", pid, pid);
+                String output = SystemOperation.exec(command);
+
+                int m = 0;
+                String[] strArray = output.split(" ");
+                for (int i = 0; i < strArray.length; i++) {
+                    String info = strArray[i];
+                    if (info.trim().length() == 0) {
+                        continue;
+                    }
+                    if (m == 5) {
+                        // memory
+                        String unit = info.substring(info.length() - 1);
+
+                        if (unit.equalsIgnoreCase("g")) {
+                            value = Double.parseDouble(info.substring(0, info.length() - 1));
                             value *= 1000000000;
-                        } else if(unit.equalsIgnoreCase("m")) {  
-                        	value =  Double.parseDouble(info.substring(0, info.length() - 1)); 
-                        	value *= 1000000;
-                        } else {  
-                        	value =  Double.parseDouble(info);  
-                        } 
+                        } else if (unit.equalsIgnoreCase("m")) {
+                            value = Double.parseDouble(info.substring(0, info.length() - 1));
+                            value *= 1000000;
+                        } else {
+                            value = Double.parseDouble(info);
+                        }
+                        
+                        //LOG.info("!!!! Get Memory Size:{}, info:{}", value, info);
                         return value;
-                    }  
-                    if(m == 8) {
-                    	// cpu usage
-                          
-                    }  
-                    if(m == 9) {
-                    	// memory ratio
-                         
-                    }  
-                    m++;  
-                }  
+                    }
+                    if (m == 8) {
+                        // cpu usage
+
+                    }
+                    if (m == 9) {
+                        // memory ratio
+
+                    }
+                    m++;
+                }
             } catch (Exception e) {
                 LOG.warn("Failed to get memory usage .");
 
             }
-    	}
-    	
-    	// this will be incorrect
-		MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean();    
+        }
+
+        // this will be incorrect
+        MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean();
         MemoryUsage memoryUsage = memoryMXBean.getHeapMemoryUsage();
 
         return Double.valueOf(memoryUsage.getUsed());
     }
 
     /**
-     * If it is backend, please set resultHandler, such as
-     * DefaultExecuteResultHandler If it is frontend,
-     * ByteArrayOutputStream.toString get the result
-     * 
+     * If it runs in the background, set a resultHandler such as DefaultExecuteResultHandler; if it runs in the foreground, ByteArrayOutputStream.toString() gets the result
+     * <p/>
      * This function doesn't care whether the command succeeds or not
      * 
      * @param command
@@ -436,9 +455,8 @@ public class JStormUtils {
      * @return
      * @throws IOException
      */
-    public static ByteArrayOutputStream launchProcess(String command,
-            final Map environment, final String workDir,
-            ExecuteResultHandler resultHandler) throws IOException {
+    public static ByteArrayOutputStream launchProcess(String command, final Map environment, final String workDir, ExecuteResultHandler resultHandler)
+            throws IOException {
 
         String[] cmdlist = command.split(" ");
 
@@ -479,8 +497,7 @@ public class JStormUtils {
 
     }
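+
+    // a usage sketch of the method above (an assumed call, not part of this change):
+    //   DefaultExecuteResultHandler handler = new DefaultExecuteResultHandler();
+    //   ByteArrayOutputStream out = launchProcess("ls -l", new HashMap(), ".", handler);
+    //   handler.waitFor();               // backend: the handler is notified on completion
+    //   String result = out.toString();  // frontend: read the captured output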
 
-    protected static java.lang.Process launchProcess(final String[] cmdlist,
-            final Map<String, String> environment) throws IOException {
+    protected static Process launchProcess(final String[] cmdlist, final Map<String, String> environment) throws IOException {
         ArrayList<String> buff = new ArrayList<String>();
         for (String tok : cmdlist) {
             if (!tok.isEmpty()) {
@@ -499,33 +516,26 @@ public class JStormUtils {
     }
 
     /**
-     * @@@ it should use DefaultExecutor to start a process, but some little
-     *     problem have been found, such as exitCode/output string so still use
-     *     the old method to start process
-     * 
      * @param command
      * @param environment
      * @param backend
      * @return
      * @throws IOException
+     * @@@ it should use DefaultExecutor to start a process, but a few problems were found with it (e.g. exitCode/output string handling), so the old method
+     *     is still used to start the process
      */
-    public static java.lang.Process launch_process(final String command,
-            final Map<String, String> environment, boolean backend)
-            throws IOException {
+    public static Process launch_process(final String command, final Map<String, String> environment, boolean backend) throws IOException {
 
         if (backend == true) {
             new Thread(new Runnable() {
 
                 @Override
                 public void run() {
-                    String[] cmdlist =
-                            (new String("nohup " + command + " &")).split(" ");
+                    String[] cmdlist = ("nohup " + command + " &").split(" ");
                     try {
                         launchProcess(cmdlist, environment);
                     } catch (IOException e) {
-                        LOG.error(
-                                "Failed to run " + command + ":" + e.getCause(),
-                                e);
+                        LOG.error("Failed to run " + command + ":" + e.getCause(), e);
                     }
                 }
             }).start();
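+
+            // the commons-exec route mentioned in the javadoc above would look roughly
+            // like this (a sketch only; exitCode/output handling was found unreliable):
+            //   CommandLine cmdLine = CommandLine.parse(command);
+            //   DefaultExecutor executor = new DefaultExecutor();
+            //   executor.execute(cmdLine, new DefaultExecuteResultHandler());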
@@ -568,9 +578,8 @@ public class JStormUtils {
     }
 
     /**
-     * 
     * if the list contains a repeated string, return the repeated string
-     * 
+     * <p/>
     * this function is used to check whether a bolt or spout uses a duplicate id
      * 
      * @param sets
@@ -629,7 +638,7 @@ public class JStormUtils {
         return rtn;
     }
 
-    public static <T> Long bit_xor_vals(java.util.List<T> vals) {
+    public static <T> Long bit_xor_vals(List<T> vals) {
         Long rtn = 0l;
         for (T n : vals) {
             rtn = bit_xor(rtn, n);
@@ -638,7 +647,7 @@ public class JStormUtils {
         return rtn;
     }
 
-    public static <T> Long bit_xor_vals_sets(java.util.Set<T> vals) {
+    public static <T> Long bit_xor_vals_sets(Set<T> vals) {
         Long rtn = 0l;
         for (T n : vals) {
             rtn = bit_xor(rtn, n);
@@ -675,7 +684,7 @@ public class JStormUtils {
         return rtn;
     }
 
-    public static <V> List<V> mk_list(java.util.Set<V> args) {
+    public static <V> List<V> mk_list(Set<V> args) {
         ArrayList<V> rtn = new ArrayList<V>();
         if (args != null) {
             for (V o : args) {
@@ -712,8 +721,7 @@ public class JStormUtils {
         } else if (o instanceof Long) {
             return (Long) o;
         } else {
-            throw new RuntimeException("Invalid value "
-                    + o.getClass().getName() + " " + o);
+            throw new RuntimeException("Invalid value " + o.getClass().getName() + " " + o);
         }
     }
 
@@ -733,8 +741,18 @@ public class JStormUtils {
         } else if (o instanceof Double) {
             return (Double) o;
         } else {
-            throw new RuntimeException("Invalid value "
-                    + o.getClass().getName() + " " + o);
+            throw new RuntimeException("Invalid value " + o.getClass().getName() + " " + o);
+        }
+    }
+
+    public static Double parseDouble(Object o, double defaultValue) {
+        if (o == null) {
+            return defaultValue;
+        }
+        try {
+            return parseDouble(o);
+        } catch (Exception ignored) {
+            return defaultValue;
         }
     }
 
@@ -769,8 +787,7 @@ public class JStormUtils {
         } else if (o instanceof Integer) {
             return (Integer) o;
         } else {
-            throw new RuntimeException("Invalid value "
-                    + o.getClass().getName() + " " + o);
+            throw new RuntimeException("Invalid value " + o.getClass().getName() + " " + o);
         }
     }
 
@@ -791,6 +808,20 @@ public class JStormUtils {
         }
     }
 
+    public static Boolean parseBoolean(Object o) {
+        if (o == null) {
+            return null;
+        }
+
+        if (o instanceof String) {
+            return Boolean.valueOf((String) o);
+        } else if (o instanceof Boolean) {
+            return (Boolean) o;
+        } else {
+            throw new RuntimeException("Invalid value " + o.getClass().getName() + " " + o);
+        }
+    }
+
     public static boolean parseBoolean(Object o, boolean defaultValue) {
         if (o == null) {
             return defaultValue;
@@ -863,8 +894,7 @@ public class JStormUtils {
         } else if (oldValue instanceof BigInteger) {
             return ((BigInteger) oldValue).add((BigInteger) newValue);
         } else if (oldValue instanceof Number) {
-            return ((Number) oldValue).doubleValue()
-                    + ((Number) newValue).doubleValue();
+            return ((Number) oldValue).doubleValue() + ((Number) newValue).doubleValue();
         } else {
             return null;
         }
@@ -933,8 +963,7 @@ public class JStormUtils {
 
     public static String formatSimpleDouble(Double value) {
         try {
-            java.text.DecimalFormat form =
-                    new java.text.DecimalFormat("##0.000");
+            java.text.DecimalFormat form = new java.text.DecimalFormat("##0.000");
             String s = form.format(value);
             return s;
         } catch (Exception e) {
@@ -955,8 +984,7 @@ public class JStormUtils {
 
     public static double formatDoubleDecPoint4(Double value) {
         try {
-            java.text.DecimalFormat form =
-                    new java.text.DecimalFormat("###.0000");
+            java.text.DecimalFormat form = new java.text.DecimalFormat("###.0000");
             String s = form.format(value);
             return Double.valueOf(s);
         } catch (Exception e) {
@@ -1041,18 +1069,13 @@ public class JStormUtils {
     }
 
     /**
-     * @@@ Todo
-     * 
      * @return
+     * @@@ Todo
      */
     public static Long getPhysicMemorySize() {
         Object object;
         try {
-            object =
-                    ManagementFactory.getPlatformMBeanServer().getAttribute(
-                            new ObjectName("java.lang", "type",
-                                    "OperatingSystem"),
-                            "TotalPhysicalMemorySize");
+            object = ManagementFactory.getPlatformMBeanServer().getAttribute(new ObjectName("java.lang", "type", "OperatingSystem"), "TotalPhysicalMemorySize");
         } catch (Exception e) {
             LOG.warn("Failed to get system physical memory size,", e);
             return null;
@@ -1089,19 +1112,15 @@ public class JStormUtils {
 
     public static String getLogFileName() {
         try {
-            Logger rootLogger =
-                    LoggerFactory.getLogger(org.slf4j.Logger.ROOT_LOGGER_NAME);
+            Logger rootLogger = LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME);
             if (rootLogger instanceof ch.qos.logback.classic.Logger) {
-                ch.qos.logback.classic.Logger logbackLogger =
-                        (ch.qos.logback.classic.Logger) rootLogger;
+                ch.qos.logback.classic.Logger logbackLogger = (ch.qos.logback.classic.Logger) rootLogger;
                 // Logger framework is Logback
-                for (Iterator<ch.qos.logback.core.Appender<ch.qos.logback.classic.spi.ILoggingEvent>> index =
-                        logbackLogger.iteratorForAppenders(); index.hasNext();) {
-                    ch.qos.logback.core.Appender<ch.qos.logback.classic.spi.ILoggingEvent> appender =
-                            index.next();
+                for (Iterator<ch.qos.logback.core.Appender<ch.qos.logback.classic.spi.ILoggingEvent>> index = logbackLogger.iteratorForAppenders(); index.hasNext();) {
+                    ch.qos.logback.core.Appender<ch.qos.logback.classic.spi.ILoggingEvent> appender = index.next();
                     if (appender instanceof ch.qos.logback.core.FileAppender) {
-                        ch.qos.logback.core.FileAppender fileAppender =
-                                (ch.qos.logback.core.FileAppender) appender;
+                        ch.qos.logback.core.FileAppender fileAppender = (ch.qos.logback.core.FileAppender) appender;
                         return fileAppender.getFile();
                     }
                 }
@@ -1156,8 +1175,7 @@ public class JStormUtils {
 
         FileOutputStream workerOut = new FileOutputStream(new File(file));
 
-        PrintStream ps =
-                new PrintStream(new BufferedOutputStream(workerOut), true);
+        PrintStream ps = new PrintStream(new BufferedOutputStream(workerOut), true);
         System.setOut(ps);
         System.setErr(ps);
 
@@ -1170,13 +1188,11 @@ public class JStormUtils {
         return new AsyncLoopDefaultKill();
     }
 
-    public static TreeMap<Integer, Integer> integer_divided(int sum,
-            int num_pieces) {
+    public static TreeMap<Integer, Integer> integer_divided(int sum, int num_pieces) {
         return Utils.integerDivided(sum, num_pieces);
     }
 
-    public static <K, V> HashMap<K, V> filter_val(RunnableCallback fn,
-            Map<K, V> amap) {
+    public static <K, V> HashMap<K, V> filter_val(RunnableCallback fn, Map<K, V> amap) {
         HashMap<K, V> rtn = new HashMap<K, V>();
 
         for (Entry<K, V> entry : amap.entrySet()) {
@@ -1191,16 +1207,14 @@ public class JStormUtils {
     }
 
     public static List<Integer> getSupervisorPortList(Map conf) {
-        List<Integer> portList =
-                (List<Integer>) conf.get(Config.SUPERVISOR_SLOTS_PORTS);
+        List<Integer> portList = (List<Integer>) conf.get(Config.SUPERVISOR_SLOTS_PORTS);
         if (portList != null && portList.size() > 0) {
             return portList;
         }
 
         LOG.info("Generate port list through CPU cores and system memory size");
 
-        double cpuWeight =
-                ConfigExtension.getSupervisorSlotsPortCpuWeight(conf);
+        double cpuWeight = ConfigExtension.getSupervisorSlotsPortCpuWeight(conf);
         int sysCpuNum = 4;
         try {
             sysCpuNum = Runtime.getRuntime().availableProcessors();
@@ -1211,11 +1225,11 @@ public class JStormUtils {
         int cpuPortNum = (int) (sysCpuNum / cpuWeight);
         if (cpuPortNum < 1) {
 
-            LOG.info("Invalid supervisor.slots.port.cpu.weight setting :"
-                    + cpuWeight + ", cpu cores:" + sysCpuNum);
+            LOG.info("Invalid supervisor.slots.port.cpu.weight setting :" + cpuWeight + ", cpu cores:" + sysCpuNum);
             cpuPortNum = 1;
         }
 
+        Double memWeight = ConfigExtension.getSupervisorSlotsPortMemWeight(conf);
         int memPortNum = Integer.MAX_VALUE;
         Long physicalMemSize = JStormUtils.getPhysicMemorySize();
         if (physicalMemSize == null) {
@@ -1223,7 +1237,7 @@ public class JStormUtils {
         } else {
             LOG.info("Get system memory size :" + physicalMemSize);
             long workerMemSize = ConfigExtension.getMemSizePerWorker(conf);
-            memPortNum = (int) (physicalMemSize / workerMemSize);
+            memPortNum = (int) (physicalMemSize / (workerMemSize * memWeight));
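+            // e.g. (assumed sample numbers) 16e9 bytes of physical memory, 2e9 bytes per
+            // worker and memWeight 1.2 give memPortNum = (int) (16e9 / (2e9 * 1.2)) = 6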
             if (memPortNum < 1) {
                 LOG.info("Invalide worker.memory.size setting:" + workerMemSize);
                 memPortNum = 4;
@@ -1261,14 +1275,10 @@ public class JStormUtils {
     }
 
     public static Object createDisruptorWaitStrategy(Map conf) {
-        String waitStrategy =
-                (String) conf.get(Config.TOPOLOGY_DISRUPTOR_WAIT_STRATEGY);
+        String waitStrategy = (String) conf.get(Config.TOPOLOGY_DISRUPTOR_WAIT_STRATEGY);
         Object ret;
-
-        if (waitStrategy.indexOf("TimeoutBlockingWaitStrategy") != -1) {
-            long timeout =
-                    parseLong(conf.get(Config.TOPOLOGY_DISRUPTOR_WAIT_TIMEOUT),
-                            10);
+        if (waitStrategy.contains("TimeoutBlockingWaitStrategy")) {
+            long timeout = parseLong(conf.get(Config.TOPOLOGY_DISRUPTOR_WAIT_TIMEOUT), 10);
             ret = Utils.newInstance(waitStrategy, timeout, TimeUnit.MILLISECONDS);
         } else {
             ret = Utils.newInstance(waitStrategy);
@@ -1276,4 +1286,64 @@ public class JStormUtils {
 
         return ret;
     }
+
+    public static Object thriftToObject(Object obj) {
+        Object ret;
+        if (obj instanceof org.apache.thrift.TBase) {
+            ret = thriftToMap((org.apache.thrift.TBase) obj);
+        } else if (obj instanceof List) {
+            ret = new ArrayList<>();
+            for (Object item : (List) obj) {
+                ((List) ret).add(thriftToObject(item));
+            }
+        } else if (obj instanceof Map) {
+            ret = new HashMap<String, Object>();
+            Set<Entry> entrySet = ((Map) obj).entrySet();
+            for (Entry entry : entrySet) {
+                ((Map) ret).put(String.valueOf(entry.getKey()), thriftToObject(entry.getValue()));
+            }
+        } else {
+            ret = String.valueOf(obj);
+        }
+
+        return ret;
+    }
+
+    public static Map<String, Object> thriftToMap(org.apache.thrift.TBase thriftObj) {
+        Map<String, Object> ret = new HashMap<String, Object>();
+
+        // field ids of generated thrift structs start at 1; fieldForId returns null for an
+        // unknown id, so iteration stops at the first gap in the id sequence
+        int i = 1;
+        TFieldIdEnum field = thriftObj.fieldForId(i);
+        while (field != null) {
+            if (thriftObj.isSet(field)) {
+                Object obj = thriftObj.getFieldValue(field);
+                ret.put(field.getFieldName(), thriftToObject(obj));
+            }
+            field = thriftObj.fieldForId(++i);
+        }
+
+        return ret;
+    }
+
+    public static List<Map<String, Object>> thriftToMap(List thriftObjs) {
+        List<Map<String, Object>> ret = new ArrayList<Map<String, Object>>();
+
+        for (Object thriftObj : thriftObjs) {
+            ret.add(thriftToMap((org.apache.thrift.TBase) thriftObj));
+        }
+
+        return ret;
+    }
+
+    public static long halfValueOfSum(long v1, long v2, boolean increment) {
+        long ret = (v1 + v2) / 2;
+        if (increment) {
+            ret += (v1 + v2) % 2;
+        }
+        return ret;
+    }
 }
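
A short usage sketch for the new helpers (the struct below is an assumption, not part
of this change):

    // any thrift-generated struct works, e.g. a TopologyInfo fetched from nimbus
    Map<String, Object> flat = JStormUtils.thriftToMap(topologyInfo);
    System.out.println(flat);  // only set fields; nested structs become nested maps

halfValueOfSum rounds the average: halfValueOfSum(3, 4, false) == 3, while
halfValueOfSum(3, 4, true) == 4.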

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/utils/LoadConf.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/LoadConf.java b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/LoadConf.java
index d082fcc..91be977 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/LoadConf.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/LoadConf.java
@@ -21,121 +21,121 @@ import org.yaml.snakeyaml.Yaml;
 import org.yaml.snakeyaml.constructor.SafeConstructor;
 
 public class LoadConf {
-	private static final Logger LOG = LoggerFactory.getLogger(LoadConf.class);
-
-	public static List<URL> findResources(String name) {
-		try {
-			Enumeration<URL> resources = Thread.currentThread().getContextClassLoader().getResources(name);
-			List<URL> ret = new ArrayList<URL>();
-			while (resources.hasMoreElements()) {
-				ret.add(resources.nextElement());
-			}
-			return ret;
-		} catch (IOException e) {
-			throw new RuntimeException(e);
-		}
-	}
-
-	/**
-	 * 
-	 * @param name
-	 * @param mustExist   -- if this is true, the file must exist, otherwise throw exception 
-	 * @param canMultiple -- if this is false and there is multiple conf,  it will throw exception
-	 * @return
-	 */
-	public static Map findAndReadYaml(String name, boolean mustExist, boolean canMultiple) {
-		InputStream in = null;
-		boolean confFileEmpty = false;
-		try {
-			in = getConfigFileInputStream(name, canMultiple);
-			if (null != in) {
-				Yaml yaml = new Yaml(new SafeConstructor());
-				Map ret = (Map) yaml.load(new InputStreamReader(in));
-				if (null != ret) {
-					return new HashMap(ret);
-				} else {
-					confFileEmpty = true;
-				}
-			}
-
-			if (mustExist) {
-				if (confFileEmpty)
-					throw new RuntimeException("Config file " + name + " doesn't have any valid storm configs");
-				else
-					throw new RuntimeException("Could not find config file on classpath " + name);
-			} else {
-				return new HashMap();
-			}
-		} catch (IOException e) {
-			throw new RuntimeException(e);
-		} finally {
-			if (null != in) {
-				try {
-					in.close();
-				} catch (IOException e) {
-					throw new RuntimeException(e);
-				}
-			}
-		}
-	}
-
-	public static InputStream getConfigFileInputStream(String configFilePath, boolean canMultiple) throws IOException {
-		if (null == configFilePath) {
-			throw new IOException("Could not find config file, name not specified");
-		}
-
-		HashSet<URL> resources = new HashSet<URL>(findResources(configFilePath));
-		if (resources.isEmpty()) {
-			File configFile = new File(configFilePath);
-			if (configFile.exists()) {
-				return new FileInputStream(configFile);
-			}
-		} else if (resources.size() > 1 && canMultiple == false) {
-			throw new IOException("Found multiple " + configFilePath
-					+ " resources. You're probably bundling the Storm jars with your topology jar. " + resources);
-		} else {
-			LOG.info("Using " + configFilePath + " from resources");
-			URL resource = resources.iterator().next();
-			return resource.openStream();
-		}
-		return null;
-	}
-
-	public static InputStream getConfigFileInputStream(String configFilePath) throws IOException {
-		return getConfigFileInputStream(configFilePath, true);
-	}
-
-	public static Map LoadYaml(String confPath) {
-
-		return findAndReadYaml(confPath, true, true);
-
-	}
-
-	public static Map LoadProperty(String prop) {
-
-		InputStream in = null;
-		Properties properties = new Properties();
-
-		try {
-			in = getConfigFileInputStream(prop);
-			properties.load(in);
-		} catch (FileNotFoundException e) {
-			throw new RuntimeException("No such file " + prop);
-		} catch (Exception e1) {
-			throw new RuntimeException("Failed to read config file");
-		} finally {
-			if (null != in) {
-				try {
-					in.close();
-				} catch (IOException e) {
-					throw new RuntimeException(e);
-				}
-			}
-		}
-
-		Map ret = new HashMap();
-		ret.putAll(properties);
-		return ret;
-	}
+    private static final Logger LOG = LoggerFactory.getLogger(LoadConf.class);
+
+    public static List<URL> findResources(String name) {
+        try {
+            Enumeration<URL> resources = Thread.currentThread().getContextClassLoader().getResources(name);
+            List<URL> ret = new ArrayList<URL>();
+            while (resources.hasMoreElements()) {
+                ret.add(resources.nextElement());
+            }
+            return ret;
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    /**
+     * 
+     * @param name
+     * @param mustExist -- if this is true, the file must exist, otherwise throw exception
+     * @param canMultiple -- if this is false and there is multiple conf, it will throw exception
+     * @return
+     */
+    public static Map findAndReadYaml(String name, boolean mustExist, boolean canMultiple) {
+        InputStream in = null;
+        boolean confFileEmpty = false;
+        try {
+            in = getConfigFileInputStream(name, canMultiple);
+            if (null != in) {
+                Yaml yaml = new Yaml(new SafeConstructor());
+                Map ret = (Map) yaml.load(new InputStreamReader(in));
+                if (null != ret) {
+                    return new HashMap(ret);
+                } else {
+                    confFileEmpty = true;
+                }
+            }
+
+            if (mustExist) {
+                if (confFileEmpty)
+                    throw new RuntimeException("Config file " + name + " doesn't have any valid storm configs");
+                else
+                    throw new RuntimeException("Could not find config file on classpath " + name);
+            } else {
+                return new HashMap();
+            }
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        } finally {
+            if (null != in) {
+                try {
+                    in.close();
+                } catch (IOException e) {
+                    throw new RuntimeException(e);
+                }
+            }
+        }
+    }
+
+    public static InputStream getConfigFileInputStream(String configFilePath, boolean canMultiple) throws IOException {
+        if (null == configFilePath) {
+            throw new IOException("Could not find config file, name not specified");
+        }
+
+        HashSet<URL> resources = new HashSet<URL>(findResources(configFilePath));
+        if (resources.isEmpty()) {
+            File configFile = new File(configFilePath);
+            if (configFile.exists()) {
+                return new FileInputStream(configFile);
+            }
+        } else if (resources.size() > 1 && !canMultiple) {
+            throw new IOException("Found multiple " + configFilePath + " resources. You're probably bundling the Storm jars with your topology jar. "
+                    + resources);
+        } else {
+            LOG.info("Using " + configFilePath + " from resources");
+            URL resource = resources.iterator().next();
+            return resource.openStream();
+        }
+        return null;
+    }
+
+    public static InputStream getConfigFileInputStream(String configFilePath) throws IOException {
+        return getConfigFileInputStream(configFilePath, true);
+    }
+
+    public static Map LoadYaml(String confPath) {
+        return findAndReadYaml(confPath, true, true);
+    }
+
+    public static Map LoadProperty(String prop) {
+
+        InputStream in = null;
+        Properties properties = new Properties();
+
+        try {
+            in = getConfigFileInputStream(prop);
+            properties.load(in);
+        } catch (FileNotFoundException e) {
+            throw new RuntimeException("No such file " + prop);
+        } catch (Exception e1) {
+            throw new RuntimeException("Failed to read config file");
+        } finally {
+            if (null != in) {
+                try {
+                    in.close();
+                } catch (IOException e) {
+                    throw new RuntimeException(e);
+                }
+            }
+        }
+
+        Map ret = new HashMap();
+        ret.putAll(properties);
+        return ret;
+    }
 
 }
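
A short usage sketch (file name and flag values assumed for illustration):

    Map conf = LoadConf.findAndReadYaml("storm.yaml", false, true);
    // mustExist = false: a missing file yields an empty map instead of a RuntimeException
    // canMultiple = true: duplicate copies of the file on the classpath are tolerated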

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/utils/NetWorkUtils.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/NetWorkUtils.java b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/NetWorkUtils.java
index 8bca599..e005f38 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/NetWorkUtils.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/NetWorkUtils.java
@@ -103,41 +103,39 @@ public class NetWorkUtils {
         try {
             address = InetAddress.getByName(host);
         } catch (UnknownHostException e) {
-            LOG.warn("NetWorkUtil can't transfer hostname(" + host
-                    + ") to ip, return hostname", e);
+            LOG.warn("NetWorkUtil can't transfer hostname(" + host + ") to ip, return hostname", e);
             return host;
         }
         return address.getHostAddress();
     }
-    
-    public static List<String>  host2Ip(List<String> servers) {
-    	if (servers == null || servers.size() == 0) {
-    		return new ArrayList<String>();
-    	}
-    	
-    	Set<String> ret = new HashSet<String>();
-    	for (String server : servers) {
-    		if (StringUtils.isBlank(server)) {
-    			continue;
-    		}
-    		
-    		InetAddress ia;
-			try {
-				ia = InetAddress.getByName(server);
-			} catch (UnknownHostException e) {
-				// TODO Auto-generated catch block
-				LOG.info("Fail to get address of ", server);
-				continue;
-			}
-    		if (ia.isLoopbackAddress() || ia.isAnyLocalAddress()) {
-    			ret.add(NetWorkUtils.ip());
-    		}else {
-    			ret.add(ia.getHostAddress());
-    		}
-    	}
-    	
-    	
-    	return JStormUtils.mk_list(ret);
+
+    public static List<String> host2Ip(List<String> servers) {
+        if (servers == null || servers.size() == 0) {
+            return new ArrayList<String>();
+        }
+
+        Set<String> ret = new HashSet<String>();
+        for (String server : servers) {
+            if (StringUtils.isBlank(server)) {
+                continue;
+            }
+
+            InetAddress ia;
+            try {
+                ia = InetAddress.getByName(server);
+            } catch (UnknownHostException e) {
+                LOG.info("Failed to get address of {}", server);
+                continue;
+            }
+            if (ia.isLoopbackAddress() || ia.isAnyLocalAddress()) {
+                ret.add(NetWorkUtils.ip());
+            } else {
+                ret.add(ia.getHostAddress());
+            }
+        }
+
+        return JStormUtils.mk_list(ret);
     }
 
     public static String ip2Host(String ip) {
@@ -145,8 +143,7 @@ public class NetWorkUtils {
         try {
             address = InetAddress.getByName(ip);
         } catch (UnknownHostException e) {
-            LOG.warn("NetWorkUtil can't transfer ip(" + ip
-                    + ") to hostname, return ip", e);
+            LOG.warn("NetWorkUtil can't transfer ip(" + ip + ") to hostname, return ip", e);
             return ip;
         }
         return address.getHostName();
@@ -168,13 +165,13 @@ public class NetWorkUtils {
         return StringUtils.equalsIgnoreCase(ip1, ip2);
 
     }
-    
+
     public static void main(String[] args) {
-    	List<String> servers = new ArrayList<String>();
-    	servers.add("localhost");
-    	
-    	System.out.println(host2Ip(servers));
-    	
+        List<String> servers = new ArrayList<String>();
+        servers.add("localhost");
+
+        System.out.println(host2Ip(servers));
+
     }
 
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/utils/OSInfo.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/OSInfo.java b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/OSInfo.java
index d4f6e0f..f5acda7 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/OSInfo.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/OSInfo.java
@@ -17,141 +17,144 @@
  */
 package com.alibaba.jstorm.utils;
 
-public class OSInfo {  
-    
-    private static String OS = System.getProperty("os.name").toLowerCase();  
-      
-    private static OSInfo _instance = new OSInfo();  
-      
-    private EPlatform platform;  
-      
-    private OSInfo(){}  
-      
-    public static boolean isLinux(){  
-        return OS.indexOf("linux")>=0;  
-    }  
-      
-    public static boolean isMacOS(){  
-        return OS.indexOf("mac")>=0&&OS.indexOf("os")>0&&OS.indexOf("x")<0;  
-    }  
-      
-    public static boolean isMacOSX(){  
-        return OS.indexOf("mac")>=0&&OS.indexOf("os")>0&&OS.indexOf("x")>0;  
-    }  
-    
+public class OSInfo {
+
+    private static String OS = System.getProperty("os.name").toLowerCase();
+
+    private static OSInfo _instance = new OSInfo();
+
+    private EPlatform platform;
+
+    private OSInfo() {
+    }
+
+    public static boolean isLinux() {
+        return OS.indexOf("linux") >= 0;
+    }
+
+    public static boolean isMacOS() {
+        return OS.indexOf("mac") >= 0 && OS.indexOf("os") > 0 && OS.indexOf("x") < 0;
+    }
+
+    public static boolean isMacOSX() {
+        return OS.indexOf("mac") >= 0 && OS.indexOf("os") > 0 && OS.indexOf("x") > 0;
+    }
+
     public static boolean isMac() {
-        return OS.indexOf("mac")>=0&&OS.indexOf("os")>0;
-    }
-      
-    public static boolean isWindows(){  
-        return OS.indexOf("windows")>=0;  
-    }  
-      
-    public static boolean isOS2(){  
-        return OS.indexOf("os/2")>=0;  
-    }  
-      
-    public static boolean isSolaris(){  
-        return OS.indexOf("solaris")>=0;  
-    }  
-      
-    public static boolean isSunOS(){  
-        return OS.indexOf("sunos")>=0;  
-    }  
-      
-    public static boolean isMPEiX(){  
-        return OS.indexOf("mpe/ix")>=0;  
-    }  
-      
-    public static boolean isHPUX(){  
-        return OS.indexOf("hp-ux")>=0;  
-    }  
-      
-    public static boolean isAix(){  
-        return OS.indexOf("aix")>=0;  
-    }  
-      
-    public static boolean isOS390(){  
-        return OS.indexOf("os/390")>=0;  
-    }  
-      
-    public static boolean isFreeBSD(){  
-        return OS.indexOf("freebsd")>=0;  
-    }  
-      
-    public static boolean isIrix(){  
-        return OS.indexOf("irix")>=0;  
-    }  
-      
-    public static boolean isDigitalUnix(){  
-        return OS.indexOf("digital")>=0&&OS.indexOf("unix")>0;  
-    }  
-      
-    public static boolean isNetWare(){  
-        return OS.indexOf("netware")>=0;  
-    }  
-      
-    public static boolean isOSF1(){  
-        return OS.indexOf("osf1")>=0;  
-    }  
-      
-    public static boolean isOpenVMS(){  
-        return OS.indexOf("openvms")>=0;  
-    }  
-      
-    /** 
-     * Get OS name 
-     * @return OS name 
-     */  
-    public static EPlatform getOSname(){  
-        if(isAix()){  
-            _instance.platform = EPlatform.AIX;  
-        }else if (isDigitalUnix()) {  
-            _instance.platform = EPlatform.Digital_Unix;  
-        }else if (isFreeBSD()) {  
-            _instance.platform = EPlatform.FreeBSD;  
-        }else if (isHPUX()) {  
-            _instance.platform = EPlatform.HP_UX;  
-        }else if (isIrix()) {  
-            _instance.platform = EPlatform.Irix;  
-        }else if (isLinux()) {  
-            _instance.platform = EPlatform.Linux;  
-        }else if (isMacOS()) {  
-            _instance.platform = EPlatform.Mac_OS;  
-        }else if (isMacOSX()) {  
-            _instance.platform = EPlatform.Mac_OS_X;  
-        }else if (isMPEiX()) {  
-            _instance.platform = EPlatform.MPEiX;  
-        }else if (isNetWare()) {  
-            _instance.platform = EPlatform.NetWare_411;  
-        }else if (isOpenVMS()) {  
-            _instance.platform = EPlatform.OpenVMS;  
-        }else if (isOS2()) {  
-            _instance.platform = EPlatform.OS2;  
-        }else if (isOS390()) {  
-            _instance.platform = EPlatform.OS390;  
-        }else if (isOSF1()) {  
-            _instance.platform = EPlatform.OSF1;  
-        }else if (isSolaris()) {  
-            _instance.platform = EPlatform.Solaris;  
-        }else if (isSunOS()) {  
-            _instance.platform = EPlatform.SunOS;  
-        }else if (isWindows()) {  
-            _instance.platform = EPlatform.Windows;  
-        }else{  
-            _instance.platform = EPlatform.Others;  
-        }  
-        return _instance.platform;  
-    }  
-    /** 
-     * @param args 
-     */  
-    public static void main(String[] args) {  
-        System.out.println( System.getProperty("os.name") );  
-        System.out.println( System.getProperty("os.version") );  
-        System.out.println( System.getProperty("os.arch") );  
-        
-        System.out.println(OSInfo.getOSname());  
-    }  
-  
-}  
+        return OS.indexOf("mac") >= 0 && OS.indexOf("os") > 0;
+    }
+
+    public static boolean isWindows() {
+        return OS.indexOf("windows") >= 0;
+    }
+
+    public static boolean isOS2() {
+        return OS.indexOf("os/2") >= 0;
+    }
+
+    public static boolean isSolaris() {
+        return OS.indexOf("solaris") >= 0;
+    }
+
+    public static boolean isSunOS() {
+        return OS.indexOf("sunos") >= 0;
+    }
+
+    public static boolean isMPEiX() {
+        return OS.indexOf("mpe/ix") >= 0;
+    }
+
+    public static boolean isHPUX() {
+        return OS.indexOf("hp-ux") >= 0;
+    }
+
+    public static boolean isAix() {
+        return OS.indexOf("aix") >= 0;
+    }
+
+    public static boolean isOS390() {
+        return OS.indexOf("os/390") >= 0;
+    }
+
+    public static boolean isFreeBSD() {
+        return OS.indexOf("freebsd") >= 0;
+    }
+
+    public static boolean isIrix() {
+        return OS.indexOf("irix") >= 0;
+    }
+
+    public static boolean isDigitalUnix() {
+        return OS.indexOf("digital") >= 0 && OS.indexOf("unix") > 0;
+    }
+
+    public static boolean isNetWare() {
+        return OS.indexOf("netware") >= 0;
+    }
+
+    public static boolean isOSF1() {
+        return OS.indexOf("osf1") >= 0;
+    }
+
+    public static boolean isOpenVMS() {
+        return OS.indexOf("openvms") >= 0;
+    }
+
+    /**
+     * Get OS name
+     * 
+     * @return OS name
+     */
+    public static EPlatform getOSname() {
+        if (isAix()) {
+            _instance.platform = EPlatform.AIX;
+        } else if (isDigitalUnix()) {
+            _instance.platform = EPlatform.Digital_Unix;
+        } else if (isFreeBSD()) {
+            _instance.platform = EPlatform.FreeBSD;
+        } else if (isHPUX()) {
+            _instance.platform = EPlatform.HP_UX;
+        } else if (isIrix()) {
+            _instance.platform = EPlatform.Irix;
+        } else if (isLinux()) {
+            _instance.platform = EPlatform.Linux;
+        } else if (isMacOS()) {
+            _instance.platform = EPlatform.Mac_OS;
+        } else if (isMacOSX()) {
+            _instance.platform = EPlatform.Mac_OS_X;
+        } else if (isMPEiX()) {
+            _instance.platform = EPlatform.MPEiX;
+        } else if (isNetWare()) {
+            _instance.platform = EPlatform.NetWare_411;
+        } else if (isOpenVMS()) {
+            _instance.platform = EPlatform.OpenVMS;
+        } else if (isOS2()) {
+            _instance.platform = EPlatform.OS2;
+        } else if (isOS390()) {
+            _instance.platform = EPlatform.OS390;
+        } else if (isOSF1()) {
+            _instance.platform = EPlatform.OSF1;
+        } else if (isSolaris()) {
+            _instance.platform = EPlatform.Solaris;
+        } else if (isSunOS()) {
+            _instance.platform = EPlatform.SunOS;
+        } else if (isWindows()) {
+            _instance.platform = EPlatform.Windows;
+        } else {
+            _instance.platform = EPlatform.Others;
+        }
+        return _instance.platform;
+    }
+
+    /**
+     * @param args
+     */
+    public static void main(String[] args) {
+        System.out.println(System.getProperty("os.name"));
+        System.out.println(System.getProperty("os.version"));
+        System.out.println(System.getProperty("os.arch"));
+
+        System.out.println(OSInfo.getOSname());
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/utils/OlderFileFilter.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/OlderFileFilter.java b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/OlderFileFilter.java
index 13b1d98..30c2726 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/OlderFileFilter.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/OlderFileFilter.java
@@ -39,8 +39,25 @@ public class OlderFileFilter implements FileFilter {
 
         long current_time = System.currentTimeMillis();
 
-        return (pathname.isFile() && (pathname.lastModified() + seconds * 1000 <= current_time))
-                || pathname.isDirectory();
+        return pathname.lastModified() + seconds * 1000 <= current_time;
+    }
+
+    public static void main(String[] args) {
+        long current_time = System.currentTimeMillis();
+        String test = "test";
+
+        File file = new File(test);
+        file.delete();
+        file.mkdir();
+        file.setLastModified(current_time);
+
+        JStormUtils.sleepMs(10 * 1000);
+
+        File newFile = new File(test);
+        System.out.println("modify time: " + newFile.lastModified() + ", raw:" + current_time);
     }
 
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/utils/Pair.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/Pair.java b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/Pair.java
index 49d35d6..1bc8b56 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/Pair.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/Pair.java
@@ -43,11 +43,11 @@ public class Pair<F, S> {
     }
 
     @Override
-    public String toString(){
+    public String toString() {
         StringBuilder sb = new StringBuilder();
-        sb.append("first:"+ first);
+        sb.append("first:" + first);
         sb.append(":");
-        sb.append("sencond:"+ second);
+        sb.append("sencond:" + second);
         return sb.toString();
     }
 }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/utils/PathUtils.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/PathUtils.java b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/PathUtils.java
index 939b81b..b3732dc 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/PathUtils.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/PathUtils.java
@@ -44,7 +44,7 @@ public class PathUtils {
      */
     public static List<String> tokenize_path(String path) {
         String[] toks = path.split(SEPERATOR);
-        java.util.ArrayList<String> rtn = new ArrayList<String>();
+        ArrayList<String> rtn = new ArrayList<String>();
         for (String str : toks) {
             if (!str.isEmpty()) {
                 rtn.add(str);

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/utils/RandomRange.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/RandomRange.java b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/RandomRange.java
index e3be73f..20b9535 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/RandomRange.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/RandomRange.java
@@ -20,8 +20,7 @@ package com.alibaba.jstorm.utils;
 import java.util.ArrayList;
 
 /**
- * Shuffle the Range, This class is used in shuffle grouping, it is better than
- * random, which can't make sure balance.
+ * Shuffle the range. This class is used in shuffle grouping; it is better than plain random, which cannot guarantee balance.
  * 
  * @author yannian
  * 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/utils/RotatingMap.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/RotatingMap.java b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/RotatingMap.java
index 454e987..877e1d6 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/RotatingMap.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/RotatingMap.java
@@ -28,9 +28,8 @@ import java.util.concurrent.LinkedBlockingDeque;
 /**
  * RotatingMap must be used under thread-safe environment
  * 
- * Expires keys that have not been updated in the configured number of seconds.
- * The algorithm used will take between expirationSecs and expirationSecs * (1 +
- * 1 / (numBuckets-1)) to actually expire the message.
+ * Expires keys that have not been updated in the configured number of seconds. The algorithm used will take between expirationSecs and expirationSecs * (1 + 1
+ * / (numBuckets-1)) to actually expire the message.
  * 
  * get, put, remove, containsKey, and size take O(numBuckets) time to run.
  * 
@@ -45,8 +44,7 @@ public class RotatingMap<K, V> implements TimeOutMap<K, V> {
 
     private final Object lock = new Object();
 
-    public RotatingMap(int numBuckets, ExpiredCallback<K, V> callback,
-            boolean isSingleThread) {
+    public RotatingMap(int numBuckets, ExpiredCallback<K, V> callback, boolean isSingleThread) {
         if (numBuckets < 2) {
             throw new IllegalArgumentException("numBuckets must be >= 2");
         }
@@ -121,8 +119,7 @@ public class RotatingMap<K, V> implements TimeOutMap<K, V> {
     /**
      * Remove item from Rotate
      * 
-     * On the side of performance, scanning from header is faster On the side of
-     * logic, it should scan from the end to first.
+     * For performance, scanning from the head is faster; for correctness, it should scan from the end to the first.
      * 
      * @param key
      * @return

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/utils/SystemOperation.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/SystemOperation.java b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/SystemOperation.java
index ba7547b..5bc2252 100644
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/SystemOperation.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/SystemOperation.java
@@ -25,20 +25,16 @@ import org.slf4j.LoggerFactory;
 
 public class SystemOperation {
 
-    public static final Logger LOG = LoggerFactory
-            .getLogger(SystemOperation.class);
+    public static final Logger LOG = LoggerFactory.getLogger(SystemOperation.class);
 
     public static boolean isRoot() throws IOException {
         String result = SystemOperation.exec("echo $EUID").substring(0, 1);
-        return Integer.valueOf(result.substring(0, result.length())).intValue() == 0 ? true
-                : false;
+        return Integer.valueOf(result).intValue() == 0;
     };
 
-    public static void mount(String name, String target, String type,
-            String data) throws IOException {
+    public static void mount(String name, String target, String type, String data) throws IOException {
         StringBuilder sb = new StringBuilder();
-        sb.append("mount -t ").append(type).append(" -o ").append(data)
-                .append(" ").append(name).append(" ").append(target);
+        sb.append("mount -t ").append(type).append(" -o ").append(data).append(" ").append(name).append(" ").append(target);
         SystemOperation.exec(sb.toString());
     }
 
@@ -50,9 +46,7 @@ public class SystemOperation {
 
     public static String exec(String cmd) throws IOException {
         LOG.debug("Shell cmd: " + cmd);
-        Process process =
-                new ProcessBuilder(new String[] { "/bin/bash", "-c", cmd })
-                        .start();
+        Process process = new ProcessBuilder(new String[] { "/bin/bash", "-c", cmd }).start();
         try {
             process.waitFor();
             String output = IOUtils.toString(process.getInputStream());
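
A minimal usage sketch for the helpers above (assumes a POSIX /bin/bash on the host,
since exec delegates to "/bin/bash -c"):

    String euid = SystemOperation.exec("echo $EUID");               // captured stdout
    SystemOperation.mount("tmpfs", "/mnt/t", "tmpfs", "size=64m");  // mount -t tmpfs -o size=64m tmpfs /mnt/t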

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/utils/Thrift.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/Thrift.java b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/Thrift.java
index c55751c..5116c4f 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/Thrift.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/Thrift.java
@@ -17,34 +17,22 @@
  */
 package com.alibaba.jstorm.utils;
 
-import java.lang.reflect.Constructor;
-import java.nio.ByteBuffer;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import backtype.storm.generated.Bolt;
-import backtype.storm.generated.ComponentCommon;
-import backtype.storm.generated.ComponentObject;
-import backtype.storm.generated.GlobalStreamId;
-import backtype.storm.generated.Grouping;
-import backtype.storm.generated.JavaObject;
-import backtype.storm.generated.JavaObjectArg;
-import backtype.storm.generated.NullStruct;
-import backtype.storm.generated.StormTopology;
+import backtype.storm.generated.*;
 import backtype.storm.generated.StormTopology._Fields;
-import backtype.storm.generated.StreamInfo;
-import backtype.storm.generated.TopologyInitialStatus;
 import backtype.storm.grouping.CustomStreamGrouping;
 import backtype.storm.task.IBolt;
 import backtype.storm.utils.Utils;
-
 import com.alibaba.jstorm.cluster.StormStatus;
 import com.alibaba.jstorm.daemon.nimbus.StatusType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.lang.reflect.Constructor;
+import java.nio.ByteBuffer;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
 
 /**
  * Thrift utils
@@ -57,8 +45,7 @@ import com.alibaba.jstorm.daemon.nimbus.StatusType;
 public class Thrift {
     private static Logger LOG = LoggerFactory.getLogger(Thrift.class);
 
-    public static StormStatus topologyInitialStatusToStormStatus(
-            TopologyInitialStatus tStatus) {
+    public static StormStatus topologyInitialStatusToStormStatus(TopologyInitialStatus tStatus) {
         if (tStatus.equals(TopologyInitialStatus.ACTIVE)) {
             return new StormStatus(StatusType.active);
         } else {
@@ -79,16 +66,13 @@ public class Thrift {
                 paraTypes[i] = Integer.class;
             } else if (arg.getSetField().equals(JavaObjectArg._Fields.LONG_ARG)) {
                 paraTypes[i] = Long.class;
-            } else if (arg.getSetField().equals(
-                    JavaObjectArg._Fields.STRING_ARG)) {
+            } else if (arg.getSetField().equals(JavaObjectArg._Fields.STRING_ARG)) {
                 paraTypes[i] = String.class;
             } else if (arg.getSetField().equals(JavaObjectArg._Fields.BOOL_ARG)) {
                 paraTypes[i] = Boolean.class;
-            } else if (arg.getSetField().equals(
-                    JavaObjectArg._Fields.BINARY_ARG)) {
+            } else if (arg.getSetField().equals(JavaObjectArg._Fields.BINARY_ARG)) {
                 paraTypes[i] = ByteBuffer.class;
-            } else if (arg.getSetField().equals(
-                    JavaObjectArg._Fields.DOUBLE_ARG)) {
+            } else if (arg.getSetField().equals(JavaObjectArg._Fields.DOUBLE_ARG)) {
                 paraTypes[i] = Double.class;
             } else {
                 paraTypes[i] = Object.class;
@@ -113,8 +97,7 @@ public class Thrift {
 
     public static List<String> fieldGrouping(Grouping grouping) {
         if (!Grouping._Fields.FIELDS.equals(groupingType(grouping))) {
-            throw new IllegalArgumentException(
-                    "Tried to get grouping fields from non fields grouping");
+            throw new IllegalArgumentException("Tried to get grouping fields from non fields grouping");
         }
 
         return grouping.get_fields();
@@ -152,9 +135,11 @@ public class Thrift {
         return Grouping.direct(new NullStruct());
     }
 
-    private static ComponentCommon mkComponentcommon(
-            Map<GlobalStreamId, Grouping> inputs,
-            HashMap<String, StreamInfo> output_spec, Integer parallelism_hint) {
+    public static Grouping mkAllGrouping() {
+        return Grouping.all(new NullStruct());
+    }
+
+    private static ComponentCommon mkComponentcommon(Map<GlobalStreamId, Grouping> inputs, HashMap<String, StreamInfo> output_spec, Integer parallelism_hint) {
         ComponentCommon ret = new ComponentCommon(inputs, output_spec);
         if (parallelism_hint != null) {
             ret.set_parallelism_hint(parallelism_hint);
@@ -162,8 +147,7 @@ public class Thrift {
         return ret;
     }
 
-    public static Bolt mkBolt(Map<GlobalStreamId, Grouping> inputs, IBolt bolt,
-            HashMap<String, StreamInfo> output, Integer p) {
+    public static Bolt mkBolt(Map<GlobalStreamId, Grouping> inputs, IBolt bolt, HashMap<String, StreamInfo> output, Integer p) {
         ComponentCommon common = mkComponentcommon(inputs, output, p);
         byte[] boltSer = Utils.serialize(bolt);
         ComponentObject component = ComponentObject.serialized_java(boltSer);
@@ -171,8 +155,7 @@ public class Thrift {
     }
 
     public static StormTopology._Fields[] STORM_TOPOLOGY_FIELDS = null;
-    public static StormTopology._Fields[] SPOUT_FIELDS = {
-            StormTopology._Fields.SPOUTS, StormTopology._Fields.STATE_SPOUTS };
+    public static StormTopology._Fields[] SPOUT_FIELDS = { StormTopology._Fields.SPOUTS, StormTopology._Fields.STATE_SPOUTS };
     static {
         Set<_Fields> keys = StormTopology.metaDataMap.keySet();
         STORM_TOPOLOGY_FIELDS = new StormTopology._Fields[keys.size()];

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/utils/TimeCacheMap.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/TimeCacheMap.java b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/TimeCacheMap.java
index c56e307..4d2ea0f 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/TimeCacheMap.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/TimeCacheMap.java
@@ -24,9 +24,8 @@ import java.util.Map;
 import java.util.Map.Entry;
 
 /**
- * Expires keys that have not been updated in the configured number of seconds.
- * The algorithm used will take between expirationSecs and expirationSecs * (1 +
- * 1 / (numBuckets-1)) to actually expire the message.
+ * Expires keys that have not been updated in the configured number of seconds. The algorithm used will take between expirationSecs and expirationSecs * (1 + 1
+ * / (numBuckets-1)) to actually expire the message.
  * 
  * get, put, remove, containsKey, and size take O(numBuckets) time to run.
  * 
@@ -42,8 +41,7 @@ public class TimeCacheMap<K, V> implements TimeOutMap<K, V> {
     private Thread _cleaner;
     private ExpiredCallback _callback;
 
-    public TimeCacheMap(int expirationSecs, int numBuckets,
-            ExpiredCallback<K, V> callback) {
+    public TimeCacheMap(int expirationSecs, int numBuckets, ExpiredCallback<K, V> callback) {
         if (numBuckets < 2) {
             throw new IllegalArgumentException("numBuckets must be >= 2");
         }

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/utils/TimeCacheQueue.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/TimeCacheQueue.java b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/TimeCacheQueue.java
index 8468310..00e5cf3 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/TimeCacheQueue.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/TimeCacheQueue.java
@@ -25,14 +25,12 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * Expires keys that have not been updated in the configured number of seconds.
- * The algorithm used will take between expirationSecs and expirationSecs * (1 +
- * 1 / (numBuckets-1)) to actually expire the message.
+ * Expires keys that have not been updated in the configured number of seconds. The algorithm used will take between expirationSecs and expirationSecs * (1 + 1
+ * / (numBuckets-1)) to actually expire the message.
  * 
  * get, put, remove, containsKey, and size take O(numBuckets) time to run.
  * 
- * The advantage of this design is that the expiration thread only locks the
- * object for O(1) time, meaning the object is essentially always available for
+ * The advantage of this design is that the expiration thread only locks the object for O(1) time, meaning the object is essentially always available for
  * poll/offer
  */
 public class TimeCacheQueue<K> {
@@ -44,8 +42,7 @@ public class TimeCacheQueue<K> {
     }
 
     public static class DefaultExpiredCallback<K> implements ExpiredCallback<K> {
-        protected static final Logger LOG = LoggerFactory
-                .getLogger(TimeCacheQueue.DefaultExpiredCallback.class);
+        protected static final Logger LOG = LoggerFactory.getLogger(DefaultExpiredCallback.class);
 
         protected String queueName;
 
@@ -54,8 +51,7 @@ public class TimeCacheQueue<K> {
         }
 
         public void expire(K entry) {
-            LOG.info("TimeCacheQueue " + queueName + " entry:" + entry
-                    + ", timeout");
+            LOG.info("TimeCacheQueue " + queueName + " entry:" + entry + ", timeout");
         }
     }
 
@@ -65,8 +61,7 @@ public class TimeCacheQueue<K> {
     protected Thread _cleaner;
     protected ExpiredCallback _callback;
 
-    public TimeCacheQueue(int expirationSecs, int numBuckets,
-            ExpiredCallback<K> callback) {
+    public TimeCacheQueue(int expirationSecs, int numBuckets, ExpiredCallback<K> callback) {
         if (numBuckets < 2) {
             throw new IllegalArgumentException("numBuckets must be >= 2");
         }
@@ -130,8 +125,7 @@ public class TimeCacheQueue<K> {
 
     public K poll() {
         synchronized (_lock) {
-            Iterator<LinkedBlockingDeque<K>> itor =
-                    _buckets.descendingIterator();
+            Iterator<LinkedBlockingDeque<K>> itor = _buckets.descendingIterator();
             while (itor.hasNext()) {
                 LinkedBlockingDeque<K> bucket = itor.next();
                 K entry = bucket.poll();

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/utils/TimeFormat.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/TimeFormat.java b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/TimeFormat.java
index a5c189f..fbae631 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/TimeFormat.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/TimeFormat.java
@@ -40,29 +40,24 @@ public class TimeFormat {
 
     public static final long ONE_DAY_HOURS = 24;
 
-    public static final long ONE_MINUTE_MILLISECONDS = ONE_MINUTE_SECONDS
-            * ONE_SECOND_MILLISECONDS;
+    public static final long ONE_MINUTE_MILLISECONDS = ONE_MINUTE_SECONDS * ONE_SECOND_MILLISECONDS;
 
-    public static final long ONE_HOUR_MILLISECONDS = ONE_HOUR_MINUTES
-            * ONE_MINUTE_MILLISECONDS;
+    public static final long ONE_HOUR_MILLISECONDS = ONE_HOUR_MINUTES * ONE_MINUTE_MILLISECONDS;
 
-    public static final long ONE_DAY_MILLISECONDS = ONE_DAY_HOURS
-            * ONE_HOUR_MILLISECONDS;
+    public static final long ONE_DAY_MILLISECONDS = ONE_DAY_HOURS * ONE_HOUR_MILLISECONDS;
 
     public static Date convertDate(String dateStr, String format) {
         Date date = null;
         try {
             if (format != null) {
-                SimpleDateFormat simpleDateFormat =
-                        new SimpleDateFormat(format);
+                SimpleDateFormat simpleDateFormat = new SimpleDateFormat(format);
                 date = simpleDateFormat.parse(dateStr);
             } else {
                 date = new Date(dateStr);
             }
 
         } catch (Exception ex) {
-            log.error("Failed to convert " + dateStr + " to Date, format:"
-                    + format);
+            log.error("Failed to convert " + dateStr + " to Date, format:" + format);
             return null;
         }
         return date;
@@ -77,8 +72,7 @@ public class TimeFormat {
             ret = sdf.format(date);
 
         } catch (Exception e) {
-            log.error("Failed to convert " + date + " to String, format:"
-                    + format);
+            log.error("Failed to convert " + date + " to String, format:" + format);
             return null;
         }
         return ret;
@@ -207,12 +201,9 @@ public class TimeFormat {
         tomorrow.set(Calendar.MINUTE, 0);
         Date startTime = tomorrow.getTime();
 
-        long hourdiff =
-                (startTime.getTime() - current.getTime())
-                        / ONE_HOUR_MILLISECONDS;
+        long hourdiff = (startTime.getTime() - current.getTime()) / ONE_HOUR_MILLISECONDS;
 
-        System.out.println("Current:" + current + ", tomorrow" + startTime
-                + ", diff hour" + hourdiff);
+        System.out.println("Current:" + current + ", tomorrow" + startTime + ", diff hour" + hourdiff);
 
     }
 

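A design note on the conversions above: SimpleDateFormat is not thread-safe, which is presumably why convertDate and convertStr build a fresh instance on every call instead of sharing a static one. A short sketch of that per-call pattern next to the usual ThreadLocal alternative (the latter is just an option, not what TimeFormat does):

import java.text.SimpleDateFormat;
import java.util.Date;

public class DateFormatSafety {
    // 1) What TimeFormat does: a fresh instance per call. Simple and safe,
    //    at the cost of a short-lived allocation per conversion.
    public static String formatPerCall(Date d, String fmt) {
        return new SimpleDateFormat(fmt).format(d);
    }

    // 2) A common alternative: one cached instance per thread.
    private static final ThreadLocal<SimpleDateFormat> FMT =
            ThreadLocal.withInitial(() -> new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"));

    public static String formatPerThread(Date d) {
        return FMT.get().format(d);
    }

    public static void main(String[] args) {
        Date now = new Date();
        System.out.println(formatPerCall(now, "yyyy-MM-dd HH:mm:ss"));
        System.out.println(formatPerThread(now));
    }
}
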
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/utils/TimeUtils.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/TimeUtils.java b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/TimeUtils.java
index 8c9bd3d..9068731 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/utils/TimeUtils.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/utils/TimeUtils.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
+ * <p/>
  * http://www.apache.org/licenses/LICENSE-2.0
- *
+ * <p/>
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -18,19 +18,23 @@
 package com.alibaba.jstorm.utils;
 
 import backtype.storm.utils.Time;
+import com.alibaba.jstorm.metric.AsmWindow;
+
+import java.text.SimpleDateFormat;
+import java.util.Calendar;
+import java.util.Date;
 
 /**
  * Time utils
- * 
+ *
  * @author yannian
- * 
  */
 public class TimeUtils {
 
+    public static final long NS_PER_MS = 1000000L;
+
     /**
      * Take care of int overflow
-     * 
-     * @return
      */
     public static int current_time_secs() {
         return (int) (Time.currentTimeMillis() / 1000);
@@ -38,8 +42,6 @@ public class TimeUtils {
 
     /**
      * Take care of int overflow
-     * 
-     * @return
      */
     public static int time_delta(int time_secs) {
         return current_time_secs() - time_secs;
@@ -48,4 +50,91 @@ public class TimeUtils {
     public static long time_delta_ms(long time_ms) {
         return System.currentTimeMillis() - time_ms;
     }
+
+    public static final long NS_PER_US = 1000L;
+
+    public static final int ONE_SEC = 1;
+    public static final int SEC_PER_MIN = 60;
+    public static final int SEC_PER_DAY = 86400;
+
+    public static boolean isTimeAligned() {
+        return current_time_secs() % SEC_PER_DAY % SEC_PER_MIN == 0;
+    }
+
+    public static int secOffset() {
+        return current_time_secs() % SEC_PER_DAY % SEC_PER_MIN;
+    }
+
+    public static int secOffset(long ts) {
+        return (int) (ts % SEC_PER_DAY % SEC_PER_MIN);
+    }
+
+    public static int winSecOffset(long ts, int window) {
+        return (int) (ts / 1000 % SEC_PER_DAY % window);
+    }
+
+    public static long alignTimeToWin(long ts, int win) {
+        if (win != AsmWindow.D1_WINDOW) {
+            long curTimeSec = ts / 1000;
+            return (curTimeSec - curTimeSec % win) * 1000;
+        } else {
+            Calendar cal = Calendar.getInstance();
+            cal.setTimeInMillis(ts);
+            int year = cal.get(Calendar.YEAR);
+            int month = cal.get(Calendar.MONTH);
+            int day = cal.get(Calendar.DAY_OF_MONTH);
+            int hour = cal.get(Calendar.HOUR_OF_DAY);
+            int min = cal.get(Calendar.MINUTE);
+            int sec = cal.get(Calendar.SECOND);
+            if (sec + min + hour > 0) {
+                cal.set(year, month, day + 1, 0, 0, 0);
+            }
+            cal.set(Calendar.MILLISECOND, 0);
+            return cal.getTimeInMillis();
+        }
+    }
+
+    public static long alignTimeToMin(long ts) {
+        return alignTimeToWin(ts, AsmWindow.M1_WINDOW);
+    }
+
+    public static String toTimeStr(Date time) {
+        int hour = time.getHours();
+        int min = time.getMinutes();
+        StringBuilder sb = new StringBuilder();
+        if (hour < 10) {
+            sb.append(0).append(hour);
+        } else {
+            sb.append(hour);
+        }
+        sb.append(":");
+        if (min < 10) {
+            sb.append(0).append(min);
+        } else {
+            sb.append(min);
+        }
+        return sb.toString();
+    }
+
+    public static String format(int curTimeSec) {
+        return format(new Date(curTimeSec * 1000L), "yyyy-MM-dd HH:mm:ss");
+    }
+
+    public static String format(Date time, String fmt) {
+        SimpleDateFormat df = new SimpleDateFormat(fmt);
+        return df.format(time);
+    }
+
+
+    public static void main(String[] args) throws Exception {
+        System.out.println(new Date(alignTimeToWin(System.currentTimeMillis(), AsmWindow.M1_WINDOW)));
+        System.out.println(new Date(alignTimeToWin(System.currentTimeMillis(), AsmWindow.M10_WINDOW)));
+        System.out.println(new Date(alignTimeToWin(System.currentTimeMillis(), AsmWindow.H2_WINDOW)));
+        System.out.println(new Date(alignTimeToWin(System.currentTimeMillis(), AsmWindow.D1_WINDOW)));
+
+        Calendar cal = Calendar.getInstance();
+        cal.set(2015, 6, 23, 0, 0, 0);
+        System.out.println(new Date(alignTimeToWin(cal.getTimeInMillis(), AsmWindow.D1_WINDOW)));
+
+        System.out.println(format(TimeUtils.current_time_secs()));
+    }
 }

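For every window except the one-day window, alignTimeToWin above is plain integer truncation to the window boundary. A self-contained check of that behavior (the window constants below are assumed stand-ins; AsmWindow's actual values are not shown in this hunk):

public class AlignDemo {
    static final int M1_WINDOW = 60;   // assumed: one minute, in seconds
    static final int M10_WINDOW = 600; // assumed: ten minutes, in seconds

    static long alignTimeToWin(long ts, int win) {
        long sec = ts / 1000;
        return (sec - sec % win) * 1000; // truncate down to the window boundary
    }

    public static void main(String[] args) {
        long base = 1_448_985_000_000L; // an instant on a 10-minute boundary
        long ts = base + 37_500L;       // 37.5 seconds past that boundary
        System.out.println(alignTimeToWin(ts, M1_WINDOW) == base);  // true
        System.out.println(alignTimeToWin(ts, M10_WINDOW) == base); // true
    }
}
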
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/zk/ZkEventTypes.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/zk/ZkEventTypes.java b/jstorm-core/src/main/java/com/alibaba/jstorm/zk/ZkEventTypes.java
index 09c25a5..eab0212 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/zk/ZkEventTypes.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/zk/ZkEventTypes.java
@@ -32,8 +32,7 @@ public class ZkEventTypes {
         map.put(Watcher.Event.EventType.NodeCreated, ":node-created");
         map.put(Watcher.Event.EventType.NodeDeleted, ":node-deleted");
         map.put(Watcher.Event.EventType.NodeDataChanged, ":node-data-changed");
-        map.put(Watcher.Event.EventType.NodeChildrenChanged,
-                ":node-children-changed");
+        map.put(Watcher.Event.EventType.NodeChildrenChanged, ":node-children-changed");
 
     }
 

http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/com/alibaba/jstorm/zk/ZkTool.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/com/alibaba/jstorm/zk/ZkTool.java b/jstorm-core/src/main/java/com/alibaba/jstorm/zk/ZkTool.java
index b726781..a098730 100755
--- a/jstorm-core/src/main/java/com/alibaba/jstorm/zk/ZkTool.java
+++ b/jstorm-core/src/main/java/com/alibaba/jstorm/zk/ZkTool.java
@@ -20,34 +20,44 @@ package com.alibaba.jstorm.zk;
 import java.util.List;
 import java.util.Map;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import com.alibaba.jstorm.cluster.Cluster;
+import org.apache.log4j.Logger;
 
 import backtype.storm.Config;
 import backtype.storm.utils.Utils;
 
-import com.alibaba.jstorm.cluster.Cluster;
 import com.alibaba.jstorm.cluster.ClusterState;
 import com.alibaba.jstorm.cluster.DistributedClusterState;
 import com.google.common.collect.Maps;
 
 public class ZkTool {
-    private static Logger LOG = LoggerFactory.getLogger(ZkTool.class);
+    private static Logger LOG = Logger.getLogger(ZkTool.class);
 
     public static final String READ_CMD = "read";
 
     public static final String RM_CMD = "rm";
 
+    public static final String LIST_CMD = "list";
+
+    public static final String CLEAN_CMD = "clean";
+
     public static void usage() {
         LOG.info("Read ZK node's data, please do as following:");
         LOG.info(ZkTool.class.getName() + " read zkpath");
 
         LOG.info("\nDelete topology backup assignment, please do as following:");
         LOG.info(ZkTool.class.getName() + " rm topologyname");
+
+        LOG.info("\nlist subdirectory of zkPath , please do as following:");
+        LOG.info(ZkTool.class.getName() + " list zkpath");
+
+        LOG.info("\nDelete all nodes about a topologyId of zk , please do as following:");
+        LOG.info(ZkTool.class.getName() + " clean topologyId");
+
     }
 
     public static String getData(DistributedClusterState zkClusterState,
-            String path) throws Exception {
+                                 String path) throws Exception {
         byte[] data = zkClusterState.get_data(path, false);
         if (data == null || data.length == 0) {
             return null;
@@ -58,6 +68,135 @@ public class ZkTool {
         return obj.toString();
     }
 
+
+    public static void list(String path) {
+        DistributedClusterState zkClusterState = null;
+
+        try {
+            conf.put(Config.STORM_ZOOKEEPER_ROOT, "/");
+
+            zkClusterState = new DistributedClusterState(conf);
+
+            List<String> children = zkClusterState.get_children(path, false);
+            if (children == null || children.isEmpty()) {
+                LOG.info("No children of " + path);
+            } else {
+                StringBuilder sb = new StringBuilder();
+                sb.append("Zk node children of ").append(path).append("\n");
+                for (String str : children) {
+                    sb.append(" ").append(str).append(",");
+                }
+                sb.append("\n");
+                LOG.info(sb.toString());
+            }
+        } catch (Exception e) {
+            if (zkClusterState == null) {
+                LOG.error("Failed to connect to ZK", e);
+            } else {
+                LOG.error("Failed to list children of " + path, e);
+            }
+            }
+        } finally {
+            if (zkClusterState != null) {
+                zkClusterState.close();
+            }
+        }
+    }
+
+    /**
+     * Warning: this method can't invalidate the zk cache right away, because
+     * it goes through a fresh DistributedClusterState(conf).
+     */
+    public static void cleanTopology(String topologyId) {
+        DistributedClusterState zkClusterState = null;
+        try {
+            zkClusterState = new DistributedClusterState(conf);
+            String rootDir = String.valueOf(conf.get(Config.STORM_ZOOKEEPER_ROOT));
+            String assignmentPath = "/assignments/" + topologyId;
+            String stormBase = "/topology/" + topologyId;
+            String taskbeats = "/taskbeats/" + topologyId;
+            String tasks = "/tasks/" + topologyId;
+            String taskerrors = "/taskerrors/" + topologyId;
+            String monitor = "/monitor/" + topologyId;
+            if (zkClusterState.node_existed(assignmentPath, false)) {
+                try {
+                    zkClusterState.delete_node(assignmentPath);
+                } catch (Exception e) {
+                    LOG.error("Failed to remove assignments for " + topologyId, e);
+                }
+            } else {
+                LOG.info("node " + rootDir + assignmentPath + " doesn't exist");
+            }
+
+            if (zkClusterState.node_existed(stormBase, false)) {
+                try {
+                    zkClusterState.delete_node(stormBase);
+                } catch (Exception e) {
+                    LOG.error("Failed to remove storm base for " + topologyId, e);
+                }
+            } else {
+                LOG.info("node " + rootDir + stormBase + " doesn't exist");
+            }
+
+            if (zkClusterState.node_existed(taskbeats, false)) {
+                try {
+                    zkClusterState.delete_node(taskbeats);
+                } catch (Exception e) {
+                    LOG.error("Failed to remove taskbeats for " + topologyId, e);
+                }
+            } else {
+                LOG.info("node " + rootDir + taskbeats + " doesn't exist");
+            }
+
+            if (zkClusterState.node_existed(tasks, false)) {
+                try {
+                    zkClusterState.delete_node(tasks);
+                } catch (Exception e) {
+                    LOG.error("Failed to remove tasks for " + topologyId, e);
+                }
+            } else {
+                LOG.info("node " + rootDir + tasks + " doesn't exist");
+            }
+
+            if (zkClusterState.node_existed(taskerrors, false)) {
+                try {
+                    zkClusterState.delete_node(taskerrors);
+                } catch (Exception e) {
+                    LOG.error("Failed to remove taskerrors for " + topologyId, e);
+                }
+            } else {
+                LOG.info("node " + rootDir + taskerrors + " doesn't exist");
+            }
+
+            if (zkClusterState.node_existed(monitor, false)) {
+                try {
+                    zkClusterState.delete_node(monitor);
+                } catch (Exception e) {
+                    LOG.error("Failed to remove monitor for " + topologyId, e);
+                }
+            } else {
+                LOG.info("node " + rootDir + monitor + " doesn't exist");
+            }
+        } catch (Exception e) {
+            if (zkClusterState == null) {
+                LOG.error("Failed to connect to ZK", e);
+            } else {
+                LOG.error("Failed to clean topologyId: " + topologyId, e);
+            }
+        } finally {
+            if (zkClusterState != null) {
+                zkClusterState.close();
+            }
+        }
+    }
+
     public static void readData(String path) {
 
         DistributedClusterState zkClusterState = null;
@@ -110,8 +249,7 @@ public class ZkTool {
                 if (tid.equals(topologyName)) {
                     LOG.info("Find backup " + topologyName);
 
-                    String topologyPath =
-                            Cluster.assignment_bak_path(topologyName);
+                    String topologyPath = assignment_bak_path(topologyName);
                     zkClusterState.delete_node(topologyPath);
 
                     LOG.info("Successfully delete topology " + topologyName
@@ -161,12 +299,21 @@ public class ZkTool {
 
         } else if (args[0].equalsIgnoreCase(RM_CMD)) {
             rmBakTopology(args[1]);
+        } else if (args[0].equalsIgnoreCase(LIST_CMD)) {
+            list(args[1]);
+        } else if (args[0].equalsIgnoreCase(CLEAN_CMD)) {
+            cleanTopology(args[1]);
         }
 
     }
 
     /*******************************************************************/
 
+    public static String assignment_bak_path(String id) {
+        return Cluster.ASSIGNMENTS_BAK_SUBTREE + Cluster.ZK_SEPERATOR + id;
+    }
+
     @SuppressWarnings("rawtypes")
     public static ClusterState mk_distributed_cluster_state(Map _conf)
             throws Exception {
@@ -177,7 +324,8 @@ public class ZkTool {
             throws Exception {
         Map<String, String> ret = Maps.newHashMap();
-        List<String> followers =
-                cluster_state.get_children(Cluster.NIMBUS_SLAVE_SUBTREE, false);
+        List<String> followers = cluster_state.get_children(Cluster.NIMBUS_SLAVE_SUBTREE, false);
         if (followers == null || followers.size() == 0) {
             return ret;
         }

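One more note on cleanTopology above: its six blocks differ only in the subtree prefix and the label used in logs, so they collapse naturally into a single loop. A hypothetical refactoring sketch, assuming only the existence-check/delete surface visible in the diff (ZkClient and its method names below are stand-ins, not the real DistributedClusterState API):

import java.util.LinkedHashMap;
import java.util.Map;

public class CleanTopologySketch {
    // Stand-in for the node_existed/delete_node surface of DistributedClusterState.
    interface ZkClient {
        boolean nodeExists(String path) throws Exception;
        void deleteNode(String path) throws Exception;
    }

    static void cleanTopology(ZkClient zk, String rootDir, String topologyId) {
        // subtree prefix -> label used in error messages
        Map<String, String> subtrees = new LinkedHashMap<String, String>();
        subtrees.put("/assignments/", "assignments");
        subtrees.put("/topology/", "storm base");
        subtrees.put("/taskbeats/", "taskbeats");
        subtrees.put("/tasks/", "tasks");
        subtrees.put("/taskerrors/", "taskerrors");
        subtrees.put("/monitor/", "monitor");

        for (Map.Entry<String, String> e : subtrees.entrySet()) {
            String path = e.getKey() + topologyId;
            try {
                if (zk.nodeExists(path)) {
                    zk.deleteNode(path);
                } else {
                    System.out.println("node " + rootDir + path + " doesn't exist");
                }
            } catch (Exception ex) {
                System.err.println("Failed to remove " + e.getValue() + " for " + topologyId + ": " + ex);
            }
        }
    }
}
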

[42/51] [partial] storm git commit: Update JStorm to latest release 2.1.0

Posted by pt...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/7eaf0651/jstorm-core/src/main/java/backtype/storm/generated/NettyMetric.java
----------------------------------------------------------------------
diff --git a/jstorm-core/src/main/java/backtype/storm/generated/NettyMetric.java b/jstorm-core/src/main/java/backtype/storm/generated/NettyMetric.java
deleted file mode 100644
index b6a9bc9..0000000
--- a/jstorm-core/src/main/java/backtype/storm/generated/NettyMetric.java
+++ /dev/null
@@ -1,553 +0,0 @@
-/**
- * Autogenerated by Thrift Compiler (0.9.2)
- *
- * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
- *  @generated
- */
-package backtype.storm.generated;
-
-import org.apache.thrift.scheme.IScheme;
-import org.apache.thrift.scheme.SchemeFactory;
-import org.apache.thrift.scheme.StandardScheme;
-
-import org.apache.thrift.scheme.TupleScheme;
-import org.apache.thrift.protocol.TTupleProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.TException;
-import org.apache.thrift.async.AsyncMethodCallback;
-import org.apache.thrift.server.AbstractNonblockingServer.*;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.EnumMap;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.EnumSet;
-import java.util.Collections;
-import java.util.BitSet;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import javax.annotation.Generated;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-7-27")
-public class NettyMetric implements org.apache.thrift.TBase<NettyMetric, NettyMetric._Fields>, java.io.Serializable, Cloneable, Comparable<NettyMetric> {
-  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NettyMetric");
-
-  private static final org.apache.thrift.protocol.TField CONNECTIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("connections", org.apache.thrift.protocol.TType.MAP, (short)1);
-  private static final org.apache.thrift.protocol.TField CONNECTION_NUM_FIELD_DESC = new org.apache.thrift.protocol.TField("connectionNum", org.apache.thrift.protocol.TType.I32, (short)2);
-
-  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
-  static {
-    schemes.put(StandardScheme.class, new NettyMetricStandardSchemeFactory());
-    schemes.put(TupleScheme.class, new NettyMetricTupleSchemeFactory());
-  }
-
-  private Map<String,MetricInfo> connections; // required
-  private int connectionNum; // required
-
-  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    CONNECTIONS((short)1, "connections"),
-    CONNECTION_NUM((short)2, "connectionNum");
-
-    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
-
-    static {
-      for (_Fields field : EnumSet.allOf(_Fields.class)) {
-        byName.put(field.getFieldName(), field);
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, or null if its not found.
-     */
-    public static _Fields findByThriftId(int fieldId) {
-      switch(fieldId) {
-        case 1: // CONNECTIONS
-          return CONNECTIONS;
-        case 2: // CONNECTION_NUM
-          return CONNECTION_NUM;
-        default:
-          return null;
-      }
-    }
-
-    /**
-     * Find the _Fields constant that matches fieldId, throwing an exception
-     * if it is not found.
-     */
-    public static _Fields findByThriftIdOrThrow(int fieldId) {
-      _Fields fields = findByThriftId(fieldId);
-      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-      return fields;
-    }
-
-    /**
-     * Find the _Fields constant that matches name, or null if its not found.
-     */
-    public static _Fields findByName(String name) {
-      return byName.get(name);
-    }
-
-    private final short _thriftId;
-    private final String _fieldName;
-
-    _Fields(short thriftId, String fieldName) {
-      _thriftId = thriftId;
-      _fieldName = fieldName;
-    }
-
-    public short getThriftFieldId() {
-      return _thriftId;
-    }
-
-    public String getFieldName() {
-      return _fieldName;
-    }
-  }
-
-  // isset id assignments
-  private static final int __CONNECTIONNUM_ISSET_ID = 0;
-  private byte __isset_bitfield = 0;
-  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
-  static {
-    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.CONNECTIONS, new org.apache.thrift.meta_data.FieldMetaData("connections", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
-            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
-            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, MetricInfo.class))));
-    tmpMap.put(_Fields.CONNECTION_NUM, new org.apache.thrift.meta_data.FieldMetaData("connectionNum", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
-    metaDataMap = Collections.unmodifiableMap(tmpMap);
-    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(NettyMetric.class, metaDataMap);
-  }
-
-  public NettyMetric() {
-  }
-
-  public NettyMetric(
-    Map<String,MetricInfo> connections,
-    int connectionNum)
-  {
-    this();
-    this.connections = connections;
-    this.connectionNum = connectionNum;
-    set_connectionNum_isSet(true);
-  }
-
-  /**
-   * Performs a deep copy on <i>other</i>.
-   */
-  public NettyMetric(NettyMetric other) {
-    __isset_bitfield = other.__isset_bitfield;
-    if (other.is_set_connections()) {
-      Map<String,MetricInfo> __this__connections = new HashMap<String,MetricInfo>(other.connections.size());
-      for (Map.Entry<String, MetricInfo> other_element : other.connections.entrySet()) {
-
-        String other_element_key = other_element.getKey();
-        MetricInfo other_element_value = other_element.getValue();
-
-        String __this__connections_copy_key = other_element_key;
-
-        MetricInfo __this__connections_copy_value = new MetricInfo(other_element_value);
-
-        __this__connections.put(__this__connections_copy_key, __this__connections_copy_value);
-      }
-      this.connections = __this__connections;
-    }
-    this.connectionNum = other.connectionNum;
-  }
-
-  public NettyMetric deepCopy() {
-    return new NettyMetric(this);
-  }
-
-  @Override
-  public void clear() {
-    this.connections = null;
-    set_connectionNum_isSet(false);
-    this.connectionNum = 0;
-  }
-
-  public int get_connections_size() {
-    return (this.connections == null) ? 0 : this.connections.size();
-  }
-
-  public void put_to_connections(String key, MetricInfo val) {
-    if (this.connections == null) {
-      this.connections = new HashMap<String,MetricInfo>();
-    }
-    this.connections.put(key, val);
-  }
-
-  public Map<String,MetricInfo> get_connections() {
-    return this.connections;
-  }
-
-  public void set_connections(Map<String,MetricInfo> connections) {
-    this.connections = connections;
-  }
-
-  public void unset_connections() {
-    this.connections = null;
-  }
-
-  /** Returns true if field connections is set (has been assigned a value) and false otherwise */
-  public boolean is_set_connections() {
-    return this.connections != null;
-  }
-
-  public void set_connections_isSet(boolean value) {
-    if (!value) {
-      this.connections = null;
-    }
-  }
-
-  public int get_connectionNum() {
-    return this.connectionNum;
-  }
-
-  public void set_connectionNum(int connectionNum) {
-    this.connectionNum = connectionNum;
-    set_connectionNum_isSet(true);
-  }
-
-  public void unset_connectionNum() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __CONNECTIONNUM_ISSET_ID);
-  }
-
-  /** Returns true if field connectionNum is set (has been assigned a value) and false otherwise */
-  public boolean is_set_connectionNum() {
-    return EncodingUtils.testBit(__isset_bitfield, __CONNECTIONNUM_ISSET_ID);
-  }
-
-  public void set_connectionNum_isSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __CONNECTIONNUM_ISSET_ID, value);
-  }
-
-  public void setFieldValue(_Fields field, Object value) {
-    switch (field) {
-    case CONNECTIONS:
-      if (value == null) {
-        unset_connections();
-      } else {
-        set_connections((Map<String,MetricInfo>)value);
-      }
-      break;
-
-    case CONNECTION_NUM:
-      if (value == null) {
-        unset_connectionNum();
-      } else {
-        set_connectionNum((Integer)value);
-      }
-      break;
-
-    }
-  }
-
-  public Object getFieldValue(_Fields field) {
-    switch (field) {
-    case CONNECTIONS:
-      return get_connections();
-
-    case CONNECTION_NUM:
-      return Integer.valueOf(get_connectionNum());
-
-    }
-    throw new IllegalStateException();
-  }
-
-  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
-  public boolean isSet(_Fields field) {
-    if (field == null) {
-      throw new IllegalArgumentException();
-    }
-
-    switch (field) {
-    case CONNECTIONS:
-      return is_set_connections();
-    case CONNECTION_NUM:
-      return is_set_connectionNum();
-    }
-    throw new IllegalStateException();
-  }
-
-  @Override
-  public boolean equals(Object that) {
-    if (that == null)
-      return false;
-    if (that instanceof NettyMetric)
-      return this.equals((NettyMetric)that);
-    return false;
-  }
-
-  public boolean equals(NettyMetric that) {
-    if (that == null)
-      return false;
-
-    boolean this_present_connections = true && this.is_set_connections();
-    boolean that_present_connections = true && that.is_set_connections();
-    if (this_present_connections || that_present_connections) {
-      if (!(this_present_connections && that_present_connections))
-        return false;
-      if (!this.connections.equals(that.connections))
-        return false;
-    }
-
-    boolean this_present_connectionNum = true;
-    boolean that_present_connectionNum = true;
-    if (this_present_connectionNum || that_present_connectionNum) {
-      if (!(this_present_connectionNum && that_present_connectionNum))
-        return false;
-      if (this.connectionNum != that.connectionNum)
-        return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    List<Object> list = new ArrayList<Object>();
-
-    boolean present_connections = true && (is_set_connections());
-    list.add(present_connections);
-    if (present_connections)
-      list.add(connections);
-
-    boolean present_connectionNum = true;
-    list.add(present_connectionNum);
-    if (present_connectionNum)
-      list.add(connectionNum);
-
-    return list.hashCode();
-  }
-
-  @Override
-  public int compareTo(NettyMetric other) {
-    if (!getClass().equals(other.getClass())) {
-      return getClass().getName().compareTo(other.getClass().getName());
-    }
-
-    int lastComparison = 0;
-
-    lastComparison = Boolean.valueOf(is_set_connections()).compareTo(other.is_set_connections());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_connections()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.connections, other.connections);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(is_set_connectionNum()).compareTo(other.is_set_connectionNum());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (is_set_connectionNum()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.connectionNum, other.connectionNum);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    return 0;
-  }
-
-  public _Fields fieldForId(int fieldId) {
-    return _Fields.findByThriftId(fieldId);
-  }
-
-  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
-  }
-
-  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("NettyMetric(");
-    boolean first = true;
-
-    sb.append("connections:");
-    if (this.connections == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.connections);
-    }
-    first = false;
-    if (!first) sb.append(", ");
-    sb.append("connectionNum:");
-    sb.append(this.connectionNum);
-    first = false;
-    sb.append(")");
-    return sb.toString();
-  }
-
-  public void validate() throws org.apache.thrift.TException {
-    // check for required fields
-    if (!is_set_connections()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'connections' is unset! Struct:" + toString());
-    }
-
-    if (!is_set_connectionNum()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'connectionNum' is unset! Struct:" + toString());
-    }
-
-    // check for sub-struct validity
-  }
-
-  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
-    try {
-      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
-    try {
-      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
-      __isset_bitfield = 0;
-      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
-    } catch (org.apache.thrift.TException te) {
-      throw new java.io.IOException(te);
-    }
-  }
-
-  private static class NettyMetricStandardSchemeFactory implements SchemeFactory {
-    public NettyMetricStandardScheme getScheme() {
-      return new NettyMetricStandardScheme();
-    }
-  }
-
-  private static class NettyMetricStandardScheme extends StandardScheme<NettyMetric> {
-
-    public void read(org.apache.thrift.protocol.TProtocol iprot, NettyMetric struct) throws org.apache.thrift.TException {
-      org.apache.thrift.protocol.TField schemeField;
-      iprot.readStructBegin();
-      while (true)
-      {
-        schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
-          break;
-        }
-        switch (schemeField.id) {
-          case 1: // CONNECTIONS
-            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
-              {
-                org.apache.thrift.protocol.TMap _map274 = iprot.readMapBegin();
-                struct.connections = new HashMap<String,MetricInfo>(2*_map274.size);
-                String _key275;
-                MetricInfo _val276;
-                for (int _i277 = 0; _i277 < _map274.size; ++_i277)
-                {
-                  _key275 = iprot.readString();
-                  _val276 = new MetricInfo();
-                  _val276.read(iprot);
-                  struct.connections.put(_key275, _val276);
-                }
-                iprot.readMapEnd();
-              }
-              struct.set_connections_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 2: // CONNECTION_NUM
-            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
-              struct.connectionNum = iprot.readI32();
-              struct.set_connectionNum_isSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          default:
-            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-        }
-        iprot.readFieldEnd();
-      }
-      iprot.readStructEnd();
-      struct.validate();
-    }
-
-    public void write(org.apache.thrift.protocol.TProtocol oprot, NettyMetric struct) throws org.apache.thrift.TException {
-      struct.validate();
-
-      oprot.writeStructBegin(STRUCT_DESC);
-      if (struct.connections != null) {
-        oprot.writeFieldBegin(CONNECTIONS_FIELD_DESC);
-        {
-          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.connections.size()));
-          for (Map.Entry<String, MetricInfo> _iter278 : struct.connections.entrySet())
-          {
-            oprot.writeString(_iter278.getKey());
-            _iter278.getValue().write(oprot);
-          }
-          oprot.writeMapEnd();
-        }
-        oprot.writeFieldEnd();
-      }
-      oprot.writeFieldBegin(CONNECTION_NUM_FIELD_DESC);
-      oprot.writeI32(struct.connectionNum);
-      oprot.writeFieldEnd();
-      oprot.writeFieldStop();
-      oprot.writeStructEnd();
-    }
-
-  }
-
-  private static class NettyMetricTupleSchemeFactory implements SchemeFactory {
-    public NettyMetricTupleScheme getScheme() {
-      return new NettyMetricTupleScheme();
-    }
-  }
-
-  private static class NettyMetricTupleScheme extends TupleScheme<NettyMetric> {
-
-    @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, NettyMetric struct) throws org.apache.thrift.TException {
-      TTupleProtocol oprot = (TTupleProtocol) prot;
-      {
-        oprot.writeI32(struct.connections.size());
-        for (Map.Entry<String, MetricInfo> _iter279 : struct.connections.entrySet())
-        {
-          oprot.writeString(_iter279.getKey());
-          _iter279.getValue().write(oprot);
-        }
-      }
-      oprot.writeI32(struct.connectionNum);
-    }
-
-    @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, NettyMetric struct) throws org.apache.thrift.TException {
-      TTupleProtocol iprot = (TTupleProtocol) prot;
-      {
-        org.apache.thrift.protocol.TMap _map280 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.connections = new HashMap<String,MetricInfo>(2*_map280.size);
-        String _key281;
-        MetricInfo _val282;
-        for (int _i283 = 0; _i283 < _map280.size; ++_i283)
-        {
-          _key281 = iprot.readString();
-          _val282 = new MetricInfo();
-          _val282.read(iprot);
-          struct.connections.put(_key281, _val282);
-        }
-      }
-      struct.set_connections_isSet(true);
-      struct.connectionNum = iprot.readI32();
-      struct.set_connectionNum_isSet(true);
-    }
-  }
-
-}
-