Posted to commits@storm.apache.org by ka...@apache.org on 2016/08/22 08:34:40 UTC

[1/9] storm git commit: STORM-1994: Add table with per-topology and worker resource usage and components in (new) supervisor and topology pages

Repository: storm
Updated Branches:
  refs/heads/1.x-branch ce3884933 -> dc20e9ce0


http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/ui/public/component.html
----------------------------------------------------------------------
diff --git a/storm-core/src/ui/public/component.html b/storm-core/src/ui/public/component.html
index fc7b15e..bd487af 100644
--- a/storm-core/src/ui/public/component.html
+++ b/storm-core/src/ui/public/component.html
@@ -162,6 +162,12 @@ $(document).ready(function() {
         }
     });
 
+    function renderSupervisorPageLink(data, type, row, meta) {
+        return type === 'display' ? 
+                   ("<a href='/supervisor.html?host=" + data + "'>" + data + "</a>") :
+                   data; 
+    }
+
     function renderActionCheckbox(data, type, row, meta) {
       var host_port = row[2]+':'+$(row[3])[0].text;
       switch(type) {
@@ -268,6 +274,7 @@ $(document).ready(function() {
                 //id, uptime, host, port, actions, emitted, transferred, complete latency, acked, failed
                 dtAutoPage("#executor-stats-table", {
                   columnDefs: [
+                    {render: renderSupervisorPageLink, searchable: true, targets: [2]},
                     {render: renderActionCheckbox, searchable: false, targets: [4]},
                     {type: "num", targets: [5, 6, 7, 8, 9]},
                     {type: "time-str", targets: [1]},
@@ -303,6 +310,7 @@ $(document).ready(function() {
                 //id, uptime, host, port, actions, emitted, transferred, capacity, execute latency, executed, process latency, acked, failed
                 dtAutoPage("#executor-stats-table", {
                   columnDefs: [
+                    {render: renderSupervisorPageLink, searchable: true, targets: [2]},
                     {render: renderActionCheckbox, searchable: false, targets: [4]},
                     {type: "num", targets: [5, 6, 7, 8, 9, 10, 11, 12]},
                     {type: "time-str", targets: [1]},

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/ui/public/css/style.css
----------------------------------------------------------------------
diff --git a/storm-core/src/ui/public/css/style.css b/storm-core/src/ui/public/css/style.css
index ddae2d5..c4c41fd 100644
--- a/storm-core/src/ui/public/css/style.css
+++ b/storm-core/src/ui/public/css/style.css
@@ -112,3 +112,23 @@ PRE.jsonFormatter-codeContainer {
   width: 1em;
 }
 
+.worker-component-button {
+  margin-right: 2px;
+  margin-top: 2px;
+}
+
+.worker-component-button .badge {
+  margin-left: 2px; 
+}
+
+.worker-child-row {
+  padding: 10px;
+}
+
+.supervisor-page #toggle-sys {
+  padding: 10px;
+}
+
+#toggle-on-components-btn .btn {
+  margin: 10px;
+}

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/ui/public/js/script.js
----------------------------------------------------------------------
diff --git a/storm-core/src/ui/public/js/script.js b/storm-core/src/ui/public/js/script.js
index 769442f..fcdd75a 100644
--- a/storm-core/src/ui/public/js/script.js
+++ b/storm-core/src/ui/public/js/script.js
@@ -271,3 +271,194 @@ function getStatic(url, cb) {
         success: cb
     });
 };
+
+function makeSupervisorWorkerStatsTable (response, elId, parentId) {
+    makeWorkerStatsTable (response, elId, parentId, "supervisor");
+};
+
+function makeTopologyWorkerStatsTable (response, elId, parentId) {
+    makeWorkerStatsTable (response, elId, parentId, "topology");
+};
+
+var formatComponents = function (row) {
+    if (!row) return;
+    var result = '';
+    Object.keys(row.componentNumTasks || {}).sort().forEach (function (component){
+        var numTasks = row.componentNumTasks[component];
+        result += '<a class="worker-component-button btn btn-xs btn-primary" href="/component.html?id=' + 
+                        component + '&topology_id=' + row.topologyId + '">';
+        result += component;
+        result += '<span class="badge">' + numTasks + '</span>';
+        result += '</a>';
+    });
+    return result;
+};
+
+var format = function (row){
+    var result = '<div class="worker-child-row">Worker components: ';
+    result += formatComponents (row) || 'N/A';
+    result += '</div>';
+    return result;
+};
+
+// Build a table of per-worker resources and components (when permitted)
+var makeWorkerStatsTable = function (response, elId, parentId, type) {
+    var showCpu = response.schedulerDisplayResource;
+
+    var columns = [
+        {
+            data: 'host', 
+            render: function (data, type, row){
+                return type === 'display' ? 
+                    ('<a href="/supervisor.html?host=' + data + '">' + data + '</a>') :
+                    data;
+            }
+        },
+        {
+            data: 'port',
+            render: function (data, type, row) {
+                var logLink = row.workerLogLink;
+                return type === 'display' ?
+                    ('<a href="' + logLink + '">' + data + '</a>'): 
+                    data;
+            }
+        },
+        { 
+            data: function (row, type){
+                // when displaying or filtering, use the formatted uptime;
+                // otherwise use the raw seconds (for sorting)
+                return (type === 'display' || type === 'filter') ? 
+                    row.uptime :
+                    row.uptimeSeconds;
+            }
+        },
+        { data: 'executorsTotal' },
+        { 
+            data: function (row){
+                return row.assignedMemOnHeap + row.assignedMemOffHeap;
+            }
+        },
+    ];
+
+    if (showCpu) {
+        columns.push ({ data: 'assignedCpu' });
+    }
+
+    columns.push ({ 
+        data: function (row, type, obj, dt) {
+            var components = Object.keys(row.componentNumTasks || {});
+            if (components.length === 0){
+                // if no components returned, it means the worker
+                // topology isn't one the user is authorized to see
+                return "N/A";
+            }
+
+            if (type == 'filter') {
+                return components;
+            }
+
+            if (type == 'display') {
+                // show a button to toggle the component row
+                return '<button class="btn btn-xs btn-info details-control" type="button">' +
+                       components.length + ' components</button>';
+            }
+
+            return components.length;
+        }
+    });
+
+    switch (type){
+        case 'topology':
+            // the topology page has the supervisor id as the second column in the worker table
+            columns.splice(1, 0, {
+                data: 'supervisorId', 
+                render: function (data, type, row){
+                    return type === 'display' ? 
+                        ('<a href="/supervisor.html?id=' + data + '">' + data + '</a>') :
+                        data;
+                }
+            });
+            break;
+        case 'supervisor':
+            // the supervisor page has the topology name as the first column in the worker table
+            columns.unshift ({
+                data: function (row, type){
+                    return type === 'display' ? 
+                        ('<a href="/topology.html?id=' + row.topologyId + '">' + row.topologyName + '</a>') :
+                        row.topologyId;
+                }
+            });
+            break;
+    }
+
+    var workerStatsTable = dtAutoPage(elId, {
+        data: response.workers,
+        autoWidth: false,
+        columns: columns,
+        initComplete: function (){
+            // add a "Toggle Components" button
+            renderToggleComponents ($(elId + '_filter'), elId);
+            var show = $.cookies.get("showComponents") || false;
+
+            // if the cookie is false, then we are done
+            if (!show) {
+                return;
+            }
+
+            // show the component child rows for all workers
+            $(elId + ' tr').each(function (){
+                var dt = $(elId).dataTable();
+                showComponents(dt.api().row(this), true);
+            });
+        }
+    });
+
+    // Add an event listener for opening and closing a worker's
+    // components child row on a per-row basis
+    $(elId + ' tbody').on('click', 'button.details-control', function () {
+        var tr = $(this).closest('tr');
+        var row = workerStatsTable.row(tr);
+        showComponents(row, !row.child.isShown());
+    });
+
+    $(parentId + ' #toggle-on-components-btn').on('click', 'input', function (){
+        toggleComponents(elId);
+    });
+
+    $(elId + ' [data-toggle="tooltip"]').tooltip();
+};
+
+function renderToggleComponents(div, targetTable) {
+     var showComponents = $.cookies.get("showComponents") || false;
+     div.append("<span id='toggle-on-components-btn' class=\"tip right\" " +
+                "title=\"Use this to toggle visibility of worker components.\">"+
+                    "<input value=\"Toggle Components\" type=\"button\" class=\"btn btn-info\">" + 
+                "</span>");
+}
+
+function showComponents(row, open) {
+    var tr = $(row.node());
+    if (!open) {
+        // This row is already open - close it
+        row.child.hide();
+        tr.removeClass('shown');
+    } else {
+        // Open this row
+        row.child (format (row.data())).show();
+        tr.addClass('shown');
+    }
+}
+
+function toggleComponents(elId) {
+    var show = $.cookies.get('showComponents') || false;
+    show = !show;
+
+    var exDate = new Date();
+    exDate.setDate(exDate.getDate() + 365);
+
+    $.cookies.set('showComponents', show, {'path':'/', 'expiresAt':exDate.toUTCString()});
+    $(elId + ' tr').each(function (){
+        var dt = $(elId).dataTable();
+        showComponents(dt.api().row(this), show);
+    });
+}
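
A minimal usage sketch, not part of the patch, may help when reading makeWorkerStatsTable: the field names below are inferred from the accessors used in the code above, and all values are made up.

    // Hypothetical response object consumed by makeSupervisorWorkerStatsTable /
    // makeTopologyWorkerStatsTable (one entry per worker).
    var exampleResponse = {
        schedulerDisplayResource: true,            // when true, the Assigned CPU column is added
        workers: [{
            host: "host1",                          // rendered as a link to /supervisor.html?host=host1
            port: 6700,                             // rendered as a link to workerLogLink
            supervisorId: "super1",                 // extra column on the topology page
            topologyId: "topo-1",                   // made-up id; used in component links
            topologyName: "topo",                   // first column on the supervisor page
            uptime: "1m 30s",                       // shown for display/filter
            uptimeSeconds: 90,                      // used for sorting
            executorsTotal: 2,
            assignedMemOnHeap: 512,
            assignedMemOffHeap: 0,                  // summed with on-heap for the memory column
            assignedCpu: 100,
            workerLogLink: "http://host1:8000/log?file=worker.log",   // hypothetical logviewer URL
            componentNumTasks: { spout: 1, exclaim1: 1 }               // empty when the user is not authorized
        }]
    };
    // On the supervisor page the call would then be:
    // makeSupervisorWorkerStatsTable(exampleResponse, '#worker-stats-table', '#worker-resources');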

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/ui/public/supervisor.html
----------------------------------------------------------------------
diff --git a/storm-core/src/ui/public/supervisor.html b/storm-core/src/ui/public/supervisor.html
new file mode 100644
index 0000000..afe946e
--- /dev/null
+++ b/storm-core/src/ui/public/supervisor.html
@@ -0,0 +1,132 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<html><head>
+<meta charset="UTF-8">
+<meta name="viewport" content="width=device-width, initial-scale=1">
+<title>Storm UI</title>
+<link href="/css/bootstrap-3.3.1.min.css" rel="stylesheet" type="text/css">
+<link href="/css/jquery.dataTables.1.10.4.min.css" rel="stylesheet" type="text/css">
+<link href="/css/dataTables.bootstrap.css" rel="stylesheet" type="text/css">
+<link href="/css/jsonFormatter.min.css" rel="stylesheet" type="text/css">
+<link href="/css/style.css?_ts=${packageTimestamp}" rel="stylesheet" type="text/css">
+<script src="/js/jquery-1.11.1.min.js" type="text/javascript"></script>
+<script src="/js/jquery.dataTables.1.10.4.min.js" type="text/javascript"></script>
+<script src="/js/jquery.cookies.2.2.0.min.js" type="text/javascript"></script>
+<script src="/js/jquery.mustache.js" type="text/javascript"></script>
+<script src="/js/url.min.js" type="text/javascript"></script>
+<script src="/js/bootstrap-3.3.1.min.js" type="text/javascript"></script>
+<script src="/js/jquery.blockUI.min.js" type="text/javascript"></script>
+<script src="/js/jsonFormatter.min.js" type="text/javascript"></script>
+<script src="/js/script.js?_ts=${packageTimestamp}" type="text/javascript"></script>
+<script src="/js/dataTables.bootstrap.min.js" type="text/javascript"></script>
+</head>
+<body>
+<div class="supervisor-page container-fluid">
+  <div class="row">
+    <div class="col-md-11">
+      <h1><a href="/">Storm UI</a></h1>
+    </div>
+    <div id="ui-user" class="col-md-1"></div>
+  </div>
+  <div class="row">
+    <div class="col-md-12">
+      <h2>Supervisor summary</h2>
+      <div id="supervisor-summary"></div>
+    </div>
+  </div>
+  <div class="row">
+    <div class="col-md-12">
+      <h2 id="worker-resources-header">Worker resources</h2>
+      <div id="worker-resources"></div>
+    </div>
+  </div>
+  <div class="row">
+    <div class="col-md-12">
+      <span id="toggle-sys" style="display: block;" class="js-only"></span>
+    </div>
+  </div>
+</div>
+</body>
+
+<script>
+    
+$(document).ajaxStop($.unblockUI);
+$(document).ajaxStart(function(){
+    $.blockUI({ message: '<img src="images/spinner.gif" /> <h3>Loading summary...</h3>'});
+});
+$(document).ready(function() {
+    var supervisorId = $.url("?id");
+    var host = $.url("?host");
+    var windowParam = $.url("?window");
+    var sys = $.cookies.get("sys") || "false";
+    var url = "/api/v1/supervisor?" + 
+                (supervisorId ? "id="+supervisorId : "host="+host) 
+                + "&sys="+sys;
+    if(windowParam) url += "&window=" + windowParam;
+    $.extend( $.fn.dataTable.defaults, {
+      stateSave: true,
+      lengthMenu: [[20,40,60,100,-1], [20, 40, 60, 100, "All"]],
+      pageLength: 20
+    });
+
+    renderToggleSys($("#toggle-sys"));
+
+    var supervisorSummary = $("#supervisor-summary");
+    var workerStats = $("#worker-resources");
+
+    $.ajaxSetup({
+        "error":function(jqXHR,textStatus,response) {
+            var errorJson = jQuery.parseJSON(jqXHR.responseText);
+            getStatic("/templates/json-error-template.html", function(template) {
+                $("#json-response-error").append(Mustache.render($(template).filter("#json-error-template").html(),errorJson));
+            });
+        }
+    });
+    function jsError(other) {
+      try {
+        other();
+      } catch (err) {
+        getStatic("/templates/json-error-template.html", function(template) {
+          $("#json-response-error").append(Mustache.render($(template).filter("#json-error-template").html(),{error: "JS Error", errorMessage: err}));
+        });
+      }
+    }
+
+    $.getJSON(url,function(response,status,jqXHR) {
+        getStatic("/templates/supervisor-page-template.html", function(template) {
+            jsError(function() {
+                supervisorSummary.append(
+                        Mustache.render($(template).filter("#supervisor-summary-template").html(),response));
+                
+                //id, host, uptime, slots, used slots
+                dtAutoPage("#supervisor-summary-table", {
+                    columnDefs: [
+                    {type: "num", targets: [3, 4]},
+                    {type: "time-str", targets: [2]}
+                    ]
+                });
+
+                $('#supervisor-summary-table [data-toggle="tooltip"]').tooltip();
+                workerStats.append(Mustache.render($(template).filter("#worker-stats-template").html(),response));
+                makeSupervisorWorkerStatsTable(response, '#worker-stats-table', '#worker-resources');
+            });
+        });
+    });
+});
+</script>
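
The page above accepts either a supervisor id or a hostname and forwards whichever was given to the supervisor REST endpoint. A small sketch of that query-string handling, using a hypothetical helper that restates the inline logic (example values are made up):

    // buildSupervisorApiUrl is not part of the patch; it mirrors the URL construction above.
    function buildSupervisorApiUrl(supervisorId, host, sys, windowParam) {
        var url = "/api/v1/supervisor?" +
                  (supervisorId ? "id=" + supervisorId : "host=" + host) +
                  "&sys=" + sys;
        if (windowParam) url += "&window=" + windowParam;
        return url;
    }
    // buildSupervisorApiUrl("super-1", null, "false", null)
    //   -> "/api/v1/supervisor?id=super-1&sys=false"
    // buildSupervisorApiUrl(null, "host1", "false", "600")
    //   -> "/api/v1/supervisor?host=host1&sys=false&window=600"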

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/ui/public/templates/index-page-template.html
----------------------------------------------------------------------
diff --git a/storm-core/src/ui/public/templates/index-page-template.html b/storm-core/src/ui/public/templates/index-page-template.html
index 0574e87..c23838f 100644
--- a/storm-core/src/ui/public/templates/index-page-template.html
+++ b/storm-core/src/ui/public/templates/index-page-template.html
@@ -252,8 +252,8 @@
   <tbody>
     {{#supervisors}}
     <tr>
-      <td><a href="{{logLink}}">{{host}}</a></td>
-      <td>{{id}}</td>
+      <td><a href="/supervisor.html?host={{host}}">{{host}}</a> (<a href="{{logLink}}" title="View log">log</a>)</td>
+      <td><a href="/supervisor.html?id={{id}}">{{id}}</a></td>
       <td>{{uptime}}</td>
       <td>{{slotsTotal}}</td>
       <td>{{slotsUsed}}</td>

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/ui/public/templates/supervisor-page-template.html
----------------------------------------------------------------------
diff --git a/storm-core/src/ui/public/templates/supervisor-page-template.html b/storm-core/src/ui/public/templates/supervisor-page-template.html
new file mode 100644
index 0000000..9e6fadc
--- /dev/null
+++ b/storm-core/src/ui/public/templates/supervisor-page-template.html
@@ -0,0 +1,145 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<script id="supervisor-summary-template" type="text/html">
+    <table class="table table-striped compact" id="supervisor-summary-table">
+      <thead>
+        <tr>
+          <th>
+            <span data-toggle="tooltip" data-placement="top" title="The hostname reported by the remote host. (Note that this hostname is not the result of a reverse lookup at the Nimbus node.)">
+              Host
+            </span>
+          </th>
+          <th>
+            <span data-toggle="tooltip" data-placement="top" title="A unique identifier given to a Supervisor when it joins the cluster.">
+              Supervisor Id
+            </span>
+          </th>
+          <th>
+            <span data-toggle="tooltip" data-placement="top" title="The length of time a Supervisor has been registered to the cluster.">
+              Uptime
+            </span>
+          </th>
+          <th>
+            <span data-toggle="tooltip" data-placement="top" title="Slots are Workers (processes).">
+              Slots
+            </span>
+          </th>
+          <th>
+            <span data-toggle="tooltip" data-placement="top" title="Slots are Workers (processes).">
+              Used slots
+            </span>
+          </th>
+          <th>
+            <span data-toggle="tooltip" data-placement="top" title="Memory capacity of a supervisor.">
+              Total Mem (MB)
+            </span>
+          </th>
+          <th>
+            <span data-toggle="tooltip" data-placement="top" title="Memory that has been allocated.">
+              Used Mem (MB)
+            </span>
+          </th>
+          {{#schedulerDisplayResource}}
+          <th>
+            <span data-toggle="tooltip" data-placement="top" title="CPU capacity of a supervisor. Every 100 means one core.">
+              Total CPU (%)
+            </span>
+          </th>
+          <th>
+            <span data-toggle="tooltip" data-placement="top" title="CPU that has been allocated. Every 100 means one core">
+              Used CPU (%)
+            </span>
+          </th>
+          {{/schedulerDisplayResource}}
+          <th>
+            <span data-toggle="tooltip" data-placement="top" title="Version">
+              Version
+            </span>
+          </th>
+        </tr>
+      </thead>
+      <tbody>
+      {{#supervisors}}
+          <tr>
+            <td><a href="/supervisor.html?host={{host}}">{{host}} (<a href="{{logLink}}" title="View log">log</a>)</a></td>
+            <td><a href="/supervisor.html?id={{id}}">{{id}}</td>
+            <td>{{uptime}}</td>
+            <td>{{slotsTotal}}</td>
+            <td>{{slotsUsed}}</td>
+            <td>{{totalMem}}</td>
+            <td>{{usedMem}}</td>
+            {{#schedulerDisplayResource}}
+            <td>{{totalCpu}}</td>
+            <td>{{usedCpu}}</td>
+            {{/schedulerDisplayResource}}
+            <td>{{version}}</td>
+          </tr>
+      {{/supervisors}}
+      </tbody>
+    </table>
+</script>
+<script id="worker-stats-template" type="text/html">
+  <table class="table table-striped compact" id="worker-stats-table">
+    <thead>
+      <tr>
+        <th class="header headerSortDown">
+          <span data-toggle="tooltip" data-placement="top" title="The name given to the topology by when it was submitted. Click the name to view the Topology's information.">
+            Topology Name
+          </span>
+        </th>
+        <th class="header">
+          <span data-original-title="The hostname reported by the remote host. (Note that this hostname is not the result of a reverse lookup at the Nimbus node.)" data-toggle="tooltip" data-placement="top">
+            Host
+          </span>
+        </th>
+        <th class="header">
+          <span data-toggle="tooltip" data-placement="top" title="The port number used by the Worker. Click on the port number to open the logviewer page for this Worker.">
+            Port 
+          </span>
+        </th>
+        <th class="header">
+          <span data-toggle="tooltip" data-placement="top" title="The length of time a Worker has been alive.">
+            Uptime
+          </span>
+        </th>
+        <th class="header">
+          <span data-toggle="tooltip" data-placement="top" title="The number of executors.">
+            Num executors
+          </span>
+        </th>
+        <th class="header">
+          <span data-toggle="tooltip" data-placement="top" title="Assigned Total Memory by Scheduler.">
+            Assigned Mem (MB)
+          </span>
+        </th>
+        {{#schedulerDisplayResource}}
+        <th class="header">
+          <span data-toggle="tooltip" data-placement="top" title="Assigned Total CPU by Scheduler. Every 100 means 1 core.">
+            Assigned CPU (%)
+          </span>
+        </th>
+        {{/schedulerDisplayResource}}
+        <th class="header">
+          <span data-toggle="tooltip" data-placement="top" title="The components running in this worker and the number of tasks per component.">
+            Components
+          </span>
+        </th>
+    </tr></thead>
+    <tbody>
+    </tbody>
+  </table>
+</script>

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/ui/public/templates/topology-page-template.html
----------------------------------------------------------------------
diff --git a/storm-core/src/ui/public/templates/topology-page-template.html b/storm-core/src/ui/public/templates/topology-page-template.html
index 1825364..f390a21 100644
--- a/storm-core/src/ui/public/templates/topology-page-template.html
+++ b/storm-core/src/ui/public/templates/topology-page-template.html
@@ -29,45 +29,52 @@
           </span>
         </th>
         <th>
-          <span data-toggle="tooltip" data-placement="above" title="The user that submitted the Topology, if authentication is enabled.">
+          <span data-toggle="tooltip" data-placement="top" title="The user that submitted the Topology, if authentication is enabled.">
             Owner
           </span>
         </th>
         <th>
-          <span data-toggle="tooltip" data-placement="above" title="The status can be one of ACTIVE, INACTIVE, KILLED, or REBALANCING.">
+          <span data-toggle="tooltip" data-placement="top" title="The status can be one of ACTIVE, INACTIVE, KILLED, or REBALANCING.">
             Status
           </span>
         </th>
         <th>
-          <span data-toggle="tooltip" data-placement="above" title="The time since the Topology was submitted.">
+          <span data-toggle="tooltip" data-placement="top" title="The time since the Topology was submitted.">
             Uptime
           </span>
         </th>
         <th>
-          <span data-toggle="tooltip" data-placement="above" title="The number of Workers (processes).">
+          <span data-toggle="tooltip" data-placement="top" title="The number of Workers (processes).">
             Num workers
           </span>
         </th>
         <th>
-          <span data-toggle="tooltip" data-placement="above" title="Executors are threads in a Worker process.">
+          <span data-toggle="tooltip" data-placement="top" title="Executors are threads in a Worker process.">
             Num executors
           </span>
         </th>
         <th>
-          <span data-toggle="tooltip" data-placement="above" title="A Task is an instance of a Bolt or Spout. The number of Tasks is almost always equal to the number of Executors.">
+          <span data-toggle="tooltip" data-placement="top" title="A Task is an instance of a Bolt or Spout. The number of Tasks is almost always equal to the number of Executors.">
             Num tasks
           </span>
         </th>
         <th>
-          <span cdata-toggle="tooltip" data-placement="above" title="Number of nimbus hosts on which this topology's code is replicated. ">
+          <span data-toggle="tooltip" data-placement="top" title="Number of nimbus hosts on which this topology's code is replicated.">
             Replication count
           </span>
         </th>
         <th>
-          <span data-toggle="tooltip" data-placement="above" title="Assigned Total Memory by Scheduler.">
+          <span data-toggle="tooltip" data-placement="top" title="Assigned Total Memory by Scheduler.">
             Assigned Mem (MB)
           </span>
         </th>
+        {{#schedulerDisplayResource}}
+        <th>
+          <span data-toggle="tooltip" data-placement="top" title="Assigned Total CPU by Scheduler. Every 100 means 1 core.">
+            Assigned CPU (%)
+          </span>
+        </th>
+        {{/schedulerDisplayResource}}
         <th>
           <span data-toggle="tooltip" data-placement="left" title="This shows information from the scheduler about the latest attempt to schedule the Topology on the cluster.">
             Scheduler Info
@@ -87,6 +94,9 @@
         <td>{{tasksTotal}}</td>
         <td>{{replicationCount}}</td>
         <td>{{assignedTotalMem}}</td>
+        {{#schedulerDisplayResource}}
+        <td>{{assignedCpu}}</td>
+        {{/schedulerDisplayResource}}
         <td>{{schedulerInfo}}</td>
       </tr>
     </tbody>
@@ -95,48 +105,48 @@
 <script id="topology-resources-template" type="text/html">
   <table id="topology-resources-table" class="table compact">
     <thead>
-    <tr>
-      <th>
-          <span data-toggle="tooltip" data-placement="right" title="The name given to the topology by when it was submitted.">
-            Name
-          </span>
-      </th>
-      <th>
-          <span data-toggle="tooltip" data-placement="right" title="The unique ID given to a Topology each time it is launched.">
-            Id
-          </span>
-      </th>
-      <th>
-          <span data-toggle="tooltip" data-placement="above" title="Requested Total On-Heap Memory by User.">
-            Requested On-Heap Memory (MB)
-          </span>
-      </th>
-      <th>
-          <span data-toggle="tooltip" data-placement="above" title="Assigned Total On-Heap Memory by Scheduler.">
-            Assigned On-Heap Memory (MB)
-          </span>
-      </th>
-      <th>
-          <span data-toggle="tooltip" data-placement="above" title="Requested Total Off-Heap Memory by User.">
-            Requested Off-Heap Memory (MB)
-          </span>
-      </th>
-      <th>
-          <span data-toggle="tooltip" data-placement="above" title="Assigned Total Off-Heap Memory by Scheduler.">
-            Assigned Off-Heap Memory (MB)
-          </span>
-      </th>
-      <th>
-          <span data-toggle="tooltip" data-placement="above" title="Requested Total CPU by User. Every 100 means 1 core.">
-            Requested CPU (%)
-          </span>
-      </th>
-      <th>
-          <span data-toggle="tooltip" data-placement="left" title="Assigned Total CPU by Scheduler. Every 100 means 1 core.">
-            Assigned CPU (%)
-          </span>
-      </th>
-    </tr>
+      <tr>
+         <th>
+           <span data-toggle="tooltip" data-placement="right" title="The name given to the topology by when it was submitted.">
+             Name
+           </span>
+         </th>
+         <th>
+           <span data-toggle="tooltip" data-placement="right" title="The unique ID given to a Topology each time it is launched.">
+             Id
+           </span>
+         </th>
+         <th>
+           <span data-toggle="tooltip" data-placement="top" title="Requested Total On-Heap Memory by User.">
+             Requested On-Heap Memory (MB)
+           </span>
+         </th>
+         <th>
+           <span data-toggle="tooltip" data-placement="top" title="Assigned Total On-Heap Memory by Scheduler.">
+             Assigned On-Heap Memory (MB)
+           </span>
+         </th>
+         <th>
+           <span data-toggle="tooltip" data-placement="top" title="Requested Total Off-Heap Memory by User.">
+             Requested Off-Heap Memory (MB)
+           </span>
+         </th>
+         <th>
+           <span data-toggle="tooltip" data-placement="top" title="Assigned Total Off-Heap Memory by Scheduler.">
+             Assigned Off-Heap Memory (MB)
+           </span>
+         </th>
+         <th>
+           <span data-toggle="tooltip" data-placement="top" title="Requested Total CPU by User. Every 100 means 1 core.">
+             Requested CPU (%)
+           </span>
+         </th>
+         <th>
+             <span data-toggle="tooltip" data-placement="left" title="Assigned Total CPU by Scheduler. Every 100 means 1 core.">
+               Assigned CPU (%)
+             </span>
+         </th>
+      </tr>
     </thead>
     <tbody>
     <tr>
@@ -163,22 +173,22 @@
           </span>
         </th>
         <th>
-          <span data-toggle="tooltip" data-placement="above" title="The number of Tuples emitted.">
+          <span data-toggle="tooltip" data-placement="top" title="The number of Tuples emitted.">
             Emitted
           </span>
         </th>
         <th>
-          <span data-toggle="tooltip" data-placement="above" title="The number of Tuples emitted that sent to one or more bolts.">
+          <span data-toggle="tooltip" data-placement="top" title="The number of Tuples emitted that sent to one or more bolts.">
             Transferred
           </span>
         </th>
         <th>
-          <span data-toggle="tooltip" data-placement="above" title="The average time a Tuple &quot;tree&quot; takes to be completely processed by the Topology. A value of 0 is expected if no acking is done.">
+          <span data-toggle="tooltip" data-placement="top" title="The average time a Tuple &quot;tree&quot; takes to be completely processed by the Topology. A value of 0 is expected if no acking is done.">
             Complete latency (ms)
           </span>
         </th>
         <th>
-          <span data-toggle="tooltip" data-placement="above" title="The number of Tuple &quot;trees&quot; successfully processed. A value of 0 is expected if no acking is done.">
+          <span data-toggle="tooltip" data-placement="top" title="The number of Tuple &quot;trees&quot; successfully processed. A value of 0 is expected if no acking is done.">
             Acked
           </span>
         </th>
@@ -215,22 +225,22 @@
           </span>
         </th>
         <th>
-          <span data-toggle="tooltip" data-placement="above" title="Topic">
+          <span data-toggle="tooltip" data-placement="top" title="Topic">
             Topic
           </span>
         </th>
         <th>
-          <span data-toggle="tooltip" data-placement="above" title="Partition">
+          <span data-toggle="tooltip" data-placement="top" title="Partition">
             Partition
           </span>
         </th>
         <th>
-          <span data-toggle="tooltip" data-placement="above" title="Latest Offset">
+          <span data-toggle="tooltip" data-placement="top" title="Latest Offset">
             Latest Offset
           </span>
         </th>
         <th>
-          <span data-toggle="tooltip" data-placement="above" title="Offset of last spout message successfully acked">
+          <span data-toggle="tooltip" data-placement="top" title="Offset of last spout message successfully acked">
             Spout Committed Offset
           </span>
         </th>
@@ -267,7 +277,7 @@
           </span>
         </th>
         <th>
-          <span data-toggle="tooltip" data-placement="above" title="Type of spout">
+          <span data-toggle="tooltip" data-placement="top" title="Type of spout">
             Type
           </span>
         </th>
@@ -353,32 +363,32 @@
           </span>
         </th>
         <th class="header">
-          <span data-toggle="tooltip" data-placement="above" title="A Task is an instance of a Bolt or Spout. The number of Tasks is almost always equal to the number of Executors.">
+          <span data-toggle="tooltip" data-placement="top" title="A Task is an instance of a Bolt or Spout. The number of Tasks is almost always equal to the number of Executors.">
             Tasks
           </span>
         </th>
         <th class="header">
-          <span data-toggle="tooltip" data-placement="above" title="The number of Tuples emitted.">
+          <span data-toggle="tooltip" data-placement="top" title="The number of Tuples emitted.">
             Emitted
           </span>
         </th>
         <th class="header">
-          <span data-toggle="tooltip" data-placement="above" title="The number of Tuples emitted that sent to one or more bolts.">
+          <span data-toggle="tooltip" data-placement="top" title="The number of Tuples emitted that sent to one or more bolts.">
             Transferred
           </span>
         </th>
         <th class="header">
-          <span data-toggle="tooltip" data-placement="above" title="The average time a Tuple &quot;tree&quot; takes to be completely processed by the Topology. A value of 0 is expected if no acking is done.">
+          <span data-toggle="tooltip" data-placement="top" title="The average time a Tuple &quot;tree&quot; takes to be completely processed by the Topology. A value of 0 is expected if no acking is done.">
             Complete latency (ms)
           </span>
         </th>
         <th class="header">
-          <span data-toggle="tooltip" data-placement="above" title="The number of Tuple &quot;trees&quot; successfully processed. A value of 0 is expected if no acking is done.">
+          <span data-toggle="tooltip" data-placement="top" title="The number of Tuple &quot;trees&quot; successfully processed. A value of 0 is expected if no acking is done.">
             Acked
           </span>
         </th>
         <th class="header">
-          <span data-toggle="tooltip" data-placement="above" title="The number of Tuple &quot;trees&quot; that were explicitly failed or timed out before acking was completed. A value of 0 is expected if no acking is done.">
+          <span data-toggle="tooltip" data-placement="top" title="The number of Tuple &quot;trees&quot; that were explicitly failed or timed out before acking was completed. A value of 0 is expected if no acking is done.">
             Failed
           </span>
         </th>
@@ -432,42 +442,42 @@
           </span>
         </th>
         <th class="header">
-          <span data-toggle="tooltip" data-placement="above" title="A Task is an instance of a Bolt or Spout. The number of Tasks is almost always equal to the number of Executors.">
+          <span data-toggle="tooltip" data-placement="top" title="A Task is an instance of a Bolt or Spout. The number of Tasks is almost always equal to the number of Executors.">
             Tasks
           </span>
         </th>
         <th class="header">
-          <span data-toggle="tooltip" data-placement="above" title="The number of Tuples emitted.">
+          <span data-toggle="tooltip" data-placement="top" title="The number of Tuples emitted.">
             Emitted
           </span>
         </th>
         <th class="header">
-          <span data-toggle="tooltip" data-placement="above" title="The number of Tuples emitted that sent to one or more bolts.">
+          <span data-toggle="tooltip" data-placement="top" title="The number of Tuples emitted that sent to one or more bolts.">
             Transferred
           </span>
         </th>
         <th class="header">
-          <span data-original-title="If this is around 1.0, the corresponding Bolt is running as fast as it can, so you may want to increase the Bolt's parallelism. This is (number executed * average execute latency) / measurement time." data-toggle="tooltip" data-placement="above">
+          <span data-original-title="If this is around 1.0, the corresponding Bolt is running as fast as it can, so you may want to increase the Bolt's parallelism. This is (number executed * average execute latency) / measurement time." data-toggle="tooltip" data-placement="top">
             Capacity (last 10m)
           </span>
         </th>
         <th class="header">
-          <span data-toggle="tooltip" data-placement="above" title="The average time a Tuple spends in the execute method. The execute method may complete without sending an Ack for the tuple.">
+          <span data-toggle="tooltip" data-placement="top" title="The average time a Tuple spends in the execute method. The execute method may complete without sending an Ack for the tuple.">
             Execute latency (ms)
           </span>
         </th>
         <th class="header">
-          <span data-toggle="tooltip" data-placement="above" title="The number of incoming Tuples processed.">
+          <span data-toggle="tooltip" data-placement="top" title="The number of incoming Tuples processed.">
             Executed
           </span>
         </th>
         <th class="header">
-          <span data-toggle="tooltip" data-placement="above" title="The average time it takes to Ack a Tuple after it is first received.  Bolts that join, aggregate or batch may not Ack a tuple until a number of other Tuples have been received.">
+          <span data-toggle="tooltip" data-placement="top" title="The average time it takes to Ack a Tuple after it is first received.  Bolts that join, aggregate or batch may not Ack a tuple until a number of other Tuples have been received.">
             Process latency (ms)
           </span>
         </th>
         <th class="header">
-          <span data-toggle="tooltip" data-placement="above" title="The number of Tuples acknowledged by this Bolt.">
+          <span data-toggle="tooltip" data-placement="top" title="The number of Tuples acknowledged by this Bolt.">
             Acked
           </span>
         </th>
@@ -512,7 +522,57 @@
         {{/bolts}}
     </tbody>
 </script>
-
+<script id="worker-stats-template" type="text/html">
+  <h2>Worker Resources</h2>
+  <table class="table table-striped compact" id="worker-stats-table">
+    <thead>
+      <tr>
+        <th class="header headerSortDown">
+          <span data-original-title="The hostname reported by the remote host. (Note that this hostname is not the result of a reverse lookup at the Nimbus node.)" data-toggle="tooltip" data-placement="top">
+            Host
+          </span>
+        </th>
+        <th class="header">
+          <span data-toggle="tooltip" data-placement="top" title="A unique identifier given to a Supervisor when it joins the cluster.">
+            Supervisor Id
+          </span>
+        </th>
+        <th class="header">
+          <span data-toggle="tooltip" data-placement="top" title="The port number used by the Worker. Click on the port number to open the logviewer page for this Worker.">
+            Port 
+          </span>
+        </th>
+        <th class="header">
+          <span data-toggle="tooltip" data-placement="top" title="The length of time a Worker has been alive.">
+            Uptime
+          </span>
+        </th>
+        <th class="header">
+          <span data-toggle="tooltip" data-placement="top" title="The number of executors">
+            Num executors
+          </span>
+        </th>
+        <th class="header">
+          <span data-toggle="tooltip" data-placement="top" title="Assigned Total Memory by Scheduler.">
+            Assigned Mem (MB)
+          </span>
+        </th>
+        {{#schedulerDisplayResource}}
+        <th class="header">
+          <span data-toggle="tooltip" data-placement="top" title="Assigned Total CPU by Scheduler. Every 100 means 1 core.">
+            Assigned CPU (%)
+          </span>
+        </th>
+        {{/schedulerDisplayResource}}
+        <th class="header">
+          <span data-toggle="tooltip" data-placement="top" title="The components running in this worker and the number of tasks per component.">
+            Components
+          </span>
+        </th>
+    </tr></thead>
+    <tbody>
+    </tbody>
+  </table>
+</script>
 <script id="topology-actions-template" type="text/html">
   <h2>Topology actions</h2>
   <p id="topology-actions">

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/ui/public/topology.html
----------------------------------------------------------------------
diff --git a/storm-core/src/ui/public/topology.html b/storm-core/src/ui/public/topology.html
index dc3c0b3..bb1279f 100644
--- a/storm-core/src/ui/public/topology.html
+++ b/storm-core/src/ui/public/topology.html
@@ -80,6 +80,9 @@
     <div id="bolt-stats" class="col-md-12"></div>
   </div>
   <div class="row">
+    <div id="worker-stats" class="col-md-12"></div>
+  </div>
+  <div class="row">
     <div id="topology-visualization" class="col-md-12"></div>
   </div>
   <div class="row">
@@ -284,6 +287,7 @@ $(document).ready(function() {
         var topologySpoutsLag = $("#topology-spouts-lag");
         var spoutStats = $("#spout-stats");
         var boltStats = $("#bolt-stats");
+        var workerStats = $("#worker-stats");
         var config = $("#topology-configuration");
         var topologyActions = $("#topology-actions");
         var topologyVisualization = $("#topology-visualization")
@@ -332,6 +336,12 @@ $(document).ready(function() {
               ]
             });
 
+          jsError(function() {
+            workerStats.append(Mustache.render($(template).filter("#worker-stats-template").html(),response));
+            makeTopologyWorkerStatsTable (response, '#worker-stats-table', '#worker-stats');
+          });
+
+          jsError(function() {
             topologyVisualization.append(Mustache.render($(template).filter("#topology-visualization-template").html(), response));
             $("#show-hide-visualization").click(function () { show_visualization(null) });
 
@@ -439,9 +449,9 @@ $(document).ready(function() {
                     }
                 }
             });
+          });
       }});
     });
-
  });
 </script>
 </html>

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/test/clj/org/apache/storm/nimbus_test.clj
----------------------------------------------------------------------
diff --git a/storm-core/test/clj/org/apache/storm/nimbus_test.clj b/storm-core/test/clj/org/apache/storm/nimbus_test.clj
index dbb89bd..4daa556 100644
--- a/storm-core/test/clj/org/apache/storm/nimbus_test.clj
+++ b/storm-core/test/clj/org/apache/storm/nimbus_test.clj
@@ -26,7 +26,7 @@
   (:import [org.apache.storm.generated Credentials NotAliveException SubmitOptions
             TopologyInitialStatus TopologyStatus AlreadyAliveException KillOptions RebalanceOptions
             InvalidTopologyException AuthorizationException
-            LogConfig LogLevel LogLevelAction])
+            LogConfig LogLevel LogLevelAction NodeInfo])
   (:import [java.util HashMap])
   (:import [java.io File])
   (:import [org.apache.storm.utils Time Utils])
@@ -1214,7 +1214,42 @@
                     nimbus/check-authorization!
                       [1 2 3] expected-name expected-conf expected-operation)
                   (verify-first-call-args-for-indices
-                    nimbus/try-read-storm-topology [0] "fake-id"))))))))))
+                    nimbus/try-read-storm-topology [0] "fake-id"))))))
+
+        (testing "getSupervisorPageInfo only calls check-authorization as getTopology"
+          (let [expected-operation "getTopology"
+                assignment {:executor->node+port {[1 1] ["super1" "host1"],
+                                                  [2 2] ["super2" "host1"]}}
+                topo-assignment {expected-name assignment}
+                check-auth-state (atom [])
+                mock-check-authorization (fn [nimbus storm-name storm-conf operation] 
+                                           (swap! check-auth-state conj {:nimbus nimbus
+                                                                         :storm-name storm-name
+                                                                         :storm-conf storm-conf
+                                                                         :operation operation}))]
+            (stubbing [nimbus/check-authorization! mock-check-authorization
+                       nimbus/try-read-storm-conf expected-conf
+                       nimbus/try-read-storm-topology nil
+                       storm-task-info nil
+                       nimbus/all-supervisor-info {"super1" {:hostname "host1", :meta [1234], :uptime-secs 123}
+                                                   "super2" {:hostname "host2", :meta [1234], :uptime-secs 123}}
+                       nimbus/topology-assignments topo-assignment
+                       nimbus/get-launch-time-secs 0]
+              ;; not called yet
+              (verify-call-times-for nimbus/check-authorization! 0)
+              (.getSupervisorPageInfo nimbus "super1" nil true)
+ 
+              ;; afterwards, it should get called twice
+              (verify-call-times-for nimbus/check-authorization! 2)
+              (let [first-call (nth @check-auth-state 0)
+                    second-call (nth @check-auth-state 1)]
+                 (is (= expected-name (:storm-name first-call)))
+                 (is (= expected-conf (:storm-conf first-call)))
+                 (is (= "getTopology" (:operation first-call)))
+ 
+                 (is (= expected-name (:storm-name second-call)))
+                 (is (= expected-conf (:storm-conf second-call)))
+                 (is (= "getSupervisorPageInfo" (:operation second-call)))))))))))
 
 (deftest test-nimbus-iface-getTopology-methods-throw-correctly
   (with-local-cluster [cluster]
@@ -1271,7 +1306,8 @@
                         :status {:type bogus-type}}
                 }
         ]
-      (stubbing [topology-bases bogus-bases
+      (stubbing [nimbus/get-resources-for-topology nil
+                 topology-bases bogus-bases
                  nimbus/get-blob-replication-count 1]
         (let [topos (.get_topologies (.getClusterInfo nimbus))]
           ; The number of topologies in the summary is correct.
@@ -1614,3 +1650,33 @@
         (is (= (count @hb-cache) 2))
         (is (contains? @hb-cache "topo1"))
         (is (contains? @hb-cache "topo2"))))))
+
+
+(deftest user-topologies-for-supervisor
+  (let [assignment {:executor->node+port {[1 1] ["super1" "host1"],
+                                          [2 2] ["super2" "host2"]}}
+        assignment2 {:executor->node+port {[1 1] ["super2" "host2"],
+                                           [2 2] ["super2" "host2"]}}
+        assignments {"topo1" assignment, "topo2" assignment2}]
+    (stubbing [nimbus/is-authorized? true]
+      (let [topos1 (nimbus/user-and-supervisor-topos nil nil nil assignments "super1")
+            topos2 (nimbus/user-and-supervisor-topos nil nil nil assignments "super2")]
+        (is (= (list "topo1") (:supervisor-topologies topos1)))
+        (is (= #{"topo1"} (:user-topologies topos1))) 
+        (is (= (list "topo1" "topo2") (:supervisor-topologies topos2)))
+        (is (= #{"topo1" "topo2"} (:user-topologies topos2)))))))
+
+(defn- mock-check-auth 
+  [nimbus conf blob-store op topo-name]
+  (= topo-name "authorized"))
+
+(deftest user-topologies-for-supervisor-with-unauthorized-user
+  (let [assignment {:executor->node+port {[1 1] ["super1" "host1"],
+                                          [2 2] ["super2" "host2"]}}
+        assignment2 {:executor->node+port {[1 1] ["super1" "host1"],
+                                           [2 2] ["super2" "host2"]}}
+        assignments {"topo1" assignment, "authorized" assignment2}] 
+    (stubbing [nimbus/is-authorized? mock-check-auth]
+      (let [topos (nimbus/user-and-supervisor-topos nil nil nil assignments "super1")]
+        (is (= (list "topo1" "authorized") (:supervisor-topologies topos)))
+        (is (= #{"authorized"} (:user-topologies topos)))))))

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/test/clj/org/apache/storm/stats_test.clj
----------------------------------------------------------------------
diff --git a/storm-core/test/clj/org/apache/storm/stats_test.clj b/storm-core/test/clj/org/apache/storm/stats_test.clj
new file mode 100644
index 0000000..5c1ec50
--- /dev/null
+++ b/storm-core/test/clj/org/apache/storm/stats_test.clj
@@ -0,0 +1,134 @@
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements.  See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership.  The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License.  You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+(ns org.apache.storm.stats-test
+  (:use [clojure test])
+  (:import [org.apache.storm.scheduler WorkerSlot])
+  (:import [org.apache.storm.generated WorkerResources])
+  (:require [org.apache.storm [stats :as stats]]))
+
+(defn- make-topo-info-no-beats 
+  []
+  {:storm-name "testing", 
+   :assignment {:executor->node+port {[1 3] ["node" 1234] 
+                                      [4 4] ["node" 1234]}
+                :node->host {"node" "host"}}})
+
+(defn- make-topo-info
+  []
+  (merge 
+    {:beats {[1 3] {:uptime 6}
+             [4 4] {:uptime 6}}}
+    {:task->component {1 "exclaim1", 2 "__sys", 3 "exclaim1", 4 "__sys2"}}
+    (make-topo-info-no-beats)))
+
+(defn- make-worker-resources
+  []
+  (doto (WorkerResources.)
+    (.set_mem_on_heap 3)
+    (.set_mem_off_heap 4)
+    (.set_cpu 5)))
+
+(deftest agg-worker-populates-worker-summary
+  (let [storm-id "foo"
+        topo-info (make-topo-info)
+        worker->resources {(WorkerSlot. "node" 1234) (make-worker-resources)}
+        include-sys? true 
+        user-authorized true 
+        worker-summaries (stats/agg-worker-stats storm-id 
+                                                 topo-info 
+                                                 worker->resources 
+                                                 include-sys? 
+                                                 user-authorized)]
+    (let [summ (first worker-summaries)
+          comps (.get_component_to_num_tasks summ)]
+      (is (= 1 (count worker-summaries)))
+      (is (= "host" (.get_host summ)))
+      (is (= 6 (.get_uptime_secs summ)))
+      (is (= "node" (.get_supervisor_id summ)))
+      (is (= 1234 (.get_port summ)))
+      (is (= "foo" (.get_topology_id summ)))
+      (is (= "testing" (.get_topology_name summ)))
+      (is (= 2 (.get_num_executors summ)))
+      (is (= 3.0 (.get_assigned_memonheap summ)))
+      (is (= 4.0 (.get_assigned_memoffheap summ)))
+      (is (= 5.0 (.get_assigned_cpu summ)))
+      ;; agg-worker-stats groups the components together
+      (is (= 2 (get comps "exclaim1")))
+      (is (= 1 (get comps "__sys"))))))
+
+(deftest agg-worker-skips-sys-if-not-enabled
+  (let [storm-id "foo"
+        topo-info (make-topo-info)
+        worker->resources {(WorkerSlot. "node" 1234) (make-worker-resources)}
+        include-sys? false
+        user-authorized true 
+        worker-summaries (stats/agg-worker-stats storm-id 
+                                                 topo-info 
+                                                 worker->resources 
+                                                 include-sys? 
+                                                 user-authorized)]
+    (let [summ (first worker-summaries)
+          comps (.get_component_to_num_tasks summ)]
+      (is (= nil (get comps "__sys")))
+      (is (= 2 (.get_num_executors summ)))
+      (is (= 2 (get comps "exclaim1"))))))
+
+(deftest agg-worker-gracefully-handles-missing-beats
+  (let [storm-id "foo"
+        topo-info (make-topo-info-no-beats)
+        worker->resources {(WorkerSlot. "node" 1234) (make-worker-resources)}
+        include-sys? false
+        user-authorized true 
+        worker-summaries (stats/agg-worker-stats storm-id 
+                                                 topo-info 
+                                                 worker->resources 
+                                                 include-sys? 
+                                                 user-authorized)]
+    (let [summ (first worker-summaries)]
+      (is (= 0 (.get_uptime_secs summ))))))
+
+(deftest agg-worker-stats-exclude-components-if-not-authorized
+  (let [storm-id "foo"
+        topo-info (make-topo-info-no-beats)
+        worker->resources {(WorkerSlot. "node" 1234) (make-worker-resources)}
+        include-sys? false
+        user-authorized  false
+        worker-summaries (stats/agg-worker-stats storm-id 
+                                                 topo-info 
+                                                 worker->resources 
+                                                 include-sys? 
+                                                 user-authorized)]
+    (let [summ (first worker-summaries)]
+      (is (= 0 (.get_uptime_secs summ)))
+      (is (= nil (.get_component_to_num_tasks summ))))))
+
+(deftest agg-worker-stats-can-handle-nil-worker->resources
+  (let [storm-id "foo"
+        topo-info (make-topo-info-no-beats)
+        worker->resources nil
+        include-sys? false
+        user-authorized  false
+        worker-summaries (stats/agg-worker-stats storm-id 
+                                                 topo-info 
+                                                 worker->resources 
+                                                 include-sys? 
+                                                 user-authorized)]
+    (let [summ (first worker-summaries)]
+      (is (= 0 (.get_uptime_secs summ)))
+      (is (= 0.0 (.get_assigned_memonheap summ)))
+      (is (= 0.0 (.get_assigned_memoffheap summ)))
+      (is (= 0.0 (.get_assigned_cpu summ)))
+      (is (= nil (.get_component_to_num_tasks summ))))))


[7/9] storm git commit: STORM-1994: Add table with per-topology and worker resource usage and components in (new) supervisor and topology pages

Posted by ka...@apache.org.
STORM-1994: Add table with per-topology and worker resource usage and components in (new) supervisor and topology pages


Project: http://git-wip-us.apache.org/repos/asf/storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/storm/commit/0e0bcf27
Tree: http://git-wip-us.apache.org/repos/asf/storm/tree/0e0bcf27
Diff: http://git-wip-us.apache.org/repos/asf/storm/diff/0e0bcf27

Branch: refs/heads/1.x-branch
Commit: 0e0bcf27f4b7787cc3e6886ccbcd5dc55daef771
Parents: ce38849
Author: Alessandro Bellina <ab...@yahoo-inc.com>
Authored: Wed Jul 6 14:23:18 2016 -0500
Committer: Alessandro Bellina <ab...@yahoo-inc.com>
Committed: Sun Aug 21 22:31:08 2016 -0500

----------------------------------------------------------------------
 docs/STORM-UI-REST-API.md                       |  121 +-
 docs/images/supervisor_page.png                 |  Bin 0 -> 133290 bytes
 .../src/clj/org/apache/storm/daemon/nimbus.clj  |  296 +-
 storm-core/src/clj/org/apache/storm/stats.clj   |   68 +-
 storm-core/src/clj/org/apache/storm/ui/core.clj |   94 +-
 .../org/apache/storm/generated/Assignment.java  |  244 +-
 .../storm/generated/ClusterWorkerHeartbeat.java |   52 +-
 .../storm/generated/ComponentPageInfo.java      |  220 +-
 .../org/apache/storm/generated/Credentials.java |   44 +-
 .../jvm/org/apache/storm/generated/HBNodes.java |   32 +-
 .../org/apache/storm/generated/HBRecords.java   |   36 +-
 .../storm/generated/LSApprovedWorkers.java      |   44 +-
 .../generated/LSSupervisorAssignments.java      |   48 +-
 .../apache/storm/generated/LSTopoHistory.java   |   64 +-
 .../storm/generated/LSTopoHistoryList.java      |   36 +-
 .../storm/generated/LSWorkerHeartbeat.java      |   36 +-
 .../apache/storm/generated/ListBlobsResult.java |   32 +-
 .../apache/storm/generated/LocalAssignment.java |   36 +-
 .../apache/storm/generated/LocalStateData.java  |   48 +-
 .../org/apache/storm/generated/LogConfig.java   |   48 +-
 .../jvm/org/apache/storm/generated/Nimbus.java  | 3486 ++++++++++++------
 .../org/apache/storm/generated/NodeInfo.java    |   32 +-
 .../storm/generated/RebalanceOptions.java       |   44 +-
 .../storm/generated/SettableBlobMeta.java       |   36 +-
 .../org/apache/storm/generated/StormBase.java   |   92 +-
 .../apache/storm/generated/SupervisorInfo.java  |  152 +-
 .../storm/generated/SupervisorPageInfo.java     |  624 ++++
 .../storm/generated/TopologyHistoryInfo.java    |   32 +-
 .../storm/generated/TopologyPageInfo.java       |  284 +-
 .../apache/storm/generated/WorkerSummary.java   | 1880 ++++++++++
 .../jvm/org/apache/storm/scheduler/Cluster.java |  217 +-
 .../resource/ResourceAwareScheduler.java        |    9 +
 .../auth/authorizer/SimpleACLAuthorizer.java    |    7 +-
 storm-core/src/py/storm/Nimbus-remote           |    7 +
 storm-core/src/py/storm/Nimbus.py               |  272 +-
 storm-core/src/py/storm/ttypes.py               | 1457 ++++++--
 storm-core/src/storm.thrift                     |   25 +
 storm-core/src/ui/public/component.html         |    8 +
 storm-core/src/ui/public/css/style.css          |   20 +
 storm-core/src/ui/public/js/script.js           |  191 +
 storm-core/src/ui/public/supervisor.html        |  132 +
 .../public/templates/index-page-template.html   |    4 +-
 .../templates/supervisor-page-template.html     |  145 +
 .../templates/topology-page-template.html       |  208 +-
 storm-core/src/ui/public/topology.html          |   12 +-
 .../test/clj/org/apache/storm/nimbus_test.clj   |   72 +-
 .../test/clj/org/apache/storm/stats_test.clj    |  134 +
 47 files changed, 8820 insertions(+), 2361 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/docs/STORM-UI-REST-API.md
----------------------------------------------------------------------
diff --git a/docs/STORM-UI-REST-API.md b/docs/STORM-UI-REST-API.md
index 884c6d5..340137d 100644
--- a/docs/STORM-UI-REST-API.md
+++ b/docs/STORM-UI-REST-API.md
@@ -125,6 +125,7 @@ Response fields:
 |uptimeSeconds| Integer| Shows how long the supervisor is running in seconds|
 |slotsTotal| Integer| Total number of available worker slots for this supervisor|
 |slotsUsed| Integer| Number of worker slots used on this supervisor|
+|schedulerDisplayResource| Boolean | Whether to display scheduler resource information|
 |totalMem| Double| Total memory capacity on this supervisor|
 |totalCpu| Double| Total CPU capacity on this supervisor|
 |usedMem| Double| Used memory capacity on this supervisor|
@@ -207,6 +208,123 @@ Sample response:
 }
 ```
 
+### /api/v1/supervisor (GET)
+
+Returns a summary for a supervisor by id, or for all supervisors running on a host.
+
+Examples:
+
+```no-highlight
+ 1. By host: http://ui-daemon-host-name:8080/api/v1/supervisor?host=supervisor-daemon-host-name
+ 2. By id: http://ui-daemon-host-name:8080/api/v1/supervisor?id=f5449110-1daa-43e2-89e3-69917b16dec9-192.168.1.1
+```
+
+Request parameters:
+
+|Parameter |Value   |Description  |
+|----------|--------|-------------|
+|id        |String. Supervisor id | If specified, respond with the stats for the supervisor and its workers with that id. When id is specified, the host parameter is ignored. |
+|host      |String. Host name| If specified, respond with all supervisors and worker stats on that host (normally just one supervisor)|
+|sys       |String. Values 1 or 0. Default value 0| Controls whether system stats are included in the response|
+
+Response fields:
+
+|Field  |Value|Description|
+|---	|---	|---
+|supervisors| Array| Array of supervisor summaries|
+|workers| Array| Array of worker summaries |
+|schedulerDisplayResource| Boolean | Whether to display scheduler resource information|
+
+Each supervisor is defined by:
+
+|Field  |Value|Description|
+|---	|---	|---
+|id| String | Supervisor's id|
+|host| String| Supervisor's host name|
+|uptime| String| Shows how long the supervisor has been running|
+|uptimeSeconds| Integer| Shows how long the supervisor has been running, in seconds|
+|slotsTotal| Integer| Total number of worker slots for this supervisor|
+|slotsUsed| Integer| Number of worker slots used on this supervisor|
+|totalMem| Double| Total memory capacity on this supervisor|
+|totalCpu| Double| Total CPU capacity on this supervisor|
+|usedMem| Double| Used memory capacity on this supervisor|
+|usedCpu| Double| Used CPU capacity on this supervisor|
+
+Each worker is defined by:
+
+|Field  |Value  |Description|
+|-------|-------|-----------|
+|supervisorId | String| Supervisor's id|
+|host | String | Worker's host name|
+|port | Integer | Worker's port|
+|topologyId | String | Topology Id|
+|topologyName | String | Topology Name|
+|executorsTotal | Integer | Number of executors used by the topology in this worker|
+|assignedMemOnHeap | Double | Assigned On-Heap Memory by Scheduler (MB)|
+|assignedMemOffHeap | Double | Assigned Off-Heap Memory by Scheduler (MB)|
+|assignedCpu | Number | Assigned CPU by Scheduler (%)| 
+|componentNumTasks | Dictionary | Components -> # of executing tasks|
+|uptime| String| Shows how long the worker has been running|
+|uptimeSeconds| Integer| Shows how long the worker has been running, in seconds|
+|workerLogLink | String | Link to worker log viewer page|
+
+Sample response:
+
+```json
+{
+    "supervisors": [{ 
+        "totalMem": 4096.0, 
+        "host":"192.168.10.237",
+        "id":"bdfe8eff-f1d8-4bce-81f5-9d3ae1bf432e-169.254.129.212",
+        "uptime":"7m 8s",
+        "totalCpu":400.0,
+        "usedCpu":495.0,
+        "usedMem":3432.0,
+        "slotsUsed":2,
+        "version":"0.10.1",
+        "slotsTotal":4,
+        "uptimeSeconds":428
+    }],
+    "schedulerDisplayResource":true,
+    "workers":[{
+        "topologyName":"ras",
+        "topologyId":"ras-4-1460229987",
+        "host":"192.168.10.237",
+        "supervisorId":"bdfe8eff-f1d8-4bce-81f5-9d3ae1bf432e-169.254.129.212",
+        "assignedMemOnHeap":704.0,
+        "uptime":"2m 47s",
+        "uptimeSeconds":167,
+        "port":6707,
+        "workerLogLink":"http:\/\/192.168.10.237:8000\/log?file=ras-4-1460229987%2F6707%2Fworker.log",
+        "componentNumTasks": {
+            "word":5
+        },
+        "executorsTotal":8,
+        "assignedCpu":130.0,
+        "assignedMemOffHeap":80.0
+    },
+    {
+        "topologyName":"ras",
+        "topologyId":"ras-4-1460229987",
+        "host":"192.168.10.237",
+        "supervisorId":"bdfe8eff-f1d8-4bce-81f5-9d3ae1bf432e-169.254.129.212",
+        "assignedMemOnHeap":904.0,
+        "uptime":"2m 53s",
+        "port":6706,
+        "workerLogLink":"http:\/\/192.168.10.237:8000\/log?file=ras-4-1460229987%2F6706%2Fworker.log",
+        "componentNumTasks":{
+            "exclaim2":2,
+            "exclaim1":3,
+            "word":5
+        },
+        "executorsTotal":10,
+        "uptimeSeconds":173,
+        "assignedCpu":165.0,
+        "assignedMemOffHeap":80.0
+    }]
+}
+```
+
 ### /api/v1/topology/summary (GET)
 
 Returns summary information for all topologies.
@@ -232,6 +350,7 @@ Response fields:
 |assignedMemOffHeap| Double|Assigned Off-Heap Memory by Scheduler (MB)|
 |assignedTotalMem| Double|Assigned Total Memory by Scheduler (MB)|
 |assignedCpu| Double|Assigned CPU by Scheduler (%)|
+|schedulerDisplayResource| Boolean | Whether to display scheduler resource information|
 
 Sample response:
 
@@ -257,7 +376,7 @@ Sample response:
             "assignedTotalMem": 768,
             "assignedCpu": 80
         }
-    ]
+    ],
     "schedulerDisplayResource": true
 }
 ```

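The new `/api/v1/supervisor` endpoint documented above can be exercised from a REPL. The following is a minimal sketch, not part of this commit, that assumes a UI daemon reachable on port 8080 and the org.clojure/data.json library on the classpath; the host names are placeholders:

```clojure
;; Sketch only: fetch worker summaries for every supervisor on a host and keep the
;; fields added by STORM-1994 (componentNumTasks, assigned resources).
(require '[clojure.data.json :as json])   ;; assumed dependency, not part of Storm

(defn supervisor-workers
  [ui-host supervisor-host]
  (let [url  (str "http://" ui-host ":8080/api/v1/supervisor"
                  "?host=" supervisor-host "&sys=0")
        body (json/read-str (slurp url))]
    (for [w (get body "workers")]
      (select-keys w ["topologyName" "port" "executorsTotal"
                      "componentNumTasks" "assignedMemOnHeap"
                      "assignedMemOffHeap" "assignedCpu"]))))

;; (supervisor-workers "ui-daemon-host-name" "supervisor-daemon-host-name")
```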
http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/docs/images/supervisor_page.png
----------------------------------------------------------------------
diff --git a/docs/images/supervisor_page.png b/docs/images/supervisor_page.png
new file mode 100644
index 0000000..5133681
Binary files /dev/null and b/docs/images/supervisor_page.png differ

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/clj/org/apache/storm/daemon/nimbus.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/daemon/nimbus.clj b/storm-core/src/clj/org/apache/storm/daemon/nimbus.clj
index 29d9f28..c17e2fd 100644
--- a/storm-core/src/clj/org/apache/storm/daemon/nimbus.clj
+++ b/storm-core/src/clj/org/apache/storm/daemon/nimbus.clj
@@ -45,7 +45,7 @@
             KillOptions RebalanceOptions ClusterSummary SupervisorSummary TopologySummary TopologyInfo TopologyHistoryInfo
             ExecutorSummary AuthorizationException GetInfoOptions NumErrorsChoice SettableBlobMeta ReadableBlobMeta
             BeginDownloadResult ListBlobsResult ComponentPageInfo TopologyPageInfo LogConfig LogLevel LogLevelAction
-            ProfileRequest ProfileAction NodeInfo])
+            ProfileRequest ProfileAction NodeInfo SupervisorPageInfo WorkerSummary WorkerResources])
   (:import [org.apache.storm.daemon Shutdownable])
   (:import [org.apache.storm.cluster ClusterStateContext DaemonType])
   (:use [org.apache.storm util config log timer zookeeper local-state])
@@ -95,6 +95,7 @@
 (defmeter nimbus:num-getTopologyInfoWithOpts-calls)
 (defmeter nimbus:num-getTopologyInfo-calls)
 (defmeter nimbus:num-getTopologyPageInfo-calls)
+(defmeter nimbus:num-getSupervisorPageInfo-calls)
 (defmeter nimbus:num-getComponentPageInfo-calls)
 (defmeter nimbus:num-shutdown-calls)
 
@@ -210,6 +211,7 @@
      :id->sched-status (atom {})
      :node-id->resources (atom {}) ;;resources of supervisors
      :id->resources (atom {}) ;;resources of topologies
+     :id->worker-resources (atom {}) ; resources of workers per topology
      :cred-renewers (AuthUtils/GetCredentialRenewers conf)
      :topology-history-lock (Object.)
      :topo-history-state (nimbus-topo-history-state conf)
@@ -428,7 +430,8 @@
       {})
     ))
 
-(defn- all-supervisor-info
+;; public for testing
+(defn all-supervisor-info
   ([storm-cluster-state] (all-supervisor-info storm-cluster-state nil))
   ([storm-cluster-state callback]
      (let [supervisor-ids (.supervisors storm-cluster-state callback)]
@@ -738,8 +741,7 @@
                                                     all-ports (-> (get all-scheduling-slots sid)
                                                                   (set/difference dead-ports)
                                                                   ((fn [ports] (map int ports))))
-                                                    supervisor-details (SupervisorDetails. sid hostname scheduler-meta all-ports (:resources-map supervisor-info))
-                                                    ]]
+                                                    supervisor-details (SupervisorDetails. sid hostname scheduler-meta all-ports (:resources-map supervisor-info))]]
                                           {sid supervisor-details}))]
     (merge all-supervisor-details
            (into {}
@@ -818,6 +820,9 @@
 
     new-topology->executor->node+port))
 
+(defrecord TopologyResources [requested-mem-on-heap requested-mem-off-heap requested-cpu
+                              assigned-mem-on-heap assigned-mem-off-heap assigned-cpu])
+ 
 ;; public so it can be mocked out
 (defn compute-new-scheduler-assignments [nimbus existing-assignments topologies scratch-topology-id]
   (let [conf (:conf nimbus)
@@ -855,19 +860,77 @@
                                   (apply merge-with set/union))
 
         supervisors (read-all-supervisor-details nimbus all-scheduling-slots supervisor->dead-ports)
-        cluster (Cluster. (:inimbus nimbus) supervisors topology->scheduler-assignment conf)
-        _ (.setStatusMap cluster (deref (:id->sched-status nimbus)))
-        ;; call scheduler.schedule to schedule all the topologies
-        ;; the new assignments for all the topologies are in the cluster object.
-        _ (.schedule (:scheduler nimbus) topologies cluster)
-        _ (.setTopologyResourcesMap cluster @(:id->resources nimbus))
-        _ (if-not (conf SCHEDULER-DISPLAY-RESOURCE) (.updateAssignedMemoryForTopologyAndSupervisor cluster topologies))
-        ;;merge with existing statuses
-        _ (reset! (:id->sched-status nimbus) (merge (deref (:id->sched-status nimbus)) (.getStatusMap cluster)))
-        _ (reset! (:node-id->resources nimbus) (.getSupervisorsResourcesMap cluster))
-        _ (reset! (:id->resources nimbus) (.getTopologyResourcesMap cluster))]
+        cluster (Cluster. (:inimbus nimbus) supervisors topology->scheduler-assignment conf)]
+
+    ;; set the status map with existing topology statuses
+    (.setStatusMap cluster (deref (:id->sched-status nimbus)))
+    ;; call scheduler.schedule to schedule all the topologies
+    ;; the new assignments for all the topologies are in the cluster object.
+    (.schedule (:scheduler nimbus) topologies cluster)
+
+    ;;merge with existing statuses
+    (reset! (:id->sched-status nimbus) (merge (deref (:id->sched-status nimbus)) (.getStatusMap cluster)))
+    (reset! (:node-id->resources nimbus) (.getSupervisorsResourcesMap cluster))
+
+    (if-not (conf SCHEDULER-DISPLAY-RESOURCE) 
+      (.updateAssignedMemoryForTopologyAndSupervisor cluster topologies))
+
+    ; Remove both of the swaps below at the first opportunity. This is a hack to fill in topology and worker resources for non-RAS schedulers.
+    (swap! (:id->resources nimbus) merge (into {} (map (fn [[k v]] [k (->TopologyResources (nth v 0) (nth v 1) (nth v 2)
+                                                                                           (nth v 3) (nth v 4) (nth v 5))])
+                                                       (.getTopologyResourcesMap cluster))))
+    ; Remove this as well at the first opportunity
+    (swap! (:id->worker-resources nimbus) merge 
+           (into {} (map (fn [[k v]] [k (map-val #(doto (WorkerResources.)
+                                                        (.set_mem_on_heap (nth % 0))
+                                                        (.set_mem_off_heap (nth % 1))
+                                                        (.set_cpu (nth % 2))) v)])
+                         (.getWorkerResourcesMap cluster))))
+
     (.getAssignments cluster)))
 
+(defn get-resources-for-topology [nimbus topo-id]
+  (or (get @(:id->resources nimbus) topo-id)
+      (try
+        (let [storm-cluster-state (:storm-cluster-state nimbus)
+              topology-details (read-topology-details nimbus topo-id)
+              assigned-resources (->> (.assignment-info storm-cluster-state topo-id nil)
+                                      :worker->resources
+                                      (vals)
+                                        ; Default to [[0 0 0]] if there are no values
+                                      (#(or % [[0 0 0]]))
+                                        ; [[on-heap, off-heap, cpu]] -> [[on-heap], [off-heap], [cpu]]
+                                      (apply map vector)
+                                        ; [[on-heap], [off-heap], [cpu]] -> [on-heap-sum, off-heap-sum, cpu-sum]
+                                      (map (partial reduce +)))
+              worker-resources (->TopologyResources (.getTotalRequestedMemOnHeap topology-details)
+                                                    (.getTotalRequestedMemOffHeap topology-details)
+                                                    (.getTotalRequestedCpu topology-details)
+                                                    (nth assigned-resources 0)
+                                                    (nth assigned-resources 1)
+                                                    (nth assigned-resources 2))]
+          (swap! (:id->resources nimbus) assoc topo-id worker-resources)
+          worker-resources)
+        (catch KeyNotFoundException e
+          ; This can happen when a topology is first coming up.
+          ; It's thrown by the blobstore code.
+          (log-error e "Failed to get topology details")
+          (->TopologyResources 0 0 0 0 0 0)))))
+
+(defn- get-worker-resources-for-topology [nimbus topo-id]
+  (or (get @(:id->worker-resources nimbus) topo-id)
+      (try
+        (let [storm-cluster-state (:storm-cluster-state nimbus)
+              assigned-resources (->> (.assignment-info storm-cluster-state topo-id nil)
+                                      :worker->resources)
+              worker-resources (into {} (map #(identity {(WorkerSlot. (first (key %)) (second (key %)))  
+                                                         (doto (WorkerResources.)
+                                                             (.set_mem_on_heap (nth (val %) 0))
+                                                             (.set_mem_off_heap (nth (val %) 1))
+                                                             (.set_cpu (nth (val %) 2)))}) assigned-resources))]
+          (swap! (:id->worker-resources nimbus) assoc topo-id worker-resources)
+          worker-resources))))
+          
 (defn changed-executors [executor->node+port new-executor->node+port]
   (let [executor->node+port (if executor->node+port (sort executor->node+port) nil)
         new-executor->node+port (if new-executor->node+port (sort new-executor->node+port) nil)
@@ -959,6 +1022,10 @@
                                                  start-times
                                                  worker->resources)}))]
 
+    (when (not= new-assignments existing-assignments)
+      (log-debug "RESETTING id->resources and id->worker-resources cache!")
+      (reset! (:id->resources nimbus) {})
+      (reset! (:id->worker-resources nimbus) {}))
     ;; tasks figure out what tasks to talk to by looking at topology at runtime
     ;; only log/set when there's been a change to the assignment
     (doseq [[topology-id assignment] new-assignments
@@ -1026,6 +1093,18 @@
       (throw (AlreadyAliveException. (str storm-name " is already active"))))
     ))
 
+(defn try-read-storm-conf [conf storm-id blob-store]
+  (try-cause
+    (read-storm-conf-as-nimbus conf storm-id blob-store)
+    (catch KeyNotFoundException e
+       (throw (NotAliveException. (str storm-id))))))
+
+(defn try-read-storm-conf-from-name [conf storm-name nimbus]
+  (let [storm-cluster-state (:storm-cluster-state nimbus)
+        blob-store (:blob-store nimbus)
+        id (get-storm-id storm-cluster-state storm-name)]
+   (try-read-storm-conf conf id blob-store)))
+
 (defn check-authorization!
   ([nimbus storm-name storm-conf operation context]
      (let [aclHandler (:authorization-handler nimbus)
@@ -1051,6 +1130,15 @@
   ([nimbus storm-name storm-conf operation]
      (check-authorization! nimbus storm-name storm-conf operation (ReqContext/context))))
 
+;; no-throw version of check-authorization!
+(defn is-authorized?
+  [nimbus conf blob-store operation topology-id]
+  (let [topology-conf (try-read-storm-conf conf topology-id blob-store)
+        storm-name (topology-conf TOPOLOGY-NAME)]
+    (try (check-authorization! nimbus storm-name topology-conf operation)
+         true
+      (catch AuthorizationException e false))))
+
 (defn code-ids [blob-store]
   (let [to-id (reify KeyFilter
                 (filter [this key] (get-id-from-blob-key key)))]
@@ -1355,24 +1443,55 @@
 (defmethod blob-sync :local [conf nimbus]
   nil)
 
+(defn make-supervisor-summary 
+  [nimbus id info]
+    (let [ports (set (:meta info)) ;;TODO: this is only true for standalone
+          sup-sum (SupervisorSummary. (:hostname info)
+                                      (:uptime-secs info)
+                                      (count ports)
+                                      (count (:used-ports info))
+                                      id)]
+      (.set_total_resources sup-sum (map-val double (:resources-map info)))
+      (when-let [[total-mem total-cpu used-mem used-cpu] (.get @(:node-id->resources nimbus) id)]
+        (.set_used_mem sup-sum (or used-mem 0))
+        (.set_used_cpu sup-sum (or used-cpu 0)))
+      (when-let [version (:version info)] (.set_version sup-sum version))
+      sup-sum))
+
+(defn user-and-supervisor-topos
+  [nimbus conf blob-store assignments supervisor-id]
+  (let [topo-id->supervisors 
+          (into {} (for [[topo-id assignment] assignments] 
+                     {topo-id (into #{} 
+                                    (map #(first (second %)) 
+                                         (:executor->node+port assignment)))}))
+        supervisor-topologies (keys (filter #(get (val %) supervisor-id) topo-id->supervisors))]
+    {:supervisor-topologies supervisor-topologies
+     :user-topologies (into #{} (filter (partial is-authorized? nimbus 
+                                                 conf 
+                                                 blob-store 
+                                                 "getTopology") 
+                  supervisor-topologies))}))
+
+(defn topology-assignments 
+  [storm-cluster-state]
+  (let [assigned-topology-ids (.assignments storm-cluster-state nil)]
+    (into {} (for [tid assigned-topology-ids]
+               {tid (.assignment-info storm-cluster-state tid nil)}))))
+
+(defn get-launch-time-secs 
+  [base storm-id]
+  (if base (:launch-time-secs base)
+    (throw
+      (NotAliveException. (str storm-id)))))
+
 (defn get-cluster-info [nimbus]
   (let [storm-cluster-state (:storm-cluster-state nimbus)
         supervisor-infos (all-supervisor-info storm-cluster-state)
         ;; TODO: need to get the port info about supervisors...
         ;; in standalone just look at metadata, otherwise just say N/A?
         supervisor-summaries (dofor [[id info] supervisor-infos]
-                                    (let [ports (set (:meta info)) ;;TODO: this is only true for standalone
-                                          sup-sum (SupervisorSummary. (:hostname info)
-                                                                      (:uptime-secs info)
-                                                                      (count ports)
-                                                                      (count (:used-ports info))
-                                                                      id) ]
-                                      (.set_total_resources sup-sum (map-val double (:resources-map info)))
-                                      (when-let [[total-mem total-cpu used-mem used-cpu] (.get @(:node-id->resources nimbus) id)]
-                                        (.set_used_mem sup-sum used-mem)
-                                        (.set_used_cpu sup-sum used-cpu))
-                                      (when-let [version (:version info)] (.set_version sup-sum version))
-                                      sup-sum))
+                                    (make-supervisor-summary nimbus id info))
         nimbus-uptime ((:uptime nimbus))
         bases (topology-bases storm-cluster-state)
         nimbuses (.nimbuses storm-cluster-state)
@@ -1404,13 +1523,13 @@
                                                                     (extract-status-str base))]
                                     (when-let [owner (:owner base)] (.set_owner topo-summ owner))
                                     (when-let [sched-status (.get @(:id->sched-status nimbus) id)] (.set_sched_status topo-summ sched-status))
-                                    (when-let [resources (.get @(:id->resources nimbus) id)]
-                                      (.set_requested_memonheap topo-summ (get resources 0))
-                                      (.set_requested_memoffheap topo-summ (get resources 1))
-                                      (.set_requested_cpu topo-summ (get resources 2))
-                                      (.set_assigned_memonheap topo-summ (get resources 3))
-                                      (.set_assigned_memoffheap topo-summ (get resources 4))
-                                      (.set_assigned_cpu topo-summ (get resources 5)))
+                                    (when-let [resources (get-resources-for-topology nimbus id)]
+                                      (.set_requested_memonheap topo-summ (:requested-mem-on-heap resources))
+                                      (.set_requested_memoffheap topo-summ (:requested-mem-off-heap resources))
+                                      (.set_requested_cpu topo-summ (:requested-cpu resources))
+                                      (.set_assigned_memonheap topo-summ (:assigned-mem-on-heap resources))
+                                      (.set_assigned_memoffheap topo-summ (:assigned-mem-off-heap resources))
+                                      (.set_assigned_cpu topo-summ (:assigned-cpu resources)))
                                     (.set_replication_count topo-summ (get-blob-replication-count (master-stormcode-key id) nimbus))
                                     topo-summ))
         ret (ClusterSummary. supervisor-summaries
@@ -1469,9 +1588,7 @@
                   topology (try-read-storm-topology storm-id blob-store)
                   task->component (storm-task-info topology topology-conf)
                   base (.storm-base storm-cluster-state storm-id nil)
-                  launch-time-secs (if base (:launch-time-secs base)
-                                     (throw
-                                       (NotAliveException. (str storm-id))))
+                  launch-time-secs (get-launch-time-secs base storm-id)
                   assignment (.assignment-info storm-cluster-state storm-id nil)
                   beats (map-val :heartbeat (get @(:heartbeats-cache nimbus)
                                                  storm-id))
@@ -1874,13 +1991,13 @@
                                       )]
         (when-let [owner (:owner base)] (.set_owner topo-info owner))
         (when-let [sched-status (.get @(:id->sched-status nimbus) storm-id)] (.set_sched_status topo-info sched-status))
-        (when-let [resources (.get @(:id->resources nimbus) storm-id)]
-          (.set_requested_memonheap topo-info (get resources 0))
-          (.set_requested_memoffheap topo-info (get resources 1))
-          (.set_requested_cpu topo-info (get resources 2))
-          (.set_assigned_memonheap topo-info (get resources 3))
-          (.set_assigned_memoffheap topo-info (get resources 4))
-          (.set_assigned_cpu topo-info (get resources 5)))
+        (when-let [resources (get-resources-for-topology nimbus storm-id)]
+          (.set_requested_memonheap topo-info (:requested-mem-on-heap resources))
+          (.set_requested_memoffheap topo-info (:requested-mem-off-heap resources))
+          (.set_requested_cpu topo-info (:requested-cpu resources))
+          (.set_assigned_memonheap topo-info (:assigned-mem-on-heap resources))
+          (.set_assigned_memoffheap topo-info (:assigned-mem-off-heap resources))
+          (.set_assigned_cpu topo-info (:assigned-cpu resources)))
         (when-let [component->debug (:component->debug base)]
           (.set_component_debug topo-info (map-val converter/thriftify-debugoptions component->debug)))
         (.set_replication_count topo-info (get-blob-replication-count (master-stormcode-key storm-id) nimbus))
@@ -2046,45 +2163,98 @@
     (^TopologyPageInfo getTopologyPageInfo
       [this ^String topo-id ^String window ^boolean include-sys?]
       (mark! nimbus:num-getTopologyPageInfo-calls)
-      (let [info (get-common-topo-info topo-id "getTopologyPageInfo")
-
-            exec->node+port (:executor->node+port (:assignment info))
+      (let [topo-info (get-common-topo-info topo-id "getTopologyPageInfo")
+            {:keys [storm-name
+                    storm-cluster-state
+                    launch-time-secs
+                    assignment
+                    beats
+                    task->component
+                    topology
+                    base]} topo-info
+            exec->node+port (:executor->node+port assignment)
+            node->host (:node->host assignment)
+            worker->resources (get-worker-resources-for-topology nimbus topo-id)
+            worker-summaries (stats/agg-worker-stats topo-id 
+                                                     topo-info
+                                                     worker->resources
+                                                     include-sys?
+                                                     true)  ;; this is the topology page, so we know the user is authorized 
+
+            exec->node+port (:executor->node+port assignment)
             last-err-fn (partial get-last-error
-                                 (:storm-cluster-state info)
+                                 storm-cluster-state
                                  topo-id)
             topo-page-info (stats/agg-topo-execs-stats topo-id
                                                        exec->node+port
-                                                       (:task->component info)
-                                                       (:beats info)
-                                                       (:topology info)
+                                                       task->component
+                                                       beats
+                                                       topology
                                                        window
                                                        include-sys?
                                                        last-err-fn)]
-        (when-let [owner (:owner (:base info))]
+        (.set_workers topo-page-info worker-summaries)
+        (when-let [owner (:owner base)]
           (.set_owner topo-page-info owner))
         (when-let [sched-status (.get @(:id->sched-status nimbus) topo-id)]
           (.set_sched_status topo-page-info sched-status))
-        (when-let [resources (.get @(:id->resources nimbus) topo-id)]
-          (.set_requested_memonheap topo-page-info (get resources 0))
-          (.set_requested_memoffheap topo-page-info (get resources 1))
-          (.set_requested_cpu topo-page-info (get resources 2))
-          (.set_assigned_memonheap topo-page-info (get resources 3))
-          (.set_assigned_memoffheap topo-page-info (get resources 4))
-          (.set_assigned_cpu topo-page-info (get resources 5)))
+        (when-let [resources (get-resources-for-topology nimbus topo-id)]
+          (.set_requested_memonheap topo-page-info (:requested-mem-on-heap resources))
+          (.set_requested_memoffheap topo-page-info (:requested-mem-off-heap resources))
+          (.set_requested_cpu topo-page-info (:requested-cpu resources))
+          (.set_assigned_memonheap topo-page-info (:assigned-mem-on-heap resources))
+          (.set_assigned_memoffheap topo-page-info (:assigned-mem-off-heap resources))
+          (.set_assigned_cpu topo-page-info (:assigned-cpu resources)))
         (doto topo-page-info
-          (.set_name (:storm-name info))
-          (.set_status (extract-status-str (:base info)))
-          (.set_uptime_secs (time-delta (:launch-time-secs info)))
+          (.set_name storm-name)
+          (.set_status (extract-status-str base))
+          (.set_uptime_secs (time-delta launch-time-secs))
           (.set_topology_conf (to-json (try-read-storm-conf conf
                                                             topo-id (:blob-store nimbus))))
           (.set_replication_count (get-blob-replication-count (master-stormcode-key topo-id) nimbus)))
         (when-let [debug-options
-                   (get-in info [:base :component->debug topo-id])]
+                   (get-in topo-info [:base :component->debug topo-id])]
           (.set_debug_options
             topo-page-info
             (converter/thriftify-debugoptions debug-options)))
         topo-page-info))
 
+    (^SupervisorPageInfo getSupervisorPageInfo
+      [this
+       ^String supervisor-id
+       ^String host 
+       ^boolean include-sys?]
+      (.mark nimbus:num-getSupervisorPageInfo-calls)
+      (let [storm-cluster-state (:storm-cluster-state nimbus)
+            supervisor-infos (all-supervisor-info storm-cluster-state)
+            host->supervisor-id (reverse-map (map-val :hostname supervisor-infos))
+            supervisor-ids (if (nil? supervisor-id)
+                              (get host->supervisor-id host)
+                                [supervisor-id])
+            page-info (SupervisorPageInfo.)]
+            (doseq [sid supervisor-ids]
+              (let [supervisor-info (get supervisor-infos sid)
+                    sup-sum (make-supervisor-summary nimbus sid supervisor-info)
+                    _ (.add_to_supervisor_summaries page-info sup-sum)
+                    topo-id->assignments (topology-assignments storm-cluster-state)
+                    {:keys [user-topologies 
+                            supervisor-topologies]} (user-and-supervisor-topos nimbus
+                                                                               conf
+                                                                               blob-store
+                                                                               topo-id->assignments 
+                                                                               sid)]
+                (doseq [storm-id supervisor-topologies]
+                    (let [topo-info (get-common-topo-info storm-id "getSupervisorPageInfo")
+                          worker->resources (get-worker-resources-for-topology nimbus storm-id)]
+                      (doseq [worker-summary (stats/agg-worker-stats storm-id 
+                                                                     topo-info
+                                                                     worker->resources
+                                                                     include-sys?
+                                                                     (get user-topologies storm-id)
+                                                                     sid)]
+                        (.add_to_worker_summaries page-info worker-summary)))))) 
+            page-info))
+
     (^ComponentPageInfo getComponentPageInfo
       [this
        ^String topo-id

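As a side note on `get-resources-for-topology` above: the assigned totals come from transposing the per-worker `[on-heap off-heap cpu]` triples and summing each column. A minimal REPL sketch of that transform, with made-up worker slots and values, not part of this commit:

```clojure
;; Sketch only: per-worker triples -> topology-level [on-heap-sum off-heap-sum cpu-sum]
(let [worker->resources {["node1" 6700] [512.0 256.0 50.0]
                         ["node1" 6701] [256.0 128.0 25.0]}]
  (->> worker->resources
       vals                        ; ([512.0 256.0 50.0] [256.0 128.0 25.0])
       (#(or % [[0 0 0]]))         ; default when nothing has been assigned yet
       (apply map vector)          ; ([512.0 256.0] [256.0 128.0] [50.0 25.0])
       (map (partial reduce +))))  ; => (768.0 384.0 75.0)
```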
http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/clj/org/apache/storm/stats.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/stats.clj b/storm-core/src/clj/org/apache/storm/stats.clj
index 26a4eb4..17d0219 100644
--- a/storm-core/src/clj/org/apache/storm/stats.clj
+++ b/storm-core/src/clj/org/apache/storm/stats.clj
@@ -21,9 +21,11 @@
             ExecutorSpecificStats SpoutStats BoltStats ErrorInfo
             SupervisorSummary CommonAggregateStats ComponentAggregateStats
             ComponentPageInfo ComponentType BoltAggregateStats
-            ExecutorAggregateStats SpecificAggregateStats
-            SpoutAggregateStats TopologyPageInfo TopologyStats])
+            ExecutorAggregateStats WorkerSummary SpecificAggregateStats
+            SpoutAggregateStats TopologyPageInfo TopologyStats
+            WorkerResources])
   (:import [org.apache.storm.utils Utils])
+  (:import [org.apache.storm.scheduler WorkerSlot])
   (:import [org.apache.storm.metric.internal MultiCountStatAndMetric MultiLatencyStatAndMetric])
   (:use [org.apache.storm log util])
   (:use [clojure.math.numeric-tower :only [ceil]]))
@@ -256,7 +258,6 @@
    (.get_failed stats)
    (.get_complete_ms_avg stats)])
 
-
 (defn clojurify-executor-stats
   [^ExecutorStats stats]
   (let [ specific-stats (.get_specific stats)
@@ -1002,15 +1003,62 @@
                            window->complete-latency)
                          (.set_window_to_acked window->acked)
                          (.set_window_to_failed window->failed))
-      topo-page-info (doto (TopologyPageInfo. topology-id)
-                       (.set_num_tasks num-tasks)
-                       (.set_num_workers num-workers)
-                       (.set_num_executors num-executors)
-                       (.set_id_to_spout_agg_stats spout-agg-stats)
-                       (.set_id_to_bolt_agg_stats bolt-agg-stats)
-                       (.set_topology_stats topology-stats))]
+        topo-page-info (doto (TopologyPageInfo. topology-id)
+                         (.set_num_tasks num-tasks)
+                         (.set_num_workers num-workers)
+                         (.set_num_executors num-executors)
+                         (.set_id_to_spout_agg_stats spout-agg-stats)
+                         (.set_id_to_bolt_agg_stats bolt-agg-stats)
+                         (.set_topology_stats topology-stats))]
     topo-page-info))
 
+(defn agg-worker-stats
+  "Aggregate statistics per worker for a topology. Optionally filtering on specific supervisors."
+  ([storm-id topo-info worker->resources include-sys? user-authorized]
+    (agg-worker-stats storm-id topo-info worker->resources include-sys?  user-authorized nil))
+  ([storm-id topo-info worker->resources include-sys? user-authorized filter-supervisor]
+    (let [{:keys [storm-name
+                  assignment
+                  beats
+                  task->component]} topo-info
+          exec->node+port (:executor->node+port assignment)
+          node->host (:node->host assignment)
+          all-node+port->exec (reverse-map exec->node+port)
+          node+port->exec (if (nil? filter-supervisor) 
+                            all-node+port->exec 
+                            (filter #(= filter-supervisor (ffirst %)) all-node+port->exec))
+          handle-sys-components-fn (mk-include-sys-fn include-sys?)]
+      (dofor [[[node port] executors] node+port->exec]
+        (let [executor-tasks (map #(range (first %) (inc (last %))) executors)
+              worker-beats (vals (select-keys beats executors))
+              not-null-worker-beat (first (filter identity worker-beats))
+              worker-uptime (or (:uptime not-null-worker-beat) 0)
+              ;; list of components per executor ((c1 c2 c3) (c4) (c5))
+              ;; if the executor was running only system components, an empty list for that executor is possible
+              components-per-executor (for [tasks executor-tasks] 
+                                        (filter handle-sys-components-fn (map #(get task->component %) tasks)))
+              component->num-tasks (frequencies (flatten components-per-executor))
+              num-executors (count executors)
+              default-worker-resources (WorkerResources.)
+              resources (if (nil? worker->resources) 
+                            default-worker-resources 
+                            (or (.get worker->resources (WorkerSlot. node port)) 
+                                default-worker-resources))
+              worker-summary (doto 
+                 (WorkerSummary.)
+                   (.set_host (node->host node))
+                   (.set_uptime_secs worker-uptime)
+                   (.set_supervisor_id node)
+                   (.set_port port)
+                   (.set_topology_id storm-id)
+                   (.set_topology_name storm-name)
+                   (.set_num_executors num-executors)
+                   (.set_assigned_memonheap (.get_mem_on_heap resources))
+                   (.set_assigned_memoffheap (.get_mem_off_heap resources))
+                   (.set_assigned_cpu (.get_cpu resources)))]
+          (if user-authorized (.set_component_to_num_tasks worker-summary component->num-tasks))
+          worker-summary)))))
+
 (defn agg-topo-execs-stats
   "Aggregate various executor statistics for a topology from the given
   heartbeats."

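For reference, the `component->num-tasks` map that `agg-worker-stats` above attaches to each `WorkerSummary` is the frequency count of non-system components across the worker's task ranges. A minimal sketch with made-up executors and a made-up task->component mapping, not part of this commit:

```clojure
;; Sketch only: expand each executor's [start-task end-task] range, map tasks to
;; components, drop system components (names starting with "__"), and count.
(let [executors       [[1 3] [4 4] [5 6]]
      task->component {1 "exclaim1" 2 "exclaim1" 3 "__acker"
                       4 "exclaim1" 5 "word"     6 "__acker"}
      system?         #(.startsWith ^String % "__")
      executor-tasks  (map #(range (first %) (inc (last %))) executors)
      components      (for [tasks executor-tasks]
                        (remove system? (map task->component tasks)))]
  (frequencies (flatten components)))
;; => {"exclaim1" 3, "word" 1}
```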
http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/clj/org/apache/storm/ui/core.clj
----------------------------------------------------------------------
diff --git a/storm-core/src/clj/org/apache/storm/ui/core.clj b/storm-core/src/clj/org/apache/storm/ui/core.clj
index 8b59aab..4cb01f9 100644
--- a/storm-core/src/clj/org/apache/storm/ui/core.clj
+++ b/storm-core/src/clj/org/apache/storm/ui/core.clj
@@ -38,7 +38,7 @@
             TopologyStats CommonAggregateStats ComponentAggregateStats
             ComponentType BoltAggregateStats SpoutAggregateStats
             ExecutorAggregateStats SpecificAggregateStats ComponentPageInfo
-            LogConfig LogLevel LogLevelAction])
+            LogConfig LogLevel LogLevelAction SupervisorPageInfo WorkerSummary])
   (:import [org.apache.storm.security.auth AuthUtils ReqContext])
   (:import [org.apache.storm.generated AuthorizationException ProfileRequest ProfileAction NodeInfo])
   (:import [org.apache.storm.security.auth AuthUtils])
@@ -64,6 +64,7 @@
 (defmeter ui:num-cluster-configuration-http-requests)
 (defmeter ui:num-cluster-summary-http-requests)
 (defmeter ui:num-nimbus-summary-http-requests)
+(defmeter ui:num-supervisor-http-requests)
 (defmeter ui:num-supervisor-summary-http-requests)
 (defmeter ui:num-all-topologies-summary-http-requests)
 (defmeter ui:num-topology-page-http-requests)
@@ -410,26 +411,77 @@
           "nimbusUpTime" (pretty-uptime-sec uptime)
           "nimbusUpTimeSeconds" uptime}))})))
 
+(defn worker-summary-to-json
+  [secure? ^WorkerSummary worker-summary]
+  (let [host (.get_host worker-summary)
+        port (.get_port worker-summary)
+        topology-id (.get_topology_id worker-summary)
+        uptime-secs (.get_uptime_secs worker-summary)]
+    {"supervisorId" (.get_supervisor_id worker-summary)
+     "host" host
+     "port" port
+     "topologyId" topology-id
+     "topologyName" (.get_topology_name worker-summary)
+     "executorsTotal" (.get_num_executors worker-summary)
+     "assignedMemOnHeap" (.get_assigned_memonheap worker-summary)
+     "assignedMemOffHeap" (.get_assigned_memoffheap worker-summary)
+     "assignedCpu" (.get_assigned_cpu worker-summary)
+     "componentNumTasks" (.get_component_to_num_tasks worker-summary)
+     "uptime" (pretty-uptime-sec uptime-secs)
+     "uptimeSeconds" uptime-secs
+     "workerLogLink" (worker-log-link host port topology-id secure?)}))
+
+(defn supervisor-summary-to-json 
+  [summary]
+  (let [slotsTotal (.get_num_workers summary)
+        slotsUsed (.get_num_used_workers summary)
+        slotsFree (max (- slotsTotal slotsUsed) 0)
+        totalMem (get (.get_total_resources summary) Config/SUPERVISOR_MEMORY_CAPACITY_MB)
+        totalCpu (get (.get_total_resources summary) Config/SUPERVISOR_CPU_CAPACITY)
+        usedMem (.get_used_mem summary)
+        usedCpu (.get_used_cpu summary)
+        availMem (max (- totalMem usedMem) 0)
+        availCpu (max (- totalCpu usedCpu) 0)]
+  {"id" (.get_supervisor_id summary)
+   "host" (.get_host summary)
+   "uptime" (pretty-uptime-sec (.get_uptime_secs summary))
+   "uptimeSeconds" (.get_uptime_secs summary)
+   "slotsTotal" slotsTotal
+   "slotsUsed" slotsUsed
+   "slotsFree" slotsFree
+   "totalMem" totalMem
+   "totalCpu" totalCpu
+   "usedMem" usedMem
+   "usedCpu" usedCpu
+   "logLink" (supervisor-log-link (.get_host summary))
+   "availMem" availMem
+   "availCpu" availCpu
+   "version" (.get_version summary)}))
+
+(defn supervisor-page-info
+  ([supervisor-id host include-sys? secure?]
+     (thrift/with-configured-nimbus-connection 
+        nimbus (supervisor-page-info (.getSupervisorPageInfo ^Nimbus$Client nimbus
+                                                      supervisor-id
+                                                      host
+                                                      include-sys?) secure?)))
+  ([^SupervisorPageInfo supervisor-page-info secure?]
+    ;; ask nimbus to return supervisor workers + any details the user is allowed
+    ;; to access on a per-topology basis (i.e. components)
+    (let [supervisors-json (map supervisor-summary-to-json (.get_supervisor_summaries supervisor-page-info))]
+      {"supervisors" supervisors-json
+       "schedulerDisplayResource" (*STORM-CONF* Config/SCHEDULER_DISPLAY_RESOURCE)
+       "workers" (into [] (for [^WorkerSummary worker-summary (.get_worker_summaries supervisor-page-info)]
+                            (worker-summary-to-json secure? worker-summary)))})))
+
 (defn supervisor-summary
   ([]
    (thrift/with-configured-nimbus-connection nimbus
                 (supervisor-summary
                   (.get_supervisors (.getClusterInfo ^Nimbus$Client nimbus)))))
   ([summs]
-   {"supervisors"
-    (for [^SupervisorSummary s summs]
-      {"id" (.get_supervisor_id s)
-       "host" (.get_host s)
-       "uptime" (pretty-uptime-sec (.get_uptime_secs s))
-       "uptimeSeconds" (.get_uptime_secs s)
-       "slotsTotal" (.get_num_workers s)
-       "slotsUsed" (.get_num_used_workers s)
-       "totalMem" (get (.get_total_resources s) Config/SUPERVISOR_MEMORY_CAPACITY_MB)
-       "totalCpu" (get (.get_total_resources s) Config/SUPERVISOR_CPU_CAPACITY)
-       "usedMem" (.get_used_mem s)
-       "usedCpu" (.get_used_cpu s)
-       "logLink" (supervisor-log-link (.get_host s))
-       "version" (.get_version s)})
+   {"supervisors" (for [^SupervisorSummary s summs]
+                    (supervisor-summary-to-json s))
     "schedulerDisplayResource" (*STORM-CONF* Config/SCHEDULER_DISPLAY_RESOURCE)}))
 
 (defn all-topologies-summary
@@ -588,6 +640,8 @@
      "assignedTotalMem" (+ (.get_assigned_memonheap topo-info) (.get_assigned_memoffheap topo-info))
      "assignedCpu" (.get_assigned_cpu topo-info)
      "topologyStats" topo-stats
+     "workers"  (map (partial worker-summary-to-json secure?)
+                     (.get_workers topo-info))
      "spouts" (map (partial comp-agg-stats-json id secure?)
                    (.get_id_to_spout_agg_stats topo-info))
      "bolts" (map (partial comp-agg-stats-json id secure?)
@@ -1046,6 +1100,16 @@
     (assert-authorized-user "getClusterInfo")
     (json-response (assoc (supervisor-summary)
                      "logviewerPort" (*STORM-CONF* LOGVIEWER-PORT)) (:callback m)))
+  (GET "/api/v1/supervisor" [:as {:keys [cookies servlet-request scheme]} & m]
+    (.mark ui:num-supervisor-http-requests)
+    (populate-context! servlet-request)
+    (assert-authorized-user "getSupervisorPageInfo")
+    ;; supervisor takes either an id or a host query parameter (technically both);
+    ;; if both the id and host are provided, the id wins
+    (let [id (:id m)
+          host (:host m)]
+      (json-response (assoc (supervisor-page-info id host (check-include-sys? (:sys m)) (= scheme :https))
+                            "logviewerPort" (*STORM-CONF* LOGVIEWER-PORT)) (:callback m))))
   (GET "/api/v1/topology/summary" [:as {:keys [cookies servlet-request]} & m]
     (mark! ui:num-all-topologies-summary-http-requests)
     (populate-context! servlet-request)

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/jvm/org/apache/storm/generated/Assignment.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/org/apache/storm/generated/Assignment.java b/storm-core/src/jvm/org/apache/storm/generated/Assignment.java
index c7a3f8a..90b7516 100644
--- a/storm-core/src/jvm/org/apache/storm/generated/Assignment.java
+++ b/storm-core/src/jvm/org/apache/storm/generated/Assignment.java
@@ -787,15 +787,15 @@ public class Assignment implements org.apache.thrift.TBase<Assignment, Assignmen
           case 2: // NODE_HOST
             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
               {
-                org.apache.thrift.protocol.TMap _map548 = iprot.readMapBegin();
-                struct.node_host = new HashMap<String,String>(2*_map548.size);
-                String _key549;
-                String _val550;
-                for (int _i551 = 0; _i551 < _map548.size; ++_i551)
+                org.apache.thrift.protocol.TMap _map582 = iprot.readMapBegin();
+                struct.node_host = new HashMap<String,String>(2*_map582.size);
+                String _key583;
+                String _val584;
+                for (int _i585 = 0; _i585 < _map582.size; ++_i585)
                 {
-                  _key549 = iprot.readString();
-                  _val550 = iprot.readString();
-                  struct.node_host.put(_key549, _val550);
+                  _key583 = iprot.readString();
+                  _val584 = iprot.readString();
+                  struct.node_host.put(_key583, _val584);
                 }
                 iprot.readMapEnd();
               }
@@ -807,26 +807,26 @@ public class Assignment implements org.apache.thrift.TBase<Assignment, Assignmen
           case 3: // EXECUTOR_NODE_PORT
             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
               {
-                org.apache.thrift.protocol.TMap _map552 = iprot.readMapBegin();
-                struct.executor_node_port = new HashMap<List<Long>,NodeInfo>(2*_map552.size);
-                List<Long> _key553;
-                NodeInfo _val554;
-                for (int _i555 = 0; _i555 < _map552.size; ++_i555)
+                org.apache.thrift.protocol.TMap _map586 = iprot.readMapBegin();
+                struct.executor_node_port = new HashMap<List<Long>,NodeInfo>(2*_map586.size);
+                List<Long> _key587;
+                NodeInfo _val588;
+                for (int _i589 = 0; _i589 < _map586.size; ++_i589)
                 {
                   {
-                    org.apache.thrift.protocol.TList _list556 = iprot.readListBegin();
-                    _key553 = new ArrayList<Long>(_list556.size);
-                    long _elem557;
-                    for (int _i558 = 0; _i558 < _list556.size; ++_i558)
+                    org.apache.thrift.protocol.TList _list590 = iprot.readListBegin();
+                    _key587 = new ArrayList<Long>(_list590.size);
+                    long _elem591;
+                    for (int _i592 = 0; _i592 < _list590.size; ++_i592)
                     {
-                      _elem557 = iprot.readI64();
-                      _key553.add(_elem557);
+                      _elem591 = iprot.readI64();
+                      _key587.add(_elem591);
                     }
                     iprot.readListEnd();
                   }
-                  _val554 = new NodeInfo();
-                  _val554.read(iprot);
-                  struct.executor_node_port.put(_key553, _val554);
+                  _val588 = new NodeInfo();
+                  _val588.read(iprot);
+                  struct.executor_node_port.put(_key587, _val588);
                 }
                 iprot.readMapEnd();
               }
@@ -838,25 +838,25 @@ public class Assignment implements org.apache.thrift.TBase<Assignment, Assignmen
           case 4: // EXECUTOR_START_TIME_SECS
             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
               {
-                org.apache.thrift.protocol.TMap _map559 = iprot.readMapBegin();
-                struct.executor_start_time_secs = new HashMap<List<Long>,Long>(2*_map559.size);
-                List<Long> _key560;
-                long _val561;
-                for (int _i562 = 0; _i562 < _map559.size; ++_i562)
+                org.apache.thrift.protocol.TMap _map593 = iprot.readMapBegin();
+                struct.executor_start_time_secs = new HashMap<List<Long>,Long>(2*_map593.size);
+                List<Long> _key594;
+                long _val595;
+                for (int _i596 = 0; _i596 < _map593.size; ++_i596)
                 {
                   {
-                    org.apache.thrift.protocol.TList _list563 = iprot.readListBegin();
-                    _key560 = new ArrayList<Long>(_list563.size);
-                    long _elem564;
-                    for (int _i565 = 0; _i565 < _list563.size; ++_i565)
+                    org.apache.thrift.protocol.TList _list597 = iprot.readListBegin();
+                    _key594 = new ArrayList<Long>(_list597.size);
+                    long _elem598;
+                    for (int _i599 = 0; _i599 < _list597.size; ++_i599)
                     {
-                      _elem564 = iprot.readI64();
-                      _key560.add(_elem564);
+                      _elem598 = iprot.readI64();
+                      _key594.add(_elem598);
                     }
                     iprot.readListEnd();
                   }
-                  _val561 = iprot.readI64();
-                  struct.executor_start_time_secs.put(_key560, _val561);
+                  _val595 = iprot.readI64();
+                  struct.executor_start_time_secs.put(_key594, _val595);
                 }
                 iprot.readMapEnd();
               }
@@ -868,17 +868,17 @@ public class Assignment implements org.apache.thrift.TBase<Assignment, Assignmen
           case 5: // WORKER_RESOURCES
             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
               {
-                org.apache.thrift.protocol.TMap _map566 = iprot.readMapBegin();
-                struct.worker_resources = new HashMap<NodeInfo,WorkerResources>(2*_map566.size);
-                NodeInfo _key567;
-                WorkerResources _val568;
-                for (int _i569 = 0; _i569 < _map566.size; ++_i569)
+                org.apache.thrift.protocol.TMap _map600 = iprot.readMapBegin();
+                struct.worker_resources = new HashMap<NodeInfo,WorkerResources>(2*_map600.size);
+                NodeInfo _key601;
+                WorkerResources _val602;
+                for (int _i603 = 0; _i603 < _map600.size; ++_i603)
                 {
-                  _key567 = new NodeInfo();
-                  _key567.read(iprot);
-                  _val568 = new WorkerResources();
-                  _val568.read(iprot);
-                  struct.worker_resources.put(_key567, _val568);
+                  _key601 = new NodeInfo();
+                  _key601.read(iprot);
+                  _val602 = new WorkerResources();
+                  _val602.read(iprot);
+                  struct.worker_resources.put(_key601, _val602);
                 }
                 iprot.readMapEnd();
               }
@@ -910,10 +910,10 @@ public class Assignment implements org.apache.thrift.TBase<Assignment, Assignmen
           oprot.writeFieldBegin(NODE_HOST_FIELD_DESC);
           {
             oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.node_host.size()));
-            for (Map.Entry<String, String> _iter570 : struct.node_host.entrySet())
+            for (Map.Entry<String, String> _iter604 : struct.node_host.entrySet())
             {
-              oprot.writeString(_iter570.getKey());
-              oprot.writeString(_iter570.getValue());
+              oprot.writeString(_iter604.getKey());
+              oprot.writeString(_iter604.getValue());
             }
             oprot.writeMapEnd();
           }
@@ -925,17 +925,17 @@ public class Assignment implements org.apache.thrift.TBase<Assignment, Assignmen
           oprot.writeFieldBegin(EXECUTOR_NODE_PORT_FIELD_DESC);
           {
             oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.LIST, org.apache.thrift.protocol.TType.STRUCT, struct.executor_node_port.size()));
-            for (Map.Entry<List<Long>, NodeInfo> _iter571 : struct.executor_node_port.entrySet())
+            for (Map.Entry<List<Long>, NodeInfo> _iter605 : struct.executor_node_port.entrySet())
             {
               {
-                oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, _iter571.getKey().size()));
-                for (long _iter572 : _iter571.getKey())
+                oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, _iter605.getKey().size()));
+                for (long _iter606 : _iter605.getKey())
                 {
-                  oprot.writeI64(_iter572);
+                  oprot.writeI64(_iter606);
                 }
                 oprot.writeListEnd();
               }
-              _iter571.getValue().write(oprot);
+              _iter605.getValue().write(oprot);
             }
             oprot.writeMapEnd();
           }
@@ -947,17 +947,17 @@ public class Assignment implements org.apache.thrift.TBase<Assignment, Assignmen
           oprot.writeFieldBegin(EXECUTOR_START_TIME_SECS_FIELD_DESC);
           {
             oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.LIST, org.apache.thrift.protocol.TType.I64, struct.executor_start_time_secs.size()));
-            for (Map.Entry<List<Long>, Long> _iter573 : struct.executor_start_time_secs.entrySet())
+            for (Map.Entry<List<Long>, Long> _iter607 : struct.executor_start_time_secs.entrySet())
             {
               {
-                oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, _iter573.getKey().size()));
-                for (long _iter574 : _iter573.getKey())
+                oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, _iter607.getKey().size()));
+                for (long _iter608 : _iter607.getKey())
                 {
-                  oprot.writeI64(_iter574);
+                  oprot.writeI64(_iter608);
                 }
                 oprot.writeListEnd();
               }
-              oprot.writeI64(_iter573.getValue());
+              oprot.writeI64(_iter607.getValue());
             }
             oprot.writeMapEnd();
           }
@@ -969,10 +969,10 @@ public class Assignment implements org.apache.thrift.TBase<Assignment, Assignmen
           oprot.writeFieldBegin(WORKER_RESOURCES_FIELD_DESC);
           {
             oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.STRUCT, struct.worker_resources.size()));
-            for (Map.Entry<NodeInfo, WorkerResources> _iter575 : struct.worker_resources.entrySet())
+            for (Map.Entry<NodeInfo, WorkerResources> _iter609 : struct.worker_resources.entrySet())
             {
-              _iter575.getKey().write(oprot);
-              _iter575.getValue().write(oprot);
+              _iter609.getKey().write(oprot);
+              _iter609.getValue().write(oprot);
             }
             oprot.writeMapEnd();
           }
@@ -1014,52 +1014,52 @@ public class Assignment implements org.apache.thrift.TBase<Assignment, Assignmen
       if (struct.is_set_node_host()) {
         {
           oprot.writeI32(struct.node_host.size());
-          for (Map.Entry<String, String> _iter576 : struct.node_host.entrySet())
+          for (Map.Entry<String, String> _iter610 : struct.node_host.entrySet())
           {
-            oprot.writeString(_iter576.getKey());
-            oprot.writeString(_iter576.getValue());
+            oprot.writeString(_iter610.getKey());
+            oprot.writeString(_iter610.getValue());
           }
         }
       }
       if (struct.is_set_executor_node_port()) {
         {
           oprot.writeI32(struct.executor_node_port.size());
-          for (Map.Entry<List<Long>, NodeInfo> _iter577 : struct.executor_node_port.entrySet())
+          for (Map.Entry<List<Long>, NodeInfo> _iter611 : struct.executor_node_port.entrySet())
           {
             {
-              oprot.writeI32(_iter577.getKey().size());
-              for (long _iter578 : _iter577.getKey())
+              oprot.writeI32(_iter611.getKey().size());
+              for (long _iter612 : _iter611.getKey())
               {
-                oprot.writeI64(_iter578);
+                oprot.writeI64(_iter612);
               }
             }
-            _iter577.getValue().write(oprot);
+            _iter611.getValue().write(oprot);
           }
         }
       }
       if (struct.is_set_executor_start_time_secs()) {
         {
           oprot.writeI32(struct.executor_start_time_secs.size());
-          for (Map.Entry<List<Long>, Long> _iter579 : struct.executor_start_time_secs.entrySet())
+          for (Map.Entry<List<Long>, Long> _iter613 : struct.executor_start_time_secs.entrySet())
           {
             {
-              oprot.writeI32(_iter579.getKey().size());
-              for (long _iter580 : _iter579.getKey())
+              oprot.writeI32(_iter613.getKey().size());
+              for (long _iter614 : _iter613.getKey())
               {
-                oprot.writeI64(_iter580);
+                oprot.writeI64(_iter614);
               }
             }
-            oprot.writeI64(_iter579.getValue());
+            oprot.writeI64(_iter613.getValue());
           }
         }
       }
       if (struct.is_set_worker_resources()) {
         {
           oprot.writeI32(struct.worker_resources.size());
-          for (Map.Entry<NodeInfo, WorkerResources> _iter581 : struct.worker_resources.entrySet())
+          for (Map.Entry<NodeInfo, WorkerResources> _iter615 : struct.worker_resources.entrySet())
           {
-            _iter581.getKey().write(oprot);
-            _iter581.getValue().write(oprot);
+            _iter615.getKey().write(oprot);
+            _iter615.getValue().write(oprot);
           }
         }
       }
@@ -1073,81 +1073,81 @@ public class Assignment implements org.apache.thrift.TBase<Assignment, Assignmen
       BitSet incoming = iprot.readBitSet(4);
       if (incoming.get(0)) {
         {
-          org.apache.thrift.protocol.TMap _map582 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.node_host = new HashMap<String,String>(2*_map582.size);
-          String _key583;
-          String _val584;
-          for (int _i585 = 0; _i585 < _map582.size; ++_i585)
+          org.apache.thrift.protocol.TMap _map616 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.node_host = new HashMap<String,String>(2*_map616.size);
+          String _key617;
+          String _val618;
+          for (int _i619 = 0; _i619 < _map616.size; ++_i619)
           {
-            _key583 = iprot.readString();
-            _val584 = iprot.readString();
-            struct.node_host.put(_key583, _val584);
+            _key617 = iprot.readString();
+            _val618 = iprot.readString();
+            struct.node_host.put(_key617, _val618);
           }
         }
         struct.set_node_host_isSet(true);
       }
       if (incoming.get(1)) {
         {
-          org.apache.thrift.protocol.TMap _map586 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.LIST, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.executor_node_port = new HashMap<List<Long>,NodeInfo>(2*_map586.size);
-          List<Long> _key587;
-          NodeInfo _val588;
-          for (int _i589 = 0; _i589 < _map586.size; ++_i589)
+          org.apache.thrift.protocol.TMap _map620 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.LIST, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.executor_node_port = new HashMap<List<Long>,NodeInfo>(2*_map620.size);
+          List<Long> _key621;
+          NodeInfo _val622;
+          for (int _i623 = 0; _i623 < _map620.size; ++_i623)
           {
             {
-              org.apache.thrift.protocol.TList _list590 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
-              _key587 = new ArrayList<Long>(_list590.size);
-              long _elem591;
-              for (int _i592 = 0; _i592 < _list590.size; ++_i592)
+              org.apache.thrift.protocol.TList _list624 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
+              _key621 = new ArrayList<Long>(_list624.size);
+              long _elem625;
+              for (int _i626 = 0; _i626 < _list624.size; ++_i626)
               {
-                _elem591 = iprot.readI64();
-                _key587.add(_elem591);
+                _elem625 = iprot.readI64();
+                _key621.add(_elem625);
               }
             }
-            _val588 = new NodeInfo();
-            _val588.read(iprot);
-            struct.executor_node_port.put(_key587, _val588);
+            _val622 = new NodeInfo();
+            _val622.read(iprot);
+            struct.executor_node_port.put(_key621, _val622);
           }
         }
         struct.set_executor_node_port_isSet(true);
       }
       if (incoming.get(2)) {
         {
-          org.apache.thrift.protocol.TMap _map593 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.LIST, org.apache.thrift.protocol.TType.I64, iprot.readI32());
-          struct.executor_start_time_secs = new HashMap<List<Long>,Long>(2*_map593.size);
-          List<Long> _key594;
-          long _val595;
-          for (int _i596 = 0; _i596 < _map593.size; ++_i596)
+          org.apache.thrift.protocol.TMap _map627 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.LIST, org.apache.thrift.protocol.TType.I64, iprot.readI32());
+          struct.executor_start_time_secs = new HashMap<List<Long>,Long>(2*_map627.size);
+          List<Long> _key628;
+          long _val629;
+          for (int _i630 = 0; _i630 < _map627.size; ++_i630)
           {
             {
-              org.apache.thrift.protocol.TList _list597 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
-              _key594 = new ArrayList<Long>(_list597.size);
-              long _elem598;
-              for (int _i599 = 0; _i599 < _list597.size; ++_i599)
+              org.apache.thrift.protocol.TList _list631 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
+              _key628 = new ArrayList<Long>(_list631.size);
+              long _elem632;
+              for (int _i633 = 0; _i633 < _list631.size; ++_i633)
               {
-                _elem598 = iprot.readI64();
-                _key594.add(_elem598);
+                _elem632 = iprot.readI64();
+                _key628.add(_elem632);
               }
             }
-            _val595 = iprot.readI64();
-            struct.executor_start_time_secs.put(_key594, _val595);
+            _val629 = iprot.readI64();
+            struct.executor_start_time_secs.put(_key628, _val629);
           }
         }
         struct.set_executor_start_time_secs_isSet(true);
       }
       if (incoming.get(3)) {
         {
-          org.apache.thrift.protocol.TMap _map600 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.worker_resources = new HashMap<NodeInfo,WorkerResources>(2*_map600.size);
-          NodeInfo _key601;
-          WorkerResources _val602;
-          for (int _i603 = 0; _i603 < _map600.size; ++_i603)
+          org.apache.thrift.protocol.TMap _map634 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.worker_resources = new HashMap<NodeInfo,WorkerResources>(2*_map634.size);
+          NodeInfo _key635;
+          WorkerResources _val636;
+          for (int _i637 = 0; _i637 < _map634.size; ++_i637)
           {
-            _key601 = new NodeInfo();
-            _key601.read(iprot);
-            _val602 = new WorkerResources();
-            _val602.read(iprot);
-            struct.worker_resources.put(_key601, _val602);
+            _key635 = new NodeInfo();
+            _key635.read(iprot);
+            _val636 = new WorkerResources();
+            _val636.read(iprot);
+            struct.worker_resources.put(_key635, _val636);
           }
         }
         struct.set_worker_resources_isSet(true);

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/jvm/org/apache/storm/generated/ClusterWorkerHeartbeat.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/org/apache/storm/generated/ClusterWorkerHeartbeat.java b/storm-core/src/jvm/org/apache/storm/generated/ClusterWorkerHeartbeat.java
index 8585a7d..59c0894 100644
--- a/storm-core/src/jvm/org/apache/storm/generated/ClusterWorkerHeartbeat.java
+++ b/storm-core/src/jvm/org/apache/storm/generated/ClusterWorkerHeartbeat.java
@@ -635,17 +635,17 @@ public class ClusterWorkerHeartbeat implements org.apache.thrift.TBase<ClusterWo
           case 2: // EXECUTOR_STATS
             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
               {
-                org.apache.thrift.protocol.TMap _map624 = iprot.readMapBegin();
-                struct.executor_stats = new HashMap<ExecutorInfo,ExecutorStats>(2*_map624.size);
-                ExecutorInfo _key625;
-                ExecutorStats _val626;
-                for (int _i627 = 0; _i627 < _map624.size; ++_i627)
+                org.apache.thrift.protocol.TMap _map658 = iprot.readMapBegin();
+                struct.executor_stats = new HashMap<ExecutorInfo,ExecutorStats>(2*_map658.size);
+                ExecutorInfo _key659;
+                ExecutorStats _val660;
+                for (int _i661 = 0; _i661 < _map658.size; ++_i661)
                 {
-                  _key625 = new ExecutorInfo();
-                  _key625.read(iprot);
-                  _val626 = new ExecutorStats();
-                  _val626.read(iprot);
-                  struct.executor_stats.put(_key625, _val626);
+                  _key659 = new ExecutorInfo();
+                  _key659.read(iprot);
+                  _val660 = new ExecutorStats();
+                  _val660.read(iprot);
+                  struct.executor_stats.put(_key659, _val660);
                 }
                 iprot.readMapEnd();
               }
@@ -692,10 +692,10 @@ public class ClusterWorkerHeartbeat implements org.apache.thrift.TBase<ClusterWo
         oprot.writeFieldBegin(EXECUTOR_STATS_FIELD_DESC);
         {
           oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.STRUCT, struct.executor_stats.size()));
-          for (Map.Entry<ExecutorInfo, ExecutorStats> _iter628 : struct.executor_stats.entrySet())
+          for (Map.Entry<ExecutorInfo, ExecutorStats> _iter662 : struct.executor_stats.entrySet())
           {
-            _iter628.getKey().write(oprot);
-            _iter628.getValue().write(oprot);
+            _iter662.getKey().write(oprot);
+            _iter662.getValue().write(oprot);
           }
           oprot.writeMapEnd();
         }
@@ -727,10 +727,10 @@ public class ClusterWorkerHeartbeat implements org.apache.thrift.TBase<ClusterWo
       oprot.writeString(struct.storm_id);
       {
         oprot.writeI32(struct.executor_stats.size());
-        for (Map.Entry<ExecutorInfo, ExecutorStats> _iter629 : struct.executor_stats.entrySet())
+        for (Map.Entry<ExecutorInfo, ExecutorStats> _iter663 : struct.executor_stats.entrySet())
         {
-          _iter629.getKey().write(oprot);
-          _iter629.getValue().write(oprot);
+          _iter663.getKey().write(oprot);
+          _iter663.getValue().write(oprot);
         }
       }
       oprot.writeI32(struct.time_secs);
@@ -743,17 +743,17 @@ public class ClusterWorkerHeartbeat implements org.apache.thrift.TBase<ClusterWo
       struct.storm_id = iprot.readString();
       struct.set_storm_id_isSet(true);
       {
-        org.apache.thrift.protocol.TMap _map630 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.executor_stats = new HashMap<ExecutorInfo,ExecutorStats>(2*_map630.size);
-        ExecutorInfo _key631;
-        ExecutorStats _val632;
-        for (int _i633 = 0; _i633 < _map630.size; ++_i633)
+        org.apache.thrift.protocol.TMap _map664 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.executor_stats = new HashMap<ExecutorInfo,ExecutorStats>(2*_map664.size);
+        ExecutorInfo _key665;
+        ExecutorStats _val666;
+        for (int _i667 = 0; _i667 < _map664.size; ++_i667)
         {
-          _key631 = new ExecutorInfo();
-          _key631.read(iprot);
-          _val632 = new ExecutorStats();
-          _val632.read(iprot);
-          struct.executor_stats.put(_key631, _val632);
+          _key665 = new ExecutorInfo();
+          _key665.read(iprot);
+          _val666 = new ExecutorStats();
+          _val666.read(iprot);
+          struct.executor_stats.put(_key665, _val666);
         }
       }
       struct.set_executor_stats_isSet(true);


[3/9] storm git commit: STORM-1994: Add table with per-topology and worker resource usage and components in (new) supervisor and topology pages

Posted by ka...@apache.org.
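For readers skimming the generated file below: WorkerSummary is a plain Thrift struct whose fields are all optional and are filled in through the usual generated accessors. The following is a minimal usage sketch, not part of the commit; the class name and every value are hypothetical, the setters are the ones defined later in this file, and TSerializer is the stock libthrift binary-protocol serializer rather than anything added here.

    import org.apache.storm.generated.WorkerSummary;
    import org.apache.thrift.TException;
    import org.apache.thrift.TSerializer;

    public class WorkerSummaryExample {
        public static void main(String[] args) throws TException {
            WorkerSummary ws = new WorkerSummary();
            // Identity of the worker slot (values are made up for illustration).
            ws.set_supervisor_id("a1b2c3-supervisor");
            ws.set_host("node1.example.com");
            ws.set_port(6700);
            // Topology the worker belongs to.
            ws.set_topology_id("wordcount-1-1471850000");
            ws.set_topology_name("wordcount");
            ws.set_num_executors(4);
            ws.set_uptime_secs(120);
            // Per-component task counts that the new supervisor/topology pages display.
            ws.put_to_component_to_num_tasks("split", 2L);
            ws.put_to_component_to_num_tasks("count", 2L);
            // Assigned resource figures for the resource-usage tables.
            ws.set_assigned_memonheap(512.0);
            ws.set_assigned_memoffheap(128.0);
            ws.set_assigned_cpu(50.0);

            // Round the struct through Thrift's default binary protocol.
            byte[] bytes = new TSerializer().serialize(ws);
            System.out.println("WorkerSummary serialized to " + bytes.length + " bytes");
        }
    }

Since every field is optional, a partially populated WorkerSummary like the one above serializes without validation errors; consumers are expected to check the is_set_* accessors before reading a field.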
http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/jvm/org/apache/storm/generated/WorkerSummary.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/org/apache/storm/generated/WorkerSummary.java b/storm-core/src/jvm/org/apache/storm/generated/WorkerSummary.java
new file mode 100644
index 0000000..b1079ab
--- /dev/null
+++ b/storm-core/src/jvm/org/apache/storm/generated/WorkerSummary.java
@@ -0,0 +1,1880 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.storm.generated;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+public class WorkerSummary implements org.apache.thrift.TBase<WorkerSummary, WorkerSummary._Fields>, java.io.Serializable, Cloneable, Comparable<WorkerSummary> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("WorkerSummary");
+
+  private static final org.apache.thrift.protocol.TField SUPERVISOR_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("supervisor_id", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField HOST_FIELD_DESC = new org.apache.thrift.protocol.TField("host", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField PORT_FIELD_DESC = new org.apache.thrift.protocol.TField("port", org.apache.thrift.protocol.TType.I32, (short)3);
+  private static final org.apache.thrift.protocol.TField TOPOLOGY_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("topology_id", org.apache.thrift.protocol.TType.STRING, (short)4);
+  private static final org.apache.thrift.protocol.TField TOPOLOGY_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("topology_name", org.apache.thrift.protocol.TType.STRING, (short)5);
+  private static final org.apache.thrift.protocol.TField NUM_EXECUTORS_FIELD_DESC = new org.apache.thrift.protocol.TField("num_executors", org.apache.thrift.protocol.TType.I32, (short)6);
+  private static final org.apache.thrift.protocol.TField COMPONENT_TO_NUM_TASKS_FIELD_DESC = new org.apache.thrift.protocol.TField("component_to_num_tasks", org.apache.thrift.protocol.TType.MAP, (short)7);
+  private static final org.apache.thrift.protocol.TField TIME_SECS_FIELD_DESC = new org.apache.thrift.protocol.TField("time_secs", org.apache.thrift.protocol.TType.I32, (short)8);
+  private static final org.apache.thrift.protocol.TField UPTIME_SECS_FIELD_DESC = new org.apache.thrift.protocol.TField("uptime_secs", org.apache.thrift.protocol.TType.I32, (short)9);
+  private static final org.apache.thrift.protocol.TField REQUESTED_MEMONHEAP_FIELD_DESC = new org.apache.thrift.protocol.TField("requested_memonheap", org.apache.thrift.protocol.TType.DOUBLE, (short)521);
+  private static final org.apache.thrift.protocol.TField REQUESTED_MEMOFFHEAP_FIELD_DESC = new org.apache.thrift.protocol.TField("requested_memoffheap", org.apache.thrift.protocol.TType.DOUBLE, (short)522);
+  private static final org.apache.thrift.protocol.TField REQUESTED_CPU_FIELD_DESC = new org.apache.thrift.protocol.TField("requested_cpu", org.apache.thrift.protocol.TType.DOUBLE, (short)523);
+  private static final org.apache.thrift.protocol.TField ASSIGNED_MEMONHEAP_FIELD_DESC = new org.apache.thrift.protocol.TField("assigned_memonheap", org.apache.thrift.protocol.TType.DOUBLE, (short)524);
+  private static final org.apache.thrift.protocol.TField ASSIGNED_MEMOFFHEAP_FIELD_DESC = new org.apache.thrift.protocol.TField("assigned_memoffheap", org.apache.thrift.protocol.TType.DOUBLE, (short)525);
+  private static final org.apache.thrift.protocol.TField ASSIGNED_CPU_FIELD_DESC = new org.apache.thrift.protocol.TField("assigned_cpu", org.apache.thrift.protocol.TType.DOUBLE, (short)526);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new WorkerSummaryStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new WorkerSummaryTupleSchemeFactory());
+  }
+
+  private String supervisor_id; // optional
+  private String host; // optional
+  private int port; // optional
+  private String topology_id; // optional
+  private String topology_name; // optional
+  private int num_executors; // optional
+  private Map<String,Long> component_to_num_tasks; // optional
+  private int time_secs; // optional
+  private int uptime_secs; // optional
+  private double requested_memonheap; // optional
+  private double requested_memoffheap; // optional
+  private double requested_cpu; // optional
+  private double assigned_memonheap; // optional
+  private double assigned_memoffheap; // optional
+  private double assigned_cpu; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    SUPERVISOR_ID((short)1, "supervisor_id"),
+    HOST((short)2, "host"),
+    PORT((short)3, "port"),
+    TOPOLOGY_ID((short)4, "topology_id"),
+    TOPOLOGY_NAME((short)5, "topology_name"),
+    NUM_EXECUTORS((short)6, "num_executors"),
+    COMPONENT_TO_NUM_TASKS((short)7, "component_to_num_tasks"),
+    TIME_SECS((short)8, "time_secs"),
+    UPTIME_SECS((short)9, "uptime_secs"),
+    REQUESTED_MEMONHEAP((short)521, "requested_memonheap"),
+    REQUESTED_MEMOFFHEAP((short)522, "requested_memoffheap"),
+    REQUESTED_CPU((short)523, "requested_cpu"),
+    ASSIGNED_MEMONHEAP((short)524, "assigned_memonheap"),
+    ASSIGNED_MEMOFFHEAP((short)525, "assigned_memoffheap"),
+    ASSIGNED_CPU((short)526, "assigned_cpu");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // SUPERVISOR_ID
+          return SUPERVISOR_ID;
+        case 2: // HOST
+          return HOST;
+        case 3: // PORT
+          return PORT;
+        case 4: // TOPOLOGY_ID
+          return TOPOLOGY_ID;
+        case 5: // TOPOLOGY_NAME
+          return TOPOLOGY_NAME;
+        case 6: // NUM_EXECUTORS
+          return NUM_EXECUTORS;
+        case 7: // COMPONENT_TO_NUM_TASKS
+          return COMPONENT_TO_NUM_TASKS;
+        case 8: // TIME_SECS
+          return TIME_SECS;
+        case 9: // UPTIME_SECS
+          return UPTIME_SECS;
+        case 521: // REQUESTED_MEMONHEAP
+          return REQUESTED_MEMONHEAP;
+        case 522: // REQUESTED_MEMOFFHEAP
+          return REQUESTED_MEMOFFHEAP;
+        case 523: // REQUESTED_CPU
+          return REQUESTED_CPU;
+        case 524: // ASSIGNED_MEMONHEAP
+          return ASSIGNED_MEMONHEAP;
+        case 525: // ASSIGNED_MEMOFFHEAP
+          return ASSIGNED_MEMOFFHEAP;
+        case 526: // ASSIGNED_CPU
+          return ASSIGNED_CPU;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __PORT_ISSET_ID = 0;
+  private static final int __NUM_EXECUTORS_ISSET_ID = 1;
+  private static final int __TIME_SECS_ISSET_ID = 2;
+  private static final int __UPTIME_SECS_ISSET_ID = 3;
+  private static final int __REQUESTED_MEMONHEAP_ISSET_ID = 4;
+  private static final int __REQUESTED_MEMOFFHEAP_ISSET_ID = 5;
+  private static final int __REQUESTED_CPU_ISSET_ID = 6;
+  private static final int __ASSIGNED_MEMONHEAP_ISSET_ID = 7;
+  private static final int __ASSIGNED_MEMOFFHEAP_ISSET_ID = 8;
+  private static final int __ASSIGNED_CPU_ISSET_ID = 9;
+  private short __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.SUPERVISOR_ID,_Fields.HOST,_Fields.PORT,_Fields.TOPOLOGY_ID,_Fields.TOPOLOGY_NAME,_Fields.NUM_EXECUTORS,_Fields.COMPONENT_TO_NUM_TASKS,_Fields.TIME_SECS,_Fields.UPTIME_SECS,_Fields.REQUESTED_MEMONHEAP,_Fields.REQUESTED_MEMOFFHEAP,_Fields.REQUESTED_CPU,_Fields.ASSIGNED_MEMONHEAP,_Fields.ASSIGNED_MEMOFFHEAP,_Fields.ASSIGNED_CPU};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.SUPERVISOR_ID, new org.apache.thrift.meta_data.FieldMetaData("supervisor_id", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.HOST, new org.apache.thrift.meta_data.FieldMetaData("host", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.PORT, new org.apache.thrift.meta_data.FieldMetaData("port", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    tmpMap.put(_Fields.TOPOLOGY_ID, new org.apache.thrift.meta_data.FieldMetaData("topology_id", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TOPOLOGY_NAME, new org.apache.thrift.meta_data.FieldMetaData("topology_name", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.NUM_EXECUTORS, new org.apache.thrift.meta_data.FieldMetaData("num_executors", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    tmpMap.put(_Fields.COMPONENT_TO_NUM_TASKS, new org.apache.thrift.meta_data.FieldMetaData("component_to_num_tasks", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))));
+    tmpMap.put(_Fields.TIME_SECS, new org.apache.thrift.meta_data.FieldMetaData("time_secs", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    tmpMap.put(_Fields.UPTIME_SECS, new org.apache.thrift.meta_data.FieldMetaData("uptime_secs", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    tmpMap.put(_Fields.REQUESTED_MEMONHEAP, new org.apache.thrift.meta_data.FieldMetaData("requested_memonheap", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
+    tmpMap.put(_Fields.REQUESTED_MEMOFFHEAP, new org.apache.thrift.meta_data.FieldMetaData("requested_memoffheap", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
+    tmpMap.put(_Fields.REQUESTED_CPU, new org.apache.thrift.meta_data.FieldMetaData("requested_cpu", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
+    tmpMap.put(_Fields.ASSIGNED_MEMONHEAP, new org.apache.thrift.meta_data.FieldMetaData("assigned_memonheap", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
+    tmpMap.put(_Fields.ASSIGNED_MEMOFFHEAP, new org.apache.thrift.meta_data.FieldMetaData("assigned_memoffheap", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
+    tmpMap.put(_Fields.ASSIGNED_CPU, new org.apache.thrift.meta_data.FieldMetaData("assigned_cpu", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WorkerSummary.class, metaDataMap);
+  }
+
+  public WorkerSummary() {
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public WorkerSummary(WorkerSummary other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.is_set_supervisor_id()) {
+      this.supervisor_id = other.supervisor_id;
+    }
+    if (other.is_set_host()) {
+      this.host = other.host;
+    }
+    this.port = other.port;
+    if (other.is_set_topology_id()) {
+      this.topology_id = other.topology_id;
+    }
+    if (other.is_set_topology_name()) {
+      this.topology_name = other.topology_name;
+    }
+    this.num_executors = other.num_executors;
+    if (other.is_set_component_to_num_tasks()) {
+      Map<String,Long> __this__component_to_num_tasks = new HashMap<String,Long>(other.component_to_num_tasks);
+      this.component_to_num_tasks = __this__component_to_num_tasks;
+    }
+    this.time_secs = other.time_secs;
+    this.uptime_secs = other.uptime_secs;
+    this.requested_memonheap = other.requested_memonheap;
+    this.requested_memoffheap = other.requested_memoffheap;
+    this.requested_cpu = other.requested_cpu;
+    this.assigned_memonheap = other.assigned_memonheap;
+    this.assigned_memoffheap = other.assigned_memoffheap;
+    this.assigned_cpu = other.assigned_cpu;
+  }
+
+  public WorkerSummary deepCopy() {
+    return new WorkerSummary(this);
+  }
+
+  @Override
+  public void clear() {
+    this.supervisor_id = null;
+    this.host = null;
+    set_port_isSet(false);
+    this.port = 0;
+    this.topology_id = null;
+    this.topology_name = null;
+    set_num_executors_isSet(false);
+    this.num_executors = 0;
+    this.component_to_num_tasks = null;
+    set_time_secs_isSet(false);
+    this.time_secs = 0;
+    set_uptime_secs_isSet(false);
+    this.uptime_secs = 0;
+    set_requested_memonheap_isSet(false);
+    this.requested_memonheap = 0.0;
+    set_requested_memoffheap_isSet(false);
+    this.requested_memoffheap = 0.0;
+    set_requested_cpu_isSet(false);
+    this.requested_cpu = 0.0;
+    set_assigned_memonheap_isSet(false);
+    this.assigned_memonheap = 0.0;
+    set_assigned_memoffheap_isSet(false);
+    this.assigned_memoffheap = 0.0;
+    set_assigned_cpu_isSet(false);
+    this.assigned_cpu = 0.0;
+  }
+
+  public String get_supervisor_id() {
+    return this.supervisor_id;
+  }
+
+  public void set_supervisor_id(String supervisor_id) {
+    this.supervisor_id = supervisor_id;
+  }
+
+  public void unset_supervisor_id() {
+    this.supervisor_id = null;
+  }
+
+  /** Returns true if field supervisor_id is set (has been assigned a value) and false otherwise */
+  public boolean is_set_supervisor_id() {
+    return this.supervisor_id != null;
+  }
+
+  public void set_supervisor_id_isSet(boolean value) {
+    if (!value) {
+      this.supervisor_id = null;
+    }
+  }
+
+  public String get_host() {
+    return this.host;
+  }
+
+  public void set_host(String host) {
+    this.host = host;
+  }
+
+  public void unset_host() {
+    this.host = null;
+  }
+
+  /** Returns true if field host is set (has been assigned a value) and false otherwise */
+  public boolean is_set_host() {
+    return this.host != null;
+  }
+
+  public void set_host_isSet(boolean value) {
+    if (!value) {
+      this.host = null;
+    }
+  }
+
+  public int get_port() {
+    return this.port;
+  }
+
+  public void set_port(int port) {
+    this.port = port;
+    set_port_isSet(true);
+  }
+
+  public void unset_port() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __PORT_ISSET_ID);
+  }
+
+  /** Returns true if field port is set (has been assigned a value) and false otherwise */
+  public boolean is_set_port() {
+    return EncodingUtils.testBit(__isset_bitfield, __PORT_ISSET_ID);
+  }
+
+  public void set_port_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __PORT_ISSET_ID, value);
+  }
+
+  public String get_topology_id() {
+    return this.topology_id;
+  }
+
+  public void set_topology_id(String topology_id) {
+    this.topology_id = topology_id;
+  }
+
+  public void unset_topology_id() {
+    this.topology_id = null;
+  }
+
+  /** Returns true if field topology_id is set (has been assigned a value) and false otherwise */
+  public boolean is_set_topology_id() {
+    return this.topology_id != null;
+  }
+
+  public void set_topology_id_isSet(boolean value) {
+    if (!value) {
+      this.topology_id = null;
+    }
+  }
+
+  public String get_topology_name() {
+    return this.topology_name;
+  }
+
+  public void set_topology_name(String topology_name) {
+    this.topology_name = topology_name;
+  }
+
+  public void unset_topology_name() {
+    this.topology_name = null;
+  }
+
+  /** Returns true if field topology_name is set (has been assigned a value) and false otherwise */
+  public boolean is_set_topology_name() {
+    return this.topology_name != null;
+  }
+
+  public void set_topology_name_isSet(boolean value) {
+    if (!value) {
+      this.topology_name = null;
+    }
+  }
+
+  public int get_num_executors() {
+    return this.num_executors;
+  }
+
+  public void set_num_executors(int num_executors) {
+    this.num_executors = num_executors;
+    set_num_executors_isSet(true);
+  }
+
+  public void unset_num_executors() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUM_EXECUTORS_ISSET_ID);
+  }
+
+  /** Returns true if field num_executors is set (has been assigned a value) and false otherwise */
+  public boolean is_set_num_executors() {
+    return EncodingUtils.testBit(__isset_bitfield, __NUM_EXECUTORS_ISSET_ID);
+  }
+
+  public void set_num_executors_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUM_EXECUTORS_ISSET_ID, value);
+  }
+
+  public int get_component_to_num_tasks_size() {
+    return (this.component_to_num_tasks == null) ? 0 : this.component_to_num_tasks.size();
+  }
+
+  public void put_to_component_to_num_tasks(String key, long val) {
+    if (this.component_to_num_tasks == null) {
+      this.component_to_num_tasks = new HashMap<String,Long>();
+    }
+    this.component_to_num_tasks.put(key, val);
+  }
+
+  public Map<String,Long> get_component_to_num_tasks() {
+    return this.component_to_num_tasks;
+  }
+
+  public void set_component_to_num_tasks(Map<String,Long> component_to_num_tasks) {
+    this.component_to_num_tasks = component_to_num_tasks;
+  }
+
+  public void unset_component_to_num_tasks() {
+    this.component_to_num_tasks = null;
+  }
+
+  /** Returns true if field component_to_num_tasks is set (has been assigned a value) and false otherwise */
+  public boolean is_set_component_to_num_tasks() {
+    return this.component_to_num_tasks != null;
+  }
+
+  public void set_component_to_num_tasks_isSet(boolean value) {
+    if (!value) {
+      this.component_to_num_tasks = null;
+    }
+  }
+
+  public int get_time_secs() {
+    return this.time_secs;
+  }
+
+  public void set_time_secs(int time_secs) {
+    this.time_secs = time_secs;
+    set_time_secs_isSet(true);
+  }
+
+  public void unset_time_secs() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TIME_SECS_ISSET_ID);
+  }
+
+  /** Returns true if field time_secs is set (has been assigned a value) and false otherwise */
+  public boolean is_set_time_secs() {
+    return EncodingUtils.testBit(__isset_bitfield, __TIME_SECS_ISSET_ID);
+  }
+
+  public void set_time_secs_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TIME_SECS_ISSET_ID, value);
+  }
+
+  public int get_uptime_secs() {
+    return this.uptime_secs;
+  }
+
+  public void set_uptime_secs(int uptime_secs) {
+    this.uptime_secs = uptime_secs;
+    set_uptime_secs_isSet(true);
+  }
+
+  public void unset_uptime_secs() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __UPTIME_SECS_ISSET_ID);
+  }
+
+  /** Returns true if field uptime_secs is set (has been assigned a value) and false otherwise */
+  public boolean is_set_uptime_secs() {
+    return EncodingUtils.testBit(__isset_bitfield, __UPTIME_SECS_ISSET_ID);
+  }
+
+  public void set_uptime_secs_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __UPTIME_SECS_ISSET_ID, value);
+  }
+
+  public double get_requested_memonheap() {
+    return this.requested_memonheap;
+  }
+
+  public void set_requested_memonheap(double requested_memonheap) {
+    this.requested_memonheap = requested_memonheap;
+    set_requested_memonheap_isSet(true);
+  }
+
+  public void unset_requested_memonheap() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __REQUESTED_MEMONHEAP_ISSET_ID);
+  }
+
+  /** Returns true if field requested_memonheap is set (has been assigned a value) and false otherwise */
+  public boolean is_set_requested_memonheap() {
+    return EncodingUtils.testBit(__isset_bitfield, __REQUESTED_MEMONHEAP_ISSET_ID);
+  }
+
+  public void set_requested_memonheap_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __REQUESTED_MEMONHEAP_ISSET_ID, value);
+  }
+
+  public double get_requested_memoffheap() {
+    return this.requested_memoffheap;
+  }
+
+  public void set_requested_memoffheap(double requested_memoffheap) {
+    this.requested_memoffheap = requested_memoffheap;
+    set_requested_memoffheap_isSet(true);
+  }
+
+  public void unset_requested_memoffheap() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __REQUESTED_MEMOFFHEAP_ISSET_ID);
+  }
+
+  /** Returns true if field requested_memoffheap is set (has been assigned a value) and false otherwise */
+  public boolean is_set_requested_memoffheap() {
+    return EncodingUtils.testBit(__isset_bitfield, __REQUESTED_MEMOFFHEAP_ISSET_ID);
+  }
+
+  public void set_requested_memoffheap_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __REQUESTED_MEMOFFHEAP_ISSET_ID, value);
+  }
+
+  public double get_requested_cpu() {
+    return this.requested_cpu;
+  }
+
+  public void set_requested_cpu(double requested_cpu) {
+    this.requested_cpu = requested_cpu;
+    set_requested_cpu_isSet(true);
+  }
+
+  public void unset_requested_cpu() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __REQUESTED_CPU_ISSET_ID);
+  }
+
+  /** Returns true if field requested_cpu is set (has been assigned a value) and false otherwise */
+  public boolean is_set_requested_cpu() {
+    return EncodingUtils.testBit(__isset_bitfield, __REQUESTED_CPU_ISSET_ID);
+  }
+
+  public void set_requested_cpu_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __REQUESTED_CPU_ISSET_ID, value);
+  }
+
+  public double get_assigned_memonheap() {
+    return this.assigned_memonheap;
+  }
+
+  public void set_assigned_memonheap(double assigned_memonheap) {
+    this.assigned_memonheap = assigned_memonheap;
+    set_assigned_memonheap_isSet(true);
+  }
+
+  public void unset_assigned_memonheap() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ASSIGNED_MEMONHEAP_ISSET_ID);
+  }
+
+  /** Returns true if field assigned_memonheap is set (has been assigned a value) and false otherwise */
+  public boolean is_set_assigned_memonheap() {
+    return EncodingUtils.testBit(__isset_bitfield, __ASSIGNED_MEMONHEAP_ISSET_ID);
+  }
+
+  public void set_assigned_memonheap_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ASSIGNED_MEMONHEAP_ISSET_ID, value);
+  }
+
+  public double get_assigned_memoffheap() {
+    return this.assigned_memoffheap;
+  }
+
+  public void set_assigned_memoffheap(double assigned_memoffheap) {
+    this.assigned_memoffheap = assigned_memoffheap;
+    set_assigned_memoffheap_isSet(true);
+  }
+
+  public void unset_assigned_memoffheap() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ASSIGNED_MEMOFFHEAP_ISSET_ID);
+  }
+
+  /** Returns true if field assigned_memoffheap is set (has been assigned a value) and false otherwise */
+  public boolean is_set_assigned_memoffheap() {
+    return EncodingUtils.testBit(__isset_bitfield, __ASSIGNED_MEMOFFHEAP_ISSET_ID);
+  }
+
+  public void set_assigned_memoffheap_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ASSIGNED_MEMOFFHEAP_ISSET_ID, value);
+  }
+
+  public double get_assigned_cpu() {
+    return this.assigned_cpu;
+  }
+
+  public void set_assigned_cpu(double assigned_cpu) {
+    this.assigned_cpu = assigned_cpu;
+    set_assigned_cpu_isSet(true);
+  }
+
+  public void unset_assigned_cpu() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ASSIGNED_CPU_ISSET_ID);
+  }
+
+  /** Returns true if field assigned_cpu is set (has been assigned a value) and false otherwise */
+  public boolean is_set_assigned_cpu() {
+    return EncodingUtils.testBit(__isset_bitfield, __ASSIGNED_CPU_ISSET_ID);
+  }
+
+  public void set_assigned_cpu_isSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ASSIGNED_CPU_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case SUPERVISOR_ID:
+      if (value == null) {
+        unset_supervisor_id();
+      } else {
+        set_supervisor_id((String)value);
+      }
+      break;
+
+    case HOST:
+      if (value == null) {
+        unset_host();
+      } else {
+        set_host((String)value);
+      }
+      break;
+
+    case PORT:
+      if (value == null) {
+        unset_port();
+      } else {
+        set_port((Integer)value);
+      }
+      break;
+
+    case TOPOLOGY_ID:
+      if (value == null) {
+        unset_topology_id();
+      } else {
+        set_topology_id((String)value);
+      }
+      break;
+
+    case TOPOLOGY_NAME:
+      if (value == null) {
+        unset_topology_name();
+      } else {
+        set_topology_name((String)value);
+      }
+      break;
+
+    case NUM_EXECUTORS:
+      if (value == null) {
+        unset_num_executors();
+      } else {
+        set_num_executors((Integer)value);
+      }
+      break;
+
+    case COMPONENT_TO_NUM_TASKS:
+      if (value == null) {
+        unset_component_to_num_tasks();
+      } else {
+        set_component_to_num_tasks((Map<String,Long>)value);
+      }
+      break;
+
+    case TIME_SECS:
+      if (value == null) {
+        unset_time_secs();
+      } else {
+        set_time_secs((Integer)value);
+      }
+      break;
+
+    case UPTIME_SECS:
+      if (value == null) {
+        unset_uptime_secs();
+      } else {
+        set_uptime_secs((Integer)value);
+      }
+      break;
+
+    case REQUESTED_MEMONHEAP:
+      if (value == null) {
+        unset_requested_memonheap();
+      } else {
+        set_requested_memonheap((Double)value);
+      }
+      break;
+
+    case REQUESTED_MEMOFFHEAP:
+      if (value == null) {
+        unset_requested_memoffheap();
+      } else {
+        set_requested_memoffheap((Double)value);
+      }
+      break;
+
+    case REQUESTED_CPU:
+      if (value == null) {
+        unset_requested_cpu();
+      } else {
+        set_requested_cpu((Double)value);
+      }
+      break;
+
+    case ASSIGNED_MEMONHEAP:
+      if (value == null) {
+        unset_assigned_memonheap();
+      } else {
+        set_assigned_memonheap((Double)value);
+      }
+      break;
+
+    case ASSIGNED_MEMOFFHEAP:
+      if (value == null) {
+        unset_assigned_memoffheap();
+      } else {
+        set_assigned_memoffheap((Double)value);
+      }
+      break;
+
+    case ASSIGNED_CPU:
+      if (value == null) {
+        unset_assigned_cpu();
+      } else {
+        set_assigned_cpu((Double)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case SUPERVISOR_ID:
+      return get_supervisor_id();
+
+    case HOST:
+      return get_host();
+
+    case PORT:
+      return get_port();
+
+    case TOPOLOGY_ID:
+      return get_topology_id();
+
+    case TOPOLOGY_NAME:
+      return get_topology_name();
+
+    case NUM_EXECUTORS:
+      return get_num_executors();
+
+    case COMPONENT_TO_NUM_TASKS:
+      return get_component_to_num_tasks();
+
+    case TIME_SECS:
+      return get_time_secs();
+
+    case UPTIME_SECS:
+      return get_uptime_secs();
+
+    case REQUESTED_MEMONHEAP:
+      return get_requested_memonheap();
+
+    case REQUESTED_MEMOFFHEAP:
+      return get_requested_memoffheap();
+
+    case REQUESTED_CPU:
+      return get_requested_cpu();
+
+    case ASSIGNED_MEMONHEAP:
+      return get_assigned_memonheap();
+
+    case ASSIGNED_MEMOFFHEAP:
+      return get_assigned_memoffheap();
+
+    case ASSIGNED_CPU:
+      return get_assigned_cpu();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case SUPERVISOR_ID:
+      return is_set_supervisor_id();
+    case HOST:
+      return is_set_host();
+    case PORT:
+      return is_set_port();
+    case TOPOLOGY_ID:
+      return is_set_topology_id();
+    case TOPOLOGY_NAME:
+      return is_set_topology_name();
+    case NUM_EXECUTORS:
+      return is_set_num_executors();
+    case COMPONENT_TO_NUM_TASKS:
+      return is_set_component_to_num_tasks();
+    case TIME_SECS:
+      return is_set_time_secs();
+    case UPTIME_SECS:
+      return is_set_uptime_secs();
+    case REQUESTED_MEMONHEAP:
+      return is_set_requested_memonheap();
+    case REQUESTED_MEMOFFHEAP:
+      return is_set_requested_memoffheap();
+    case REQUESTED_CPU:
+      return is_set_requested_cpu();
+    case ASSIGNED_MEMONHEAP:
+      return is_set_assigned_memonheap();
+    case ASSIGNED_MEMOFFHEAP:
+      return is_set_assigned_memoffheap();
+    case ASSIGNED_CPU:
+      return is_set_assigned_cpu();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof WorkerSummary)
+      return this.equals((WorkerSummary)that);
+    return false;
+  }
+
+  public boolean equals(WorkerSummary that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_supervisor_id = true && this.is_set_supervisor_id();
+    boolean that_present_supervisor_id = true && that.is_set_supervisor_id();
+    if (this_present_supervisor_id || that_present_supervisor_id) {
+      if (!(this_present_supervisor_id && that_present_supervisor_id))
+        return false;
+      if (!this.supervisor_id.equals(that.supervisor_id))
+        return false;
+    }
+
+    boolean this_present_host = true && this.is_set_host();
+    boolean that_present_host = true && that.is_set_host();
+    if (this_present_host || that_present_host) {
+      if (!(this_present_host && that_present_host))
+        return false;
+      if (!this.host.equals(that.host))
+        return false;
+    }
+
+    boolean this_present_port = true && this.is_set_port();
+    boolean that_present_port = true && that.is_set_port();
+    if (this_present_port || that_present_port) {
+      if (!(this_present_port && that_present_port))
+        return false;
+      if (this.port != that.port)
+        return false;
+    }
+
+    boolean this_present_topology_id = true && this.is_set_topology_id();
+    boolean that_present_topology_id = true && that.is_set_topology_id();
+    if (this_present_topology_id || that_present_topology_id) {
+      if (!(this_present_topology_id && that_present_topology_id))
+        return false;
+      if (!this.topology_id.equals(that.topology_id))
+        return false;
+    }
+
+    boolean this_present_topology_name = true && this.is_set_topology_name();
+    boolean that_present_topology_name = true && that.is_set_topology_name();
+    if (this_present_topology_name || that_present_topology_name) {
+      if (!(this_present_topology_name && that_present_topology_name))
+        return false;
+      if (!this.topology_name.equals(that.topology_name))
+        return false;
+    }
+
+    boolean this_present_num_executors = true && this.is_set_num_executors();
+    boolean that_present_num_executors = true && that.is_set_num_executors();
+    if (this_present_num_executors || that_present_num_executors) {
+      if (!(this_present_num_executors && that_present_num_executors))
+        return false;
+      if (this.num_executors != that.num_executors)
+        return false;
+    }
+
+    boolean this_present_component_to_num_tasks = true && this.is_set_component_to_num_tasks();
+    boolean that_present_component_to_num_tasks = true && that.is_set_component_to_num_tasks();
+    if (this_present_component_to_num_tasks || that_present_component_to_num_tasks) {
+      if (!(this_present_component_to_num_tasks && that_present_component_to_num_tasks))
+        return false;
+      if (!this.component_to_num_tasks.equals(that.component_to_num_tasks))
+        return false;
+    }
+
+    boolean this_present_time_secs = true && this.is_set_time_secs();
+    boolean that_present_time_secs = true && that.is_set_time_secs();
+    if (this_present_time_secs || that_present_time_secs) {
+      if (!(this_present_time_secs && that_present_time_secs))
+        return false;
+      if (this.time_secs != that.time_secs)
+        return false;
+    }
+
+    boolean this_present_uptime_secs = true && this.is_set_uptime_secs();
+    boolean that_present_uptime_secs = true && that.is_set_uptime_secs();
+    if (this_present_uptime_secs || that_present_uptime_secs) {
+      if (!(this_present_uptime_secs && that_present_uptime_secs))
+        return false;
+      if (this.uptime_secs != that.uptime_secs)
+        return false;
+    }
+
+    boolean this_present_requested_memonheap = true && this.is_set_requested_memonheap();
+    boolean that_present_requested_memonheap = true && that.is_set_requested_memonheap();
+    if (this_present_requested_memonheap || that_present_requested_memonheap) {
+      if (!(this_present_requested_memonheap && that_present_requested_memonheap))
+        return false;
+      if (this.requested_memonheap != that.requested_memonheap)
+        return false;
+    }
+
+    boolean this_present_requested_memoffheap = true && this.is_set_requested_memoffheap();
+    boolean that_present_requested_memoffheap = true && that.is_set_requested_memoffheap();
+    if (this_present_requested_memoffheap || that_present_requested_memoffheap) {
+      if (!(this_present_requested_memoffheap && that_present_requested_memoffheap))
+        return false;
+      if (this.requested_memoffheap != that.requested_memoffheap)
+        return false;
+    }
+
+    boolean this_present_requested_cpu = true && this.is_set_requested_cpu();
+    boolean that_present_requested_cpu = true && that.is_set_requested_cpu();
+    if (this_present_requested_cpu || that_present_requested_cpu) {
+      if (!(this_present_requested_cpu && that_present_requested_cpu))
+        return false;
+      if (this.requested_cpu != that.requested_cpu)
+        return false;
+    }
+
+    boolean this_present_assigned_memonheap = true && this.is_set_assigned_memonheap();
+    boolean that_present_assigned_memonheap = true && that.is_set_assigned_memonheap();
+    if (this_present_assigned_memonheap || that_present_assigned_memonheap) {
+      if (!(this_present_assigned_memonheap && that_present_assigned_memonheap))
+        return false;
+      if (this.assigned_memonheap != that.assigned_memonheap)
+        return false;
+    }
+
+    boolean this_present_assigned_memoffheap = true && this.is_set_assigned_memoffheap();
+    boolean that_present_assigned_memoffheap = true && that.is_set_assigned_memoffheap();
+    if (this_present_assigned_memoffheap || that_present_assigned_memoffheap) {
+      if (!(this_present_assigned_memoffheap && that_present_assigned_memoffheap))
+        return false;
+      if (this.assigned_memoffheap != that.assigned_memoffheap)
+        return false;
+    }
+
+    boolean this_present_assigned_cpu = true && this.is_set_assigned_cpu();
+    boolean that_present_assigned_cpu = true && that.is_set_assigned_cpu();
+    if (this_present_assigned_cpu || that_present_assigned_cpu) {
+      if (!(this_present_assigned_cpu && that_present_assigned_cpu))
+        return false;
+      if (this.assigned_cpu != that.assigned_cpu)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_supervisor_id = true && (is_set_supervisor_id());
+    list.add(present_supervisor_id);
+    if (present_supervisor_id)
+      list.add(supervisor_id);
+
+    boolean present_host = true && (is_set_host());
+    list.add(present_host);
+    if (present_host)
+      list.add(host);
+
+    boolean present_port = true && (is_set_port());
+    list.add(present_port);
+    if (present_port)
+      list.add(port);
+
+    boolean present_topology_id = true && (is_set_topology_id());
+    list.add(present_topology_id);
+    if (present_topology_id)
+      list.add(topology_id);
+
+    boolean present_topology_name = true && (is_set_topology_name());
+    list.add(present_topology_name);
+    if (present_topology_name)
+      list.add(topology_name);
+
+    boolean present_num_executors = true && (is_set_num_executors());
+    list.add(present_num_executors);
+    if (present_num_executors)
+      list.add(num_executors);
+
+    boolean present_component_to_num_tasks = true && (is_set_component_to_num_tasks());
+    list.add(present_component_to_num_tasks);
+    if (present_component_to_num_tasks)
+      list.add(component_to_num_tasks);
+
+    boolean present_time_secs = true && (is_set_time_secs());
+    list.add(present_time_secs);
+    if (present_time_secs)
+      list.add(time_secs);
+
+    boolean present_uptime_secs = true && (is_set_uptime_secs());
+    list.add(present_uptime_secs);
+    if (present_uptime_secs)
+      list.add(uptime_secs);
+
+    boolean present_requested_memonheap = true && (is_set_requested_memonheap());
+    list.add(present_requested_memonheap);
+    if (present_requested_memonheap)
+      list.add(requested_memonheap);
+
+    boolean present_requested_memoffheap = true && (is_set_requested_memoffheap());
+    list.add(present_requested_memoffheap);
+    if (present_requested_memoffheap)
+      list.add(requested_memoffheap);
+
+    boolean present_requested_cpu = true && (is_set_requested_cpu());
+    list.add(present_requested_cpu);
+    if (present_requested_cpu)
+      list.add(requested_cpu);
+
+    boolean present_assigned_memonheap = true && (is_set_assigned_memonheap());
+    list.add(present_assigned_memonheap);
+    if (present_assigned_memonheap)
+      list.add(assigned_memonheap);
+
+    boolean present_assigned_memoffheap = true && (is_set_assigned_memoffheap());
+    list.add(present_assigned_memoffheap);
+    if (present_assigned_memoffheap)
+      list.add(assigned_memoffheap);
+
+    boolean present_assigned_cpu = true && (is_set_assigned_cpu());
+    list.add(present_assigned_cpu);
+    if (present_assigned_cpu)
+      list.add(assigned_cpu);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(WorkerSummary other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(is_set_supervisor_id()).compareTo(other.is_set_supervisor_id());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_supervisor_id()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.supervisor_id, other.supervisor_id);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_host()).compareTo(other.is_set_host());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_host()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.host, other.host);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_port()).compareTo(other.is_set_port());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_port()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.port, other.port);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_topology_id()).compareTo(other.is_set_topology_id());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_topology_id()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.topology_id, other.topology_id);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_topology_name()).compareTo(other.is_set_topology_name());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_topology_name()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.topology_name, other.topology_name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_num_executors()).compareTo(other.is_set_num_executors());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_num_executors()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.num_executors, other.num_executors);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_component_to_num_tasks()).compareTo(other.is_set_component_to_num_tasks());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_component_to_num_tasks()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.component_to_num_tasks, other.component_to_num_tasks);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_time_secs()).compareTo(other.is_set_time_secs());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_time_secs()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.time_secs, other.time_secs);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_uptime_secs()).compareTo(other.is_set_uptime_secs());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_uptime_secs()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.uptime_secs, other.uptime_secs);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_requested_memonheap()).compareTo(other.is_set_requested_memonheap());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_requested_memonheap()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.requested_memonheap, other.requested_memonheap);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_requested_memoffheap()).compareTo(other.is_set_requested_memoffheap());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_requested_memoffheap()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.requested_memoffheap, other.requested_memoffheap);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_requested_cpu()).compareTo(other.is_set_requested_cpu());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_requested_cpu()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.requested_cpu, other.requested_cpu);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_assigned_memonheap()).compareTo(other.is_set_assigned_memonheap());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_assigned_memonheap()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.assigned_memonheap, other.assigned_memonheap);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_assigned_memoffheap()).compareTo(other.is_set_assigned_memoffheap());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_assigned_memoffheap()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.assigned_memoffheap, other.assigned_memoffheap);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_assigned_cpu()).compareTo(other.is_set_assigned_cpu());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_assigned_cpu()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.assigned_cpu, other.assigned_cpu);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("WorkerSummary(");
+    boolean first = true;
+
+    if (is_set_supervisor_id()) {
+      sb.append("supervisor_id:");
+      if (this.supervisor_id == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.supervisor_id);
+      }
+      first = false;
+    }
+    if (is_set_host()) {
+      if (!first) sb.append(", ");
+      sb.append("host:");
+      if (this.host == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.host);
+      }
+      first = false;
+    }
+    if (is_set_port()) {
+      if (!first) sb.append(", ");
+      sb.append("port:");
+      sb.append(this.port);
+      first = false;
+    }
+    if (is_set_topology_id()) {
+      if (!first) sb.append(", ");
+      sb.append("topology_id:");
+      if (this.topology_id == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.topology_id);
+      }
+      first = false;
+    }
+    if (is_set_topology_name()) {
+      if (!first) sb.append(", ");
+      sb.append("topology_name:");
+      if (this.topology_name == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.topology_name);
+      }
+      first = false;
+    }
+    if (is_set_num_executors()) {
+      if (!first) sb.append(", ");
+      sb.append("num_executors:");
+      sb.append(this.num_executors);
+      first = false;
+    }
+    if (is_set_component_to_num_tasks()) {
+      if (!first) sb.append(", ");
+      sb.append("component_to_num_tasks:");
+      if (this.component_to_num_tasks == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.component_to_num_tasks);
+      }
+      first = false;
+    }
+    if (is_set_time_secs()) {
+      if (!first) sb.append(", ");
+      sb.append("time_secs:");
+      sb.append(this.time_secs);
+      first = false;
+    }
+    if (is_set_uptime_secs()) {
+      if (!first) sb.append(", ");
+      sb.append("uptime_secs:");
+      sb.append(this.uptime_secs);
+      first = false;
+    }
+    if (is_set_requested_memonheap()) {
+      if (!first) sb.append(", ");
+      sb.append("requested_memonheap:");
+      sb.append(this.requested_memonheap);
+      first = false;
+    }
+    if (is_set_requested_memoffheap()) {
+      if (!first) sb.append(", ");
+      sb.append("requested_memoffheap:");
+      sb.append(this.requested_memoffheap);
+      first = false;
+    }
+    if (is_set_requested_cpu()) {
+      if (!first) sb.append(", ");
+      sb.append("requested_cpu:");
+      sb.append(this.requested_cpu);
+      first = false;
+    }
+    if (is_set_assigned_memonheap()) {
+      if (!first) sb.append(", ");
+      sb.append("assigned_memonheap:");
+      sb.append(this.assigned_memonheap);
+      first = false;
+    }
+    if (is_set_assigned_memoffheap()) {
+      if (!first) sb.append(", ");
+      sb.append("assigned_memoffheap:");
+      sb.append(this.assigned_memoffheap);
+      first = false;
+    }
+    if (is_set_assigned_cpu()) {
+      if (!first) sb.append(", ");
+      sb.append("assigned_cpu:");
+      sb.append(this.assigned_cpu);
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class WorkerSummaryStandardSchemeFactory implements SchemeFactory {
+    public WorkerSummaryStandardScheme getScheme() {
+      return new WorkerSummaryStandardScheme();
+    }
+  }
+
+  private static class WorkerSummaryStandardScheme extends StandardScheme<WorkerSummary> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, WorkerSummary struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // SUPERVISOR_ID
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.supervisor_id = iprot.readString();
+              struct.set_supervisor_id_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // HOST
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.host = iprot.readString();
+              struct.set_host_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // PORT
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.port = iprot.readI32();
+              struct.set_port_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // TOPOLOGY_ID
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.topology_id = iprot.readString();
+              struct.set_topology_id_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // TOPOLOGY_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.topology_name = iprot.readString();
+              struct.set_topology_name_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // NUM_EXECUTORS
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.num_executors = iprot.readI32();
+              struct.set_num_executors_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 7: // COMPONENT_TO_NUM_TASKS
+            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
+              {
+                org.apache.thrift.protocol.TMap _map418 = iprot.readMapBegin();
+                struct.component_to_num_tasks = new HashMap<String,Long>(2*_map418.size);
+                String _key419;
+                long _val420;
+                for (int _i421 = 0; _i421 < _map418.size; ++_i421)
+                {
+                  _key419 = iprot.readString();
+                  _val420 = iprot.readI64();
+                  struct.component_to_num_tasks.put(_key419, _val420);
+                }
+                iprot.readMapEnd();
+              }
+              struct.set_component_to_num_tasks_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 8: // TIME_SECS
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.time_secs = iprot.readI32();
+              struct.set_time_secs_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 9: // UPTIME_SECS
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.uptime_secs = iprot.readI32();
+              struct.set_uptime_secs_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 521: // REQUESTED_MEMONHEAP
+            if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
+              struct.requested_memonheap = iprot.readDouble();
+              struct.set_requested_memonheap_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 522: // REQUESTED_MEMOFFHEAP
+            if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
+              struct.requested_memoffheap = iprot.readDouble();
+              struct.set_requested_memoffheap_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 523: // REQUESTED_CPU
+            if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
+              struct.requested_cpu = iprot.readDouble();
+              struct.set_requested_cpu_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 524: // ASSIGNED_MEMONHEAP
+            if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
+              struct.assigned_memonheap = iprot.readDouble();
+              struct.set_assigned_memonheap_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 525: // ASSIGNED_MEMOFFHEAP
+            if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
+              struct.assigned_memoffheap = iprot.readDouble();
+              struct.set_assigned_memoffheap_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 526: // ASSIGNED_CPU
+            if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
+              struct.assigned_cpu = iprot.readDouble();
+              struct.set_assigned_cpu_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, WorkerSummary struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.supervisor_id != null) {
+        if (struct.is_set_supervisor_id()) {
+          oprot.writeFieldBegin(SUPERVISOR_ID_FIELD_DESC);
+          oprot.writeString(struct.supervisor_id);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.host != null) {
+        if (struct.is_set_host()) {
+          oprot.writeFieldBegin(HOST_FIELD_DESC);
+          oprot.writeString(struct.host);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.is_set_port()) {
+        oprot.writeFieldBegin(PORT_FIELD_DESC);
+        oprot.writeI32(struct.port);
+        oprot.writeFieldEnd();
+      }
+      if (struct.topology_id != null) {
+        if (struct.is_set_topology_id()) {
+          oprot.writeFieldBegin(TOPOLOGY_ID_FIELD_DESC);
+          oprot.writeString(struct.topology_id);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.topology_name != null) {
+        if (struct.is_set_topology_name()) {
+          oprot.writeFieldBegin(TOPOLOGY_NAME_FIELD_DESC);
+          oprot.writeString(struct.topology_name);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.is_set_num_executors()) {
+        oprot.writeFieldBegin(NUM_EXECUTORS_FIELD_DESC);
+        oprot.writeI32(struct.num_executors);
+        oprot.writeFieldEnd();
+      }
+      if (struct.component_to_num_tasks != null) {
+        if (struct.is_set_component_to_num_tasks()) {
+          oprot.writeFieldBegin(COMPONENT_TO_NUM_TASKS_FIELD_DESC);
+          {
+            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I64, struct.component_to_num_tasks.size()));
+            for (Map.Entry<String, Long> _iter422 : struct.component_to_num_tasks.entrySet())
+            {
+              oprot.writeString(_iter422.getKey());
+              oprot.writeI64(_iter422.getValue());
+            }
+            oprot.writeMapEnd();
+          }
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.is_set_time_secs()) {
+        oprot.writeFieldBegin(TIME_SECS_FIELD_DESC);
+        oprot.writeI32(struct.time_secs);
+        oprot.writeFieldEnd();
+      }
+      if (struct.is_set_uptime_secs()) {
+        oprot.writeFieldBegin(UPTIME_SECS_FIELD_DESC);
+        oprot.writeI32(struct.uptime_secs);
+        oprot.writeFieldEnd();
+      }
+      if (struct.is_set_requested_memonheap()) {
+        oprot.writeFieldBegin(REQUESTED_MEMONHEAP_FIELD_DESC);
+        oprot.writeDouble(struct.requested_memonheap);
+        oprot.writeFieldEnd();
+      }
+      if (struct.is_set_requested_memoffheap()) {
+        oprot.writeFieldBegin(REQUESTED_MEMOFFHEAP_FIELD_DESC);
+        oprot.writeDouble(struct.requested_memoffheap);
+        oprot.writeFieldEnd();
+      }
+      if (struct.is_set_requested_cpu()) {
+        oprot.writeFieldBegin(REQUESTED_CPU_FIELD_DESC);
+        oprot.writeDouble(struct.requested_cpu);
+        oprot.writeFieldEnd();
+      }
+      if (struct.is_set_assigned_memonheap()) {
+        oprot.writeFieldBegin(ASSIGNED_MEMONHEAP_FIELD_DESC);
+        oprot.writeDouble(struct.assigned_memonheap);
+        oprot.writeFieldEnd();
+      }
+      if (struct.is_set_assigned_memoffheap()) {
+        oprot.writeFieldBegin(ASSIGNED_MEMOFFHEAP_FIELD_DESC);
+        oprot.writeDouble(struct.assigned_memoffheap);
+        oprot.writeFieldEnd();
+      }
+      if (struct.is_set_assigned_cpu()) {
+        oprot.writeFieldBegin(ASSIGNED_CPU_FIELD_DESC);
+        oprot.writeDouble(struct.assigned_cpu);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class WorkerSummaryTupleSchemeFactory implements SchemeFactory {
+    public WorkerSummaryTupleScheme getScheme() {
+      return new WorkerSummaryTupleScheme();
+    }
+  }
+
+  private static class WorkerSummaryTupleScheme extends TupleScheme<WorkerSummary> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, WorkerSummary struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.is_set_supervisor_id()) {
+        optionals.set(0);
+      }
+      if (struct.is_set_host()) {
+        optionals.set(1);
+      }
+      if (struct.is_set_port()) {
+        optionals.set(2);
+      }
+      if (struct.is_set_topology_id()) {
+        optionals.set(3);
+      }
+      if (struct.is_set_topology_name()) {
+        optionals.set(4);
+      }
+      if (struct.is_set_num_executors()) {
+        optionals.set(5);
+      }
+      if (struct.is_set_component_to_num_tasks()) {
+        optionals.set(6);
+      }
+      if (struct.is_set_time_secs()) {
+        optionals.set(7);
+      }
+      if (struct.is_set_uptime_secs()) {
+        optionals.set(8);
+      }
+      if (struct.is_set_requested_memonheap()) {
+        optionals.set(9);
+      }
+      if (struct.is_set_requested_memoffheap()) {
+        optionals.set(10);
+      }
+      if (struct.is_set_requested_cpu()) {
+        optionals.set(11);
+      }
+      if (struct.is_set_assigned_memonheap()) {
+        optionals.set(12);
+      }
+      if (struct.is_set_assigned_memoffheap()) {
+        optionals.set(13);
+      }
+      if (struct.is_set_assigned_cpu()) {
+        optionals.set(14);
+      }
+      oprot.writeBitSet(optionals, 15);
+      if (struct.is_set_supervisor_id()) {
+        oprot.writeString(struct.supervisor_id);
+      }
+      if (struct.is_set_host()) {
+        oprot.writeString(struct.host);
+      }
+      if (struct.is_set_port()) {
+        oprot.writeI32(struct.port);
+      }
+      if (struct.is_set_topology_id()) {
+        oprot.writeString(struct.topology_id);
+      }
+      if (struct.is_set_topology_name()) {
+        oprot.writeString(struct.topology_name);
+      }
+      if (struct.is_set_num_executors()) {
+        oprot.writeI32(struct.num_executors);
+      }
+      if (struct.is_set_component_to_num_tasks()) {
+        {
+          oprot.writeI32(struct.component_to_num_tasks.size());
+          for (Map.Entry<String, Long> _iter423 : struct.component_to_num_tasks.entrySet())
+          {
+            oprot.writeString(_iter423.getKey());
+            oprot.writeI64(_iter423.getValue());
+          }
+        }
+      }
+      if (struct.is_set_time_secs()) {
+        oprot.writeI32(struct.time_secs);
+      }
+      if (struct.is_set_uptime_secs()) {
+        oprot.writeI32(struct.uptime_secs);
+      }
+      if (struct.is_set_requested_memonheap()) {
+        oprot.writeDouble(struct.requested_memonheap);
+      }
+      if (struct.is_set_requested_memoffheap()) {
+        oprot.writeDouble(struct.requested_memoffheap);
+      }
+      if (struct.is_set_requested_cpu()) {
+        oprot.writeDouble(struct.requested_cpu);
+      }
+      if (struct.is_set_assigned_memonheap()) {
+        oprot.writeDouble(struct.assigned_memonheap);
+      }
+      if (struct.is_set_assigned_memoffheap()) {
+        oprot.writeDouble(struct.assigned_memoffheap);
+      }
+      if (struct.is_set_assigned_cpu()) {
+        oprot.writeDouble(struct.assigned_cpu);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, WorkerSummary struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(15);
+      if (incoming.get(0)) {
+        struct.supervisor_id = iprot.readString();
+        struct.set_supervisor_id_isSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.host = iprot.readString();
+        struct.set_host_isSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.port = iprot.readI32();
+        struct.set_port_isSet(true);
+      }
+      if (incoming.get(3)) {
+        struct.topology_id = iprot.readString();
+        struct.set_topology_id_isSet(true);
+      }
+      if (incoming.get(4)) {
+        struct.topology_name = iprot.readString();
+        struct.set_topology_name_isSet(true);
+      }
+      if (incoming.get(5)) {
+        struct.num_executors = iprot.readI32();
+        struct.set_num_executors_isSet(true);
+      }
+      if (incoming.get(6)) {
+        {
+          org.apache.thrift.protocol.TMap _map424 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I64, iprot.readI32());
+          struct.component_to_num_tasks = new HashMap<String,Long>(2*_map424.size);
+          String _key425;
+          long _val426;
+          for (int _i427 = 0; _i427 < _map424.size; ++_i427)
+          {
+            _key425 = iprot.readString();
+            _val426 = iprot.readI64();
+            struct.component_to_num_tasks.put(_key425, _val426);
+          }
+        }
+        struct.set_component_to_num_tasks_isSet(true);
+      }
+      if (incoming.get(7)) {
+        struct.time_secs = iprot.readI32();
+        struct.set_time_secs_isSet(true);
+      }
+      if (incoming.get(8)) {
+        struct.uptime_secs = iprot.readI32();
+        struct.set_uptime_secs_isSet(true);
+      }
+      if (incoming.get(9)) {
+        struct.requested_memonheap = iprot.readDouble();
+        struct.set_requested_memonheap_isSet(true);
+      }
+      if (incoming.get(10)) {
+        struct.requested_memoffheap = iprot.readDouble();
+        struct.set_requested_memoffheap_isSet(true);
+      }
+      if (incoming.get(11)) {
+        struct.requested_cpu = iprot.readDouble();
+        struct.set_requested_cpu_isSet(true);
+      }
+      if (incoming.get(12)) {
+        struct.assigned_memonheap = iprot.readDouble();
+        struct.set_assigned_memonheap_isSet(true);
+      }
+      if (incoming.get(13)) {
+        struct.assigned_memoffheap = iprot.readDouble();
+        struct.set_assigned_memoffheap_isSet(true);
+      }
+      if (incoming.get(14)) {
+        struct.assigned_cpu = iprot.readDouble();
+        struct.set_assigned_cpu_isSet(true);
+      }
+    }
+  }
+
+}
+

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/jvm/org/apache/storm/scheduler/Cluster.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/org/apache/storm/scheduler/Cluster.java b/storm-core/src/jvm/org/apache/storm/scheduler/Cluster.java
index 89cc1bc..6a061ac 100644
--- a/storm-core/src/jvm/org/apache/storm/scheduler/Cluster.java
+++ b/storm-core/src/jvm/org/apache/storm/scheduler/Cluster.java
@@ -30,17 +30,23 @@ import org.apache.storm.networktopography.DNSToSwitchMapping;
 import org.apache.storm.utils.Utils;
 
 public class Cluster {
-
     /**
      * key: supervisor id, value: supervisor details
      */
     private Map<String, SupervisorDetails> supervisors;
     /**
-     * key: supervisor id, value: supervisor's total and used resources
+     * key: supervisor id,
+     * value: supervisor's total and used resources, i.e. {totalMem, totalCpu, usedMem, usedCpu}
      */
     private Map<String, Double[]> supervisorsResources;
 
     /**
+     * key: topology id,
+     * value: map of worker slot to resources for that worker
+     */
+    private Map<String, Map<WorkerSlot, Double[]>> workerResources;
+
+    /**
      * key: rack, value: nodes in that rack
      */
     private Map<String, List<String>> networkTopography;
@@ -51,11 +57,12 @@ public class Cluster {
     private Map<String, SchedulerAssignmentImpl> assignments;
     /**
      * key topologyId, Value: scheduler's status.
-     */  
+     */
     private Map<String, String> status;
 
     /**
-     * key topologyId, Value: requested and assigned resources (e.g., on-heap/off-heap mem, cpu) for each topology.
+     * key topologyId, Value: requested and assigned resources for each topology.
+     * value: {requestedMemOnHeap, requestedMemOffHeap, requestedCpu, assignedMemOnHeap, assignedMemOffHeap, assignedCpu}
      */
     private Map<String, Double[]> topologyResources;
 
@@ -78,6 +85,7 @@ public class Cluster {
         this.status = new HashMap<String, String>();
         this.topologyResources = new HashMap<String, Double[]>();
         this.supervisorsResources = new HashMap<String, Double[]>();
+        this.workerResources = new HashMap<String, Map<WorkerSlot, Double[]>>();
         this.hostToId = new HashMap<String, List<String>>();
         for (Map.Entry<String, SupervisorDetails> entry : supervisors.entrySet()) {
             String nodeId = entry.getKey();
@@ -107,15 +115,15 @@ public class Cluster {
             this.networkTopography = new HashMap<String, List<String>>(src.networkTopography);
         }
     }
-    
+
     public void setBlacklistedHosts(Set<String> hosts) {
         blackListedHosts = hosts;
     }
-    
+
     public Set<String> getBlacklistedHosts() {
         return blackListedHosts;
     }
-    
+
     public void blacklistHost(String host) {
         // this is so it plays well with setting blackListedHosts to an immutable list
         if(blackListedHosts==null) blackListedHosts = new HashSet<String>();
@@ -123,19 +131,19 @@ public class Cluster {
             blackListedHosts = new HashSet<String>(blackListedHosts);
         blackListedHosts.add(host);
     }
-    
+
     public boolean isBlackListed(String supervisorId) {
-        return blackListedHosts != null && blackListedHosts.contains(getHost(supervisorId));        
+        return blackListedHosts != null && blackListedHosts.contains(getHost(supervisorId));
     }
 
     public boolean isBlacklistedHost(String host) {
-        return blackListedHosts != null && blackListedHosts.contains(host);  
+        return blackListedHosts != null && blackListedHosts.contains(host);
     }
-    
+
     public String getHost(String supervisorId) {
         return inimbus.getHostName(supervisors, supervisorId);
     }
-    
+
     /**
      * @return all the topologies which needs scheduling.
      */
@@ -152,7 +160,7 @@ public class Cluster {
 
     /**
      * Does the topology need scheduling?
-     * 
+     *
      * A topology needs scheduling if one of the following conditions holds:
      * <ul>
      *   <li>Although the topology is assigned slots, but is squeezed. i.e. the topology is assigned less slots than desired.</li>
@@ -171,7 +179,7 @@ public class Cluster {
      */
     public Map<ExecutorDetails, String> getNeedsSchedulingExecutorToComponents(TopologyDetails topology) {
         Collection<ExecutorDetails> allExecutors = new HashSet(topology.getExecutors());
-        
+
         SchedulerAssignment assignment = this.assignments.get(topology.getId());
         if (assignment != null) {
             Collection<ExecutorDetails> assignedExecutors = assignment.getExecutors();
@@ -180,7 +188,7 @@ public class Cluster {
 
         return topology.selectExecutorToComponent(allExecutors);
     }
-    
+
     /**
      * @param topology
      * @return a component-id -> executors map which needs scheduling in this topology.
@@ -194,10 +202,10 @@ public class Cluster {
             if (!componentToExecutors.containsKey(component)) {
                 componentToExecutors.put(component, new ArrayList<ExecutorDetails>());
             }
-            
+
             componentToExecutors.get(component).add(executor);
         }
-        
+
         return componentToExecutors;
     }
 
@@ -232,7 +240,7 @@ public class Cluster {
 
         return ret;
     }
-    
+
     public Set<Integer> getAssignablePorts(SupervisorDetails supervisor) {
         if(isBlackListed(supervisor.id)) return new HashSet();
         return supervisor.allPorts;
@@ -251,7 +259,7 @@ public class Cluster {
 
         return slots;
     }
-    
+
     public List<WorkerSlot> getAssignableSlots(SupervisorDetails supervisor) {
         Set<Integer> ports = this.getAssignablePorts(supervisor);
         List<WorkerSlot> slots = new ArrayList<WorkerSlot>(ports.size());
@@ -260,9 +268,9 @@ public class Cluster {
             slots.add(new WorkerSlot(supervisor.getId(), port));
         }
 
-        return slots;        
+        return slots;
     }
-    
+
     /**
      * get the unassigned executors of the topology.
      */
@@ -272,13 +280,13 @@ public class Cluster {
         }
 
         Collection<ExecutorDetails> ret = new HashSet(topology.getExecutors());
-        
+
         SchedulerAssignment assignment = this.getAssignmentById(topology.getId());
         if (assignment != null) {
             Set<ExecutorDetails> assignedExecutors = assignment.getExecutors();
             ret.removeAll(assignedExecutors);
         }
-        
+
         return ret;
     }
 
@@ -299,14 +307,14 @@ public class Cluster {
 
     /**
      * Assign the slot to the executors for this topology.
-     * 
+     *
      * @throws RuntimeException if the specified slot is already occupied.
      */
     public void assign(WorkerSlot slot, String topologyId, Collection<ExecutorDetails> executors) {
         if (this.isSlotOccupied(slot)) {
             throw new RuntimeException("slot: [" + slot.getNodeId() + ", " + slot.getPort() + "] is already occupied.");
         }
-        
+
         SchedulerAssignmentImpl assignment = (SchedulerAssignmentImpl)this.getAssignmentById(topologyId);
         if (assignment == null) {
             assignment = new SchedulerAssignmentImpl(topologyId, new HashMap<ExecutorDetails, WorkerSlot>());
@@ -314,7 +322,9 @@ public class Cluster {
         } else {
             for (ExecutorDetails executor : executors) {
                  if (assignment.isExecutorAssigned(executor)) {
-                     throw new RuntimeException("the executor is already assigned, you should unassign it before assign it to another slot.");
+                     throw new RuntimeException("Attempting to assign executor: " + executor + " of topology: " + topologyId
+                             + " to workerslot: " + slot + ". The executor is already assigned to workerslot: " + assignment.getExecutorToSlot().get(executor)
+                             + ". The executor must be unassigned before it can be assigned to another slot!");
                  }
             }
         }
@@ -333,7 +343,7 @@ public class Cluster {
 
         return slots;
     }
-    
+
     public List<WorkerSlot> getAssignableSlots() {
         List<WorkerSlot> slots = new ArrayList<WorkerSlot>();
         for (SupervisorDetails supervisor : this.supervisors.values()) {
@@ -345,7 +355,7 @@ public class Cluster {
 
     /**
      * Free the specified slot.
-     * 
+     *
      * @param slot
      */
     public void freeSlot(WorkerSlot slot) {
@@ -356,10 +366,10 @@ public class Cluster {
             }
         }
     }
-    
+
     /**
      * free the slots.
-     * 
+     *
      * @param slots
      */
     public void freeSlots(Collection<WorkerSlot> slots) {
@@ -380,10 +390,10 @@ public class Cluster {
                 return true;
             }
         }
-        
+
         return false;
     }
-    
+
     /**
      * get the current assignment for the topology.
      */
@@ -415,7 +425,7 @@ public class Cluster {
 
         return null;
     }
-    
+
     public Collection<WorkerSlot> getUsedSlots() {
         Set<WorkerSlot> ret = new HashSet();
         for(SchedulerAssignmentImpl s: assignments.values()) {
@@ -426,7 +436,7 @@ public class Cluster {
 
     /**
      * Get all the supervisors on the specified <code>host</code>.
-     * 
+     *
      * @param host hostname of the supervisor
      * @return the <code>SupervisorDetails</code> object.
      */
@@ -448,7 +458,7 @@ public class Cluster {
      */
     public Map<String, SchedulerAssignment> getAssignments() {
         Map<String, SchedulerAssignment> ret = new HashMap<String, SchedulerAssignment>(this.assignments.size());
-        
+
         for (String topologyId : this.assignments.keySet()) {
             ret.put(topologyId, this.assignments.get(topologyId));
         }
@@ -541,7 +551,7 @@ public class Cluster {
     /*
     * Get heap memory usage for a worker's main process and logwriter process
     * */
-    private Double getAssignedMemoryForSlot(Map topConf) {
+    public Double getAssignedMemoryForSlot(Map topConf) {
         Double totalWorkerMemory = 0.0;
         final Integer TOPOLOGY_WORKER_DEFAULT_MEMORY_ALLOCATION = 768;
 
@@ -604,27 +614,49 @@ public class Cluster {
     }
 
     /*
-    * Update memory usage for each topology and each supervisor node after every round of scheduling
-    * */
+     * Update memory usage for each topology and each supervisor node after every round of scheduling
+     * */
     public void updateAssignedMemoryForTopologyAndSupervisor(Topologies topologies) {
         Map<String, Double> supervisorToAssignedMem = new HashMap<String, Double>();
 
         for (Map.Entry<String, SchedulerAssignment> entry : this.getAssignments().entrySet()) {
             String topId = entry.getValue().getTopologyId();
+            if (topologies.getById(topId) == null) {
+                continue;
+            }
             Map topConf = topologies.getById(topId).getConf();
             Double assignedMemForTopology = 0.0;
             Double assignedMemPerSlot = getAssignedMemoryForSlot(topConf);
+
+            Map<WorkerSlot, Double[]> workerResources;
+            if (this.workerResources.containsKey(topId)){
+                workerResources = this.workerResources.get(topId);
+            } else {
+                workerResources = new HashMap<WorkerSlot, Double[]>();
+                this.workerResources.put(topId, workerResources);
+            }
+
             for (WorkerSlot ws: entry.getValue().getSlots()) {
                 assignedMemForTopology += assignedMemPerSlot;
                 String nodeId = ws.getNodeId();
+
+                // for non-RAS, these are all constant
+                if (workerResources.containsKey(ws)){
+                    Double[] worker_resources = workerResources.get(ws);
+                    worker_resources[0] = assignedMemPerSlot;
+                } else {
+                    Double[] worker_resources = {assignedMemPerSlot, 0.0, 0.0};
+                    workerResources.put(ws, worker_resources);
+                }
+
                 if (supervisorToAssignedMem.containsKey(nodeId)) {
                     supervisorToAssignedMem.put(nodeId, supervisorToAssignedMem.get(nodeId) + assignedMemPerSlot);
                 } else {
                     supervisorToAssignedMem.put(nodeId, assignedMemPerSlot);
                 }
             }
-            if (this.getTopologyResourcesMap().containsKey(topId)) {
-                Double[] topo_resources = getTopologyResourcesMap().get(topId);
+            if (topologyResources.containsKey(topId)) {
+                Double[] topo_resources = topologyResources.get(topId);
                 topo_resources[3] = assignedMemForTopology;
             } else {
                 Double[] topo_resources = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
@@ -646,6 +678,74 @@ public class Cluster {
         }
     }
 
+    private static final Double PER_WORKER_CPU_SWAG = 100.0;
+    /**
+     * Update CPU usage for each topology and each supervisor node
+     */
+    public void updateAssignedCpuForTopologyAndSupervisor(Topologies topologies) {
+        Map<String, Double> supervisorToAssignedCpu = new HashMap<String, Double>();
+
+        for (Map.Entry<String, SchedulerAssignment> entry : getAssignments().entrySet()) {
+            String topId = entry.getValue().getTopologyId();
+            if (topologies.getById(topId) == null) {
+                continue;
+            }
+            Map topConf = topologies.getById(topId).getConf();
+            Double assignedCpuForTopology = 0.0;
+
+            Map<WorkerSlot, Double[]> workerResources;
+            if (this.workerResources.containsKey(topId)){
+                workerResources = this.workerResources.get(topId);
+            } else {
+                workerResources = new HashMap<WorkerSlot, Double[]>();
+                this.workerResources.put(topId, workerResources);
+            }
+
+            for (WorkerSlot ws: entry.getValue().getSlots()) {
+                assignedCpuForTopology += PER_WORKER_CPU_SWAG;
+                String nodeId = ws.getNodeId();
+
+                // for non-RAS, these are all constant
+                if (workerResources.containsKey(ws)){
+                    Double[] worker_resources = workerResources.get(ws);
+                    worker_resources[2] = PER_WORKER_CPU_SWAG;
+                } else {
+                    Double[] worker_resources = {0.0, 0.0, PER_WORKER_CPU_SWAG};
+                    workerResources.put(ws, worker_resources);
+                }
+
+                if (supervisorToAssignedCpu.containsKey(nodeId)) {
+                    supervisorToAssignedCpu.put(nodeId, supervisorToAssignedCpu.get(nodeId) + PER_WORKER_CPU_SWAG);
+                } else {
+                    supervisorToAssignedCpu.put(nodeId, PER_WORKER_CPU_SWAG);
+                }
+            }
+
+            this.setWorkerResources(topId, workerResources);
+
+            if (getTopologyResourcesMap().containsKey(topId)) {
+                Double[] topo_resources = getTopologyResourcesMap().get(topId);
+                topo_resources[5] = assignedCpuForTopology;
+            } else {
+                Double[] topo_resources = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
+                topo_resources[5] = assignedCpuForTopology;
+                setTopologyResources(topId, topo_resources);
+            }
+        }
+
+        for (Map.Entry<String, Double> entry : supervisorToAssignedCpu.entrySet()) {
+            String nodeId = entry.getKey();
+            if (supervisorsResources.containsKey(nodeId)) {
+                Double[] supervisor_resources = supervisorsResources.get(nodeId);
+                supervisor_resources[3] = entry.getValue();
+            } else {
+                Double[] supervisor_resources = {0.0, 0.0, 0.0, 0.0};
+                supervisor_resources[3] = entry.getValue();
+                supervisorsResources.put(nodeId, supervisor_resources);
+            }
+        }
+    }
+
     /**
      * set scheduler status for a topology
      */
@@ -698,6 +798,10 @@ public class Cluster {
         return this.topologyResources;
     }
 
+    public void setSupervisorResources(String supervisorId, Double[] resources) {
+        this.supervisorsResources.put(supervisorId, resources);
+    }
+
     /**
      * Sets the amount of used and free resources on a supervisor. Used for displaying resource information on the UI
      * @param supervisorResources a map where the key is the supervisor id and the value is a map that represents
@@ -715,4 +819,37 @@ public class Cluster {
     public Map<String, Double[]> getSupervisorsResourcesMap() {
         return this.supervisorsResources;
     }
+
+    /**
+     * Gets the reference to the full topology->worker resource map.
+     * @return map of topology -> map of worker slot -> resources for that worker
+     */
+    public Map<String, Map<WorkerSlot, Double[]>> getWorkerResourcesMap() {
+        return this.workerResources;
+    }
+
+    /**
+     * Set the worker resources map for all topologies in the source map.
+     * @param resources map of topology id -> worker slot -> resources to add
+     */
+    public void setWorkerResourcesMap(Map<String, Map<WorkerSlot, Double[]>> resources) {
+        this.workerResources.putAll(resources);
+    }
+
+    /**
+     * Set the worker resources map for a specific topologyId
+     * @param topologyId the id of the topology
+     * @param resources map for the topology
+     */
+    public void setWorkerResources(String topologyId, Map<WorkerSlot, Double[]> resources) {
+        this.workerResources.put(topologyId, resources);
+    }
+
+    public INimbus getINimbus() {
+        return this.inimbus;
+    }
+
+    public Map getConf() {
+        return this.conf;
+    }
 }
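
A minimal sketch (not part of this patch) of how a consumer such as the UI layer might read the per-worker resource arrays introduced above, assuming the Double[] {memOnHeap, memOffHeap, cpu} ordering that updateAssignedMemoryForTopologyAndSupervisor and updateAssignedCpuForTopologyAndSupervisor write into the map; the variable names and printing below are illustrative only:

    // Illustrative sketch: Cluster.getWorkerResourcesMap() returns
    // topology id -> (WorkerSlot -> Double[]{memOnHeap, memOffHeap, cpu})
    Map<String, Map<WorkerSlot, Double[]>> byTopology = cluster.getWorkerResourcesMap();
    for (Map.Entry<String, Map<WorkerSlot, Double[]>> topoEntry : byTopology.entrySet()) {
        String topologyId = topoEntry.getKey();
        for (Map.Entry<WorkerSlot, Double[]> slotEntry : topoEntry.getValue().entrySet()) {
            WorkerSlot slot = slotEntry.getKey();
            Double[] res = slotEntry.getValue();
            System.out.println(topologyId + " " + slot.getNodeId() + ":" + slot.getPort()
                    + " memOnHeap=" + res[0] + " memOffHeap=" + res[1] + " cpu=" + res[2]);
        }
    }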


[8/9] storm git commit: Merge branch 'STORM-1994_add_per_worker_components_and_resource_usage_1x' of https://github.com/abellina/storm into STORM-1994-1.x

Posted by ka...@apache.org.
Merge branch 'STORM-1994_add_per_worker_components_and_resource_usage_1x' of https://github.com/abellina/storm into STORM-1994-1.x


Project: http://git-wip-us.apache.org/repos/asf/storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/storm/commit/39544ead
Tree: http://git-wip-us.apache.org/repos/asf/storm/tree/39544ead
Diff: http://git-wip-us.apache.org/repos/asf/storm/diff/39544ead

Branch: refs/heads/1.x-branch
Commit: 39544ead3201d2915c53881896357958076735a4
Parents: ce38849 0e0bcf2
Author: Jungtaek Lim <ka...@gmail.com>
Authored: Mon Aug 22 17:17:18 2016 +0900
Committer: Jungtaek Lim <ka...@gmail.com>
Committed: Mon Aug 22 17:17:18 2016 +0900

----------------------------------------------------------------------
 docs/STORM-UI-REST-API.md                       |  121 +-
 docs/images/supervisor_page.png                 |  Bin 0 -> 133290 bytes
 .../src/clj/org/apache/storm/daemon/nimbus.clj  |  296 +-
 storm-core/src/clj/org/apache/storm/stats.clj   |   68 +-
 storm-core/src/clj/org/apache/storm/ui/core.clj |   94 +-
 .../org/apache/storm/generated/Assignment.java  |  244 +-
 .../storm/generated/ClusterWorkerHeartbeat.java |   52 +-
 .../storm/generated/ComponentPageInfo.java      |  220 +-
 .../org/apache/storm/generated/Credentials.java |   44 +-
 .../jvm/org/apache/storm/generated/HBNodes.java |   32 +-
 .../org/apache/storm/generated/HBRecords.java   |   36 +-
 .../storm/generated/LSApprovedWorkers.java      |   44 +-
 .../generated/LSSupervisorAssignments.java      |   48 +-
 .../apache/storm/generated/LSTopoHistory.java   |   64 +-
 .../storm/generated/LSTopoHistoryList.java      |   36 +-
 .../storm/generated/LSWorkerHeartbeat.java      |   36 +-
 .../apache/storm/generated/ListBlobsResult.java |   32 +-
 .../apache/storm/generated/LocalAssignment.java |   36 +-
 .../apache/storm/generated/LocalStateData.java  |   48 +-
 .../org/apache/storm/generated/LogConfig.java   |   48 +-
 .../jvm/org/apache/storm/generated/Nimbus.java  | 3486 ++++++++++++------
 .../org/apache/storm/generated/NodeInfo.java    |   32 +-
 .../storm/generated/RebalanceOptions.java       |   44 +-
 .../storm/generated/SettableBlobMeta.java       |   36 +-
 .../org/apache/storm/generated/StormBase.java   |   92 +-
 .../apache/storm/generated/SupervisorInfo.java  |  152 +-
 .../storm/generated/SupervisorPageInfo.java     |  624 ++++
 .../storm/generated/TopologyHistoryInfo.java    |   32 +-
 .../storm/generated/TopologyPageInfo.java       |  284 +-
 .../apache/storm/generated/WorkerSummary.java   | 1880 ++++++++++
 .../jvm/org/apache/storm/scheduler/Cluster.java |  217 +-
 .../resource/ResourceAwareScheduler.java        |    9 +
 .../auth/authorizer/SimpleACLAuthorizer.java    |    7 +-
 storm-core/src/py/storm/Nimbus-remote           |    7 +
 storm-core/src/py/storm/Nimbus.py               |  272 +-
 storm-core/src/py/storm/ttypes.py               | 1457 ++++++--
 storm-core/src/storm.thrift                     |   25 +
 storm-core/src/ui/public/component.html         |    8 +
 storm-core/src/ui/public/css/style.css          |   20 +
 storm-core/src/ui/public/js/script.js           |  191 +
 storm-core/src/ui/public/supervisor.html        |  132 +
 .../public/templates/index-page-template.html   |    4 +-
 .../templates/supervisor-page-template.html     |  145 +
 .../templates/topology-page-template.html       |  208 +-
 storm-core/src/ui/public/topology.html          |   12 +-
 .../test/clj/org/apache/storm/nimbus_test.clj   |   72 +-
 .../test/clj/org/apache/storm/stats_test.clj    |  134 +
 47 files changed, 8820 insertions(+), 2361 deletions(-)
----------------------------------------------------------------------



[6/9] storm git commit: STORM-1994: Add table with per-topology and worker resource usage and components in (new) supervisor and topology pages

Posted by ka...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/jvm/org/apache/storm/generated/ComponentPageInfo.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/org/apache/storm/generated/ComponentPageInfo.java b/storm-core/src/jvm/org/apache/storm/generated/ComponentPageInfo.java
index ff02aca..55bb200 100644
--- a/storm-core/src/jvm/org/apache/storm/generated/ComponentPageInfo.java
+++ b/storm-core/src/jvm/org/apache/storm/generated/ComponentPageInfo.java
@@ -1657,16 +1657,16 @@ public class ComponentPageInfo implements org.apache.thrift.TBase<ComponentPageI
           case 7: // WINDOW_TO_STATS
             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
               {
-                org.apache.thrift.protocol.TMap _map422 = iprot.readMapBegin();
-                struct.window_to_stats = new HashMap<String,ComponentAggregateStats>(2*_map422.size);
-                String _key423;
-                ComponentAggregateStats _val424;
-                for (int _i425 = 0; _i425 < _map422.size; ++_i425)
+                org.apache.thrift.protocol.TMap _map456 = iprot.readMapBegin();
+                struct.window_to_stats = new HashMap<String,ComponentAggregateStats>(2*_map456.size);
+                String _key457;
+                ComponentAggregateStats _val458;
+                for (int _i459 = 0; _i459 < _map456.size; ++_i459)
                 {
-                  _key423 = iprot.readString();
-                  _val424 = new ComponentAggregateStats();
-                  _val424.read(iprot);
-                  struct.window_to_stats.put(_key423, _val424);
+                  _key457 = iprot.readString();
+                  _val458 = new ComponentAggregateStats();
+                  _val458.read(iprot);
+                  struct.window_to_stats.put(_key457, _val458);
                 }
                 iprot.readMapEnd();
               }
@@ -1678,17 +1678,17 @@ public class ComponentPageInfo implements org.apache.thrift.TBase<ComponentPageI
           case 8: // GSID_TO_INPUT_STATS
             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
               {
-                org.apache.thrift.protocol.TMap _map426 = iprot.readMapBegin();
-                struct.gsid_to_input_stats = new HashMap<GlobalStreamId,ComponentAggregateStats>(2*_map426.size);
-                GlobalStreamId _key427;
-                ComponentAggregateStats _val428;
-                for (int _i429 = 0; _i429 < _map426.size; ++_i429)
+                org.apache.thrift.protocol.TMap _map460 = iprot.readMapBegin();
+                struct.gsid_to_input_stats = new HashMap<GlobalStreamId,ComponentAggregateStats>(2*_map460.size);
+                GlobalStreamId _key461;
+                ComponentAggregateStats _val462;
+                for (int _i463 = 0; _i463 < _map460.size; ++_i463)
                 {
-                  _key427 = new GlobalStreamId();
-                  _key427.read(iprot);
-                  _val428 = new ComponentAggregateStats();
-                  _val428.read(iprot);
-                  struct.gsid_to_input_stats.put(_key427, _val428);
+                  _key461 = new GlobalStreamId();
+                  _key461.read(iprot);
+                  _val462 = new ComponentAggregateStats();
+                  _val462.read(iprot);
+                  struct.gsid_to_input_stats.put(_key461, _val462);
                 }
                 iprot.readMapEnd();
               }
@@ -1700,16 +1700,16 @@ public class ComponentPageInfo implements org.apache.thrift.TBase<ComponentPageI
           case 9: // SID_TO_OUTPUT_STATS
             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
               {
-                org.apache.thrift.protocol.TMap _map430 = iprot.readMapBegin();
-                struct.sid_to_output_stats = new HashMap<String,ComponentAggregateStats>(2*_map430.size);
-                String _key431;
-                ComponentAggregateStats _val432;
-                for (int _i433 = 0; _i433 < _map430.size; ++_i433)
+                org.apache.thrift.protocol.TMap _map464 = iprot.readMapBegin();
+                struct.sid_to_output_stats = new HashMap<String,ComponentAggregateStats>(2*_map464.size);
+                String _key465;
+                ComponentAggregateStats _val466;
+                for (int _i467 = 0; _i467 < _map464.size; ++_i467)
                 {
-                  _key431 = iprot.readString();
-                  _val432 = new ComponentAggregateStats();
-                  _val432.read(iprot);
-                  struct.sid_to_output_stats.put(_key431, _val432);
+                  _key465 = iprot.readString();
+                  _val466 = new ComponentAggregateStats();
+                  _val466.read(iprot);
+                  struct.sid_to_output_stats.put(_key465, _val466);
                 }
                 iprot.readMapEnd();
               }
@@ -1721,14 +1721,14 @@ public class ComponentPageInfo implements org.apache.thrift.TBase<ComponentPageI
           case 10: // EXEC_STATS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list434 = iprot.readListBegin();
-                struct.exec_stats = new ArrayList<ExecutorAggregateStats>(_list434.size);
-                ExecutorAggregateStats _elem435;
-                for (int _i436 = 0; _i436 < _list434.size; ++_i436)
+                org.apache.thrift.protocol.TList _list468 = iprot.readListBegin();
+                struct.exec_stats = new ArrayList<ExecutorAggregateStats>(_list468.size);
+                ExecutorAggregateStats _elem469;
+                for (int _i470 = 0; _i470 < _list468.size; ++_i470)
                 {
-                  _elem435 = new ExecutorAggregateStats();
-                  _elem435.read(iprot);
-                  struct.exec_stats.add(_elem435);
+                  _elem469 = new ExecutorAggregateStats();
+                  _elem469.read(iprot);
+                  struct.exec_stats.add(_elem469);
                 }
                 iprot.readListEnd();
               }
@@ -1740,14 +1740,14 @@ public class ComponentPageInfo implements org.apache.thrift.TBase<ComponentPageI
           case 11: // ERRORS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list437 = iprot.readListBegin();
-                struct.errors = new ArrayList<ErrorInfo>(_list437.size);
-                ErrorInfo _elem438;
-                for (int _i439 = 0; _i439 < _list437.size; ++_i439)
+                org.apache.thrift.protocol.TList _list471 = iprot.readListBegin();
+                struct.errors = new ArrayList<ErrorInfo>(_list471.size);
+                ErrorInfo _elem472;
+                for (int _i473 = 0; _i473 < _list471.size; ++_i473)
                 {
-                  _elem438 = new ErrorInfo();
-                  _elem438.read(iprot);
-                  struct.errors.add(_elem438);
+                  _elem472 = new ErrorInfo();
+                  _elem472.read(iprot);
+                  struct.errors.add(_elem472);
                 }
                 iprot.readListEnd();
               }
@@ -1841,10 +1841,10 @@ public class ComponentPageInfo implements org.apache.thrift.TBase<ComponentPageI
           oprot.writeFieldBegin(WINDOW_TO_STATS_FIELD_DESC);
           {
             oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.window_to_stats.size()));
-            for (Map.Entry<String, ComponentAggregateStats> _iter440 : struct.window_to_stats.entrySet())
+            for (Map.Entry<String, ComponentAggregateStats> _iter474 : struct.window_to_stats.entrySet())
             {
-              oprot.writeString(_iter440.getKey());
-              _iter440.getValue().write(oprot);
+              oprot.writeString(_iter474.getKey());
+              _iter474.getValue().write(oprot);
             }
             oprot.writeMapEnd();
           }
@@ -1856,10 +1856,10 @@ public class ComponentPageInfo implements org.apache.thrift.TBase<ComponentPageI
           oprot.writeFieldBegin(GSID_TO_INPUT_STATS_FIELD_DESC);
           {
             oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.STRUCT, struct.gsid_to_input_stats.size()));
-            for (Map.Entry<GlobalStreamId, ComponentAggregateStats> _iter441 : struct.gsid_to_input_stats.entrySet())
+            for (Map.Entry<GlobalStreamId, ComponentAggregateStats> _iter475 : struct.gsid_to_input_stats.entrySet())
             {
-              _iter441.getKey().write(oprot);
-              _iter441.getValue().write(oprot);
+              _iter475.getKey().write(oprot);
+              _iter475.getValue().write(oprot);
             }
             oprot.writeMapEnd();
           }
@@ -1871,10 +1871,10 @@ public class ComponentPageInfo implements org.apache.thrift.TBase<ComponentPageI
           oprot.writeFieldBegin(SID_TO_OUTPUT_STATS_FIELD_DESC);
           {
             oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.sid_to_output_stats.size()));
-            for (Map.Entry<String, ComponentAggregateStats> _iter442 : struct.sid_to_output_stats.entrySet())
+            for (Map.Entry<String, ComponentAggregateStats> _iter476 : struct.sid_to_output_stats.entrySet())
             {
-              oprot.writeString(_iter442.getKey());
-              _iter442.getValue().write(oprot);
+              oprot.writeString(_iter476.getKey());
+              _iter476.getValue().write(oprot);
             }
             oprot.writeMapEnd();
           }
@@ -1886,9 +1886,9 @@ public class ComponentPageInfo implements org.apache.thrift.TBase<ComponentPageI
           oprot.writeFieldBegin(EXEC_STATS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.exec_stats.size()));
-            for (ExecutorAggregateStats _iter443 : struct.exec_stats)
+            for (ExecutorAggregateStats _iter477 : struct.exec_stats)
             {
-              _iter443.write(oprot);
+              _iter477.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -1900,9 +1900,9 @@ public class ComponentPageInfo implements org.apache.thrift.TBase<ComponentPageI
           oprot.writeFieldBegin(ERRORS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.errors.size()));
-            for (ErrorInfo _iter444 : struct.errors)
+            for (ErrorInfo _iter478 : struct.errors)
             {
-              _iter444.write(oprot);
+              _iter478.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -2010,48 +2010,48 @@ public class ComponentPageInfo implements org.apache.thrift.TBase<ComponentPageI
       if (struct.is_set_window_to_stats()) {
         {
           oprot.writeI32(struct.window_to_stats.size());
-          for (Map.Entry<String, ComponentAggregateStats> _iter445 : struct.window_to_stats.entrySet())
+          for (Map.Entry<String, ComponentAggregateStats> _iter479 : struct.window_to_stats.entrySet())
           {
-            oprot.writeString(_iter445.getKey());
-            _iter445.getValue().write(oprot);
+            oprot.writeString(_iter479.getKey());
+            _iter479.getValue().write(oprot);
           }
         }
       }
       if (struct.is_set_gsid_to_input_stats()) {
         {
           oprot.writeI32(struct.gsid_to_input_stats.size());
-          for (Map.Entry<GlobalStreamId, ComponentAggregateStats> _iter446 : struct.gsid_to_input_stats.entrySet())
+          for (Map.Entry<GlobalStreamId, ComponentAggregateStats> _iter480 : struct.gsid_to_input_stats.entrySet())
           {
-            _iter446.getKey().write(oprot);
-            _iter446.getValue().write(oprot);
+            _iter480.getKey().write(oprot);
+            _iter480.getValue().write(oprot);
           }
         }
       }
       if (struct.is_set_sid_to_output_stats()) {
         {
           oprot.writeI32(struct.sid_to_output_stats.size());
-          for (Map.Entry<String, ComponentAggregateStats> _iter447 : struct.sid_to_output_stats.entrySet())
+          for (Map.Entry<String, ComponentAggregateStats> _iter481 : struct.sid_to_output_stats.entrySet())
           {
-            oprot.writeString(_iter447.getKey());
-            _iter447.getValue().write(oprot);
+            oprot.writeString(_iter481.getKey());
+            _iter481.getValue().write(oprot);
           }
         }
       }
       if (struct.is_set_exec_stats()) {
         {
           oprot.writeI32(struct.exec_stats.size());
-          for (ExecutorAggregateStats _iter448 : struct.exec_stats)
+          for (ExecutorAggregateStats _iter482 : struct.exec_stats)
           {
-            _iter448.write(oprot);
+            _iter482.write(oprot);
           }
         }
       }
       if (struct.is_set_errors()) {
         {
           oprot.writeI32(struct.errors.size());
-          for (ErrorInfo _iter449 : struct.errors)
+          for (ErrorInfo _iter483 : struct.errors)
           {
-            _iter449.write(oprot);
+            _iter483.write(oprot);
           }
         }
       }
@@ -2095,77 +2095,77 @@ public class ComponentPageInfo implements org.apache.thrift.TBase<ComponentPageI
       }
       if (incoming.get(4)) {
         {
-          org.apache.thrift.protocol.TMap _map450 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.window_to_stats = new HashMap<String,ComponentAggregateStats>(2*_map450.size);
-          String _key451;
-          ComponentAggregateStats _val452;
-          for (int _i453 = 0; _i453 < _map450.size; ++_i453)
+          org.apache.thrift.protocol.TMap _map484 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.window_to_stats = new HashMap<String,ComponentAggregateStats>(2*_map484.size);
+          String _key485;
+          ComponentAggregateStats _val486;
+          for (int _i487 = 0; _i487 < _map484.size; ++_i487)
           {
-            _key451 = iprot.readString();
-            _val452 = new ComponentAggregateStats();
-            _val452.read(iprot);
-            struct.window_to_stats.put(_key451, _val452);
+            _key485 = iprot.readString();
+            _val486 = new ComponentAggregateStats();
+            _val486.read(iprot);
+            struct.window_to_stats.put(_key485, _val486);
           }
         }
         struct.set_window_to_stats_isSet(true);
       }
       if (incoming.get(5)) {
         {
-          org.apache.thrift.protocol.TMap _map454 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.gsid_to_input_stats = new HashMap<GlobalStreamId,ComponentAggregateStats>(2*_map454.size);
-          GlobalStreamId _key455;
-          ComponentAggregateStats _val456;
-          for (int _i457 = 0; _i457 < _map454.size; ++_i457)
+          org.apache.thrift.protocol.TMap _map488 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.gsid_to_input_stats = new HashMap<GlobalStreamId,ComponentAggregateStats>(2*_map488.size);
+          GlobalStreamId _key489;
+          ComponentAggregateStats _val490;
+          for (int _i491 = 0; _i491 < _map488.size; ++_i491)
           {
-            _key455 = new GlobalStreamId();
-            _key455.read(iprot);
-            _val456 = new ComponentAggregateStats();
-            _val456.read(iprot);
-            struct.gsid_to_input_stats.put(_key455, _val456);
+            _key489 = new GlobalStreamId();
+            _key489.read(iprot);
+            _val490 = new ComponentAggregateStats();
+            _val490.read(iprot);
+            struct.gsid_to_input_stats.put(_key489, _val490);
           }
         }
         struct.set_gsid_to_input_stats_isSet(true);
       }
       if (incoming.get(6)) {
         {
-          org.apache.thrift.protocol.TMap _map458 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.sid_to_output_stats = new HashMap<String,ComponentAggregateStats>(2*_map458.size);
-          String _key459;
-          ComponentAggregateStats _val460;
-          for (int _i461 = 0; _i461 < _map458.size; ++_i461)
+          org.apache.thrift.protocol.TMap _map492 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.sid_to_output_stats = new HashMap<String,ComponentAggregateStats>(2*_map492.size);
+          String _key493;
+          ComponentAggregateStats _val494;
+          for (int _i495 = 0; _i495 < _map492.size; ++_i495)
           {
-            _key459 = iprot.readString();
-            _val460 = new ComponentAggregateStats();
-            _val460.read(iprot);
-            struct.sid_to_output_stats.put(_key459, _val460);
+            _key493 = iprot.readString();
+            _val494 = new ComponentAggregateStats();
+            _val494.read(iprot);
+            struct.sid_to_output_stats.put(_key493, _val494);
           }
         }
         struct.set_sid_to_output_stats_isSet(true);
       }
       if (incoming.get(7)) {
         {
-          org.apache.thrift.protocol.TList _list462 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.exec_stats = new ArrayList<ExecutorAggregateStats>(_list462.size);
-          ExecutorAggregateStats _elem463;
-          for (int _i464 = 0; _i464 < _list462.size; ++_i464)
+          org.apache.thrift.protocol.TList _list496 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.exec_stats = new ArrayList<ExecutorAggregateStats>(_list496.size);
+          ExecutorAggregateStats _elem497;
+          for (int _i498 = 0; _i498 < _list496.size; ++_i498)
           {
-            _elem463 = new ExecutorAggregateStats();
-            _elem463.read(iprot);
-            struct.exec_stats.add(_elem463);
+            _elem497 = new ExecutorAggregateStats();
+            _elem497.read(iprot);
+            struct.exec_stats.add(_elem497);
           }
         }
         struct.set_exec_stats_isSet(true);
       }
       if (incoming.get(8)) {
         {
-          org.apache.thrift.protocol.TList _list465 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.errors = new ArrayList<ErrorInfo>(_list465.size);
-          ErrorInfo _elem466;
-          for (int _i467 = 0; _i467 < _list465.size; ++_i467)
+          org.apache.thrift.protocol.TList _list499 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.errors = new ArrayList<ErrorInfo>(_list499.size);
+          ErrorInfo _elem500;
+          for (int _i501 = 0; _i501 < _list499.size; ++_i501)
           {
-            _elem466 = new ErrorInfo();
-            _elem466.read(iprot);
-            struct.errors.add(_elem466);
+            _elem500 = new ErrorInfo();
+            _elem500.read(iprot);
+            struct.errors.add(_elem500);
           }
         }
         struct.set_errors_isSet(true);

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/jvm/org/apache/storm/generated/Credentials.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/org/apache/storm/generated/Credentials.java b/storm-core/src/jvm/org/apache/storm/generated/Credentials.java
index 117865c..5319f76 100644
--- a/storm-core/src/jvm/org/apache/storm/generated/Credentials.java
+++ b/storm-core/src/jvm/org/apache/storm/generated/Credentials.java
@@ -365,15 +365,15 @@ public class Credentials implements org.apache.thrift.TBase<Credentials, Credent
           case 1: // CREDS
             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
               {
-                org.apache.thrift.protocol.TMap _map478 = iprot.readMapBegin();
-                struct.creds = new HashMap<String,String>(2*_map478.size);
-                String _key479;
-                String _val480;
-                for (int _i481 = 0; _i481 < _map478.size; ++_i481)
+                org.apache.thrift.protocol.TMap _map512 = iprot.readMapBegin();
+                struct.creds = new HashMap<String,String>(2*_map512.size);
+                String _key513;
+                String _val514;
+                for (int _i515 = 0; _i515 < _map512.size; ++_i515)
                 {
-                  _key479 = iprot.readString();
-                  _val480 = iprot.readString();
-                  struct.creds.put(_key479, _val480);
+                  _key513 = iprot.readString();
+                  _val514 = iprot.readString();
+                  struct.creds.put(_key513, _val514);
                 }
                 iprot.readMapEnd();
               }
@@ -399,10 +399,10 @@ public class Credentials implements org.apache.thrift.TBase<Credentials, Credent
         oprot.writeFieldBegin(CREDS_FIELD_DESC);
         {
           oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.creds.size()));
-          for (Map.Entry<String, String> _iter482 : struct.creds.entrySet())
+          for (Map.Entry<String, String> _iter516 : struct.creds.entrySet())
           {
-            oprot.writeString(_iter482.getKey());
-            oprot.writeString(_iter482.getValue());
+            oprot.writeString(_iter516.getKey());
+            oprot.writeString(_iter516.getValue());
           }
           oprot.writeMapEnd();
         }
@@ -427,10 +427,10 @@ public class Credentials implements org.apache.thrift.TBase<Credentials, Credent
       TTupleProtocol oprot = (TTupleProtocol) prot;
       {
         oprot.writeI32(struct.creds.size());
-        for (Map.Entry<String, String> _iter483 : struct.creds.entrySet())
+        for (Map.Entry<String, String> _iter517 : struct.creds.entrySet())
         {
-          oprot.writeString(_iter483.getKey());
-          oprot.writeString(_iter483.getValue());
+          oprot.writeString(_iter517.getKey());
+          oprot.writeString(_iter517.getValue());
         }
       }
     }
@@ -439,15 +439,15 @@ public class Credentials implements org.apache.thrift.TBase<Credentials, Credent
     public void read(org.apache.thrift.protocol.TProtocol prot, Credentials struct) throws org.apache.thrift.TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       {
-        org.apache.thrift.protocol.TMap _map484 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-        struct.creds = new HashMap<String,String>(2*_map484.size);
-        String _key485;
-        String _val486;
-        for (int _i487 = 0; _i487 < _map484.size; ++_i487)
+        org.apache.thrift.protocol.TMap _map518 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+        struct.creds = new HashMap<String,String>(2*_map518.size);
+        String _key519;
+        String _val520;
+        for (int _i521 = 0; _i521 < _map518.size; ++_i521)
         {
-          _key485 = iprot.readString();
-          _val486 = iprot.readString();
-          struct.creds.put(_key485, _val486);
+          _key519 = iprot.readString();
+          _val520 = iprot.readString();
+          struct.creds.put(_key519, _val520);
         }
       }
       struct.set_creds_isSet(true);

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/jvm/org/apache/storm/generated/HBNodes.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/org/apache/storm/generated/HBNodes.java b/storm-core/src/jvm/org/apache/storm/generated/HBNodes.java
index 0f0ae15..5435af2 100644
--- a/storm-core/src/jvm/org/apache/storm/generated/HBNodes.java
+++ b/storm-core/src/jvm/org/apache/storm/generated/HBNodes.java
@@ -364,13 +364,13 @@ public class HBNodes implements org.apache.thrift.TBase<HBNodes, HBNodes._Fields
           case 1: // PULSE_IDS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list730 = iprot.readListBegin();
-                struct.pulseIds = new ArrayList<String>(_list730.size);
-                String _elem731;
-                for (int _i732 = 0; _i732 < _list730.size; ++_i732)
+                org.apache.thrift.protocol.TList _list764 = iprot.readListBegin();
+                struct.pulseIds = new ArrayList<String>(_list764.size);
+                String _elem765;
+                for (int _i766 = 0; _i766 < _list764.size; ++_i766)
                 {
-                  _elem731 = iprot.readString();
-                  struct.pulseIds.add(_elem731);
+                  _elem765 = iprot.readString();
+                  struct.pulseIds.add(_elem765);
                 }
                 iprot.readListEnd();
               }
@@ -396,9 +396,9 @@ public class HBNodes implements org.apache.thrift.TBase<HBNodes, HBNodes._Fields
         oprot.writeFieldBegin(PULSE_IDS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.pulseIds.size()));
-          for (String _iter733 : struct.pulseIds)
+          for (String _iter767 : struct.pulseIds)
           {
-            oprot.writeString(_iter733);
+            oprot.writeString(_iter767);
           }
           oprot.writeListEnd();
         }
@@ -429,9 +429,9 @@ public class HBNodes implements org.apache.thrift.TBase<HBNodes, HBNodes._Fields
       if (struct.is_set_pulseIds()) {
         {
           oprot.writeI32(struct.pulseIds.size());
-          for (String _iter734 : struct.pulseIds)
+          for (String _iter768 : struct.pulseIds)
           {
-            oprot.writeString(_iter734);
+            oprot.writeString(_iter768);
           }
         }
       }
@@ -443,13 +443,13 @@ public class HBNodes implements org.apache.thrift.TBase<HBNodes, HBNodes._Fields
       BitSet incoming = iprot.readBitSet(1);
       if (incoming.get(0)) {
         {
-          org.apache.thrift.protocol.TList _list735 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.pulseIds = new ArrayList<String>(_list735.size);
-          String _elem736;
-          for (int _i737 = 0; _i737 < _list735.size; ++_i737)
+          org.apache.thrift.protocol.TList _list769 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.pulseIds = new ArrayList<String>(_list769.size);
+          String _elem770;
+          for (int _i771 = 0; _i771 < _list769.size; ++_i771)
           {
-            _elem736 = iprot.readString();
-            struct.pulseIds.add(_elem736);
+            _elem770 = iprot.readString();
+            struct.pulseIds.add(_elem770);
           }
         }
         struct.set_pulseIds_isSet(true);

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/jvm/org/apache/storm/generated/HBRecords.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/org/apache/storm/generated/HBRecords.java b/storm-core/src/jvm/org/apache/storm/generated/HBRecords.java
index c92b49a..d21dcc8 100644
--- a/storm-core/src/jvm/org/apache/storm/generated/HBRecords.java
+++ b/storm-core/src/jvm/org/apache/storm/generated/HBRecords.java
@@ -367,14 +367,14 @@ public class HBRecords implements org.apache.thrift.TBase<HBRecords, HBRecords._
           case 1: // PULSES
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list722 = iprot.readListBegin();
-                struct.pulses = new ArrayList<HBPulse>(_list722.size);
-                HBPulse _elem723;
-                for (int _i724 = 0; _i724 < _list722.size; ++_i724)
+                org.apache.thrift.protocol.TList _list756 = iprot.readListBegin();
+                struct.pulses = new ArrayList<HBPulse>(_list756.size);
+                HBPulse _elem757;
+                for (int _i758 = 0; _i758 < _list756.size; ++_i758)
                 {
-                  _elem723 = new HBPulse();
-                  _elem723.read(iprot);
-                  struct.pulses.add(_elem723);
+                  _elem757 = new HBPulse();
+                  _elem757.read(iprot);
+                  struct.pulses.add(_elem757);
                 }
                 iprot.readListEnd();
               }
@@ -400,9 +400,9 @@ public class HBRecords implements org.apache.thrift.TBase<HBRecords, HBRecords._
         oprot.writeFieldBegin(PULSES_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.pulses.size()));
-          for (HBPulse _iter725 : struct.pulses)
+          for (HBPulse _iter759 : struct.pulses)
           {
-            _iter725.write(oprot);
+            _iter759.write(oprot);
           }
           oprot.writeListEnd();
         }
@@ -433,9 +433,9 @@ public class HBRecords implements org.apache.thrift.TBase<HBRecords, HBRecords._
       if (struct.is_set_pulses()) {
         {
           oprot.writeI32(struct.pulses.size());
-          for (HBPulse _iter726 : struct.pulses)
+          for (HBPulse _iter760 : struct.pulses)
           {
-            _iter726.write(oprot);
+            _iter760.write(oprot);
           }
         }
       }
@@ -447,14 +447,14 @@ public class HBRecords implements org.apache.thrift.TBase<HBRecords, HBRecords._
       BitSet incoming = iprot.readBitSet(1);
       if (incoming.get(0)) {
         {
-          org.apache.thrift.protocol.TList _list727 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.pulses = new ArrayList<HBPulse>(_list727.size);
-          HBPulse _elem728;
-          for (int _i729 = 0; _i729 < _list727.size; ++_i729)
+          org.apache.thrift.protocol.TList _list761 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.pulses = new ArrayList<HBPulse>(_list761.size);
+          HBPulse _elem762;
+          for (int _i763 = 0; _i763 < _list761.size; ++_i763)
           {
-            _elem728 = new HBPulse();
-            _elem728.read(iprot);
-            struct.pulses.add(_elem728);
+            _elem762 = new HBPulse();
+            _elem762.read(iprot);
+            struct.pulses.add(_elem762);
           }
         }
         struct.set_pulses_isSet(true);

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/jvm/org/apache/storm/generated/LSApprovedWorkers.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/org/apache/storm/generated/LSApprovedWorkers.java b/storm-core/src/jvm/org/apache/storm/generated/LSApprovedWorkers.java
index 130a643..41c8a7d 100644
--- a/storm-core/src/jvm/org/apache/storm/generated/LSApprovedWorkers.java
+++ b/storm-core/src/jvm/org/apache/storm/generated/LSApprovedWorkers.java
@@ -365,15 +365,15 @@ public class LSApprovedWorkers implements org.apache.thrift.TBase<LSApprovedWork
           case 1: // APPROVED_WORKERS
             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
               {
-                org.apache.thrift.protocol.TMap _map652 = iprot.readMapBegin();
-                struct.approved_workers = new HashMap<String,Integer>(2*_map652.size);
-                String _key653;
-                int _val654;
-                for (int _i655 = 0; _i655 < _map652.size; ++_i655)
+                org.apache.thrift.protocol.TMap _map686 = iprot.readMapBegin();
+                struct.approved_workers = new HashMap<String,Integer>(2*_map686.size);
+                String _key687;
+                int _val688;
+                for (int _i689 = 0; _i689 < _map686.size; ++_i689)
                 {
-                  _key653 = iprot.readString();
-                  _val654 = iprot.readI32();
-                  struct.approved_workers.put(_key653, _val654);
+                  _key687 = iprot.readString();
+                  _val688 = iprot.readI32();
+                  struct.approved_workers.put(_key687, _val688);
                 }
                 iprot.readMapEnd();
               }
@@ -399,10 +399,10 @@ public class LSApprovedWorkers implements org.apache.thrift.TBase<LSApprovedWork
         oprot.writeFieldBegin(APPROVED_WORKERS_FIELD_DESC);
         {
           oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, struct.approved_workers.size()));
-          for (Map.Entry<String, Integer> _iter656 : struct.approved_workers.entrySet())
+          for (Map.Entry<String, Integer> _iter690 : struct.approved_workers.entrySet())
           {
-            oprot.writeString(_iter656.getKey());
-            oprot.writeI32(_iter656.getValue());
+            oprot.writeString(_iter690.getKey());
+            oprot.writeI32(_iter690.getValue());
           }
           oprot.writeMapEnd();
         }
@@ -427,10 +427,10 @@ public class LSApprovedWorkers implements org.apache.thrift.TBase<LSApprovedWork
       TTupleProtocol oprot = (TTupleProtocol) prot;
       {
         oprot.writeI32(struct.approved_workers.size());
-        for (Map.Entry<String, Integer> _iter657 : struct.approved_workers.entrySet())
+        for (Map.Entry<String, Integer> _iter691 : struct.approved_workers.entrySet())
         {
-          oprot.writeString(_iter657.getKey());
-          oprot.writeI32(_iter657.getValue());
+          oprot.writeString(_iter691.getKey());
+          oprot.writeI32(_iter691.getValue());
         }
       }
     }
@@ -439,15 +439,15 @@ public class LSApprovedWorkers implements org.apache.thrift.TBase<LSApprovedWork
     public void read(org.apache.thrift.protocol.TProtocol prot, LSApprovedWorkers struct) throws org.apache.thrift.TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       {
-        org.apache.thrift.protocol.TMap _map658 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, iprot.readI32());
-        struct.approved_workers = new HashMap<String,Integer>(2*_map658.size);
-        String _key659;
-        int _val660;
-        for (int _i661 = 0; _i661 < _map658.size; ++_i661)
+        org.apache.thrift.protocol.TMap _map692 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, iprot.readI32());
+        struct.approved_workers = new HashMap<String,Integer>(2*_map692.size);
+        String _key693;
+        int _val694;
+        for (int _i695 = 0; _i695 < _map692.size; ++_i695)
         {
-          _key659 = iprot.readString();
-          _val660 = iprot.readI32();
-          struct.approved_workers.put(_key659, _val660);
+          _key693 = iprot.readString();
+          _val694 = iprot.readI32();
+          struct.approved_workers.put(_key693, _val694);
         }
       }
       struct.set_approved_workers_isSet(true);

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/jvm/org/apache/storm/generated/LSSupervisorAssignments.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/org/apache/storm/generated/LSSupervisorAssignments.java b/storm-core/src/jvm/org/apache/storm/generated/LSSupervisorAssignments.java
index da1e4df..682425e 100644
--- a/storm-core/src/jvm/org/apache/storm/generated/LSSupervisorAssignments.java
+++ b/storm-core/src/jvm/org/apache/storm/generated/LSSupervisorAssignments.java
@@ -376,16 +376,16 @@ public class LSSupervisorAssignments implements org.apache.thrift.TBase<LSSuperv
           case 1: // ASSIGNMENTS
             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
               {
-                org.apache.thrift.protocol.TMap _map662 = iprot.readMapBegin();
-                struct.assignments = new HashMap<Integer,LocalAssignment>(2*_map662.size);
-                int _key663;
-                LocalAssignment _val664;
-                for (int _i665 = 0; _i665 < _map662.size; ++_i665)
+                org.apache.thrift.protocol.TMap _map696 = iprot.readMapBegin();
+                struct.assignments = new HashMap<Integer,LocalAssignment>(2*_map696.size);
+                int _key697;
+                LocalAssignment _val698;
+                for (int _i699 = 0; _i699 < _map696.size; ++_i699)
                 {
-                  _key663 = iprot.readI32();
-                  _val664 = new LocalAssignment();
-                  _val664.read(iprot);
-                  struct.assignments.put(_key663, _val664);
+                  _key697 = iprot.readI32();
+                  _val698 = new LocalAssignment();
+                  _val698.read(iprot);
+                  struct.assignments.put(_key697, _val698);
                 }
                 iprot.readMapEnd();
               }
@@ -411,10 +411,10 @@ public class LSSupervisorAssignments implements org.apache.thrift.TBase<LSSuperv
         oprot.writeFieldBegin(ASSIGNMENTS_FIELD_DESC);
         {
           oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I32, org.apache.thrift.protocol.TType.STRUCT, struct.assignments.size()));
-          for (Map.Entry<Integer, LocalAssignment> _iter666 : struct.assignments.entrySet())
+          for (Map.Entry<Integer, LocalAssignment> _iter700 : struct.assignments.entrySet())
           {
-            oprot.writeI32(_iter666.getKey());
-            _iter666.getValue().write(oprot);
+            oprot.writeI32(_iter700.getKey());
+            _iter700.getValue().write(oprot);
           }
           oprot.writeMapEnd();
         }
@@ -439,10 +439,10 @@ public class LSSupervisorAssignments implements org.apache.thrift.TBase<LSSuperv
       TTupleProtocol oprot = (TTupleProtocol) prot;
       {
         oprot.writeI32(struct.assignments.size());
-        for (Map.Entry<Integer, LocalAssignment> _iter667 : struct.assignments.entrySet())
+        for (Map.Entry<Integer, LocalAssignment> _iter701 : struct.assignments.entrySet())
         {
-          oprot.writeI32(_iter667.getKey());
-          _iter667.getValue().write(oprot);
+          oprot.writeI32(_iter701.getKey());
+          _iter701.getValue().write(oprot);
         }
       }
     }
@@ -451,16 +451,16 @@ public class LSSupervisorAssignments implements org.apache.thrift.TBase<LSSuperv
     public void read(org.apache.thrift.protocol.TProtocol prot, LSSupervisorAssignments struct) throws org.apache.thrift.TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       {
-        org.apache.thrift.protocol.TMap _map668 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I32, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.assignments = new HashMap<Integer,LocalAssignment>(2*_map668.size);
-        int _key669;
-        LocalAssignment _val670;
-        for (int _i671 = 0; _i671 < _map668.size; ++_i671)
+        org.apache.thrift.protocol.TMap _map702 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I32, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.assignments = new HashMap<Integer,LocalAssignment>(2*_map702.size);
+        int _key703;
+        LocalAssignment _val704;
+        for (int _i705 = 0; _i705 < _map702.size; ++_i705)
         {
-          _key669 = iprot.readI32();
-          _val670 = new LocalAssignment();
-          _val670.read(iprot);
-          struct.assignments.put(_key669, _val670);
+          _key703 = iprot.readI32();
+          _val704 = new LocalAssignment();
+          _val704.read(iprot);
+          struct.assignments.put(_key703, _val704);
         }
       }
       struct.set_assignments_isSet(true);

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/jvm/org/apache/storm/generated/LSTopoHistory.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/org/apache/storm/generated/LSTopoHistory.java b/storm-core/src/jvm/org/apache/storm/generated/LSTopoHistory.java
index 05f5222..68f5066 100644
--- a/storm-core/src/jvm/org/apache/storm/generated/LSTopoHistory.java
+++ b/storm-core/src/jvm/org/apache/storm/generated/LSTopoHistory.java
@@ -656,13 +656,13 @@ public class LSTopoHistory implements org.apache.thrift.TBase<LSTopoHistory, LST
           case 3: // USERS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list680 = iprot.readListBegin();
-                struct.users = new ArrayList<String>(_list680.size);
-                String _elem681;
-                for (int _i682 = 0; _i682 < _list680.size; ++_i682)
+                org.apache.thrift.protocol.TList _list714 = iprot.readListBegin();
+                struct.users = new ArrayList<String>(_list714.size);
+                String _elem715;
+                for (int _i716 = 0; _i716 < _list714.size; ++_i716)
                 {
-                  _elem681 = iprot.readString();
-                  struct.users.add(_elem681);
+                  _elem715 = iprot.readString();
+                  struct.users.add(_elem715);
                 }
                 iprot.readListEnd();
               }
@@ -674,13 +674,13 @@ public class LSTopoHistory implements org.apache.thrift.TBase<LSTopoHistory, LST
           case 4: // GROUPS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list683 = iprot.readListBegin();
-                struct.groups = new ArrayList<String>(_list683.size);
-                String _elem684;
-                for (int _i685 = 0; _i685 < _list683.size; ++_i685)
+                org.apache.thrift.protocol.TList _list717 = iprot.readListBegin();
+                struct.groups = new ArrayList<String>(_list717.size);
+                String _elem718;
+                for (int _i719 = 0; _i719 < _list717.size; ++_i719)
                 {
-                  _elem684 = iprot.readString();
-                  struct.groups.add(_elem684);
+                  _elem718 = iprot.readString();
+                  struct.groups.add(_elem718);
                 }
                 iprot.readListEnd();
               }
@@ -714,9 +714,9 @@ public class LSTopoHistory implements org.apache.thrift.TBase<LSTopoHistory, LST
         oprot.writeFieldBegin(USERS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.users.size()));
-          for (String _iter686 : struct.users)
+          for (String _iter720 : struct.users)
           {
-            oprot.writeString(_iter686);
+            oprot.writeString(_iter720);
           }
           oprot.writeListEnd();
         }
@@ -726,9 +726,9 @@ public class LSTopoHistory implements org.apache.thrift.TBase<LSTopoHistory, LST
         oprot.writeFieldBegin(GROUPS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.groups.size()));
-          for (String _iter687 : struct.groups)
+          for (String _iter721 : struct.groups)
           {
-            oprot.writeString(_iter687);
+            oprot.writeString(_iter721);
           }
           oprot.writeListEnd();
         }
@@ -755,16 +755,16 @@ public class LSTopoHistory implements org.apache.thrift.TBase<LSTopoHistory, LST
       oprot.writeI64(struct.time_stamp);
       {
         oprot.writeI32(struct.users.size());
-        for (String _iter688 : struct.users)
+        for (String _iter722 : struct.users)
         {
-          oprot.writeString(_iter688);
+          oprot.writeString(_iter722);
         }
       }
       {
         oprot.writeI32(struct.groups.size());
-        for (String _iter689 : struct.groups)
+        for (String _iter723 : struct.groups)
         {
-          oprot.writeString(_iter689);
+          oprot.writeString(_iter723);
         }
       }
     }
@@ -777,24 +777,24 @@ public class LSTopoHistory implements org.apache.thrift.TBase<LSTopoHistory, LST
       struct.time_stamp = iprot.readI64();
       struct.set_time_stamp_isSet(true);
       {
-        org.apache.thrift.protocol.TList _list690 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-        struct.users = new ArrayList<String>(_list690.size);
-        String _elem691;
-        for (int _i692 = 0; _i692 < _list690.size; ++_i692)
+        org.apache.thrift.protocol.TList _list724 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+        struct.users = new ArrayList<String>(_list724.size);
+        String _elem725;
+        for (int _i726 = 0; _i726 < _list724.size; ++_i726)
         {
-          _elem691 = iprot.readString();
-          struct.users.add(_elem691);
+          _elem725 = iprot.readString();
+          struct.users.add(_elem725);
         }
       }
       struct.set_users_isSet(true);
       {
-        org.apache.thrift.protocol.TList _list693 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-        struct.groups = new ArrayList<String>(_list693.size);
-        String _elem694;
-        for (int _i695 = 0; _i695 < _list693.size; ++_i695)
+        org.apache.thrift.protocol.TList _list727 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+        struct.groups = new ArrayList<String>(_list727.size);
+        String _elem728;
+        for (int _i729 = 0; _i729 < _list727.size; ++_i729)
         {
-          _elem694 = iprot.readString();
-          struct.groups.add(_elem694);
+          _elem728 = iprot.readString();
+          struct.groups.add(_elem728);
         }
       }
       struct.set_groups_isSet(true);

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/jvm/org/apache/storm/generated/LSTopoHistoryList.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/org/apache/storm/generated/LSTopoHistoryList.java b/storm-core/src/jvm/org/apache/storm/generated/LSTopoHistoryList.java
index bc227f7..a2bf370 100644
--- a/storm-core/src/jvm/org/apache/storm/generated/LSTopoHistoryList.java
+++ b/storm-core/src/jvm/org/apache/storm/generated/LSTopoHistoryList.java
@@ -371,14 +371,14 @@ public class LSTopoHistoryList implements org.apache.thrift.TBase<LSTopoHistoryL
           case 1: // TOPO_HISTORY
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list696 = iprot.readListBegin();
-                struct.topo_history = new ArrayList<LSTopoHistory>(_list696.size);
-                LSTopoHistory _elem697;
-                for (int _i698 = 0; _i698 < _list696.size; ++_i698)
+                org.apache.thrift.protocol.TList _list730 = iprot.readListBegin();
+                struct.topo_history = new ArrayList<LSTopoHistory>(_list730.size);
+                LSTopoHistory _elem731;
+                for (int _i732 = 0; _i732 < _list730.size; ++_i732)
                 {
-                  _elem697 = new LSTopoHistory();
-                  _elem697.read(iprot);
-                  struct.topo_history.add(_elem697);
+                  _elem731 = new LSTopoHistory();
+                  _elem731.read(iprot);
+                  struct.topo_history.add(_elem731);
                 }
                 iprot.readListEnd();
               }
@@ -404,9 +404,9 @@ public class LSTopoHistoryList implements org.apache.thrift.TBase<LSTopoHistoryL
         oprot.writeFieldBegin(TOPO_HISTORY_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.topo_history.size()));
-          for (LSTopoHistory _iter699 : struct.topo_history)
+          for (LSTopoHistory _iter733 : struct.topo_history)
           {
-            _iter699.write(oprot);
+            _iter733.write(oprot);
           }
           oprot.writeListEnd();
         }
@@ -431,9 +431,9 @@ public class LSTopoHistoryList implements org.apache.thrift.TBase<LSTopoHistoryL
       TTupleProtocol oprot = (TTupleProtocol) prot;
       {
         oprot.writeI32(struct.topo_history.size());
-        for (LSTopoHistory _iter700 : struct.topo_history)
+        for (LSTopoHistory _iter734 : struct.topo_history)
         {
-          _iter700.write(oprot);
+          _iter734.write(oprot);
         }
       }
     }
@@ -442,14 +442,14 @@ public class LSTopoHistoryList implements org.apache.thrift.TBase<LSTopoHistoryL
     public void read(org.apache.thrift.protocol.TProtocol prot, LSTopoHistoryList struct) throws org.apache.thrift.TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       {
-        org.apache.thrift.protocol.TList _list701 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.topo_history = new ArrayList<LSTopoHistory>(_list701.size);
-        LSTopoHistory _elem702;
-        for (int _i703 = 0; _i703 < _list701.size; ++_i703)
+        org.apache.thrift.protocol.TList _list735 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.topo_history = new ArrayList<LSTopoHistory>(_list735.size);
+        LSTopoHistory _elem736;
+        for (int _i737 = 0; _i737 < _list735.size; ++_i737)
         {
-          _elem702 = new LSTopoHistory();
-          _elem702.read(iprot);
-          struct.topo_history.add(_elem702);
+          _elem736 = new LSTopoHistory();
+          _elem736.read(iprot);
+          struct.topo_history.add(_elem736);
         }
       }
       struct.set_topo_history_isSet(true);

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/jvm/org/apache/storm/generated/LSWorkerHeartbeat.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/org/apache/storm/generated/LSWorkerHeartbeat.java b/storm-core/src/jvm/org/apache/storm/generated/LSWorkerHeartbeat.java
index 2bc8b0a..968b9cc 100644
--- a/storm-core/src/jvm/org/apache/storm/generated/LSWorkerHeartbeat.java
+++ b/storm-core/src/jvm/org/apache/storm/generated/LSWorkerHeartbeat.java
@@ -638,14 +638,14 @@ public class LSWorkerHeartbeat implements org.apache.thrift.TBase<LSWorkerHeartb
           case 3: // EXECUTORS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list672 = iprot.readListBegin();
-                struct.executors = new ArrayList<ExecutorInfo>(_list672.size);
-                ExecutorInfo _elem673;
-                for (int _i674 = 0; _i674 < _list672.size; ++_i674)
+                org.apache.thrift.protocol.TList _list706 = iprot.readListBegin();
+                struct.executors = new ArrayList<ExecutorInfo>(_list706.size);
+                ExecutorInfo _elem707;
+                for (int _i708 = 0; _i708 < _list706.size; ++_i708)
                 {
-                  _elem673 = new ExecutorInfo();
-                  _elem673.read(iprot);
-                  struct.executors.add(_elem673);
+                  _elem707 = new ExecutorInfo();
+                  _elem707.read(iprot);
+                  struct.executors.add(_elem707);
                 }
                 iprot.readListEnd();
               }
@@ -687,9 +687,9 @@ public class LSWorkerHeartbeat implements org.apache.thrift.TBase<LSWorkerHeartb
         oprot.writeFieldBegin(EXECUTORS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.executors.size()));
-          for (ExecutorInfo _iter675 : struct.executors)
+          for (ExecutorInfo _iter709 : struct.executors)
           {
-            _iter675.write(oprot);
+            _iter709.write(oprot);
           }
           oprot.writeListEnd();
         }
@@ -719,9 +719,9 @@ public class LSWorkerHeartbeat implements org.apache.thrift.TBase<LSWorkerHeartb
       oprot.writeString(struct.topology_id);
       {
         oprot.writeI32(struct.executors.size());
-        for (ExecutorInfo _iter676 : struct.executors)
+        for (ExecutorInfo _iter710 : struct.executors)
         {
-          _iter676.write(oprot);
+          _iter710.write(oprot);
         }
       }
       oprot.writeI32(struct.port);
@@ -735,14 +735,14 @@ public class LSWorkerHeartbeat implements org.apache.thrift.TBase<LSWorkerHeartb
       struct.topology_id = iprot.readString();
       struct.set_topology_id_isSet(true);
       {
-        org.apache.thrift.protocol.TList _list677 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.executors = new ArrayList<ExecutorInfo>(_list677.size);
-        ExecutorInfo _elem678;
-        for (int _i679 = 0; _i679 < _list677.size; ++_i679)
+        org.apache.thrift.protocol.TList _list711 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.executors = new ArrayList<ExecutorInfo>(_list711.size);
+        ExecutorInfo _elem712;
+        for (int _i713 = 0; _i713 < _list711.size; ++_i713)
         {
-          _elem678 = new ExecutorInfo();
-          _elem678.read(iprot);
-          struct.executors.add(_elem678);
+          _elem712 = new ExecutorInfo();
+          _elem712.read(iprot);
+          struct.executors.add(_elem712);
         }
       }
       struct.set_executors_isSet(true);

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/jvm/org/apache/storm/generated/ListBlobsResult.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/org/apache/storm/generated/ListBlobsResult.java b/storm-core/src/jvm/org/apache/storm/generated/ListBlobsResult.java
index 854b49d..43f7dc2 100644
--- a/storm-core/src/jvm/org/apache/storm/generated/ListBlobsResult.java
+++ b/storm-core/src/jvm/org/apache/storm/generated/ListBlobsResult.java
@@ -453,13 +453,13 @@ public class ListBlobsResult implements org.apache.thrift.TBase<ListBlobsResult,
           case 1: // KEYS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list496 = iprot.readListBegin();
-                struct.keys = new ArrayList<String>(_list496.size);
-                String _elem497;
-                for (int _i498 = 0; _i498 < _list496.size; ++_i498)
+                org.apache.thrift.protocol.TList _list530 = iprot.readListBegin();
+                struct.keys = new ArrayList<String>(_list530.size);
+                String _elem531;
+                for (int _i532 = 0; _i532 < _list530.size; ++_i532)
                 {
-                  _elem497 = iprot.readString();
-                  struct.keys.add(_elem497);
+                  _elem531 = iprot.readString();
+                  struct.keys.add(_elem531);
                 }
                 iprot.readListEnd();
               }
@@ -493,9 +493,9 @@ public class ListBlobsResult implements org.apache.thrift.TBase<ListBlobsResult,
         oprot.writeFieldBegin(KEYS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.keys.size()));
-          for (String _iter499 : struct.keys)
+          for (String _iter533 : struct.keys)
           {
-            oprot.writeString(_iter499);
+            oprot.writeString(_iter533);
           }
           oprot.writeListEnd();
         }
@@ -525,9 +525,9 @@ public class ListBlobsResult implements org.apache.thrift.TBase<ListBlobsResult,
       TTupleProtocol oprot = (TTupleProtocol) prot;
       {
         oprot.writeI32(struct.keys.size());
-        for (String _iter500 : struct.keys)
+        for (String _iter534 : struct.keys)
         {
-          oprot.writeString(_iter500);
+          oprot.writeString(_iter534);
         }
       }
       oprot.writeString(struct.session);
@@ -537,13 +537,13 @@ public class ListBlobsResult implements org.apache.thrift.TBase<ListBlobsResult,
     public void read(org.apache.thrift.protocol.TProtocol prot, ListBlobsResult struct) throws org.apache.thrift.TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       {
-        org.apache.thrift.protocol.TList _list501 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-        struct.keys = new ArrayList<String>(_list501.size);
-        String _elem502;
-        for (int _i503 = 0; _i503 < _list501.size; ++_i503)
+        org.apache.thrift.protocol.TList _list535 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+        struct.keys = new ArrayList<String>(_list535.size);
+        String _elem536;
+        for (int _i537 = 0; _i537 < _list535.size; ++_i537)
         {
-          _elem502 = iprot.readString();
-          struct.keys.add(_elem502);
+          _elem536 = iprot.readString();
+          struct.keys.add(_elem536);
         }
       }
       struct.set_keys_isSet(true);

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/jvm/org/apache/storm/generated/LocalAssignment.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/org/apache/storm/generated/LocalAssignment.java b/storm-core/src/jvm/org/apache/storm/generated/LocalAssignment.java
index 19404cc..717014c 100644
--- a/storm-core/src/jvm/org/apache/storm/generated/LocalAssignment.java
+++ b/storm-core/src/jvm/org/apache/storm/generated/LocalAssignment.java
@@ -549,14 +549,14 @@ public class LocalAssignment implements org.apache.thrift.TBase<LocalAssignment,
           case 2: // EXECUTORS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list644 = iprot.readListBegin();
-                struct.executors = new ArrayList<ExecutorInfo>(_list644.size);
-                ExecutorInfo _elem645;
-                for (int _i646 = 0; _i646 < _list644.size; ++_i646)
+                org.apache.thrift.protocol.TList _list678 = iprot.readListBegin();
+                struct.executors = new ArrayList<ExecutorInfo>(_list678.size);
+                ExecutorInfo _elem679;
+                for (int _i680 = 0; _i680 < _list678.size; ++_i680)
                 {
-                  _elem645 = new ExecutorInfo();
-                  _elem645.read(iprot);
-                  struct.executors.add(_elem645);
+                  _elem679 = new ExecutorInfo();
+                  _elem679.read(iprot);
+                  struct.executors.add(_elem679);
                 }
                 iprot.readListEnd();
               }
@@ -596,9 +596,9 @@ public class LocalAssignment implements org.apache.thrift.TBase<LocalAssignment,
         oprot.writeFieldBegin(EXECUTORS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.executors.size()));
-          for (ExecutorInfo _iter647 : struct.executors)
+          for (ExecutorInfo _iter681 : struct.executors)
           {
-            _iter647.write(oprot);
+            _iter681.write(oprot);
           }
           oprot.writeListEnd();
         }
@@ -631,9 +631,9 @@ public class LocalAssignment implements org.apache.thrift.TBase<LocalAssignment,
       oprot.writeString(struct.topology_id);
       {
         oprot.writeI32(struct.executors.size());
-        for (ExecutorInfo _iter648 : struct.executors)
+        for (ExecutorInfo _iter682 : struct.executors)
         {
-          _iter648.write(oprot);
+          _iter682.write(oprot);
         }
       }
       BitSet optionals = new BitSet();
@@ -652,14 +652,14 @@ public class LocalAssignment implements org.apache.thrift.TBase<LocalAssignment,
       struct.topology_id = iprot.readString();
       struct.set_topology_id_isSet(true);
       {
-        org.apache.thrift.protocol.TList _list649 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.executors = new ArrayList<ExecutorInfo>(_list649.size);
-        ExecutorInfo _elem650;
-        for (int _i651 = 0; _i651 < _list649.size; ++_i651)
+        org.apache.thrift.protocol.TList _list683 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.executors = new ArrayList<ExecutorInfo>(_list683.size);
+        ExecutorInfo _elem684;
+        for (int _i685 = 0; _i685 < _list683.size; ++_i685)
         {
-          _elem650 = new ExecutorInfo();
-          _elem650.read(iprot);
-          struct.executors.add(_elem650);
+          _elem684 = new ExecutorInfo();
+          _elem684.read(iprot);
+          struct.executors.add(_elem684);
         }
       }
       struct.set_executors_isSet(true);

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/jvm/org/apache/storm/generated/LocalStateData.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/org/apache/storm/generated/LocalStateData.java b/storm-core/src/jvm/org/apache/storm/generated/LocalStateData.java
index e85a55d..dd28764 100644
--- a/storm-core/src/jvm/org/apache/storm/generated/LocalStateData.java
+++ b/storm-core/src/jvm/org/apache/storm/generated/LocalStateData.java
@@ -376,16 +376,16 @@ public class LocalStateData implements org.apache.thrift.TBase<LocalStateData, L
           case 1: // SERIALIZED_PARTS
             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
               {
-                org.apache.thrift.protocol.TMap _map634 = iprot.readMapBegin();
-                struct.serialized_parts = new HashMap<String,ThriftSerializedObject>(2*_map634.size);
-                String _key635;
-                ThriftSerializedObject _val636;
-                for (int _i637 = 0; _i637 < _map634.size; ++_i637)
+                org.apache.thrift.protocol.TMap _map668 = iprot.readMapBegin();
+                struct.serialized_parts = new HashMap<String,ThriftSerializedObject>(2*_map668.size);
+                String _key669;
+                ThriftSerializedObject _val670;
+                for (int _i671 = 0; _i671 < _map668.size; ++_i671)
                 {
-                  _key635 = iprot.readString();
-                  _val636 = new ThriftSerializedObject();
-                  _val636.read(iprot);
-                  struct.serialized_parts.put(_key635, _val636);
+                  _key669 = iprot.readString();
+                  _val670 = new ThriftSerializedObject();
+                  _val670.read(iprot);
+                  struct.serialized_parts.put(_key669, _val670);
                 }
                 iprot.readMapEnd();
               }
@@ -411,10 +411,10 @@ public class LocalStateData implements org.apache.thrift.TBase<LocalStateData, L
         oprot.writeFieldBegin(SERIALIZED_PARTS_FIELD_DESC);
         {
           oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.serialized_parts.size()));
-          for (Map.Entry<String, ThriftSerializedObject> _iter638 : struct.serialized_parts.entrySet())
+          for (Map.Entry<String, ThriftSerializedObject> _iter672 : struct.serialized_parts.entrySet())
           {
-            oprot.writeString(_iter638.getKey());
-            _iter638.getValue().write(oprot);
+            oprot.writeString(_iter672.getKey());
+            _iter672.getValue().write(oprot);
           }
           oprot.writeMapEnd();
         }
@@ -439,10 +439,10 @@ public class LocalStateData implements org.apache.thrift.TBase<LocalStateData, L
       TTupleProtocol oprot = (TTupleProtocol) prot;
       {
         oprot.writeI32(struct.serialized_parts.size());
-        for (Map.Entry<String, ThriftSerializedObject> _iter639 : struct.serialized_parts.entrySet())
+        for (Map.Entry<String, ThriftSerializedObject> _iter673 : struct.serialized_parts.entrySet())
         {
-          oprot.writeString(_iter639.getKey());
-          _iter639.getValue().write(oprot);
+          oprot.writeString(_iter673.getKey());
+          _iter673.getValue().write(oprot);
         }
       }
     }
@@ -451,16 +451,16 @@ public class LocalStateData implements org.apache.thrift.TBase<LocalStateData, L
     public void read(org.apache.thrift.protocol.TProtocol prot, LocalStateData struct) throws org.apache.thrift.TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       {
-        org.apache.thrift.protocol.TMap _map640 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.serialized_parts = new HashMap<String,ThriftSerializedObject>(2*_map640.size);
-        String _key641;
-        ThriftSerializedObject _val642;
-        for (int _i643 = 0; _i643 < _map640.size; ++_i643)
+        org.apache.thrift.protocol.TMap _map674 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.serialized_parts = new HashMap<String,ThriftSerializedObject>(2*_map674.size);
+        String _key675;
+        ThriftSerializedObject _val676;
+        for (int _i677 = 0; _i677 < _map674.size; ++_i677)
         {
-          _key641 = iprot.readString();
-          _val642 = new ThriftSerializedObject();
-          _val642.read(iprot);
-          struct.serialized_parts.put(_key641, _val642);
+          _key675 = iprot.readString();
+          _val676 = new ThriftSerializedObject();
+          _val676.read(iprot);
+          struct.serialized_parts.put(_key675, _val676);
         }
       }
       struct.set_serialized_parts_isSet(true);

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/jvm/org/apache/storm/generated/LogConfig.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/org/apache/storm/generated/LogConfig.java b/storm-core/src/jvm/org/apache/storm/generated/LogConfig.java
index a797523..95230de 100644
--- a/storm-core/src/jvm/org/apache/storm/generated/LogConfig.java
+++ b/storm-core/src/jvm/org/apache/storm/generated/LogConfig.java
@@ -368,16 +368,16 @@ public class LogConfig implements org.apache.thrift.TBase<LogConfig, LogConfig._
           case 2: // NAMED_LOGGER_LEVEL
             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
               {
-                org.apache.thrift.protocol.TMap _map704 = iprot.readMapBegin();
-                struct.named_logger_level = new HashMap<String,LogLevel>(2*_map704.size);
-                String _key705;
-                LogLevel _val706;
-                for (int _i707 = 0; _i707 < _map704.size; ++_i707)
+                org.apache.thrift.protocol.TMap _map738 = iprot.readMapBegin();
+                struct.named_logger_level = new HashMap<String,LogLevel>(2*_map738.size);
+                String _key739;
+                LogLevel _val740;
+                for (int _i741 = 0; _i741 < _map738.size; ++_i741)
                 {
-                  _key705 = iprot.readString();
-                  _val706 = new LogLevel();
-                  _val706.read(iprot);
-                  struct.named_logger_level.put(_key705, _val706);
+                  _key739 = iprot.readString();
+                  _val740 = new LogLevel();
+                  _val740.read(iprot);
+                  struct.named_logger_level.put(_key739, _val740);
                 }
                 iprot.readMapEnd();
               }
@@ -404,10 +404,10 @@ public class LogConfig implements org.apache.thrift.TBase<LogConfig, LogConfig._
           oprot.writeFieldBegin(NAMED_LOGGER_LEVEL_FIELD_DESC);
           {
             oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.named_logger_level.size()));
-            for (Map.Entry<String, LogLevel> _iter708 : struct.named_logger_level.entrySet())
+            for (Map.Entry<String, LogLevel> _iter742 : struct.named_logger_level.entrySet())
             {
-              oprot.writeString(_iter708.getKey());
-              _iter708.getValue().write(oprot);
+              oprot.writeString(_iter742.getKey());
+              _iter742.getValue().write(oprot);
             }
             oprot.writeMapEnd();
           }
@@ -439,10 +439,10 @@ public class LogConfig implements org.apache.thrift.TBase<LogConfig, LogConfig._
       if (struct.is_set_named_logger_level()) {
         {
           oprot.writeI32(struct.named_logger_level.size());
-          for (Map.Entry<String, LogLevel> _iter709 : struct.named_logger_level.entrySet())
+          for (Map.Entry<String, LogLevel> _iter743 : struct.named_logger_level.entrySet())
           {
-            oprot.writeString(_iter709.getKey());
-            _iter709.getValue().write(oprot);
+            oprot.writeString(_iter743.getKey());
+            _iter743.getValue().write(oprot);
           }
         }
       }
@@ -454,16 +454,16 @@ public class LogConfig implements org.apache.thrift.TBase<LogConfig, LogConfig._
       BitSet incoming = iprot.readBitSet(1);
       if (incoming.get(0)) {
         {
-          org.apache.thrift.protocol.TMap _map710 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.named_logger_level = new HashMap<String,LogLevel>(2*_map710.size);
-          String _key711;
-          LogLevel _val712;
-          for (int _i713 = 0; _i713 < _map710.size; ++_i713)
+          org.apache.thrift.protocol.TMap _map744 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.named_logger_level = new HashMap<String,LogLevel>(2*_map744.size);
+          String _key745;
+          LogLevel _val746;
+          for (int _i747 = 0; _i747 < _map744.size; ++_i747)
           {
-            _key711 = iprot.readString();
-            _val712 = new LogLevel();
-            _val712.read(iprot);
-            struct.named_logger_level.put(_key711, _val712);
+            _key745 = iprot.readString();
+            _val746 = new LogLevel();
+            _val746.read(iprot);
+            struct.named_logger_level.put(_key745, _val746);
           }
         }
         struct.set_named_logger_level_isSet(true);


[4/9] storm git commit: STORM-1994: Add table with per-topology and worker resource usage and components in (new) supervisor and topology pages

Posted by ka...@apache.org.
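For orientation while reading this part of the diff: the changes to the existing generated classes below (NodeInfo, RebalanceOptions, SettableBlobMeta, StormBase, SupervisorInfo, and so on) are mechanical renames of the Thrift compiler's temporaries — each _listNNN, _mapNNN, _setNNN, _elemNNN, _keyNNN, _valNNN, _iterNNN and _iNNN counter shifts by the same fixed offset (here +34) because the bindings were regenerated after new structs such as SupervisorPageInfo were added; the serialization logic itself is unchanged. Every renamed loop follows the standard generated read pattern, sketched here by hand purely for illustration (not part of the generated sources; method name and the java.util imports are assumed):

    // Hand-written sketch of the generated list-read pattern seen throughout this diff.
    private static List<String> readStringList(org.apache.thrift.protocol.TProtocol iprot)
        throws org.apache.thrift.TException {
      org.apache.thrift.protocol.TList tlist = iprot.readListBegin(); // list header: element type + size
      List<String> keys = new ArrayList<String>(tlist.size);          // pre-size the target collection
      for (int i = 0; i < tlist.size; ++i) {
        keys.add(iprot.readString());                                 // read each element in declared order
      }
      iprot.readListEnd();
      return keys;
    }

The map and set variants in the hunks below differ only in reading a TMap/TSet header and, for struct values, calling read(iprot) on a freshly constructed element before adding it to the collection.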
http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/jvm/org/apache/storm/generated/NodeInfo.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/org/apache/storm/generated/NodeInfo.java b/storm-core/src/jvm/org/apache/storm/generated/NodeInfo.java
index 0106940..34c5e43 100644
--- a/storm-core/src/jvm/org/apache/storm/generated/NodeInfo.java
+++ b/storm-core/src/jvm/org/apache/storm/generated/NodeInfo.java
@@ -461,13 +461,13 @@ public class NodeInfo implements org.apache.thrift.TBase<NodeInfo, NodeInfo._Fie
           case 2: // PORT
             if (schemeField.type == org.apache.thrift.protocol.TType.SET) {
               {
-                org.apache.thrift.protocol.TSet _set540 = iprot.readSetBegin();
-                struct.port = new HashSet<Long>(2*_set540.size);
-                long _elem541;
-                for (int _i542 = 0; _i542 < _set540.size; ++_i542)
+                org.apache.thrift.protocol.TSet _set574 = iprot.readSetBegin();
+                struct.port = new HashSet<Long>(2*_set574.size);
+                long _elem575;
+                for (int _i576 = 0; _i576 < _set574.size; ++_i576)
                 {
-                  _elem541 = iprot.readI64();
-                  struct.port.add(_elem541);
+                  _elem575 = iprot.readI64();
+                  struct.port.add(_elem575);
                 }
                 iprot.readSetEnd();
               }
@@ -498,9 +498,9 @@ public class NodeInfo implements org.apache.thrift.TBase<NodeInfo, NodeInfo._Fie
         oprot.writeFieldBegin(PORT_FIELD_DESC);
         {
           oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, struct.port.size()));
-          for (long _iter543 : struct.port)
+          for (long _iter577 : struct.port)
           {
-            oprot.writeI64(_iter543);
+            oprot.writeI64(_iter577);
           }
           oprot.writeSetEnd();
         }
@@ -526,9 +526,9 @@ public class NodeInfo implements org.apache.thrift.TBase<NodeInfo, NodeInfo._Fie
       oprot.writeString(struct.node);
       {
         oprot.writeI32(struct.port.size());
-        for (long _iter544 : struct.port)
+        for (long _iter578 : struct.port)
         {
-          oprot.writeI64(_iter544);
+          oprot.writeI64(_iter578);
         }
       }
     }
@@ -539,13 +539,13 @@ public class NodeInfo implements org.apache.thrift.TBase<NodeInfo, NodeInfo._Fie
       struct.node = iprot.readString();
       struct.set_node_isSet(true);
       {
-        org.apache.thrift.protocol.TSet _set545 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32());
-        struct.port = new HashSet<Long>(2*_set545.size);
-        long _elem546;
-        for (int _i547 = 0; _i547 < _set545.size; ++_i547)
+        org.apache.thrift.protocol.TSet _set579 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32());
+        struct.port = new HashSet<Long>(2*_set579.size);
+        long _elem580;
+        for (int _i581 = 0; _i581 < _set579.size; ++_i581)
         {
-          _elem546 = iprot.readI64();
-          struct.port.add(_elem546);
+          _elem580 = iprot.readI64();
+          struct.port.add(_elem580);
         }
       }
       struct.set_port_isSet(true);

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/jvm/org/apache/storm/generated/RebalanceOptions.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/org/apache/storm/generated/RebalanceOptions.java b/storm-core/src/jvm/org/apache/storm/generated/RebalanceOptions.java
index 1d92dd6..34d4c3b 100644
--- a/storm-core/src/jvm/org/apache/storm/generated/RebalanceOptions.java
+++ b/storm-core/src/jvm/org/apache/storm/generated/RebalanceOptions.java
@@ -529,15 +529,15 @@ public class RebalanceOptions implements org.apache.thrift.TBase<RebalanceOption
           case 3: // NUM_EXECUTORS
             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
               {
-                org.apache.thrift.protocol.TMap _map468 = iprot.readMapBegin();
-                struct.num_executors = new HashMap<String,Integer>(2*_map468.size);
-                String _key469;
-                int _val470;
-                for (int _i471 = 0; _i471 < _map468.size; ++_i471)
+                org.apache.thrift.protocol.TMap _map502 = iprot.readMapBegin();
+                struct.num_executors = new HashMap<String,Integer>(2*_map502.size);
+                String _key503;
+                int _val504;
+                for (int _i505 = 0; _i505 < _map502.size; ++_i505)
                 {
-                  _key469 = iprot.readString();
-                  _val470 = iprot.readI32();
-                  struct.num_executors.put(_key469, _val470);
+                  _key503 = iprot.readString();
+                  _val504 = iprot.readI32();
+                  struct.num_executors.put(_key503, _val504);
                 }
                 iprot.readMapEnd();
               }
@@ -574,10 +574,10 @@ public class RebalanceOptions implements org.apache.thrift.TBase<RebalanceOption
           oprot.writeFieldBegin(NUM_EXECUTORS_FIELD_DESC);
           {
             oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, struct.num_executors.size()));
-            for (Map.Entry<String, Integer> _iter472 : struct.num_executors.entrySet())
+            for (Map.Entry<String, Integer> _iter506 : struct.num_executors.entrySet())
             {
-              oprot.writeString(_iter472.getKey());
-              oprot.writeI32(_iter472.getValue());
+              oprot.writeString(_iter506.getKey());
+              oprot.writeI32(_iter506.getValue());
             }
             oprot.writeMapEnd();
           }
@@ -621,10 +621,10 @@ public class RebalanceOptions implements org.apache.thrift.TBase<RebalanceOption
       if (struct.is_set_num_executors()) {
         {
           oprot.writeI32(struct.num_executors.size());
-          for (Map.Entry<String, Integer> _iter473 : struct.num_executors.entrySet())
+          for (Map.Entry<String, Integer> _iter507 : struct.num_executors.entrySet())
           {
-            oprot.writeString(_iter473.getKey());
-            oprot.writeI32(_iter473.getValue());
+            oprot.writeString(_iter507.getKey());
+            oprot.writeI32(_iter507.getValue());
           }
         }
       }
@@ -644,15 +644,15 @@ public class RebalanceOptions implements org.apache.thrift.TBase<RebalanceOption
       }
       if (incoming.get(2)) {
         {
-          org.apache.thrift.protocol.TMap _map474 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, iprot.readI32());
-          struct.num_executors = new HashMap<String,Integer>(2*_map474.size);
-          String _key475;
-          int _val476;
-          for (int _i477 = 0; _i477 < _map474.size; ++_i477)
+          org.apache.thrift.protocol.TMap _map508 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, iprot.readI32());
+          struct.num_executors = new HashMap<String,Integer>(2*_map508.size);
+          String _key509;
+          int _val510;
+          for (int _i511 = 0; _i511 < _map508.size; ++_i511)
           {
-            _key475 = iprot.readString();
-            _val476 = iprot.readI32();
-            struct.num_executors.put(_key475, _val476);
+            _key509 = iprot.readString();
+            _val510 = iprot.readI32();
+            struct.num_executors.put(_key509, _val510);
           }
         }
         struct.set_num_executors_isSet(true);

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/jvm/org/apache/storm/generated/SettableBlobMeta.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/org/apache/storm/generated/SettableBlobMeta.java b/storm-core/src/jvm/org/apache/storm/generated/SettableBlobMeta.java
index e22fd6f..3bd9a86 100644
--- a/storm-core/src/jvm/org/apache/storm/generated/SettableBlobMeta.java
+++ b/storm-core/src/jvm/org/apache/storm/generated/SettableBlobMeta.java
@@ -452,14 +452,14 @@ public class SettableBlobMeta implements org.apache.thrift.TBase<SettableBlobMet
           case 1: // ACL
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list488 = iprot.readListBegin();
-                struct.acl = new ArrayList<AccessControl>(_list488.size);
-                AccessControl _elem489;
-                for (int _i490 = 0; _i490 < _list488.size; ++_i490)
+                org.apache.thrift.protocol.TList _list522 = iprot.readListBegin();
+                struct.acl = new ArrayList<AccessControl>(_list522.size);
+                AccessControl _elem523;
+                for (int _i524 = 0; _i524 < _list522.size; ++_i524)
                 {
-                  _elem489 = new AccessControl();
-                  _elem489.read(iprot);
-                  struct.acl.add(_elem489);
+                  _elem523 = new AccessControl();
+                  _elem523.read(iprot);
+                  struct.acl.add(_elem523);
                 }
                 iprot.readListEnd();
               }
@@ -493,9 +493,9 @@ public class SettableBlobMeta implements org.apache.thrift.TBase<SettableBlobMet
         oprot.writeFieldBegin(ACL_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.acl.size()));
-          for (AccessControl _iter491 : struct.acl)
+          for (AccessControl _iter525 : struct.acl)
           {
-            _iter491.write(oprot);
+            _iter525.write(oprot);
           }
           oprot.writeListEnd();
         }
@@ -525,9 +525,9 @@ public class SettableBlobMeta implements org.apache.thrift.TBase<SettableBlobMet
       TTupleProtocol oprot = (TTupleProtocol) prot;
       {
         oprot.writeI32(struct.acl.size());
-        for (AccessControl _iter492 : struct.acl)
+        for (AccessControl _iter526 : struct.acl)
         {
-          _iter492.write(oprot);
+          _iter526.write(oprot);
         }
       }
       BitSet optionals = new BitSet();
@@ -544,14 +544,14 @@ public class SettableBlobMeta implements org.apache.thrift.TBase<SettableBlobMet
     public void read(org.apache.thrift.protocol.TProtocol prot, SettableBlobMeta struct) throws org.apache.thrift.TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       {
-        org.apache.thrift.protocol.TList _list493 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.acl = new ArrayList<AccessControl>(_list493.size);
-        AccessControl _elem494;
-        for (int _i495 = 0; _i495 < _list493.size; ++_i495)
+        org.apache.thrift.protocol.TList _list527 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.acl = new ArrayList<AccessControl>(_list527.size);
+        AccessControl _elem528;
+        for (int _i529 = 0; _i529 < _list527.size; ++_i529)
         {
-          _elem494 = new AccessControl();
-          _elem494.read(iprot);
-          struct.acl.add(_elem494);
+          _elem528 = new AccessControl();
+          _elem528.read(iprot);
+          struct.acl.add(_elem528);
         }
       }
       struct.set_acl_isSet(true);

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/jvm/org/apache/storm/generated/StormBase.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/org/apache/storm/generated/StormBase.java b/storm-core/src/jvm/org/apache/storm/generated/StormBase.java
index 5b95144..ba4b792 100644
--- a/storm-core/src/jvm/org/apache/storm/generated/StormBase.java
+++ b/storm-core/src/jvm/org/apache/storm/generated/StormBase.java
@@ -1090,15 +1090,15 @@ public class StormBase implements org.apache.thrift.TBase<StormBase, StormBase._
           case 4: // COMPONENT_EXECUTORS
             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
               {
-                org.apache.thrift.protocol.TMap _map604 = iprot.readMapBegin();
-                struct.component_executors = new HashMap<String,Integer>(2*_map604.size);
-                String _key605;
-                int _val606;
-                for (int _i607 = 0; _i607 < _map604.size; ++_i607)
+                org.apache.thrift.protocol.TMap _map638 = iprot.readMapBegin();
+                struct.component_executors = new HashMap<String,Integer>(2*_map638.size);
+                String _key639;
+                int _val640;
+                for (int _i641 = 0; _i641 < _map638.size; ++_i641)
                 {
-                  _key605 = iprot.readString();
-                  _val606 = iprot.readI32();
-                  struct.component_executors.put(_key605, _val606);
+                  _key639 = iprot.readString();
+                  _val640 = iprot.readI32();
+                  struct.component_executors.put(_key639, _val640);
                 }
                 iprot.readMapEnd();
               }
@@ -1143,16 +1143,16 @@ public class StormBase implements org.apache.thrift.TBase<StormBase, StormBase._
           case 9: // COMPONENT_DEBUG
             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
               {
-                org.apache.thrift.protocol.TMap _map608 = iprot.readMapBegin();
-                struct.component_debug = new HashMap<String,DebugOptions>(2*_map608.size);
-                String _key609;
-                DebugOptions _val610;
-                for (int _i611 = 0; _i611 < _map608.size; ++_i611)
+                org.apache.thrift.protocol.TMap _map642 = iprot.readMapBegin();
+                struct.component_debug = new HashMap<String,DebugOptions>(2*_map642.size);
+                String _key643;
+                DebugOptions _val644;
+                for (int _i645 = 0; _i645 < _map642.size; ++_i645)
                 {
-                  _key609 = iprot.readString();
-                  _val610 = new DebugOptions();
-                  _val610.read(iprot);
-                  struct.component_debug.put(_key609, _val610);
+                  _key643 = iprot.readString();
+                  _val644 = new DebugOptions();
+                  _val644.read(iprot);
+                  struct.component_debug.put(_key643, _val644);
                 }
                 iprot.readMapEnd();
               }
@@ -1192,10 +1192,10 @@ public class StormBase implements org.apache.thrift.TBase<StormBase, StormBase._
           oprot.writeFieldBegin(COMPONENT_EXECUTORS_FIELD_DESC);
           {
             oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, struct.component_executors.size()));
-            for (Map.Entry<String, Integer> _iter612 : struct.component_executors.entrySet())
+            for (Map.Entry<String, Integer> _iter646 : struct.component_executors.entrySet())
             {
-              oprot.writeString(_iter612.getKey());
-              oprot.writeI32(_iter612.getValue());
+              oprot.writeString(_iter646.getKey());
+              oprot.writeI32(_iter646.getValue());
             }
             oprot.writeMapEnd();
           }
@@ -1233,10 +1233,10 @@ public class StormBase implements org.apache.thrift.TBase<StormBase, StormBase._
           oprot.writeFieldBegin(COMPONENT_DEBUG_FIELD_DESC);
           {
             oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.component_debug.size()));
-            for (Map.Entry<String, DebugOptions> _iter613 : struct.component_debug.entrySet())
+            for (Map.Entry<String, DebugOptions> _iter647 : struct.component_debug.entrySet())
             {
-              oprot.writeString(_iter613.getKey());
-              _iter613.getValue().write(oprot);
+              oprot.writeString(_iter647.getKey());
+              _iter647.getValue().write(oprot);
             }
             oprot.writeMapEnd();
           }
@@ -1286,10 +1286,10 @@ public class StormBase implements org.apache.thrift.TBase<StormBase, StormBase._
       if (struct.is_set_component_executors()) {
         {
           oprot.writeI32(struct.component_executors.size());
-          for (Map.Entry<String, Integer> _iter614 : struct.component_executors.entrySet())
+          for (Map.Entry<String, Integer> _iter648 : struct.component_executors.entrySet())
           {
-            oprot.writeString(_iter614.getKey());
-            oprot.writeI32(_iter614.getValue());
+            oprot.writeString(_iter648.getKey());
+            oprot.writeI32(_iter648.getValue());
           }
         }
       }
@@ -1308,10 +1308,10 @@ public class StormBase implements org.apache.thrift.TBase<StormBase, StormBase._
       if (struct.is_set_component_debug()) {
         {
           oprot.writeI32(struct.component_debug.size());
-          for (Map.Entry<String, DebugOptions> _iter615 : struct.component_debug.entrySet())
+          for (Map.Entry<String, DebugOptions> _iter649 : struct.component_debug.entrySet())
           {
-            oprot.writeString(_iter615.getKey());
-            _iter615.getValue().write(oprot);
+            oprot.writeString(_iter649.getKey());
+            _iter649.getValue().write(oprot);
           }
         }
       }
@@ -1329,15 +1329,15 @@ public class StormBase implements org.apache.thrift.TBase<StormBase, StormBase._
       BitSet incoming = iprot.readBitSet(6);
       if (incoming.get(0)) {
         {
-          org.apache.thrift.protocol.TMap _map616 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, iprot.readI32());
-          struct.component_executors = new HashMap<String,Integer>(2*_map616.size);
-          String _key617;
-          int _val618;
-          for (int _i619 = 0; _i619 < _map616.size; ++_i619)
+          org.apache.thrift.protocol.TMap _map650 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, iprot.readI32());
+          struct.component_executors = new HashMap<String,Integer>(2*_map650.size);
+          String _key651;
+          int _val652;
+          for (int _i653 = 0; _i653 < _map650.size; ++_i653)
           {
-            _key617 = iprot.readString();
-            _val618 = iprot.readI32();
-            struct.component_executors.put(_key617, _val618);
+            _key651 = iprot.readString();
+            _val652 = iprot.readI32();
+            struct.component_executors.put(_key651, _val652);
           }
         }
         struct.set_component_executors_isSet(true);
@@ -1361,16 +1361,16 @@ public class StormBase implements org.apache.thrift.TBase<StormBase, StormBase._
       }
       if (incoming.get(5)) {
         {
-          org.apache.thrift.protocol.TMap _map620 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.component_debug = new HashMap<String,DebugOptions>(2*_map620.size);
-          String _key621;
-          DebugOptions _val622;
-          for (int _i623 = 0; _i623 < _map620.size; ++_i623)
+          org.apache.thrift.protocol.TMap _map654 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.component_debug = new HashMap<String,DebugOptions>(2*_map654.size);
+          String _key655;
+          DebugOptions _val656;
+          for (int _i657 = 0; _i657 < _map654.size; ++_i657)
           {
-            _key621 = iprot.readString();
-            _val622 = new DebugOptions();
-            _val622.read(iprot);
-            struct.component_debug.put(_key621, _val622);
+            _key655 = iprot.readString();
+            _val656 = new DebugOptions();
+            _val656.read(iprot);
+            struct.component_debug.put(_key655, _val656);
           }
         }
         struct.set_component_debug_isSet(true);

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/jvm/org/apache/storm/generated/SupervisorInfo.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/org/apache/storm/generated/SupervisorInfo.java b/storm-core/src/jvm/org/apache/storm/generated/SupervisorInfo.java
index a4d5269..18805be 100644
--- a/storm-core/src/jvm/org/apache/storm/generated/SupervisorInfo.java
+++ b/storm-core/src/jvm/org/apache/storm/generated/SupervisorInfo.java
@@ -1085,13 +1085,13 @@ public class SupervisorInfo implements org.apache.thrift.TBase<SupervisorInfo, S
           case 4: // USED_PORTS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list504 = iprot.readListBegin();
-                struct.used_ports = new ArrayList<Long>(_list504.size);
-                long _elem505;
-                for (int _i506 = 0; _i506 < _list504.size; ++_i506)
+                org.apache.thrift.protocol.TList _list538 = iprot.readListBegin();
+                struct.used_ports = new ArrayList<Long>(_list538.size);
+                long _elem539;
+                for (int _i540 = 0; _i540 < _list538.size; ++_i540)
                 {
-                  _elem505 = iprot.readI64();
-                  struct.used_ports.add(_elem505);
+                  _elem539 = iprot.readI64();
+                  struct.used_ports.add(_elem539);
                 }
                 iprot.readListEnd();
               }
@@ -1103,13 +1103,13 @@ public class SupervisorInfo implements org.apache.thrift.TBase<SupervisorInfo, S
           case 5: // META
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list507 = iprot.readListBegin();
-                struct.meta = new ArrayList<Long>(_list507.size);
-                long _elem508;
-                for (int _i509 = 0; _i509 < _list507.size; ++_i509)
+                org.apache.thrift.protocol.TList _list541 = iprot.readListBegin();
+                struct.meta = new ArrayList<Long>(_list541.size);
+                long _elem542;
+                for (int _i543 = 0; _i543 < _list541.size; ++_i543)
                 {
-                  _elem508 = iprot.readI64();
-                  struct.meta.add(_elem508);
+                  _elem542 = iprot.readI64();
+                  struct.meta.add(_elem542);
                 }
                 iprot.readListEnd();
               }
@@ -1121,15 +1121,15 @@ public class SupervisorInfo implements org.apache.thrift.TBase<SupervisorInfo, S
           case 6: // SCHEDULER_META
             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
               {
-                org.apache.thrift.protocol.TMap _map510 = iprot.readMapBegin();
-                struct.scheduler_meta = new HashMap<String,String>(2*_map510.size);
-                String _key511;
-                String _val512;
-                for (int _i513 = 0; _i513 < _map510.size; ++_i513)
+                org.apache.thrift.protocol.TMap _map544 = iprot.readMapBegin();
+                struct.scheduler_meta = new HashMap<String,String>(2*_map544.size);
+                String _key545;
+                String _val546;
+                for (int _i547 = 0; _i547 < _map544.size; ++_i547)
                 {
-                  _key511 = iprot.readString();
-                  _val512 = iprot.readString();
-                  struct.scheduler_meta.put(_key511, _val512);
+                  _key545 = iprot.readString();
+                  _val546 = iprot.readString();
+                  struct.scheduler_meta.put(_key545, _val546);
                 }
                 iprot.readMapEnd();
               }
@@ -1157,15 +1157,15 @@ public class SupervisorInfo implements org.apache.thrift.TBase<SupervisorInfo, S
           case 9: // RESOURCES_MAP
             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
               {
-                org.apache.thrift.protocol.TMap _map514 = iprot.readMapBegin();
-                struct.resources_map = new HashMap<String,Double>(2*_map514.size);
-                String _key515;
-                double _val516;
-                for (int _i517 = 0; _i517 < _map514.size; ++_i517)
+                org.apache.thrift.protocol.TMap _map548 = iprot.readMapBegin();
+                struct.resources_map = new HashMap<String,Double>(2*_map548.size);
+                String _key549;
+                double _val550;
+                for (int _i551 = 0; _i551 < _map548.size; ++_i551)
                 {
-                  _key515 = iprot.readString();
-                  _val516 = iprot.readDouble();
-                  struct.resources_map.put(_key515, _val516);
+                  _key549 = iprot.readString();
+                  _val550 = iprot.readDouble();
+                  struct.resources_map.put(_key549, _val550);
                 }
                 iprot.readMapEnd();
               }
@@ -1207,9 +1207,9 @@ public class SupervisorInfo implements org.apache.thrift.TBase<SupervisorInfo, S
           oprot.writeFieldBegin(USED_PORTS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.used_ports.size()));
-            for (long _iter518 : struct.used_ports)
+            for (long _iter552 : struct.used_ports)
             {
-              oprot.writeI64(_iter518);
+              oprot.writeI64(_iter552);
             }
             oprot.writeListEnd();
           }
@@ -1221,9 +1221,9 @@ public class SupervisorInfo implements org.apache.thrift.TBase<SupervisorInfo, S
           oprot.writeFieldBegin(META_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.meta.size()));
-            for (long _iter519 : struct.meta)
+            for (long _iter553 : struct.meta)
             {
-              oprot.writeI64(_iter519);
+              oprot.writeI64(_iter553);
             }
             oprot.writeListEnd();
           }
@@ -1235,10 +1235,10 @@ public class SupervisorInfo implements org.apache.thrift.TBase<SupervisorInfo, S
           oprot.writeFieldBegin(SCHEDULER_META_FIELD_DESC);
           {
             oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.scheduler_meta.size()));
-            for (Map.Entry<String, String> _iter520 : struct.scheduler_meta.entrySet())
+            for (Map.Entry<String, String> _iter554 : struct.scheduler_meta.entrySet())
             {
-              oprot.writeString(_iter520.getKey());
-              oprot.writeString(_iter520.getValue());
+              oprot.writeString(_iter554.getKey());
+              oprot.writeString(_iter554.getValue());
             }
             oprot.writeMapEnd();
           }
@@ -1262,10 +1262,10 @@ public class SupervisorInfo implements org.apache.thrift.TBase<SupervisorInfo, S
           oprot.writeFieldBegin(RESOURCES_MAP_FIELD_DESC);
           {
             oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.DOUBLE, struct.resources_map.size()));
-            for (Map.Entry<String, Double> _iter521 : struct.resources_map.entrySet())
+            for (Map.Entry<String, Double> _iter555 : struct.resources_map.entrySet())
             {
-              oprot.writeString(_iter521.getKey());
-              oprot.writeDouble(_iter521.getValue());
+              oprot.writeString(_iter555.getKey());
+              oprot.writeDouble(_iter555.getValue());
             }
             oprot.writeMapEnd();
           }
@@ -1320,28 +1320,28 @@ public class SupervisorInfo implements org.apache.thrift.TBase<SupervisorInfo, S
       if (struct.is_set_used_ports()) {
         {
           oprot.writeI32(struct.used_ports.size());
-          for (long _iter522 : struct.used_ports)
+          for (long _iter556 : struct.used_ports)
           {
-            oprot.writeI64(_iter522);
+            oprot.writeI64(_iter556);
           }
         }
       }
       if (struct.is_set_meta()) {
         {
           oprot.writeI32(struct.meta.size());
-          for (long _iter523 : struct.meta)
+          for (long _iter557 : struct.meta)
           {
-            oprot.writeI64(_iter523);
+            oprot.writeI64(_iter557);
           }
         }
       }
       if (struct.is_set_scheduler_meta()) {
         {
           oprot.writeI32(struct.scheduler_meta.size());
-          for (Map.Entry<String, String> _iter524 : struct.scheduler_meta.entrySet())
+          for (Map.Entry<String, String> _iter558 : struct.scheduler_meta.entrySet())
           {
-            oprot.writeString(_iter524.getKey());
-            oprot.writeString(_iter524.getValue());
+            oprot.writeString(_iter558.getKey());
+            oprot.writeString(_iter558.getValue());
           }
         }
       }
@@ -1354,10 +1354,10 @@ public class SupervisorInfo implements org.apache.thrift.TBase<SupervisorInfo, S
       if (struct.is_set_resources_map()) {
         {
           oprot.writeI32(struct.resources_map.size());
-          for (Map.Entry<String, Double> _iter525 : struct.resources_map.entrySet())
+          for (Map.Entry<String, Double> _iter559 : struct.resources_map.entrySet())
           {
-            oprot.writeString(_iter525.getKey());
-            oprot.writeDouble(_iter525.getValue());
+            oprot.writeString(_iter559.getKey());
+            oprot.writeDouble(_iter559.getValue());
           }
         }
       }
@@ -1377,41 +1377,41 @@ public class SupervisorInfo implements org.apache.thrift.TBase<SupervisorInfo, S
       }
       if (incoming.get(1)) {
         {
-          org.apache.thrift.protocol.TList _list526 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
-          struct.used_ports = new ArrayList<Long>(_list526.size);
-          long _elem527;
-          for (int _i528 = 0; _i528 < _list526.size; ++_i528)
+          org.apache.thrift.protocol.TList _list560 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
+          struct.used_ports = new ArrayList<Long>(_list560.size);
+          long _elem561;
+          for (int _i562 = 0; _i562 < _list560.size; ++_i562)
           {
-            _elem527 = iprot.readI64();
-            struct.used_ports.add(_elem527);
+            _elem561 = iprot.readI64();
+            struct.used_ports.add(_elem561);
           }
         }
         struct.set_used_ports_isSet(true);
       }
       if (incoming.get(2)) {
         {
-          org.apache.thrift.protocol.TList _list529 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
-          struct.meta = new ArrayList<Long>(_list529.size);
-          long _elem530;
-          for (int _i531 = 0; _i531 < _list529.size; ++_i531)
+          org.apache.thrift.protocol.TList _list563 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
+          struct.meta = new ArrayList<Long>(_list563.size);
+          long _elem564;
+          for (int _i565 = 0; _i565 < _list563.size; ++_i565)
           {
-            _elem530 = iprot.readI64();
-            struct.meta.add(_elem530);
+            _elem564 = iprot.readI64();
+            struct.meta.add(_elem564);
           }
         }
         struct.set_meta_isSet(true);
       }
       if (incoming.get(3)) {
         {
-          org.apache.thrift.protocol.TMap _map532 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.scheduler_meta = new HashMap<String,String>(2*_map532.size);
-          String _key533;
-          String _val534;
-          for (int _i535 = 0; _i535 < _map532.size; ++_i535)
+          org.apache.thrift.protocol.TMap _map566 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.scheduler_meta = new HashMap<String,String>(2*_map566.size);
+          String _key567;
+          String _val568;
+          for (int _i569 = 0; _i569 < _map566.size; ++_i569)
           {
-            _key533 = iprot.readString();
-            _val534 = iprot.readString();
-            struct.scheduler_meta.put(_key533, _val534);
+            _key567 = iprot.readString();
+            _val568 = iprot.readString();
+            struct.scheduler_meta.put(_key567, _val568);
           }
         }
         struct.set_scheduler_meta_isSet(true);
@@ -1426,15 +1426,15 @@ public class SupervisorInfo implements org.apache.thrift.TBase<SupervisorInfo, S
       }
       if (incoming.get(6)) {
         {
-          org.apache.thrift.protocol.TMap _map536 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.DOUBLE, iprot.readI32());
-          struct.resources_map = new HashMap<String,Double>(2*_map536.size);
-          String _key537;
-          double _val538;
-          for (int _i539 = 0; _i539 < _map536.size; ++_i539)
+          org.apache.thrift.protocol.TMap _map570 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.DOUBLE, iprot.readI32());
+          struct.resources_map = new HashMap<String,Double>(2*_map570.size);
+          String _key571;
+          double _val572;
+          for (int _i573 = 0; _i573 < _map570.size; ++_i573)
           {
-            _key537 = iprot.readString();
-            _val538 = iprot.readDouble();
-            struct.resources_map.put(_key537, _val538);
+            _key571 = iprot.readString();
+            _val572 = iprot.readDouble();
+            struct.resources_map.put(_key571, _val572);
           }
         }
         struct.set_resources_map_isSet(true);

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/jvm/org/apache/storm/generated/SupervisorPageInfo.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/org/apache/storm/generated/SupervisorPageInfo.java b/storm-core/src/jvm/org/apache/storm/generated/SupervisorPageInfo.java
new file mode 100644
index 0000000..d704005
--- /dev/null
+++ b/storm-core/src/jvm/org/apache/storm/generated/SupervisorPageInfo.java
@@ -0,0 +1,624 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.storm.generated;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+public class SupervisorPageInfo implements org.apache.thrift.TBase<SupervisorPageInfo, SupervisorPageInfo._Fields>, java.io.Serializable, Cloneable, Comparable<SupervisorPageInfo> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SupervisorPageInfo");
+
+  private static final org.apache.thrift.protocol.TField SUPERVISOR_SUMMARIES_FIELD_DESC = new org.apache.thrift.protocol.TField("supervisor_summaries", org.apache.thrift.protocol.TType.LIST, (short)1);
+  private static final org.apache.thrift.protocol.TField WORKER_SUMMARIES_FIELD_DESC = new org.apache.thrift.protocol.TField("worker_summaries", org.apache.thrift.protocol.TType.LIST, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new SupervisorPageInfoStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new SupervisorPageInfoTupleSchemeFactory());
+  }
+
+  private List<SupervisorSummary> supervisor_summaries; // optional
+  private List<WorkerSummary> worker_summaries; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    SUPERVISOR_SUMMARIES((short)1, "supervisor_summaries"),
+    WORKER_SUMMARIES((short)2, "worker_summaries");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // SUPERVISOR_SUMMARIES
+          return SUPERVISOR_SUMMARIES;
+        case 2: // WORKER_SUMMARIES
+          return WORKER_SUMMARIES;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final _Fields optionals[] = {_Fields.SUPERVISOR_SUMMARIES,_Fields.WORKER_SUMMARIES};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.SUPERVISOR_SUMMARIES, new org.apache.thrift.meta_data.FieldMetaData("supervisor_summaries", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SupervisorSummary.class))));
+    tmpMap.put(_Fields.WORKER_SUMMARIES, new org.apache.thrift.meta_data.FieldMetaData("worker_summaries", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT            , "WorkerSummary"))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SupervisorPageInfo.class, metaDataMap);
+  }
+
+  public SupervisorPageInfo() {
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public SupervisorPageInfo(SupervisorPageInfo other) {
+    if (other.is_set_supervisor_summaries()) {
+      List<SupervisorSummary> __this__supervisor_summaries = new ArrayList<SupervisorSummary>(other.supervisor_summaries.size());
+      for (SupervisorSummary other_element : other.supervisor_summaries) {
+        __this__supervisor_summaries.add(new SupervisorSummary(other_element));
+      }
+      this.supervisor_summaries = __this__supervisor_summaries;
+    }
+    if (other.is_set_worker_summaries()) {
+      List<WorkerSummary> __this__worker_summaries = new ArrayList<WorkerSummary>(other.worker_summaries.size());
+      for (WorkerSummary other_element : other.worker_summaries) {
+        __this__worker_summaries.add(other_element);
+      }
+      this.worker_summaries = __this__worker_summaries;
+    }
+  }
+
+  public SupervisorPageInfo deepCopy() {
+    return new SupervisorPageInfo(this);
+  }
+
+  @Override
+  public void clear() {
+    this.supervisor_summaries = null;
+    this.worker_summaries = null;
+  }
+
+  public int get_supervisor_summaries_size() {
+    return (this.supervisor_summaries == null) ? 0 : this.supervisor_summaries.size();
+  }
+
+  public java.util.Iterator<SupervisorSummary> get_supervisor_summaries_iterator() {
+    return (this.supervisor_summaries == null) ? null : this.supervisor_summaries.iterator();
+  }
+
+  public void add_to_supervisor_summaries(SupervisorSummary elem) {
+    if (this.supervisor_summaries == null) {
+      this.supervisor_summaries = new ArrayList<SupervisorSummary>();
+    }
+    this.supervisor_summaries.add(elem);
+  }
+
+  public List<SupervisorSummary> get_supervisor_summaries() {
+    return this.supervisor_summaries;
+  }
+
+  public void set_supervisor_summaries(List<SupervisorSummary> supervisor_summaries) {
+    this.supervisor_summaries = supervisor_summaries;
+  }
+
+  public void unset_supervisor_summaries() {
+    this.supervisor_summaries = null;
+  }
+
+  /** Returns true if field supervisor_summaries is set (has been assigned a value) and false otherwise */
+  public boolean is_set_supervisor_summaries() {
+    return this.supervisor_summaries != null;
+  }
+
+  public void set_supervisor_summaries_isSet(boolean value) {
+    if (!value) {
+      this.supervisor_summaries = null;
+    }
+  }
+
+  public int get_worker_summaries_size() {
+    return (this.worker_summaries == null) ? 0 : this.worker_summaries.size();
+  }
+
+  public java.util.Iterator<WorkerSummary> get_worker_summaries_iterator() {
+    return (this.worker_summaries == null) ? null : this.worker_summaries.iterator();
+  }
+
+  public void add_to_worker_summaries(WorkerSummary elem) {
+    if (this.worker_summaries == null) {
+      this.worker_summaries = new ArrayList<WorkerSummary>();
+    }
+    this.worker_summaries.add(elem);
+  }
+
+  public List<WorkerSummary> get_worker_summaries() {
+    return this.worker_summaries;
+  }
+
+  public void set_worker_summaries(List<WorkerSummary> worker_summaries) {
+    this.worker_summaries = worker_summaries;
+  }
+
+  public void unset_worker_summaries() {
+    this.worker_summaries = null;
+  }
+
+  /** Returns true if field worker_summaries is set (has been assigned a value) and false otherwise */
+  public boolean is_set_worker_summaries() {
+    return this.worker_summaries != null;
+  }
+
+  public void set_worker_summaries_isSet(boolean value) {
+    if (!value) {
+      this.worker_summaries = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case SUPERVISOR_SUMMARIES:
+      if (value == null) {
+        unset_supervisor_summaries();
+      } else {
+        set_supervisor_summaries((List<SupervisorSummary>)value);
+      }
+      break;
+
+    case WORKER_SUMMARIES:
+      if (value == null) {
+        unset_worker_summaries();
+      } else {
+        set_worker_summaries((List<WorkerSummary>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case SUPERVISOR_SUMMARIES:
+      return get_supervisor_summaries();
+
+    case WORKER_SUMMARIES:
+      return get_worker_summaries();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case SUPERVISOR_SUMMARIES:
+      return is_set_supervisor_summaries();
+    case WORKER_SUMMARIES:
+      return is_set_worker_summaries();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof SupervisorPageInfo)
+      return this.equals((SupervisorPageInfo)that);
+    return false;
+  }
+
+  public boolean equals(SupervisorPageInfo that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_supervisor_summaries = true && this.is_set_supervisor_summaries();
+    boolean that_present_supervisor_summaries = true && that.is_set_supervisor_summaries();
+    if (this_present_supervisor_summaries || that_present_supervisor_summaries) {
+      if (!(this_present_supervisor_summaries && that_present_supervisor_summaries))
+        return false;
+      if (!this.supervisor_summaries.equals(that.supervisor_summaries))
+        return false;
+    }
+
+    boolean this_present_worker_summaries = true && this.is_set_worker_summaries();
+    boolean that_present_worker_summaries = true && that.is_set_worker_summaries();
+    if (this_present_worker_summaries || that_present_worker_summaries) {
+      if (!(this_present_worker_summaries && that_present_worker_summaries))
+        return false;
+      if (!this.worker_summaries.equals(that.worker_summaries))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_supervisor_summaries = true && (is_set_supervisor_summaries());
+    list.add(present_supervisor_summaries);
+    if (present_supervisor_summaries)
+      list.add(supervisor_summaries);
+
+    boolean present_worker_summaries = true && (is_set_worker_summaries());
+    list.add(present_worker_summaries);
+    if (present_worker_summaries)
+      list.add(worker_summaries);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(SupervisorPageInfo other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(is_set_supervisor_summaries()).compareTo(other.is_set_supervisor_summaries());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_supervisor_summaries()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.supervisor_summaries, other.supervisor_summaries);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(is_set_worker_summaries()).compareTo(other.is_set_worker_summaries());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_worker_summaries()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.worker_summaries, other.worker_summaries);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("SupervisorPageInfo(");
+    boolean first = true;
+
+    if (is_set_supervisor_summaries()) {
+      sb.append("supervisor_summaries:");
+      if (this.supervisor_summaries == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.supervisor_summaries);
+      }
+      first = false;
+    }
+    if (is_set_worker_summaries()) {
+      if (!first) sb.append(", ");
+      sb.append("worker_summaries:");
+      if (this.worker_summaries == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.worker_summaries);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class SupervisorPageInfoStandardSchemeFactory implements SchemeFactory {
+    public SupervisorPageInfoStandardScheme getScheme() {
+      return new SupervisorPageInfoStandardScheme();
+    }
+  }
+
+  private static class SupervisorPageInfoStandardScheme extends StandardScheme<SupervisorPageInfo> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, SupervisorPageInfo struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // SUPERVISOR_SUMMARIES
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list402 = iprot.readListBegin();
+                struct.supervisor_summaries = new ArrayList<SupervisorSummary>(_list402.size);
+                SupervisorSummary _elem403;
+                for (int _i404 = 0; _i404 < _list402.size; ++_i404)
+                {
+                  _elem403 = new SupervisorSummary();
+                  _elem403.read(iprot);
+                  struct.supervisor_summaries.add(_elem403);
+                }
+                iprot.readListEnd();
+              }
+              struct.set_supervisor_summaries_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // WORKER_SUMMARIES
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list405 = iprot.readListBegin();
+                struct.worker_summaries = new ArrayList<WorkerSummary>(_list405.size);
+                WorkerSummary _elem406;
+                for (int _i407 = 0; _i407 < _list405.size; ++_i407)
+                {
+                  _elem406 = new WorkerSummary();
+                  _elem406.read(iprot);
+                  struct.worker_summaries.add(_elem406);
+                }
+                iprot.readListEnd();
+              }
+              struct.set_worker_summaries_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, SupervisorPageInfo struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.supervisor_summaries != null) {
+        if (struct.is_set_supervisor_summaries()) {
+          oprot.writeFieldBegin(SUPERVISOR_SUMMARIES_FIELD_DESC);
+          {
+            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.supervisor_summaries.size()));
+            for (SupervisorSummary _iter408 : struct.supervisor_summaries)
+            {
+              _iter408.write(oprot);
+            }
+            oprot.writeListEnd();
+          }
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.worker_summaries != null) {
+        if (struct.is_set_worker_summaries()) {
+          oprot.writeFieldBegin(WORKER_SUMMARIES_FIELD_DESC);
+          {
+            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.worker_summaries.size()));
+            for (WorkerSummary _iter409 : struct.worker_summaries)
+            {
+              _iter409.write(oprot);
+            }
+            oprot.writeListEnd();
+          }
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class SupervisorPageInfoTupleSchemeFactory implements SchemeFactory {
+    public SupervisorPageInfoTupleScheme getScheme() {
+      return new SupervisorPageInfoTupleScheme();
+    }
+  }
+
+  private static class SupervisorPageInfoTupleScheme extends TupleScheme<SupervisorPageInfo> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, SupervisorPageInfo struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.is_set_supervisor_summaries()) {
+        optionals.set(0);
+      }
+      if (struct.is_set_worker_summaries()) {
+        optionals.set(1);
+      }
+      oprot.writeBitSet(optionals, 2);
+      if (struct.is_set_supervisor_summaries()) {
+        {
+          oprot.writeI32(struct.supervisor_summaries.size());
+          for (SupervisorSummary _iter410 : struct.supervisor_summaries)
+          {
+            _iter410.write(oprot);
+          }
+        }
+      }
+      if (struct.is_set_worker_summaries()) {
+        {
+          oprot.writeI32(struct.worker_summaries.size());
+          for (WorkerSummary _iter411 : struct.worker_summaries)
+          {
+            _iter411.write(oprot);
+          }
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, SupervisorPageInfo struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(2);
+      if (incoming.get(0)) {
+        {
+          org.apache.thrift.protocol.TList _list412 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.supervisor_summaries = new ArrayList<SupervisorSummary>(_list412.size);
+          SupervisorSummary _elem413;
+          for (int _i414 = 0; _i414 < _list412.size; ++_i414)
+          {
+            _elem413 = new SupervisorSummary();
+            _elem413.read(iprot);
+            struct.supervisor_summaries.add(_elem413);
+          }
+        }
+        struct.set_supervisor_summaries_isSet(true);
+      }
+      if (incoming.get(1)) {
+        {
+          org.apache.thrift.protocol.TList _list415 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.worker_summaries = new ArrayList<WorkerSummary>(_list415.size);
+          WorkerSummary _elem416;
+          for (int _i417 = 0; _i417 < _list415.size; ++_i417)
+          {
+            _elem416 = new WorkerSummary();
+            _elem416.read(iprot);
+            struct.worker_summaries.add(_elem416);
+          }
+        }
+        struct.set_worker_summaries_isSet(true);
+      }
+    }
+  }
+
+}
+
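
For orientation, a minimal sketch of how the new SupervisorPageInfo struct might be populated and copied. It uses only the generated helpers shown above; the empty SupervisorSummary/WorkerSummary instances are placeholders for illustration, not how Nimbus actually fills them in.

import java.util.Arrays;
import org.apache.storm.generated.SupervisorPageInfo;
import org.apache.storm.generated.SupervisorSummary;
import org.apache.storm.generated.WorkerSummary;

public class SupervisorPageInfoSketch {
    public static void main(String[] args) {
        SupervisorPageInfo page = new SupervisorPageInfo();

        // Both list fields are optional; the generated add_to_* helpers create the lists lazily.
        page.add_to_supervisor_summaries(new SupervisorSummary());
        page.add_to_worker_summaries(new WorkerSummary());

        // Whole lists can also be assigned in one call.
        page.set_worker_summaries(Arrays.asList(new WorkerSummary(), new WorkerSummary()));

        // deepCopy() delegates to the copy constructor generated above.
        SupervisorPageInfo copy = page.deepCopy();

        System.out.println(copy.get_supervisor_summaries_size()); // 1
        System.out.println(copy.get_worker_summaries_size());     // 2
        System.out.println(copy.equals(page));                    // true
    }
}

Note that deepCopy() goes through the copy constructor above, which deep-copies the supervisor_summaries elements but reuses the WorkerSummary element references, matching the generated metadata for that field.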

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/jvm/org/apache/storm/generated/TopologyHistoryInfo.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/org/apache/storm/generated/TopologyHistoryInfo.java b/storm-core/src/jvm/org/apache/storm/generated/TopologyHistoryInfo.java
index 3ec261f..63225dc 100644
--- a/storm-core/src/jvm/org/apache/storm/generated/TopologyHistoryInfo.java
+++ b/storm-core/src/jvm/org/apache/storm/generated/TopologyHistoryInfo.java
@@ -364,13 +364,13 @@ public class TopologyHistoryInfo implements org.apache.thrift.TBase<TopologyHist
           case 1: // TOPO_IDS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list714 = iprot.readListBegin();
-                struct.topo_ids = new ArrayList<String>(_list714.size);
-                String _elem715;
-                for (int _i716 = 0; _i716 < _list714.size; ++_i716)
+                org.apache.thrift.protocol.TList _list748 = iprot.readListBegin();
+                struct.topo_ids = new ArrayList<String>(_list748.size);
+                String _elem749;
+                for (int _i750 = 0; _i750 < _list748.size; ++_i750)
                 {
-                  _elem715 = iprot.readString();
-                  struct.topo_ids.add(_elem715);
+                  _elem749 = iprot.readString();
+                  struct.topo_ids.add(_elem749);
                 }
                 iprot.readListEnd();
               }
@@ -396,9 +396,9 @@ public class TopologyHistoryInfo implements org.apache.thrift.TBase<TopologyHist
         oprot.writeFieldBegin(TOPO_IDS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.topo_ids.size()));
-          for (String _iter717 : struct.topo_ids)
+          for (String _iter751 : struct.topo_ids)
           {
-            oprot.writeString(_iter717);
+            oprot.writeString(_iter751);
           }
           oprot.writeListEnd();
         }
@@ -429,9 +429,9 @@ public class TopologyHistoryInfo implements org.apache.thrift.TBase<TopologyHist
       if (struct.is_set_topo_ids()) {
         {
           oprot.writeI32(struct.topo_ids.size());
-          for (String _iter718 : struct.topo_ids)
+          for (String _iter752 : struct.topo_ids)
           {
-            oprot.writeString(_iter718);
+            oprot.writeString(_iter752);
           }
         }
       }
@@ -443,13 +443,13 @@ public class TopologyHistoryInfo implements org.apache.thrift.TBase<TopologyHist
       BitSet incoming = iprot.readBitSet(1);
       if (incoming.get(0)) {
         {
-          org.apache.thrift.protocol.TList _list719 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.topo_ids = new ArrayList<String>(_list719.size);
-          String _elem720;
-          for (int _i721 = 0; _i721 < _list719.size; ++_i721)
+          org.apache.thrift.protocol.TList _list753 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.topo_ids = new ArrayList<String>(_list753.size);
+          String _elem754;
+          for (int _i755 = 0; _i755 < _list753.size; ++_i755)
           {
-            _elem720 = iprot.readString();
-            struct.topo_ids.add(_elem720);
+            _elem754 = iprot.readString();
+            struct.topo_ids.add(_elem754);
           }
         }
         struct.set_topo_ids_isSet(true);

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/jvm/org/apache/storm/generated/TopologyPageInfo.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/org/apache/storm/generated/TopologyPageInfo.java b/storm-core/src/jvm/org/apache/storm/generated/TopologyPageInfo.java
index 1d9214b..e573ba3 100644
--- a/storm-core/src/jvm/org/apache/storm/generated/TopologyPageInfo.java
+++ b/storm-core/src/jvm/org/apache/storm/generated/TopologyPageInfo.java
@@ -70,6 +70,7 @@ public class TopologyPageInfo implements org.apache.thrift.TBase<TopologyPageInf
   private static final org.apache.thrift.protocol.TField OWNER_FIELD_DESC = new org.apache.thrift.protocol.TField("owner", org.apache.thrift.protocol.TType.STRING, (short)13);
   private static final org.apache.thrift.protocol.TField DEBUG_OPTIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("debug_options", org.apache.thrift.protocol.TType.STRUCT, (short)14);
   private static final org.apache.thrift.protocol.TField REPLICATION_COUNT_FIELD_DESC = new org.apache.thrift.protocol.TField("replication_count", org.apache.thrift.protocol.TType.I32, (short)15);
+  private static final org.apache.thrift.protocol.TField WORKERS_FIELD_DESC = new org.apache.thrift.protocol.TField("workers", org.apache.thrift.protocol.TType.LIST, (short)16);
   private static final org.apache.thrift.protocol.TField REQUESTED_MEMONHEAP_FIELD_DESC = new org.apache.thrift.protocol.TField("requested_memonheap", org.apache.thrift.protocol.TType.DOUBLE, (short)521);
   private static final org.apache.thrift.protocol.TField REQUESTED_MEMOFFHEAP_FIELD_DESC = new org.apache.thrift.protocol.TField("requested_memoffheap", org.apache.thrift.protocol.TType.DOUBLE, (short)522);
   private static final org.apache.thrift.protocol.TField REQUESTED_CPU_FIELD_DESC = new org.apache.thrift.protocol.TField("requested_cpu", org.apache.thrift.protocol.TType.DOUBLE, (short)523);
@@ -98,6 +99,7 @@ public class TopologyPageInfo implements org.apache.thrift.TBase<TopologyPageInf
   private String owner; // optional
   private DebugOptions debug_options; // optional
   private int replication_count; // optional
+  private List<WorkerSummary> workers; // optional
   private double requested_memonheap; // optional
   private double requested_memoffheap; // optional
   private double requested_cpu; // optional
@@ -122,6 +124,7 @@ public class TopologyPageInfo implements org.apache.thrift.TBase<TopologyPageInf
     OWNER((short)13, "owner"),
     DEBUG_OPTIONS((short)14, "debug_options"),
     REPLICATION_COUNT((short)15, "replication_count"),
+    WORKERS((short)16, "workers"),
     REQUESTED_MEMONHEAP((short)521, "requested_memonheap"),
     REQUESTED_MEMOFFHEAP((short)522, "requested_memoffheap"),
     REQUESTED_CPU((short)523, "requested_cpu"),
@@ -172,6 +175,8 @@ public class TopologyPageInfo implements org.apache.thrift.TBase<TopologyPageInf
           return DEBUG_OPTIONS;
         case 15: // REPLICATION_COUNT
           return REPLICATION_COUNT;
+        case 16: // WORKERS
+          return WORKERS;
         case 521: // REQUESTED_MEMONHEAP
           return REQUESTED_MEMONHEAP;
         case 522: // REQUESTED_MEMOFFHEAP
@@ -236,7 +241,7 @@ public class TopologyPageInfo implements org.apache.thrift.TBase<TopologyPageInf
   private static final int __ASSIGNED_MEMOFFHEAP_ISSET_ID = 9;
   private static final int __ASSIGNED_CPU_ISSET_ID = 10;
   private short __isset_bitfield = 0;
-  private static final _Fields optionals[] = {_Fields.NAME,_Fields.UPTIME_SECS,_Fields.STATUS,_Fields.NUM_TASKS,_Fields.NUM_WORKERS,_Fields.NUM_EXECUTORS,_Fields.TOPOLOGY_CONF,_Fields.ID_TO_SPOUT_AGG_STATS,_Fields.ID_TO_BOLT_AGG_STATS,_Fields.SCHED_STATUS,_Fields.TOPOLOGY_STATS,_Fields.OWNER,_Fields.DEBUG_OPTIONS,_Fields.REPLICATION_COUNT,_Fields.REQUESTED_MEMONHEAP,_Fields.REQUESTED_MEMOFFHEAP,_Fields.REQUESTED_CPU,_Fields.ASSIGNED_MEMONHEAP,_Fields.ASSIGNED_MEMOFFHEAP,_Fields.ASSIGNED_CPU};
+  private static final _Fields optionals[] = {_Fields.NAME,_Fields.UPTIME_SECS,_Fields.STATUS,_Fields.NUM_TASKS,_Fields.NUM_WORKERS,_Fields.NUM_EXECUTORS,_Fields.TOPOLOGY_CONF,_Fields.ID_TO_SPOUT_AGG_STATS,_Fields.ID_TO_BOLT_AGG_STATS,_Fields.SCHED_STATUS,_Fields.TOPOLOGY_STATS,_Fields.OWNER,_Fields.DEBUG_OPTIONS,_Fields.REPLICATION_COUNT,_Fields.WORKERS,_Fields.REQUESTED_MEMONHEAP,_Fields.REQUESTED_MEMOFFHEAP,_Fields.REQUESTED_CPU,_Fields.ASSIGNED_MEMONHEAP,_Fields.ASSIGNED_MEMOFFHEAP,_Fields.ASSIGNED_CPU};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -274,6 +279,9 @@ public class TopologyPageInfo implements org.apache.thrift.TBase<TopologyPageInf
         new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, DebugOptions.class)));
     tmpMap.put(_Fields.REPLICATION_COUNT, new org.apache.thrift.meta_data.FieldMetaData("replication_count", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    tmpMap.put(_Fields.WORKERS, new org.apache.thrift.meta_data.FieldMetaData("workers", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, WorkerSummary.class))));
     tmpMap.put(_Fields.REQUESTED_MEMONHEAP, new org.apache.thrift.meta_data.FieldMetaData("requested_memonheap", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
     tmpMap.put(_Fields.REQUESTED_MEMOFFHEAP, new org.apache.thrift.meta_data.FieldMetaData("requested_memoffheap", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
@@ -364,6 +372,13 @@ public class TopologyPageInfo implements org.apache.thrift.TBase<TopologyPageInf
       this.debug_options = new DebugOptions(other.debug_options);
     }
     this.replication_count = other.replication_count;
+    if (other.is_set_workers()) {
+      List<WorkerSummary> __this__workers = new ArrayList<WorkerSummary>(other.workers.size());
+      for (WorkerSummary other_element : other.workers) {
+        __this__workers.add(new WorkerSummary(other_element));
+      }
+      this.workers = __this__workers;
+    }
     this.requested_memonheap = other.requested_memonheap;
     this.requested_memoffheap = other.requested_memoffheap;
     this.requested_cpu = other.requested_cpu;
@@ -398,6 +413,7 @@ public class TopologyPageInfo implements org.apache.thrift.TBase<TopologyPageInf
     this.debug_options = null;
     set_replication_count_isSet(false);
     this.replication_count = 0;
+    this.workers = null;
     set_requested_memonheap_isSet(false);
     this.requested_memonheap = 0.0;
     set_requested_memoffheap_isSet(false);
@@ -774,6 +790,44 @@ public class TopologyPageInfo implements org.apache.thrift.TBase<TopologyPageInf
     __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __REPLICATION_COUNT_ISSET_ID, value);
   }
 
+  public int get_workers_size() {
+    return (this.workers == null) ? 0 : this.workers.size();
+  }
+
+  public java.util.Iterator<WorkerSummary> get_workers_iterator() {
+    return (this.workers == null) ? null : this.workers.iterator();
+  }
+
+  public void add_to_workers(WorkerSummary elem) {
+    if (this.workers == null) {
+      this.workers = new ArrayList<WorkerSummary>();
+    }
+    this.workers.add(elem);
+  }
+
+  public List<WorkerSummary> get_workers() {
+    return this.workers;
+  }
+
+  public void set_workers(List<WorkerSummary> workers) {
+    this.workers = workers;
+  }
+
+  public void unset_workers() {
+    this.workers = null;
+  }
+
+  /** Returns true if field workers is set (has been assigned a value) and false otherwise */
+  public boolean is_set_workers() {
+    return this.workers != null;
+  }
+
+  public void set_workers_isSet(boolean value) {
+    if (!value) {
+      this.workers = null;
+    }
+  }
+
   public double get_requested_memonheap() {
     return this.requested_memonheap;
   }
@@ -1028,6 +1082,14 @@ public class TopologyPageInfo implements org.apache.thrift.TBase<TopologyPageInf
       }
       break;
 
+    case WORKERS:
+      if (value == null) {
+        unset_workers();
+      } else {
+        set_workers((List<WorkerSummary>)value);
+      }
+      break;
+
     case REQUESTED_MEMONHEAP:
       if (value == null) {
         unset_requested_memonheap();
@@ -1126,6 +1188,9 @@ public class TopologyPageInfo implements org.apache.thrift.TBase<TopologyPageInf
     case REPLICATION_COUNT:
       return get_replication_count();
 
+    case WORKERS:
+      return get_workers();
+
     case REQUESTED_MEMONHEAP:
       return get_requested_memonheap();
 
@@ -1185,6 +1250,8 @@ public class TopologyPageInfo implements org.apache.thrift.TBase<TopologyPageInf
       return is_set_debug_options();
     case REPLICATION_COUNT:
       return is_set_replication_count();
+    case WORKERS:
+      return is_set_workers();
     case REQUESTED_MEMONHEAP:
       return is_set_requested_memonheap();
     case REQUESTED_MEMOFFHEAP:
@@ -1349,6 +1416,15 @@ public class TopologyPageInfo implements org.apache.thrift.TBase<TopologyPageInf
         return false;
     }
 
+    boolean this_present_workers = true && this.is_set_workers();
+    boolean that_present_workers = true && that.is_set_workers();
+    if (this_present_workers || that_present_workers) {
+      if (!(this_present_workers && that_present_workers))
+        return false;
+      if (!this.workers.equals(that.workers))
+        return false;
+    }
+
     boolean this_present_requested_memonheap = true && this.is_set_requested_memonheap();
     boolean that_present_requested_memonheap = true && that.is_set_requested_memonheap();
     if (this_present_requested_memonheap || that_present_requested_memonheap) {
@@ -1485,6 +1561,11 @@ public class TopologyPageInfo implements org.apache.thrift.TBase<TopologyPageInf
     if (present_replication_count)
       list.add(replication_count);
 
+    boolean present_workers = true && (is_set_workers());
+    list.add(present_workers);
+    if (present_workers)
+      list.add(workers);
+
     boolean present_requested_memonheap = true && (is_set_requested_memonheap());
     list.add(present_requested_memonheap);
     if (present_requested_memonheap)
@@ -1676,6 +1757,16 @@ public class TopologyPageInfo implements org.apache.thrift.TBase<TopologyPageInf
         return lastComparison;
       }
     }
+    lastComparison = Boolean.valueOf(is_set_workers()).compareTo(other.is_set_workers());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_workers()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.workers, other.workers);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
     lastComparison = Boolean.valueOf(is_set_requested_memonheap()).compareTo(other.is_set_requested_memonheap());
     if (lastComparison != 0) {
       return lastComparison;
@@ -1883,6 +1974,16 @@ public class TopologyPageInfo implements org.apache.thrift.TBase<TopologyPageInf
       sb.append(this.replication_count);
       first = false;
     }
+    if (is_set_workers()) {
+      if (!first) sb.append(", ");
+      sb.append("workers:");
+      if (this.workers == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.workers);
+      }
+      first = false;
+    }
     if (is_set_requested_memonheap()) {
       if (!first) sb.append(", ");
       sb.append("requested_memonheap:");
@@ -2041,16 +2142,16 @@ public class TopologyPageInfo implements org.apache.thrift.TBase<TopologyPageInf
           case 9: // ID_TO_SPOUT_AGG_STATS
             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
               {
-                org.apache.thrift.protocol.TMap _map402 = iprot.readMapBegin();
-                struct.id_to_spout_agg_stats = new HashMap<String,ComponentAggregateStats>(2*_map402.size);
-                String _key403;
-                ComponentAggregateStats _val404;
-                for (int _i405 = 0; _i405 < _map402.size; ++_i405)
+                org.apache.thrift.protocol.TMap _map428 = iprot.readMapBegin();
+                struct.id_to_spout_agg_stats = new HashMap<String,ComponentAggregateStats>(2*_map428.size);
+                String _key429;
+                ComponentAggregateStats _val430;
+                for (int _i431 = 0; _i431 < _map428.size; ++_i431)
                 {
-                  _key403 = iprot.readString();
-                  _val404 = new ComponentAggregateStats();
-                  _val404.read(iprot);
-                  struct.id_to_spout_agg_stats.put(_key403, _val404);
+                  _key429 = iprot.readString();
+                  _val430 = new ComponentAggregateStats();
+                  _val430.read(iprot);
+                  struct.id_to_spout_agg_stats.put(_key429, _val430);
                 }
                 iprot.readMapEnd();
               }
@@ -2062,16 +2163,16 @@ public class TopologyPageInfo implements org.apache.thrift.TBase<TopologyPageInf
           case 10: // ID_TO_BOLT_AGG_STATS
             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
               {
-                org.apache.thrift.protocol.TMap _map406 = iprot.readMapBegin();
-                struct.id_to_bolt_agg_stats = new HashMap<String,ComponentAggregateStats>(2*_map406.size);
-                String _key407;
-                ComponentAggregateStats _val408;
-                for (int _i409 = 0; _i409 < _map406.size; ++_i409)
+                org.apache.thrift.protocol.TMap _map432 = iprot.readMapBegin();
+                struct.id_to_bolt_agg_stats = new HashMap<String,ComponentAggregateStats>(2*_map432.size);
+                String _key433;
+                ComponentAggregateStats _val434;
+                for (int _i435 = 0; _i435 < _map432.size; ++_i435)
                 {
-                  _key407 = iprot.readString();
-                  _val408 = new ComponentAggregateStats();
-                  _val408.read(iprot);
-                  struct.id_to_bolt_agg_stats.put(_key407, _val408);
+                  _key433 = iprot.readString();
+                  _val434 = new ComponentAggregateStats();
+                  _val434.read(iprot);
+                  struct.id_to_bolt_agg_stats.put(_key433, _val434);
                 }
                 iprot.readMapEnd();
               }
@@ -2122,6 +2223,25 @@ public class TopologyPageInfo implements org.apache.thrift.TBase<TopologyPageInf
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
+          case 16: // WORKERS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list436 = iprot.readListBegin();
+                struct.workers = new ArrayList<WorkerSummary>(_list436.size);
+                WorkerSummary _elem437;
+                for (int _i438 = 0; _i438 < _list436.size; ++_i438)
+                {
+                  _elem437 = new WorkerSummary();
+                  _elem437.read(iprot);
+                  struct.workers.add(_elem437);
+                }
+                iprot.readListEnd();
+              }
+              struct.set_workers_isSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
           case 521: // REQUESTED_MEMONHEAP
             if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
               struct.requested_memonheap = iprot.readDouble();
@@ -2234,10 +2354,10 @@ public class TopologyPageInfo implements org.apache.thrift.TBase<TopologyPageInf
           oprot.writeFieldBegin(ID_TO_SPOUT_AGG_STATS_FIELD_DESC);
           {
             oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.id_to_spout_agg_stats.size()));
-            for (Map.Entry<String, ComponentAggregateStats> _iter410 : struct.id_to_spout_agg_stats.entrySet())
+            for (Map.Entry<String, ComponentAggregateStats> _iter439 : struct.id_to_spout_agg_stats.entrySet())
             {
-              oprot.writeString(_iter410.getKey());
-              _iter410.getValue().write(oprot);
+              oprot.writeString(_iter439.getKey());
+              _iter439.getValue().write(oprot);
             }
             oprot.writeMapEnd();
           }
@@ -2249,10 +2369,10 @@ public class TopologyPageInfo implements org.apache.thrift.TBase<TopologyPageInf
           oprot.writeFieldBegin(ID_TO_BOLT_AGG_STATS_FIELD_DESC);
           {
             oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.id_to_bolt_agg_stats.size()));
-            for (Map.Entry<String, ComponentAggregateStats> _iter411 : struct.id_to_bolt_agg_stats.entrySet())
+            for (Map.Entry<String, ComponentAggregateStats> _iter440 : struct.id_to_bolt_agg_stats.entrySet())
             {
-              oprot.writeString(_iter411.getKey());
-              _iter411.getValue().write(oprot);
+              oprot.writeString(_iter440.getKey());
+              _iter440.getValue().write(oprot);
             }
             oprot.writeMapEnd();
           }
@@ -2292,6 +2412,20 @@ public class TopologyPageInfo implements org.apache.thrift.TBase<TopologyPageInf
         oprot.writeI32(struct.replication_count);
         oprot.writeFieldEnd();
       }
+      if (struct.workers != null) {
+        if (struct.is_set_workers()) {
+          oprot.writeFieldBegin(WORKERS_FIELD_DESC);
+          {
+            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.workers.size()));
+            for (WorkerSummary _iter441 : struct.workers)
+            {
+              _iter441.write(oprot);
+            }
+            oprot.writeListEnd();
+          }
+          oprot.writeFieldEnd();
+        }
+      }
       if (struct.is_set_requested_memonheap()) {
         oprot.writeFieldBegin(REQUESTED_MEMONHEAP_FIELD_DESC);
         oprot.writeDouble(struct.requested_memonheap);
@@ -2383,25 +2517,28 @@ public class TopologyPageInfo implements org.apache.thrift.TBase<TopologyPageInf
       if (struct.is_set_replication_count()) {
         optionals.set(13);
       }
-      if (struct.is_set_requested_memonheap()) {
+      if (struct.is_set_workers()) {
         optionals.set(14);
       }
-      if (struct.is_set_requested_memoffheap()) {
+      if (struct.is_set_requested_memonheap()) {
         optionals.set(15);
       }
-      if (struct.is_set_requested_cpu()) {
+      if (struct.is_set_requested_memoffheap()) {
         optionals.set(16);
       }
-      if (struct.is_set_assigned_memonheap()) {
+      if (struct.is_set_requested_cpu()) {
         optionals.set(17);
       }
-      if (struct.is_set_assigned_memoffheap()) {
+      if (struct.is_set_assigned_memonheap()) {
         optionals.set(18);
       }
-      if (struct.is_set_assigned_cpu()) {
+      if (struct.is_set_assigned_memoffheap()) {
         optionals.set(19);
       }
-      oprot.writeBitSet(optionals, 20);
+      if (struct.is_set_assigned_cpu()) {
+        optionals.set(20);
+      }
+      oprot.writeBitSet(optionals, 21);
       if (struct.is_set_name()) {
         oprot.writeString(struct.name);
       }
@@ -2426,20 +2563,20 @@ public class TopologyPageInfo implements org.apache.thrift.TBase<TopologyPageInf
       if (struct.is_set_id_to_spout_agg_stats()) {
         {
           oprot.writeI32(struct.id_to_spout_agg_stats.size());
-          for (Map.Entry<String, ComponentAggregateStats> _iter412 : struct.id_to_spout_agg_stats.entrySet())
+          for (Map.Entry<String, ComponentAggregateStats> _iter442 : struct.id_to_spout_agg_stats.entrySet())
           {
-            oprot.writeString(_iter412.getKey());
-            _iter412.getValue().write(oprot);
+            oprot.writeString(_iter442.getKey());
+            _iter442.getValue().write(oprot);
           }
         }
       }
       if (struct.is_set_id_to_bolt_agg_stats()) {
         {
           oprot.writeI32(struct.id_to_bolt_agg_stats.size());
-          for (Map.Entry<String, ComponentAggregateStats> _iter413 : struct.id_to_bolt_agg_stats.entrySet())
+          for (Map.Entry<String, ComponentAggregateStats> _iter443 : struct.id_to_bolt_agg_stats.entrySet())
           {
-            oprot.writeString(_iter413.getKey());
-            _iter413.getValue().write(oprot);
+            oprot.writeString(_iter443.getKey());
+            _iter443.getValue().write(oprot);
           }
         }
       }
@@ -2458,6 +2595,15 @@ public class TopologyPageInfo implements org.apache.thrift.TBase<TopologyPageInf
       if (struct.is_set_replication_count()) {
         oprot.writeI32(struct.replication_count);
       }
+      if (struct.is_set_workers()) {
+        {
+          oprot.writeI32(struct.workers.size());
+          for (WorkerSummary _iter444 : struct.workers)
+          {
+            _iter444.write(oprot);
+          }
+        }
+      }
       if (struct.is_set_requested_memonheap()) {
         oprot.writeDouble(struct.requested_memonheap);
       }
@@ -2483,7 +2629,7 @@ public class TopologyPageInfo implements org.apache.thrift.TBase<TopologyPageInf
       TTupleProtocol iprot = (TTupleProtocol) prot;
       struct.id = iprot.readString();
       struct.set_id_isSet(true);
-      BitSet incoming = iprot.readBitSet(20);
+      BitSet incoming = iprot.readBitSet(21);
       if (incoming.get(0)) {
         struct.name = iprot.readString();
         struct.set_name_isSet(true);
@@ -2514,32 +2660,32 @@ public class TopologyPageInfo implements org.apache.thrift.TBase<TopologyPageInf
       }
       if (incoming.get(7)) {
         {
-          org.apache.thrift.protocol.TMap _map414 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.id_to_spout_agg_stats = new HashMap<String,ComponentAggregateStats>(2*_map414.size);
-          String _key415;
-          ComponentAggregateStats _val416;
-          for (int _i417 = 0; _i417 < _map414.size; ++_i417)
+          org.apache.thrift.protocol.TMap _map445 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.id_to_spout_agg_stats = new HashMap<String,ComponentAggregateStats>(2*_map445.size);
+          String _key446;
+          ComponentAggregateStats _val447;
+          for (int _i448 = 0; _i448 < _map445.size; ++_i448)
           {
-            _key415 = iprot.readString();
-            _val416 = new ComponentAggregateStats();
-            _val416.read(iprot);
-            struct.id_to_spout_agg_stats.put(_key415, _val416);
+            _key446 = iprot.readString();
+            _val447 = new ComponentAggregateStats();
+            _val447.read(iprot);
+            struct.id_to_spout_agg_stats.put(_key446, _val447);
           }
         }
         struct.set_id_to_spout_agg_stats_isSet(true);
       }
       if (incoming.get(8)) {
         {
-          org.apache.thrift.protocol.TMap _map418 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.id_to_bolt_agg_stats = new HashMap<String,ComponentAggregateStats>(2*_map418.size);
-          String _key419;
-          ComponentAggregateStats _val420;
-          for (int _i421 = 0; _i421 < _map418.size; ++_i421)
+          org.apache.thrift.protocol.TMap _map449 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.id_to_bolt_agg_stats = new HashMap<String,ComponentAggregateStats>(2*_map449.size);
+          String _key450;
+          ComponentAggregateStats _val451;
+          for (int _i452 = 0; _i452 < _map449.size; ++_i452)
           {
-            _key419 = iprot.readString();
-            _val420 = new ComponentAggregateStats();
-            _val420.read(iprot);
-            struct.id_to_bolt_agg_stats.put(_key419, _val420);
+            _key450 = iprot.readString();
+            _val451 = new ComponentAggregateStats();
+            _val451.read(iprot);
+            struct.id_to_bolt_agg_stats.put(_key450, _val451);
           }
         }
         struct.set_id_to_bolt_agg_stats_isSet(true);
@@ -2567,26 +2713,40 @@ public class TopologyPageInfo implements org.apache.thrift.TBase<TopologyPageInf
         struct.set_replication_count_isSet(true);
       }
       if (incoming.get(14)) {
+        {
+          org.apache.thrift.protocol.TList _list453 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.workers = new ArrayList<WorkerSummary>(_list453.size);
+          WorkerSummary _elem454;
+          for (int _i455 = 0; _i455 < _list453.size; ++_i455)
+          {
+            _elem454 = new WorkerSummary();
+            _elem454.read(iprot);
+            struct.workers.add(_elem454);
+          }
+        }
+        struct.set_workers_isSet(true);
+      }
+      if (incoming.get(15)) {
         struct.requested_memonheap = iprot.readDouble();
         struct.set_requested_memonheap_isSet(true);
       }
-      if (incoming.get(15)) {
+      if (incoming.get(16)) {
         struct.requested_memoffheap = iprot.readDouble();
         struct.set_requested_memoffheap_isSet(true);
       }
-      if (incoming.get(16)) {
+      if (incoming.get(17)) {
         struct.requested_cpu = iprot.readDouble();
         struct.set_requested_cpu_isSet(true);
       }
-      if (incoming.get(17)) {
+      if (incoming.get(18)) {
         struct.assigned_memonheap = iprot.readDouble();
         struct.set_assigned_memonheap_isSet(true);
       }
-      if (incoming.get(18)) {
+      if (incoming.get(19)) {
         struct.assigned_memoffheap = iprot.readDouble();
         struct.set_assigned_memoffheap_isSet(true);
       }
-      if (incoming.get(19)) {
+      if (incoming.get(20)) {
         struct.assigned_cpu = iprot.readDouble();
         struct.set_assigned_cpu_isSet(true);
       }
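
TopologyPageInfo gains an optional workers list (field 16) alongside its existing resource fields. A hedged sketch of consuming it on a client that already holds a TopologyPageInfo: the is_set_workers()/get_workers() accessors appear in the diff above, while get_host()/get_port() on WorkerSummary are assumed accessors used only for illustration.

import org.apache.storm.generated.TopologyPageInfo;
import org.apache.storm.generated.WorkerSummary;

public class TopologyWorkersSketch {
    /** Prints host:port per worker, or a note when the optional field is absent. */
    public static void printWorkers(TopologyPageInfo info) {
        if (!info.is_set_workers()) {
            System.out.println("workers not populated by this Nimbus");
            return;
        }
        for (WorkerSummary ws : info.get_workers()) {
            // get_host()/get_port() are assumed accessors on WorkerSummary.
            System.out.println(ws.get_host() + ":" + ws.get_port());
        }
    }
}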


[9/9] storm git commit: add STORM-1994 to CHANGELOG

Posted by ka...@apache.org.
add STORM-1994 to CHANGELOG


Project: http://git-wip-us.apache.org/repos/asf/storm/repo
Commit: http://git-wip-us.apache.org/repos/asf/storm/commit/dc20e9ce
Tree: http://git-wip-us.apache.org/repos/asf/storm/tree/dc20e9ce
Diff: http://git-wip-us.apache.org/repos/asf/storm/diff/dc20e9ce

Branch: refs/heads/1.x-branch
Commit: dc20e9ce026e8f8ebacce1a806d72043d407e55c
Parents: 39544ea
Author: Jungtaek Lim <ka...@gmail.com>
Authored: Mon Aug 22 17:34:20 2016 +0900
Committer: Jungtaek Lim <ka...@gmail.com>
Committed: Mon Aug 22 17:34:20 2016 +0900

----------------------------------------------------------------------
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/storm/blob/dc20e9ce/CHANGELOG.md
----------------------------------------------------------------------
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 49fa168..ce2d73d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,5 @@
 ## 1.1.0
+ * STORM-1994: Add table with per-topology & worker resource usage and components in (new) supervisor and topology pages
  * STORM-2042: Nimbus client connections not closed properly causing connection leaks
  * STORM-1766: A better algorithm server rack selection for RAS
  * STORM-1913: Additions and Improvements for Trident RAS API


[5/9] storm git commit: STORM-1994: Add table with per-topology and worker resource usage and components in (new) supervisor and topology pages

Posted by ka...@apache.org.
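
The diff below adds a getSupervisorPageInfo(id, host, is_include_sys) method to Nimbus.Iface plus the generated client and processor plumbing. A rough, non-authoritative sketch of invoking it follows, assuming the usual NimbusClient helper and that passing a host while leaving id null selects the host-based lookup.

import java.util.Map;
import org.apache.storm.generated.SupervisorPageInfo;
import org.apache.storm.utils.NimbusClient;
import org.apache.storm.utils.Utils;

public class SupervisorPageInfoClientSketch {
    public static void main(String[] args) throws Exception {
        Map conf = Utils.readStormConfig();
        NimbusClient nimbus = NimbusClient.getConfiguredClient(conf);
        try {
            // id is left null here on the assumption that the host argument drives the lookup.
            SupervisorPageInfo info =
                nimbus.getClient().getSupervisorPageInfo(null, "node-1.example.com", true);
            System.out.println(info.get_worker_summaries_size() + " worker summaries returned");
        } finally {
            nimbus.close();
        }
    }
}
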
http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/jvm/org/apache/storm/generated/Nimbus.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/org/apache/storm/generated/Nimbus.java b/storm-core/src/jvm/org/apache/storm/generated/Nimbus.java
index 94835b0..c05bf6b 100644
--- a/storm-core/src/jvm/org/apache/storm/generated/Nimbus.java
+++ b/storm-core/src/jvm/org/apache/storm/generated/Nimbus.java
@@ -142,6 +142,8 @@ public class Nimbus {
 
     public TopologyPageInfo getTopologyPageInfo(String id, String window, boolean is_include_sys) throws NotAliveException, AuthorizationException, org.apache.thrift.TException;
 
+    public SupervisorPageInfo getSupervisorPageInfo(String id, String host, boolean is_include_sys) throws NotAliveException, AuthorizationException, org.apache.thrift.TException;
+
     public ComponentPageInfo getComponentPageInfo(String topology_id, String component_id, String window, boolean is_include_sys) throws NotAliveException, AuthorizationException, org.apache.thrift.TException;
 
     public String getTopologyConf(String id) throws NotAliveException, AuthorizationException, org.apache.thrift.TException;
@@ -240,6 +242,8 @@ public class Nimbus {
 
     public void getTopologyPageInfo(String id, String window, boolean is_include_sys, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
+    public void getSupervisorPageInfo(String id, String host, boolean is_include_sys, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
     public void getComponentPageInfo(String topology_id, String component_id, String window, boolean is_include_sys, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
     public void getTopologyConf(String id, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
@@ -1253,6 +1257,37 @@ public class Nimbus {
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getTopologyPageInfo failed: unknown result");
     }
 
+    public SupervisorPageInfo getSupervisorPageInfo(String id, String host, boolean is_include_sys) throws NotAliveException, AuthorizationException, org.apache.thrift.TException
+    {
+      send_getSupervisorPageInfo(id, host, is_include_sys);
+      return recv_getSupervisorPageInfo();
+    }
+
+    public void send_getSupervisorPageInfo(String id, String host, boolean is_include_sys) throws org.apache.thrift.TException
+    {
+      getSupervisorPageInfo_args args = new getSupervisorPageInfo_args();
+      args.set_id(id);
+      args.set_host(host);
+      args.set_is_include_sys(is_include_sys);
+      sendBase("getSupervisorPageInfo", args);
+    }
+
+    public SupervisorPageInfo recv_getSupervisorPageInfo() throws NotAliveException, AuthorizationException, org.apache.thrift.TException
+    {
+      getSupervisorPageInfo_result result = new getSupervisorPageInfo_result();
+      receiveBase(result, "getSupervisorPageInfo");
+      if (result.is_set_success()) {
+        return result.success;
+      }
+      if (result.e != null) {
+        throw result.e;
+      }
+      if (result.aze != null) {
+        throw result.aze;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getSupervisorPageInfo failed: unknown result");
+    }
+
     public ComponentPageInfo getComponentPageInfo(String topology_id, String component_id, String window, boolean is_include_sys) throws NotAliveException, AuthorizationException, org.apache.thrift.TException
     {
       send_getComponentPageInfo(topology_id, component_id, window, is_include_sys);
@@ -2666,6 +2701,44 @@ public class Nimbus {
       }
     }
 
+    public void getSupervisorPageInfo(String id, String host, boolean is_include_sys, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      getSupervisorPageInfo_call method_call = new getSupervisorPageInfo_call(id, host, is_include_sys, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class getSupervisorPageInfo_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private String id;
+      private String host;
+      private boolean is_include_sys;
+      public getSupervisorPageInfo_call(String id, String host, boolean is_include_sys, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.id = id;
+        this.host = host;
+        this.is_include_sys = is_include_sys;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getSupervisorPageInfo", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        getSupervisorPageInfo_args args = new getSupervisorPageInfo_args();
+        args.set_id(id);
+        args.set_host(host);
+        args.set_is_include_sys(is_include_sys);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public SupervisorPageInfo getResult() throws NotAliveException, AuthorizationException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_getSupervisorPageInfo();
+      }
+    }
+
     public void getComponentPageInfo(String topology_id, String component_id, String window, boolean is_include_sys, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
       checkReady();
       getComponentPageInfo_call method_call = new getComponentPageInfo_call(topology_id, component_id, window, is_include_sys, resultHandler, this, ___protocolFactory, ___transport);
@@ -2885,6 +2958,7 @@ public class Nimbus {
       processMap.put("getTopologyInfo", new getTopologyInfo());
       processMap.put("getTopologyInfoWithOpts", new getTopologyInfoWithOpts());
       processMap.put("getTopologyPageInfo", new getTopologyPageInfo());
+      processMap.put("getSupervisorPageInfo", new getSupervisorPageInfo());
       processMap.put("getComponentPageInfo", new getComponentPageInfo());
       processMap.put("getTopologyConf", new getTopologyConf());
       processMap.put("getTopology", new getTopology());
@@ -3807,6 +3881,32 @@ public class Nimbus {
       }
     }
 
+    public static class getSupervisorPageInfo<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getSupervisorPageInfo_args> {
+      public getSupervisorPageInfo() {
+        super("getSupervisorPageInfo");
+      }
+
+      public getSupervisorPageInfo_args getEmptyArgsInstance() {
+        return new getSupervisorPageInfo_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public getSupervisorPageInfo_result getResult(I iface, getSupervisorPageInfo_args args) throws org.apache.thrift.TException {
+        getSupervisorPageInfo_result result = new getSupervisorPageInfo_result();
+        try {
+          result.success = iface.getSupervisorPageInfo(args.id, args.host, args.is_include_sys);
+        } catch (NotAliveException e) {
+          result.e = e;
+        } catch (AuthorizationException aze) {
+          result.aze = aze;
+        }
+        return result;
+      }
+    }
+
     public static class getComponentPageInfo<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getComponentPageInfo_args> {
       public getComponentPageInfo() {
         super("getComponentPageInfo");
@@ -3985,6 +4085,7 @@ public class Nimbus {
       processMap.put("getTopologyInfo", new getTopologyInfo());
       processMap.put("getTopologyInfoWithOpts", new getTopologyInfoWithOpts());
       processMap.put("getTopologyPageInfo", new getTopologyPageInfo());
+      processMap.put("getSupervisorPageInfo", new getSupervisorPageInfo());
       processMap.put("getComponentPageInfo", new getComponentPageInfo());
       processMap.put("getTopologyConf", new getTopologyConf());
       processMap.put("getTopology", new getTopology());
@@ -6169,6 +6270,68 @@ public class Nimbus {
       }
     }
 
+    public static class getSupervisorPageInfo<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getSupervisorPageInfo_args, SupervisorPageInfo> {
+      public getSupervisorPageInfo() {
+        super("getSupervisorPageInfo");
+      }
+
+      public getSupervisorPageInfo_args getEmptyArgsInstance() {
+        return new getSupervisorPageInfo_args();
+      }
+
+      public AsyncMethodCallback<SupervisorPageInfo> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<SupervisorPageInfo>() { 
+          public void onComplete(SupervisorPageInfo o) {
+            getSupervisorPageInfo_result result = new getSupervisorPageInfo_result();
+            result.success = o;
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            getSupervisorPageInfo_result result = new getSupervisorPageInfo_result();
+            if (e instanceof NotAliveException) {
+                        result.e = (NotAliveException) e;
+                        result.set_e_isSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof AuthorizationException) {
+                        result.aze = (AuthorizationException) e;
+                        result.set_aze_isSet(true);
+                        msg = result;
+            }
+             else 
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, getSupervisorPageInfo_args args, org.apache.thrift.async.AsyncMethodCallback<SupervisorPageInfo> resultHandler) throws TException {
+        iface.getSupervisorPageInfo(args.id, args.host, args.is_include_sys,resultHandler);
+      }
+    }
+
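The asynchronous variant passes the generated AsyncMethodCallback straight through to the AsyncIface implementation: onComplete serializes a REPLY frame, while onError maps the declared NotAliveException/AuthorizationException back onto the result struct and anything else into a TApplicationException. A hedged sketch of what a handler implementation could look like follows; buildSupervisorPageInfo is a hypothetical helper standing in for whatever actually assembles the page info, not something added by this patch.

    // Inside a class implementing Nimbus.AsyncIface (sketch only).
    @Override
    public void getSupervisorPageInfo(String id, String host, boolean isIncludeSys,
            AsyncMethodCallback<SupervisorPageInfo> resultHandler) throws TException {
        try {
            resultHandler.onComplete(buildSupervisorPageInfo(id, host, isIncludeSys));
        } catch (NotAliveException | AuthorizationException e) {
            // Declared exceptions come back to the caller as result fields, not transport errors.
            resultHandler.onError(e);
        }
    }
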
     public static class getComponentPageInfo<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getComponentPageInfo_args, ComponentPageInfo> {
       public getComponentPageInfo() {
         super("getComponentPageInfo");
@@ -17691,14 +17854,14 @@ public class Nimbus {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list738 = iprot.readListBegin();
-                  struct.success = new ArrayList<ProfileRequest>(_list738.size);
-                  ProfileRequest _elem739;
-                  for (int _i740 = 0; _i740 < _list738.size; ++_i740)
+                  org.apache.thrift.protocol.TList _list772 = iprot.readListBegin();
+                  struct.success = new ArrayList<ProfileRequest>(_list772.size);
+                  ProfileRequest _elem773;
+                  for (int _i774 = 0; _i774 < _list772.size; ++_i774)
                   {
-                    _elem739 = new ProfileRequest();
-                    _elem739.read(iprot);
-                    struct.success.add(_elem739);
+                    _elem773 = new ProfileRequest();
+                    _elem773.read(iprot);
+                    struct.success.add(_elem773);
                   }
                   iprot.readListEnd();
                 }
@@ -17724,9 +17887,9 @@ public class Nimbus {
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (ProfileRequest _iter741 : struct.success)
+            for (ProfileRequest _iter775 : struct.success)
             {
-              _iter741.write(oprot);
+              _iter775.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -17757,9 +17920,9 @@ public class Nimbus {
         if (struct.is_set_success()) {
           {
             oprot.writeI32(struct.success.size());
-            for (ProfileRequest _iter742 : struct.success)
+            for (ProfileRequest _iter776 : struct.success)
             {
-              _iter742.write(oprot);
+              _iter776.write(oprot);
             }
           }
         }
@@ -17771,14 +17934,14 @@ public class Nimbus {
         BitSet incoming = iprot.readBitSet(1);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list743 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList<ProfileRequest>(_list743.size);
-            ProfileRequest _elem744;
-            for (int _i745 = 0; _i745 < _list743.size; ++_i745)
+            org.apache.thrift.protocol.TList _list777 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<ProfileRequest>(_list777.size);
+            ProfileRequest _elem778;
+            for (int _i779 = 0; _i779 < _list777.size; ++_i779)
             {
-              _elem744 = new ProfileRequest();
-              _elem744.read(iprot);
-              struct.success.add(_elem744);
+              _elem778 = new ProfileRequest();
+              _elem778.read(iprot);
+              struct.success.add(_elem778);
             }
           }
           struct.set_success_isSet(true);
@@ -31397,15 +31560,869 @@ public class Nimbus {
       }
     }
 
-    private static class beginFileUpload_resultStandardSchemeFactory implements SchemeFactory {
-      public beginFileUpload_resultStandardScheme getScheme() {
-        return new beginFileUpload_resultStandardScheme();
+    private static class beginFileUpload_resultStandardSchemeFactory implements SchemeFactory {
+      public beginFileUpload_resultStandardScheme getScheme() {
+        return new beginFileUpload_resultStandardScheme();
+      }
+    }
+
+    private static class beginFileUpload_resultStandardScheme extends StandardScheme<beginFileUpload_result> {
+
+      public void read(org.apache.thrift.protocol.TProtocol iprot, beginFileUpload_result struct) throws org.apache.thrift.TException {
+        org.apache.thrift.protocol.TField schemeField;
+        iprot.readStructBegin();
+        while (true)
+        {
+          schemeField = iprot.readFieldBegin();
+          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+            break;
+          }
+          switch (schemeField.id) {
+            case 0: // SUCCESS
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+                struct.success = iprot.readString();
+                struct.set_success_isSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            case 1: // AZE
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+                struct.aze = new AuthorizationException();
+                struct.aze.read(iprot);
+                struct.set_aze_isSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            default:
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+          }
+          iprot.readFieldEnd();
+        }
+        iprot.readStructEnd();
+        struct.validate();
+      }
+
+      public void write(org.apache.thrift.protocol.TProtocol oprot, beginFileUpload_result struct) throws org.apache.thrift.TException {
+        struct.validate();
+
+        oprot.writeStructBegin(STRUCT_DESC);
+        if (struct.success != null) {
+          oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
+          oprot.writeString(struct.success);
+          oprot.writeFieldEnd();
+        }
+        if (struct.aze != null) {
+          oprot.writeFieldBegin(AZE_FIELD_DESC);
+          struct.aze.write(oprot);
+          oprot.writeFieldEnd();
+        }
+        oprot.writeFieldStop();
+        oprot.writeStructEnd();
+      }
+
+    }
+
+    private static class beginFileUpload_resultTupleSchemeFactory implements SchemeFactory {
+      public beginFileUpload_resultTupleScheme getScheme() {
+        return new beginFileUpload_resultTupleScheme();
+      }
+    }
+
+    private static class beginFileUpload_resultTupleScheme extends TupleScheme<beginFileUpload_result> {
+
+      @Override
+      public void write(org.apache.thrift.protocol.TProtocol prot, beginFileUpload_result struct) throws org.apache.thrift.TException {
+        TTupleProtocol oprot = (TTupleProtocol) prot;
+        BitSet optionals = new BitSet();
+        if (struct.is_set_success()) {
+          optionals.set(0);
+        }
+        if (struct.is_set_aze()) {
+          optionals.set(1);
+        }
+        oprot.writeBitSet(optionals, 2);
+        if (struct.is_set_success()) {
+          oprot.writeString(struct.success);
+        }
+        if (struct.is_set_aze()) {
+          struct.aze.write(oprot);
+        }
+      }
+
+      @Override
+      public void read(org.apache.thrift.protocol.TProtocol prot, beginFileUpload_result struct) throws org.apache.thrift.TException {
+        TTupleProtocol iprot = (TTupleProtocol) prot;
+        BitSet incoming = iprot.readBitSet(2);
+        if (incoming.get(0)) {
+          struct.success = iprot.readString();
+          struct.set_success_isSet(true);
+        }
+        if (incoming.get(1)) {
+          struct.aze = new AuthorizationException();
+          struct.aze.read(iprot);
+          struct.set_aze_isSet(true);
+        }
+      }
+    }
+
+  }
+
+  public static class uploadChunk_args implements org.apache.thrift.TBase<uploadChunk_args, uploadChunk_args._Fields>, java.io.Serializable, Cloneable, Comparable<uploadChunk_args>   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("uploadChunk_args");
+
+    private static final org.apache.thrift.protocol.TField LOCATION_FIELD_DESC = new org.apache.thrift.protocol.TField("location", org.apache.thrift.protocol.TType.STRING, (short)1);
+    private static final org.apache.thrift.protocol.TField CHUNK_FIELD_DESC = new org.apache.thrift.protocol.TField("chunk", org.apache.thrift.protocol.TType.STRING, (short)2);
+
+    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+    static {
+      schemes.put(StandardScheme.class, new uploadChunk_argsStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new uploadChunk_argsTupleSchemeFactory());
+    }
+
+    private String location; // required
+    private ByteBuffer chunk; // required
+
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+      LOCATION((short)1, "location"),
+      CHUNK((short)2, "chunk");
+
+      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+      static {
+        for (_Fields field : EnumSet.allOf(_Fields.class)) {
+          byName.put(field.getFieldName(), field);
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, or null if its not found.
+       */
+      public static _Fields findByThriftId(int fieldId) {
+        switch(fieldId) {
+          case 1: // LOCATION
+            return LOCATION;
+          case 2: // CHUNK
+            return CHUNK;
+          default:
+            return null;
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, throwing an exception
+       * if it is not found.
+       */
+      public static _Fields findByThriftIdOrThrow(int fieldId) {
+        _Fields fields = findByThriftId(fieldId);
+        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+        return fields;
+      }
+
+      /**
+       * Find the _Fields constant that matches name, or null if its not found.
+       */
+      public static _Fields findByName(String name) {
+        return byName.get(name);
+      }
+
+      private final short _thriftId;
+      private final String _fieldName;
+
+      _Fields(short thriftId, String fieldName) {
+        _thriftId = thriftId;
+        _fieldName = fieldName;
+      }
+
+      public short getThriftFieldId() {
+        return _thriftId;
+      }
+
+      public String getFieldName() {
+        return _fieldName;
+      }
+    }
+
+    // isset id assignments
+    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+    static {
+      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      tmpMap.put(_Fields.LOCATION, new org.apache.thrift.meta_data.FieldMetaData("location", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+      tmpMap.put(_Fields.CHUNK, new org.apache.thrift.meta_data.FieldMetaData("chunk", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
+      metaDataMap = Collections.unmodifiableMap(tmpMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(uploadChunk_args.class, metaDataMap);
+    }
+
+    public uploadChunk_args() {
+    }
+
+    public uploadChunk_args(
+      String location,
+      ByteBuffer chunk)
+    {
+      this();
+      this.location = location;
+      this.chunk = org.apache.thrift.TBaseHelper.copyBinary(chunk);
+    }
+
+    /**
+     * Performs a deep copy on <i>other</i>.
+     */
+    public uploadChunk_args(uploadChunk_args other) {
+      if (other.is_set_location()) {
+        this.location = other.location;
+      }
+      if (other.is_set_chunk()) {
+        this.chunk = org.apache.thrift.TBaseHelper.copyBinary(other.chunk);
+      }
+    }
+
+    public uploadChunk_args deepCopy() {
+      return new uploadChunk_args(this);
+    }
+
+    @Override
+    public void clear() {
+      this.location = null;
+      this.chunk = null;
+    }
+
+    public String get_location() {
+      return this.location;
+    }
+
+    public void set_location(String location) {
+      this.location = location;
+    }
+
+    public void unset_location() {
+      this.location = null;
+    }
+
+    /** Returns true if field location is set (has been assigned a value) and false otherwise */
+    public boolean is_set_location() {
+      return this.location != null;
+    }
+
+    public void set_location_isSet(boolean value) {
+      if (!value) {
+        this.location = null;
+      }
+    }
+
+    public byte[] get_chunk() {
+      set_chunk(org.apache.thrift.TBaseHelper.rightSize(chunk));
+      return chunk == null ? null : chunk.array();
+    }
+
+    public ByteBuffer buffer_for_chunk() {
+      return org.apache.thrift.TBaseHelper.copyBinary(chunk);
+    }
+
+    public void set_chunk(byte[] chunk) {
+      this.chunk = chunk == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(chunk, chunk.length));
+    }
+
+    public void set_chunk(ByteBuffer chunk) {
+      this.chunk = org.apache.thrift.TBaseHelper.copyBinary(chunk);
+    }
+
+    public void unset_chunk() {
+      this.chunk = null;
+    }
+
+    /** Returns true if field chunk is set (has been assigned a value) and false otherwise */
+    public boolean is_set_chunk() {
+      return this.chunk != null;
+    }
+
+    public void set_chunk_isSet(boolean value) {
+      if (!value) {
+        this.chunk = null;
+      }
+    }
+
+    public void setFieldValue(_Fields field, Object value) {
+      switch (field) {
+      case LOCATION:
+        if (value == null) {
+          unset_location();
+        } else {
+          set_location((String)value);
+        }
+        break;
+
+      case CHUNK:
+        if (value == null) {
+          unset_chunk();
+        } else {
+          set_chunk((ByteBuffer)value);
+        }
+        break;
+
+      }
+    }
+
+    public Object getFieldValue(_Fields field) {
+      switch (field) {
+      case LOCATION:
+        return get_location();
+
+      case CHUNK:
+        return get_chunk();
+
+      }
+      throw new IllegalStateException();
+    }
+
+    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+    public boolean isSet(_Fields field) {
+      if (field == null) {
+        throw new IllegalArgumentException();
+      }
+
+      switch (field) {
+      case LOCATION:
+        return is_set_location();
+      case CHUNK:
+        return is_set_chunk();
+      }
+      throw new IllegalStateException();
+    }
+
+    @Override
+    public boolean equals(Object that) {
+      if (that == null)
+        return false;
+      if (that instanceof uploadChunk_args)
+        return this.equals((uploadChunk_args)that);
+      return false;
+    }
+
+    public boolean equals(uploadChunk_args that) {
+      if (that == null)
+        return false;
+
+      boolean this_present_location = true && this.is_set_location();
+      boolean that_present_location = true && that.is_set_location();
+      if (this_present_location || that_present_location) {
+        if (!(this_present_location && that_present_location))
+          return false;
+        if (!this.location.equals(that.location))
+          return false;
+      }
+
+      boolean this_present_chunk = true && this.is_set_chunk();
+      boolean that_present_chunk = true && that.is_set_chunk();
+      if (this_present_chunk || that_present_chunk) {
+        if (!(this_present_chunk && that_present_chunk))
+          return false;
+        if (!this.chunk.equals(that.chunk))
+          return false;
+      }
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      List<Object> list = new ArrayList<Object>();
+
+      boolean present_location = true && (is_set_location());
+      list.add(present_location);
+      if (present_location)
+        list.add(location);
+
+      boolean present_chunk = true && (is_set_chunk());
+      list.add(present_chunk);
+      if (present_chunk)
+        list.add(chunk);
+
+      return list.hashCode();
+    }
+
+    @Override
+    public int compareTo(uploadChunk_args other) {
+      if (!getClass().equals(other.getClass())) {
+        return getClass().getName().compareTo(other.getClass().getName());
+      }
+
+      int lastComparison = 0;
+
+      lastComparison = Boolean.valueOf(is_set_location()).compareTo(other.is_set_location());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (is_set_location()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.location, other.location);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      lastComparison = Boolean.valueOf(is_set_chunk()).compareTo(other.is_set_chunk());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (is_set_chunk()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.chunk, other.chunk);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      return 0;
+    }
+
+    public _Fields fieldForId(int fieldId) {
+      return _Fields.findByThriftId(fieldId);
+    }
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+    }
+
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder("uploadChunk_args(");
+      boolean first = true;
+
+      sb.append("location:");
+      if (this.location == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.location);
+      }
+      first = false;
+      if (!first) sb.append(", ");
+      sb.append("chunk:");
+      if (this.chunk == null) {
+        sb.append("null");
+      } else {
+        org.apache.thrift.TBaseHelper.toString(this.chunk, sb);
+      }
+      first = false;
+      sb.append(")");
+      return sb.toString();
+    }
+
+    public void validate() throws org.apache.thrift.TException {
+      // check for required fields
+      // check for sub-struct validity
+    }
+
+    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+      try {
+        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+      try {
+        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private static class uploadChunk_argsStandardSchemeFactory implements SchemeFactory {
+      public uploadChunk_argsStandardScheme getScheme() {
+        return new uploadChunk_argsStandardScheme();
+      }
+    }
+
+    private static class uploadChunk_argsStandardScheme extends StandardScheme<uploadChunk_args> {
+
+      public void read(org.apache.thrift.protocol.TProtocol iprot, uploadChunk_args struct) throws org.apache.thrift.TException {
+        org.apache.thrift.protocol.TField schemeField;
+        iprot.readStructBegin();
+        while (true)
+        {
+          schemeField = iprot.readFieldBegin();
+          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+            break;
+          }
+          switch (schemeField.id) {
+            case 1: // LOCATION
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+                struct.location = iprot.readString();
+                struct.set_location_isSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            case 2: // CHUNK
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+                struct.chunk = iprot.readBinary();
+                struct.set_chunk_isSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            default:
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+          }
+          iprot.readFieldEnd();
+        }
+        iprot.readStructEnd();
+        struct.validate();
+      }
+
+      public void write(org.apache.thrift.protocol.TProtocol oprot, uploadChunk_args struct) throws org.apache.thrift.TException {
+        struct.validate();
+
+        oprot.writeStructBegin(STRUCT_DESC);
+        if (struct.location != null) {
+          oprot.writeFieldBegin(LOCATION_FIELD_DESC);
+          oprot.writeString(struct.location);
+          oprot.writeFieldEnd();
+        }
+        if (struct.chunk != null) {
+          oprot.writeFieldBegin(CHUNK_FIELD_DESC);
+          oprot.writeBinary(struct.chunk);
+          oprot.writeFieldEnd();
+        }
+        oprot.writeFieldStop();
+        oprot.writeStructEnd();
+      }
+
+    }
+
+    private static class uploadChunk_argsTupleSchemeFactory implements SchemeFactory {
+      public uploadChunk_argsTupleScheme getScheme() {
+        return new uploadChunk_argsTupleScheme();
+      }
+    }
+
+    private static class uploadChunk_argsTupleScheme extends TupleScheme<uploadChunk_args> {
+
+      @Override
+      public void write(org.apache.thrift.protocol.TProtocol prot, uploadChunk_args struct) throws org.apache.thrift.TException {
+        TTupleProtocol oprot = (TTupleProtocol) prot;
+        BitSet optionals = new BitSet();
+        if (struct.is_set_location()) {
+          optionals.set(0);
+        }
+        if (struct.is_set_chunk()) {
+          optionals.set(1);
+        }
+        oprot.writeBitSet(optionals, 2);
+        if (struct.is_set_location()) {
+          oprot.writeString(struct.location);
+        }
+        if (struct.is_set_chunk()) {
+          oprot.writeBinary(struct.chunk);
+        }
+      }
+
+      @Override
+      public void read(org.apache.thrift.protocol.TProtocol prot, uploadChunk_args struct) throws org.apache.thrift.TException {
+        TTupleProtocol iprot = (TTupleProtocol) prot;
+        BitSet incoming = iprot.readBitSet(2);
+        if (incoming.get(0)) {
+          struct.location = iprot.readString();
+          struct.set_location_isSet(true);
+        }
+        if (incoming.get(1)) {
+          struct.chunk = iprot.readBinary();
+          struct.set_chunk_isSet(true);
+        }
+      }
+    }
+
+  }
+
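The regenerated uploadChunk_args keeps the usual generated-struct shape: defensive ByteBuffer copies in the setters, isset tracking, and a standard plus a tuple serialization scheme. A small round-trip sketch is below, purely illustrative; the location string and chunk bytes are placeholders, the buffer size is arbitrary, and the org.apache.storm.generated package is assumed.

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;
    import org.apache.storm.generated.Nimbus;
    import org.apache.thrift.protocol.TCompactProtocol;
    import org.apache.thrift.transport.TMemoryBuffer;

    public class UploadChunkArgsRoundTrip {
        public static void main(String[] args) throws Exception {
            TMemoryBuffer buffer = new TMemoryBuffer(1024);
            Nimbus.uploadChunk_args out = new Nimbus.uploadChunk_args(
                "/nimbus/inbox/stormjar-1234.jar",                 // placeholder upload handle
                ByteBuffer.wrap("chunk-bytes".getBytes(StandardCharsets.UTF_8)));
            out.write(new TCompactProtocol(buffer));               // dispatches to the standard scheme

            Nimbus.uploadChunk_args in = new Nimbus.uploadChunk_args();
            in.read(new TCompactProtocol(buffer));
            System.out.println("round-trip equal: " + in.equals(out));
        }
    }
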
+  public static class uploadChunk_result implements org.apache.thrift.TBase<uploadChunk_result, uploadChunk_result._Fields>, java.io.Serializable, Cloneable, Comparable<uploadChunk_result>   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("uploadChunk_result");
+
+    private static final org.apache.thrift.protocol.TField AZE_FIELD_DESC = new org.apache.thrift.protocol.TField("aze", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+
+    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+    static {
+      schemes.put(StandardScheme.class, new uploadChunk_resultStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new uploadChunk_resultTupleSchemeFactory());
+    }
+
+    private AuthorizationException aze; // required
+
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+      AZE((short)1, "aze");
+
+      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+      static {
+        for (_Fields field : EnumSet.allOf(_Fields.class)) {
+          byName.put(field.getFieldName(), field);
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, or null if its not found.
+       */
+      public static _Fields findByThriftId(int fieldId) {
+        switch(fieldId) {
+          case 1: // AZE
+            return AZE;
+          default:
+            return null;
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, throwing an exception
+       * if it is not found.
+       */
+      public static _Fields findByThriftIdOrThrow(int fieldId) {
+        _Fields fields = findByThriftId(fieldId);
+        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+        return fields;
+      }
+
+      /**
+       * Find the _Fields constant that matches name, or null if its not found.
+       */
+      public static _Fields findByName(String name) {
+        return byName.get(name);
+      }
+
+      private final short _thriftId;
+      private final String _fieldName;
+
+      _Fields(short thriftId, String fieldName) {
+        _thriftId = thriftId;
+        _fieldName = fieldName;
+      }
+
+      public short getThriftFieldId() {
+        return _thriftId;
+      }
+
+      public String getFieldName() {
+        return _fieldName;
+      }
+    }
+
+    // isset id assignments
+    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+    static {
+      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      tmpMap.put(_Fields.AZE, new org.apache.thrift.meta_data.FieldMetaData("aze", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+      metaDataMap = Collections.unmodifiableMap(tmpMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(uploadChunk_result.class, metaDataMap);
+    }
+
+    public uploadChunk_result() {
+    }
+
+    public uploadChunk_result(
+      AuthorizationException aze)
+    {
+      this();
+      this.aze = aze;
+    }
+
+    /**
+     * Performs a deep copy on <i>other</i>.
+     */
+    public uploadChunk_result(uploadChunk_result other) {
+      if (other.is_set_aze()) {
+        this.aze = new AuthorizationException(other.aze);
+      }
+    }
+
+    public uploadChunk_result deepCopy() {
+      return new uploadChunk_result(this);
+    }
+
+    @Override
+    public void clear() {
+      this.aze = null;
+    }
+
+    public AuthorizationException get_aze() {
+      return this.aze;
+    }
+
+    public void set_aze(AuthorizationException aze) {
+      this.aze = aze;
+    }
+
+    public void unset_aze() {
+      this.aze = null;
+    }
+
+    /** Returns true if field aze is set (has been assigned a value) and false otherwise */
+    public boolean is_set_aze() {
+      return this.aze != null;
+    }
+
+    public void set_aze_isSet(boolean value) {
+      if (!value) {
+        this.aze = null;
+      }
+    }
+
+    public void setFieldValue(_Fields field, Object value) {
+      switch (field) {
+      case AZE:
+        if (value == null) {
+          unset_aze();
+        } else {
+          set_aze((AuthorizationException)value);
+        }
+        break;
+
+      }
+    }
+
+    public Object getFieldValue(_Fields field) {
+      switch (field) {
+      case AZE:
+        return get_aze();
+
+      }
+      throw new IllegalStateException();
+    }
+
+    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+    public boolean isSet(_Fields field) {
+      if (field == null) {
+        throw new IllegalArgumentException();
+      }
+
+      switch (field) {
+      case AZE:
+        return is_set_aze();
+      }
+      throw new IllegalStateException();
+    }
+
+    @Override
+    public boolean equals(Object that) {
+      if (that == null)
+        return false;
+      if (that instanceof uploadChunk_result)
+        return this.equals((uploadChunk_result)that);
+      return false;
+    }
+
+    public boolean equals(uploadChunk_result that) {
+      if (that == null)
+        return false;
+
+      boolean this_present_aze = true && this.is_set_aze();
+      boolean that_present_aze = true && that.is_set_aze();
+      if (this_present_aze || that_present_aze) {
+        if (!(this_present_aze && that_present_aze))
+          return false;
+        if (!this.aze.equals(that.aze))
+          return false;
+      }
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      List<Object> list = new ArrayList<Object>();
+
+      boolean present_aze = true && (is_set_aze());
+      list.add(present_aze);
+      if (present_aze)
+        list.add(aze);
+
+      return list.hashCode();
+    }
+
+    @Override
+    public int compareTo(uploadChunk_result other) {
+      if (!getClass().equals(other.getClass())) {
+        return getClass().getName().compareTo(other.getClass().getName());
+      }
+
+      int lastComparison = 0;
+
+      lastComparison = Boolean.valueOf(is_set_aze()).compareTo(other.is_set_aze());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (is_set_aze()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.aze, other.aze);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      return 0;
+    }
+
+    public _Fields fieldForId(int fieldId) {
+      return _Fields.findByThriftId(fieldId);
+    }
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+    }
+
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder("uploadChunk_result(");
+      boolean first = true;
+
+      sb.append("aze:");
+      if (this.aze == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.aze);
+      }
+      first = false;
+      sb.append(")");
+      return sb.toString();
+    }
+
+    public void validate() throws org.apache.thrift.TException {
+      // check for required fields
+      // check for sub-struct validity
+    }
+
+    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+      try {
+        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+      try {
+        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private static class uploadChunk_resultStandardSchemeFactory implements SchemeFactory {
+      public uploadChunk_resultStandardScheme getScheme() {
+        return new uploadChunk_resultStandardScheme();
       }
     }
 
-    private static class beginFileUpload_resultStandardScheme extends StandardScheme<beginFileUpload_result> {
+    private static class uploadChunk_resultStandardScheme extends StandardScheme<uploadChunk_result> {
 
-      public void read(org.apache.thrift.protocol.TProtocol iprot, beginFileUpload_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, uploadChunk_result struct) throws org.apache.thrift.TException {
         org.apache.thrift.protocol.TField schemeField;
         iprot.readStructBegin();
         while (true)
@@ -31415,14 +32432,6 @@ public class Nimbus {
             break;
           }
           switch (schemeField.id) {
-            case 0: // SUCCESS
-              if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-                struct.success = iprot.readString();
-                struct.set_success_isSet(true);
-              } else { 
-                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-              }
-              break;
             case 1: // AZE
               if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
                 struct.aze = new AuthorizationException();
@@ -31441,15 +32450,10 @@ public class Nimbus {
         struct.validate();
       }
 
-      public void write(org.apache.thrift.protocol.TProtocol oprot, beginFileUpload_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, uploadChunk_result struct) throws org.apache.thrift.TException {
         struct.validate();
 
         oprot.writeStructBegin(STRUCT_DESC);
-        if (struct.success != null) {
-          oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
-          oprot.writeString(struct.success);
-          oprot.writeFieldEnd();
-        }
         if (struct.aze != null) {
           oprot.writeFieldBegin(AZE_FIELD_DESC);
           struct.aze.write(oprot);
@@ -31461,42 +32465,32 @@ public class Nimbus {
 
     }
 
-    private static class beginFileUpload_resultTupleSchemeFactory implements SchemeFactory {
-      public beginFileUpload_resultTupleScheme getScheme() {
-        return new beginFileUpload_resultTupleScheme();
+    private static class uploadChunk_resultTupleSchemeFactory implements SchemeFactory {
+      public uploadChunk_resultTupleScheme getScheme() {
+        return new uploadChunk_resultTupleScheme();
       }
     }
 
-    private static class beginFileUpload_resultTupleScheme extends TupleScheme<beginFileUpload_result> {
+    private static class uploadChunk_resultTupleScheme extends TupleScheme<uploadChunk_result> {
 
       @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, beginFileUpload_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, uploadChunk_result struct) throws org.apache.thrift.TException {
         TTupleProtocol oprot = (TTupleProtocol) prot;
         BitSet optionals = new BitSet();
-        if (struct.is_set_success()) {
-          optionals.set(0);
-        }
         if (struct.is_set_aze()) {
-          optionals.set(1);
-        }
-        oprot.writeBitSet(optionals, 2);
-        if (struct.is_set_success()) {
-          oprot.writeString(struct.success);
+          optionals.set(0);
         }
+        oprot.writeBitSet(optionals, 1);
         if (struct.is_set_aze()) {
           struct.aze.write(oprot);
         }
       }
 
       @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, beginFileUpload_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, uploadChunk_result struct) throws org.apache.thrift.TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
-        BitSet incoming = iprot.readBitSet(2);
+        BitSet incoming = iprot.readBitSet(1);
         if (incoming.get(0)) {
-          struct.success = iprot.readString();
-          struct.set_success_isSet(true);
-        }
-        if (incoming.get(1)) {
           struct.aze = new AuthorizationException();
           struct.aze.read(iprot);
           struct.set_aze_isSet(true);
@@ -31506,25 +32500,22 @@ public class Nimbus {
 
   }
 
-  public static class uploadChunk_args implements org.apache.thrift.TBase<uploadChunk_args, uploadChunk_args._Fields>, java.io.Serializable, Cloneable, Comparable<uploadChunk_args>   {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("uploadChunk_args");
+  public static class finishFileUpload_args implements org.apache.thrift.TBase<finishFileUpload_args, finishFileUpload_args._Fields>, java.io.Serializable, Cloneable, Comparable<finishFileUpload_args>   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("finishFileUpload_args");
 
     private static final org.apache.thrift.protocol.TField LOCATION_FIELD_DESC = new org.apache.thrift.protocol.TField("location", org.apache.thrift.protocol.TType.STRING, (short)1);
-    private static final org.apache.thrift.protocol.TField CHUNK_FIELD_DESC = new org.apache.thrift.protocol.TField("chunk", org.apache.thrift.protocol.TType.STRING, (short)2);
 
     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
     static {
-      schemes.put(StandardScheme.class, new uploadChunk_argsStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new uploadChunk_argsTupleSchemeFactory());
+      schemes.put(StandardScheme.class, new finishFileUpload_argsStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new finishFileUpload_argsTupleSchemeFactory());
     }
 
     private String location; // required
-    private ByteBuffer chunk; // required
 
     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
     public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-      LOCATION((short)1, "location"),
-      CHUNK((short)2, "chunk");
+      LOCATION((short)1, "location");
 
       private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -31541,8 +32532,6 @@ public class Nimbus {
         switch(fieldId) {
           case 1: // LOCATION
             return LOCATION;
-          case 2: // CHUNK
-            return CHUNK;
           default:
             return null;
         }
@@ -31588,44 +32577,36 @@ public class Nimbus {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
       tmpMap.put(_Fields.LOCATION, new org.apache.thrift.meta_data.FieldMetaData("location", org.apache.thrift.TFieldRequirementType.DEFAULT, 
           new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-      tmpMap.put(_Fields.CHUNK, new org.apache.thrift.meta_data.FieldMetaData("chunk", org.apache.thrift.TFieldRequirementType.DEFAULT, 
-          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING          , true)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(uploadChunk_args.class, metaDataMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(finishFileUpload_args.class, metaDataMap);
     }
 
-    public uploadChunk_args() {
+    public finishFileUpload_args() {
     }
 
-    public uploadChunk_args(
-      String location,
-      ByteBuffer chunk)
+    public finishFileUpload_args(
+      String location)
     {
       this();
       this.location = location;
-      this.chunk = org.apache.thrift.TBaseHelper.copyBinary(chunk);
     }
 
     /**
      * Performs a deep copy on <i>other</i>.
      */
-    public uploadChunk_args(uploadChunk_args other) {
+    public finishFileUpload_args(finishFileUpload_args other) {
       if (other.is_set_location()) {
         this.location = other.location;
       }
-      if (other.is_set_chunk()) {
-        this.chunk = org.apache.thrift.TBaseHelper.copyBinary(other.chunk);
-      }
     }
 
-    public uploadChunk_args deepCopy() {
-      return new uploadChunk_args(this);
+    public finishFileUpload_args deepCopy() {
+      return new finishFileUpload_args(this);
     }
 
     @Override
     public void clear() {
       this.location = null;
-      this.chunk = null;
     }
 
     public String get_location() {
@@ -31651,38 +32632,6 @@ public class Nimbus {
       }
     }
 
-    public byte[] get_chunk() {
-      set_chunk(org.apache.thrift.TBaseHelper.rightSize(chunk));
-      return chunk == null ? null : chunk.array();
-    }
-
-    public ByteBuffer buffer_for_chunk() {
-      return org.apache.thrift.TBaseHelper.copyBinary(chunk);
-    }
-
-    public void set_chunk(byte[] chunk) {
-      this.chunk = chunk == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(chunk, chunk.length));
-    }
-
-    public void set_chunk(ByteBuffer chunk) {
-      this.chunk = org.apache.thrift.TBaseHelper.copyBinary(chunk);
-    }
-
-    public void unset_chunk() {
-      this.chunk = null;
-    }
-
-    /** Returns true if field chunk is set (has been assigned a value) and false otherwise */
-    public boolean is_set_chunk() {
-      return this.chunk != null;
-    }
-
-    public void set_chunk_isSet(boolean value) {
-      if (!value) {
-        this.chunk = null;
-      }
-    }
-
     public void setFieldValue(_Fields field, Object value) {
       switch (field) {
       case LOCATION:
@@ -31693,14 +32642,6 @@ public class Nimbus {
         }
         break;
 
-      case CHUNK:
-        if (value == null) {
-          unset_chunk();
-        } else {
-          set_chunk((ByteBuffer)value);
-        }
-        break;
-
       }
     }
 
@@ -31709,9 +32650,6 @@ public class Nimbus {
       case LOCATION:
         return get_location();
 
-      case CHUNK:
-        return get_chunk();
-
       }
       throw new IllegalStateException();
     }
@@ -31725,8 +32663,6 @@ public class Nimbus {
       switch (field) {
       case LOCATION:
         return is_set_location();
-      case CHUNK:
-        return is_set_chunk();
       }
       throw new IllegalStateException();
     }
@@ -31735,12 +32671,12 @@ public class Nimbus {
     public boolean equals(Object that) {
       if (that == null)
         return false;
-      if (that instanceof uploadChunk_args)
-        return this.equals((uploadChunk_args)that);
+      if (that instanceof finishFileUpload_args)
+        return this.equals((finishFileUpload_args)that);
       return false;
     }
 
-    public boolean equals(uploadChunk_args that) {
+    public boolean equals(finishFileUpload_args that) {
       if (that == null)
         return false;
 
@@ -31753,15 +32689,6 @@ public class Nimbus {
           return false;
       }
 
-      boolean this_present_chunk = true && this.is_set_chunk();
-      boolean that_present_chunk = true && that.is_set_chunk();
-      if (this_present_chunk || that_present_chunk) {
-        if (!(this_present_chunk && that_present_chunk))
-          return false;
-        if (!this.chunk.equals(that.chunk))
-          return false;
-      }
-
       return true;
     }
 
@@ -31774,16 +32701,11 @@ public class Nimbus {
       if (present_location)
         list.add(location);
 
-      boolean present_chunk = true && (is_set_chunk());
-      list.add(present_chunk);
-      if (present_chunk)
-        list.add(chunk);
-
       return list.hashCode();
     }
 
     @Override
-    public int compareTo(uploadChunk_args other) {
+    public int compareTo(finishFileUpload_args other) {
       if (!getClass().equals(other.getClass())) {
         return getClass().getName().compareTo(other.getClass().getName());
       }
@@ -31800,16 +32722,6 @@ public class Nimbus {
           return lastComparison;
         }
       }
-      lastComparison = Boolean.valueOf(is_set_chunk()).compareTo(other.is_set_chunk());
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-      if (is_set_chunk()) {
-        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.chunk, other.chunk);
-        if (lastComparison != 0) {
-          return lastComparison;
-        }
-      }
       return 0;
     }
 
@@ -31827,7 +32739,7 @@ public class Nimbus {
 
     @Override
     public String toString() {
-      StringBuilder sb = new StringBuilder("uploadChunk_args(");
+      StringBuilder sb = new StringBuilder("finishFileUpload_args(");
       boolean first = true;
 
       sb.append("location:");
@@ -31837,14 +32749,6 @@ public class Nimbus {
         sb.append(this.location);
       }
       first = false;
-      if (!first) sb.append(", ");
-      sb.append("chunk:");
-      if (this.chunk == null) {
-        sb.append("null");
-      } else {
-        org.apache.thrift.TBaseHelper.toString(this.chunk, sb);
-      }
-      first = false;
       sb.append(")");
       return sb.toString();
     }
@@ -31870,15 +32774,15 @@ public class Nimbus {
       }
     }
 
-    private static class uploadChunk_argsStandardSchemeFactory implements SchemeFactory {
-      public uploadChunk_argsStandardScheme getScheme() {
-        return new uploadChunk_argsStandardScheme();
+    private static class finishFileUpload_argsStandardSchemeFactory implements SchemeFactory {
+      public finishFileUpload_argsStandardScheme getScheme() {
+        return new finishFileUpload_argsStandardScheme();
       }
     }
 
-    private static class uploadChunk_argsStandardScheme extends StandardScheme<uploadChunk_args> {
+    private static class finishFileUpload_argsStandardScheme extends StandardScheme<finishFileUpload_args> {
 
-      public void read(org.apache.thrift.protocol.TProtocol iprot, uploadChunk_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, finishFileUpload_args struct) throws org.apache.thrift.TException {
         org.apache.thrift.protocol.TField schemeField;
         iprot.readStructBegin();
         while (true)
@@ -31896,14 +32800,6 @@ public class Nimbus {
                 org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
               }
               break;
-            case 2: // CHUNK
-              if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-                struct.chunk = iprot.readBinary();
-                struct.set_chunk_isSet(true);
-              } else { 
-                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-              }
-              break;
             default:
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
           }
@@ -31913,7 +32809,7 @@ public class Nimbus {
         struct.validate();
       }
 
-      public void write(org.apache.thrift.protocol.TProtocol oprot, uploadChunk_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, finishFileUpload_args struct) throws org.apache.thrift.TException {
         struct.validate();
 
         oprot.writeStructBegin(STRUCT_DESC);
@@ -31922,70 +32818,55 @@ public class Nimbus {
           oprot.writeString(struct.location);
           oprot.writeFieldEnd();
         }
-        if (struct.chunk != null) {
-          oprot.writeFieldBegin(CHUNK_FIELD_DESC);
-          oprot.writeBinary(struct.chunk);
-          oprot.writeFieldEnd();
-        }
         oprot.writeFieldStop();
         oprot.writeStructEnd();
       }
 
     }
 
-    private static class uploadChunk_argsTupleSchemeFactory implements SchemeFactory {
-      public uploadChunk_argsTupleScheme getScheme() {
-        return new uploadChunk_argsTupleScheme();
+    private static class finishFileUpload_argsTupleSchemeFactory implements SchemeFactory {
+      public finishFileUpload_argsTupleScheme getScheme() {
+        return new finishFileUpload_argsTupleScheme();
       }
     }
 
-    private static class uploadChunk_argsTupleScheme extends TupleScheme<uploadChunk_args> {
+    private static class finishFileUpload_argsTupleScheme extends TupleScheme<finishFileUpload_args> {
 
       @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, uploadChunk_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, finishFileUpload_args struct) throws org.apache.thrift.TException {
         TTupleProtocol oprot = (TTupleProtocol) prot;
         BitSet optionals = new BitSet();
         if (struct.is_set_location()) {
           optionals.set(0);
         }
-        if (struct.is_set_chunk()) {
-          optionals.set(1);
-        }
-        oprot.writeBitSet(optionals, 2);
+        oprot.writeBitSet(optionals, 1);
         if (struct.is_set_location()) {
           oprot.writeString(struct.location);
         }
-        if (struct.is_set_chunk()) {
-          oprot.writeBinary(struct.chunk);
-        }
       }
 
       @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, uploadChunk_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, finishFileUpload_args struct) throws org.apache.thrift.TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
-        BitSet incoming = iprot.readBitSet(2);
+        BitSet incoming = iprot.readBitSet(1);
         if (incoming.get(0)) {
           struct.location = iprot.readString();
           struct.set_location_isSet(true);
         }
-        if (incoming.get(1)) {
-          struct.chunk = iprot.readBinary();
-          struct.set_chunk_isSet(true);
-        }
       }
     }
 
   }
 
-  public static class uploadChunk_result implements org.apache.thrift.TBase<uploadChunk_result, uploadChunk_result._Fields>, java.io.Serializable, Cloneable, Comparable<uploadChunk_result>   {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("uploadChunk_result");
+  public static class finishFileUpload_result implements org.apache.thrift.TBase<finishFileUpload_result, finishFileUpload_result._Fields>, java.io.Serializable, Cloneable, Comparable<finishFileUpload_result>   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("finishFileUpload_result");
 
     private static final org.apache.thrift.protocol.TField AZE_FIELD_DESC = new org.apache.thrift.protocol.TField("aze", org.apache.thrift.protocol.TType.STRUCT, (short)1);
 
     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
     static {
-      schemes.put(StandardScheme.class, new uploadChunk_resultStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new uploadChunk_resultTupleSchemeFactory());
+      schemes.put(StandardScheme.class, new finishFileUpload_resultStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new finishFileUpload_resultTupleSchemeFactory());
     }
 
     private AuthorizationException aze; // required
@@ -32055,13 +32936,13 @@ public class Nimbus {
       tmpMap.put(_Fields.AZE, new org.apache.thrift.meta_data.FieldMetaData("aze", org.apache.thrift.TFieldRequirementType.DEFAULT, 
           new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(uploadChunk_result.class, metaDataMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(finishFileUpload_result.class, metaDataMap);
     }
 
-    public uploadChunk_result() {
+    public finishFileUpload_result() {
     }
 
-    public uploadChunk_result(
+    public finishFileUpload_result(
       AuthorizationException aze)
     {
       this();
@@ -32071,14 +32952,14 @@ public class Nimbus {
     /**
      * Performs a deep copy on <i>other</i>.
      */
-    public uploadChunk_result(uploadChunk_result other) {
+    public finishFileUpload_result(finishFileUpload_result other) {
       if (other.is_set_aze()) {
         this.aze = new AuthorizationException(other.aze);
       }
     }
 
-    public uploadChunk_result deepCopy() {
-      return new uploadChunk_result(this);
+    public finishFileUpload_result deepCopy() {
+      return new finishFileUpload_result(this);
     }
 
     @Override
@@ -32148,12 +33029,12 @@ public class Nimbus {
     public boolean equals(Object that) {
       if (that == null)
         return false;
-      if (that instanceof uploadChunk_result)
-        return this.equals((uploadChunk_result)that);
+      if (that instanceof finishFileUpload_result)
+        return this.equals((finishFileUpload_result)that);
       return false;
     }
 
-    public boolean equals(uploadChunk_result that) {
+    public boolean equals(finishFileUpload_result that) {
       if (that == null)
         return false;
 
@@ -32182,7 +33063,7 @@ public class Nimbus {
     }
 
     @Override
-    public int compareTo(uploadChunk_result other) {
+    public int compareTo(finishFileUpload_result other) {
       if (!getClass().equals(other.getClass())) {
         return getClass().getName().compareTo(other.getClass().getName());
       }
@@ -32216,7 +33097,7 @@ public class Nimbus {
 
     @Override
     public String toString() {
-      StringBuilder sb = new StringBuilder("uploadChunk_result(");
+      StringBuilder sb = new StringBuilder("finishFileUpload_result(");
       boolean first = true;
 
       sb.append("aze:");
@@ -32251,15 +33132,15 @@ public class Nimbus {
       }
     }
 
-    private static class uploadChunk_resultStandardSchemeFactory implements SchemeFactory {
-      public uploadChunk_resultStandardScheme getScheme() {
-        return new uploadChunk_resultStandardScheme();
+    private static class finishFileUpload_resultStandardSchemeFactory implements SchemeFactory {
+      public finishFileUpload_resultStandardScheme getScheme() {
+        return new finishFileUpload_resultStandardScheme();
       }
     }
 
-    private static class uploadChunk_resultStandardScheme extends StandardScheme<uploadChunk_result> {
+    private static class finishFileUpload_resultStandardScheme extends StandardScheme<finishFileUpload_result> {
 
-      public void read(org.apache.thrift.protocol.TProtocol iprot, uploadChunk_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, finishFileUpload_result struct) throws org.apache.thrift.TException {
         org.apache.thrift.protocol.TField schemeField;
         iprot.readStructBegin();
         while (true)
@@ -32287,7 +33168,7 @@ public class Nimbus {
         struct.validate();
       }
 
-      public void write(org.apache.thrift.protocol.TProtocol oprot, uploadChunk_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, finishFileUpload_result struct) throws org.apache.thrift.TException {
         struct.validate();
 
         oprot.writeStructBegin(STRUCT_DESC);
@@ -32302,16 +33183,16 @@ public class Nimbus {
 
     }
 
-    private static class uploadChunk_resultTupleSchemeFactory implements SchemeFactory {
-      public uploadChunk_resultTupleScheme getScheme() {
-        return new uploadChunk_resultTupleScheme();
+    private static class finishFileUpload_resultTupleSchemeFactory implements SchemeFactory {
+      public finishFileUpload_resultTupleScheme getScheme() {
+        return new finishFileUpload_resultTupleScheme();
       }
     }
 
-    private static class uploadChunk_resultTupleScheme extends TupleScheme<uploadChunk_result> {
+    private static class finishFileUpload_resultTupleScheme extends TupleScheme<finishFileUpload_result> {
 
       @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, uploadChunk_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, finishFileUpload_result struct) throws org.apache.thrift.TException {
         TTupleProtocol oprot = (TTupleProtocol) prot;
         BitSet optionals = new BitSet();
         if (struct.is_set_aze()) {
@@ -32324,7 +33205,7 @@ public class Nimbus {
       }
 
       @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, uploadChunk_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, finishFileUpload_result struct) throws org.apache.thrift.TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
         BitSet incoming = iprot.readBitSet(1);
         if (incoming.get(0)) {
@@ -32337,22 +33218,22 @@ public class Nimbus {
 
   }
 
-  public static class finishFileUpload_args implements org.apache.thrift.TBase<finishFileUpload_args, finishFileUpload_args._Fields>, java.io.Serializable, Cloneable, Comparable<finishFileUpload_args>   {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("finishFileUpload_args");
+  public static class beginFileDownload_args implements org.apache.thrift.TBase<beginFileDownload_args, beginFileDownload_args._Fields>, java.io.Serializable, Cloneable, Comparable<beginFileDownload_args>   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("beginFileDownload_args");
 
-    private static final org.apache.thrift.protocol.TField LOCATION_FIELD_DESC = new org.apache.thrift.protocol.TField("location", org.apache.thrift.protocol.TType.STRING, (short)1);
+    private static final org.apache.thrift.protocol.TField FILE_FIELD_DESC = new org.apache.thrift.protocol.TField("file", org.apache.thrift.protocol.TType.STRING, (short)1);
 
     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
     static {
-      schemes.put(StandardScheme.class, new finishFileUpload_argsStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new finishFileUpload_argsTupleSchemeFactory());
+      schemes.put(StandardScheme.class, new beginFileDownload_argsStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new beginFileDownload_argsTupleSchemeFactory());
     }
 
-    private String location; // required
+    private String file; // required
 
     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
     public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-      LOCATION((short)1, "location");
+      FILE((short)1, "file");
 
       private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -32367,8 +33248,8 @@ public class Nimbus {
        */
       public static _Fields findByThriftId(int fieldId) {
         switch(fieldId) {
-          case 1: // LOCATION
-            return LOCATION;
+          case 1: // FILE
+            return FILE;
           default:
             return null;
         }
@@ -32412,70 +33293,70 @@ public class Nimbus {
     public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-      tmpMap.put(_Fields.LOCATION, new org.apache.thrift.meta_data.FieldMetaData("location", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+      tmpMap.put(_Fields.FILE, new org.apache.thrift.meta_data.FieldMetaData("file", org.apache.thrift.TFieldRequirementType.DEFAULT, 
           new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(finishFileUpload_args.class, metaDataMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(beginFileDownload_args.class, metaDataMap);
     }
 
-    public finishFileUpload_args() {
+    public beginFileDownload_args() {
     }
 
-    public finishFileUpload_args(
-      String location)
+    public beginFileDownload_args(
+      String file)
     {
       this();
-      this.location = location;
+      this.file = file;
     }
 
     /**
      * Performs a deep copy on <i>other</i>.
      */
-    public finishFileUpload_args(finishFileUpload_args other) {
-      if (other.is_set_location()) {
-        this.location = other.location;
+    public beginFileDownload_args(beginFileDownload_args other) {
+      if (other.is_set_file()) {
+        this.file = other.file;
       }
     }
 
-    public finishFileUpload_args deepCopy() {
-      return new finishFileUpload_args(this);
+    public beginFileDownload_args deepCopy() {
+      return new beginFileDownload_args(this);
     }
 
     @Override
     public void clear() {
-      this.location = null;
+      this.file = null;
     }
 
-    public String get_location() {
-      return this.location;
+    public String get_file() {
+      return this.file;
     }
 
-    public void set_location(String location) {
-      this.location = location;
+    public void set_file(String file) {
+      this.file = file;
     }
 
-    public void unset_location() {
-      this.location = null;
+    public void unset_file() {
+      this.file = null;
     }
 
-    /** Returns true if field location is set (has been assigned a value) and false otherwise */
-    public boolean is_set_location() {
-      return this.location != null;
+    /** Returns true if field file is set (has been assigned a value) and false otherwise */
+    public boolean is_set_file() {
+      return this.file != null;
     }
 
-    public void set_location_isSet(boolean value) {
+    public void set_file_isSet(boolean value) {
       if (!value) {
-        this.location = null;
+        this.file = null;
       }
     }
 
     public void setFieldValue(_Fields field, Object value) {
       switch (field) {
-      case LOCATION:
+      case FILE:
         if (value == null) {
-          unset_location();
+          unset_file();
         } else {
-          set_location((String)value);
+          set_file((String)value);
         }
         break;
 
@@ -32484,8 +33365,8 @@ public class Nimbus {
 
     public Object getFieldValue(_Fields field) {
       switch (field) {
-      case LOCATION:
-        return get_location();
+      case FILE:
+        return get_file();
 
       }
       throw new IllegalStateException();
@@ -32498,8 +33379,8 @@ public class Nimbus {
       }
 
       switch (field) {
-      case LOCATION:
-        return is_set_location();
+      case FILE:
+        return is_set_file();
       }
       throw new IllegalStateException();
     }
@@ -32508,21 +33389,21 @@ public class Nimbus {
     public boolean equals(Object that) {
       if (that == null)
         return false;
-      if (that instanceof finishFileUpload_args)
-        return this.equals((finishFileUpload_args)that);
+      if (that instanceof beginFileDownload_args)
+        return this.equals((beginFileDownload_args)that);
       return false;
     }
 
-    public boolean equals(finishFileUpload_args that) {
+    public boolean equals(beginFileDownload_args that) {
       if (that == null)
         return false;
 
-      boolean this_present_location = true && this.is_set_location();
-      boolean that_present_location = true && that.is_set_location();
-      if (this_present_location || that_present_location) {
-        if (!(this_present_location && that_present_location))
+      boolean this_present_file = true && this.is_set_file();
+      boolean that_present_file = true && that.is_set_file();
+      if (this_present_file || that_present_file) {
+        if (!(this_present_file && that_present_file))
           return false;
-        if (!this.location.equals(that.location))
+        if (!this.file.equals(that.file))
           return false;
       }
 
@@ -32533,28 +33414,28 @@ public class Nimbus {
     public int hashCode() {
       List<Object> list = new ArrayList<Object>();
 
-      boolean present_location = true && (is_set_location());
-      list.add(present_location);
-      if (present_location)
-        list.add(location);
+      boolean present_file = true && (is_set_file());
+      list.add(present_file);
+      if (present_file)
+        list.add(file);
 
       return list.hashCode();
     }
 
     @Override
-    public int compareTo(finishFileUpload_args other) {
+    public int compareTo(beginFileDownload_args other) {
       if (!getClass().equals(other.getClass())) {
         return getClass().getName().compareTo(other.getClass().getName());
       }
 
       int lastComparison = 0;
 
-      lastComparison = Boolean.valueOf(is_set_location()).compareTo(other.is_set_location());
+      lastComparison = Boolean.valueOf(is_set_file()).compareTo(other.is_set_file());
       if (lastComparison != 0) {
         return lastComparison;
       }
-      if (is_set_location()) {
-        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.location, other.location);
+      if (is_set_file()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.file, other.file);
         if (lastComparison != 0) {
           return lastComparison;
         }
@@ -32576,14 +33457,14 @@ public class Nimbus {
 
     @Override
     public String toString() {
-      StringBuilder sb = new StringBuilder("finishFileUpload_args(");
+      StringBuilder sb = new StringBuilder("beginFileDownload_args(");
       boolean first = true;
 
-      sb.append("location:");
-      if (this.location == null) {
+      sb.append("file:");
+      if (this.file == null) {
         sb.append("null");
       } else {
-        sb.append(this.location);
+        sb.append(this.file);
       }
       first = false;
       sb.append(")");
@@ -32611,15 +33492,15 @@ public class Nimbus {
       }
     }
 
-    private static class finishFileUpload_argsStandardSchemeFactory implements SchemeFactory {
-      public finishFileUpload_argsStandardScheme getScheme() {
-        return new finishFileUpload_argsStandardScheme();
+    private static class beginFileDownload_argsStandardSchemeFactory implements SchemeFactory {
+      public beginFileDownload_argsStandardScheme getScheme() {
+        return new beginFileDownload_argsStandardScheme();
       }
     }
 
-    private static class finishFileUpload_argsStandardScheme extends StandardScheme<finishFileUpload_args> {
+    private static class beginFileDownload_argsStandardScheme extends StandardScheme<beginFileDownload_args> {
 
-      public void read(org.apache.thrift.protocol.TProtocol iprot, finishFileUpload_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, beginFileDownload_args struct) throws org.apache.thrift.TException {
         org.apache.thrift.protocol.TField schemeField;
         iprot.readStructBegin();
         while (true)
@@ -32629,10 +33510,10 @@ public class Nimbus {
             break;
           }
           switch (schemeField.id) {
-            case 1: // LOCATION
+            case 1: // FILE
               if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-                struct.location = iprot.readString();
-                struct.set_location_isSet(true);
+                struct.file = iprot.readString();
+                struct.set_file_isSet(true);
               } else { 
                 org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
               }
@@ -32646,13 +33527,13 @@ public class Nimbus {
         struct.validate();
       }
 
-      public void write(org.apache.thrift.protocol.TProtocol oprot, finishFileUpload_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, beginFileDownload_args struct) throws org.apache.thrift.TException {
         struct.validate();
 
         oprot.writeStructBegin(STRUCT_DESC);
-        if (struct.location != null) {
-          oprot.writeFieldBegin(LOCATION_FIELD_DESC);
-          oprot.writeString(struct.location);
+        if (struct.file != null) {
+          oprot.writeFieldBegin(FILE_FIELD_DESC);
+          oprot.writeString(struct.file);
           oprot.writeFieldEnd();
         }
         oprot.writeFieldStop();
@@ -32661,55 +33542,58 @@ public class Nimbus {
 
     }
 
-    private static class finishFileUpload_argsTupleSchemeFactory implements SchemeFactory {
-      public finishFileUpload_argsTupleScheme getScheme() {
-        return new finishFileUpload_argsTupleScheme();
+    private static class beginFileDownload_argsTupleSchemeFactory implements SchemeFactory {
+      public beginFileDownload_argsTupleScheme getScheme() {
+        return new beginFileDownload_argsTupleScheme();
       }
     }
 
-    private static class finishFileUpload_argsTupleScheme extends TupleScheme<finishFileUpload_args> {
+    private static class beginFileDownload_argsTupleScheme extends TupleScheme<beginFileDownload_args> {
 
       @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, finishFileUpload_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, beginFileDownload_args struct) throws org.apache.thrift.TException {
         TTupleProtocol oprot = (TTupleProtocol) prot;
         BitSet optionals = new BitSet();
-        if (struct.is_set_location()) {
+        if (struct.is_set_file()) {
           optionals.set(0);
         }
         oprot.writeBitSet(optionals, 1);
-        if (struct.is_set_location()) {
-          oprot.writeString(struct.location);
+        if (struct.is_set_file()) {
+          oprot.writeString(struct.file);
         }
       }
 
       @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, finishFileUpload_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, beginFileDownload_args struct) throws org.apache.thrift.TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
         BitSet incoming = iprot.readBitSet(1);
         if (incoming.get(0)) {
-          struct.location = iprot.readString();
-          struct.set_location_isSet(true);
+          struct.file = iprot.readString();
+          struct.set_file_isSet(true);
         }
       }
     }
 
   }
 
-  public static class finishFileUpload_result implements org.apache.thrift.TBase<finishFileUpload_result, finishFileUpload_result._Fields>, java.io.Serializable, Cloneable, Comparable<finishFileUpload_result>   {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("finishFileUpload_result");
+  public static class beginFileDownload_result implements org.apache.thrift.TBase<beginFileDownload_result, beginFileDownload_result._Fields>, java.io.Serializable, Cloneable, Comparable<beginFileDownload_result>   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("beginFileDownload_result");
 
+    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0);
     private static final org.apache.thrift.protocol.TField AZE_FIELD_DESC = new org.apache.thrift.protocol.TField("aze", org.apache.thrift.protocol.TType.STRUCT, (short)1);
 
     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
     static {
-      schemes.put(StandardScheme.class, new finishFileUpload_resultStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new finishFileUpload_resultTupleSchemeFactory());
+      schemes.put(StandardScheme.class, new beginFileDownload_resultStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new beginFileDownload_resultTupleSchemeFactory());
     }
 
+    private String success; // required
     private AuthorizationException aze; // required
 
     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
     public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+      SUCCESS((short)0, "success"),
       AZE((short)1, "aze");
 
       private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -32725,6 +33609,8 @@ public class Nimbus {
        */
       public static _Fields findByThriftId(int fieldId) {
         switch(fieldId) {
+          case 0: // SUCCESS
+            return SUCCESS;
           case 1: // AZE
             return AZE;
           default:
@@ -32770,40 +33656,71 @@ public class Nimbus {
     public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
       tmpMap.put(_Fields.AZE, new org.apache.thrift.meta_data.FieldMetaData("aze", org.apache.thrift.TFieldRequirementType.DEFAULT, 
           new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(finishFileUpload_result.class, metaDataMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(beginFileDownload_result.class, metaDataMap);
     }
 
-    public finishFileUpload_result() {
+    public beginFileDownload_result() {
     }
 
-    public finishFileUpload_result(
+    public beginFileDownload_result(
+      String success,
       AuthorizationException aze)
     {
       this();
+      this.success = success;
       this.aze = aze;
     }
 
     /**
      * Performs a deep copy on <i>other</i>.
      */
-    public finishFileUpload_result(finishFileUpload_result other) {
+    public beginFileDownload_result(beginFileDownload_result other) {
+      if (other.is_set_success()) {
+        this.success = other.success;
+      }
       if (other.is_set_aze()) {
         this.aze = new AuthorizationException(other.aze);
       }
     }
 
-    public finishFileUpload_result deepCopy() {
-      return new finishFileUpload_result(this);
+    public beginFileDownload_result deepCopy() {
+      return new beginFileDownload_result(this);
     }
 
     @Override
     public void clear() {
+      this.success = null;
       this.aze = null;
     }
 
+    public String get_success() {
+      return this.success;
+    }
+
+    public void set_success(String success) {
+      this.success = success;
+    }
+
+    public void unset_success() {
+      this.success = null;
+    }
+
+    /** Returns true if field success is set (has been assigned a value) and false otherwise */
+    public boolean is_set_success() {
+      return this.success != null;
+    }
+
+    public void set_success_isSet(boolean value) {
+      if (!value) {
+        this.success = null;
+      }
+    }
+
     public AuthorizationException get_aze() {
       return this.aze;
     }
@@ -32829,6 +33746,14 @@ public class Nimbus {
 
     public void setFieldValue(_Fields field, Object value) {
       switch (field) {
+      case SUCCESS:
+        if (value == null) {
+          unset_success();
+        } else {
+          set_success((String)value);
+        }
+        break;
+
       case AZE:
         if (value == null) {
           unset_aze();
@@ -32842,6 +33767,9 @@ public class Nimbus {
 
     public Object getFieldValue(_Fields field) {
       switch (field) {
+      case SUCCESS:
+        return get_success();
+
       case AZE:
         return get_aze();
 
@@ -32856,6 +33784,8 @@ public class Nimbus {
       }
 
       switch (field) {
+      case SUCCESS:
+        return is_set_success();
       case AZE:
         return is_set_aze();
       }
@@ -32866,15 +33796,24 @@ public class Nimbus {
     public boolean equals(Object that) {
       if (that == null)
         return false;
-      if (that instanceof finishFileUpload_result)
-        return this.equals((finishFileUpload_result)that);
+      if (that instanceof beginFileDownload_result)
+        return this.equals((beginFileDownload_result)that);
       return false;
     }
 
-    public boolean equals(finishFileUpload_result that) {
+    public boolean equals(beginFileDownload_result that) {
       if (that == null)
         return false;
 
+      boolean this_present_success = true && this.is_set_success();
+      boolean that_present_su

<TRUNCATED>

[2/9] storm git commit: STORM-1994: Add table with per-topology and worker resource usage and components in (new) supervisor and topology pages

Posted by ka...@apache.org.
http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/jvm/org/apache/storm/scheduler/resource/ResourceAwareScheduler.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/org/apache/storm/scheduler/resource/ResourceAwareScheduler.java b/storm-core/src/jvm/org/apache/storm/scheduler/resource/ResourceAwareScheduler.java
index 087fe6b..db0e263 100644
--- a/storm-core/src/jvm/org/apache/storm/scheduler/resource/ResourceAwareScheduler.java
+++ b/storm-core/src/jvm/org/apache/storm/scheduler/resource/ResourceAwareScheduler.java
@@ -112,6 +112,7 @@ public class ResourceAwareScheduler implements IScheduler {
         cluster.setStatusMap(schedulingState.cluster.getStatusMap());
         cluster.setSupervisorsResourcesMap(schedulingState.cluster.getSupervisorsResourcesMap());
         cluster.setTopologyResourcesMap(schedulingState.cluster.getTopologyResourcesMap());
+        cluster.setWorkerResourcesMap(schedulingState.cluster.getWorkerResourcesMap());
         //updating resources used by supervisor
         updateSupervisorsResources(cluster, topologies);
     }
@@ -243,6 +244,8 @@ public class ResourceAwareScheduler implements IScheduler {
             double assignedMemOffHeap = 0.0;
             double assignedCpu = 0.0;
 
+            Map<WorkerSlot, Double[]> workerResources = new HashMap<WorkerSlot, Double[]>();
+
             Set<String> nodesUsed = new HashSet<String>();
             for (Map.Entry<WorkerSlot, Collection<ExecutorDetails>> workerToTasksEntry : schedulerAssignmentMap.entrySet()) {
                 WorkerSlot targetSlot = workerToTasksEntry.getKey();
@@ -265,6 +268,11 @@ public class ResourceAwareScheduler implements IScheduler {
                 assignedMemOnHeap += targetSlot.getAllocatedMemOnHeap();
                 assignedMemOffHeap += targetSlot.getAllocatedMemOffHeap();
                 assignedCpu += targetSlot.getAllocatedCpu();
+
+                Double[] worker_resources = {
+                    requestedMemOnHeap, requestedMemOffHeap, requestedCpu,
+                    targetSlot.getAllocatedMemOnHeap(), targetSlot.getAllocatedMemOffHeap(), targetSlot.getAllocatedCpu()};
+                workerResources.put (targetSlot, worker_resources);
             }
 
             Double[] resources = {requestedMemOnHeap, requestedMemOffHeap, requestedCpu,
@@ -275,6 +283,7 @@ public class ResourceAwareScheduler implements IScheduler {
                     assignedMemOnHeap, assignedMemOffHeap, assignedCpu);
             //updating resources used for a topology
             this.schedulingState.cluster.setTopologyResources(td.getId(), resources);
+            this.schedulingState.cluster.setWorkerResources(td.getId(), workerResources);
             return true;
         } else {
             LOG.warn("schedulerAssignmentMap for topo {} is null. This shouldn't happen!", td.getName());
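For reference, the worker_resources array built in the loop above packs six doubles in a fixed order -- requested on-heap memory, requested off-heap memory, requested CPU, then the assigned counterparts -- which matches, in order, the requested_*/assigned_* fields added to the WorkerSummary struct in ttypes.py further down. A minimal illustrative sketch of that layout (not part of the commit; the helper name and sample numbers are invented):

    # Order matches the worker_resources array built per WorkerSlot above.
    WORKER_RESOURCE_FIELDS = (
        'requested_memonheap', 'requested_memoffheap', 'requested_cpu',
        'assigned_memonheap', 'assigned_memoffheap', 'assigned_cpu',
    )

    def worker_resources_to_dict(values):
        # Label a six-element per-worker resource array with WorkerSummary-style field names.
        return dict(zip(WORKER_RESOURCE_FIELDS, values))

    # Example with invented numbers:
    # worker_resources_to_dict([832.0, 0.0, 20.0, 832.0, 0.0, 20.0])
    # => {'requested_memonheap': 832.0, ..., 'assigned_cpu': 20.0}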

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/jvm/org/apache/storm/security/auth/authorizer/SimpleACLAuthorizer.java
----------------------------------------------------------------------
diff --git a/storm-core/src/jvm/org/apache/storm/security/auth/authorizer/SimpleACLAuthorizer.java b/storm-core/src/jvm/org/apache/storm/security/auth/authorizer/SimpleACLAuthorizer.java
index 1f84ee8..71705ce 100644
--- a/storm-core/src/jvm/org/apache/storm/security/auth/authorizer/SimpleACLAuthorizer.java
+++ b/storm-core/src/jvm/org/apache/storm/security/auth/authorizer/SimpleACLAuthorizer.java
@@ -42,7 +42,12 @@ import org.slf4j.LoggerFactory;
 public class SimpleACLAuthorizer implements IAuthorizer {
     private static final Logger LOG = LoggerFactory.getLogger(SimpleACLAuthorizer.class);
 
-    protected Set<String> _userCommands = new HashSet<>(Arrays.asList("submitTopology", "fileUpload", "getNimbusConf", "getClusterInfo"));
+    protected Set<String> _userCommands = new HashSet<>(Arrays.asList(
+            "submitTopology", 
+            "fileUpload", 
+            "getNimbusConf", 
+            "getClusterInfo",
+            "getSupervisorPageInfo"));
     protected Set<String> _supervisorCommands = new HashSet<>(Arrays.asList("fileDownload"));
     protected Set<String> _topoCommands = new HashSet<>(Arrays.asList(
             "killTopology",

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/py/storm/Nimbus-remote
----------------------------------------------------------------------
diff --git a/storm-core/src/py/storm/Nimbus-remote b/storm-core/src/py/storm/Nimbus-remote
index 5b8e396..b39050e 100644
--- a/storm-core/src/py/storm/Nimbus-remote
+++ b/storm-core/src/py/storm/Nimbus-remote
@@ -79,6 +79,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help':
   print('  TopologyInfo getTopologyInfo(string id)')
   print('  TopologyInfo getTopologyInfoWithOpts(string id, GetInfoOptions options)')
   print('  TopologyPageInfo getTopologyPageInfo(string id, string window, bool is_include_sys)')
+  print('  SupervisorPageInfo getSupervisorPageInfo(string id, string host, bool is_include_sys)')
   print('  ComponentPageInfo getComponentPageInfo(string topology_id, string component_id, string window, bool is_include_sys)')
   print('  string getTopologyConf(string id)')
   print('  StormTopology getTopology(string id)')
@@ -362,6 +363,12 @@ elif cmd == 'getTopologyPageInfo':
     sys.exit(1)
   pp.pprint(client.getTopologyPageInfo(args[0],args[1],eval(args[2]),))
 
+elif cmd == 'getSupervisorPageInfo':
+  if len(args) != 3:
+    print('getSupervisorPageInfo requires 3 args')
+    sys.exit(1)
+  pp.pprint(client.getSupervisorPageInfo(args[0],args[1],eval(args[2]),))
+
 elif cmd == 'getComponentPageInfo':
   if len(args) != 4:
     print('getComponentPageInfo requires 4 args')
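The new CLI command maps one-to-one onto the generated Python client method added in Nimbus.py below. A minimal sketch of the equivalent direct call, assuming an unsecured Nimbus reachable over the plain framed binary Thrift transport and using placeholder host and argument values (the exact transport depends on the cluster's security configuration):

    # Sketch only: assumes an unsecured Nimbus on nimbus-host:6627 with the
    # default framed binary transport; 'supervisor-id'/'supervisor-host' are placeholders.
    from thrift.transport import TSocket, TTransport
    from thrift.protocol import TBinaryProtocol
    from storm import Nimbus
    from storm.ttypes import NotAliveException, AuthorizationException

    transport = TTransport.TFramedTransport(TSocket.TSocket('nimbus-host', 6627))
    client = Nimbus.Client(TBinaryProtocol.TBinaryProtocol(transport))
    transport.open()
    try:
        # is_include_sys toggles inclusion of system components in the summaries.
        info = client.getSupervisorPageInfo('supervisor-id', 'supervisor-host', False)
        for ws in info.worker_summaries or []:
            print('%s:%d %s assigned_cpu=%s' % (ws.host, ws.port, ws.topology_id, ws.assigned_cpu))
    except (NotAliveException, AuthorizationException) as err:
        print('request rejected: %r' % err)
    finally:
        transport.close()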

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/py/storm/Nimbus.py
----------------------------------------------------------------------
diff --git a/storm-core/src/py/storm/Nimbus.py b/storm-core/src/py/storm/Nimbus.py
index 3b680ec..dba051a 100644
--- a/storm-core/src/py/storm/Nimbus.py
+++ b/storm-core/src/py/storm/Nimbus.py
@@ -315,6 +315,15 @@ class Iface:
     """
     pass
 
+  def getSupervisorPageInfo(self, id, host, is_include_sys):
+    """
+    Parameters:
+     - id
+     - host
+     - is_include_sys
+    """
+    pass
+
   def getComponentPageInfo(self, topology_id, component_id, window, is_include_sys):
     """
     Parameters:
@@ -1625,6 +1634,45 @@ class Client(Iface):
       raise result.aze
     raise TApplicationException(TApplicationException.MISSING_RESULT, "getTopologyPageInfo failed: unknown result")
 
+  def getSupervisorPageInfo(self, id, host, is_include_sys):
+    """
+    Parameters:
+     - id
+     - host
+     - is_include_sys
+    """
+    self.send_getSupervisorPageInfo(id, host, is_include_sys)
+    return self.recv_getSupervisorPageInfo()
+
+  def send_getSupervisorPageInfo(self, id, host, is_include_sys):
+    self._oprot.writeMessageBegin('getSupervisorPageInfo', TMessageType.CALL, self._seqid)
+    args = getSupervisorPageInfo_args()
+    args.id = id
+    args.host = host
+    args.is_include_sys = is_include_sys
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_getSupervisorPageInfo(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = getSupervisorPageInfo_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.success is not None:
+      return result.success
+    if result.e is not None:
+      raise result.e
+    if result.aze is not None:
+      raise result.aze
+    raise TApplicationException(TApplicationException.MISSING_RESULT, "getSupervisorPageInfo failed: unknown result")
+
   def getComponentPageInfo(self, topology_id, component_id, window, is_include_sys):
     """
     Parameters:
@@ -1850,6 +1898,7 @@ class Processor(Iface, TProcessor):
     self._processMap["getTopologyInfo"] = Processor.process_getTopologyInfo
     self._processMap["getTopologyInfoWithOpts"] = Processor.process_getTopologyInfoWithOpts
     self._processMap["getTopologyPageInfo"] = Processor.process_getTopologyPageInfo
+    self._processMap["getSupervisorPageInfo"] = Processor.process_getSupervisorPageInfo
     self._processMap["getComponentPageInfo"] = Processor.process_getComponentPageInfo
     self._processMap["getTopologyConf"] = Processor.process_getTopologyConf
     self._processMap["getTopology"] = Processor.process_getTopology
@@ -2739,6 +2788,31 @@ class Processor(Iface, TProcessor):
     oprot.writeMessageEnd()
     oprot.trans.flush()
 
+  def process_getSupervisorPageInfo(self, seqid, iprot, oprot):
+    args = getSupervisorPageInfo_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = getSupervisorPageInfo_result()
+    try:
+      result.success = self._handler.getSupervisorPageInfo(args.id, args.host, args.is_include_sys)
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except NotAliveException as e:
+      msg_type = TMessageType.REPLY
+      result.e = e
+    except AuthorizationException as aze:
+      msg_type = TMessageType.REPLY
+      result.aze = aze
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("getSupervisorPageInfo", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
   def process_getComponentPageInfo(self, seqid, iprot, oprot):
     args = getComponentPageInfo_args()
     args.read(iprot)
@@ -4723,11 +4797,11 @@ class getComponentPendingProfileActions_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype662, _size659) = iprot.readListBegin()
-          for _i663 in xrange(_size659):
-            _elem664 = ProfileRequest()
-            _elem664.read(iprot)
-            self.success.append(_elem664)
+          (_etype692, _size689) = iprot.readListBegin()
+          for _i693 in xrange(_size689):
+            _elem694 = ProfileRequest()
+            _elem694.read(iprot)
+            self.success.append(_elem694)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -4744,8 +4818,8 @@ class getComponentPendingProfileActions_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter665 in self.success:
-        iter665.write(oprot)
+      for iter695 in self.success:
+        iter695.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -8457,6 +8531,190 @@ class getTopologyPageInfo_result:
   def __ne__(self, other):
     return not (self == other)
 
+class getSupervisorPageInfo_args:
+  """
+  Attributes:
+   - id
+   - host
+   - is_include_sys
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'id', None, None, ), # 1
+    (2, TType.STRING, 'host', None, None, ), # 2
+    (3, TType.BOOL, 'is_include_sys', None, None, ), # 3
+  )
+
+  def __init__(self, id=None, host=None, is_include_sys=None,):
+    self.id = id
+    self.host = host
+    self.is_include_sys = is_include_sys
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.id = iprot.readString().decode('utf-8')
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRING:
+          self.host = iprot.readString().decode('utf-8')
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.BOOL:
+          self.is_include_sys = iprot.readBool()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('getSupervisorPageInfo_args')
+    if self.id is not None:
+      oprot.writeFieldBegin('id', TType.STRING, 1)
+      oprot.writeString(self.id.encode('utf-8'))
+      oprot.writeFieldEnd()
+    if self.host is not None:
+      oprot.writeFieldBegin('host', TType.STRING, 2)
+      oprot.writeString(self.host.encode('utf-8'))
+      oprot.writeFieldEnd()
+    if self.is_include_sys is not None:
+      oprot.writeFieldBegin('is_include_sys', TType.BOOL, 3)
+      oprot.writeBool(self.is_include_sys)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.id)
+    value = (value * 31) ^ hash(self.host)
+    value = (value * 31) ^ hash(self.is_include_sys)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class getSupervisorPageInfo_result:
+  """
+  Attributes:
+   - success
+   - e
+   - aze
+  """
+
+  thrift_spec = (
+    (0, TType.STRUCT, 'success', (SupervisorPageInfo, SupervisorPageInfo.thrift_spec), None, ), # 0
+    (1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
+    (2, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 2
+  )
+
+  def __init__(self, success=None, e=None, aze=None,):
+    self.success = success
+    self.e = e
+    self.aze = aze
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 0:
+        if ftype == TType.STRUCT:
+          self.success = SupervisorPageInfo()
+          self.success.read(iprot)
+        else:
+          iprot.skip(ftype)
+      elif fid == 1:
+        if ftype == TType.STRUCT:
+          self.e = NotAliveException()
+          self.e.read(iprot)
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRUCT:
+          self.aze = AuthorizationException()
+          self.aze.read(iprot)
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('getSupervisorPageInfo_result')
+    if self.success is not None:
+      oprot.writeFieldBegin('success', TType.STRUCT, 0)
+      self.success.write(oprot)
+      oprot.writeFieldEnd()
+    if self.e is not None:
+      oprot.writeFieldBegin('e', TType.STRUCT, 1)
+      self.e.write(oprot)
+      oprot.writeFieldEnd()
+    if self.aze is not None:
+      oprot.writeFieldBegin('aze', TType.STRUCT, 2)
+      self.aze.write(oprot)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.success)
+    value = (value * 31) ^ hash(self.e)
+    value = (value * 31) ^ hash(self.aze)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
 class getComponentPageInfo_args:
   """
   Attributes:

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/py/storm/ttypes.py
----------------------------------------------------------------------
diff --git a/storm-core/src/py/storm/ttypes.py b/storm-core/src/py/storm/ttypes.py
index 1934fb2..cd5bfde 100644
--- a/storm-core/src/py/storm/ttypes.py
+++ b/storm-core/src/py/storm/ttypes.py
@@ -5700,6 +5700,870 @@ class TopologyStats:
   def __ne__(self, other):
     return not (self == other)
 
+class SupervisorPageInfo:
+  """
+  Attributes:
+   - supervisor_summaries
+   - worker_summaries
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.LIST, 'supervisor_summaries', (TType.STRUCT,(SupervisorSummary, SupervisorSummary.thrift_spec)), None, ), # 1
+    (2, TType.LIST, 'worker_summaries', (TType.STRUCT,(WorkerSummary, WorkerSummary.thrift_spec)), None, ), # 2
+  )
+
+  def __init__(self, supervisor_summaries=None, worker_summaries=None,):
+    self.supervisor_summaries = supervisor_summaries
+    self.worker_summaries = worker_summaries
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.LIST:
+          self.supervisor_summaries = []
+          (_etype363, _size360) = iprot.readListBegin()
+          for _i364 in xrange(_size360):
+            _elem365 = SupervisorSummary()
+            _elem365.read(iprot)
+            self.supervisor_summaries.append(_elem365)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.LIST:
+          self.worker_summaries = []
+          (_etype369, _size366) = iprot.readListBegin()
+          for _i370 in xrange(_size366):
+            _elem371 = WorkerSummary()
+            _elem371.read(iprot)
+            self.worker_summaries.append(_elem371)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('SupervisorPageInfo')
+    if self.supervisor_summaries is not None:
+      oprot.writeFieldBegin('supervisor_summaries', TType.LIST, 1)
+      oprot.writeListBegin(TType.STRUCT, len(self.supervisor_summaries))
+      for iter372 in self.supervisor_summaries:
+        iter372.write(oprot)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    if self.worker_summaries is not None:
+      oprot.writeFieldBegin('worker_summaries', TType.LIST, 2)
+      oprot.writeListBegin(TType.STRUCT, len(self.worker_summaries))
+      for iter373 in self.worker_summaries:
+        iter373.write(oprot)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.supervisor_summaries)
+    value = (value * 31) ^ hash(self.worker_summaries)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
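For orientation, the new structs nest one level deep: a SupervisorPageInfo carries a list of SupervisorSummary plus a list of the WorkerSummary struct defined next. A minimal construction sketch (all field values are invented placeholders, not data from a real cluster):

    # Sketch only: placeholder values, shown to illustrate how the structs nest.
    from storm.ttypes import SupervisorPageInfo, WorkerSummary

    ws = WorkerSummary(
        supervisor_id='sup-1', host='node1.example.com', port=6700,
        topology_id='wordcount-1-1471852800', topology_name='wordcount',
        num_executors=4, component_to_num_tasks={'split': 2, 'count': 2},
        time_secs=1471852900, uptime_secs=100,
        requested_memonheap=832.0, requested_memoffheap=0.0, requested_cpu=40.0,
        assigned_memonheap=832.0, assigned_memoffheap=0.0, assigned_cpu=40.0)

    page = SupervisorPageInfo(supervisor_summaries=[], worker_summaries=[ws])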
+class WorkerSummary:
+  """
+  Attributes:
+   - supervisor_id
+   - host
+   - port
+   - topology_id
+   - topology_name
+   - num_executors
+   - component_to_num_tasks
+   - time_secs
+   - uptime_secs
+   - requested_memonheap
+   - requested_memoffheap
+   - requested_cpu
+   - assigned_memonheap
+   - assigned_memoffheap
+   - assigned_cpu
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'supervisor_id', None, None, ), # 1
+    (2, TType.STRING, 'host', None, None, ), # 2
+    (3, TType.I32, 'port', None, None, ), # 3
+    (4, TType.STRING, 'topology_id', None, None, ), # 4
+    (5, TType.STRING, 'topology_name', None, None, ), # 5
+    (6, TType.I32, 'num_executors', None, None, ), # 6
+    (7, TType.MAP, 'component_to_num_tasks', (TType.STRING,None,TType.I64,None), None, ), # 7
+    (8, TType.I32, 'time_secs', None, None, ), # 8
+    (9, TType.I32, 'uptime_secs', None, None, ), # 9
+    None, # 10
+    None, # 11
+    None, # 12
+    None, # 13
+    None, # 14
+    None, # 15
+    None, # 16
+    None, # 17
+    None, # 18
+    None, # 19
+    None, # 20
+    None, # 21
+    None, # 22
+    None, # 23
+    None, # 24
+    None, # 25
+    None, # 26
+    None, # 27
+    None, # 28
+    None, # 29
+    None, # 30
+    None, # 31
+    None, # 32
+    None, # 33
+    None, # 34
+    None, # 35
+    None, # 36
+    None, # 37
+    None, # 38
+    None, # 39
+    None, # 40
+    None, # 41
+    None, # 42
+    None, # 43
+    None, # 44
+    None, # 45
+    None, # 46
+    None, # 47
+    None, # 48
+    None, # 49
+    None, # 50
+    None, # 51
+    None, # 52
+    None, # 53
+    None, # 54
+    None, # 55
+    None, # 56
+    None, # 57
+    None, # 58
+    None, # 59
+    None, # 60
+    None, # 61
+    None, # 62
+    None, # 63
+    None, # 64
+    None, # 65
+    None, # 66
+    None, # 67
+    None, # 68
+    None, # 69
+    None, # 70
+    None, # 71
+    None, # 72
+    None, # 73
+    None, # 74
+    None, # 75
+    None, # 76
+    None, # 77
+    None, # 78
+    None, # 79
+    None, # 80
+    None, # 81
+    None, # 82
+    None, # 83
+    None, # 84
+    None, # 85
+    None, # 86
+    None, # 87
+    None, # 88
+    None, # 89
+    None, # 90
+    None, # 91
+    None, # 92
+    None, # 93
+    None, # 94
+    None, # 95
+    None, # 96
+    None, # 97
+    None, # 98
+    None, # 99
+    None, # 100
+    None, # 101
+    None, # 102
+    None, # 103
+    None, # 104
+    None, # 105
+    None, # 106
+    None, # 107
+    None, # 108
+    None, # 109
+    None, # 110
+    None, # 111
+    None, # 112
+    None, # 113
+    None, # 114
+    None, # 115
+    None, # 116
+    None, # 117
+    None, # 118
+    None, # 119
+    None, # 120
+    None, # 121
+    None, # 122
+    None, # 123
+    None, # 124
+    None, # 125
+    None, # 126
+    None, # 127
+    None, # 128
+    None, # 129
+    None, # 130
+    None, # 131
+    None, # 132
+    None, # 133
+    None, # 134
+    None, # 135
+    None, # 136
+    None, # 137
+    None, # 138
+    None, # 139
+    None, # 140
+    None, # 141
+    None, # 142
+    None, # 143
+    None, # 144
+    None, # 145
+    None, # 146
+    None, # 147
+    None, # 148
+    None, # 149
+    None, # 150
+    None, # 151
+    None, # 152
+    None, # 153
+    None, # 154
+    None, # 155
+    None, # 156
+    None, # 157
+    None, # 158
+    None, # 159
+    None, # 160
+    None, # 161
+    None, # 162
+    None, # 163
+    None, # 164
+    None, # 165
+    None, # 166
+    None, # 167
+    None, # 168
+    None, # 169
+    None, # 170
+    None, # 171
+    None, # 172
+    None, # 173
+    None, # 174
+    None, # 175
+    None, # 176
+    None, # 177
+    None, # 178
+    None, # 179
+    None, # 180
+    None, # 181
+    None, # 182
+    None, # 183
+    None, # 184
+    None, # 185
+    None, # 186
+    None, # 187
+    None, # 188
+    None, # 189
+    None, # 190
+    None, # 191
+    None, # 192
+    None, # 193
+    None, # 194
+    None, # 195
+    None, # 196
+    None, # 197
+    None, # 198
+    None, # 199
+    None, # 200
+    None, # 201
+    None, # 202
+    None, # 203
+    None, # 204
+    None, # 205
+    None, # 206
+    None, # 207
+    None, # 208
+    None, # 209
+    None, # 210
+    None, # 211
+    None, # 212
+    None, # 213
+    None, # 214
+    None, # 215
+    None, # 216
+    None, # 217
+    None, # 218
+    None, # 219
+    None, # 220
+    None, # 221
+    None, # 222
+    None, # 223
+    None, # 224
+    None, # 225
+    None, # 226
+    None, # 227
+    None, # 228
+    None, # 229
+    None, # 230
+    None, # 231
+    None, # 232
+    None, # 233
+    None, # 234
+    None, # 235
+    None, # 236
+    None, # 237
+    None, # 238
+    None, # 239
+    None, # 240
+    None, # 241
+    None, # 242
+    None, # 243
+    None, # 244
+    None, # 245
+    None, # 246
+    None, # 247
+    None, # 248
+    None, # 249
+    None, # 250
+    None, # 251
+    None, # 252
+    None, # 253
+    None, # 254
+    None, # 255
+    None, # 256
+    None, # 257
+    None, # 258
+    None, # 259
+    None, # 260
+    None, # 261
+    None, # 262
+    None, # 263
+    None, # 264
+    None, # 265
+    None, # 266
+    None, # 267
+    None, # 268
+    None, # 269
+    None, # 270
+    None, # 271
+    None, # 272
+    None, # 273
+    None, # 274
+    None, # 275
+    None, # 276
+    None, # 277
+    None, # 278
+    None, # 279
+    None, # 280
+    None, # 281
+    None, # 282
+    None, # 283
+    None, # 284
+    None, # 285
+    None, # 286
+    None, # 287
+    None, # 288
+    None, # 289
+    None, # 290
+    None, # 291
+    None, # 292
+    None, # 293
+    None, # 294
+    None, # 295
+    None, # 296
+    None, # 297
+    None, # 298
+    None, # 299
+    None, # 300
+    None, # 301
+    None, # 302
+    None, # 303
+    None, # 304
+    None, # 305
+    None, # 306
+    None, # 307
+    None, # 308
+    None, # 309
+    None, # 310
+    None, # 311
+    None, # 312
+    None, # 313
+    None, # 314
+    None, # 315
+    None, # 316
+    None, # 317
+    None, # 318
+    None, # 319
+    None, # 320
+    None, # 321
+    None, # 322
+    None, # 323
+    None, # 324
+    None, # 325
+    None, # 326
+    None, # 327
+    None, # 328
+    None, # 329
+    None, # 330
+    None, # 331
+    None, # 332
+    None, # 333
+    None, # 334
+    None, # 335
+    None, # 336
+    None, # 337
+    None, # 338
+    None, # 339
+    None, # 340
+    None, # 341
+    None, # 342
+    None, # 343
+    None, # 344
+    None, # 345
+    None, # 346
+    None, # 347
+    None, # 348
+    None, # 349
+    None, # 350
+    None, # 351
+    None, # 352
+    None, # 353
+    None, # 354
+    None, # 355
+    None, # 356
+    None, # 357
+    None, # 358
+    None, # 359
+    None, # 360
+    None, # 361
+    None, # 362
+    None, # 363
+    None, # 364
+    None, # 365
+    None, # 366
+    None, # 367
+    None, # 368
+    None, # 369
+    None, # 370
+    None, # 371
+    None, # 372
+    None, # 373
+    None, # 374
+    None, # 375
+    None, # 376
+    None, # 377
+    None, # 378
+    None, # 379
+    None, # 380
+    None, # 381
+    None, # 382
+    None, # 383
+    None, # 384
+    None, # 385
+    None, # 386
+    None, # 387
+    None, # 388
+    None, # 389
+    None, # 390
+    None, # 391
+    None, # 392
+    None, # 393
+    None, # 394
+    None, # 395
+    None, # 396
+    None, # 397
+    None, # 398
+    None, # 399
+    None, # 400
+    None, # 401
+    None, # 402
+    None, # 403
+    None, # 404
+    None, # 405
+    None, # 406
+    None, # 407
+    None, # 408
+    None, # 409
+    None, # 410
+    None, # 411
+    None, # 412
+    None, # 413
+    None, # 414
+    None, # 415
+    None, # 416
+    None, # 417
+    None, # 418
+    None, # 419
+    None, # 420
+    None, # 421
+    None, # 422
+    None, # 423
+    None, # 424
+    None, # 425
+    None, # 426
+    None, # 427
+    None, # 428
+    None, # 429
+    None, # 430
+    None, # 431
+    None, # 432
+    None, # 433
+    None, # 434
+    None, # 435
+    None, # 436
+    None, # 437
+    None, # 438
+    None, # 439
+    None, # 440
+    None, # 441
+    None, # 442
+    None, # 443
+    None, # 444
+    None, # 445
+    None, # 446
+    None, # 447
+    None, # 448
+    None, # 449
+    None, # 450
+    None, # 451
+    None, # 452
+    None, # 453
+    None, # 454
+    None, # 455
+    None, # 456
+    None, # 457
+    None, # 458
+    None, # 459
+    None, # 460
+    None, # 461
+    None, # 462
+    None, # 463
+    None, # 464
+    None, # 465
+    None, # 466
+    None, # 467
+    None, # 468
+    None, # 469
+    None, # 470
+    None, # 471
+    None, # 472
+    None, # 473
+    None, # 474
+    None, # 475
+    None, # 476
+    None, # 477
+    None, # 478
+    None, # 479
+    None, # 480
+    None, # 481
+    None, # 482
+    None, # 483
+    None, # 484
+    None, # 485
+    None, # 486
+    None, # 487
+    None, # 488
+    None, # 489
+    None, # 490
+    None, # 491
+    None, # 492
+    None, # 493
+    None, # 494
+    None, # 495
+    None, # 496
+    None, # 497
+    None, # 498
+    None, # 499
+    None, # 500
+    None, # 501
+    None, # 502
+    None, # 503
+    None, # 504
+    None, # 505
+    None, # 506
+    None, # 507
+    None, # 508
+    None, # 509
+    None, # 510
+    None, # 511
+    None, # 512
+    None, # 513
+    None, # 514
+    None, # 515
+    None, # 516
+    None, # 517
+    None, # 518
+    None, # 519
+    None, # 520
+    (521, TType.DOUBLE, 'requested_memonheap', None, None, ), # 521
+    (522, TType.DOUBLE, 'requested_memoffheap', None, None, ), # 522
+    (523, TType.DOUBLE, 'requested_cpu', None, None, ), # 523
+    (524, TType.DOUBLE, 'assigned_memonheap', None, None, ), # 524
+    (525, TType.DOUBLE, 'assigned_memoffheap', None, None, ), # 525
+    (526, TType.DOUBLE, 'assigned_cpu', None, None, ), # 526
+  )
+
+  def __init__(self, supervisor_id=None, host=None, port=None, topology_id=None, topology_name=None, num_executors=None, component_to_num_tasks=None, time_secs=None, uptime_secs=None, requested_memonheap=None, requested_memoffheap=None, requested_cpu=None, assigned_memonheap=None, assigned_memoffheap=None, assigned_cpu=None,):
+    self.supervisor_id = supervisor_id
+    self.host = host
+    self.port = port
+    self.topology_id = topology_id
+    self.topology_name = topology_name
+    self.num_executors = num_executors
+    self.component_to_num_tasks = component_to_num_tasks
+    self.time_secs = time_secs
+    self.uptime_secs = uptime_secs
+    self.requested_memonheap = requested_memonheap
+    self.requested_memoffheap = requested_memoffheap
+    self.requested_cpu = requested_cpu
+    self.assigned_memonheap = assigned_memonheap
+    self.assigned_memoffheap = assigned_memoffheap
+    self.assigned_cpu = assigned_cpu
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.supervisor_id = iprot.readString().decode('utf-8')
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRING:
+          self.host = iprot.readString().decode('utf-8')
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.I32:
+          self.port = iprot.readI32()
+        else:
+          iprot.skip(ftype)
+      elif fid == 4:
+        if ftype == TType.STRING:
+          self.topology_id = iprot.readString().decode('utf-8')
+        else:
+          iprot.skip(ftype)
+      elif fid == 5:
+        if ftype == TType.STRING:
+          self.topology_name = iprot.readString().decode('utf-8')
+        else:
+          iprot.skip(ftype)
+      elif fid == 6:
+        if ftype == TType.I32:
+          self.num_executors = iprot.readI32()
+        else:
+          iprot.skip(ftype)
+      elif fid == 7:
+        if ftype == TType.MAP:
+          self.component_to_num_tasks = {}
+          (_ktype375, _vtype376, _size374 ) = iprot.readMapBegin()
+          for _i378 in xrange(_size374):
+            _key379 = iprot.readString().decode('utf-8')
+            _val380 = iprot.readI64()
+            self.component_to_num_tasks[_key379] = _val380
+          iprot.readMapEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 8:
+        if ftype == TType.I32:
+          self.time_secs = iprot.readI32()
+        else:
+          iprot.skip(ftype)
+      elif fid == 9:
+        if ftype == TType.I32:
+          self.uptime_secs = iprot.readI32()
+        else:
+          iprot.skip(ftype)
+      elif fid == 521:
+        if ftype == TType.DOUBLE:
+          self.requested_memonheap = iprot.readDouble()
+        else:
+          iprot.skip(ftype)
+      elif fid == 522:
+        if ftype == TType.DOUBLE:
+          self.requested_memoffheap = iprot.readDouble()
+        else:
+          iprot.skip(ftype)
+      elif fid == 523:
+        if ftype == TType.DOUBLE:
+          self.requested_cpu = iprot.readDouble()
+        else:
+          iprot.skip(ftype)
+      elif fid == 524:
+        if ftype == TType.DOUBLE:
+          self.assigned_memonheap = iprot.readDouble()
+        else:
+          iprot.skip(ftype)
+      elif fid == 525:
+        if ftype == TType.DOUBLE:
+          self.assigned_memoffheap = iprot.readDouble()
+        else:
+          iprot.skip(ftype)
+      elif fid == 526:
+        if ftype == TType.DOUBLE:
+          self.assigned_cpu = iprot.readDouble()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('WorkerSummary')
+    if self.supervisor_id is not None:
+      oprot.writeFieldBegin('supervisor_id', TType.STRING, 1)
+      oprot.writeString(self.supervisor_id.encode('utf-8'))
+      oprot.writeFieldEnd()
+    if self.host is not None:
+      oprot.writeFieldBegin('host', TType.STRING, 2)
+      oprot.writeString(self.host.encode('utf-8'))
+      oprot.writeFieldEnd()
+    if self.port is not None:
+      oprot.writeFieldBegin('port', TType.I32, 3)
+      oprot.writeI32(self.port)
+      oprot.writeFieldEnd()
+    if self.topology_id is not None:
+      oprot.writeFieldBegin('topology_id', TType.STRING, 4)
+      oprot.writeString(self.topology_id.encode('utf-8'))
+      oprot.writeFieldEnd()
+    if self.topology_name is not None:
+      oprot.writeFieldBegin('topology_name', TType.STRING, 5)
+      oprot.writeString(self.topology_name.encode('utf-8'))
+      oprot.writeFieldEnd()
+    if self.num_executors is not None:
+      oprot.writeFieldBegin('num_executors', TType.I32, 6)
+      oprot.writeI32(self.num_executors)
+      oprot.writeFieldEnd()
+    if self.component_to_num_tasks is not None:
+      oprot.writeFieldBegin('component_to_num_tasks', TType.MAP, 7)
+      oprot.writeMapBegin(TType.STRING, TType.I64, len(self.component_to_num_tasks))
+      for kiter381,viter382 in self.component_to_num_tasks.items():
+        oprot.writeString(kiter381.encode('utf-8'))
+        oprot.writeI64(viter382)
+      oprot.writeMapEnd()
+      oprot.writeFieldEnd()
+    if self.time_secs is not None:
+      oprot.writeFieldBegin('time_secs', TType.I32, 8)
+      oprot.writeI32(self.time_secs)
+      oprot.writeFieldEnd()
+    if self.uptime_secs is not None:
+      oprot.writeFieldBegin('uptime_secs', TType.I32, 9)
+      oprot.writeI32(self.uptime_secs)
+      oprot.writeFieldEnd()
+    if self.requested_memonheap is not None:
+      oprot.writeFieldBegin('requested_memonheap', TType.DOUBLE, 521)
+      oprot.writeDouble(self.requested_memonheap)
+      oprot.writeFieldEnd()
+    if self.requested_memoffheap is not None:
+      oprot.writeFieldBegin('requested_memoffheap', TType.DOUBLE, 522)
+      oprot.writeDouble(self.requested_memoffheap)
+      oprot.writeFieldEnd()
+    if self.requested_cpu is not None:
+      oprot.writeFieldBegin('requested_cpu', TType.DOUBLE, 523)
+      oprot.writeDouble(self.requested_cpu)
+      oprot.writeFieldEnd()
+    if self.assigned_memonheap is not None:
+      oprot.writeFieldBegin('assigned_memonheap', TType.DOUBLE, 524)
+      oprot.writeDouble(self.assigned_memonheap)
+      oprot.writeFieldEnd()
+    if self.assigned_memoffheap is not None:
+      oprot.writeFieldBegin('assigned_memoffheap', TType.DOUBLE, 525)
+      oprot.writeDouble(self.assigned_memoffheap)
+      oprot.writeFieldEnd()
+    if self.assigned_cpu is not None:
+      oprot.writeFieldBegin('assigned_cpu', TType.DOUBLE, 526)
+      oprot.writeDouble(self.assigned_cpu)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.supervisor_id)
+    value = (value * 31) ^ hash(self.host)
+    value = (value * 31) ^ hash(self.port)
+    value = (value * 31) ^ hash(self.topology_id)
+    value = (value * 31) ^ hash(self.topology_name)
+    value = (value * 31) ^ hash(self.num_executors)
+    value = (value * 31) ^ hash(self.component_to_num_tasks)
+    value = (value * 31) ^ hash(self.time_secs)
+    value = (value * 31) ^ hash(self.uptime_secs)
+    value = (value * 31) ^ hash(self.requested_memonheap)
+    value = (value * 31) ^ hash(self.requested_memoffheap)
+    value = (value * 31) ^ hash(self.requested_cpu)
+    value = (value * 31) ^ hash(self.assigned_memonheap)
+    value = (value * 31) ^ hash(self.assigned_memoffheap)
+    value = (value * 31) ^ hash(self.assigned_cpu)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
 class TopologyPageInfo:
   """
   Attributes:
@@ -5718,6 +6582,7 @@ class TopologyPageInfo:
    - owner
    - debug_options
    - replication_count
+   - workers
    - requested_memonheap
    - requested_memoffheap
    - requested_cpu
@@ -5743,7 +6608,7 @@ class TopologyPageInfo:
     (13, TType.STRING, 'owner', None, None, ), # 13
     (14, TType.STRUCT, 'debug_options', (DebugOptions, DebugOptions.thrift_spec), None, ), # 14
     (15, TType.I32, 'replication_count', None, None, ), # 15
-    None, # 16
+    (16, TType.LIST, 'workers', (TType.STRUCT,(WorkerSummary, WorkerSummary.thrift_spec)), None, ), # 16
     None, # 17
     None, # 18
     None, # 19
@@ -6256,7 +7121,7 @@ class TopologyPageInfo:
     (526, TType.DOUBLE, 'assigned_cpu', None, None, ), # 526
   )
 
-  def __init__(self, id=None, name=None, uptime_secs=None, status=None, num_tasks=None, num_workers=None, num_executors=None, topology_conf=None, id_to_spout_agg_stats=None, id_to_bolt_agg_stats=None, sched_status=None, topology_stats=None, owner=None, debug_options=None, replication_count=None, requested_memonheap=None, requested_memoffheap=None, requested_cpu=None, assigned_memonheap=None, assigned_memoffheap=None, assigned_cpu=None,):
+  def __init__(self, id=None, name=None, uptime_secs=None, status=None, num_tasks=None, num_workers=None, num_executors=None, topology_conf=None, id_to_spout_agg_stats=None, id_to_bolt_agg_stats=None, sched_status=None, topology_stats=None, owner=None, debug_options=None, replication_count=None, workers=None, requested_memonheap=None, requested_memoffheap=None, requested_cpu=None, assigned_memonheap=None, assigned_memoffheap=None, assigned_cpu=None,):
     self.id = id
     self.name = name
     self.uptime_secs = uptime_secs
@@ -6272,6 +7137,7 @@ class TopologyPageInfo:
     self.owner = owner
     self.debug_options = debug_options
     self.replication_count = replication_count
+    self.workers = workers
     self.requested_memonheap = requested_memonheap
     self.requested_memoffheap = requested_memoffheap
     self.requested_cpu = requested_cpu
@@ -6331,24 +7197,24 @@ class TopologyPageInfo:
       elif fid == 9:
         if ftype == TType.MAP:
           self.id_to_spout_agg_stats = {}
-          (_ktype361, _vtype362, _size360 ) = iprot.readMapBegin()
-          for _i364 in xrange(_size360):
-            _key365 = iprot.readString().decode('utf-8')
-            _val366 = ComponentAggregateStats()
-            _val366.read(iprot)
-            self.id_to_spout_agg_stats[_key365] = _val366
+          (_ktype384, _vtype385, _size383 ) = iprot.readMapBegin()
+          for _i387 in xrange(_size383):
+            _key388 = iprot.readString().decode('utf-8')
+            _val389 = ComponentAggregateStats()
+            _val389.read(iprot)
+            self.id_to_spout_agg_stats[_key388] = _val389
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
       elif fid == 10:
         if ftype == TType.MAP:
           self.id_to_bolt_agg_stats = {}
-          (_ktype368, _vtype369, _size367 ) = iprot.readMapBegin()
-          for _i371 in xrange(_size367):
-            _key372 = iprot.readString().decode('utf-8')
-            _val373 = ComponentAggregateStats()
-            _val373.read(iprot)
-            self.id_to_bolt_agg_stats[_key372] = _val373
+          (_ktype391, _vtype392, _size390 ) = iprot.readMapBegin()
+          for _i394 in xrange(_size390):
+            _key395 = iprot.readString().decode('utf-8')
+            _val396 = ComponentAggregateStats()
+            _val396.read(iprot)
+            self.id_to_bolt_agg_stats[_key395] = _val396
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -6379,6 +7245,17 @@ class TopologyPageInfo:
           self.replication_count = iprot.readI32()
         else:
           iprot.skip(ftype)
+      elif fid == 16:
+        if ftype == TType.LIST:
+          self.workers = []
+          (_etype400, _size397) = iprot.readListBegin()
+          for _i401 in xrange(_size397):
+            _elem402 = WorkerSummary()
+            _elem402.read(iprot)
+            self.workers.append(_elem402)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
       elif fid == 521:
         if ftype == TType.DOUBLE:
           self.requested_memonheap = iprot.readDouble()
@@ -6454,17 +7331,17 @@ class TopologyPageInfo:
     if self.id_to_spout_agg_stats is not None:
       oprot.writeFieldBegin('id_to_spout_agg_stats', TType.MAP, 9)
       oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.id_to_spout_agg_stats))
-      for kiter374,viter375 in self.id_to_spout_agg_stats.items():
-        oprot.writeString(kiter374.encode('utf-8'))
-        viter375.write(oprot)
+      for kiter403,viter404 in self.id_to_spout_agg_stats.items():
+        oprot.writeString(kiter403.encode('utf-8'))
+        viter404.write(oprot)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.id_to_bolt_agg_stats is not None:
       oprot.writeFieldBegin('id_to_bolt_agg_stats', TType.MAP, 10)
       oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.id_to_bolt_agg_stats))
-      for kiter376,viter377 in self.id_to_bolt_agg_stats.items():
-        oprot.writeString(kiter376.encode('utf-8'))
-        viter377.write(oprot)
+      for kiter405,viter406 in self.id_to_bolt_agg_stats.items():
+        oprot.writeString(kiter405.encode('utf-8'))
+        viter406.write(oprot)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.sched_status is not None:
@@ -6487,6 +7364,13 @@ class TopologyPageInfo:
       oprot.writeFieldBegin('replication_count', TType.I32, 15)
       oprot.writeI32(self.replication_count)
       oprot.writeFieldEnd()
+    if self.workers is not None:
+      oprot.writeFieldBegin('workers', TType.LIST, 16)
+      oprot.writeListBegin(TType.STRUCT, len(self.workers))
+      for iter407 in self.workers:
+        iter407.write(oprot)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
     if self.requested_memonheap is not None:
       oprot.writeFieldBegin('requested_memonheap', TType.DOUBLE, 521)
       oprot.writeDouble(self.requested_memonheap)
@@ -6537,6 +7421,7 @@ class TopologyPageInfo:
     value = (value * 31) ^ hash(self.owner)
     value = (value * 31) ^ hash(self.debug_options)
     value = (value * 31) ^ hash(self.replication_count)
+    value = (value * 31) ^ hash(self.workers)
     value = (value * 31) ^ hash(self.requested_memonheap)
     value = (value * 31) ^ hash(self.requested_memoffheap)
     value = (value * 31) ^ hash(self.requested_cpu)
@@ -6734,59 +7619,59 @@ class ComponentPageInfo:
       elif fid == 7:
         if ftype == TType.MAP:
           self.window_to_stats = {}
-          (_ktype379, _vtype380, _size378 ) = iprot.readMapBegin()
-          for _i382 in xrange(_size378):
-            _key383 = iprot.readString().decode('utf-8')
-            _val384 = ComponentAggregateStats()
-            _val384.read(iprot)
-            self.window_to_stats[_key383] = _val384
+          (_ktype409, _vtype410, _size408 ) = iprot.readMapBegin()
+          for _i412 in xrange(_size408):
+            _key413 = iprot.readString().decode('utf-8')
+            _val414 = ComponentAggregateStats()
+            _val414.read(iprot)
+            self.window_to_stats[_key413] = _val414
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
       elif fid == 8:
         if ftype == TType.MAP:
           self.gsid_to_input_stats = {}
-          (_ktype386, _vtype387, _size385 ) = iprot.readMapBegin()
-          for _i389 in xrange(_size385):
-            _key390 = GlobalStreamId()
-            _key390.read(iprot)
-            _val391 = ComponentAggregateStats()
-            _val391.read(iprot)
-            self.gsid_to_input_stats[_key390] = _val391
+          (_ktype416, _vtype417, _size415 ) = iprot.readMapBegin()
+          for _i419 in xrange(_size415):
+            _key420 = GlobalStreamId()
+            _key420.read(iprot)
+            _val421 = ComponentAggregateStats()
+            _val421.read(iprot)
+            self.gsid_to_input_stats[_key420] = _val421
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
       elif fid == 9:
         if ftype == TType.MAP:
           self.sid_to_output_stats = {}
-          (_ktype393, _vtype394, _size392 ) = iprot.readMapBegin()
-          for _i396 in xrange(_size392):
-            _key397 = iprot.readString().decode('utf-8')
-            _val398 = ComponentAggregateStats()
-            _val398.read(iprot)
-            self.sid_to_output_stats[_key397] = _val398
+          (_ktype423, _vtype424, _size422 ) = iprot.readMapBegin()
+          for _i426 in xrange(_size422):
+            _key427 = iprot.readString().decode('utf-8')
+            _val428 = ComponentAggregateStats()
+            _val428.read(iprot)
+            self.sid_to_output_stats[_key427] = _val428
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
       elif fid == 10:
         if ftype == TType.LIST:
           self.exec_stats = []
-          (_etype402, _size399) = iprot.readListBegin()
-          for _i403 in xrange(_size399):
-            _elem404 = ExecutorAggregateStats()
-            _elem404.read(iprot)
-            self.exec_stats.append(_elem404)
+          (_etype432, _size429) = iprot.readListBegin()
+          for _i433 in xrange(_size429):
+            _elem434 = ExecutorAggregateStats()
+            _elem434.read(iprot)
+            self.exec_stats.append(_elem434)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 11:
         if ftype == TType.LIST:
           self.errors = []
-          (_etype408, _size405) = iprot.readListBegin()
-          for _i409 in xrange(_size405):
-            _elem410 = ErrorInfo()
-            _elem410.read(iprot)
-            self.errors.append(_elem410)
+          (_etype438, _size435) = iprot.readListBegin()
+          for _i439 in xrange(_size435):
+            _elem440 = ErrorInfo()
+            _elem440.read(iprot)
+            self.errors.append(_elem440)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -6848,39 +7733,39 @@ class ComponentPageInfo:
     if self.window_to_stats is not None:
       oprot.writeFieldBegin('window_to_stats', TType.MAP, 7)
       oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.window_to_stats))
-      for kiter411,viter412 in self.window_to_stats.items():
-        oprot.writeString(kiter411.encode('utf-8'))
-        viter412.write(oprot)
+      for kiter441,viter442 in self.window_to_stats.items():
+        oprot.writeString(kiter441.encode('utf-8'))
+        viter442.write(oprot)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.gsid_to_input_stats is not None:
       oprot.writeFieldBegin('gsid_to_input_stats', TType.MAP, 8)
       oprot.writeMapBegin(TType.STRUCT, TType.STRUCT, len(self.gsid_to_input_stats))
-      for kiter413,viter414 in self.gsid_to_input_stats.items():
-        kiter413.write(oprot)
-        viter414.write(oprot)
+      for kiter443,viter444 in self.gsid_to_input_stats.items():
+        kiter443.write(oprot)
+        viter444.write(oprot)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.sid_to_output_stats is not None:
       oprot.writeFieldBegin('sid_to_output_stats', TType.MAP, 9)
       oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.sid_to_output_stats))
-      for kiter415,viter416 in self.sid_to_output_stats.items():
-        oprot.writeString(kiter415.encode('utf-8'))
-        viter416.write(oprot)
+      for kiter445,viter446 in self.sid_to_output_stats.items():
+        oprot.writeString(kiter445.encode('utf-8'))
+        viter446.write(oprot)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.exec_stats is not None:
       oprot.writeFieldBegin('exec_stats', TType.LIST, 10)
       oprot.writeListBegin(TType.STRUCT, len(self.exec_stats))
-      for iter417 in self.exec_stats:
-        iter417.write(oprot)
+      for iter447 in self.exec_stats:
+        iter447.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.errors is not None:
       oprot.writeFieldBegin('errors', TType.LIST, 11)
       oprot.writeListBegin(TType.STRUCT, len(self.errors))
-      for iter418 in self.errors:
-        iter418.write(oprot)
+      for iter448 in self.errors:
+        iter448.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.eventlog_host is not None:
@@ -7047,11 +7932,11 @@ class RebalanceOptions:
       elif fid == 3:
         if ftype == TType.MAP:
           self.num_executors = {}
-          (_ktype420, _vtype421, _size419 ) = iprot.readMapBegin()
-          for _i423 in xrange(_size419):
-            _key424 = iprot.readString().decode('utf-8')
-            _val425 = iprot.readI32()
-            self.num_executors[_key424] = _val425
+          (_ktype450, _vtype451, _size449 ) = iprot.readMapBegin()
+          for _i453 in xrange(_size449):
+            _key454 = iprot.readString().decode('utf-8')
+            _val455 = iprot.readI32()
+            self.num_executors[_key454] = _val455
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -7076,9 +7961,9 @@ class RebalanceOptions:
     if self.num_executors is not None:
       oprot.writeFieldBegin('num_executors', TType.MAP, 3)
       oprot.writeMapBegin(TType.STRING, TType.I32, len(self.num_executors))
-      for kiter426,viter427 in self.num_executors.items():
-        oprot.writeString(kiter426.encode('utf-8'))
-        oprot.writeI32(viter427)
+      for kiter456,viter457 in self.num_executors.items():
+        oprot.writeString(kiter456.encode('utf-8'))
+        oprot.writeI32(viter457)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -7132,11 +8017,11 @@ class Credentials:
       if fid == 1:
         if ftype == TType.MAP:
           self.creds = {}
-          (_ktype429, _vtype430, _size428 ) = iprot.readMapBegin()
-          for _i432 in xrange(_size428):
-            _key433 = iprot.readString().decode('utf-8')
-            _val434 = iprot.readString().decode('utf-8')
-            self.creds[_key433] = _val434
+          (_ktype459, _vtype460, _size458 ) = iprot.readMapBegin()
+          for _i462 in xrange(_size458):
+            _key463 = iprot.readString().decode('utf-8')
+            _val464 = iprot.readString().decode('utf-8')
+            self.creds[_key463] = _val464
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -7153,9 +8038,9 @@ class Credentials:
     if self.creds is not None:
       oprot.writeFieldBegin('creds', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.creds))
-      for kiter435,viter436 in self.creds.items():
-        oprot.writeString(kiter435.encode('utf-8'))
-        oprot.writeString(viter436.encode('utf-8'))
+      for kiter465,viter466 in self.creds.items():
+        oprot.writeString(kiter465.encode('utf-8'))
+        oprot.writeString(viter466.encode('utf-8'))
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -7388,11 +8273,11 @@ class SettableBlobMeta:
       if fid == 1:
         if ftype == TType.LIST:
           self.acl = []
-          (_etype440, _size437) = iprot.readListBegin()
-          for _i441 in xrange(_size437):
-            _elem442 = AccessControl()
-            _elem442.read(iprot)
-            self.acl.append(_elem442)
+          (_etype470, _size467) = iprot.readListBegin()
+          for _i471 in xrange(_size467):
+            _elem472 = AccessControl()
+            _elem472.read(iprot)
+            self.acl.append(_elem472)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -7414,8 +8299,8 @@ class SettableBlobMeta:
     if self.acl is not None:
       oprot.writeFieldBegin('acl', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.acl))
-      for iter443 in self.acl:
-        iter443.write(oprot)
+      for iter473 in self.acl:
+        iter473.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.replication_factor is not None:
@@ -7560,10 +8445,10 @@ class ListBlobsResult:
       if fid == 1:
         if ftype == TType.LIST:
           self.keys = []
-          (_etype447, _size444) = iprot.readListBegin()
-          for _i448 in xrange(_size444):
-            _elem449 = iprot.readString().decode('utf-8')
-            self.keys.append(_elem449)
+          (_etype477, _size474) = iprot.readListBegin()
+          for _i478 in xrange(_size474):
+            _elem479 = iprot.readString().decode('utf-8')
+            self.keys.append(_elem479)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -7585,8 +8470,8 @@ class ListBlobsResult:
     if self.keys is not None:
       oprot.writeFieldBegin('keys', TType.LIST, 1)
       oprot.writeListBegin(TType.STRING, len(self.keys))
-      for iter450 in self.keys:
-        oprot.writeString(iter450.encode('utf-8'))
+      for iter480 in self.keys:
+        oprot.writeString(iter480.encode('utf-8'))
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.session is not None:
@@ -7781,31 +8666,31 @@ class SupervisorInfo:
       elif fid == 4:
         if ftype == TType.LIST:
           self.used_ports = []
-          (_etype454, _size451) = iprot.readListBegin()
-          for _i455 in xrange(_size451):
-            _elem456 = iprot.readI64()
-            self.used_ports.append(_elem456)
+          (_etype484, _size481) = iprot.readListBegin()
+          for _i485 in xrange(_size481):
+            _elem486 = iprot.readI64()
+            self.used_ports.append(_elem486)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 5:
         if ftype == TType.LIST:
           self.meta = []
-          (_etype460, _size457) = iprot.readListBegin()
-          for _i461 in xrange(_size457):
-            _elem462 = iprot.readI64()
-            self.meta.append(_elem462)
+          (_etype490, _size487) = iprot.readListBegin()
+          for _i491 in xrange(_size487):
+            _elem492 = iprot.readI64()
+            self.meta.append(_elem492)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 6:
         if ftype == TType.MAP:
           self.scheduler_meta = {}
-          (_ktype464, _vtype465, _size463 ) = iprot.readMapBegin()
-          for _i467 in xrange(_size463):
-            _key468 = iprot.readString().decode('utf-8')
-            _val469 = iprot.readString().decode('utf-8')
-            self.scheduler_meta[_key468] = _val469
+          (_ktype494, _vtype495, _size493 ) = iprot.readMapBegin()
+          for _i497 in xrange(_size493):
+            _key498 = iprot.readString().decode('utf-8')
+            _val499 = iprot.readString().decode('utf-8')
+            self.scheduler_meta[_key498] = _val499
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -7822,11 +8707,11 @@ class SupervisorInfo:
       elif fid == 9:
         if ftype == TType.MAP:
           self.resources_map = {}
-          (_ktype471, _vtype472, _size470 ) = iprot.readMapBegin()
-          for _i474 in xrange(_size470):
-            _key475 = iprot.readString().decode('utf-8')
-            _val476 = iprot.readDouble()
-            self.resources_map[_key475] = _val476
+          (_ktype501, _vtype502, _size500 ) = iprot.readMapBegin()
+          for _i504 in xrange(_size500):
+            _key505 = iprot.readString().decode('utf-8')
+            _val506 = iprot.readDouble()
+            self.resources_map[_key505] = _val506
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -7855,23 +8740,23 @@ class SupervisorInfo:
     if self.used_ports is not None:
       oprot.writeFieldBegin('used_ports', TType.LIST, 4)
       oprot.writeListBegin(TType.I64, len(self.used_ports))
-      for iter477 in self.used_ports:
-        oprot.writeI64(iter477)
+      for iter507 in self.used_ports:
+        oprot.writeI64(iter507)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.meta is not None:
       oprot.writeFieldBegin('meta', TType.LIST, 5)
       oprot.writeListBegin(TType.I64, len(self.meta))
-      for iter478 in self.meta:
-        oprot.writeI64(iter478)
+      for iter508 in self.meta:
+        oprot.writeI64(iter508)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.scheduler_meta is not None:
       oprot.writeFieldBegin('scheduler_meta', TType.MAP, 6)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.scheduler_meta))
-      for kiter479,viter480 in self.scheduler_meta.items():
-        oprot.writeString(kiter479.encode('utf-8'))
-        oprot.writeString(viter480.encode('utf-8'))
+      for kiter509,viter510 in self.scheduler_meta.items():
+        oprot.writeString(kiter509.encode('utf-8'))
+        oprot.writeString(viter510.encode('utf-8'))
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.uptime_secs is not None:
@@ -7885,9 +8770,9 @@ class SupervisorInfo:
     if self.resources_map is not None:
       oprot.writeFieldBegin('resources_map', TType.MAP, 9)
       oprot.writeMapBegin(TType.STRING, TType.DOUBLE, len(self.resources_map))
-      for kiter481,viter482 in self.resources_map.items():
-        oprot.writeString(kiter481.encode('utf-8'))
-        oprot.writeDouble(viter482)
+      for kiter511,viter512 in self.resources_map.items():
+        oprot.writeString(kiter511.encode('utf-8'))
+        oprot.writeDouble(viter512)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -7959,10 +8844,10 @@ class NodeInfo:
       elif fid == 2:
         if ftype == TType.SET:
           self.port = set()
-          (_etype486, _size483) = iprot.readSetBegin()
-          for _i487 in xrange(_size483):
-            _elem488 = iprot.readI64()
-            self.port.add(_elem488)
+          (_etype516, _size513) = iprot.readSetBegin()
+          for _i517 in xrange(_size513):
+            _elem518 = iprot.readI64()
+            self.port.add(_elem518)
           iprot.readSetEnd()
         else:
           iprot.skip(ftype)
@@ -7983,8 +8868,8 @@ class NodeInfo:
     if self.port is not None:
       oprot.writeFieldBegin('port', TType.SET, 2)
       oprot.writeSetBegin(TType.I64, len(self.port))
-      for iter489 in self.port:
-        oprot.writeI64(iter489)
+      for iter519 in self.port:
+        oprot.writeI64(iter519)
       oprot.writeSetEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -8165,57 +9050,57 @@ class Assignment:
       elif fid == 2:
         if ftype == TType.MAP:
           self.node_host = {}
-          (_ktype491, _vtype492, _size490 ) = iprot.readMapBegin()
-          for _i494 in xrange(_size490):
-            _key495 = iprot.readString().decode('utf-8')
-            _val496 = iprot.readString().decode('utf-8')
-            self.node_host[_key495] = _val496
+          (_ktype521, _vtype522, _size520 ) = iprot.readMapBegin()
+          for _i524 in xrange(_size520):
+            _key525 = iprot.readString().decode('utf-8')
+            _val526 = iprot.readString().decode('utf-8')
+            self.node_host[_key525] = _val526
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
       elif fid == 3:
         if ftype == TType.MAP:
           self.executor_node_port = {}
-          (_ktype498, _vtype499, _size497 ) = iprot.readMapBegin()
-          for _i501 in xrange(_size497):
-            _key502 = []
-            (_etype507, _size504) = iprot.readListBegin()
-            for _i508 in xrange(_size504):
-              _elem509 = iprot.readI64()
-              _key502.append(_elem509)
+          (_ktype528, _vtype529, _size527 ) = iprot.readMapBegin()
+          for _i531 in xrange(_size527):
+            _key532 = []
+            (_etype537, _size534) = iprot.readListBegin()
+            for _i538 in xrange(_size534):
+              _elem539 = iprot.readI64()
+              _key532.append(_elem539)
             iprot.readListEnd()
-            _val503 = NodeInfo()
-            _val503.read(iprot)
-            self.executor_node_port[_key502] = _val503
+            _val533 = NodeInfo()
+            _val533.read(iprot)
+            self.executor_node_port[_key532] = _val533
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
       elif fid == 4:
         if ftype == TType.MAP:
           self.executor_start_time_secs = {}
-          (_ktype511, _vtype512, _size510 ) = iprot.readMapBegin()
-          for _i514 in xrange(_size510):
-            _key515 = []
-            (_etype520, _size517) = iprot.readListBegin()
-            for _i521 in xrange(_size517):
-              _elem522 = iprot.readI64()
-              _key515.append(_elem522)
+          (_ktype541, _vtype542, _size540 ) = iprot.readMapBegin()
+          for _i544 in xrange(_size540):
+            _key545 = []
+            (_etype550, _size547) = iprot.readListBegin()
+            for _i551 in xrange(_size547):
+              _elem552 = iprot.readI64()
+              _key545.append(_elem552)
             iprot.readListEnd()
-            _val516 = iprot.readI64()
-            self.executor_start_time_secs[_key515] = _val516
+            _val546 = iprot.readI64()
+            self.executor_start_time_secs[_key545] = _val546
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
       elif fid == 5:
         if ftype == TType.MAP:
           self.worker_resources = {}
-          (_ktype524, _vtype525, _size523 ) = iprot.readMapBegin()
-          for _i527 in xrange(_size523):
-            _key528 = NodeInfo()
-            _key528.read(iprot)
-            _val529 = WorkerResources()
-            _val529.read(iprot)
-            self.worker_resources[_key528] = _val529
+          (_ktype554, _vtype555, _size553 ) = iprot.readMapBegin()
+          for _i557 in xrange(_size553):
+            _key558 = NodeInfo()
+            _key558.read(iprot)
+            _val559 = WorkerResources()
+            _val559.read(iprot)
+            self.worker_resources[_key558] = _val559
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -8236,39 +9121,39 @@ class Assignment:
     if self.node_host is not None:
       oprot.writeFieldBegin('node_host', TType.MAP, 2)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.node_host))
-      for kiter530,viter531 in self.node_host.items():
-        oprot.writeString(kiter530.encode('utf-8'))
-        oprot.writeString(viter531.encode('utf-8'))
+      for kiter560,viter561 in self.node_host.items():
+        oprot.writeString(kiter560.encode('utf-8'))
+        oprot.writeString(viter561.encode('utf-8'))
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.executor_node_port is not None:
       oprot.writeFieldBegin('executor_node_port', TType.MAP, 3)
       oprot.writeMapBegin(TType.LIST, TType.STRUCT, len(self.executor_node_port))
-      for kiter532,viter533 in self.executor_node_port.items():
-        oprot.writeListBegin(TType.I64, len(kiter532))
-        for iter534 in kiter532:
-          oprot.writeI64(iter534)
+      for kiter562,viter563 in self.executor_node_port.items():
+        oprot.writeListBegin(TType.I64, len(kiter562))
+        for iter564 in kiter562:
+          oprot.writeI64(iter564)
         oprot.writeListEnd()
-        viter533.write(oprot)
+        viter563.write(oprot)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.executor_start_time_secs is not None:
       oprot.writeFieldBegin('executor_start_time_secs', TType.MAP, 4)
       oprot.writeMapBegin(TType.LIST, TType.I64, len(self.executor_start_time_secs))
-      for kiter535,viter536 in self.executor_start_time_secs.items():
-        oprot.writeListBegin(TType.I64, len(kiter535))
-        for iter537 in kiter535:
-          oprot.writeI64(iter537)
+      for kiter565,viter566 in self.executor_start_time_secs.items():
+        oprot.writeListBegin(TType.I64, len(kiter565))
+        for iter567 in kiter565:
+          oprot.writeI64(iter567)
         oprot.writeListEnd()
-        oprot.writeI64(viter536)
+        oprot.writeI64(viter566)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.worker_resources is not None:
       oprot.writeFieldBegin('worker_resources', TType.MAP, 5)
       oprot.writeMapBegin(TType.STRUCT, TType.STRUCT, len(self.worker_resources))
-      for kiter538,viter539 in self.worker_resources.items():
-        kiter538.write(oprot)
-        viter539.write(oprot)
+      for kiter568,viter569 in self.worker_resources.items():
+        kiter568.write(oprot)
+        viter569.write(oprot)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -8445,11 +9330,11 @@ class StormBase:
       elif fid == 4:
         if ftype == TType.MAP:
           self.component_executors = {}
-          (_ktype541, _vtype542, _size540 ) = iprot.readMapBegin()
-          for _i544 in xrange(_size540):
-            _key545 = iprot.readString().decode('utf-8')
-            _val546 = iprot.readI32()
-            self.component_executors[_key545] = _val546
+          (_ktype571, _vtype572, _size570 ) = iprot.readMapBegin()
+          for _i574 in xrange(_size570):
+            _key575 = iprot.readString().decode('utf-8')
+            _val576 = iprot.readI32()
+            self.component_executors[_key575] = _val576
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -8477,12 +9362,12 @@ class StormBase:
       elif fid == 9:
         if ftype == TType.MAP:
           self.component_debug = {}
-          (_ktype548, _vtype549, _size547 ) = iprot.readMapBegin()
-          for _i551 in xrange(_size547):
-            _key552 = iprot.readString().decode('utf-8')
-            _val553 = DebugOptions()
-            _val553.read(iprot)
-            self.component_debug[_key552] = _val553
+          (_ktype578, _vtype579, _size577 ) = iprot.readMapBegin()
+          for _i581 in xrange(_size577):
+            _key582 = iprot.readString().decode('utf-8')
+            _val583 = DebugOptions()
+            _val583.read(iprot)
+            self.component_debug[_key582] = _val583
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -8511,9 +9396,9 @@ class StormBase:
     if self.component_executors is not None:
       oprot.writeFieldBegin('component_executors', TType.MAP, 4)
       oprot.writeMapBegin(TType.STRING, TType.I32, len(self.component_executors))
-      for kiter554,viter555 in self.component_executors.items():
-        oprot.writeString(kiter554.encode('utf-8'))
-        oprot.writeI32(viter555)
+      for kiter584,viter585 in self.component_executors.items():
+        oprot.writeString(kiter584.encode('utf-8'))
+        oprot.writeI32(viter585)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.launch_time_secs is not None:
@@ -8535,9 +9420,9 @@ class StormBase:
     if self.component_debug is not None:
       oprot.writeFieldBegin('component_debug', TType.MAP, 9)
       oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.component_debug))
-      for kiter556,viter557 in self.component_debug.items():
-        oprot.writeString(kiter556.encode('utf-8'))
-        viter557.write(oprot)
+      for kiter586,viter587 in self.component_debug.items():
+        oprot.writeString(kiter586.encode('utf-8'))
+        viter587.write(oprot)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -8617,13 +9502,13 @@ class ClusterWorkerHeartbeat:
       elif fid == 2:
         if ftype == TType.MAP:
           self.executor_stats = {}
-          (_ktype559, _vtype560, _size558 ) = iprot.readMapBegin()
-          for _i562 in xrange(_size558):
-            _key563 = ExecutorInfo()
-            _key563.read(iprot)
-            _val564 = ExecutorStats()
-            _val564.read(iprot)
-            self.executor_stats[_key563] = _val564
+          (_ktype589, _vtype590, _size588 ) = iprot.readMapBegin()
+          for _i592 in xrange(_size588):
+            _key593 = ExecutorInfo()
+            _key593.read(iprot)
+            _val594 = ExecutorStats()
+            _val594.read(iprot)
+            self.executor_stats[_key593] = _val594
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -8654,9 +9539,9 @@ class ClusterWorkerHeartbeat:
     if self.executor_stats is not None:
       oprot.writeFieldBegin('executor_stats', TType.MAP, 2)
       oprot.writeMapBegin(TType.STRUCT, TType.STRUCT, len(self.executor_stats))
-      for kiter565,viter566 in self.executor_stats.items():
-        kiter565.write(oprot)
-        viter566.write(oprot)
+      for kiter595,viter596 in self.executor_stats.items():
+        kiter595.write(oprot)
+        viter596.write(oprot)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.time_secs is not None:
@@ -8809,12 +9694,12 @@ class LocalStateData:
       if fid == 1:
         if ftype == TType.MAP:
           self.serialized_parts = {}
-          (_ktype568, _vtype569, _size567 ) = iprot.readMapBegin()
-          for _i571 in xrange(_size567):
-            _key572 = iprot.readString().decode('utf-8')
-            _val573 = ThriftSerializedObject()
-            _val573.read(iprot)
-            self.serialized_parts[_key572] = _val573
+          (_ktype598, _vtype599, _size597 ) = iprot.readMapBegin()
+          for _i601 in xrange(_size597):
+            _key602 = iprot.readString().decode('utf-8')
+            _val603 = ThriftSerializedObject()
+            _val603.read(iprot)
+            self.serialized_parts[_key602] = _val603
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -8831,9 +9716,9 @@ class LocalStateData:
     if self.serialized_parts is not None:
       oprot.writeFieldBegin('serialized_parts', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.serialized_parts))
-      for kiter574,viter575 in self.serialized_parts.items():
-        oprot.writeString(kiter574.encode('utf-8'))
-        viter575.write(oprot)
+      for kiter604,viter605 in self.serialized_parts.items():
+        oprot.writeString(kiter604.encode('utf-8'))
+        viter605.write(oprot)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -8898,11 +9783,11 @@ class LocalAssignment:
       elif fid == 2:
         if ftype == TType.LIST:
           self.executors = []
-          (_etype579, _size576) = iprot.readListBegin()
-          for _i580 in xrange(_size576):
-            _elem581 = ExecutorInfo()
-            _elem581.read(iprot)
-            self.executors.append(_elem581)
+          (_etype609, _size606) = iprot.readListBegin()
+          for _i610 in xrange(_size606):
+            _elem611 = ExecutorInfo()
+            _elem611.read(iprot)
+            self.executors.append(_elem611)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -8929,8 +9814,8 @@ class LocalAssignment:
     if self.executors is not None:
       oprot.writeFieldBegin('executors', TType.LIST, 2)
       oprot.writeListBegin(TType.STRUCT, len(self.executors))
-      for iter582 in self.executors:
-        iter582.write(oprot)
+      for iter612 in self.executors:
+        iter612.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.resources is not None:
@@ -9059,11 +9944,11 @@ class LSApprovedWorkers:
       if fid == 1:
         if ftype == TType.MAP:
           self.approved_workers = {}
-          (_ktype584, _vtype585, _size583 ) = iprot.readMapBegin()
-          for _i587 in xrange(_size583):
-            _key588 = iprot.readString().decode('utf-8')
-            _val589 = iprot.readI32()
-            self.approved_workers[_key588] = _val589
+          (_ktype614, _vtype615, _size613 ) = iprot.readMapBegin()
+          for _i617 in xrange(_size613):
+            _key618 = iprot.readString().decode('utf-8')
+            _val619 = iprot.readI32()
+            self.approved_workers[_key618] = _val619
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -9080,9 +9965,9 @@ class LSApprovedWorkers:
     if self.approved_workers is not None:
       oprot.writeFieldBegin('approved_workers', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.I32, len(self.approved_workers))
-      for kiter590,viter591 in self.approved_workers.items():
-        oprot.writeString(kiter590.encode('utf-8'))
-        oprot.writeI32(viter591)
+      for kiter620,viter621 in self.approved_workers.items():
+        oprot.writeString(kiter620.encode('utf-8'))
+        oprot.writeI32(viter621)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -9136,12 +10021,12 @@ class LSSupervisorAssignments:
       if fid == 1:
         if ftype == TType.MAP:
           self.assignments = {}
-          (_ktype593, _vtype594, _size592 ) = iprot.readMapBegin()
-          for _i596 in xrange(_size592):
-            _key597 = iprot.readI32()
-            _val598 = LocalAssignment()
-            _val598.read(iprot)
-            self.assignments[_key597] = _val598
+          (_ktype623, _vtype624, _size622 ) = iprot.readMapBegin()
+          for _i626 in xrange(_size622):
+            _key627 = iprot.readI32()
+            _val628 = LocalAssignment()
+            _val628.read(iprot)
+            self.assignments[_key627] = _val628
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -9158,9 +10043,9 @@ class LSSupervisorAssignments:
     if self.assignments is not None:
       oprot.writeFieldBegin('assignments', TType.MAP, 1)
       oprot.writeMapBegin(TType.I32, TType.STRUCT, len(self.assignments))
-      for kiter599,viter600 in self.assignments.items():
-        oprot.writeI32(kiter599)
-        viter600.write(oprot)
+      for kiter629,viter630 in self.assignments.items():
+        oprot.writeI32(kiter629)
+        viter630.write(oprot)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -9233,11 +10118,11 @@ class LSWorkerHeartbeat:
       elif fid == 3:
         if ftype == TType.LIST:
           self.executors = []
-          (_etype604, _size601) = iprot.readListBegin()
-          for _i605 in xrange(_size601):
-            _elem606 = ExecutorInfo()
-            _elem606.read(iprot)
-            self.executors.append(_elem606)
+          (_etype634, _size631) = iprot.readListBegin()
+          for _i635 in xrange(_size631):
+            _elem636 = ExecutorInfo()
+            _elem636.read(iprot)
+            self.executors.append(_elem636)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -9267,8 +10152,8 @@ class LSWorkerHeartbeat:
     if self.executors is not None:
       oprot.writeFieldBegin('executors', TType.LIST, 3)
       oprot.writeListBegin(TType.STRUCT, len(self.executors))
-      for iter607 in self.executors:
-        iter607.write(oprot)
+      for iter637 in self.executors:
+        iter637.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.port is not None:
@@ -9354,20 +10239,20 @@ class LSTopoHistory:
       elif fid == 3:
         if ftype == TType.LIST:
           self.users = []
-          (_etype611, _size608) = iprot.readListBegin()
-          for _i612 in xrange(_size608):
-            _elem613 = iprot.readString().decode('utf-8')
-            self.users.append(_elem613)
+          (_etype641, _size638) = iprot.readListBegin()
+          for _i642 in xrange(_size638):
+            _elem643 = iprot.readString().decode('utf-8')
+            self.users.append(_elem643)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 4:
         if ftype == TType.LIST:
           self.groups = []
-          (_etype617, _size614) = iprot.readListBegin()
-          for _i618 in xrange(_size614):
-            _elem619 = iprot.readString().decode('utf-8')
-            self.groups.append(_elem619)
+          (_etype647, _size644) = iprot.readListBegin()
+          for _i648 in xrange(_size644):
+            _elem649 = iprot.readString().decode('utf-8')
+            self.groups.append(_elem649)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -9392,15 +10277,15 @@ class LSTopoHistory:
     if self.users is not None:
       oprot.writeFieldBegin('users', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.users))
-      for iter620 in self.users:
-        oprot.writeString(iter620.encode('utf-8'))
+      for iter650 in self.users:
+        oprot.writeString(iter650.encode('utf-8'))
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.groups is not None:
       oprot.writeFieldBegin('groups', TType.LIST, 4)
       oprot.writeListBegin(TType.STRING, len(self.groups))
-      for iter621 in self.groups:
-        oprot.writeString(iter621.encode('utf-8'))
+      for iter651 in self.groups:
+        oprot.writeString(iter651.encode('utf-8'))
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -9463,11 +10348,11 @@ class LSTopoHistoryList:
       if fid == 1:
         if ftype == TType.LIST:
           self.topo_history = []
-          (_etype625, _size622) = iprot.readListBegin()
-          for _i626 in xrange(_size622):
-            _elem627 = LSTopoHistory()
-            _elem627.read(iprot)
-            self.topo_history.append(_elem627)
+          (_etype655, _size652) = iprot.readListBegin()
+          for _i656 in xrange(_size652):
+            _elem657 = LSTopoHistory()
+            _elem657.read(iprot)
+            self.topo_history.append(_elem657)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -9484,8 +10369,8 @@ class LSTopoHistoryList:
     if self.topo_history is not None:
       oprot.writeFieldBegin('topo_history', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.topo_history))
-      for iter628 in self.topo_history:
-        iter628.write(oprot)
+      for iter658 in self.topo_history:
+        iter658.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -9820,12 +10705,12 @@ class LogConfig:
       if fid == 2:
         if ftype == TType.MAP:
           self.named_logger_level = {}
-          (_ktype630, _vtype631, _size629 ) = iprot.readMapBegin()
-          for _i633 in xrange(_size629):
-            _key634 = iprot.readString().decode('utf-8')
-            _val635 = LogLevel()
-            _val635.read(iprot)
-            self.named_logger_level[_key634] = _val635
+          (_ktype660, _vtype661, _size659 ) = iprot.readMapBegin()
+          for _i663 in xrange(_size659):
+            _key664 = iprot.readString().decode('utf-8')
+            _val665 = LogLevel()
+            _val665.read(iprot)
+            self.named_logger_level[_key664] = _val665
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -9842,9 +10727,9 @@ class LogConfig:
     if self.named_logger_level is not None:
       oprot.writeFieldBegin('named_logger_level', TType.MAP, 2)
       oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.named_logger_level))
-      for kiter636,viter637 in self.named_logger_level.items():
-        oprot.writeString(kiter636.encode('utf-8'))
-        viter637.write(oprot)
+      for kiter666,viter667 in self.named_logger_level.items():
+        oprot.writeString(kiter666.encode('utf-8'))
+        viter667.write(oprot)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -9896,10 +10781,10 @@ class TopologyHistoryInfo:
       if fid == 1:
         if ftype == TType.LIST:
           self.topo_ids = []
-          (_etype641, _size638) = iprot.readListBegin()
-          for _i642 in xrange(_size638):
-            _elem643 = iprot.readString().decode('utf-8')
-            self.topo_ids.append(_elem643)
+          (_etype671, _size668) = iprot.readListBegin()
+          for _i672 in xrange(_size668):
+            _elem673 = iprot.readString().decode('utf-8')
+            self.topo_ids.append(_elem673)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -9916,8 +10801,8 @@ class TopologyHistoryInfo:
     if self.topo_ids is not None:
       oprot.writeFieldBegin('topo_ids', TType.LIST, 1)
       oprot.writeListBegin(TType.STRING, len(self.topo_ids))
-      for iter644 in self.topo_ids:
-        oprot.writeString(iter644.encode('utf-8'))
+      for iter674 in self.topo_ids:
+        oprot.writeString(iter674.encode('utf-8'))
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -10201,11 +11086,11 @@ class HBRecords:
       if fid == 1:
         if ftype == TType.LIST:
           self.pulses = []
-          (_etype648, _size645) = iprot.readListBegin()
-          for _i649 in xrange(_size645):
-            _elem650 = HBPulse()
-            _elem650.read(iprot)
-            self.pulses.append(_elem650)
+          (_etype678, _size675) = iprot.readListBegin()
+          for _i679 in xrange(_size675):
+            _elem680 = HBPulse()
+            _elem680.read(iprot)
+            self.pulses.append(_elem680)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -10222,8 +11107,8 @@ class HBRecords:
     if self.pulses is not None:
       oprot.writeFieldBegin('pulses', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.pulses))
-      for iter651 in self.pulses:
-        iter651.write(oprot)
+      for iter681 in self.pulses:
+        iter681.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -10275,10 +11160,10 @@ class HBNodes:
       if fid == 1:
         if ftype == TType.LIST:
           self.pulseIds = []
-          (_etype655, _size652) = iprot.readListBegin()
-          for _i656 in xrange(_size652):
-            _elem657 = iprot.readString().decode('utf-8')
-            self.pulseIds.append(_elem657)
+          (_etype685, _size682) = iprot.readListBegin()
+          for _i686 in xrange(_size682):
+            _elem687 = iprot.readString().decode('utf-8')
+            self.pulseIds.append(_elem687)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -10295,8 +11180,8 @@ class HBNodes:
     if self.pulseIds is not None:
       oprot.writeFieldBegin('pulseIds', TType.LIST, 1)
       oprot.writeListBegin(TType.STRING, len(self.pulseIds))
-      for iter658 in self.pulseIds:
-        oprot.writeString(iter658.encode('utf-8'))
+      for iter688 in self.pulseIds:
+        oprot.writeString(iter688.encode('utf-8'))
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()

http://git-wip-us.apache.org/repos/asf/storm/blob/0e0bcf27/storm-core/src/storm.thrift
----------------------------------------------------------------------
diff --git a/storm-core/src/storm.thrift b/storm-core/src/storm.thrift
index 4cfb9ad..36ad4dd 100644
--- a/storm-core/src/storm.thrift
+++ b/storm-core/src/storm.thrift
@@ -309,6 +309,29 @@ struct TopologyStats {
 5: optional map<string, i64> window_to_failed;
 }
 
+struct SupervisorPageInfo {
+  1: optional list<SupervisorSummary> supervisor_summaries;
+  2: optional list<WorkerSummary> worker_summaries;
+}
+
+struct WorkerSummary {
+  1: optional string supervisor_id; 
+  2: optional string host;
+  3: optional i32 port;
+  4: optional string topology_id;
+  5: optional string topology_name;
+  6: optional i32 num_executors;
+  7: optional map<string, i64> component_to_num_tasks;
+  8: optional i32 time_secs;
+  9: optional i32 uptime_secs;
+521: optional double requested_memonheap;
+522: optional double requested_memoffheap;
+523: optional double requested_cpu;
+524: optional double assigned_memonheap;
+525: optional double assigned_memoffheap;
+526: optional double assigned_cpu;
+}
+
 struct TopologyPageInfo {
  1: required string id;
  2: optional string name;
@@ -325,6 +348,7 @@ struct TopologyPageInfo {
 13: optional string owner;
 14: optional DebugOptions debug_options;
 15: optional i32 replication_count;
+16: optional list<WorkerSummary> workers;
 521: optional double requested_memonheap;
 522: optional double requested_memoffheap;
 523: optional double requested_cpu;
@@ -642,6 +666,7 @@ service Nimbus {
   TopologyInfo getTopologyInfo(1: string id) throws (1: NotAliveException e, 2: AuthorizationException aze);
   TopologyInfo getTopologyInfoWithOpts(1: string id, 2: GetInfoOptions options) throws (1: NotAliveException e, 2: AuthorizationException aze);
   TopologyPageInfo getTopologyPageInfo(1: string id, 2: string window, 3: bool is_include_sys) throws (1: NotAliveException e, 2: AuthorizationException aze);
+  SupervisorPageInfo getSupervisorPageInfo(1: string id, 2: string host, 3: bool is_include_sys) throws (1: NotAliveException e, 2: AuthorizationException aze);
   ComponentPageInfo getComponentPageInfo(1: string topology_id, 2: string component_id, 3: string window, 4: bool is_include_sys) throws (1: NotAliveException e, 2: AuthorizationException aze);
   //returns json
   string getTopologyConf(1: string id) throws (1: NotAliveException e, 2: AuthorizationException aze);
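
For context (not part of the patch): a minimal Python 2 sketch of how a Thrift client might exercise the new getSupervisorPageInfo RPC declared above. The import path (storm.Nimbus), the Nimbus host/port, and the use of a framed transport are assumptions here, not taken from this commit.

# Hypothetical usage sketch -- module path, host/port and transport choice are assumptions.
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from storm import Nimbus  # generated Python bindings for storm.thrift (assumed import path)

socket = TSocket.TSocket('nimbus.example.com', 6627)   # assumed Nimbus host and thrift port
transport = TTransport.TFramedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = Nimbus.Client(protocol)

transport.open()
try:
    # Arguments mirror the new signature: getSupervisorPageInfo(id, host, is_include_sys)
    page = client.getSupervisorPageInfo('supervisor-id-1', 'host1.example.com', False)
    for ws in page.worker_summaries or []:
        print '%s:%d %s executors=%d mem_onheap=%s' % (
            ws.host, ws.port, ws.topology_name, ws.num_executors, ws.assigned_memonheap)
finally:
    transport.close()

The same WorkerSummary list is also surfaced per topology through the new TopologyPageInfo.workers field (field 16 above), which is what the topology page table consumes.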