You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@slider.apache.org by sm...@apache.org on 2014/07/08 02:49:35 UTC
svn commit: r1608633 [5/7] - in /incubator/slider/site: content/design/
content/design/registry/ content/design/specification/ content/developing/
content/docs/ content/docs/configuration/ content/docs/slider_specs/
trunk/content/developing/
Modified: incubator/slider/site/content/docs/configuration/redesign.html
URL: http://svn.apache.org/viewvc/incubator/slider/site/content/docs/configuration/redesign.html?rev=1608633&r1=1608632&r2=1608633&view=diff
==============================================================================
--- incubator/slider/site/content/docs/configuration/redesign.html (original)
+++ incubator/slider/site/content/docs/configuration/redesign.html Tue Jul 8 00:49:34 2014
@@ -204,15 +204,16 @@ more attributes into specific sections <
<h1 id="sections">Sections</h1>
<h2 id="root">Root</h2>
<p>Contains various string and integer values</p>
-<pre class="codehilite"><code>"version": "1.0",
-"name": "test_cluster_lifecycle",
-"type": "hbase",
-"state": 3,
-"createTime": 1393512091276,
-"updateTime": 1393512117286,
-"originConfigurationPath": "hdfs://sandbox:8020/user/stevel/.slider/cluster/test_cluster_lifecycle/snapshot",
-"generatedConfigurationPath": "hdfs://sandbox:8020/user/stevel/.slider/cluster/test_cluster_lifecycle/generated",
-"dataPath": "hdfs://sandbox:8020/user/stevel/.slider/cluster/test_cluster_lifecycle/database",</code></pre>
+<div class="codehilite"><pre><span class="s">"version"</span><span class="o">:</span> <span class="s">"1.0"</span><span class="p">,</span>
+<span class="s">"name"</span><span class="o">:</span> <span class="s">"test_cluster_lifecycle"</span><span class="p">,</span>
+<span class="s">"type"</span><span class="o">:</span> <span class="s">"hbase"</span><span class="p">,</span>
+<span class="s">"state"</span><span class="o">:</span> <span class="mi">3</span><span class="p">,</span>
+<span class="s">"createTime"</span><span class="o">:</span> <span class="mi">1393512091276</span><span class="p">,</span>
+<span class="s">"updateTime"</span><span class="o">:</span> <span class="mi">1393512117286</span><span class="p">,</span>
+<span class="s">"originConfigurationPath"</span><span class="o">:</span> <span class="s">"hdfs://sandbox:8020/user/stevel/.slider/cluster/test_cluster_lifecycle/snapshot"</span><span class="p">,</span>
+<span class="s">"generatedConfigurationPath"</span><span class="o">:</span> <span class="s">"hdfs://sandbox:8020/user/stevel/.slider/cluster/test_cluster_lifecycle/generated"</span><span class="p">,</span>
+<span class="s">"dataPath"</span><span class="o">:</span> <span class="s">"hdfs://sandbox:8020/user/stevel/.slider/cluster/test_cluster_lifecycle/database"</span><span class="p">,</span>
+</pre></div>
<ul>
@@ -226,12 +227,13 @@ releases may not be able to read existin
</li>
<li><code>type</code>: reference to the provider type -this triggers a Hadoop configuration
property lookup to find the implementation classes.</li>
-<li><code>state</code>: an enumeration value of the cluster state.<pre class="codehilite"><code>int STATE_INCOMPLETE = 0;
-int STATE_SUBMITTED = 1;
-int STATE_CREATED = 2;
-int STATE_LIVE = 3;
-int STATE_STOPPED = 4;
-int STATE_DESTROYED = 5;</code></pre>
+<li><code>state</code>: an enumeration value of the cluster state.<div class="codehilite"><pre><span class="kt">int</span> <span class="n">STATE_INCOMPLETE</span> <span class="o">=</span> <span class="mi">0</span><span class="p">;</span>
+<span class="kt">int</span> <span class="n">STATE_SUBMITTED</span> <span class="o">=</span> <span class="mi">1</span><span class="p">;</span>
+<span class="kt">int</span> <span class="n">STATE_CREATED</span> <span class="o">=</span> <span class="mi">2</span><span class="p">;</span>
+<span class="kt">int</span> <span class="n">STATE_LIVE</span> <span class="o">=</span> <span class="mi">3</span><span class="p">;</span>
+<span class="kt">int</span> <span class="n">STATE_STOPPED</span> <span class="o">=</span> <span class="mi">4</span><span class="p">;</span>
+<span class="kt">int</span> <span class="n">STATE_DESTROYED</span> <span class="o">=</span> <span class="mi">5</span><span class="p">;</span>
+</pre></div>
</li>
@@ -257,51 +259,57 @@ into a section <code>/slider-internal</c
<p>Read-only list of information about the application. Generally this is
intended to be used for debugging and testing.</p>
<h3 id="persisted-values-static-information-about-the-file-history">Persisted values: static information about the file history</h3>
-<pre class="codehilite"><code>"info" : {
- "create.hadoop.deployed.info" : "(detached from release-2.3.0) @dfe46336fbc6a044bc124392ec06b85",
- "create.application.build.info" : "Slider Core-0.13.0-SNAPSHOT Built against commit# 1a94ee4aa1 on Java 1.7.0_45 by stevel",
- "create.hadoop.build.info" : "2.3.0",
- "create.time.millis" : "1393512091276",
-},</code></pre>
+<div class="codehilite"><pre><span class="s">"info"</span> <span class="o">:</span> <span class="p">{</span>
+ <span class="s">"create.hadoop.deployed.info"</span> <span class="o">:</span> <span class="s">"(detached from release-2.3.0) @dfe46336fbc6a044bc124392ec06b85"</span><span class="p">,</span>
+ <span class="s">"create.application.build.info"</span> <span class="o">:</span> <span class="s">"Slider Core-0.13.0-SNAPSHOT Built against commit# 1a94ee4aa1 on Java 1.7.0_45 by stevel"</span><span class="p">,</span>
+ <span class="s">"create.hadoop.build.info"</span> <span class="o">:</span> <span class="s">"2.3.0"</span><span class="p">,</span>
+ <span class="s">"create.time.millis"</span> <span class="o">:</span> <span class="s">"1393512091276"</span><span class="p">,</span>
+<span class="p">},</span>
+</pre></div>
<p><em>Proposed</em>: move persisted info K-V pairs to a section <code>/diagnostics</code>.</p>
<h3 id="dynamic-values">Dynamic values:</h3>
<p>whether the AM supports service restart without killing all the containers hosting
the role instances:</p>
-<pre class="codehilite"><code>"slider.am.restart.supported" : "false",</code></pre>
+<div class="codehilite"><pre><span class="s">"slider.am.restart.supported"</span> <span class="o">:</span> <span class="s">"false"</span><span class="p">,</span>
+</pre></div>
<p>timestamps of the cluster going live, and when the status query was made</p>
-<pre class="codehilite"><code>"live.time" : "27 Feb 2014 14:41:56 GMT",
-"live.time.millis" : "1393512116881",
-"status.time" : "27 Feb 2014 14:42:08 GMT",
-"status.time.millis" : "1393512128726",</code></pre>
+<div class="codehilite"><pre><span class="s">"live.time"</span> <span class="o">:</span> <span class="s">"27 Feb 2014 14:41:56 GMT"</span><span class="p">,</span>
+<span class="s">"live.time.millis"</span> <span class="o">:</span> <span class="s">"1393512116881"</span><span class="p">,</span>
+<span class="s">"status.time"</span> <span class="o">:</span> <span class="s">"27 Feb 2014 14:42:08 GMT"</span><span class="p">,</span>
+<span class="s">"status.time.millis"</span> <span class="o">:</span> <span class="s">"1393512128726"</span><span class="p">,</span>
+</pre></div>
<p>yarn data provided to the AM</p>
-<pre class="codehilite"><code>"yarn.vcores" : "32",
-"yarn.memory" : "2048",</code></pre>
+<div class="codehilite"><pre><span class="s">"yarn.vcores"</span> <span class="o">:</span> <span class="s">"32"</span><span class="p">,</span>
+<span class="s">"yarn.memory"</span> <span class="o">:</span> <span class="s">"2048"</span><span class="p">,</span>
+</pre></div>
<p>information about the application and hadoop versions in use. Here
the application was built using Hadoop 2.3.0, but is running against the version
of Hadoop built for HDP-2.</p>
-<pre class="codehilite"><code>"status.application.build.info" : "Slider Core-0.13.0-SNAPSHOT Built against commit# 1a94ee4aa1 on Java 1.7.0_45 by stevel",
-"status.hadoop.build.info" : "2.3.0",
-"status.hadoop.deployed.info" : "bigwheel-m16-2.2.0 @704f1e463ebc4fb89353011407e965"</code></pre>
+<div class="codehilite"><pre><span class="s">"status.application.build.info"</span> <span class="o">:</span> <span class="s">"Slider Core-0.13.0-SNAPSHOT Built against commit# 1a94ee4aa1 on Java 1.7.0_45 by stevel"</span><span class="p">,</span>
+<span class="s">"status.hadoop.build.info"</span> <span class="o">:</span> <span class="s">"2.3.0"</span><span class="p">,</span>
+<span class="s">"status.hadoop.deployed.info"</span> <span class="o">:</span> <span class="s">"bigwheel-m16-2.2.0 @704f1e463ebc4fb89353011407e965"</span>
+</pre></div>
<p>## <code>instances</code></p>
<p>Information about the live containers in a cluster</p>
-<pre class="codehilite"><code> "instances": {
- "slider": [ "container_1393511571284_0002_01_000001" ],
- "master": [ "container_1393511571284_0002_01_000003" ],
- "worker": [
- "container_1393511571284_0002_01_000002",
- "container_1393511571284_0002_01_000004"
- ]
- },</code></pre>
+<div class="codehilite"><pre> <span class="s">"instances"</span><span class="o">:</span> <span class="p">{</span>
+ <span class="s">"slider"</span><span class="o">:</span> <span class="p">[</span> <span class="s">"container_1393511571284_0002_01_000001"</span> <span class="p">],</span>
+ <span class="s">"master"</span><span class="o">:</span> <span class="p">[</span> <span class="s">"container_1393511571284_0002_01_000003"</span> <span class="p">],</span>
+ <span class="s">"worker"</span><span class="o">:</span> <span class="p">[</span>
+ <span class="s">"container_1393511571284_0002_01_000002"</span><span class="p">,</span>
+ <span class="s">"container_1393511571284_0002_01_000004"</span>
+ <span class="p">]</span>
+ <span class="p">},</span>
+</pre></div>
<p>There's no information about location, nor is there any history about containers
@@ -322,17 +330,19 @@ buffer. This avoids a significantly fail
<p>## <code>statistics</code></p>
<p>Statistics on each role. </p>
<p>They can be divided into counters that only increase</p>
-<pre class="codehilite"><code>"containers.start.completed": 0,
-"containers.start.failed": 0,
-"containers.failed": 0,
-"containers.completed": 0,
-"containers.requested": 0</code></pre>
+<div class="codehilite"><pre><span class="s">"containers.start.completed"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+<span class="s">"containers.start.failed"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+<span class="s">"containers.failed"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+<span class="s">"containers.completed"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+<span class="s">"containers.requested"</span><span class="o">:</span> <span class="mi">0</span>
+</pre></div>
<p>and those that vary depending upon the current state</p>
-<pre class="codehilite"><code>"containers.live": 0,
-"containers.active.requests": 0,
-"containers.desired": 0,</code></pre>
+<div class="codehilite"><pre><span class="s">"containers.live"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+<span class="s">"containers.active.requests"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+<span class="s">"containers.desired"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+</pre></div>
<ul>
@@ -377,40 +387,41 @@ of the cluster -this is not obvious. A d
again, there's a risk of clash with or confusion with a role. </p>
<p>Better to have a specific <code>/statistics/cluster</code> element,
and to move the roles' statistics under <code>/statistics/roles</code>:</p>
-<pre class="codehilite"><code>"statistics": {
- "cluster": {
- "containers.unknown.completed": 0,
- "containers.start.completed": 3,
- "containers.live": 1,
- "containers.start.failed": 0,
- "containers.failed": 0,
- "containers.completed": 0,
- "containers.surplus": 0
-
- },
- "roles": {
- "worker": {
- "containers.start.completed": 0,
- "containers.live": 2,
- "containers.start.failed": 0,
- "containers.active.requests": 0,
- "containers.failed": 0,
- "containers.completed": 0,
- "containers.desired": 2,
- "containers.requested": 0
- },
- "master": {
- "containers.start.completed": 0,
- "containers.live": 1,
- "containers.start.failed": 0,
- "containers.active.requests": 0,
- "containers.failed": 0,
- "containers.completed": 0,
- "containers.desired": 1,
- "containers.requested": 0
- }
- }
-},</code></pre>
+<div class="codehilite"><pre><span class="s">"statistics"</span><span class="o">:</span> <span class="p">{</span>
+ <span class="s">"cluster"</span><span class="o">:</span> <span class="p">{</span>
+ <span class="s">"containers.unknown.completed"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+ <span class="s">"containers.start.completed"</span><span class="o">:</span> <span class="mi">3</span><span class="p">,</span>
+ <span class="s">"containers.live"</span><span class="o">:</span> <span class="mi">1</span><span class="p">,</span>
+ <span class="s">"containers.start.failed"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+ <span class="s">"containers.failed"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+ <span class="s">"containers.completed"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+ <span class="s">"containers.surplus"</span><span class="o">:</span> <span class="mi">0</span>
+
+ <span class="p">},</span>
+ <span class="s">"roles"</span><span class="o">:</span> <span class="p">{</span>
+ <span class="s">"worker"</span><span class="o">:</span> <span class="p">{</span>
+ <span class="s">"containers.start.completed"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+ <span class="s">"containers.live"</span><span class="o">:</span> <span class="mi">2</span><span class="p">,</span>
+ <span class="s">"containers.start.failed"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+ <span class="s">"containers.active.requests"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+ <span class="s">"containers.failed"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+ <span class="s">"containers.completed"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+ <span class="s">"containers.desired"</span><span class="o">:</span> <span class="mi">2</span><span class="p">,</span>
+ <span class="s">"containers.requested"</span><span class="o">:</span> <span class="mi">0</span>
+ <span class="p">},</span>
+ <span class="s">"master"</span><span class="o">:</span> <span class="p">{</span>
+ <span class="s">"containers.start.completed"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+ <span class="s">"containers.live"</span><span class="o">:</span> <span class="mi">1</span><span class="p">,</span>
+ <span class="s">"containers.start.failed"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+ <span class="s">"containers.active.requests"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+ <span class="s">"containers.failed"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+ <span class="s">"containers.completed"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+ <span class="s">"containers.desired"</span><span class="o">:</span> <span class="mi">1</span><span class="p">,</span>
+ <span class="s">"containers.requested"</span><span class="o">:</span> <span class="mi">0</span>
+ <span class="p">}</span>
+ <span class="p">}</span>
+<span class="p">},</span>
+</pre></div>
<p>This approach allows extra statistics sections to be added (perhaps
@@ -418,21 +429,22 @@ by providers), without any changes to th
<h2 id="options">Options</h2>
<p>A list of options used by Slider and its providers to build up the AM
and the configurations of the deployed service components</p>
-<pre class="codehilite"><code>"options": {
- "zookeeper.port": "2181",
- "site.hbase.master.startup.retainassign": "true",
- "slider.cluster.application.image.path": "hdfs://sandbox:8020/hbase.tar.gz",
- "site.fs.defaultFS": "hdfs://sandbox:8020",
- "slider.container.failure.threshold": "5",
- "site.fs.default.name": "hdfs://sandbox:8020",
- "slider.cluster.directory.permissions": "0770",
- "slider.am.monitoring.enabled": "false",
- "zookeeper.path": "/yarnapps_slider_stevel_test_cluster_lifecycle",
- "slider.tmp.dir": "hdfs://sandbox:8020/user/stevel/.slider/cluster/test_cluster_lifecycle/tmp/am",
- "slider.data.directory.permissions": "0770",
- "zookeeper.hosts": "sandbox",
- "slider.container.failure.shortlife": "60"
-},</code></pre>
+<div class="codehilite"><pre><span class="s">"options"</span><span class="o">:</span> <span class="p">{</span>
+ <span class="s">"zookeeper.port"</span><span class="o">:</span> <span class="s">"2181"</span><span class="p">,</span>
+ <span class="s">"site.hbase.master.startup.retainassign"</span><span class="o">:</span> <span class="s">"true"</span><span class="p">,</span>
+ <span class="s">"slider.cluster.application.image.path"</span><span class="o">:</span> <span class="s">"hdfs://sandbox:8020/hbase.tar.gz"</span><span class="p">,</span>
+ <span class="s">"site.fs.defaultFS"</span><span class="o">:</span> <span class="s">"hdfs://sandbox:8020"</span><span class="p">,</span>
+ <span class="s">"slider.container.failure.threshold"</span><span class="o">:</span> <span class="s">"5"</span><span class="p">,</span>
+ <span class="s">"site.fs.default.name"</span><span class="o">:</span> <span class="s">"hdfs://sandbox:8020"</span><span class="p">,</span>
+ <span class="s">"slider.cluster.directory.permissions"</span><span class="o">:</span> <span class="s">"0770"</span><span class="p">,</span>
+ <span class="s">"slider.am.monitoring.enabled"</span><span class="o">:</span> <span class="s">"false"</span><span class="p">,</span>
+ <span class="s">"zookeeper.path"</span><span class="o">:</span> <span class="s">"/yarnapps_slider_stevel_test_cluster_lifecycle"</span><span class="p">,</span>
+ <span class="s">"slider.tmp.dir"</span><span class="o">:</span> <span class="s">"hdfs://sandbox:8020/user/stevel/.slider/cluster/test_cluster_lifecycle/tmp/am"</span><span class="p">,</span>
+ <span class="s">"slider.data.directory.permissions"</span><span class="o">:</span> <span class="s">"0770"</span><span class="p">,</span>
+ <span class="s">"zookeeper.hosts"</span><span class="o">:</span> <span class="s">"sandbox"</span><span class="p">,</span>
+ <span class="s">"slider.container.failure.shortlife"</span><span class="o">:</span> <span class="s">"60"</span>
+<span class="p">},</span>
+</pre></div>
<p>Some of these options have been created by slider itself ("slider.tmp.dir")
@@ -461,44 +473,47 @@ specific role.</p>
fix the heap size of a component.
1. <code>app.infoport</code>: an option supported by some providers (e.g. HBase)
to fix the port to which a role (master or worker) binds its web UI.</p>
-<pre class="codehilite"><code> "worker": {
- "yarn.memory": "768",
- "env.MALLOC_ARENA_MAX": "4",
- "role.instances": "0",
- "role.name": "worker",
- "jvm.heapsize": "512M",
- "yarn.vcores": "1",
- "app.infoport": "0"
- },</code></pre>
+<div class="codehilite"><pre> <span class="s">"worker"</span><span class="o">:</span> <span class="p">{</span>
+ <span class="s">"yarn.memory"</span><span class="o">:</span> <span class="s">"768"</span><span class="p">,</span>
+ <span class="s">"env.MALLOC_ARENA_MAX"</span><span class="o">:</span> <span class="s">"4"</span><span class="p">,</span>
+ <span class="s">"role.instances"</span><span class="o">:</span> <span class="s">"0"</span><span class="p">,</span>
+ <span class="s">"role.name"</span><span class="o">:</span> <span class="s">"worker"</span><span class="p">,</span>
+ <span class="s">"jvm.heapsize"</span><span class="o">:</span> <span class="s">"512M"</span><span class="p">,</span>
+ <span class="s">"yarn.vcores"</span><span class="o">:</span> <span class="s">"1"</span><span class="p">,</span>
+ <span class="s">"app.infoport"</span><span class="o">:</span> <span class="s">"0"</span>
+ <span class="p">},</span>
+</pre></div>
<p>In a live cluster, the role information also includes status information
about the cluster.</p>
-<pre class="codehilite"><code> "master": {
- "yarn.memory": "1024",
- "env.MALLOC_ARENA_MAX": "4",
- "role.instances": "0",
- "role.requested.instances": "0",
- "role.name": "master",
- "role.failed.starting.instances": "0",
- "role.actual.instances": "0",
- "jvm.heapsize": "512M",
- "yarn.vcores": "1",
- "role.releasing.instances": "0",
- "role.failed.instances": "0",
- "app.infoport": "0"
- }</code></pre>
+<div class="codehilite"><pre> <span class="s">"master"</span><span class="o">:</span> <span class="p">{</span>
+ <span class="s">"yarn.memory"</span><span class="o">:</span> <span class="s">"1024"</span><span class="p">,</span>
+ <span class="s">"env.MALLOC_ARENA_MAX"</span><span class="o">:</span> <span class="s">"4"</span><span class="p">,</span>
+ <span class="s">"role.instances"</span><span class="o">:</span> <span class="s">"0"</span><span class="p">,</span>
+ <span class="s">"role.requested.instances"</span><span class="o">:</span> <span class="s">"0"</span><span class="p">,</span>
+ <span class="s">"role.name"</span><span class="o">:</span> <span class="s">"master"</span><span class="p">,</span>
+ <span class="s">"role.failed.starting.instances"</span><span class="o">:</span> <span class="s">"0"</span><span class="p">,</span>
+ <span class="s">"role.actual.instances"</span><span class="o">:</span> <span class="s">"0"</span><span class="p">,</span>
+ <span class="s">"jvm.heapsize"</span><span class="o">:</span> <span class="s">"512M"</span><span class="p">,</span>
+ <span class="s">"yarn.vcores"</span><span class="o">:</span> <span class="s">"1"</span><span class="p">,</span>
+ <span class="s">"role.releasing.instances"</span><span class="o">:</span> <span class="s">"0"</span><span class="p">,</span>
+ <span class="s">"role.failed.instances"</span><span class="o">:</span> <span class="s">"0"</span><span class="p">,</span>
+ <span class="s">"app.infoport"</span><span class="o">:</span> <span class="s">"0"</span>
+ <span class="p">}</span>
+</pre></div>
<p>The role <code>slider</code> represents the Slider Application Master itself.</p>
-<pre class="codehilite"><code> "slider": {
- "yarn.memory": "256",
- "env.MALLOC_ARENA_MAX": "4",
- "role.instances": "1",
- "role.name": "slider",
- "jvm.heapsize": "256M",
- "yarn.vcores": "1",
- },</code></pre>
+<div class="codehilite"><pre> <span class="s">"slider"</span><span class="o">:</span> <span class="p">{</span>
+ <span class="s">"yarn.memory"</span><span class="o">:</span> <span class="s">"256"</span><span class="p">,</span>
+ <span class="s">"env.MALLOC_ARENA_MAX"</span><span class="o">:</span> <span class="s">"4"</span><span class="p">,</span>
+ <span class="s">"role.instances"</span><span class="o">:</span> <span class="s">"1"</span><span class="p">,</span>
+ <span class="s">"role.name"</span><span class="o">:</span> <span class="s">"slider"</span><span class="p">,</span>
+ <span class="s">"jvm.heapsize"</span><span class="o">:</span> <span class="s">"256M"</span><span class="p">,</span>
+ <span class="s">"yarn.vcores"</span><span class="o">:</span> <span class="s">"1"</span><span class="p">,</span>
+ <span class="p">},</span>
+</pre></div>
<h3 id="proposed">Proposed:</h3>
@@ -517,8 +532,9 @@ the scope of this document.</p>
<h3 id="proposed-alongside-clientproperties-comes-clientfiles">Proposed: alongside <code>/clientProperties</code> comes <code>/clientfiles</code></h3>
<p>This section will list all files that an application instance can generate
for clients, along with a description.</p>
-<pre class="codehilite"><code>"/clientfiles/hbase-site.xml": "site information for HBase"
-"/clientfiles/log4.properties": "log4.property file"</code></pre>
+<div class="codehilite"><pre><span class="s">"/clientfiles/hbase-site.xml"</span><span class="o">:</span> <span class="s">"site information for HBase"</span>
+<span class="s">"/clientfiles/log4.properties"</span><span class="o">:</span> <span class="s">"log4.property file"</span>
+</pre></div>
<p>A new CLI command would be added to retrieve a client file.
@@ -528,8 +544,9 @@ for clients, along with with a descripti
destination file/directory <code>--outfile <file></code> and <code>--outdir <dir></code>
1. If the <code>--list</code> argument is provided, the list of available files is
returned (e.g.) </p>
-<pre class="codehilite"><code>hbase-site.xml: site information for HBase
-log4.properties: log4.property file</code></pre>
+<div class="codehilite"><pre><span class="n">hbase</span><span class="o">-</span><span class="n">site</span><span class="p">.</span><span class="n">xml</span><span class="o">:</span> <span class="n">site</span> <span class="n">information</span> <span class="k">for</span> <span class="n">HBase</span>
+<span class="n">log4</span><span class="p">.</span><span class="n">properties</span><span class="o">:</span> <span class="n">log4</span><span class="p">.</span><span class="n">property</span> <span class="n">file</span>
+</pre></div>
<p><em>No attempt to parse/process the body of the messages will be returned.</em></p>
Modified: incubator/slider/site/content/docs/configuration/specification.html
URL: http://svn.apache.org/viewvc/incubator/slider/site/content/docs/configuration/specification.html?rev=1608633&r1=1608632&r2=1608633&view=diff
==============================================================================
--- incubator/slider/site/content/docs/configuration/specification.html (original)
+++ incubator/slider/site/content/docs/configuration/specification.html Tue Jul 8 00:49:34 2014
@@ -264,14 +264,15 @@ are not defined in this document.</p>
<h2 id="diagnostics-diagnostics-sections"><code>/diagnostics</code>: diagnostics sections</h2>
<p>Persisted list of information about Slider. </p>
<p>Static information about the file history</p>
-<pre class="codehilite"><code>"diagnostics" : {
- "create.hadoop.deployed.info" :
- "(detached from release-2.3.0) @dfe46336fbc6a044bc124392ec06b85",
- "create.application.build.info" :
- "Slider Core-0.13.0-SNAPSHOT Built against commit# 1a94ee4aa1 on Java 1.7.0_45 by stevel",
- "create.hadoop.build.info" : "2.3.0",
- "create.time.millis" : "1393512091276",
-},</code></pre>
+<div class="codehilite"><pre><span class="s">"diagnostics"</span> <span class="o">:</span> <span class="p">{</span>
+ <span class="s">"create.hadoop.deployed.info"</span> <span class="o">:</span>
+ <span class="s">"(detached from release-2.3.0) @dfe46336fbc6a044bc124392ec06b85"</span><span class="p">,</span>
+ <span class="s">"create.application.build.info"</span> <span class="o">:</span>
+ <span class="s">"Slider Core-0.13.0-SNAPSHOT Built against commit# 1a94ee4aa1 on Java 1.7.0_45 by stevel"</span><span class="p">,</span>
+ <span class="s">"create.hadoop.build.info"</span> <span class="o">:</span> <span class="s">"2.3.0"</span><span class="p">,</span>
+ <span class="s">"create.time.millis"</span> <span class="o">:</span> <span class="s">"1393512091276"</span><span class="p">,</span>
+<span class="p">},</span>
+</pre></div>
<p>This information is not intended to provide anything other
@@ -281,21 +282,22 @@ an empty or absent <code>/diagnostics</c
<h2 id="options-cluster-options">Options: cluster options</h2>
<p>A persisted list of options used by Slider and its providers to build up the AM
and the configurations of the deployed service components</p>
-<pre class="codehilite"><code>"options": {
- "slider.am.monitoring.enabled": "false",
- "slider.cluster.application.image.path": "hdfs://sandbox:8020/hbase.tar.gz",
- "slider.container.failure.threshold": "5",
- "slider.container.failure.shortlife": "60",
- "zookeeper.port": "2181",
- "zookeeper.path": "/yarnapps_slider_stevel_test_cluster_lifecycle",
- "zookeeper.hosts": "sandbox",
- "site.hbase.master.startup.retainassign": "true",
- "site.fs.defaultFS": "hdfs://sandbox:8020",
- "site.fs.default.name": "hdfs://sandbox:8020",
- "env.MALLOC_ARENA_MAX": "4",
- "site.hbase.master.info.port": "0",
- "site.hbase.regionserver.info.port": "0"
-},</code></pre>
+<div class="codehilite"><pre><span class="s">"options"</span><span class="o">:</span> <span class="p">{</span>
+ <span class="s">"slider.am.monitoring.enabled"</span><span class="o">:</span> <span class="s">"false"</span><span class="p">,</span>
+ <span class="s">"slider.cluster.application.image.path"</span><span class="o">:</span> <span class="s">"hdfs://sandbox:8020/hbase.tar.gz"</span><span class="p">,</span>
+ <span class="s">"slider.container.failure.threshold"</span><span class="o">:</span> <span class="s">"5"</span><span class="p">,</span>
+ <span class="s">"slider.container.failure.shortlife"</span><span class="o">:</span> <span class="s">"60"</span><span class="p">,</span>
+ <span class="s">"zookeeper.port"</span><span class="o">:</span> <span class="s">"2181"</span><span class="p">,</span>
+ <span class="s">"zookeeper.path"</span><span class="o">:</span> <span class="s">"/yarnapps_slider_stevel_test_cluster_lifecycle"</span><span class="p">,</span>
+ <span class="s">"zookeeper.hosts"</span><span class="o">:</span> <span class="s">"sandbox"</span><span class="p">,</span>
+ <span class="s">"site.hbase.master.startup.retainassign"</span><span class="o">:</span> <span class="s">"true"</span><span class="p">,</span>
+ <span class="s">"site.fs.defaultFS"</span><span class="o">:</span> <span class="s">"hdfs://sandbox:8020"</span><span class="p">,</span>
+ <span class="s">"site.fs.default.name"</span><span class="o">:</span> <span class="s">"hdfs://sandbox:8020"</span><span class="p">,</span>
+ <span class="s">"env.MALLOC_ARENA_MAX"</span><span class="o">:</span> <span class="s">"4"</span><span class="p">,</span>
+ <span class="s">"site.hbase.master.info.port"</span><span class="o">:</span> <span class="s">"0"</span><span class="p">,</span>
+ <span class="s">"site.hbase.regionserver.info.port"</span><span class="o">:</span> <span class="s">"0"</span>
+<span class="p">},</span>
+</pre></div>
<p>Many of the properties are automatically set by Slider when a cluster is constructed.
@@ -341,26 +343,28 @@ specific role.</p>
1. <code>yarn.</code> properties to configure YARN requests.
1. <code>jvm.heapsize</code>: an option supported by some providers to
fix the heap size of a component.</p>
-<pre class="codehilite"><code> "worker": {
- "yarn.memory": "768",
- "env.MALLOC_ARENA_MAX": "4",
- "role.instances": "0",
- "role.name": "worker",
- "role.failed.starting.instances": "0",
- "jvm.heapsize": "512M",
- "yarn.vcores": "1",
- },</code></pre>
+<div class="codehilite"><pre> <span class="s">"worker"</span><span class="o">:</span> <span class="p">{</span>
+ <span class="s">"yarn.memory"</span><span class="o">:</span> <span class="s">"768"</span><span class="p">,</span>
+ <span class="s">"env.MALLOC_ARENA_MAX"</span><span class="o">:</span> <span class="s">"4"</span><span class="p">,</span>
+ <span class="s">"role.instances"</span><span class="o">:</span> <span class="s">"0"</span><span class="p">,</span>
+ <span class="s">"role.name"</span><span class="o">:</span> <span class="s">"worker"</span><span class="p">,</span>
+ <span class="s">"role.failed.starting.instances"</span><span class="o">:</span> <span class="s">"0"</span><span class="p">,</span>
+ <span class="s">"jvm.heapsize"</span><span class="o">:</span> <span class="s">"512M"</span><span class="p">,</span>
+ <span class="s">"yarn.vcores"</span><span class="o">:</span> <span class="s">"1"</span><span class="p">,</span>
+ <span class="p">},</span>
+</pre></div>
<p>The role <code>slider</code> represents the Slider Application Master itself.</p>
-<pre class="codehilite"><code> "slider": {
- "yarn.memory": "256",
- "env.MALLOC_ARENA_MAX": "4",
- "role.instances": "1",
- "role.name": "slider",
- "jvm.heapsize": "256M",
- "yarn.vcores": "1",
- },</code></pre>
+<div class="codehilite"><pre> <span class="s">"slider"</span><span class="o">:</span> <span class="p">{</span>
+ <span class="s">"yarn.memory"</span><span class="o">:</span> <span class="s">"256"</span><span class="p">,</span>
+ <span class="s">"env.MALLOC_ARENA_MAX"</span><span class="o">:</span> <span class="s">"4"</span><span class="p">,</span>
+ <span class="s">"role.instances"</span><span class="o">:</span> <span class="s">"1"</span><span class="p">,</span>
+ <span class="s">"role.name"</span><span class="o">:</span> <span class="s">"slider"</span><span class="p">,</span>
+ <span class="s">"jvm.heapsize"</span><span class="o">:</span> <span class="s">"256M"</span><span class="p">,</span>
+ <span class="s">"yarn.vcores"</span><span class="o">:</span> <span class="s">"1"</span><span class="p">,</span>
+ <span class="p">},</span>
+</pre></div>
<p>Providers may support a fixed number of roles -or they may support a dynamic
@@ -423,23 +427,26 @@ information about the running applicatio
<li>
<p><code>slider.am.restart.supported"</code> whether the AM supports service restart without killing all the containers hosting
the role instances:</p>
-<pre class="codehilite"><code>"slider.am.restart.supported" : "false",</code></pre>
+<div class="codehilite"><pre><span class="s">"slider.am.restart.supported"</span> <span class="o">:</span> <span class="s">"false"</span><span class="p">,</span>
+</pre></div>
</li>
<li>
<p>timestamps of the cluster going live, and when the status query was made</p>
-<pre class="codehilite"><code>"live.time" : "27 Feb 2014 14:41:56 GMT",
-"live.time.millis" : "1393512116881",
-"status.time" : "27 Feb 2014 14:42:08 GMT",
-"status.time.millis" : "1393512128726",</code></pre>
+<div class="codehilite"><pre><span class="s">"live.time"</span> <span class="o">:</span> <span class="s">"27 Feb 2014 14:41:56 GMT"</span><span class="p">,</span>
+<span class="s">"live.time.millis"</span> <span class="o">:</span> <span class="s">"1393512116881"</span><span class="p">,</span>
+<span class="s">"status.time"</span> <span class="o">:</span> <span class="s">"27 Feb 2014 14:42:08 GMT"</span><span class="p">,</span>
+<span class="s">"status.time.millis"</span> <span class="o">:</span> <span class="s">"1393512128726"</span><span class="p">,</span>
+</pre></div>
</li>
<li>
<p>yarn data provided to the AM</p>
-<pre class="codehilite"><code>"yarn.vcores" : "32",
-"yarn.memory" : "2048",</code></pre>
+<div class="codehilite"><pre><span class="s">"yarn.vcores"</span> <span class="o">:</span> <span class="s">"32"</span><span class="p">,</span>
+<span class="s">"yarn.memory"</span> <span class="o">:</span> <span class="s">"2048"</span><span class="p">,</span>
+</pre></div>
</li>
@@ -447,9 +454,10 @@ information about the running applicatio
<p>information about the application and hadoop versions in use. Here
the application was built using Hadoop 2.3.0, but is running against the version
of Hadoop built for HDP-2.</p>
-<pre class="codehilite"><code>"status.application.build.info" : "Slider Core-0.13.0-SNAPSHOT Built against commit# 1a94ee4aa1 on Java 1.7.0_45 by stevel",
-"status.hadoop.build.info" : "2.3.0",
-"status.hadoop.deployed.info" : "bigwheel-m16-2.2.0 @704f1e463ebc4fb89353011407e965"</code></pre>
+<div class="codehilite"><pre><span class="s">"status.application.build.info"</span> <span class="o">:</span> <span class="s">"Slider Core-0.13.0-SNAPSHOT Built against commit# 1a94ee4aa1 on Java 1.7.0_45 by stevel"</span><span class="p">,</span>
+<span class="s">"status.hadoop.build.info"</span> <span class="o">:</span> <span class="s">"2.3.0"</span><span class="p">,</span>
+<span class="s">"status.hadoop.deployed.info"</span> <span class="o">:</span> <span class="s">"bigwheel-m16-2.2.0 @704f1e463ebc4fb89353011407e965"</span>
+</pre></div>
</li>
@@ -458,45 +466,47 @@ information about the running applicatio
for debugging.</p>
<p>## <code>/instances</code>: instance list</p>
<p>Information about the live containers in a cluster</p>
-<pre class="codehilite"><code> "instances": {
- "slider": [ "container_1393511571284_0002_01_000001" ],
- "master": [ "container_1393511571284_0002_01_000003" ],
- "worker": [
- "container_1393511571284_0002_01_000002",
- "container_1393511571284_0002_01_000004"
- ]
- },</code></pre>
+<div class="codehilite"><pre> <span class="s">"instances"</span><span class="o">:</span> <span class="p">{</span>
+ <span class="s">"slider"</span><span class="o">:</span> <span class="p">[</span> <span class="s">"container_1393511571284_0002_01_000001"</span> <span class="p">],</span>
+ <span class="s">"master"</span><span class="o">:</span> <span class="p">[</span> <span class="s">"container_1393511571284_0002_01_000003"</span> <span class="p">],</span>
+ <span class="s">"worker"</span><span class="o">:</span> <span class="p">[</span>
+ <span class="s">"container_1393511571284_0002_01_000002"</span><span class="p">,</span>
+ <span class="s">"container_1393511571284_0002_01_000004"</span>
+ <span class="p">]</span>
+ <span class="p">},</span>
+</pre></div>
<h2 id="status-detailed-dynamic-state"><code>/status</code>: detailed dynamic state</h2>
<p>This provides more detail on the application including live and failed instances</p>
<h3 id="statuslive-live-role-instances-by-container"><code>/status/live</code>: live role instances by container</h3>
-<pre class="codehilite"><code>"cluster": {
- "live": {
- "worker": {
- "container_1394032374441_0001_01_000003": {
- "name": "container_1394032374441_0001_01_000003",
- "role": "worker",
- "roleId": 1,
- "createTime": 1394032384451,
- "startTime": 1394032384503,
- "released": false,
- "host": "192.168.1.88",
- "state": 3,
- "exitCode": 0,
- "command": "hbase-0.98.0/bin/hbase --config $PROPAGATED_CONFDIR regionserver start 1><LOG_DIR>/region-server.txt 2>&1 ; ",
- "diagnostics": "",
- "environment": [
- "HADOOP_USER_NAME=\"slider\"",
- "HBASE_LOG_DIR=\"/tmp/slider-slider\"",
- "HBASE_HEAPSIZE=\"256\"",
- "MALLOC_ARENA_MAX=\"4\"",
- "PROPAGATED_CONFDIR=\"$PWD/propagatedconf\""
- ]
+<div class="codehilite"><pre><span class="s2">"cluster"</span><span class="p">:</span> <span class="p">{</span>
+ <span class="s2">"live"</span><span class="p">:</span> <span class="p">{</span>
+ <span class="s2">"worker"</span><span class="p">:</span> <span class="p">{</span>
+ <span class="s2">"container_1394032374441_0001_01_000003"</span><span class="p">:</span> <span class="p">{</span>
+ <span class="s2">"name"</span><span class="p">:</span> <span class="s2">"container_1394032374441_0001_01_000003"</span><span class="p">,</span>
+ <span class="s2">"role"</span><span class="p">:</span> <span class="s2">"worker"</span><span class="p">,</span>
+ <span class="s2">"roleId"</span><span class="p">:</span> <span class="mi">1</span><span class="p">,</span>
+ <span class="s2">"createTime"</span><span class="p">:</span> <span class="mi">1394032384451</span><span class="p">,</span>
+ <span class="s2">"startTime"</span><span class="p">:</span> <span class="mi">1394032384503</span><span class="p">,</span>
+ <span class="s2">"released"</span><span class="p">:</span> <span class="kc">false</span><span class="p">,</span>
+ <span class="s2">"host"</span><span class="p">:</span> <span class="s2">"192.168.1.88"</span><span class="p">,</span>
+ <span class="s2">"state"</span><span class="p">:</span> <span class="mi">3</span><span class="p">,</span>
+ <span class="s2">"exitCode"</span><span class="p">:</span> <span class="mi">0</span><span class="p">,</span>
+ <span class="s2">"command"</span><span class="p">:</span> <span class="s2">"hbase-0.98.0/bin/hbase --config $PROPAGATED_CONFDIR regionserver start 1><LOG_DIR>/region-server.txt 2>&1 ; "</span><span class="p">,</span>
+ <span class="s2">"diagnostics"</span><span class="p">:</span> <span class="s2">""</span><span class="p">,</span>
+ <span class="s2">"environment"</span><span class="p">:</span> <span class="err">[</span>
+ <span class="s2">"HADOOP_USER_NAME=</span><span class="se">\"</span><span class="s2">slider</span><span class="se">\"</span><span class="s2">"</span><span class="p">,</span>
+ <span class="s2">"HBASE_LOG_DIR=</span><span class="se">\"</span><span class="s2">/tmp/slider-slider</span><span class="se">\"</span><span class="s2">"</span><span class="p">,</span>
+ <span class="s2">"HBASE_HEAPSIZE=</span><span class="se">\"</span><span class="s2">256</span><span class="se">\"</span><span class="s2">"</span><span class="p">,</span>
+ <span class="s2">"MALLOC_ARENA_MAX=</span><span class="se">\"</span><span class="s2">4</span><span class="se">\"</span><span class="s2">"</span><span class="p">,</span>
+ <span class="s2">"PROPAGATED_CONFDIR=</span><span class="se">\"</span><span class="s2">$PWD/propagatedconf</span><span class="se">\"</span><span class="s2">"</span>
+ <span class="cp">]</span>
}
}
failed : {}
- }</code></pre>
+ }
+</pre></div>
<p>All live instances MUST be described in <code>/status/live</code></p>
@@ -507,33 +517,34 @@ a limited set of recently failed cluster
<p>This lists the current status of the roles:
How many are running vs requested, how many are being
released.</p>
-<pre class="codehilite"><code>"rolestatus": {
- "worker": {
- "role.instances": "2",
- "role.requested.instances": "0",
- "role.failed.starting.instances": "0",
- "role.actual.instances": "2",
- "role.releasing.instances": "0",
- "role.failed.instances": "1"
- },
- "slider": {
- "role.instances": "1",
- "role.requested.instances": "0",
- "role.name": "slider",
- "role.actual.instances": "1",
- "role.releasing.instances": "0",
- "role.failed.instances": "0"
- },
- "master": {
- "role.instances": "1",
- "role.requested.instances": "1",
- "role.name": "master",
- "role.failed.starting.instances": "0",
- "role.actual.instances": "0",
- "role.releasing.instances": "0",
- "role.failed.instances": "0"
- }
-}</code></pre>
+<div class="codehilite"><pre><span class="s">"rolestatus"</span><span class="o">:</span> <span class="p">{</span>
+ <span class="s">"worker"</span><span class="o">:</span> <span class="p">{</span>
+ <span class="s">"role.instances"</span><span class="o">:</span> <span class="s">"2"</span><span class="p">,</span>
+ <span class="s">"role.requested.instances"</span><span class="o">:</span> <span class="s">"0"</span><span class="p">,</span>
+ <span class="s">"role.failed.starting.instances"</span><span class="o">:</span> <span class="s">"0"</span><span class="p">,</span>
+ <span class="s">"role.actual.instances"</span><span class="o">:</span> <span class="s">"2"</span><span class="p">,</span>
+ <span class="s">"role.releasing.instances"</span><span class="o">:</span> <span class="s">"0"</span><span class="p">,</span>
+ <span class="s">"role.failed.instances"</span><span class="o">:</span> <span class="s">"1"</span>
+ <span class="p">},</span>
+ <span class="s">"slider"</span><span class="o">:</span> <span class="p">{</span>
+ <span class="s">"role.instances"</span><span class="o">:</span> <span class="s">"1"</span><span class="p">,</span>
+ <span class="s">"role.requested.instances"</span><span class="o">:</span> <span class="s">"0"</span><span class="p">,</span>
+ <span class="s">"role.name"</span><span class="o">:</span> <span class="s">"slider"</span><span class="p">,</span>
+ <span class="s">"role.actual.instances"</span><span class="o">:</span> <span class="s">"1"</span><span class="p">,</span>
+ <span class="s">"role.releasing.instances"</span><span class="o">:</span> <span class="s">"0"</span><span class="p">,</span>
+ <span class="s">"role.failed.instances"</span><span class="o">:</span> <span class="s">"0"</span>
+ <span class="p">},</span>
+ <span class="s">"master"</span><span class="o">:</span> <span class="p">{</span>
+ <span class="s">"role.instances"</span><span class="o">:</span> <span class="s">"1"</span><span class="p">,</span>
+ <span class="s">"role.requested.instances"</span><span class="o">:</span> <span class="s">"1"</span><span class="p">,</span>
+ <span class="s">"role.name"</span><span class="o">:</span> <span class="s">"master"</span><span class="p">,</span>
+ <span class="s">"role.failed.starting.instances"</span><span class="o">:</span> <span class="s">"0"</span><span class="p">,</span>
+ <span class="s">"role.actual.instances"</span><span class="o">:</span> <span class="s">"0"</span><span class="p">,</span>
+ <span class="s">"role.releasing.instances"</span><span class="o">:</span> <span class="s">"0"</span><span class="p">,</span>
+ <span class="s">"role.failed.instances"</span><span class="o">:</span> <span class="s">"0"</span>
+ <span class="p">}</span>
+<span class="p">}</span>
+</pre></div>
<h3 id="statusprovider-provider-specific-information"><code>/status/provider</code>: provider-specific information</h3>
@@ -547,39 +558,40 @@ write this block -operations that are as
<p>Statistics on the cluster and each role in the cluster </p>
<p>Better to have a specific <code>/statistics/cluster</code> element,
and to move the roles' statistics under <code>/statistics/roles</code>:</p>
-<pre class="codehilite"><code>"statistics": {
- "cluster": {
- "containers.unknown.completed": 0,
- "containers.start.completed": 3,
- "containers.live": 1,
- "containers.start.failed": 0,
- "containers.failed": 0,
- "containers.completed": 0,
- "containers.surplus": 0
- },
- "roles": {
- "worker": {
- "containers.start.completed": 0,
- "containers.live": 2,
- "containers.start.failed": 0,
- "containers.active.requests": 0,
- "containers.failed": 0,
- "containers.completed": 0,
- "containers.desired": 2,
- "containers.requested": 0
- },
- "master": {
- "containers.start.completed": 0,
- "containers.live": 1,
- "containers.start.failed": 0,
- "containers.active.requests": 0,
- "containers.failed": 0,
- "containers.completed": 0,
- "containers.desired": 1,
- "containers.requested": 0
- }
- }
-},</code></pre>
+<div class="codehilite"><pre><span class="s">"statistics"</span><span class="o">:</span> <span class="p">{</span>
+ <span class="s">"cluster"</span><span class="o">:</span> <span class="p">{</span>
+ <span class="s">"containers.unknown.completed"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+ <span class="s">"containers.start.completed"</span><span class="o">:</span> <span class="mi">3</span><span class="p">,</span>
+ <span class="s">"containers.live"</span><span class="o">:</span> <span class="mi">1</span><span class="p">,</span>
+ <span class="s">"containers.start.failed"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+ <span class="s">"containers.failed"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+ <span class="s">"containers.completed"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+ <span class="s">"containers.surplus"</span><span class="o">:</span> <span class="mi">0</span>
+ <span class="p">},</span>
+ <span class="s">"roles"</span><span class="o">:</span> <span class="p">{</span>
+ <span class="s">"worker"</span><span class="o">:</span> <span class="p">{</span>
+ <span class="s">"containers.start.completed"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+ <span class="s">"containers.live"</span><span class="o">:</span> <span class="mi">2</span><span class="p">,</span>
+ <span class="s">"containers.start.failed"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+ <span class="s">"containers.active.requests"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+ <span class="s">"containers.failed"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+ <span class="s">"containers.completed"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+ <span class="s">"containers.desired"</span><span class="o">:</span> <span class="mi">2</span><span class="p">,</span>
+ <span class="s">"containers.requested"</span><span class="o">:</span> <span class="mi">0</span>
+ <span class="p">},</span>
+ <span class="s">"master"</span><span class="o">:</span> <span class="p">{</span>
+ <span class="s">"containers.start.completed"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+ <span class="s">"containers.live"</span><span class="o">:</span> <span class="mi">1</span><span class="p">,</span>
+ <span class="s">"containers.start.failed"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+ <span class="s">"containers.active.requests"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+ <span class="s">"containers.failed"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+ <span class="s">"containers.completed"</span><span class="o">:</span> <span class="mi">0</span><span class="p">,</span>
+ <span class="s">"containers.desired"</span><span class="o">:</span> <span class="mi">1</span><span class="p">,</span>
+ <span class="s">"containers.requested"</span><span class="o">:</span> <span class="mi">0</span>
+ <span class="p">}</span>
+ <span class="p">}</span>
+<span class="p">},</span>
+</pre></div>
<p><code>/statistics/cluster</code> provides aggregate statistics for the entire cluster.</p>
@@ -595,8 +607,9 @@ the scope of this document.</p>
<h3 id="clientfiles"><code>/clientfiles</code></h3>
<p>This section list all files that an application instance MAY generate
for clients, along with with a description.</p>
-<pre class="codehilite"><code>"/clientfiles/hbase-site.xml": "site information for HBase"
-"/clientfiles/log4.properties": "log4.property file"</code></pre>
+<div class="codehilite"><pre><span class="s">"/clientfiles/hbase-site.xml"</span><span class="o">:</span> <span class="s">"site information for HBase"</span>
+<span class="s">"/clientfiles/log4.properties"</span><span class="o">:</span> <span class="s">"log4.property file"</span>
+</pre></div>
<p>Client configuration file retrieval is by other means; this
Modified: incubator/slider/site/content/docs/debugging.html
URL: http://svn.apache.org/viewvc/incubator/slider/site/content/docs/debugging.html?rev=1608633&r1=1608632&r2=1608633&view=diff
==============================================================================
--- incubator/slider/site/content/docs/debugging.html (original)
+++ incubator/slider/site/content/docs/debugging.html Tue Jul 8 00:49:34 2014
@@ -218,12 +218,13 @@ Application Master that enable attaching
In order to specify the JVM options, edit the application configuration file
(the file specified as the <code>--template</code> argument value on the command line for cluster creation)
and specify the <code>jvm.opts</code> property for the <code>slider-appmaster</code> component:</p>
-<pre class="codehilite"><code>`"components": {
- "slider-appmaster": {
- "jvm.heapsize": "256M",
- "jvm.opts": "-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005"
- },
- ...`</code></pre>
+<div class="codehilite"><pre><span class="err">`</span><span class="s">"components"</span><span class="o">:</span> <span class="p">{</span>
+ <span class="s">"slider-appmaster"</span><span class="o">:</span> <span class="p">{</span>
+ <span class="s">"jvm.heapsize"</span><span class="o">:</span> <span class="s">"256M"</span><span class="p">,</span>
+ <span class="s">"jvm.opts"</span><span class="o">:</span> <span class="s">"-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005"</span>
+ <span class="p">},</span>
+ <span class="p">...</span><span class="err">`</span>
+</pre></div>
<p>You may specify <code>suspend=y</code> in the line above if you wish to have the application master process wait for the debugger to attach before beginning its processing.</p>
Modified: incubator/slider/site/content/docs/examples.html
URL: http://svn.apache.org/viewvc/incubator/slider/site/content/docs/examples.html?rev=1608633&r1=1608632&r2=1608633&view=diff
==============================================================================
--- incubator/slider/site/content/docs/examples.html (original)
+++ incubator/slider/site/content/docs/examples.html Tue Jul 8 00:49:34 2014
@@ -178,117 +178,127 @@ its configuration should be changed to u
<h1 id="the-examples-below-all-assume-there-is-a-cluster-node-called-master-which">The examples below all assume there is a cluster node called 'master', which</h1>
<p>hosts the HDFS NameNode and the YARN Resource Manager</p>
<h1 id="preamble">preamble</h1>
-<pre class="codehilite"><code>export HADOOP_CONF_DIR=~/conf
-export PATH=~/hadoop/bin:/~/hadoop/sbin:~/zookeeper-3.4.5/bin:$PATH
+<div class="codehilite"><pre><span class="n">export</span> <span class="n">HADOOP_CONF_DIR</span><span class="o">=~/</span><span class="n">conf</span>
+<span class="n">export</span> <span class="n">PATH</span><span class="o">=~/</span><span class="n">hadoop</span><span class="o">/</span><span class="n">bin</span><span class="o">:/~/</span><span class="n">hadoop</span><span class="o">/</span><span class="n">sbin</span><span class="o">:~/</span><span class="n">zookeeper</span><span class="o">-</span><span class="mf">3.4.5</span><span class="o">/</span><span class="n">bin</span><span class="o">:</span><span class="err">$</span><span class="n">PATH</span>
-hdfs namenode -format master</code></pre>
+<span class="n">hdfs</span> <span class="n">namenode</span> <span class="o">-</span><span class="n">format</span> <span class="n">master</span>
+</pre></div>
<h1 id="start-all-the-services">start all the services</h1>
-<pre class="codehilite"><code>nohup hdfs --config $HADOOP_CONF_DIR namenode &
-nohup hdfs --config $HADOOP_CONF_DIR datanode &
+<div class="codehilite"><pre><span class="n">nohup</span> <span class="n">hdfs</span> <span class="o">--</span><span class="n">config</span> <span class="err">$</span><span class="n">HADOOP_CONF_DIR</span> <span class="n">namenode</span> <span class="o">&</span>
+<span class="n">nohup</span> <span class="n">hdfs</span> <span class="o">--</span><span class="n">config</span> <span class="err">$</span><span class="n">HADOOP_CONF_DIR</span> <span class="n">datanode</span> <span class="o">&</span>
-nohup yarn --config $HADOOP_CONF_DIR resourcemanager &
-nohup yarn --config $HADOOP_CONF_DIR nodemanager &</code></pre>
+<span class="n">nohup</span> <span class="n">yarn</span> <span class="o">--</span><span class="n">config</span> <span class="err">$</span><span class="n">HADOOP_CONF_DIR</span> <span class="n">resourcemanager</span> <span class="o">&</span>
+<span class="n">nohup</span> <span class="n">yarn</span> <span class="o">--</span><span class="n">config</span> <span class="err">$</span><span class="n">HADOOP_CONF_DIR</span> <span class="n">nodemanager</span> <span class="o">&</span>
+</pre></div>
<h1 id="using-hadoopsbin-service-launchers">using hadoop/sbin service launchers</h1>
-<pre class="codehilite"><code>hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs start namenode
-hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs start datanode
-yarn-daemon.sh --config $HADOOP_CONF_DIR start resourcemanager
-yarn-daemon.sh --config $HADOOP_CONF_DIR start nodemanager
+<div class="codehilite"><pre><span class="n">hadoop</span><span class="o">-</span><span class="n">daemon</span><span class="p">.</span><span class="n">sh</span> <span class="o">--</span><span class="n">config</span> <span class="err">$</span><span class="n">HADOOP_CONF_DIR</span> <span class="o">--</span><span class="n">script</span> <span class="n">hdfs</span> <span class="n">start</span> <span class="n">namenode</span>
+<span class="n">hadoop</span><span class="o">-</span><span class="n">daemon</span><span class="p">.</span><span class="n">sh</span> <span class="o">--</span><span class="n">config</span> <span class="err">$</span><span class="n">HADOOP_CONF_DIR</span> <span class="o">--</span><span class="n">script</span> <span class="n">hdfs</span> <span class="n">start</span> <span class="n">datanode</span>
+<span class="n">yarn</span><span class="o">-</span><span class="n">daemon</span><span class="p">.</span><span class="n">sh</span> <span class="o">--</span><span class="n">config</span> <span class="err">$</span><span class="n">HADOOP_CONF_DIR</span> <span class="n">start</span> <span class="n">resourcemanager</span>
+<span class="n">yarn</span><span class="o">-</span><span class="n">daemon</span><span class="p">.</span><span class="n">sh</span> <span class="o">--</span><span class="n">config</span> <span class="err">$</span><span class="n">HADOOP_CONF_DIR</span> <span class="n">start</span> <span class="n">nodemanager</span>
-~/zookeeper/bin/zkServer.sh start</code></pre>
+<span class="o">~/</span><span class="n">zookeeper</span><span class="o">/</span><span class="n">bin</span><span class="o">/</span><span class="n">zkServer</span><span class="p">.</span><span class="n">sh</span> <span class="n">start</span>
+</pre></div>
<h1 id="stop-them">stop them</h1>
-<pre class="codehilite"><code>hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs stop namenode
-hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs stop datanode
+<div class="codehilite"><pre><span class="n">hadoop</span><span class="o">-</span><span class="n">daemon</span><span class="p">.</span><span class="n">sh</span> <span class="o">--</span><span class="n">config</span> <span class="err">$</span><span class="n">HADOOP_CONF_DIR</span> <span class="o">--</span><span class="n">script</span> <span class="n">hdfs</span> <span class="n">stop</span> <span class="n">namenode</span>
+<span class="n">hadoop</span><span class="o">-</span><span class="n">daemon</span><span class="p">.</span><span class="n">sh</span> <span class="o">--</span><span class="n">config</span> <span class="err">$</span><span class="n">HADOOP_CONF_DIR</span> <span class="o">--</span><span class="n">script</span> <span class="n">hdfs</span> <span class="n">stop</span> <span class="n">datanode</span>
-yarn-daemon.sh --config $HADOOP_CONF_DIR stop resourcemanager
-yarn-daemon.sh --config $HADOOP_CONF_DIR stop nodemanager</code></pre>
+<span class="n">yarn</span><span class="o">-</span><span class="n">daemon</span><span class="p">.</span><span class="n">sh</span> <span class="o">--</span><span class="n">config</span> <span class="err">$</span><span class="n">HADOOP_CONF_DIR</span> <span class="n">stop</span> <span class="n">resourcemanager</span>
+<span class="n">yarn</span><span class="o">-</span><span class="n">daemon</span><span class="p">.</span><span class="n">sh</span> <span class="o">--</span><span class="n">config</span> <span class="err">$</span><span class="n">HADOOP_CONF_DIR</span> <span class="n">stop</span> <span class="n">nodemanager</span>
+</pre></div>
<p>NN up on <a href="http://master:50070/dfshealth.jsp">http://master:50070/dfshealth.jsp</a>
RM yarn-daemon.sh --config $HADOOP_CONF_DIR start nodemanager</p>
-<pre class="codehilite"><code>~/zookeeper/bin/zkServer.sh start
+<div class="codehilite"><pre><span class="o">~/</span><span class="n">zookeeper</span><span class="o">/</span><span class="n">bin</span><span class="o">/</span><span class="n">zkServer</span><span class="p">.</span><span class="n">sh</span> <span class="n">start</span>
-# shutdown
-~/zookeeper/bin/zkServer.sh stop</code></pre>
+<span class="cp"># shutdown</span>
+<span class="o">~/</span><span class="n">zookeeper</span><span class="o">/</span><span class="n">bin</span><span class="o">/</span><span class="n">zkServer</span><span class="p">.</span><span class="n">sh</span> <span class="n">stop</span>
+</pre></div>
<p>Tip: after a successful run on a local cluster, do a quick <code>rm -rf $HADOOP_HOME/logs</code>
to keep the log bloat under control.</p>
<h2 id="get-hbase-in">get hbase in</h2>
<p>copy to local </p>
-<pre class="codehilite"><code>get hbase-0.98.0-bin.tar on
+<div class="codehilite"><pre><span class="n">get</span> <span class="n">hbase</span><span class="o">-</span><span class="mf">0.98.0</span><span class="o">-</span><span class="n">bin</span><span class="p">.</span><span class="n">tar</span> <span class="n">on</span>
-hdfs dfs -rm hdfs://master:9090/hbase.tar
-hdfs dfs -copyFromLocal hbase-0.98.0-bin.tar hdfs://master:9090/hbase.tar</code></pre>
+<span class="n">hdfs</span> <span class="n">dfs</span> <span class="o">-</span><span class="n">rm</span> <span class="n">hdfs</span><span class="o">:</span><span class="c1">//master:9090/hbase.tar</span>
+<span class="n">hdfs</span> <span class="n">dfs</span> <span class="o">-</span><span class="n">copyFromLocal</span> <span class="n">hbase</span><span class="o">-</span><span class="mf">0.98.0</span><span class="o">-</span><span class="n">bin</span><span class="p">.</span><span class="n">tar</span> <span class="n">hdfs</span><span class="o">:</span><span class="c1">//master:9090/hbase.tar</span>
+</pre></div>
<p>or</p>
-<pre class="codehilite"><code>hdfs dfs -copyFromLocal hbase-0.96.0-bin.tar hdfs://master:9090/hbase.tar
-hdfs dfs -ls hdfs://master:9090/</code></pre>
+<div class="codehilite"><pre><span class="n">hdfs</span> <span class="n">dfs</span> <span class="o">-</span><span class="n">copyFromLocal</span> <span class="n">hbase</span><span class="o">-</span><span class="mf">0.96.0</span><span class="o">-</span><span class="n">bin</span><span class="p">.</span><span class="n">tar</span> <span class="n">hdfs</span><span class="o">:</span><span class="c1">//master:9090/hbase.tar</span>
+<span class="n">hdfs</span> <span class="n">dfs</span> <span class="o">-</span><span class="n">ls</span> <span class="n">hdfs</span><span class="o">:</span><span class="c1">//master:9090/</span>
+</pre></div>
<h3 id="optional-point-binslider-at-your-chosen-cluster-configuration">Optional: point bin/slider at your chosen cluster configuration</h3>
<p>export SLIDER_CONF_DIR=~/Projects/slider/slider-core/src/test/configs/ubuntu-secure/slider</p>
<h2 id="optional-clean-up-any-existing-slider-cluster-details">Optional: Clean up any existing slider cluster details</h2>
<p>This is for demos only, otherwise you lose the clusters and their databases.</p>
-<pre class="codehilite"><code>hdfs dfs -rm -r hdfs://master:9090/user/home/stevel/.slider</code></pre>
+<div class="codehilite"><pre><span class="n">hdfs</span> <span class="n">dfs</span> <span class="o">-</span><span class="n">rm</span> <span class="o">-</span><span class="n">r</span> <span class="n">hdfs</span><span class="o">:</span><span class="c1">//master:9090/user/home/stevel/.slider</span>
+</pre></div>
<h2 id="create-a-slider-cluster">Create a Slider Cluster</h2>
-<pre class="codehilite"><code>slider create cl1 \
---component worker 1 --component master 1 \
- --manager master:8032 --filesystem hdfs://master:9090 \
- --zkhosts localhost:2181 --image hdfs://master:9090/hbase.tar
-
-# create the cluster
-
-slider create cl1 \
- --component worker 4 --component master 1 \
- --manager master:8032 --filesystem hdfs://master:9090 --zkhosts localhost \
- --image hdfs://master:9090/hbase.tar \
- --appconf file:////Users/slider/Hadoop/configs/master/hbase \
- --compopt master jvm.heap 128 \
- --compopt master env.MALLOC_ARENA_MAX 4 \
- --compopt worker jvm.heap 128
-
-# freeze the cluster
-slider freeze cl1 \
---manager master:8032 --filesystem hdfs://master:9090
-
-# thaw a cluster
-slider thaw cl1 \
---manager master:8032 --filesystem hdfs://master:9090
-
-# destroy the cluster
-slider destroy cl1 \
---manager master:8032 --filesystem hdfs://master:9090
-
-# list clusters
-slider list cl1 \
---manager master:8032 --filesystem hdfs://master:9090
-
-slider flex cl1 --component worker 2
---manager master:8032 --filesystem hdfs://master:9090 \
---component worker 5</code></pre>
+<div class="codehilite"><pre><span class="n">slider</span> <span class="n">create</span> <span class="n">cl1</span> \
+<span class="o">--</span><span class="n">component</span> <span class="n">worker</span> <span class="mi">1</span> <span class="o">--</span><span class="n">component</span> <span class="n">master</span> <span class="mi">1</span> \
+ <span class="o">--</span><span class="n">manager</span> <span class="n">master</span><span class="o">:</span><span class="mi">8032</span> <span class="o">--</span><span class="n">filesystem</span> <span class="n">hdfs</span><span class="o">:</span><span class="c1">//master:9090 \</span>
+<span class="c1"> --zkhosts localhost:2181 --image hdfs://master:9090/hbase.tar</span>
+
+<span class="cp"># create the cluster</span>
+
+<span class="n">slider</span> <span class="n">create</span> <span class="n">cl1</span> \
+ <span class="o">--</span><span class="n">component</span> <span class="n">worker</span> <span class="mi">4</span> <span class="o">--</span><span class="n">component</span> <span class="n">master</span> <span class="mi">1</span> \
+ <span class="o">--</span><span class="n">manager</span> <span class="n">master</span><span class="o">:</span><span class="mi">8032</span> <span class="o">--</span><span class="n">filesystem</span> <span class="n">hdfs</span><span class="o">:</span><span class="c1">//master:9090 --zkhosts localhost \</span>
+<span class="c1"> --image hdfs://master:9090/hbase.tar \</span>
+<span class="c1"> --appconf file:////Users/slider/Hadoop/configs/master/hbase \</span>
+<span class="c1"> --compopt master jvm.heap 128 \</span>
+<span class="c1"> --compopt master env.MALLOC_ARENA_MAX 4 \</span>
+<span class="c1"> --compopt worker jvm.heap 128</span>
+
+<span class="cp"># freeze the cluster</span>
+<span class="n">slider</span> <span class="n">freeze</span> <span class="n">cl1</span> \
+<span class="o">--</span><span class="n">manager</span> <span class="n">master</span><span class="o">:</span><span class="mi">8032</span> <span class="o">--</span><span class="n">filesystem</span> <span class="n">hdfs</span><span class="o">:</span><span class="c1">//master:9090</span>
+
+<span class="cp"># thaw a cluster</span>
+<span class="n">slider</span> <span class="n">thaw</span> <span class="n">cl1</span> \
+<span class="o">--</span><span class="n">manager</span> <span class="n">master</span><span class="o">:</span><span class="mi">8032</span> <span class="o">--</span><span class="n">filesystem</span> <span class="n">hdfs</span><span class="o">:</span><span class="c1">//master:9090</span>
+
+<span class="cp"># destroy the cluster</span>
+<span class="n">slider</span> <span class="n">destroy</span> <span class="n">cl1</span> \
+<span class="o">--</span><span class="n">manager</span> <span class="n">master</span><span class="o">:</span><span class="mi">8032</span> <span class="o">--</span><span class="n">filesystem</span> <span class="n">hdfs</span><span class="o">:</span><span class="c1">//master:9090</span>
+
+<span class="cp"># list clusters</span>
+<span class="n">slider</span> <span class="n">list</span> <span class="n">cl1</span> \
+<span class="o">--</span><span class="n">manager</span> <span class="n">master</span><span class="o">:</span><span class="mi">8032</span> <span class="o">--</span><span class="n">filesystem</span> <span class="n">hdfs</span><span class="o">:</span><span class="c1">//master:9090</span>
+
+<span class="n">slider</span> <span class="n">flex</span> <span class="n">cl1</span> <span class="o">--</span><span class="n">component</span> <span class="n">worker</span> <span class="mi">2</span>
+<span class="o">--</span><span class="n">manager</span> <span class="n">master</span><span class="o">:</span><span class="mi">8032</span> <span class="o">--</span><span class="n">filesystem</span> <span class="n">hdfs</span><span class="o">:</span><span class="c1">//master:9090 \</span>
+<span class="c1">--component worker 5</span>
+</pre></div>
<h2 id="create-an-accumulo-cluster">Create an Accumulo Cluster</h2>
-<pre class="codehilite"><code>slider create accl1 --provider accumulo \
---component master 1 --component tserver 1 --component gc 1 --component monitor 1 --component tracer 1 \
---manager localhost:8032 --filesystem hdfs://localhost:9000 \
---zkhosts localhost:2181 --zkpath /local/zookeeper \
---image hdfs://localhost:9000/user/username/accumulo-1.6.0-SNAPSHOT-bin.tar \
---appconf hdfs://localhost:9000/user/username/accumulo-conf \
--O zk.home /local/zookeeper -O hadoop.home /local/hadoop \
--O site.monitor.port.client 50095 -O accumulo.password secret</code></pre>
+<div class="codehilite"><pre><span class="n">slider</span> <span class="n">create</span> <span class="n">accl1</span> <span class="o">--</span><span class="n">provider</span> <span class="n">accumulo</span> \
+<span class="o">--</span><span class="n">component</span> <span class="n">master</span> <span class="mi">1</span> <span class="o">--</span><span class="n">component</span> <span class="n">tserver</span> <span class="mi">1</span> <span class="o">--</span><span class="n">component</span> <span class="n">gc</span> <span class="mi">1</span> <span class="o">--</span><span class="n">component</span> <span class="n">monitor</span> <span class="mi">1</span> <span class="o">--</span><span class="n">component</span> <span class="n">tracer</span> <span class="mi">1</span> \
+<span class="o">--</span><span class="n">manager</span> <span class="n">localhost</span><span class="o">:</span><span class="mi">8032</span> <span class="o">--</span><span class="n">filesystem</span> <span class="n">hdfs</span><span class="o">:</span><span class="c1">//localhost:9000 \</span>
+<span class="c1">--zkhosts localhost:2181 --zkpath /local/zookeeper \</span>
+<span class="c1">--image hdfs://localhost:9000/user/username/accumulo-1.6.0-SNAPSHOT-bin.tar \</span>
+<span class="c1">--appconf hdfs://localhost:9000/user/username/accumulo-conf \</span>
+<span class="c1">-O zk.home /local/zookeeper -O hadoop.home /local/hadoop \</span>
+<span class="c1">-O site.monitor.port.client 50095 -O accumulo.password secret</span>
+</pre></div>
</div>
<div id="footer">