Posted to commits@slider.apache.org by bi...@apache.org on 2014/06/24 21:46:39 UTC

svn commit: r1605167 [6/9] - in /incubator/slider/site: branches/ trunk/ trunk/cgi-bin/ trunk/content/ trunk/content/css/ trunk/content/design/ trunk/content/design/registry/ trunk/content/design/specification/ trunk/content/developing/ trunk/content/d...

Added: incubator/slider/site/trunk/content/docs/configuration/proposed-hbase.json
URL: http://svn.apache.org/viewvc/incubator/slider/site/trunk/content/docs/configuration/proposed-hbase.json?rev=1605167&view=auto
==============================================================================
--- incubator/slider/site/trunk/content/docs/configuration/proposed-hbase.json (added)
+++ incubator/slider/site/trunk/content/docs/configuration/proposed-hbase.json Tue Jun 24 19:46:37 2014
@@ -0,0 +1,273 @@
+{
+  "version": "2.0.0",
+  "name": "test_cluster_lifecycle",
+  "valid`": true,
+  
+  "slider-internal":{
+    "type": "hbase",
+    "createTime": 1393512091276,
+    "updateTime": 1393512117286,
+    "originConfigurationPath": "hdfs://sandbox:8020/user/slider/.slider/cluster/test_cluster_lifecycle/snapshot",
+    "generatedConfigurationPath": "hdfs://sandbox:8020/user/slider/.slider/cluster/test_cluster_lifecycle/generated",
+    "dataPath": "hdfs://sandbox:8020/user/slider/.slider/cluster/test_cluster_lifecycle/database",
+    "slider.tmp.dir": "hdfs://sandbox:8020/user/slider/.slider/cluster/test_cluster_lifecycle/tmp/am",
+    "slider.cluster.directory.permissions": "0770",
+    "slider.data.directory.permissions": "0770"
+  },
+  
+  "options": {
+    "slider.am.monitoring.enabled": "false",
+    "slider.cluster.application.image.path": "hdfs://sandbox:8020/hbase.tar.gz",
+    "slider.container.failure.threshold": "5",
+    "slider.container.failure.shortlife": "60",
+    "zookeeper.port": "2181",
+    "zookeeper.path": "/yarnapps_slider_slider_test_cluster_lifecycle",
+    "zookeeper.hosts": "sandbox",
+    "site.hbase.master.startup.retainassign": "true",
+    "site.fs.defaultFS": "hdfs://sandbox:8020",
+    "site.fs.default.name": "hdfs://sandbox:8020",
+    "env.MALLOC_ARENA_MAX": "4",
+    "site.hbase.master.info.port": "0",
+    "site.hbase.regionserver.info.port": "0"
+  },
+  
+  "diagnostics": {
+    "create.hadoop.deployed.info": "(detached from release-2.3.0) @dfe46336fbc6a044bc124392ec06b85",
+    "create.application.build.info": "Slider Core-0.13.0-SNAPSHOT Built against commit# 1a94ee4aa1 on Java 1.7.0_45 by slider",
+    "create.hadoop.build.info": "2.3.0",
+    "create.time.millis": "1393512091276",
+    "create.time": "27 Feb 2014 14:41:31 GMT"
+  },
+  
+  "info": {
+    "slider.am.restart.supported": "false",
+    "live.time": "27 Feb 2014 14:41:56 GMT",
+    "live.time.millis": "1393512116881",
+    "status.time": "27 Feb 2014 14:42:08 GMT",
+    "status.time.millis": "1393512128726",
+    "yarn.vcores": "32",
+    "yarn.memory": "2048",
+    "status.application.build.info": "Slider Core-0.13.0-SNAPSHOT Built against commit# 1a94ee4aa1 on Java 1.7.0_45 by slider",
+    "status.hadoop.build.info": "2.3.0",
+    "status.hadoop.deployed.info": "bigwheel-m16-2.2.0 @704f1e463ebc4fb89353011407e965"
+  },
+
+  "statistics": {
+
+    "cluster": {
+      "containers.unknown.completed": 0,
+      "containers.start.completed": 3,
+      "containers.live": 1,
+      "containers.start.failed": 0,
+      "containers.failed": 0,
+      "containers.completed": 0,
+      "containers.surplus": 0
+
+    },
+    "roles": {
+      "worker": {
+        "containers.start.completed": 0,
+        "containers.live": 2,
+        "containers.start.failed": 0,
+        "containers.active.requests": 0,
+        "containers.failed": 0,
+        "containers.completed": 0,
+        "containers.desired": 2,
+        "containers.requested": 0
+      },
+      "master": {
+        "containers.start.completed": 0,
+        "containers.live": 1,
+        "containers.start.failed": 0,
+        "containers.active.requests": 0,
+        "containers.failed": 0,
+        "containers.completed": 0,
+        "containers.desired": 1,
+        "containers.requested": 0
+      }
+    }
+  },
+
+  "instances": {
+    "slider": [ "container_1393511571284_0002_01_000001" ],
+    "master": [ "container_1393511571284_0002_01_000003" ],
+    "worker": [ 
+      "container_1393511571284_0002_01_000002",
+      "container_1393511571284_0002_01_000004"
+    ]
+  },
+  
+  "roles": {
+    "worker": {
+      "yarn.memory": "768",
+      "role.instances": "0",
+      "role.name": "worker",
+      "jvm.heapsize": "512M",
+      "yarn.vcores": "1"
+    },
+    "slider": {
+      "yarn.memory": "256",
+      "role.instances": "1",
+      "role.name": "slider",
+      "jvm.heapsize": "256M",
+      "yarn.vcores": "1"
+    },
+    "master": {
+      "yarn.memory": "1024",
+      "role.instances": "0",
+      "role.name": "master",
+      "jvm.heapsize": "512M",
+      "yarn.vcores": "1"
+    }
+  },
+
+
+  "clientProperties": {
+    "fs.defaultFS": "hdfs://sandbox:8020",
+    "hbase.cluster.distributed": "true",
+    "hbase.master.info.port": "0",
+    "hbase.master.port": "0",
+    "hbase.master.startup.retainassign": "true",
+    "hbase.regionserver.hlog.tolerable.lowreplication": "1",
+    "hbase.regionserver.info.port": "0",
+    "hbase.regionserver.port": "0",
+    "hbase.rootdir": "hdfs://sandbox:8020/user/slider/.slider/cluster/test_cluster_lifecycle/database",
+    "hbase.tmp.dir": "./hbase-tmp",
+    "hbase.zookeeper.property.clientPort": "2181",
+    "hbase.zookeeper.quorum": "sandbox",
+    "zookeeper.znode.parent": "/yarnapps_slider_slider_test_cluster_lifecycle"
+  },
+
+
+  "clientfiles": {
+    "hbase-site.xml": "site information for HBase",
+    "log4.properties": "log4.property file"
+  },
+
+  "provider":{
+    "load":0.4,
+    "urls": {
+      "master": ["http://node4:28209"],
+      "worker": ["http://node4:28717", "http://node6:31268"]
+    }
+  },
+
+  "status": {
+    "live": {
+      "worker": {
+        "container_1394032374441_0001_01_000003": {
+          "name": "container_1394032374441_0001_01_000003",
+          "role": "worker",
+          "roleId": 1,
+          "createTime": 1394032384451,
+          "startTime": 1394032384503,
+          "released": false,
+          "host": "192.168.1.88",
+          "state": 3,
+          "exitCode": 0,
+          "command": "hbase-0.98.0/bin/hbase --config $PROPAGATED_CONFDIR regionserver start 1><LOG_DIR>/region-server.txt 2>&1 ; ",
+          "diagnostics": "",
+          "environment": [
+            "HADOOP_USER_NAME=\"slider\"",
+            "HBASE_LOG_DIR=\"/tmp/slider-slider\"",
+            "HBASE_HEAPSIZE=\"256\"",
+            "MALLOC_ARENA_MAX=\"4\"",
+            "PROPAGATED_CONFDIR=\"$PWD/propagatedconf\""
+          ]
+        },
+        "container_1394032374441_0001_01_000002": {
+          "name": "container_1394032374441_0001_01_000002",
+          "role": "worker",
+          "roleId": 1,
+          "createTime": 1394032384451,
+          "startTime": 1394032384552,
+          "released": false,
+          "host": "192.168.1.86",
+          "state": 3,
+          "exitCode": 0,
+          "command": "hbase-0.98.0/bin/hbase --config $PROPAGATED_CONFDIR regionserver start 1><LOG_DIR>/region-server.txt 2>&1 ; ",
+          "diagnostics": "",
+          "environment": [
+            "HADOOP_USER_NAME=\"slider\"",
+            "HBASE_LOG_DIR=\"/tmp/slider-slider\"",
+            "HBASE_HEAPSIZE=\"256\"",
+            "MALLOC_ARENA_MAX=\"4\"",
+            "PROPAGATED_CONFDIR=\"$PWD/propagatedconf\""
+          ]
+        }
+      },
+      "slider": {
+        "container_1394032374441_0001_01_000001": {
+          "name": "container_1394032374441_0001_01_000001",
+          "role": "slider",
+          "roleId": 0,
+          "createTime": 0,
+          "startTime": 0,
+          "released": false,
+          "host": "slider-8.local",
+          "state": 3,
+          "exitCode": 0,
+          "command": "",
+          "diagnostics": ""
+        }
+      },
+      "master": {
+        "container_1394032374441_0001_01_000004": {
+          "name": "container_1394032374441_0001_01_000004",
+          "role": "master",
+          "roleId": 2,
+          "createTime": 1394032384451,
+          "startTime": 1394032384573,
+          "released": false,
+          "host": "192.168.1.86",
+          "state": 3,
+          "exitCode": 0,
+          "command": "hbase-0.98.0/bin/hbase --config $PROPAGATED_CONFDIR master start 1><LOG_DIR>/master.txt 2>&1 ; ",
+          "diagnostics": "",
+          "environment": [
+            "HADOOP_USER_NAME=\"slider\"",
+            "HBASE_LOG_DIR=\"/tmp/slider-slider\"",
+            "HBASE_HEAPSIZE=\"256\"",
+            "MALLOC_ARENA_MAX=\"4\"",
+            "PROPAGATED_CONFDIR=\"$PWD/propagatedconf\""
+          ]
+        }
+      }
+    },
+    "failed": {
+      
+    },
+
+    "rolestatus": {
+      "worker": {
+        "role.instances": "2",
+        "role.requested.instances": "0",
+        "role.failed.starting.instances": "0",
+        "role.actual.instances": "2",
+        "role.releasing.instances": "0",
+        "role.failed.instances": "1"
+      },
+      "slider": {
+        "role.instances": "1",
+        "role.requested.instances": "0",
+        "role.name": "slider",
+        "role.actual.instances": "1",
+        "role.releasing.instances": "0",
+        "role.failed.instances": "0"
+      },
+      "master": {
+        "role.instances": "1",
+        "role.requested.instances": "1",
+        "role.name": "master",
+        "role.failed.starting.instances": "0",
+        "role.actual.instances": "0",
+        "role.releasing.instances": "0",
+        "role.failed.instances": "0"
+      }
+    }
+  }
+
+
+
+
+}

Added: incubator/slider/site/trunk/content/docs/configuration/redesign.md
URL: http://svn.apache.org/viewvc/incubator/slider/site/trunk/content/docs/configuration/redesign.md?rev=1605167&view=auto
==============================================================================
--- incubator/slider/site/trunk/content/docs/configuration/redesign.md (added)
+++ incubator/slider/site/trunk/content/docs/configuration/redesign.md Tue Jun 24 19:46:37 2014
@@ -0,0 +1,478 @@
+<!---
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+# Apache Slider Cluster Specification
+
+### Notation: 
+
+In this document, a full path to a value is represented as a path
+`options/zookeeper.port`; an assignment as `options/zookeeper.port=2181`.
+
+A wildcard indicates all entries matching a path: `options/zookeeper.*`
+or `/roles/*/yarn.memory`
+
+
+## History
+
+The Slider cluster specification was implicitly defined in the file
+`org.apache.slider.api.ClusterDescription`. It had a number of roles
+
+1. Persistent representation of cluster state
+1. Internal model of desired cluster state within the Application Master.
+1. Dynamic representation of current cluster state when the AM
+was queried, marshalled over the network as JSON.
+1. Description of updated state when reconfiguring a running cluster.
+
+Initially the dynamic status included a complete history of all containers
+-this soon highlighted some restrictions on the maximum size of a JSON-formatted
+string in Hadoop's "classic" RPC: 32K, after which the string was silently
+truncated. Accordingly, this history was dropped.
+
+Having moved to Protocol Buffers as the IPC wire format, with a web view
+alongside, this history could be reconsidered.
+
+The initial design placed most values into the root entry, and relied
+on Jackson introspection to set and retrieve the values -it was a
+Java-first specification, with no external specification or regression tests.
+
+As the number of entries in the root increased, the design switched to storing
+more attributes into specific sections *under* the root path:
+
+* `info`: read-only information about the cluster.
+* `statistics`: Numeric statistics about the cluster
+
+# Sections
+
+## Root
+
+Contains various string and integer values
+
+    "version": "1.0",
+    "name": "test_cluster_lifecycle",
+    "type": "hbase",
+    "state": 3,
+    "createTime": 1393512091276,
+    "updateTime": 1393512117286,
+    "originConfigurationPath": "hdfs://sandbox:8020/user/stevel/.slider/cluster/test_cluster_lifecycle/snapshot",
+    "generatedConfigurationPath": "hdfs://sandbox:8020/user/stevel/.slider/cluster/test_cluster_lifecycle/generated",
+    "dataPath": "hdfs://sandbox:8020/user/stevel/.slider/cluster/test_cluster_lifecycle/database",
+
+
+* `version`: version of the JSON file. Not currently used
+to validate version compatibility; at this point in time
+releases may not be able to read existing .json files.
+
+* `name`: cluster name
+* `type`: reference to the provider type -this triggers a Hadoop configuration
+property lookup to find the implementation classes.
+* `state`: an enumeration value of the cluster state.
+
+        int STATE_INCOMPLETE = 0;
+        int STATE_SUBMITTED = 1;
+        int STATE_CREATED = 2;
+        int STATE_LIVE = 3;
+        int STATE_STOPPED = 4;
+        int STATE_DESTROYED = 5;
+        
+  Only two states are persisted, "incomplete" and "created", though more
+  are used internally.
+  The `incomplete` state is used during cluster create/build,
+   allowing an incomplete JSON file to be written
+  -so minimising the window for race conditions on cluster construction.
+        
+* `createTime` and `updateTime`: timestamps, informative only.
+ The `createTime` value is duplicated in `/info/createTimeMillis`
+* `originConfigurationPath`, `generatedConfigurationPath`, `dataPath`: paths
+used internally -if changed, the cluster may not start.
+
+*Proposed*: 
+1. Move all state bar `name` and cluster state
+into a section `/slider-internal`.
+1. The cluster state is moved from an enum to a simple
+ boolean, `valid`, set to true when the cluster JSON
+ has been fully constructed.
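+
+A minimal sketch of how the root might look under this proposal, following the
+accompanying `proposed-hbase.json` example (the values are illustrative):
+
+    {
+      "version": "2.0.0",
+      "name": "test_cluster_lifecycle",
+      "valid": true,
+      "slider-internal": {
+        "type": "hbase",
+        "createTime": 1393512091276,
+        "dataPath": "hdfs://sandbox:8020/user/slider/.slider/cluster/test_cluster_lifecycle/database"
+      }
+    }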
+
+## `/info`
+
+Read-only list of information about the application. Generally this is
+intended to be used for debugging and testing.
+
+### Persisted values: static information about the file history
+ 
+    "info" : {
+      "create.hadoop.deployed.info" : "(detached from release-2.3.0) @dfe46336fbc6a044bc124392ec06b85",
+      "create.application.build.info" : "Slider Core-0.13.0-SNAPSHOT Built against commit# 1a94ee4aa1 on Java 1.7.0_45 by stevel",
+      "create.hadoop.build.info" : "2.3.0",
+      "create.time.millis" : "1393512091276",
+    },
+ 
+*Proposed*: move persisted info K-V pairs to a section `/diagnostics`.
+ 
+### Dynamic values: 
+ 
+ 
+ whether the AM supports service restart without killing all the containers hosting
+ the role instances:
+ 
+    "slider.am.restart.supported" : "false",
+    
+ timestamps of the cluster going live, and when the status query was made
+    
+    "live.time" : "27 Feb 2014 14:41:56 GMT",
+    "live.time.millis" : "1393512116881",
+    "status.time" : "27 Feb 2014 14:42:08 GMT",
+    "status.time.millis" : "1393512128726",
+    
+  yarn data provided to the AM
+    
+    "yarn.vcores" : "32",
+    "yarn.memory" : "2048",
+  
+  information about the application and hadoop versions in use. Here
+  the application was built using Hadoop 2.3.0, but is running against the version
+  of Hadoop built for HDP-2.
+  
+    "status.application.build.info" : "Slider Core-0.13.0-SNAPSHOT Built against commit# 1a94ee4aa1 on Java 1.7.0_45 by stevel",
+    "status.hadoop.build.info" : "2.3.0",
+    "status.hadoop.deployed.info" : "bigwheel-m16-2.2.0 @704f1e463ebc4fb89353011407e965"
+ 
+ 
+ ## `instances`
+ 
+ Information about the live containers in a cluster
+
+     "instances": {
+       "slider": [ "container_1393511571284_0002_01_000001" ],
+       "master": [ "container_1393511571284_0002_01_000003" ],
+       "worker": [ 
+         "container_1393511571284_0002_01_000002",
+         "container_1393511571284_0002_01_000004"
+       ]
+     },
+
+There's no information about location, nor is there any history about containers
+that are no longer part of the cluster (i.e. failed & released containers). 
+
+It could be possible to include a list of previous containers,
+though Slider would need to be selective about how many to store
+(or how much detail to retain) on those previous containers.
+
+Perhaps the list could be allowed to grow without limit, but detail
+only preserved on the last 100. If more containers fail than that,
+there is likely to be a problem which the most recent containers
+will also display.
+
+*Proposed* 
+
+1. Return to the full serialization of container state -but only for running containers.
+1. Have a list of failed containers, but only include the last 8; make it a rolling
+buffer. This prevents a badly failing role from overloading the status document.
+
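+A sketch of what such a bounded `failed` listing might look like (the entry format
+mirrors the live-container records; the container ID and field values here are
+purely illustrative):
+
+    "failed": {
+      "container_1393511571284_0002_01_000005": {
+        "name": "container_1393511571284_0002_01_000005",
+        "role": "worker",
+        "host": "192.168.1.88",
+        "exitCode": 1,
+        "diagnostics": "process exited with code 1"
+      }
+    }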
+ 
+ ## `statistics`
+ 
+ Statistics on each role. 
+ 
+ They can be divided into counters that only increase
+
+    "containers.start.completed": 0,
+    "containers.start.failed": 0,
+    "containers.failed": 0,
+    "containers.completed": 0,
+    "containers.requested": 0
+
+and those that vary depending upon the current state
+
+    "containers.live": 0,
+    "containers.active.requests": 0,
+    "containers.desired": 0,
+
+
+*Proposed: move these values out of statistics into some other section, as they
+are state, not statistics.*
+
+
+       "statistics": {
+         "worker": {
+           "containers.start.completed": 0,
+           "containers.live": 2,
+           "containers.start.failed": 0,
+           "containers.active.requests": 0,
+           "containers.failed": 0,
+           "containers.completed": 0,
+           "containers.desired": 2,
+           "containers.requested": 0
+         },
+         "slider": {
+           "containers.unknown.completed": 0,
+           "containers.start.completed": 3,
+           "containers.live": 1,
+           "containers.start.failed": 0,
+           "containers.failed": 0,
+           "containers.completed": 0,
+           "containers.surplus": 0
+         },
+         "master": {
+           "containers.start.completed": 0,
+           "containers.live": 1,
+           "containers.start.failed": 0,
+           "containers.active.requests": 0,
+           "containers.failed": 0,
+           "containers.completed": 0,
+           "containers.desired": 1,
+           "containers.requested": 0
+         }
+       },
+    
+The `/statistics/slider` section is unusual in that it provides the aggregate statistics
+of the cluster -this is not obvious. A different name could be used -but
+again, there's a risk of a clash or confusion with a role name.
+
+Better to have a specific `/statistics/cluster` element, 
+and to move the roles' statistics under `/statistics/roles`:
+
+    "statistics": {
+      "cluster": {
+        "containers.unknown.completed": 0,
+        "containers.start.completed": 3,
+        "containers.live": 1,
+        "containers.start.failed": 0,
+        "containers.failed": 0,
+        "containers.completed": 0,
+        "containers.surplus": 0
+  
+      },
+      "roles": {
+        "worker": {
+          "containers.start.completed": 0,
+          "containers.live": 2,
+          "containers.start.failed": 0,
+          "containers.active.requests": 0,
+          "containers.failed": 0,
+          "containers.completed": 0,
+          "containers.desired": 2,
+          "containers.requested": 0
+        },
+        "master": {
+          "containers.start.completed": 0,
+          "containers.live": 1,
+          "containers.start.failed": 0,
+          "containers.active.requests": 0,
+          "containers.failed": 0,
+          "containers.completed": 0,
+          "containers.desired": 1,
+          "containers.requested": 0
+        }
+      }
+    },
+
+This approach allows extra statistics sections to be added (perhaps
+by providers), without any changes to the toplevel section.
+
+## Options
+
+A list of options used by Slider and its providers to build up the AM
+and the configurations of the deployed service components
+
+
+    "options": {
+      "zookeeper.port": "2181",
+      "site.hbase.master.startup.retainassign": "true",
+      "slider.cluster.application.image.path": "hdfs://sandbox:8020/hbase.tar.gz",
+      "site.fs.defaultFS": "hdfs://sandbox:8020",
+      "slider.container.failure.threshold": "5",
+      "site.fs.default.name": "hdfs://sandbox:8020",
+      "slider.cluster.directory.permissions": "0770",
+      "slider.am.monitoring.enabled": "false",
+      "zookeeper.path": "/yarnapps_slider_stevel_test_cluster_lifecycle",
+      "slider.tmp.dir": "hdfs://sandbox:8020/user/stevel/.slider/cluster/test_cluster_lifecycle/tmp/am",
+      "slider.data.directory.permissions": "0770",
+      "zookeeper.hosts": "sandbox",
+      "slider.container.failure.shortlife": "60"
+    },
+  
+Some of these options have been created by Slider itself (e.g. `slider.tmp.dir`)
+for internal use -and are cluster specific. If/when the ability to use
+an existing JSON file as a template for a new cluster is added, having these
+options in the configuration will create problems.
+
+
+# Proposed Changes
+
+
+## Move Slider internal state to `/slider-internal`
+
+Move all Slider "private" data to an internal section, `/slider-internal`,
+including entries in the toplevel section and in `/options`.
+  
+## Allow `/options` and `roles/*/` options entries to take the value "null".
+
+A "null" value would declare that the entry must be given a real value before the cluster
+can start. Provider templates could declare this.
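+
+For example, a provider template might ship an entry such as the following
+(a sketch of the idea; the choice of option is illustrative), forcing the user
+to supply a value before the cluster can be started:
+
+    "options": {
+      "zookeeper.hosts": null
+    }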
+  
+## Make client configuration retrieval hierarchical -and maybe move it out of the status
+
+The current design assumes that it is a -site.xml file being served up. This
+does not work for alternate file formats generated by the Provider.
+
+## Role Options
+
+The `/roles/$ROLENAME/` clauses each provide options for a
+specific role.
+
+This includes
+1. `role.instances`: defines the number of instances of a role to create
+1. `env.` environment variables for launching the container
+1. `yarn.` properties to configure YARN requests.
+1. `jvm.heapsize`: an option supported by some providers to 
+fix the heap size of a component.
+1. `app.infoport`: an option supported by some providers (e.g. HBase)
+to fix the port to which a role (master or worker) binds its web UI.
+
+
+
+      "worker": {
+        "yarn.memory": "768",
+        "env.MALLOC_ARENA_MAX": "4",
+        "role.instances": "0",
+        "role.name": "worker",
+        "jvm.heapsize": "512M",
+        "yarn.vcores": "1",
+        "app.infoport": "0"
+      },
+
+In a live cluster, the role information also includes status information
+about the cluster.
+
+      "master": {
+        "yarn.memory": "1024",
+        "env.MALLOC_ARENA_MAX": "4",
+        "role.instances": "0",
+        "role.requested.instances": "0",
+        "role.name": "master",
+        "role.failed.starting.instances": "0",
+        "role.actual.instances": "0",
+        "jvm.heapsize": "512M",
+        "yarn.vcores": "1",
+        "role.releasing.instances": "0",
+        "role.failed.instances": "0",
+        "app.infoport": "0"
+      }
+
+The role `slider` represents the Slider Application Master itself.
+
+      
+      "slider": {
+        "yarn.memory": "256",
+        "env.MALLOC_ARENA_MAX": "4",
+        "role.instances": "1",
+        "role.name": "slider",
+        "jvm.heapsize": "256M",
+        "yarn.vcores": "1",
+      },
+
+### Proposed: 
+1. move all dynamic role status to its own clauses.
+1. use a simple inheritance model from `/options`
+1. don't allow role entries to alter the cluster state. 
+  
+### Proposed: `/clientProperties` continues to return key-value pairs
+
+The `/clientProperties` section will remain, with key-val pairs of type
+string, the expectation being this is where providers can insert specific
+single attributes for client applications.
+
+These values can be converted to application-specific files on the client,
+in code -as done today in the Slider CLI- or via template expansion (beyond
+the scope of this document).
+
+
+
+### Proposed: alongside `/clientProperties`  comes `/clientfiles` 
+
+This section will list all files that an application instance can generate
+for clients, along with a description.
+
+    "/clientfiles/hbase-site.xml": "site information for HBase"
+    "/clientfiles/log4.properties": "log4.property file"
+
+A new CLI command would be added to retrieve a client file.
+1. The specific file must be named.
+1. If it is not present, an error must be raised.
+1. If it is present, it is downloaded and written to the console, or to a named
+destination file or directory (`--outfile <file>` / `--outdir <dir>`).
+1. If the `--list` argument is provided, the list of available files is
+returned (e.g.) 
+
+    hbase-site.xml: site information for HBase
+    log4.properties: log4.property file
+    
+*No attempt will be made to parse or process the body of the retrieved files.*
+
+In a REST implementation of the client API, /clientconf would be a path
+to the list of options; each file a path underneath.
+
+Client configuration file retrieval moves outside the status completely;
+the status just lists the available files; a separate call returns them.
+
+This will  permit binary content to be retrieved, and avoid any marshalling
+problems and inefficiencies.
+
+With this change, there will now be two ways to generate client configuration
+files
+
+* Client-side: as today
+* Server-side: via the provider
+
+Client side is more extensible as it allows for arbitrary clients; server-side
+is restricted to those files which the application provider is capable of
+generating. The advantage of the server-side option is that the files
+the provider is aware of will be visible through the
+REST and Web UIs, and so trivially retrieved.
+
+### Stop intermixing role specification with role current state
+
+Create a new section, `rolestatus`, which lists the current status
+of the roles: how many are running vs requested, how many are being
+released.
+
+There's some overlap here with the `/statistics` field, so we should
+either merge them or clearly separate the two. Only the `role.failed`
+properties match entries in the statistics -perhaps they should be cut.
+
+#### provider-specific status
+
+Allow providers to publish information to the status, in their
+own section.
+
+There already is support for providers updating the cluster status
+in Slider 12.1 and earlier, but it has flaws.
+
+A key one is that it is done synchronously on a `getStatus()` call;
+as providers may perform a live query of their status (example, the HBase
+provider looks up the Web UI ports published by HBase to zookeeper),
+there's overhead, and if the operation blocks (example: when HBase hasn't
+ever been deployed and the zookeeper path is empty), then the status
+call blocks.
+
+*Proposed:*
+
+1. There is a specific `/provider` section
+1. There's no restriction on what JSON is permitted in this section.
+1. Providers may make their own updates to the application state to read and
+write this block -operations that are asynchronous to any status queries.
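+
+As an illustration, the `/provider` section in the accompanying `proposed-hbase.json`
+example has the provider publishing a load value and the web UI URLs of its
+component instances (the meaning of the fields is provider-defined):
+
+    "provider": {
+      "load": 0.4,
+      "urls": {
+        "master": ["http://node4:28209"],
+        "worker": ["http://node4:28717", "http://node6:31268"]
+      }
+    }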

Propchange: incubator/slider/site/trunk/content/docs/configuration/redesign.md
------------------------------------------------------------------------------
    svn:eol-style = native

Added: incubator/slider/site/trunk/content/docs/configuration/resolved-resources.json
URL: http://svn.apache.org/viewvc/incubator/slider/site/trunk/content/docs/configuration/resolved-resources.json?rev=1605167&view=auto
==============================================================================
--- incubator/slider/site/trunk/content/docs/configuration/resolved-resources.json (added)
+++ incubator/slider/site/trunk/content/docs/configuration/resolved-resources.json Tue Jun 24 19:46:37 2014
@@ -0,0 +1,22 @@
+{
+  "schema": "http://example.org/specification/v2.0.0",
+
+  "metadata": {
+    "description": "example of a resources file"
+  },
+  
+  "global": {
+    "yarn.vcores": "1",
+    "yarn.memory": "512"
+  },
+  
+  "components": {
+    "master": {
+      "instances": "1",
+      "yarn.memory": "1024"
+    },
+    "worker": {
+      "instances":"5"
+    }
+  }
+}
\ No newline at end of file

Added: incubator/slider/site/trunk/content/docs/configuration/specification.md
URL: http://svn.apache.org/viewvc/incubator/slider/site/trunk/content/docs/configuration/specification.md?rev=1605167&view=auto
==============================================================================
--- incubator/slider/site/trunk/content/docs/configuration/specification.md (added)
+++ incubator/slider/site/trunk/content/docs/configuration/specification.md Tue Jun 24 19:46:37 2014
@@ -0,0 +1,512 @@
+<!---
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+# Apache Slider Specification of the "Cluster Description"
+
+* This is partially obsolete. Slider still returns the Slider Cluster Description,
+as changing it would break most of the unit tests -once these are updated,
+this document will be completely obsolete and replaced with a new one.
+
+
+### Notation: 
+
+In this document, a full path to a value is represented as a path
+`options/zookeeper.port`; an assignment as `options/zookeeper.port=2181`.
+
+A wildcard indicates all entries matching a path: `options/zookeeper.*`
+or `/roles/*/yarn.memory`
+
+
+## Core Concepts
+
+The specification of an application instance is defined in a file in the application
+instance directory: `${user.home}/.slider/clusters/${clustername}/cluster.json`.
+
+
+## Sections for specifying and describing cluster state
+
+The cluster description is hierarchical, with standardized sections.
+
+Different sections have one of three roles.
+
+1. Storage and specification of internal properties used to define a cluster -properties
+that should not be modified by users -doing so is likely to render the
+cluster undeployable.
+
+1. Storage and specification of the components deployed by Slider.
+These sections define options for the deployed application, the size of
+the deployed application, attributes of the deployed roles, and customizable
+aspects of the Slider application master. 
+
+  This information defines the *desired state* of a cluster.
+   
+  Users may edit these sections, either via the CLI, or by directly editing the `cluster.json` file of
+  a frozen cluster.
+
+1. Status information provided by a running cluster. These include:
+ information about the cluster, statistics, information about each role in
+ the cluster -as well as other aspects of the deployment.
+ 
+ This information describes the *actual state* of a cluster.
+  
+Using a common format for both the specification and description of a cluster
+may be confusing, but it is designed to unify the logic needed to parse
+and process cluster descriptions. There is only one JSON file to parse
+-merely different sections of relevance at different times.
+
+## Role-by-role subsections
+
+A slider-deployed application consists of the single Slider application master,
+and one or more roles -specific components in the actual application.
+
+The `/roles` section contains a listing for each role, 
+declaring the number of instances of each role desired,
+possibly along with some details defining the actual execution of the application.
+
+The `/statistics/roles/` section returns statistics on each role,
+while `/instances` has a per-role entry listing the YARN
+containers hosting instances. 
+
+
+## Cluster information for applications
+
+The AM/application provider may generate information for use by client applications.
+
+There are three ways to provide this
+
+1. A section in which simple key-value pairs are provided for interpretation
+by client applications -usually to generate configuration documents
+2. A listing of files that may be provided directly to a client. The API to provide these files is not covered by this document.
+3. A provider-specific section in which arbitrary values and structures may be defined. This allows greater flexibility in the information that a provider can publish -though it does imply custom code to process this data on the client.
+
+
+# Persistent Specification Sections
+
+## "/" : root
+
+The root contains a limited number of key-value pairs:
+
+* `version`: string; required.
+The version of the JSON file, as an `x.y.z` version string.
+    1. Applications MUST verify that they can process a specific version.
+    1. The version number SHOULD be incremented in the minor "z" value
+    after enhancements that are considered backwards compatible.
+    Incompatible changes MUST increment the "y" value.
+    The final, "x" number, is to be reserved for major reworkings
+    of the cluster specification itself (this document or its
+    successors).
+
+* `name`: string; required. Cluster name; 
+* `type`: string; required.
+Reference to the provider type -this triggers a Hadoop configuration
+property lookup to find the implementation classes.
+* `valid`: boolean; required.
+Flag to indicate whether or not a specification is considered valid.
+If false, the rest of the document is in an unknown state.
+
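+A minimal sketch of the persisted root entries, using values from the examples
+elsewhere in this document:
+
+    "version": "2.0.0",
+    "name": "test_cluster_lifecycle",
+    "type": "hbase",
+    "valid": true
+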
+## `/slider-internal`: internal configuration
+
+Stores internal configuration options. These parameters
+are not defined in this document.
+
+## `/diagnostics`: diagnostics sections
+
+Persisted list of information about Slider. 
+
+Static information about the file history
+ 
+    "diagnostics" : {
+      "create.hadoop.deployed.info" : 
+       "(detached from release-2.3.0) @dfe46336fbc6a044bc124392ec06b85",
+      "create.application.build.info" : 
+       "Slider Core-0.13.0-SNAPSHOT Built against commit# 1a94ee4aa1 on Java 1.7.0_45 by stevel",
+      "create.hadoop.build.info" : "2.3.0",
+      "create.time.millis" : "1393512091276",
+    },
+ 
+This information is not intended to provide anything other
+than diagnostics to an application; the values and their meaning
+are not defined. All applications MUST be able to process
+an empty or absent `/diagnostics` section.
+
+## Options: cluster options
+
+A persisted list of options used by Slider and its providers to build up the AM
+and the configurations of the deployed service components
+
+  
+    "options": {
+      "slider.am.monitoring.enabled": "false",
+      "slider.cluster.application.image.path": "hdfs://sandbox:8020/hbase.tar.gz",
+      "slider.container.failure.threshold": "5",
+      "slider.container.failure.shortlife": "60",
+      "zookeeper.port": "2181",
+      "zookeeper.path": "/yarnapps_slider_stevel_test_cluster_lifecycle",
+      "zookeeper.hosts": "sandbox",
+      "site.hbase.master.startup.retainassign": "true",
+      "site.fs.defaultFS": "hdfs://sandbox:8020",
+      "site.fs.default.name": "hdfs://sandbox:8020",
+      "env.MALLOC_ARENA_MAX": "4",
+      "site.hbase.master.info.port": "0",
+      "site.hbase.regionserver.info.port": "0"
+    },
+
+Many of the properties are automatically set by Slider when a cluster is constructed.
+They may be edited afterwards.
+
+
+### Standard Option types
+
+All option values MUST be strings.
+
+#### `slider.`
+All options that begin with `slider.` are intended for use by slider and 
+providers to configure the Slider application master itself, and the
+application. For example, `slider.container.failure.threshold` defines
+the number of times a container must fail before the role (and hence the cluster)
+is considered to have failed. As another example, the zookeeper bindings
+such as `zookeeper.hosts` are read by the HBase and Ambari providers, and
+used to modify the applications' site configurations with application-specific
+properties.
+
+#### `site.`
+ 
+These are properties that are expected to be propagated to an application's
+ `site` configuration -if such a configuration is created. For HBase, the 
+ site file is `hbase-site.xml`; for Accumulo it is `accumulo-site.xml`
+
+1. The destination property is taken by removing the prefix `site.`, and
+setting the shortened key with the defined value.
+1. Not all applications have the notion of a site file; these applications MAY
+ignore the settings.
+1. Providers MAY validate site settings to recognise invalid values. This
+aids identifying and diagnosing startup problems.
+
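+For example (a sketch using an option shown elsewhere in this document), the
+cluster option
+
+    "site.hbase.master.info.port": "0"
+
+would be propagated into the generated `hbase-site.xml` as the property
+`hbase.master.info.port` with the value `0`.
+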
+#### `env.`
+
+These are options to configure environment variables in the roles. When
+a container is started, all `env.` options have the prefix removed, and
+are then set as environment variables in the target context.
+
+1. The Slider AM uses these values to configure itself, after following the
+option/role merge process.
+1. Application providers SHOULD follow the same process.
+
+
+## `/roles`: role declarations
+
+The `/roles/$ROLENAME/` clauses each provide options for a
+specific role.
+
+This includes
+1. `role.instances`: defines the number of instances of a role to create
+1. `env.` environment variables for launching the container
+1. `yarn.` properties to configure YARN requests.
+1. `jvm.heapsize`: an option supported by some providers to 
+fix the heap size of a component.
+
+
+      "worker": {
+        "yarn.memory": "768",
+        "env.MALLOC_ARENA_MAX": "4",
+        "role.instances": "0",
+        "role.name": "worker",
+        "role.failed.starting.instances": "0",
+        "jvm.heapsize": "512M",
+        "yarn.vcores": "1",
+      },
+
+
+The role `slider` represents the Slider Application Master itself.
+
+      
+      "slider": {
+        "yarn.memory": "256",
+        "env.MALLOC_ARENA_MAX": "4",
+        "role.instances": "1",
+        "role.name": "slider",
+        "jvm.heapsize": "256M",
+        "yarn.vcores": "1",
+      },
+
+Providers may support a fixed number of roles -or they may support a dynamic
+number of roles defined at run-time, potentially from other data sources.
+
+## How `/options` and role options are merged.
+
+The options declared for a specific role are merged with the cluster-wide options
+to define the final options for a role. This is implemented in a simple
+override model: role-specific options can override any site-wide options.
+
+1. The options defined in `/options` are used to create the initial option
+map for each role.
+1. The role's options are then applied to the map -this may overwrite definitions
+from the `/options` section.
+1. There is no way to "undefine" a cluster option, merely overwrite it. 
+1. The merged map is then used by the provider to create the component.
+1. The special `slider` role is used in the CLI to define the attributes of the AM.
+
+Options set on a role do not affect any site-wide options: they
+are specific to the individual role being created.
+
+As such, overwriting a `site.` option may have no effect -or it may
+change the value of a site configuration document *in that specific role instance*.
+
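+A sketch of the merge (the option names come from examples in this document;
+the overridden value is illustrative). Given cluster-wide options and a `worker`
+role such as
+
+    "options": {
+      "env.MALLOC_ARENA_MAX": "4",
+      "site.hbase.regionserver.info.port": "0"
+    },
+    "roles": {
+      "worker": {
+        "yarn.memory": "768",
+        "env.MALLOC_ARENA_MAX": "2"
+      }
+    }
+
+the effective option map used to create each `worker` instance would be
+
+    "yarn.memory": "768",
+    "env.MALLOC_ARENA_MAX": "2",
+    "site.hbase.regionserver.info.port": "0"
+
+with the role-level `env.MALLOC_ARENA_MAX` overriding the cluster-wide value for
+that role only.
+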
+### Standard role options
+
+* `role.instances` : number; required.
+  The number of instances of that role desired in the application.
+* `yarn.vcores` : number.
+  The number of YARN "virtual cores" to request for each role instance.
+  The larger the number, the more CPU allocation -and potentially the longer
+  time to satisfy the request and so instantiate the node. 
+  If the value '"-1"` is used -for any role but `slider`-the maximum value
+  available to the application is requested.
+* `yarn.memory` : number.
+  The number in Megabytes of RAM to request for each role instance.
+  The larger the number, the more memory allocation -and potentially the longer
+  time to satisfy the request and so instantiate the node. 
+  If the value '"-1"` is used -for any role but `slider`-the maximum value
+  available to the application is requested.
+ 
+* `env.` environment variables.
+String environment variables to use when setting up the container
+
+### Provider-specific role options
+  
+* `jvm.heapsize` -the amount of memory for a provider to allocate for
+ a process's JVM. Example "512M". This option MAY be implemented by a provider.
+ 
+
+
+
+
+# Dynamic Information Sections
+
+These are the parts of the document that provide dynamic run-time
+information about an application. They are provided by the
+Slider Application Master when a request for the cluster status is issued.
+
+## `/info`
+
+Dynamic set of string key-value pairs containing
+information about the running application -as provided by the AM.
+
+The values in this section are not normatively defined. 
+
+Here are some standard values
+ 
+* `slider.am.restart.supported"`  whether the AM supports service restart without killing all the containers hosting
+ the role instances:
+ 
+        "slider.am.restart.supported" : "false",
+    
+* timestamps of the cluster going live, and when the status query was made
+    
+        "live.time" : "27 Feb 2014 14:41:56 GMT",
+        "live.time.millis" : "1393512116881",
+        "status.time" : "27 Feb 2014 14:42:08 GMT",
+        "status.time.millis" : "1393512128726",
+    
+* yarn data provided to the AM
+    
+        "yarn.vcores" : "32",
+        "yarn.memory" : "2048",
+      
+*  information about the application and hadoop versions in use. Here
+  the application was built using Hadoop 2.3.0, but is running against the version
+  of Hadoop built for HDP-2.
+  
+        "status.application.build.info" : "Slider Core-0.13.0-SNAPSHOT Built against commit# 1a94ee4aa1 on Java 1.7.0_45 by stevel",
+        "status.hadoop.build.info" : "2.3.0",
+        "status.hadoop.deployed.info" : "bigwheel-m16-2.2.0 @704f1e463ebc4fb89353011407e965"
+     
+ 
+As with the `/diagnostics` section, this area is primarily intended
+for debugging.
+
+ ## `/instances`: instance list
+ 
+ Information about the live containers in a cluster
+
+     "instances": {
+       "slider": [ "container_1393511571284_0002_01_000001" ],
+       "master": [ "container_1393511571284_0002_01_000003" ],
+       "worker": [ 
+         "container_1393511571284_0002_01_000002",
+         "container_1393511571284_0002_01_000004"
+       ]
+     },
+
+
+## `/status`: detailed dynamic state
+
+This provides more detail on the application including live and failed instances
+
+### `/status/live`: live role instances by container
+
+    "cluster": {
+      "live": {
+        "worker": {
+          "container_1394032374441_0001_01_000003": {
+            "name": "container_1394032374441_0001_01_000003",
+            "role": "worker",
+            "roleId": 1,
+            "createTime": 1394032384451,
+            "startTime": 1394032384503,
+            "released": false,
+            "host": "192.168.1.88",
+            "state": 3,
+            "exitCode": 0,
+            "command": "hbase-0.98.0/bin/hbase --config $PROPAGATED_CONFDIR regionserver start 1><LOG_DIR>/region-server.txt 2>&1 ; ",
+            "diagnostics": "",
+            "environment": [
+              "HADOOP_USER_NAME=\"slider\"",
+              "HBASE_LOG_DIR=\"/tmp/slider-slider\"",
+              "HBASE_HEAPSIZE=\"256\"",
+              "MALLOC_ARENA_MAX=\"4\"",
+              "PROPAGATED_CONFDIR=\"$PWD/propagatedconf\""
+            ]
+          }
+        }
+      },
+      "failed": {}
+    }
+
+All live instances MUST be described in `/status/live`
+
+Failed containers MAY be listed in the `/status/failed` section; specifically,
+a limited set of recently failed containers SHOULD be provided.
+
+Future versions of this document may introduce more sections under `/status`.
+        
+### `/status/rolestatus`: role status information
+
+This lists the current status of the roles: 
+how many are running vs requested, how many are being
+released.
+ 
+      
+    "rolestatus": {
+      "worker": {
+        "role.instances": "2",
+        "role.requested.instances": "0",
+        "role.failed.starting.instances": "0",
+        "role.actual.instances": "2",
+        "role.releasing.instances": "0",
+        "role.failed.instances": "1"
+      },
+      "slider": {
+        "role.instances": "1",
+        "role.requested.instances": "0",
+        "role.name": "slider",
+        "role.actual.instances": "1",
+        "role.releasing.instances": "0",
+        "role.failed.instances": "0"
+      },
+      "master": {
+        "role.instances": "1",
+        "role.requested.instances": "1",
+        "role.name": "master",
+        "role.failed.starting.instances": "0",
+        "role.actual.instances": "0",
+        "role.releasing.instances": "0",
+        "role.failed.instances": "0"
+      }
+    }
+
+
+### `/status/provider`: provider-specific information
+
+Providers MAY publish information to the `/status/provider` section.
+
+1. There's no restriction on what JSON is permitted in this section.
+1. Providers may make their own updates to the application state to read and
+write this block -operations that are asynchronous to any status queries.
+
+
+
+## `/statistics`: aggregate statistics 
+ 
+Statistics on the cluster and each role in the cluster 
+
+There is a specific `/statistics/cluster` element,
+and the roles' statistics come under `/statistics/roles`:
+
+    "statistics": {
+      "cluster": {
+        "containers.unknown.completed": 0,
+        "containers.start.completed": 3,
+        "containers.live": 1,
+        "containers.start.failed": 0,
+        "containers.failed": 0,
+        "containers.completed": 0,
+        "containers.surplus": 0
+      },
+      "roles": {
+        "worker": {
+          "containers.start.completed": 0,
+          "containers.live": 2,
+          "containers.start.failed": 0,
+          "containers.active.requests": 0,
+          "containers.failed": 0,
+          "containers.completed": 0,
+          "containers.desired": 2,
+          "containers.requested": 0
+        },
+        "master": {
+          "containers.start.completed": 0,
+          "containers.live": 1,
+          "containers.start.failed": 0,
+          "containers.active.requests": 0,
+          "containers.failed": 0,
+          "containers.completed": 0,
+          "containers.desired": 1,
+          "containers.requested": 0
+        }
+      }
+    },
+
+`/statistics/cluster` provides aggregate statistics for the entire cluster.
+
+Under `/statistics/roles` MUST come an entry for each role in the cluster.
+
+All simple values in the statistics section are integers.
+
+
+### `/clientProperties` 
+
+The `/clientProperties` section contains key-val pairs of type
+string, the expectation being this is where providers can insert specific
+single attributes for client applications.
+
+These values can be converted to application-specific files on the client,
+in code -as done today in the Slider CLI- or via template expansion (beyond
+the scope of this document).
+
+
+### `/clientfiles` 
+
+This section lists all files that an application instance MAY generate
+for clients, along with a description.
+
+    "/clientfiles/hbase-site.xml": "site information for HBase"
+    "/clientfiles/log4.properties": "log4.property file"
+
+Client configuration file retrieval is by other means; this
+status operation merely lists the files that are available.
+
+

Propchange: incubator/slider/site/trunk/content/docs/configuration/specification.md
------------------------------------------------------------------------------
    svn:eol-style = native

Added: incubator/slider/site/trunk/content/docs/debugging.md
URL: http://svn.apache.org/viewvc/incubator/slider/site/trunk/content/docs/debugging.md?rev=1605167&view=auto
==============================================================================
--- incubator/slider/site/trunk/content/docs/debugging.md (added)
+++ incubator/slider/site/trunk/content/docs/debugging.md Tue Jun 24 19:46:37 2014
@@ -0,0 +1,92 @@
+<!---
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+# Debugging Apache Slider
+There are a number of options available to you for debugging Slider applications.  They include:
+
+* Using Slider logging
+* IDE-based remote debugging of the Application Master
+
+## Using Slider logging
+There are a number of options for viewing the generated log files:
+
+1. Using a web browser
+2. Accessing the host machine
+  
+### Using a web browser
+
+The log files are accessible via the Yarn Resource Manager UI.  From the main page (e.g. `http://${YARN_RESOURCE_MGR_HOST}:8088`),
+click on the link for the application instance of interest, and then click on the `logs` link.
+This will present you with a page with links to the `slider-err.txt` file and the `slider-out.txt` file.
+The former is the file you should select -it is where the application logs go.
+Once the log page is presented, click on the link at the top of the page ("Click here for full log") to view the entire file.
+
+If the file `slider-out.txt` is empty, then examine  `slider-err.txt` -an empty
+output log usually means that the java process failed to start -this should be
+logged in the error file.
+     
+
+### Accessing the host machine
+
+If access to other log files is required, there is the option of logging in
+ to the host machine on which the application component is running
+  -provided you have the correct permissions.
+  
+The root directory for all YARN associated files is the value of `yarn.nodemanager.log-dirs` in `yarn-site.xml` - e.g. `/hadoop/yarn/log`.
+Below the root directory you will find an application and container sub-directory (e.g. `/application_1398372047522_0009/container_1398372047522_0009_01_000001/`).
+Below the container directory you will find any log files associated with the processes running in the given Yarn container.
+
+Within a container's log directory, the following files are useful when debugging the application.
+
+**agent.log** 
+  
+E.g. `application_1398098639743_0024/container_1398098639743_0024_01_000003/infra/log/agent.log`
+This file contains the logs from the Slider-Agent.
+
+**application component log**
+
+E.g. `./log/application_1398098639743_0024/container_1398098639743_0024_01_000003/app/log/hbase-yarn-regionserver-c6403.ambari.apache.org.log`
+
+The location of the application log is defined by the application. "${AGENT_LOG_ROOT}" is a symbol available to the app developers to use as a root folder for logging.
+
+**agent operations log**
+
+E.g. `./log/application_1398098639743_0024/container_1398098639743_0024_01_000003/app/command-log/`
+
+The command logs produced by the slider-agent are available in the `command-log` folder relative to `${AGENT_LOG_ROOT}/app`
+
+Note that the *fish* shell is convenient for debugging, as  `cat log/**/slider-out.txt` will find the relevant output file 
+irrespective of what the path leading to it is.
+
+## IDE-based remote debugging of the Application Master
+
+For situations in which the logging does not yield enough information to debug an issue,
+the user has the option of specifying JVM command line options for the
+Application Master that enable attaching to the running process with a debugger
+(e.g. the remote debugging facilities in Eclipse or Intellij IDEA). 
+In order to specify the JVM options, edit the application configuration file
+(the file specified as the `--template` argument value on the command line for cluster creation)
+and specify the `jvm.opts` property for the `slider-appmaster` component:
+
+    "components": {
+      "slider-appmaster": {
+        "jvm.heapsize": "256M",
+        "jvm.opts": "-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005"
+      },
+      ...
+ 		
+You may specify `suspend=y` in the line above if you wish to have the application master process wait for the debugger to attach before beginning its processing.

Propchange: incubator/slider/site/trunk/content/docs/debugging.md
------------------------------------------------------------------------------
    svn:eol-style = native

Added: incubator/slider/site/trunk/content/docs/examples.md
URL: http://svn.apache.org/viewvc/incubator/slider/site/trunk/content/docs/examples.md?rev=1605167&view=auto
==============================================================================
--- incubator/slider/site/trunk/content/docs/examples.md (added)
+++ incubator/slider/site/trunk/content/docs/examples.md Tue Jun 24 19:46:37 2014
@@ -0,0 +1,159 @@
+<!---
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+# Apache Slider Examples
+
+ 
+## Setup
+ 
+### Setting up a YARN cluster
+ 
+For simple local demos, a Hadoop pseudo-distributed cluster will suffice -if on a VM then
+its configuration should be changed to use a public (machine public) IP.
+
+The examples below all assume there is a cluster node called 'master', which
+hosts the HDFS NameNode and the YARN Resource Manager.
+
+
+# preamble
+
+    export HADOOP_CONF_DIR=~/conf
+    export PATH=~/hadoop/bin:/~/hadoop/sbin:~/zookeeper-3.4.5/bin:$PATH
+    
+    hdfs namenode -format master
+  
+
+
+
+# start all the services
+
+    nohup hdfs --config $HADOOP_CONF_DIR namenode & 
+    nohup hdfs --config $HADOOP_CONF_DIR datanode &
+    
+    
+    nohup yarn --config $HADOOP_CONF_DIR resourcemanager &
+    nohup yarn --config $HADOOP_CONF_DIR nodemanager &
+    
+# using hadoop/sbin service launchers
+    
+    hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs start namenode
+    hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs start datanode
+    yarn-daemon.sh --config $HADOOP_CONF_DIR start resourcemanager
+    yarn-daemon.sh --config $HADOOP_CONF_DIR start nodemanager
+    
+    ~/zookeeper/bin/zkServer.sh start
+    
+    
+# stop them
+
+    hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs stop namenode
+    hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs stop datanode
+    
+    yarn-daemon.sh --config $HADOOP_CONF_DIR stop resourcemanager
+    yarn-daemon.sh --config $HADOOP_CONF_DIR stop nodemanager
+    
+
+
+NN up on [http://master:50070/dfshealth.jsp](http://master:50070/dfshealth.jsp)
+RM up on [http://master:8088/cluster](http://master:8088/cluster)
+
+#### ZooKeeper
+
+    # start
+    ~/zookeeper/bin/zkServer.sh start
+
+    # shutdown
+    ~/zookeeper/bin/zkServer.sh stop
+
+
+Tip: after a successful run on a local cluster, do a quick `rm -rf $HADOOP_HOME/logs`
+to keep the log bloat under control.
+
+## Get HBase in
+
+Copy the HBase tarball to the local filesystem:
+
+    get hbase-0.98.0-bin.tar on 
+
+
+    hdfs dfs -rm hdfs://master:9090/hbase.tar
+    hdfs dfs -copyFromLocal hbase-0.98.0-bin.tar hdfs://master:9090/hbase.tar
+
+or
+    
+    hdfs dfs -copyFromLocal hbase-0.96.0-bin.tar hdfs://master:9090/hbase.tar
+    hdfs dfs -ls hdfs://master:9090/
+    
+
+## Optional: Point bin/slider at your chosen cluster configuration
+
+    export SLIDER_CONF_DIR=~/Projects/slider/slider-core/src/test/configs/ubuntu-secure/slider
+
+## Optional: Clean up any existing slider cluster details
+
+Do this for demos only; otherwise you lose the clusters and their databases.
+
+    hdfs dfs -rm -r hdfs://master:9090/user/home/stevel/.slider
+
+## Create a Slider Cluster
+ 
+ 
+    slider create cl1 \
+    --component worker 1  --component master 1 \
+     --manager master:8032 --filesystem hdfs://master:9090 \
+     --zkhosts localhost:2181 --image hdfs://master:9090/hbase.tar
+    
+    # create the cluster
+    
+    slider create cl1 \
+     --component worker 4 --component master 1 \
+      --manager master:8032 --filesystem hdfs://master:9090 --zkhosts localhost \
+      --image hdfs://master:9090/hbase.tar \
+      --appconf file:///Users/slider/Hadoop/configs/master/hbase \
+      --compopt master jvm.heap 128 \
+      --compopt master env.MALLOC_ARENA_MAX 4 \
+      --compopt worker jvm.heap 128 
+
+    # freeze the cluster
+    slider freeze cl1 \
+    --manager master:8032 --filesystem hdfs://master:9090
+
+    # thaw a cluster
+    slider thaw cl1 \
+    --manager master:8032 --filesystem hdfs://master:9090
+
+    # destroy the cluster
+    slider destroy cl1 \
+    --manager master:8032 --filesystem hdfs://master:9090
+
+    # list clusters
+    slider list cl1 \
+    --manager master:8032 --filesystem hdfs://master:9090
+    
+    # flex the cluster
+    slider flex cl1 \
+    --manager master:8032 --filesystem hdfs://master:9090 \
+    --component worker 5
+    
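+After any of these operations you can ask the application master for its view of the application;
+a short sketch using the same manager and filesystem bindings as above:
+
+    # query the application state
+    slider status cl1 \
+    --manager master:8032 --filesystem hdfs://master:9090
+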
+## Create an Accumulo Cluster
+
+    slider create accl1 --provider accumulo \
+    --component master 1 --component tserver 1 --component gc 1 --component monitor 1 --component tracer 1 \
+    --manager localhost:8032 --filesystem hdfs://localhost:9000 \
+    --zkhosts localhost:2181 --zkpath /local/zookeeper \
+    --image hdfs://localhost:9000/user/username/accumulo-1.6.0-SNAPSHOT-bin.tar \
+    --appconf hdfs://localhost:9000/user/username/accumulo-conf \
+    -O zk.home /local/zookeeper -O hadoop.home /local/hadoop \
+    -O site.monitor.port.client 50095 -O accumulo.password secret 
+    

Propchange: incubator/slider/site/trunk/content/docs/examples.md
------------------------------------------------------------------------------
    svn:eol-style = native

Added: incubator/slider/site/trunk/content/docs/exitcodes.md
URL: http://svn.apache.org/viewvc/incubator/slider/site/trunk/content/docs/exitcodes.md?rev=1605167&view=auto
==============================================================================
--- incubator/slider/site/trunk/content/docs/exitcodes.md (added)
+++ incubator/slider/site/trunk/content/docs/exitcodes.md Tue Jun 24 19:46:37 2014
@@ -0,0 +1,161 @@
+<!---
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+# Apache Slider Client Exit Codes
+
+Here are the exit codes returned by the Slider client.
+
+Exit code values 1 and 2 are interpreted by YARN; in particular, YARN converts the
+"1" value from an error into a successful shut down. Slider
+converts the -1 exit code from a forked process into `EXIT_PROCESS_FAILED` (72).
+
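+In a shell script the exit code can be picked up from `$?` immediately after the client returns;
+a minimal sketch, assuming the client is already configured via `slider-client.xml`:
+
+    ./slider list cl1
+    rc=$?
+    echo "slider returned exit code $rc"
+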
+
+    /**
+     * 0: success
+     */
+    int EXIT_SUCCESS                    =  0;
+    
+    /**
+     * -1: generic "false" response. The operation worked but
+     * the result was not true
+     */
+    int EXIT_FALSE                      = -1;
+    
+    /**
+     * Exit code when a client requested service termination:
+     */
+    int EXIT_CLIENT_INITIATED_SHUTDOWN  =  1;
+    
+    /**
+     * Exit code when targets could not be launched:
+     */
+    int EXIT_TASK_LAUNCH_FAILURE        =  2;
+    
+    /**
+     * Exit code when an exception was thrown from the service:
+     */
+    int EXIT_EXCEPTION_THROWN           = 32;
+    
+    /**
+     * Exit code when a usage message was printed:
+     */
+    int EXIT_USAGE                      = 33;
+    
+    /**
+     * Exit code when something happened but we can't be specific:
+     */
+    int EXIT_OTHER_FAILURE              = 34;
+    
+    /**
+     * Exit code when an interrupt signal (e.g. Control-C or a kill) was picked up:
+     */
+    int EXIT_INTERRUPTED                = 35;
+    
+    /**
+     * Exit code when the command line doesn't parse:, or
+     * when it is otherwise invalid.
+     */
+    int EXIT_COMMAND_ARGUMENT_ERROR     = 36;
+    
+    /**
+     * Exit code when the configuration is invalid or incomplete:
+     */
+    int EXIT_BAD_CONFIGURATION          = 37;
+    
+    /**
+     * Exit code when there is a connectivity problem:
+     */
+    int EXIT_CONNECTIVTY_PROBLEM        = 38;
+    
+    /**
+     * internal error: {@value}
+     */
+    int EXIT_INTERNAL_ERROR = 64;
+    
+    /**
+     * Unimplemented feature: {@value}
+     */
+    int EXIT_UNIMPLEMENTED =        65;
+  
+    /**
+     * service entered the failed state: {@value}
+     */
+    int EXIT_YARN_SERVICE_FAILED =  66;
+  
+    /**
+     * service was killed: {@value}
+     */
+    int EXIT_YARN_SERVICE_KILLED =  67;
+  
+    /**
+     * timeout on monitoring client: {@value}
+     */
+    int EXIT_TIMED_OUT =            68;
+  
+    /**
+     * service finished with an error: {@value}
+     */
+    int EXIT_YARN_SERVICE_FINISHED_WITH_ERROR = 69;
+  
+    /**
+     * the application instance is unknown: {@value}
+     */
+    int EXIT_UNKNOWN_INSTANCE = 70;
+  
+    /**
+     * the application instance is in the wrong state for that operation: {@value}
+     */
+    int EXIT_BAD_STATE =    71;
+  
+    /**
+     * A spawned master process failed 
+     */
+    int EXIT_PROCESS_FAILED = 72;
+  
+    /**
+     * The cluster failed: too many containers were
+     * failing or some other threshold was reached
+     */
+    int EXIT_DEPLOYMENT_FAILED = 73;
+  
+    /**
+     * The application is live, and the requested operation
+     * does not work while the cluster is running
+     */
+    int EXIT_APPLICATION_IN_USE = 74;
+  
+    /**
+     * There already is an application instance of that name
+     * when an attempt is made to create a new instance
+     */
+    int EXIT_INSTANCE_EXISTS = 75;
+    
+    /**
+     * The resource was not found
+     */
+    int EXIT_NOT_FOUND = 77;
+    
+## Other exit codes
+
+YARN itself can fail containers; here are some of the causes we have seen:
+
+
+    143: Appears to be triggered by the container exceeding its cgroup memory
+    limits
+ 
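+When chasing these down, the aggregated container logs are usually the best evidence.
+Assuming YARN log aggregation is enabled, something like the following retrieves them
+(replace the placeholder with the real application ID):
+
+    yarn logs -applicationId <application-id>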

Propchange: incubator/slider/site/trunk/content/docs/exitcodes.md
------------------------------------------------------------------------------
    svn:eol-style = native

Added: incubator/slider/site/trunk/content/docs/getting_started.md
URL: http://svn.apache.org/viewvc/incubator/slider/site/trunk/content/docs/getting_started.md?rev=1605167&view=auto
==============================================================================
--- incubator/slider/site/trunk/content/docs/getting_started.md (added)
+++ incubator/slider/site/trunk/content/docs/getting_started.md Tue Jun 24 19:46:37 2014
@@ -0,0 +1,509 @@
+<!---
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+# Apache Slider: Getting Started
+
+
+## Introduction
+
+The following provides the steps required for setting up a cluster and deploying a YARN hosted application using Slider.
+
+* [Prerequisites](#sysreqs)
+
+* [Setup the Cluster](#setup)
+
+* [Download Slider Packages](#download)
+
+* [Build Slider](#build)
+
+* [Install Slider](#install)
+
+* [Deploy Slider Resources](#deploy)
+
+* [Download Sample Application Packages](#downsample)
+
+* [Install, Configure, Start and Verify Sample Application](#installapp)
+
+* [Appendix A: Storm Sample Application Specifications](#appendixa)
+
+* [Appendix B: HBase Sample Application Specifications](#appendixb)
+
+## <a name="sysreqs"></a>System Requirements
+
+The Slider deployment has the following minimum system requirements:
+
+* Hadoop 2.4+
+
+* Required Services: HDFS, YARN, MapReduce2 and ZooKeeper
+
+* Oracle JDK 1.7 (64-bit)
+
+## <a name="setup"></a>Setup the Cluster
+
+After setting up your Hadoop cluster (using Ambari or other means) with the 
+services listed above, modify your YARN configuration to allow for multiple
+containers on a single host. In `yarn-site.xml` make the following modifications:
+
+<table>
+  <tr>
+    <td>Property</td>
+    <td>Value</td>
+  </tr>
+  <tr>
+    <td>yarn.scheduler.minimum-allocation-mb</td>
+    <td>>= 256</td>
+  </tr>
+  <tr>
+    <td>yarn.nodemanager.delete.debug-delay-sec</td>
+    <td>>= 3600 (to retain for an hour)</td>
+  </tr>
+</table>
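+
+Expressed as `yarn-site.xml` entries, the two settings above would look something like this
+(the values shown are simply the minimums suggested in the table):
+
+    <property>
+      <name>yarn.scheduler.minimum-allocation-mb</name>
+      <value>256</value>
+    </property>
+    <property>
+      <name>yarn.nodemanager.delete.debug-delay-sec</name>
+      <value>3600</value>
+    </property>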
+
+
+There are other options detailed in the Troubleshooting file available [here](/docs/troubleshooting.html).
+
+
+## <a name="download"></a>Download Slider Packages
+
+Slider releases are available at
+[https://www.apache.org/dyn/closer.cgi/incubator/slider](https://www.apache.org/dyn/closer.cgi/incubator/slider).
+
+## <a name="build"></a>Build Slider
+
+* From the top level directory, execute `mvn clean install -DskipTests`
+* Use the compressed tar file generated in the `slider-assembly/target` directory (e.g. `slider-0.30.0-all.tar.gz`) for the subsequent steps, as shown in the sketch below
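+
+A minimal sketch of the build-and-locate step, assuming the Slider source tree is the current directory:
+
+    # build Slider, skipping the tests
+    mvn clean install -DskipTests
+
+    # the tarball used in the following steps is produced here
+    ls slider-assembly/target/slider-*-all.tar.gz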
+
+## <a name="install"></a>Install Slider
+
+Follow these steps to expand and install Slider:
+
+    mkdir ${slider-install-dir}
+
+    cd ${slider-install-dir}
+
+Log in as the "yarn" user (assuming this is a host associated with the installed cluster), e.g. `su yarn`.
+*This assumes that all apps are run as the "yarn" user. Any other user can be used to run the apps; ensure that the required file permissions are granted.*
+
+Expand the tar file:  `tar -xvf slider-0.30.0-all.tar.gz`
+
+Browse to the Slider directory: `cd slider-0.30.0/bin`
+
+      export PATH=$PATH:/usr/jdk64/jdk1.7.0_45/bin 
+    
+(or the path to the JDK bin directory)
+
+Modify Slider configuration file `${slider-install-dir}/slider-0.30.0/conf/slider-client.xml` to add the following properties:
+
+      <property>
+          <name>yarn.application.classpath</name>
+          <value>/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*</value>
+      </property>
+      
+      <property>
+          <name>slider.zookeeper.quorum</name>
+          <value>yourZooKeeperHost:port</value>
+      </property>
+
+
+In addition, specify the scheduler and HDFS addresses as follows:
+
+    <property>
+        <name>yarn.resourcemanager.address</name>
+        <value>yourResourceManagerHost:8050</value>
+    </property>
+    <property>
+        <name>yarn.resourcemanager.scheduler.address</name>
+        <value>yourResourceManagerHost:8030</value>
+    </property>
+    <property>
+        <name>fs.defaultFS</name>
+        <value>hdfs://yourNameNodeHost:8020</value>
+    </property>
+
+
+Execute:
+ 
+    ${slider-install-dir}/slider-0.30.0/bin/slider version
+
+Ensure there are no errors and that the output includes "Compiled against Hadoop 2.4.0".
+
+## <a name="deploy"></a>Deploy Slider Resources
+
+Ensure that all file folders are accessible to the user creating the application instance. The example assumes "yarn" to be that user.
+
+### Create HDFS root folder for Slider
+
+Perform the following steps to create the Slider root folder with the appropriate permissions:
+
+    su hdfs
+    
+    hdfs dfs -mkdir /slider
+    
+    hdfs dfs -chown yarn:hdfs /slider
+    
+    hdfs dfs -mkdir /user/yarn
+    
+    hdfs dfs -chown yarn:hdfs /user/yarn
+
+### Load Slider Agent
+
+    su yarn
+    
+    hdfs dfs -mkdir /slider/agent
+    
+    hdfs dfs -mkdir /slider/agent/conf
+    
+    hdfs dfs -copyFromLocal ${slider-install-dir}/slider-0.30.0/agent/slider-agent-0.30.0.tar.gz /slider/agent
+
+### Create and deploy Slider Agent configuration
+
+Create an agent config file (agent.ini) based on the sample available at:
+
+    ${slider-install-dir}/slider-0.30.0/agent/conf/agent.ini
+
+The sample agent.ini file can be used as is (see below). Some of the parameters of interest are:
+
+* `log_level` = INFO or DEBUG, to control the verbosity of the log
+* `app_log_dir` = the relative location of the application log file
+* `log_dir` = the relative location of the agent and command log file
+
+    [server]
+    hostname=localhost
+    port=8440
+    secured_port=8441
+    check_path=/ws/v1/slider/agents/
+    register_path=/ws/v1/slider/agents/{name}/register
+    heartbeat_path=/ws/v1/slider/agents/{name}/heartbeat
+
+    [agent]
+    app_pkg_dir=app/definition
+    app_install_dir=app/install
+    app_run_dir=app/run
+    app_task_dir=app/command-log
+    app_log_dir=app/log
+    app_tmp_dir=app/tmp
+    log_dir=infra/log
+    run_dir=infra/run
+    version_file=infra/version
+    log_level=INFO
+
+    [python]
+
+    [command]
+    max_retries=2
+    sleep_between_retries=1
+
+    [security]
+
+    [heartbeat]
+    state_interval=6
+    log_lines_count=300
+
+
+Once created, deploy the agent.ini file to HDFS:
+
+    su yarn
+    
+    hdfs dfs -copyFromLocal agent.ini /slider/agent/conf
+
+## <a name="downsample"></a>Download Sample Application Packages
+
+There are three sample application packages available for download to use with Slider:
+
+<table>
+  <tr>
+    <td>Application</td>
+    <td>Version</td>
+    <td>URL</td>
+  </tr>
+  <tr>
+    <td>Apache HBase</td>
+    <td>0.96.0</td>
+    <td>http://public-repo-1.hortonworks.com/slider/hbase_v096.tar</td>
+  </tr>
+  <tr>
+    <td>Apache Storm</td>
+    <td>0.9.1</td>
+    <td>http://public-repo-1.hortonworks.com/slider/storm_v091.tar</td>
+  </tr>
+  <tr>
+    <td>Apache Accumulo</td>
+    <td>1.5.1</td>
+    <td>http://public-repo-1.hortonworks.com/slider/accumulo_v151.tar</td>
+  </tr>
+</table>
+
+
+Download the packages and deploy one of these sample applications to YARN via Slider using the steps below.
+
+## <a name="installapp"></a>Install, Configure, Start and Verify Sample Application
+
+* [Load Sample Application Package](#load)
+
+* [Create Application Specifications](#create)
+
+* [Start the Application](#start)
+
+* [Verify the Application](#verify)
+
+* [Manage the Application Lifecycle](#manage)
+
+### <a name="load"></a>Load Sample Application Package
+
+    hdfs dfs -copyFromLocal <sample-application-package> /slider
+
+If necessary, create HDFS folders needed by the application. For example, HBase requires the following HDFS-based setup:
+
+    su hdfs
+    
+    hdfs dfs -mkdir /apps
+    
+    hdfs dfs -mkdir /apps/hbase
+    
+    hdfs dfs -chown yarn:hdfs /apps/hbase
+
+### <a name="create"></a>Create Application Specifications
+
+Configuring a Slider application consists of two parts: the [Resource Specification](#resspec)
+and the [Application Configuration](#appconfig). Below are guidelines for creating these files.
+
+*Note: there are sample Resource Specification (`resources.json`) and Application Configuration
+(`appConfig.json`) files in the [Appendix](#appendixa) and also in the root directory of the
+sample application packages (e.g. `hbase-v096/resources.json` and `hbase-v096/appConfig.json`).*
+
+#### <a name="resspec"></a>Resource Specification
+
+Slider needs to know what components (and how many components) are in an application package to deploy. For example, in HBase, the components are **_master_** and **_worker_** -- the latter hosting **HBase RegionServers**, and the former hosting the **HBase Master**. 
+
+As Slider creates each instance of a component in its own YARN container, it also needs to know what to ask YARN for in terms of **memory** and **CPU** for those containers. 
+
+All this information goes into the **Resources Specification** file ("Resource Spec") named `resources.json`. The Resource Spec tells Slider how many instances of each component in the application (such as an HBase RegionServer) to deploy and the parameters for YARN.
+
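+As an orientation, a stripped-down Resource Spec for HBase might look like the sketch below.
+The component names, priorities and instance counts follow the sample in Appendix B; the memory
+and CPU keys (`yarn.memory`, `yarn.vcores`) and their values are illustrative assumptions and
+should be checked against your Slider version:
+
+    {
+      "schema" : "http://example.org/specification/v2.0.0",
+      "metadata" : { },
+      "global" : { },
+      "components" : {
+        "slider-appmaster" : { },
+        "HBASE_MASTER" : {
+          "yarn.role.priority" : "1",
+          "yarn.component.instances" : "1",
+          "yarn.memory" : "256",
+          "yarn.vcores" : "1"
+        },
+        "HBASE_REGIONSERVER" : {
+          "yarn.role.priority" : "2",
+          "yarn.component.instances" : "1",
+          "yarn.memory" : "256",
+          "yarn.vcores" : "1"
+        }
+      }
+    }
+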
+Sample Resource Spec files are available in the Appendix:
+
+* [Appendix A: Storm Sample Resource Specification](#appendixa)
+
+* [Appendix B: HBase Sample Resource Specification](#appendixb)
+
+Store the Resource Spec file on your local disk (e.g. `/tmp/resources.json`).
+
+#### <a name="appconfig"></a>Application Configuration
+
+Alongside the Resource Spec there is the **Application Configuration** file ("App Config") which includes parameters that are specific to the application, rather than YARN. The App Config is a file that contains all application configuration. This configuration is applied to the default configuration provided by the application definition and then handed off to the associated component agent.
+
+The App Config defines the configuration details **specific to the application and component** instances, for example the heap sizes of the JVMs. For HBase, this includes any values for the *to-be-generated* hbase-site.xml file, as well as options for individual components, such as their heap size.
+
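+As a concrete illustration of that convention, an App Config entry such as the following
+(taken from the HBase sample in Appendix B):
+
+    "site.hbase-site.hbase.master.info.port": "60010"
+
+is intended to surface in the generated hbase-site.xml as something like:
+
+    <property>
+      <name>hbase.master.info.port</name>
+      <value>60010</value>
+    </property>
+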
+Sample App Configs are available in the Appendix:
+
+* [Appendix A: Storm Sample Application Configuration](#appendixa)
+
+* [Appendix B: HBase Sample Application Configuration](#appendixb)
+
+Store the appConfig.json file on your local disk and a copy in HDFS:
+
+    su yarn
+    
+    hdfs dfs -mkdir /slider/appconf
+    
+    hdfs dfs -copyFromLocal appConfig.json /slider/appconf
+
+### <a name="start"></a>Start the Application
+
+Once the steps above are completed, the application can be started through the **Slider Command Line Interface (CLI)**.
+
+Change directory to the "bin" directory under the Slider installation:
+
+    cd ${slider-install-dir}/slider-0.30.0/bin
+
+Execute the following command:
+
+    ./slider create cl1 --manager yourResourceManagerHost:8050 --image hdfs://yourNameNodeHost:8020/slider/agent/slider-agent-0.30.0.tar.gz --template appConfig.json --resources resources.json
+
+### <a name="verify"></a>Verify the Application
+
+The successful launch of the application can be verified via the YARN Resource Manager Web UI. In most instances, this UI is accessible via a web browser at port 8088 of the Resource Manager Host:
+
+![image alt text](images/image_0.png)
+
+The specific information for the running application is accessible via the "ApplicationMaster" link that can be seen in the far right column of the row associated with the running application (probably the top row):
+
+![image alt text](images/image_1.png)
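+
+If you prefer the command line to the web UI, the same information can be pulled from YARN
+directly; a sketch assuming the default Resource Manager web port of 8088 and that `curl` is available:
+
+    # list running YARN applications from the CLI
+    yarn application -list
+
+    # or query the Resource Manager REST API
+    curl "http://yourResourceManagerHost:8088/ws/v1/cluster/apps?states=RUNNING"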
+
+### <a name="manage"></a>Manage the Application Lifecycle
+
+Once started, applications can be frozen/stopped, thawed/restarted, destroyed/removed, or flexed (resized) as follows:
+
+#### Frozen:
+
+    ./slider freeze cl1 --manager yourResourceManagerHost:8050  --filesystem hdfs://yourNameNodeHost:8020
+
+#### Thawed: 
+
+    ./slider thaw cl1 --manager yourResourceManagerHost:8050  --filesystem hdfs://yourNameNodeHost:8020
+
+#### Destroyed: 
+
+    ./slider destroy cl1 --manager yourResourceManagerHost:8050  --filesystem hdfs://yourNameNodeHost:8020
+
+#### Flexed:
+
+    ./slider flex cl1 --component worker 5 --manager yourResourceManagerHost:8050  --filesystem hdfs://yourNameNodeHost:8020
+
+# <a name="appendixa"></a>Appendix A: Apache Storm Sample Application Specifications
+
+## Storm Resource Specification Sample
+
+    {
+      "schema" : "http://example.org/specification/v2.0.0",
+      "metadata" : {
+      },
+      "global" : {
+      },
+      "components" : {
+        "slider-appmaster" : {
+        },
+        "NIMBUS" : {
+            "yarn.role.priority" : "1",
+            "yarn.component.instances" : "1"
+        },
+        "STORM_REST_API" : {
+            "yarn.role.priority" : "2",
+            "yarn.component.instances" : "1"
+        },
+        "STORM_UI_SERVER" : {
+            "yarn.role.priority" : "3",
+            "yarn.component.instances" : "1"
+        },
+        "DRPC_SERVER" : {
+            "yarn.role.priority" : "4",
+            "yarn.component.instances" : "1"
+        },
+        "SUPERVISOR" : {
+            "yarn.role.priority" : "5",
+            "yarn.component.instances" : "1"
+        }
+      }
+    }
+
+
+## Storm Application Configuration Sample
+
+    {
+      "schema" : "http://example.org/specification/v2.0.0",
+      "metadata" : {
+      },
+      "global" : {
+          "A site property for type XYZ with name AA": "its value",
+          "site.XYZ.AA": "Value",
+          "site.hbase-site.hbase.regionserver.port": "0",
+          "site.core-site.fs.defaultFS": "${NN_URI}",
+          "Using a well known keyword": "Such as NN_HOST for name node host",
+          "site.hdfs-site.dfs.namenode.http-address": "${NN_HOST}:50070",
+          "a global property used by app scripts": "not affiliated with any site-xml",
+          "site.global.app_user": "yarn",
+          "Another example of available keywords": "Such as AGENT_LOG_ROOT",
+          "site.global.app_log_dir": "${AGENT_LOG_ROOT}/app/log",
+          "site.global.app_pid_dir": "${AGENT_WORK_ROOT}/app/run"
+      }
+    }
+
+
+# <a name="appendixb"></a>Appendix B:  Apache HBase Sample Application Specifications
+
+## HBase Resource Specification Sample
+
+    {
+      "schema" : "http://example.org/specification/v2.0.0",
+      "metadata" : {
+      },
+      "global" : {
+      },
+      "components" : {
+        "HBASE_MASTER" : {
+            "yarn.role.priority" : "1",
+            "yarn.component.instances" : "1"
+        },
+        "slider-appmaster" : {
+        },
+        "HBASE_REGIONSERVER" : {
+            "yarn.role.priority" : "2",
+            "yarn.component.instances" : "1"
+        }
+      }
+    }
+
+
+## HBase Application Configuration Sample
+
+    {
+      "schema" : "http://example.org/specification/v2.0.0",
+      "metadata" : {
+      },
+      "global" : {
+        "agent.conf": "/slider/agent/conf/agent.ini",
+        "agent.version": "/slider/agent/version",
+        "application.def": "/slider/hbase_v096.tar",
+        "config_types": "core-site,hdfs-site,hbase-site",
+        "java_home": "/usr/jdk64/jdk1.7.0_45",
+        "package_list": "files/hbase-0.96.1-hadoop2-bin.tar",
+        "site.global.app_user": "yarn",
+        "site.global.app_log_dir": "${AGENT_LOG_ROOT}/app/log",
+        "site.global.app_pid_dir": "${AGENT_WORK_ROOT}/app/run",
+        "site.global.app_root": "${AGENT_WORK_ROOT}/app/install/hbase-0.96.1-hadoop2",
+        "site.global.app_install_dir": "${AGENT_WORK_ROOT}/app/install",
+        "site.global.hbase_master_heapsize": "1024m",
+        "site.global.hbase_regionserver_heapsize": "1024m",
+        "site.global.user_group": "hadoop",
+        "site.global.security_enabled": "false",
+        "site.hbase-site.hbase.hstore.flush.retries.number": "120",
+        "site.hbase-site.hbase.client.keyvalue.maxsize": "10485760",
+        "site.hbase-site.hbase.hstore.compactionThreshold": "3",
+        "site.hbase-site.hbase.rootdir": "${NN_URI}/apps/hbase/data",
+        "site.hbase-site.hbase.stagingdir": "${NN_URI}/apps/hbase/staging",
+        "site.hbase-site.hbase.regionserver.handler.count": "60",
+        "site.hbase-site.hbase.regionserver.global.memstore.lowerLimit": "0.38",
+        "site.hbase-site.hbase.hregion.memstore.block.multiplier": "2",
+        "site.hbase-site.hbase.hregion.memstore.flush.size": "134217728",
+        "site.hbase-site.hbase.superuser": "yarn",
+        "site.hbase-site.hbase.zookeeper.property.clientPort": "2181",
+        "site.hbase-site.hbase.regionserver.global.memstore.upperLimit": "0.4",
+        "site.hbase-site.zookeeper.session.timeout": "30000",
+        "site.hbase-site.hbase.tmp.dir": "${AGENT_WORK_ROOT}/work/app/tmp",
+        "site.hbase-site.hbase.local.dir": "${hbase.tmp.dir}/local",
+        "site.hbase-site.hbase.hregion.max.filesize": "10737418240",
+        "site.hbase-site.hfile.block.cache.size": "0.40",
+        "site.hbase-site.hbase.security.authentication": "simple",
+        "site.hbase-site.hbase.defaults.for.version.skip": "true",
+        "site.hbase-site.hbase.zookeeper.quorum": "${ZK_HOST}",
+        "site.hbase-site.zookeeper.znode.parent": "/hbase-unsecure",
+        "site.hbase-site.hbase.hstore.blockingStoreFiles": "10",
+        "site.hbase-site.hbase.hregion.majorcompaction": "86400000",
+        "site.hbase-site.hbase.security.authorization": "false",
+        "site.hbase-site.hbase.cluster.distributed": "true",
+        "site.hbase-site.hbase.hregion.memstore.mslab.enabled": "true",
+        "site.hbase-site.hbase.client.scanner.caching": "100",
+        "site.hbase-site.hbase.zookeeper.useMulti": "true",
+        "site.hbase-site.hbase.regionserver.info.port": "0",
+        "site.hbase-site.hbase.master.info.port": "60010",
+        "site.hbase-site.hbase.regionserver.port": "0",
+        "site.core-site.fs.defaultFS": "${NN_URI}",
+        "site.hdfs-site.dfs.namenode.https-address": "${NN_HOST}:50470",
+        "site.hdfs-site.dfs.namenode.http-address": "${NN_HOST}:50070"
+      }
+    }
+
+

Propchange: incubator/slider/site/trunk/content/docs/getting_started.md
------------------------------------------------------------------------------
    svn:eol-style = native