Posted to commits@slider.apache.org by st...@apache.org on 2014/10/01 02:48:23 UTC

[01/12] git commit: SLIDER-450 rename sample template and resources files for accumulo

Repository: incubator-slider
Updated Branches:
  refs/heads/feature/SLIDER-149_Support_a_YARN_service_registry 969b734fc -> 733745eaf


SLIDER-450 rename sample template and resources files for accumulo


Project: http://git-wip-us.apache.org/repos/asf/incubator-slider/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-slider/commit/bb4e7d31
Tree: http://git-wip-us.apache.org/repos/asf/incubator-slider/tree/bb4e7d31
Diff: http://git-wip-us.apache.org/repos/asf/incubator-slider/diff/bb4e7d31

Branch: refs/heads/feature/SLIDER-149_Support_a_YARN_service_registry
Commit: bb4e7d31704d8d711ff85288166192f7d41d2648
Parents: 43f9a94
Author: Billie Rinaldi <bi...@gmail.com>
Authored: Fri Sep 26 07:29:34 2014 -0700
Committer: Billie Rinaldi <bi...@gmail.com>
Committed: Fri Sep 26 07:29:34 2014 -0700

----------------------------------------------------------------------
 app-packages/accumulo/appConfig-default.json    | 57 ++++++++++++++++++++
 app-packages/accumulo/appConfig.json            | 57 --------------------
 app-packages/accumulo/pom.xml                   |  2 +
 app-packages/accumulo/resources-default.json    | 39 ++++++++++++++
 app-packages/accumulo/resources.json            | 39 --------------
 app-packages/accumulo/src/assembly/accumulo.xml |  4 +-
 .../funtest/accumulo/AccumuloBasicIT.groovy     |  3 +-
 7 files changed, 102 insertions(+), 99 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/bb4e7d31/app-packages/accumulo/appConfig-default.json
----------------------------------------------------------------------
diff --git a/app-packages/accumulo/appConfig-default.json b/app-packages/accumulo/appConfig-default.json
new file mode 100644
index 0000000..62050af
--- /dev/null
+++ b/app-packages/accumulo/appConfig-default.json
@@ -0,0 +1,57 @@
+{
+  "schema": "http://example.org/specification/v2.0.0",
+  "metadata": {
+  },
+  "global": {
+    "application.def": ".slider/package/ACCUMULO/${app.package.name}.zip",
+    "java_home": "/usr/lib/jvm/java",
+    "site.global.app_user": "${app.user}",
+    "site.global.app_log_dir": "${AGENT_LOG_ROOT}",
+    "site.global.app_pid_dir": "${AGENT_WORK_ROOT}/app/run",
+    "site.global.app_root": "${AGENT_WORK_ROOT}/app/install/accumulo-${accumulo.version}",
+    "site.global.tserver_heapsize": "256m",
+    "site.global.master_heapsize": "128m",
+    "site.global.monitor_heapsize": "64m",
+    "site.global.gc_heapsize": "64m",
+    "site.global.other_heapsize": "128m",
+    "site.global.hadoop_prefix": "${hadoop.dir}/hadoop",
+    "site.global.hadoop_conf_dir": "/etc/hadoop/conf",
+    "site.global.zookeeper_home": "${zk.dir}/zookeeper",
+    "site.global.accumulo_instance_name": "${USER}-${CLUSTER_NAME}",
+    "site.global.accumulo_root_password": "NOT_USED",
+    "site.global.user_group": "${app.user.group}",
+    "site.global.security_enabled": "false",
+    "site.global.ssl_cert_dir": "ssl",
+    "site.global.monitor_protocol": "http",
+    "site.accumulo-site.instance.volumes": "${DEFAULT_DATA_DIR}/data",
+    "site.accumulo-site.instance.zookeeper.host": "${ZK_HOST}",
+    "site.accumulo-site.instance.security.authenticator": "org.apache.slider.accumulo.CustomAuthenticator",
+    "site.accumulo-site.general.security.credential.provider.paths": "jceks://hdfs/user/${USER}/accumulo-${CLUSTER_NAME}.jceks",
+    "site.accumulo-site.general.kerberos.keytab": "${accumulo.keytab}",
+    "site.accumulo-site.general.kerberos.principal": "${accumulo.principal}",
+    "site.accumulo-site.tserver.memory.maps.native.enabled": "false",
+    "site.accumulo-site.tserver.memory.maps.max": "80M",
+    "site.accumulo-site.tserver.cache.data.size": "7M",
+    "site.accumulo-site.tserver.cache.index.size": "20M",
+    "site.accumulo-site.trace.user": "root",
+    "site.accumulo-site.tserver.sort.buffer.size": "50M",
+    "site.accumulo-site.tserver.walog.max.size": "40M",
+    "site.accumulo-site.master.port.client": "0",
+    "site.accumulo-site.trace.port.client": "0",
+    "site.accumulo-site.tserver.port.client": "0",
+    "site.accumulo-site.gc.port.client": "0",
+    "site.accumulo-site.monitor.port.client": "${ACCUMULO_MONITOR.ALLOCATED_PORT}",
+    "site.accumulo-site.monitor.port.log4j": "0",
+    "site.accumulo-site.master.replication.coordinator.port": "0",
+    "site.accumulo-site.replication.receipt.service.port": "0",
+    "site.accumulo-site.general.classpaths": "$ACCUMULO_HOME/lib/accumulo-server.jar,\n$ACCUMULO_HOME/lib/accumulo-core.jar,\n$ACCUMULO_HOME/lib/accumulo-start.jar,\n$ACCUMULO_HOME/lib/accumulo-fate.jar,\n$ACCUMULO_HOME/lib/accumulo-proxy.jar,\n$ACCUMULO_HOME/lib/[^.].*.jar,\n$ZOOKEEPER_HOME/zookeeper[^.].*.jar,\n$HADOOP_CONF_DIR,\n$HADOOP_PREFIX/[^.].*.jar,\n$HADOOP_PREFIX/lib/[^.].*.jar,\n$HADOOP_PREFIX/share/hadoop/common/.*.jar,\n$HADOOP_PREFIX/share/hadoop/common/lib/.*.jar,\n$HADOOP_PREFIX/share/hadoop/hdfs/.*.jar,\n$HADOOP_PREFIX/share/hadoop/mapreduce/.*.jar,\n$HADOOP_PREFIX/share/hadoop/yarn/.*.jar,\n${hadoop.dir}/hadoop/.*.jar,\n${hadoop.dir}/hadoop/lib/.*.jar,\n${hadoop.dir}/hadoop-hdfs/.*.jar,\n${hadoop.dir}/hadoop-mapreduce/.*.jar,\n${hadoop.dir}/hadoop-yarn/.*.jar,"
+  },
+  "credentials": {
+    "jceks://hdfs/user/${USER}/accumulo-${CLUSTER_NAME}.jceks": ["root.initial.password", "instance.secret", "trace.token.property.password"]
+  },
+  "components": {
+    "slider-appmaster": {
+      "jvm.heapsize": "256M"
+    }
+  }
+}
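
The appConfig above keeps Accumulo's secrets (root.initial.password, instance.secret, trace.token.property.password) out of the JSON itself: the "credentials" block names a JCEKS keystore, and general.security.credential.provider.paths points the application at the same store. A minimal sketch of creating and reading those aliases with Hadoop's CredentialProvider API follows; this is not Slider code, and the user/cluster names in the jceks URI are placeholders:

// Minimal sketch, assuming Hadoop's credential provider API (hadoop-common,
// org.apache.hadoop.security.alias). The jceks URI and alias names mirror the
// appConfig above; "testuser"/"testcluster" are placeholders, and a
// jceks://file/... URI can be substituted for a purely local test.
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.alias.CredentialProvider;
import org.apache.hadoop.security.alias.CredentialProviderFactory;

public class AccumuloCredentialSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
        "jceks://hdfs/user/testuser/accumulo-testcluster.jceks");

    List<CredentialProvider> providers =
        CredentialProviderFactory.getProviders(conf);
    CredentialProvider provider = providers.get(0);

    // create the three aliases the app package expects
    provider.createCredentialEntry("root.initial.password",
        "rootpw".toCharArray());
    provider.createCredentialEntry("instance.secret",
        "instancesecret".toCharArray());
    provider.createCredentialEntry("trace.token.property.password",
        "rootpw".toCharArray());
    provider.flush();

    // read one back, as the application side would
    char[] secret =
        provider.getCredentialEntry("instance.secret").getCredential();
    System.out.println("instance.secret length: " + secret.length);
  }
}

The same aliases can usually be created from the command line with Hadoop's "hadoop credential create <alias> -provider <jceks URI>" tool before the application instance is created.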

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/bb4e7d31/app-packages/accumulo/appConfig.json
----------------------------------------------------------------------
diff --git a/app-packages/accumulo/appConfig.json b/app-packages/accumulo/appConfig.json
deleted file mode 100644
index 28fb637..0000000
--- a/app-packages/accumulo/appConfig.json
+++ /dev/null
@@ -1,57 +0,0 @@
-{
-  "schema": "http://example.org/specification/v2.0.0",
-  "metadata": {
-  },
-  "global": {
-    "application.def": ".slider/package/ACCUMULO/${app.package.name}.zip",
-    "java_home": "/usr/lib/jvm/java",
-    "site.global.app_user": "${app.user}",
-    "site.global.app_log_dir": "${AGENT_LOG_ROOT}",
-    "site.global.app_pid_dir": "${AGENT_WORK_ROOT}/app/run",
-    "site.global.app_root": "${AGENT_WORK_ROOT}/app/install/accumulo-${accumulo.version}",
-    "site.global.tserver_heapsize": "256m",
-    "site.global.master_heapsize": "128m",
-    "site.global.monitor_heapsize": "64m",
-    "site.global.gc_heapsize": "64m",
-    "site.global.other_heapsize": "128m",
-    "site.global.hadoop_prefix": "/usr/lib/hadoop",
-    "site.global.hadoop_conf_dir": "/etc/hadoop/conf",
-    "site.global.zookeeper_home": "/usr/lib/zookeeper",
-    "site.global.accumulo_instance_name": "${USER}-${CLUSTER_NAME}",
-    "site.global.accumulo_root_password": "NOT_USED",
-    "site.global.user_group": "${app.user.group}",
-    "site.global.security_enabled": "false",
-    "site.global.ssl_cert_dir": "ssl",
-    "site.global.monitor_protocol": "http",
-    "site.accumulo-site.instance.volumes": "${DEFAULT_DATA_DIR}/data",
-    "site.accumulo-site.instance.zookeeper.host": "${ZK_HOST}",
-    "site.accumulo-site.instance.security.authenticator": "org.apache.slider.accumulo.CustomAuthenticator",
-    "site.accumulo-site.general.security.credential.provider.paths": "jceks://hdfs/user/${USER}/accumulo-${CLUSTER_NAME}.jceks",
-    "site.accumulo-site.general.kerberos.keytab": "${accumulo.keytab}",
-    "site.accumulo-site.general.kerberos.principal": "${accumulo.principal}",
-    "site.accumulo-site.tserver.memory.maps.native.enabled": "false",
-    "site.accumulo-site.tserver.memory.maps.max": "80M",
-    "site.accumulo-site.tserver.cache.data.size": "7M",
-    "site.accumulo-site.tserver.cache.index.size": "20M",
-    "site.accumulo-site.trace.user": "root",
-    "site.accumulo-site.tserver.sort.buffer.size": "50M",
-    "site.accumulo-site.tserver.walog.max.size": "40M",
-    "site.accumulo-site.master.port.client": "0",
-    "site.accumulo-site.trace.port.client": "0",
-    "site.accumulo-site.tserver.port.client": "0",
-    "site.accumulo-site.gc.port.client": "0",
-    "site.accumulo-site.monitor.port.client": "${ACCUMULO_MONITOR.ALLOCATED_PORT}",
-    "site.accumulo-site.monitor.port.log4j": "0",
-    "site.accumulo-site.master.replication.coordinator.port": "0",
-    "site.accumulo-site.replication.receipt.service.port": "0",
-    "site.accumulo-site.general.classpaths": "$ACCUMULO_HOME/lib/accumulo-server.jar,\n$ACCUMULO_HOME/lib/accumulo-core.jar,\n$ACCUMULO_HOME/lib/accumulo-start.jar,\n$ACCUMULO_HOME/lib/accumulo-fate.jar,\n$ACCUMULO_HOME/lib/accumulo-proxy.jar,\n$ACCUMULO_HOME/lib/[^.].*.jar,\n$ZOOKEEPER_HOME/zookeeper[^.].*.jar,\n$HADOOP_CONF_DIR,\n$HADOOP_PREFIX/[^.].*.jar,\n$HADOOP_PREFIX/lib/[^.].*.jar,\n$HADOOP_PREFIX/share/hadoop/common/.*.jar,\n$HADOOP_PREFIX/share/hadoop/common/lib/.*.jar,\n$HADOOP_PREFIX/share/hadoop/hdfs/.*.jar,\n$HADOOP_PREFIX/share/hadoop/mapreduce/.*.jar,\n$HADOOP_PREFIX/share/hadoop/yarn/.*.jar,\n/usr/lib/hadoop/.*.jar,\n/usr/lib/hadoop/lib/.*.jar,\n/usr/lib/hadoop-hdfs/.*.jar,\n/usr/lib/hadoop-mapreduce/.*.jar,\n/usr/lib/hadoop-yarn/.*.jar,"
-  },
-  "credentials": {
-    "jceks://hdfs/user/${USER}/accumulo-${CLUSTER_NAME}.jceks": ["root.initial.password", "instance.secret", "trace.token.property.password"]
-  },
-  "components": {
-    "slider-appmaster": {
-      "jvm.heapsize": "256M"
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/bb4e7d31/app-packages/accumulo/pom.xml
----------------------------------------------------------------------
diff --git a/app-packages/accumulo/pom.xml b/app-packages/accumulo/pom.xml
index 02a0a72..a2014f2 100644
--- a/app-packages/accumulo/pom.xml
+++ b/app-packages/accumulo/pom.xml
@@ -39,6 +39,8 @@
     <test.app.pkg.dir>${project.build.directory}</test.app.pkg.dir>
     <test.app.resources.dir>${project.build.directory}/test-config</test.app.resources.dir>
     <!-- these properties are used in the default and the test appConfigs -->
+    <hadoop.dir>/usr/lib</hadoop.dir> <!-- hadoop expected to be found at ${hadoop.dir}/hadoop -->
+    <zk.dir>${hadoop.dir}</zk.dir> <!-- zookeeper expected to be found at ${zk.dir}/zookeeper -->
     <app.user>yarn</app.user>
     <app.user.group>hadoop</app.user.group>
     <accumulo.keytab></accumulo.keytab>

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/bb4e7d31/app-packages/accumulo/resources-default.json
----------------------------------------------------------------------
diff --git a/app-packages/accumulo/resources-default.json b/app-packages/accumulo/resources-default.json
new file mode 100644
index 0000000..f9cae60
--- /dev/null
+++ b/app-packages/accumulo/resources-default.json
@@ -0,0 +1,39 @@
+{
+  "schema": "http://example.org/specification/v2.0.0",
+  "metadata": {
+  },
+  "global": {
+    "yarn.log.include.patterns": "",
+    "yarn.log.exclude.patterns": "",
+    "yarn.log.interval": "0"
+  },
+  "components": {
+    "ACCUMULO_MASTER": {
+      "yarn.role.priority": "1",
+      "yarn.component.instances": "1",
+      "yarn.memory": "256"
+    },
+    "slider-appmaster": {
+    },
+    "ACCUMULO_TSERVER": {
+      "yarn.role.priority": "2",
+      "yarn.component.instances": "1",
+      "yarn.memory": "256"
+    },
+    "ACCUMULO_MONITOR": {
+      "yarn.role.priority": "3",
+      "yarn.component.instances": "1",
+      "yarn.memory": "128"
+    },
+    "ACCUMULO_GC": {
+      "yarn.role.priority": "4",
+      "yarn.component.instances": "1",
+      "yarn.memory": "128"
+    },
+    "ACCUMULO_TRACER": {
+      "yarn.role.priority": "5",
+      "yarn.component.instances": "1",
+      "yarn.memory": "256"
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/bb4e7d31/app-packages/accumulo/resources.json
----------------------------------------------------------------------
diff --git a/app-packages/accumulo/resources.json b/app-packages/accumulo/resources.json
deleted file mode 100644
index f9cae60..0000000
--- a/app-packages/accumulo/resources.json
+++ /dev/null
@@ -1,39 +0,0 @@
-{
-  "schema": "http://example.org/specification/v2.0.0",
-  "metadata": {
-  },
-  "global": {
-    "yarn.log.include.patterns": "",
-    "yarn.log.exclude.patterns": "",
-    "yarn.log.interval": "0"
-  },
-  "components": {
-    "ACCUMULO_MASTER": {
-      "yarn.role.priority": "1",
-      "yarn.component.instances": "1",
-      "yarn.memory": "256"
-    },
-    "slider-appmaster": {
-    },
-    "ACCUMULO_TSERVER": {
-      "yarn.role.priority": "2",
-      "yarn.component.instances": "1",
-      "yarn.memory": "256"
-    },
-    "ACCUMULO_MONITOR": {
-      "yarn.role.priority": "3",
-      "yarn.component.instances": "1",
-      "yarn.memory": "128"
-    },
-    "ACCUMULO_GC": {
-      "yarn.role.priority": "4",
-      "yarn.component.instances": "1",
-      "yarn.memory": "128"
-    },
-    "ACCUMULO_TRACER": {
-      "yarn.role.priority": "5",
-      "yarn.component.instances": "1",
-      "yarn.memory": "256"
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/bb4e7d31/app-packages/accumulo/src/assembly/accumulo.xml
----------------------------------------------------------------------
diff --git a/app-packages/accumulo/src/assembly/accumulo.xml b/app-packages/accumulo/src/assembly/accumulo.xml
index a75d2b1..2a27638 100644
--- a/app-packages/accumulo/src/assembly/accumulo.xml
+++ b/app-packages/accumulo/src/assembly/accumulo.xml
@@ -29,7 +29,7 @@
 
   <files>
     <file>
-      <source>appConfig.json</source>
+      <source>appConfig-default.json</source>
       <outputDirectory>/</outputDirectory>
       <filtered>true</filtered>
       <fileMode>0755</fileMode>
@@ -63,7 +63,7 @@
         <exclude>pom.xml</exclude>
         <exclude>src/**</exclude>
         <exclude>target/**</exclude>
-        <exclude>appConfig.json</exclude>
+        <exclude>appConfig-default.json</exclude>
         <exclude>metainfo.xml</exclude>
       </excludes>
       <fileMode>0755</fileMode>

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/bb4e7d31/app-packages/accumulo/src/test/groovy/org/apache/slider/funtest/accumulo/AccumuloBasicIT.groovy
----------------------------------------------------------------------
diff --git a/app-packages/accumulo/src/test/groovy/org/apache/slider/funtest/accumulo/AccumuloBasicIT.groovy b/app-packages/accumulo/src/test/groovy/org/apache/slider/funtest/accumulo/AccumuloBasicIT.groovy
index 475c4e3..dad7601 100644
--- a/app-packages/accumulo/src/test/groovy/org/apache/slider/funtest/accumulo/AccumuloBasicIT.groovy
+++ b/app-packages/accumulo/src/test/groovy/org/apache/slider/funtest/accumulo/AccumuloBasicIT.groovy
@@ -57,7 +57,8 @@ class AccumuloBasicIT extends AccumuloAgentCommandTestBase {
     Configuration conf = new Configuration()
     FileSystem fs = FileSystem.getLocal(conf)
     InputStream stream = SliderUtils.getApplicationResourceInputStream(
-      fs, new Path(TEST_APP_PKG_DIR, TEST_APP_PKG_FILE), "appConfig.json");
+      fs, new Path(TEST_APP_PKG_DIR, TEST_APP_PKG_FILE),
+      "appConfig-default.json");
     assert stream!=null, "Couldn't pull appConfig.json from app pkg"
     ConfTreeSerDeser c = new ConfTreeSerDeser()
     ConfTree t = c.fromStream(stream)


[12/12] git commit: Merge branch 'develop' into feature/SLIDER-149_Support_a_YARN_service_registry

Posted by st...@apache.org.
Merge branch 'develop' into feature/SLIDER-149_Support_a_YARN_service_registry

Conflicts:
	slider-core/src/main/java/org/apache/slider/client/SliderClient.java


Project: http://git-wip-us.apache.org/repos/asf/incubator-slider/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-slider/commit/733745ea
Tree: http://git-wip-us.apache.org/repos/asf/incubator-slider/tree/733745ea
Diff: http://git-wip-us.apache.org/repos/asf/incubator-slider/diff/733745ea

Branch: refs/heads/feature/SLIDER-149_Support_a_YARN_service_registry
Commit: 733745eaf1cf3734dd8f25e7ff0945c253c4777f
Parents: f058495 41ec741
Author: Steve Loughran <st...@apache.org>
Authored: Tue Sep 30 17:48:02 2014 -0700
Committer: Steve Loughran <st...@apache.org>
Committed: Tue Sep 30 17:48:02 2014 -0700

----------------------------------------------------------------------
 app-packages/accumulo/appConfig-default.json    |  57 ++
 app-packages/accumulo/appConfig.json            |  57 --
 app-packages/accumulo/pom.xml                   |  10 +
 app-packages/accumulo/resources-default.json    |  39 ++
 app-packages/accumulo/resources.json            |  39 --
 app-packages/accumulo/src/assembly/accumulo.xml |   4 +-
 .../funtest/accumulo/AccumuloBasicIT.groovy     |   3 +-
 app-packages/hbase-win/README.txt               |   3 -
 app-packages/hbase-win/appConfig-default.json   |   2 +-
 app-packages/hbase/appConfig-default.json       |   2 +-
 app-packages/hbase/resources-default.json       |   3 +
 app-packages/storm-win/README.txt               |  36 ++
 app-packages/storm-win/appConfig-default.json   |  41 ++
 .../storm-win/configuration/storm-env.xml       |  65 +++
 .../storm-win/configuration/storm-site.xml      | 580 +++++++++++++++++++
 app-packages/storm-win/metainfo.xml             | 149 +++++
 .../storm-win/package/scripts/drpc_server.py    |  55 ++
 .../storm-win/package/scripts/nimbus.py         |  55 ++
 .../storm-win/package/scripts/params.py         |  56 ++
 .../storm-win/package/scripts/rest_api.py       |  57 ++
 .../storm-win/package/scripts/service.py        |  56 ++
 .../storm-win/package/scripts/status_params.py  |  37 ++
 app-packages/storm-win/package/scripts/storm.py |  53 ++
 .../storm-win/package/scripts/supervisor.py     |  61 ++
 .../storm-win/package/scripts/ui_server.py      |  55 ++
 .../storm-win/package/scripts/yaml_config.py    |  80 +++
 .../storm-win/package/templates/config.yaml.j2  |  28 +
 .../package/templates/storm_jaas.conf.j2        |  44 ++
 app-packages/storm-win/pom.xml                  |  91 +++
 app-packages/storm-win/resources-default.json   |  30 +
 app-packages/storm-win/src/assembly/storm.xml   |  68 +++
 app-packages/storm/appConfig-default.json       |   2 +-
 slider-agent/src/main/python/kazoo/client.py    |  56 +-
 .../src/main/python/kazoo/handlers/utils.py     |  42 +-
 .../main/python/kazoo/protocol/connection.py    |  74 +--
 .../src/main/python/kazoo/tests/test_client.py  |  55 +-
 .../main/python/kazoo/tests/test_connection.py  |  89 ++-
 slider-core/pom.xml                             |   6 +
 .../org/apache/slider/client/SliderClient.java  | 291 +++++++++-
 .../common/SliderXMLConfKeysForTesting.java     |   1 +
 .../common/params/ActionDiagnosticArgs.java     |  66 +++
 .../apache/slider/common/params/Arguments.java  |   7 +
 .../apache/slider/common/params/ClientArgs.java |  11 +-
 .../slider/common/params/SliderActions.java     |   4 +
 .../apache/slider/common/tools/SliderUtils.java | 108 ++++
 .../slider/providers/agent/AgentKeys.java       |   5 +
 .../providers/agent/AgentProviderService.java   |   2 +-
 .../services/security/CertificateManager.java   |  48 +-
 .../providers/slideram/instance/appconf.json    |   3 +-
 .../agent/TestAgentAMManagementWS.groovy        | 131 +++--
 .../apache/slider/test/SliderTestUtils.groovy   |   3 +-
 .../framework/AgentCommandTestBase.groovy       |   7 +
 .../funtest/lifecycle/AMFailuresIT.groovy       |   6 +
 .../clusters/remote/slider/slider-client.xml    |   6 +
 54 files changed, 2595 insertions(+), 344 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/733745ea/app-packages/accumulo/src/test/groovy/org/apache/slider/funtest/accumulo/AccumuloBasicIT.groovy
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/733745ea/slider-core/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/733745ea/slider-core/src/main/java/org/apache/slider/client/SliderClient.java
----------------------------------------------------------------------
diff --cc slider-core/src/main/java/org/apache/slider/client/SliderClient.java
index 99896c8,06c37ba..5d994f1
--- a/slider-core/src/main/java/org/apache/slider/client/SliderClient.java
+++ b/slider-core/src/main/java/org/apache/slider/client/SliderClient.java
@@@ -133,8 -136,8 +142,9 @@@ import java.io.IOException
  import java.io.StringWriter;
  import java.io.Writer;
  import java.net.InetSocketAddress;
+ import java.net.URISyntaxException;
  import java.util.ArrayList;
 +import java.util.Collection;
  import java.util.HashMap;
  import java.util.HashSet;
  import java.util.LinkedList;
@@@ -2408,55 -2377,297 +2428,318 @@@ public class SliderClient extends Abstr
     * @throws IOException Network or other problems
     */
    @VisibleForTesting
 -  public List<ServiceInstanceData> actionRegistryList(
 +  public Collection<ServiceRecord> actionRegistryListYarn(
        ActionRegistryArgs registryArgs)
        throws YarnException, IOException {
 -    SliderRegistryService registryService = getRegistry();
      String serviceType = registryArgs.serviceType;
      String name = registryArgs.name;
 -    List<CuratorServiceInstance<ServiceInstanceData>> instances =
 -        registryService.findInstances(serviceType, name);
 -    int size = instances.size();
 -    if (size == 0) {
 -      throw new FileNotFoundException("No entries for servicetype "
 -                                      + serviceType
 -                                      + " name " + name);
 +    RegistryOperations operations = getRegistryOperations();
 +    Collection<ServiceRecord> serviceRecords;
 +    if (StringUtils.isEmpty(name)) {
 +      String serviceclassPath =
 +          serviceclassPath(
 +              currentUser(),
 +              serviceType);
 +
 +      try {
 +        Map<String, ServiceRecord> recordMap =
 +            listServiceRecords(operations, serviceclassPath);
 +        RegistryPathStatus[] listDir;
 +        if (recordMap.isEmpty()) {
 +          throw new UnknownApplicationInstanceException(
 +              "No applications registered under " + serviceclassPath);
 +        }
 +        serviceRecords = recordMap.values();
 +      } catch (PathNotFoundException e) {
 +        throw new UnknownApplicationInstanceException(e.getPath().toString(),
 +            e);
 +      }
 +    } else {
 +      ServiceRecord instance = lookupServiceRecord(registryArgs);
 +      serviceRecords = new ArrayList<ServiceRecord>(1);
 +      serviceRecords.add(instance);
      }
 -    List<ServiceInstanceData> sids = new ArrayList<ServiceInstanceData>(size);
 -    for (CuratorServiceInstance<ServiceInstanceData> instance : instances) {
 -      ServiceInstanceData payload = instance.payload;
 -      logInstance(payload, registryArgs.verbose);
 -      sids.add(payload);
 +
 +    for (ServiceRecord serviceRecord : serviceRecords) {
 +      logInstance(serviceRecord, registryArgs.verbose);
      }
 -    return sids;
 +    return serviceRecords;
    }
  
+ 	/**
+ 	 * diagnostic operation
+ 	 *
+ 	 * @param clusterName
+ 	 *            application name
+ 	 * @param diagosticArgs
+ 	 *            diagnostic Arguments
+ 	 * @return 0 for success, -1 for some issues that aren't errors, just
+ 	 *         failures to retrieve information (e.g. no application name
+ 	 *         specified)
+ 	 * @throws YarnException
+ 	 *             YARN problems
+ 	 * @throws IOException
+ 	 *             Network or other problems
+ 	 */
+ 	private int actionDiagnostic(ActionDiagnosticArgs diagnosticArgs) {
+ 		try {
+ 			if (diagnosticArgs.client) {
+ 				actionDiagnosticClient();
+ 			} else if (SliderUtils.isSet(diagnosticArgs.application)) {
+ 				actionDiagnosticApplication(diagnosticArgs);
+ 			} else if (SliderUtils.isSet(diagnosticArgs.slider)) {
+ 				actionDiagnosticSlider(diagnosticArgs);
+ 			} else if (diagnosticArgs.yarn) {
+ 				actionDiagnosticYarn(diagnosticArgs);
+ 			} else if (diagnosticArgs.credentials) {
+ 				actionDiagnosticCredentials();
+ 			} else if (SliderUtils.isSet(diagnosticArgs.all)) {
+ 				actionDiagnosticAll(diagnosticArgs);
+ 			} else if (SliderUtils.isSet(diagnosticArgs.level)) {
+ 				actionDiagnosticIntelligent(diagnosticArgs);
+ 			} else {
+ 				// it's an unknown command
+ 		        log.info(ActionDiagnosticArgs.USAGE);
+ 		        return EXIT_USAGE;
+ 			}
+ 		} catch (Exception e) {
+ 			log.error(e.toString());
+ 			return EXIT_FALSE;
+ 		}
+ 		return EXIT_SUCCESS;
+ 	}
+ 
+ 	private void actionDiagnosticIntelligent(ActionDiagnosticArgs diagnosticArgs)
+ 			throws YarnException, IOException, URISyntaxException {
+ 		// not using member variable clustername because we want to place
+ 		// application name after --application option and member variable
+ 		// cluster name has to be put behind action
+ 		String clusterName = diagnosticArgs.level;
+ 
+ 		try {
+ 			SliderUtils.validateClientConfigFile();
+ 			log.info("Slider-client.xml is accessible");
+ 		} catch (IOException e) {
+ 			// we are catching exceptions here because those are indication of
+ 			// validation result, and we need to print them here
+ 			log.error("validation of slider-client.xml fails because: "
+ 					+ e.toString());
+ 			return;
+ 		}
+ 		SliderClusterOperations clusterOperations = createClusterOperations(clusterName);
+ 		// cluster not found exceptions will be thrown upstream
+ 		ClusterDescription clusterDescription = clusterOperations
+ 				.getClusterDescription();
+ 		log.info("Slider AppMaster is accessible");
+ 		
+ 		if (clusterDescription.state == ClusterDescription.STATE_LIVE) {
+ 			AggregateConf instanceDefinition = clusterOperations
+ 					.getInstanceDefinition();
+ 			String imagePath = instanceDefinition.getInternalOperations().get(
+ 					InternalKeys.INTERNAL_APPLICATION_IMAGE_PATH);
+ 			//if null, that means slider uploaded the agent tarball for the user
+ 			//and we need to use where slider has put
+ 			if(imagePath == null){
 -				ApplicationReport appReport = YARNRegistryClient.findInstance(clusterName);
++				ApplicationReport appReport = findInstance(clusterName);
+ 				Path path1 = sliderFileSystem.getTempPathForCluster(clusterName);
+ 				Path subPath = new Path(path1, appReport.getApplicationId().toString() + "/am");
+ 				imagePath = subPath.toString();
+ 			}
+ 			try {
+ 				SliderUtils.validateHDFSFile(sliderFileSystem, imagePath);
+ 				log.info("Slider agent tarball is properly installed");
+ 			} catch (IOException e) {
+ 				log.error("can not find or open agent tar ball: " + e.toString());
+ 				return;
+ 			}
+ 			String pkgTarballPath = instanceDefinition.getAppConfOperations()
+ 					.getGlobalOptions().getMandatoryOption(AgentKeys.APP_DEF);
+ 			try {
+ 				SliderUtils.validateHDFSFile(sliderFileSystem, pkgTarballPath);
+ 				log.info("Application tarball is properly installed");
+ 			} catch (IOException e) {
+ 				log.error("can not find or open application tar ball: "
+ 						+ e.toString());
+ 				return;
+ 			}
+ 		}
+ 	}
+ 
+ 	private void actionDiagnosticAll(ActionDiagnosticArgs diagnosticArgs)
+ 			throws IOException, YarnException {
+ 		//assign application name from param to each sub diagnostic function
+ 		diagnosticArgs.application = diagnosticArgs.all;
+ 		diagnosticArgs.slider = diagnosticArgs.all;
+ 		actionDiagnosticClient();
+ 		actionDiagnosticApplication(diagnosticArgs);
+ 		actionDiagnosticSlider(diagnosticArgs);
+ 		actionDiagnosticYarn(diagnosticArgs);
+ 		actionDiagnosticCredentials();
+ 	}
+ 
+ 	private void actionDiagnosticCredentials() throws BadConfigException, IOException
+ 			 {
+ 		if (SliderUtils.isHadoopClusterSecure(SliderUtils
+ 				.loadClientConfigurationResource())) {
+ 			String credentialCacheFileDescription = null;
+ 			try {
+ 				credentialCacheFileDescription = SliderUtils
+ 						.checkCredentialCacheFile();
+ 			} catch (BadConfigException e) {
+ 				log.error("The credential config is not valid: " + e.toString());
+ 				throw e;
+ 			} catch (IOException e) {
+ 				log.error("Unable to read the credential file: " + e.toString());
+ 				throw e;
+ 			}
+ 			log.info("Credential cache file for the current user: "
+ 					+ credentialCacheFileDescription);
+ 		} else {
+ 			log.info("the cluster is not in secure mode");
+ 		}
+ 	}
+ 
+ 	private void actionDiagnosticYarn(ActionDiagnosticArgs diagnosticArgs) throws IOException, YarnException {
+ 		JSONObject converter = null;
+ 		log.info("the node in the YARN cluster has below state: ");
+ 		List<NodeReport> yarnClusterInfo;
+ 		try {
+ 			yarnClusterInfo = yarnClient.getNodeReports(NodeState.RUNNING);
+ 		} catch (YarnException e1) {
+ 			log.error("Exception happened when fetching node report from the YARN cluster: " + e1.toString());
+ 			throw e1;
+ 		} catch (IOException e1) {
+ 			log.error("Network problem happened when fetching node report YARN cluster: " + e1.toString());
+ 			throw e1;
+ 		}
+ 		for(NodeReport nodeReport : yarnClusterInfo){
+ 			log.info(nodeReport.toString());
+ 		}
+ 		
+ 		if (diagnosticArgs.verbose) {
+ 			Writer configWriter = new StringWriter();
+ 			try {
+ 				Configuration.dumpConfiguration(yarnClient.getConfig(), configWriter);
+ 			} catch (IOException e1) {
+ 				log.error("Network problem happened when retrieving YARN config from YARN: " + e1.toString());
+ 				throw e1;
+ 			}
+ 			try {
+ 				converter = new JSONObject(configWriter.toString());
+ 				log.info("the configuration of the YARN cluster is: "
+ 						+ converter.toString(2));
+ 				
+ 			} catch (JSONException e) {
+ 				log.error("JSONException happened during parsing response from YARN: " + e.toString());
+ 			}
+ 		}
+ 	}
+ 
+ 	private void actionDiagnosticSlider(ActionDiagnosticArgs diagnosticArgs) throws YarnException, IOException
+ 			{
+ 		// not using member variable clustername because we want to place
+ 		// application name after --application option and member variable
+ 		// cluster name has to be put behind action
+ 		String clusterName = diagnosticArgs.slider;
+ 		SliderClusterOperations clusterOperations;
+ 		AggregateConf instanceDefinition = null;
+ 		try {
+ 			clusterOperations = createClusterOperations(clusterName);
+ 			instanceDefinition = clusterOperations
+ 					.getInstanceDefinition();
+ 		} catch (YarnException e) {
+ 			log.error("Exception happened when retrieving instance definition from YARN: " + e.toString());
+ 			throw e;
+ 		} catch (IOException e) {
+ 			log.error("Network problem happened when retrieving instance definition from YARN: " + e.toString());
+ 			throw e;
+ 		}
+ 		String imagePath = instanceDefinition.getInternalOperations().get(
+ 				InternalKeys.INTERNAL_APPLICATION_IMAGE_PATH);
+ 		//if null, it will be uploaded by Slider and thus at slider's path
+ 		if(imagePath == null){
 -			ApplicationReport appReport = YARNRegistryClient.findInstance(clusterName);
++			ApplicationReport appReport = findInstance(clusterName);
+ 			Path path1 = sliderFileSystem.getTempPathForCluster(clusterName);
+ 			Path subPath = new Path(path1, appReport.getApplicationId().toString() + "/am");
+ 			imagePath = subPath.toString();
+ 		}
+ 		log.info("The path of slider agent tarball on HDFS is: " + imagePath);
+ 	}
+ 
+ 	private void actionDiagnosticApplication(ActionDiagnosticArgs diagnosticArgs) throws YarnException, IOException
+ 			{
+ 		// not using member variable clustername because we want to place
+ 		// application name after --application option and member variable
+ 		// cluster name has to be put behind action
+ 		String clusterName = diagnosticArgs.application;
+ 		SliderClusterOperations clusterOperations;
+ 		AggregateConf instanceDefinition = null;
+ 		try {
+ 			clusterOperations = createClusterOperations(clusterName);
+ 			instanceDefinition = clusterOperations
+ 					.getInstanceDefinition();
+ 		} catch (YarnException e) {
+ 			log.error("Exception happened when retrieving instance definition from YARN: " + e.toString());
+ 			throw e;
+ 		} catch (IOException e) {
+ 			log.error("Network problem happened when retrieving instance definition from YARN: " + e.toString());
+ 			throw e;
+ 		}
+ 		String clusterDir = instanceDefinition.getAppConfOperations()
+ 				.getGlobalOptions().get(AgentKeys.APP_ROOT);
+ 		String pkgTarball = instanceDefinition.getAppConfOperations()
+ 				.getGlobalOptions().get(AgentKeys.APP_DEF);
+ 		String runAsUser = instanceDefinition.getAppConfOperations()
+ 				.getGlobalOptions().get(AgentKeys.RUNAS_USER);
+ 
+ 		log.info("The location of the cluster instance directory in HDFS is: "
+ 				+ clusterDir);
+ 		log.info("The name of the application package tarball on HDFS is: "
+ 				+ pkgTarball);
+ 		log.info("The runas user of the application in the cluster is: "
+ 				+ runAsUser);
+ 
+ 		if (diagnosticArgs.verbose) {
+ 			log.info("App config of the application: "
+ 					+ instanceDefinition.getAppConf().toJson());
+ 			log.info("Resource config of the application: "
+ 					+ instanceDefinition.getResources().toJson());
+ 		}
+ 	}
+ 
+ 	private void actionDiagnosticClient() throws SliderException, IOException {
+ 		String currentCommandPath = SliderUtils.getCurrentCommandPath();
+ 		SliderVersionInfo.loadAndPrintVersionInfo(log);
+ 		String clientConfigPath = SliderUtils.getClientConfigPath();
+ 		String jdkInfo = SliderUtils.getJDKInfo();
+ 		log.info("The slider command path: " + currentCommandPath);
+ 		log.info("The slider-client.xml used by current running command path: "
+ 				+ clientConfigPath);
+ 		log.info(jdkInfo);
+ 
+ 		try {
+ 			SliderUtils.validateSliderClientEnvironment(log);
+ 		} catch (SliderException e) {
+ 			log.error(e.toString());
+ 			throw e;
+ 		} catch (IOException e) {
+ 			log.error(e.toString());
+ 			throw e;
+ 		}
+ 	}
+ 
 -  private void logInstance(ServiceInstanceData instance,
++
 +  /**
 +   * Log a service record instance
 +   * @param instance record
 +   * @param verbose verbose logging of all external endpoints
 +   */
 +  private void logInstance(ServiceRecord instance,
        boolean verbose) {
      if (!verbose) {
 -      log.info("{}", instance.id);
 +      log.info("{}", instance.yarn_id);
      } else {
 -      log.info("{}: ", instance.id);
 +      log.info("{}: ", instance.yarn_id);
        logEndpoints(instance);
      }
    }
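
The rewritten actionRegistryListYarn above walks the YARN service registry rather than the old Curator-based SliderRegistryService: with no name argument it lists every ServiceRecord under the user's service-class path, otherwise it looks up the single named instance. A standalone sketch of the same listing against the registry client API as it later shipped in Hadoop (package names moved around while this feature branch was in flight, and the "org-apache-slider" service class here is illustrative):

// Minimal sketch, not Slider code: list the service records registered under
// one service class for the current user, using the Hadoop YARN registry
// client API (org.apache.hadoop.registry, Hadoop 2.6+).
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.registry.client.api.RegistryOperations;
import org.apache.hadoop.registry.client.api.RegistryOperationsFactory;
import org.apache.hadoop.registry.client.binding.RegistryUtils;
import org.apache.hadoop.registry.client.types.ServiceRecord;

public class RegistryListSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // factory returns an initialized but not yet started service
    RegistryOperations operations =
        RegistryOperationsFactory.createInstance(conf);
    operations.start();
    try {
      String path = RegistryUtils.serviceclassPath(
          RegistryUtils.currentUser(), "org-apache-slider");
      Map<String, ServiceRecord> records =
          RegistryUtils.listServiceRecords(operations, path);
      for (Map.Entry<String, ServiceRecord> entry : records.entrySet()) {
        System.out.println(entry.getKey() + " -> " + entry.getValue());
      }
    } finally {
      operations.stop();
    }
  }
}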

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/733745ea/slider-core/src/main/java/org/apache/slider/providers/agent/AgentProviderService.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/733745ea/slider-core/src/test/groovy/org/apache/slider/test/SliderTestUtils.groovy
----------------------------------------------------------------------


[08/12] git commit: Add log aggregation entries to resources-default.json

Posted by st...@apache.org.
Add log aggregation entries to resources-default.json


Project: http://git-wip-us.apache.org/repos/asf/incubator-slider/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-slider/commit/f392d23e
Tree: http://git-wip-us.apache.org/repos/asf/incubator-slider/tree/f392d23e
Diff: http://git-wip-us.apache.org/repos/asf/incubator-slider/diff/f392d23e

Branch: refs/heads/feature/SLIDER-149_Support_a_YARN_service_registry
Commit: f392d23ee9342a8e250132ce9ad093d19a4ae234
Parents: ba1e79b
Author: tedyu <yu...@gmail.com>
Authored: Mon Sep 29 19:58:49 2014 -0700
Committer: tedyu <yu...@gmail.com>
Committed: Mon Sep 29 19:58:49 2014 -0700

----------------------------------------------------------------------
 app-packages/hbase/resources-default.json | 3 +++
 1 file changed, 3 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/f392d23e/app-packages/hbase/resources-default.json
----------------------------------------------------------------------
diff --git a/app-packages/hbase/resources-default.json b/app-packages/hbase/resources-default.json
index c3fec68..2487e14 100644
--- a/app-packages/hbase/resources-default.json
+++ b/app-packages/hbase/resources-default.json
@@ -3,6 +3,9 @@
   "metadata": {
   },
   "global": {
+    "yarn.log.include.patterns": "",
+    "yarn.log.exclude.patterns": "",
+    "yarn.log.interval": "0"
   },
   "components": {
     "HBASE_MASTER": {


[03/12] git commit: SLIDER-443 remove default keystore location SLIDER-449 Consume both streams for openssl (required for windows)

Posted by st...@apache.org.
SLIDER-443 remove default keystore location
SLIDER-449 Consume both streams for openssl (required for windows)


Project: http://git-wip-us.apache.org/repos/asf/incubator-slider/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-slider/commit/732569db
Tree: http://git-wip-us.apache.org/repos/asf/incubator-slider/tree/732569db
Diff: http://git-wip-us.apache.org/repos/asf/incubator-slider/diff/732569db

Branch: refs/heads/feature/SLIDER-149_Support_a_YARN_service_registry
Commit: 732569db9468ed7ced6fe5ae750130fdcd455d04
Parents: 964e403
Author: Jon Maron <jm...@hortonworks.com>
Authored: Fri Sep 26 17:07:18 2014 -0400
Committer: Jon Maron <jm...@hortonworks.com>
Committed: Fri Sep 26 17:07:18 2014 -0400

----------------------------------------------------------------------
 .../org/apache/slider/client/SliderClient.java  |  12 +-
 .../services/security/CertificateManager.java   |  48 ++++++-
 .../providers/slideram/instance/appconf.json    |   3 +-
 .../agent/TestAgentAMManagementWS.groovy        | 131 ++++++++++++-------
 .../apache/slider/test/SliderTestUtils.groovy   |   3 +-
 5 files changed, 141 insertions(+), 56 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/732569db/slider-core/src/main/java/org/apache/slider/client/SliderClient.java
----------------------------------------------------------------------
diff --git a/slider-core/src/main/java/org/apache/slider/client/SliderClient.java b/slider-core/src/main/java/org/apache/slider/client/SliderClient.java
index afbb4a8..d57f7dc 100644
--- a/slider-core/src/main/java/org/apache/slider/client/SliderClient.java
+++ b/slider-core/src/main/java/org/apache/slider/client/SliderClient.java
@@ -782,7 +782,8 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     builder.init(providerName, instanceDefinition);
     builder.propagateFilename();
     builder.propagatePrincipals();
-    builder.setImageDetailsIfAvailable(buildInfo.getImage(), buildInfo.getAppHomeDir());
+    builder.setImageDetailsIfAvailable(buildInfo.getImage(),
+                                       buildInfo.getAppHomeDir());
     builder.setQueue(buildInfo.queue);
 
     String quorum = buildInfo.getZKhosts();
@@ -839,7 +840,7 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
       throw e;
     }
     try {
-      builder.persist(appconfdir, overwrite);
+      persistInstanceDefinition(overwrite, appconfdir, builder);
     } catch (LockAcquireFailedException e) {
       log.warn("Failed to get a Lock on {} : {}", builder, e);
       throw new BadClusterStateException("Failed to save " + clustername
@@ -847,6 +848,13 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     }
   }
 
+  protected void persistInstanceDefinition(boolean overwrite,
+                                         Path appconfdir,
+                                         InstanceBuilder builder)
+      throws IOException, SliderException, LockAcquireFailedException {
+    builder.persist(appconfdir, overwrite);
+  }
+
   @VisibleForTesting
   public static void replaceTokens(ConfTree conf,
       String userName, String clusterName) throws IOException {

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/732569db/slider-core/src/main/java/org/apache/slider/server/services/security/CertificateManager.java
----------------------------------------------------------------------
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/security/CertificateManager.java b/slider-core/src/main/java/org/apache/slider/server/services/security/CertificateManager.java
index 8f679e1..d200033 100644
--- a/slider-core/src/main/java/org/apache/slider/server/services/security/CertificateManager.java
+++ b/slider-core/src/main/java/org/apache/slider/server/services/security/CertificateManager.java
@@ -28,6 +28,7 @@ import org.slf4j.LoggerFactory;
 import java.io.BufferedReader;
 import java.io.File;
 import java.io.IOException;
+import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.nio.charset.Charset;
 import java.text.MessageFormat;
@@ -86,6 +87,42 @@ public class CertificateManager {
     return certFile.exists();
   }
 
+  class StreamConsumer extends Thread
+  {
+    InputStream is;
+    boolean logOutput;
+
+    StreamConsumer(InputStream is, boolean logOutput)
+    {
+      this.is = is;
+      this.logOutput = logOutput;
+    }
+
+    StreamConsumer(InputStream is)
+    {
+      this(is, false);
+    }
+
+    public void run()
+    {
+      try
+      {
+        InputStreamReader isr = new InputStreamReader(is,
+                                                      Charset.forName("UTF8"));
+        BufferedReader br = new BufferedReader(isr);
+        String line;
+        while ( (line = br.readLine()) != null)
+          if (logOutput) {
+            LOG.info(line);
+          }
+      } catch (IOException e)
+      {
+        LOG.error("Error during processing of process stream", e);
+      }
+    }
+  }
+
+
   /**
    * Runs os command
    *
@@ -98,12 +135,13 @@ public class CertificateManager {
     BufferedReader br= null;
     try {
       process = Runtime.getRuntime().exec(command);
-      br = new BufferedReader(new InputStreamReader(
-          process.getInputStream(), Charset.forName("UTF8")));
+      StreamConsumer outputConsumer =
+          new StreamConsumer(process.getInputStream(), true);
+      StreamConsumer errorConsumer =
+          new StreamConsumer(process.getErrorStream());
 
-      while ((line = br.readLine()) != null) {
-        LOG.info(line);
-      }
+      outputConsumer.start();
+      errorConsumer.start();
 
       try {
         process.waitFor();
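
The StreamConsumer threads added here are the point of SLIDER-449: if only stdout is read, openssl can block once its stderr pipe buffer fills (readily observed on Windows), and waitFor() then never returns. The general Java pattern, as a standalone sketch with an illustrative command rather than the openssl invocation Slider runs:

// Minimal sketch, not Slider code: drain both stdout and stderr of a child
// process on separate threads so neither pipe can fill up and block the child
// before waitFor() completes. The command is illustrative.
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;

public class DrainBothStreams {
  static Thread drain(final InputStream in, final boolean logLines) {
    Thread t = new Thread(new Runnable() {
      public void run() {
        try (BufferedReader reader = new BufferedReader(
            new InputStreamReader(in, StandardCharsets.UTF_8))) {
          String line;
          while ((line = reader.readLine()) != null) {
            if (logLines) {
              System.out.println(line);
            }
          }
        } catch (IOException e) {
          // stream closed while reading; nothing more to drain
        }
      }
    });
    t.start();
    return t;
  }

  public static void main(String[] args) throws Exception {
    Process process = Runtime.getRuntime().exec("openssl version");
    Thread out = drain(process.getInputStream(), true);   // child's stdout, logged
    Thread err = drain(process.getErrorStream(), false);  // child's stderr, discarded
    int exitCode = process.waitFor();                      // safe: both pipes drained
    out.join();
    err.join();
    System.out.println("exit code: " + exitCode);
  }
}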

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/732569db/slider-core/src/main/resources/org/apache/slider/providers/slideram/instance/appconf.json
----------------------------------------------------------------------
diff --git a/slider-core/src/main/resources/org/apache/slider/providers/slideram/instance/appconf.json b/slider-core/src/main/resources/org/apache/slider/providers/slideram/instance/appconf.json
index 89095b1..81239a2 100644
--- a/slider-core/src/main/resources/org/apache/slider/providers/slideram/instance/appconf.json
+++ b/slider-core/src/main/resources/org/apache/slider/providers/slideram/instance/appconf.json
@@ -12,8 +12,7 @@
 
   "components": {
     "slider-appmaster" : {
-      "jvm.heapsize": "256M",
-      "ssl.server.keystore.location": "/tmp/work/security/keystore.p12"
+      "jvm.heapsize": "256M"
     }
 
   }

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/732569db/slider-core/src/test/groovy/org/apache/slider/providers/agent/TestAgentAMManagementWS.groovy
----------------------------------------------------------------------
diff --git a/slider-core/src/test/groovy/org/apache/slider/providers/agent/TestAgentAMManagementWS.groovy b/slider-core/src/test/groovy/org/apache/slider/providers/agent/TestAgentAMManagementWS.groovy
index 6720bda..7a39035 100644
--- a/slider-core/src/test/groovy/org/apache/slider/providers/agent/TestAgentAMManagementWS.groovy
+++ b/slider-core/src/test/groovy/org/apache/slider/providers/agent/TestAgentAMManagementWS.groovy
@@ -22,11 +22,19 @@ import com.sun.jersey.api.client.Client
 import com.sun.jersey.api.client.WebResource
 import groovy.transform.CompileStatic
 import groovy.util.logging.Slf4j
+import org.apache.hadoop.fs.Path
+import org.apache.hadoop.yarn.exceptions.YarnException
 import org.apache.slider.api.StatusKeys
 import org.apache.slider.client.SliderClient
 import org.apache.slider.common.SliderKeys
+import org.apache.slider.common.params.AbstractClusterBuildingActionArgs
+import org.apache.slider.core.build.InstanceBuilder
+import org.apache.slider.core.conf.AggregateConf
 import org.apache.slider.core.conf.MapOperations
+import org.apache.slider.core.exceptions.SliderException
+import org.apache.slider.core.launch.LaunchedApplication
 import org.apache.slider.core.main.ServiceLauncher
+import org.apache.slider.core.persist.LockAcquireFailedException
 import org.apache.slider.server.appmaster.web.rest.agent.RegistrationResponse
 import org.apache.slider.server.appmaster.web.rest.agent.RegistrationStatus
 import org.apache.slider.server.services.security.CertificateManager
@@ -113,55 +121,86 @@ class TestAgentAMManagementWS extends AgentTestBase {
     assert app_def_path.exists()
     assert agt_ver_path.exists()
     assert agt_conf_path.exists()
-    ServiceLauncher<SliderClient> launcher = buildAgentCluster(clustername,
-        roles,
-        [
-            ARG_OPTION, PACKAGE_PATH, slider_core.absolutePath,
-            ARG_OPTION, APP_DEF, toURIArg(app_def_path),
-            ARG_OPTION, AGENT_CONF, toURIArg(agt_conf_path),
-            ARG_OPTION, AGENT_VERSION, toURIArg(agt_ver_path),
-        ],
-        true, true,
-        true)
-    SliderClient sliderClient = launcher.service
-    def report = waitForClusterLive(sliderClient)
-    def trackingUrl = report.trackingUrl
-    log.info("tracking URL is $trackingUrl")
-    def agent_url = trackingUrl + AGENT_URI
-
-    
-    def status = dumpClusterStatus(sliderClient, "agent AM")
-    def liveURL = status.getInfo(StatusKeys.INFO_AM_AGENT_OPS_URL)
-    if (liveURL) {
-      agent_url = liveURL + AGENT_URI
+    try {
+        setSliderClientClassName(TestSliderClient.name)
+        ServiceLauncher<SliderClient> launcher = buildAgentCluster(clustername,
+            roles,
+            [
+                ARG_OPTION, PACKAGE_PATH, slider_core.absolutePath,
+                ARG_OPTION, APP_DEF, toURIArg(app_def_path),
+                ARG_OPTION, AGENT_CONF, toURIArg(agt_conf_path),
+                ARG_OPTION, AGENT_VERSION, toURIArg(agt_ver_path),
+            ],
+            true, true,
+            true)
+        SliderClient sliderClient = launcher.service
+        def report = waitForClusterLive(sliderClient)
+        def trackingUrl = report.trackingUrl
+        log.info("tracking URL is $trackingUrl")
+        def agent_url = trackingUrl + AGENT_URI
+
+
+        def status = dumpClusterStatus(sliderClient, "agent AM")
+        def liveURL = status.getInfo(StatusKeys.INFO_AM_AGENT_OPS_URL)
+        if (liveURL) {
+          agent_url = liveURL + AGENT_URI
+        }
+
+        log.info("Agent  is $agent_url")
+        log.info("stacks is ${liveURL}stacks")
+        log.info("conf   is ${liveURL}conf")
+
+
+        def sleeptime = 10
+        log.info "sleeping for $sleeptime seconds"
+        Thread.sleep(sleeptime * 1000)
+
+
+        String page = fetchWebPageWithoutError(agent_url);
+        log.info(page);
+
+        //WS get
+        Client client = createTestClient();
+
+
+        WebResource webResource = client.resource(agent_url + "test/register");
+        RegistrationResponse response = webResource.type(MediaType.APPLICATION_JSON)
+                              .post(
+            RegistrationResponse.class,
+            createDummyJSONRegister());
+
+        //TODO: assert failure as actual agent is not started. This test only starts the AM.
+        assert RegistrationStatus.FAILED == response.getResponseStatus();
+    } finally {
+        setSliderClientClassName(SliderClient.name)
     }
     
-    log.info("Agent  is $agent_url")
-    log.info("stacks is ${liveURL}stacks")
-    log.info("conf   is ${liveURL}conf")
-
-
-    def sleeptime = 10
-    log.info "sleeping for $sleeptime seconds"
-    Thread.sleep(sleeptime * 1000)
-    
-
-    String page = fetchWebPageWithoutError(agent_url);
-    log.info(page);
-    
-    //WS get
-    Client client = createTestClient();
-
-
-    WebResource webResource = client.resource(agent_url + "test/register");
-    RegistrationResponse response = webResource.type(MediaType.APPLICATION_JSON)
-                          .post(
-        RegistrationResponse.class,
-        createDummyJSONRegister());
+  }
 
-    //TODO: assert failure as actual agent is not started. This test only starts the AM.
-    assert RegistrationStatus.FAILED == response.getResponseStatus();
-    
+  static class TestSliderClient extends SliderClient {
+      @Override
+      protected void persistInstanceDefinition(boolean overwrite,
+                                               Path appconfdir,
+                                               InstanceBuilder builder)
+      throws IOException, SliderException, LockAcquireFailedException {
+          AggregateConf conf = builder.getInstanceDescription()
+          conf.getAppConfOperations().getComponent("slider-appmaster").put(
+                  "ssl.server.keystore.location",
+                  "/tmp/work/security/keystore.p12")
+          super.persistInstanceDefinition(overwrite, appconfdir, builder)
+      }
+
+      @Override
+      LaunchedApplication launchApplication(String clustername,
+                                            Path clusterDirectory,
+                                            AggregateConf instanceDefinition,
+                                            boolean debugAM)
+      throws YarnException, IOException {
+          instanceDefinition.getAppConfOperations().getComponent("slider-appmaster").put(
+                  "ssl.server.keystore.location",
+                  "/tmp/work/security/keystore.p12")
+          return super.launchApplication(clustername, clusterDirectory, instanceDefinition, debugAM)
+      }
   }
 
 

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/732569db/slider-core/src/test/groovy/org/apache/slider/test/SliderTestUtils.groovy
----------------------------------------------------------------------
diff --git a/slider-core/src/test/groovy/org/apache/slider/test/SliderTestUtils.groovy b/slider-core/src/test/groovy/org/apache/slider/test/SliderTestUtils.groovy
index 9266634..b6d3e17 100644
--- a/slider-core/src/test/groovy/org/apache/slider/test/SliderTestUtils.groovy
+++ b/slider-core/src/test/groovy/org/apache/slider/test/SliderTestUtils.groovy
@@ -66,6 +66,7 @@ import static Arguments.ARG_OPTION
 @Slf4j
 @CompileStatic
 class SliderTestUtils extends Assert {
+  static String sliderClientClassName = SliderClient.class.name
 
   public static void describe(String s) {
     log.info("");
@@ -480,7 +481,7 @@ class SliderTestUtils extends Assert {
       Configuration conf,
       List args) {
     ServiceLauncher<SliderClient> serviceLauncher =
-        new ServiceLauncher<SliderClient>(SliderClient.name);
+        new ServiceLauncher<SliderClient>(sliderClientClassName);
 
     log.debug("slider ${SliderUtils.join(args, " ", false)}")
     serviceLauncher.launchService(conf,


[04/12] git commit: SLIDER-389. Upgrade kazoo code base to the latest version

Posted by st...@apache.org.
SLIDER-389. Upgrade kazoo code base to the latest version


Project: http://git-wip-us.apache.org/repos/asf/incubator-slider/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-slider/commit/ce647d39
Tree: http://git-wip-us.apache.org/repos/asf/incubator-slider/tree/ce647d39
Diff: http://git-wip-us.apache.org/repos/asf/incubator-slider/diff/ce647d39

Branch: refs/heads/feature/SLIDER-149_Support_a_YARN_service_registry
Commit: ce647d399acf5b2d90e51a8509633183438e11d3
Parents: 732569d
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Fri Sep 26 14:20:49 2014 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Fri Sep 26 14:20:49 2014 -0700

----------------------------------------------------------------------
 slider-agent/src/main/python/kazoo/client.py    | 56 +++++++-----
 .../src/main/python/kazoo/handlers/utils.py     | 42 +--------
 .../main/python/kazoo/protocol/connection.py    | 74 ++++++++--------
 .../src/main/python/kazoo/tests/test_client.py  | 55 ++++++------
 .../main/python/kazoo/tests/test_connection.py  | 89 ++++++++------------
 5 files changed, 137 insertions(+), 179 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/ce647d39/slider-agent/src/main/python/kazoo/client.py
----------------------------------------------------------------------
diff --git a/slider-agent/src/main/python/kazoo/client.py b/slider-agent/src/main/python/kazoo/client.py
index 11d9008..47545ee 100644
--- a/slider-agent/src/main/python/kazoo/client.py
+++ b/slider-agent/src/main/python/kazoo/client.py
@@ -20,7 +20,7 @@ from kazoo.exceptions import (
     WriterNotClosedException,
 )
 from kazoo.handlers.threading import SequentialThreadingHandler
-from kazoo.handlers.utils import capture_exceptions, wrap, pipe_or_sock_write
+from kazoo.handlers.utils import capture_exceptions, wrap
 from kazoo.hosts import collect_hosts
 from kazoo.loggingsupport import BLATHER
 from kazoo.protocol.connection import ConnectionHandler
@@ -220,7 +220,6 @@ class KazooClient(object):
         elif type(command_retry) is KazooRetry:
             self.retry = command_retry
 
-
         if type(self._conn_retry) is KazooRetry:
             if self.handler.sleep_func != self._conn_retry.sleep_func:
                 raise ConfigurationError("Retry handler and event handler "
@@ -228,19 +227,21 @@ class KazooClient(object):
 
         if type(self.retry) is KazooRetry:
             if self.handler.sleep_func != self.retry.sleep_func:
-                raise ConfigurationError("Command retry handler and event "
-                                         "handler must use the same sleep func")
+                raise ConfigurationError(
+                    "Command retry handler and event handler "
+                    "must use the same sleep func")
 
         if self.retry is None or self._conn_retry is None:
             old_retry_keys = dict(_RETRY_COMPAT_DEFAULTS)
             for key in old_retry_keys:
                 try:
                     old_retry_keys[key] = kwargs.pop(key)
-                    warnings.warn('Passing retry configuration param %s to the'
-                            ' client directly is deprecated, please pass a'
-                            ' configured retry object (using param %s)' % (
-                                key, _RETRY_COMPAT_MAPPING[key]),
-                            DeprecationWarning, stacklevel=2)
+                    warnings.warn(
+                        'Passing retry configuration param %s to the '
+                        'client directly is deprecated, please pass a '
+                        'configured retry object (using param %s)' % (
+                            key, _RETRY_COMPAT_MAPPING[key]),
+                        DeprecationWarning, stacklevel=2)
                 except KeyError:
                     pass
 
@@ -258,12 +259,13 @@ class KazooClient(object):
                     **retry_keys)
 
         self._conn_retry.interrupt = lambda: self._stopped.is_set()
-        self._connection = ConnectionHandler(self, self._conn_retry.copy(),
-            logger=self.logger)
+        self._connection = ConnectionHandler(
+            self, self._conn_retry.copy(), logger=self.logger)
 
         # Every retry call should have its own copy of the retry helper
         # to avoid shared retry counts
         self._retry = self.retry
+
         def _retry(*args, **kwargs):
             return self._retry.copy()(*args, **kwargs)
         self.retry = _retry
@@ -282,7 +284,7 @@ class KazooClient(object):
         self.Semaphore = partial(Semaphore, self)
         self.ShallowParty = partial(ShallowParty, self)
 
-         # If we got any unhandled keywords, complain like python would
+        # If we got any unhandled keywords, complain like Python would
         if kwargs:
             raise TypeError('__init__() got unexpected keyword arguments: %s'
                             % (kwargs.keys(),))
@@ -433,7 +435,8 @@ class KazooClient(object):
             return
 
         if state in (KeeperState.CONNECTED, KeeperState.CONNECTED_RO):
-            self.logger.info("Zookeeper connection established, state: %s", state)
+            self.logger.info("Zookeeper connection established, "
+                             "state: %s", state)
             self._live.set()
             self._make_state_change(KazooState.CONNECTED)
         elif state in LOST_STATES:
@@ -510,12 +513,12 @@ class KazooClient(object):
         self._queue.append((request, async_object))
 
         # wake the connection, guarding against a race with close()
-        write_pipe = self._connection._write_pipe
-        if write_pipe is None:
+        write_sock = self._connection._write_sock
+        if write_sock is None:
             async_object.set_exception(ConnectionClosedError(
                 "Connection has been closed"))
         try:
-            pipe_or_sock_write(write_pipe, b'\0')
+            write_sock.send(b'\0')
         except:
             async_object.set_exception(ConnectionClosedError(
                 "Connection has been closed"))
@@ -585,7 +588,7 @@ class KazooClient(object):
 
         self._stopped.set()
         self._queue.append((CloseInstance, None))
-        pipe_or_sock_write(self._connection._write_pipe, b'\0')
+        self._connection._write_sock.send(b'\0')
         self._safe_close()
 
     def restart(self):
@@ -622,7 +625,7 @@ class KazooClient(object):
         if not self._live.is_set():
             raise ConnectionLoss("No connection to server")
 
-        peer = self._connection._socket.getpeername()
+        peer = self._connection._socket.getpeername()[:2]
         sock = self.handler.create_connection(
             peer, timeout=self._session_timeout / 1000.0)
         sock.sendall(cmd)
@@ -786,7 +789,7 @@ class KazooClient(object):
         """
         acl = acl or self.default_acl
         return self.create_async(path, value, acl=acl, ephemeral=ephemeral,
-            sequence=sequence, makepath=makepath).get()
+                                 sequence=sequence, makepath=makepath).get()
 
     def create_async(self, path, value=b"", acl=None, ephemeral=False,
                      sequence=False, makepath=False):
@@ -828,7 +831,8 @@ class KazooClient(object):
 
         @capture_exceptions(async_result)
         def do_create():
-            result = self._create_async_inner(path, value, acl, flags, trailing=sequence)
+            result = self._create_async_inner(
+                path, value, acl, flags, trailing=sequence)
             result.rawlink(create_completion)
 
         @capture_exceptions(async_result)
@@ -867,10 +871,13 @@ class KazooClient(object):
         return async_result
 
     def ensure_path(self, path, acl=None):
-        """Recursively create a path if it doesn't exist.
+        """Recursively create a path if it doesn't exist. Also return value indicates
+        if path already existed or had to be created.
 
         :param path: Path of node.
         :param acl: Permissions for node.
+        :returns: `True` if path existed, `False` otherwise.
+        :rtype: bool
 
         """
         return self.ensure_path_async(path, acl).get()
@@ -1291,6 +1298,13 @@ class TransactionRequest(object):
     Transactions are not thread-safe and should not be accessed from
     multiple threads at once.
 
+    .. note::
+
+        The ``committed`` attribute only indicates whether this
+        transaction has been sent to Zookeeper and is used to prevent
+        duplicate commits of the same transaction. The result should be
+        checked to determine if the transaction executed as desired.
+
     .. versionadded:: 0.6
         Requires Zookeeper 3.4+
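
A minimal usage sketch for the docstring additions above (the ensure_path return value and the note on TransactionRequest.committed). The ZooKeeper address and paths are placeholders, not taken from the commit:

    from kazoo.client import KazooClient

    client = KazooClient(hosts="127.0.0.1:2181")  # placeholder ensemble address
    client.start()

    # Per the updated docstring, ensure_path() reports whether the path
    # already existed (True) or had to be created (False).
    existed = client.ensure_path("/example/app")

    # `committed` only records that the transaction was sent; check the
    # per-operation results to see whether it actually did what was asked.
    tx = client.transaction()
    tx.create("/example/app/node", b"value")
    tx.set_data("/example/app", b"updated")
    results = tx.commit()

    client.stop()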
 

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/ce647d39/slider-agent/src/main/python/kazoo/handlers/utils.py
----------------------------------------------------------------------
diff --git a/slider-agent/src/main/python/kazoo/handlers/utils.py b/slider-agent/src/main/python/kazoo/handlers/utils.py
index 385495e..93cfdb5 100644
--- a/slider-agent/src/main/python/kazoo/handlers/utils.py
+++ b/slider-agent/src/main/python/kazoo/handlers/utils.py
@@ -23,47 +23,7 @@ def _set_default_tcpsock_options(module, sock):
         _set_fd_cloexec(sock)
     return sock
 
-def pipe_or_sock_read(p_or_s, n):
-    ''' Use a socket or a pipe to read something'''
-    if isinstance(p_or_s, int):
-        # This is a pipe
-        return os.read(p_or_s, n)
-    else:
-        return p_or_s.recv(n)
-
-def pipe_or_sock_close(p_or_s):
-    ''' Closes either a socket or a pipe'''
-    if isinstance(p_or_s, int):
-        os.close(p_or_s)
-    else:
-        p_or_s.close()
-        
-def pipe_or_sock_write(p_or_s, b):
-    ''' Read from a socket or a pipe depending on what is passed'''
-    if isinstance(p_or_s, int):
-        # This is a pipe
-        os.write(p_or_s,b)
-    else:
-        p_or_s.send(b)
-        
-def create_pipe_or_sock():
-    """ Create a non-blocking read/write pipe.
-        On Windows create a pair of sockets
-    """
-    if sys.platform == "win32":
-        r, w = create_sock_pair()
-    else:
-        r, w = os.pipe()
-        if HAS_FNCTL:
-            fcntl.fcntl(r, fcntl.F_SETFL, os.O_NONBLOCK)
-            fcntl.fcntl(w, fcntl.F_SETFL, os.O_NONBLOCK)
-            _set_fd_cloexec(r)
-            _set_fd_cloexec(w)
-    return r, w
-
-
-
-def create_sock_pair(port=0):
+def create_socket_pair(port=0):
     """Create socket pair.
 
     If socket.socketpair isn't available, we emulate it.
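
Because socket.socketpair() is missing on some platforms (notably Windows before Python 3.5), a helper like create_socket_pair has to fall back to a loopback connection. A rough, self-contained sketch of that technique, illustrative only and not the module's exact implementation:

    import socket

    def make_socket_pair(port=0):
        # Use the native socketpair() where the platform provides it.
        if hasattr(socket, "socketpair"):
            return socket.socketpair()
        # Otherwise emulate it by connecting two TCP sockets over loopback.
        listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        listener.bind(("127.0.0.1", port))
        listener.listen(1)
        client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client_sock.connect(listener.getsockname())
        server_sock, _ = listener.accept()
        listener.close()
        return client_sock, server_sock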

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/ce647d39/slider-agent/src/main/python/kazoo/protocol/connection.py
----------------------------------------------------------------------
diff --git a/slider-agent/src/main/python/kazoo/protocol/connection.py b/slider-agent/src/main/python/kazoo/protocol/connection.py
index 7872928..6b89c18 100644
--- a/slider-agent/src/main/python/kazoo/protocol/connection.py
+++ b/slider-agent/src/main/python/kazoo/protocol/connection.py
@@ -17,7 +17,7 @@ from kazoo.exceptions import (
     SessionExpiredError,
     NoNodeError
 )
-from kazoo.handlers.utils import create_pipe_or_sock, pipe_or_sock_read, pipe_or_sock_write, pipe_or_sock_close
+from kazoo.handlers.utils import create_socket_pair
 from kazoo.loggingsupport import BLATHER
 from kazoo.protocol.serialization import (
     Auth,
@@ -146,8 +146,8 @@ class ConnectionHandler(object):
         self.connection_stopped.set()
         self.ping_outstanding = client.handler.event_object()
 
-        self._read_pipe = None
-        self._write_pipe = None
+        self._read_sock = None
+        self._write_sock = None
 
         self._socket = None
         self._xid = None
@@ -169,7 +169,7 @@ class ConnectionHandler(object):
     def start(self):
         """Start the connection up"""
         if self.connection_closed.is_set():
-            self._read_pipe, self._write_pipe = create_pipe_or_sock()
+            self._read_sock, self._write_sock = create_socket_pair()
             self.connection_closed.clear()
         if self._connection_routine:
             raise Exception("Unable to start, connection routine already "
@@ -192,12 +192,12 @@ class ConnectionHandler(object):
         if not self.connection_stopped.is_set():
             raise Exception("Cannot close connection until it is stopped")
         self.connection_closed.set()
-        wp, rp = self._write_pipe, self._read_pipe
-        self._write_pipe = self._read_pipe = None
-        if wp is not None:
-            pipe_or_sock_close(wp)
-        if rp is not None:
-            pipe_or_sock_close(rp)
+        ws, rs = self._write_sock, self._read_sock
+        self._write_sock = self._read_sock = None
+        if ws is not None:
+            ws.close()
+        if rs is not None:
+            rs.close()
 
     def _server_pinger(self):
         """Returns a server pinger iterable, that will ping the next
@@ -238,8 +238,8 @@ class ConnectionHandler(object):
         if xid:
             header, buffer, offset = self._read_header(timeout)
             if header.xid != xid:
-                raise RuntimeError('xids do not match, expected %r received %r',
-                                   xid, header.xid)
+                raise RuntimeError('xids do not match, expected %r '
+                                   'received %r', xid, header.xid)
             if header.zxid > 0:
                 zxid = header.zxid
             if header.err:
@@ -257,8 +257,9 @@ class ConnectionHandler(object):
             try:
                 obj, _ = request.deserialize(msg, 0)
             except Exception:
-                self.logger.exception("Exception raised during deserialization"
-                                      " of request: %s", request)
+                self.logger.exception(
+                    "Exception raised during deserialization "
+                    "of request: %s", request)
 
                 # raise ConnectionDropped so connect loop will retry
                 raise ConnectionDropped('invalid server response')
@@ -276,8 +277,9 @@ class ConnectionHandler(object):
         if request.type:
             b.extend(int_struct.pack(request.type))
         b += request.serialize()
-        self.logger.log((BLATHER if isinstance(request, Ping) else logging.DEBUG),
-                        "Sending request(xid=%s): %s", xid, request)
+        self.logger.log(
+            (BLATHER if isinstance(request, Ping) else logging.DEBUG),
+            "Sending request(xid=%s): %s", xid, request)
         self._write(int_struct.pack(len(b)) + b, timeout)
 
     def _write(self, msg, timeout):
@@ -358,8 +360,9 @@ class ConnectionHandler(object):
                 try:
                     response = request.deserialize(buffer, offset)
                 except Exception as exc:
-                    self.logger.exception("Exception raised during deserialization"
-                                          " of request: %s", request)
+                    self.logger.exception(
+                        "Exception raised during deserialization "
+                        "of request: %s", request)
                     async_object.set_exception(exc)
                     return
                 self.logger.debug(
@@ -415,11 +418,11 @@ class ConnectionHandler(object):
         except IndexError:
             # Not actually something on the queue, this can occur if
             # something happens to cancel the request such that we
-            # don't clear the pipe below after sending
+            # don't clear the socket below after sending
             try:
                 # Clear possible inconsistence (no request in the queue
-                # but have data in the read pipe), which causes cpu to spin.
-                pipe_or_sock_read(self._read_pipe, 1)
+                # but have data in the read socket), which causes cpu to spin.
+                self._read_sock.recv(1)
             except OSError:
                 pass
             return
@@ -440,7 +443,7 @@ class ConnectionHandler(object):
 
         self._submit(request, connect_timeout, xid)
         client._queue.popleft()
-        pipe_or_sock_read(self._read_pipe, 1)
+        self._read_sock.recv(1)
         client._pending.append((request, async_object, xid))
 
     def _send_ping(self, connect_timeout):
@@ -519,7 +522,7 @@ class ConnectionHandler(object):
                 jitter_time = random.randint(0, 40) / 100.0
                 # Ensure our timeout is positive
                 timeout = max([read_timeout / 2.0 - jitter_time, jitter_time])
-                s = self.handler.select([self._socket, self._read_pipe],
+                s = self.handler.select([self._socket, self._read_sock],
                                         [], [], timeout)[0]
 
                 if not s:
@@ -570,9 +573,9 @@ class ConnectionHandler(object):
         self.logger.info('Connecting to %s:%s', host, port)
 
         self.logger.log(BLATHER,
-                          '    Using session_id: %r session_passwd: %s',
-                          client._session_id,
-                          hexlify(client._session_passwd))
+                        '    Using session_id: %r session_passwd: %s',
+                        client._session_id,
+                        hexlify(client._session_passwd))
 
         with self._socket_error_handling():
             self._socket = self.handler.create_connection(
@@ -584,7 +587,8 @@ class ConnectionHandler(object):
                           client._session_id or 0, client._session_passwd,
                           client.read_only)
 
-        connect_result, zxid = self._invoke(client._session_timeout, connect)
+        connect_result, zxid = self._invoke(
+            client._session_timeout / 1000.0, connect)
 
         if connect_result.time_out <= 0:
             raise SessionExpiredError("Session has expired")
@@ -601,13 +605,13 @@ class ConnectionHandler(object):
         client._session_passwd = connect_result.passwd
 
         self.logger.log(BLATHER,
-                          'Session created, session_id: %r session_passwd: %s\n'
-                          '    negotiated session timeout: %s\n'
-                          '    connect timeout: %s\n'
-                          '    read timeout: %s', client._session_id,
-                          hexlify(client._session_passwd),
-                          negotiated_session_timeout, connect_timeout,
-                          read_timeout)
+                        'Session created, session_id: %r session_passwd: %s\n'
+                        '    negotiated session timeout: %s\n'
+                        '    connect timeout: %s\n'
+                        '    read timeout: %s', client._session_id,
+                        hexlify(client._session_passwd),
+                        negotiated_session_timeout, connect_timeout,
+                        read_timeout)
 
         if connect_result.read_only:
             client._session_callback(KeeperState.CONNECTED_RO)
@@ -618,7 +622,7 @@ class ConnectionHandler(object):
 
         for scheme, auth in client.auth_data:
             ap = Auth(0, scheme, auth)
-            zxid = self._invoke(connect_timeout, ap, xid=AUTH_XID)
+            zxid = self._invoke(connect_timeout / 1000.0, ap, xid=AUTH_XID)
             if zxid:
                 client.last_zxid = zxid
         return read_timeout, connect_timeout
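
The hunks above swap the pipe-based wake-up for the socket pair: _submit() pokes _write_sock, and the connection loop selects on _read_sock and drains the stray byte so it does not spin. A condensed sketch of that pattern, assuming a platform where socket.socketpair() is available and with the kazoo internals elided:

    import select
    import socket

    # Stand-ins for the sockets returned by create_socket_pair().
    read_sock, write_sock = socket.socketpair()

    # Writer side: after queueing a request, poke the socket to wake the loop.
    write_sock.send(b'\0')

    # Reader side: wait on the wake-up socket (the real loop also waits on the
    # ZooKeeper connection socket), then drain the byte so select() stays quiet.
    ready, _, _ = select.select([read_sock], [], [], 1.0)
    if read_sock in ready:
        read_sock.recv(1)

The same change also divides client._session_timeout (milliseconds) by 1000.0 before handing it to _invoke(), which works in seconds.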

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/ce647d39/slider-agent/src/main/python/kazoo/tests/test_client.py
----------------------------------------------------------------------
diff --git a/slider-agent/src/main/python/kazoo/tests/test_client.py b/slider-agent/src/main/python/kazoo/tests/test_client.py
index 5807292..f851b63 100644
--- a/slider-agent/src/main/python/kazoo/tests/test_client.py
+++ b/slider-agent/src/main/python/kazoo/tests/test_client.py
@@ -72,7 +72,8 @@ class TestClientConstructor(unittest.TestCase):
 
     def test_invalid_handler(self):
         from kazoo.handlers.threading import SequentialThreadingHandler
-        self.assertRaises(ConfigurationError,
+        self.assertRaises(
+            ConfigurationError,
             self._makeOne, handler=SequentialThreadingHandler)
 
     def test_chroot(self):
@@ -91,7 +92,7 @@ class TestClientConstructor(unittest.TestCase):
 
     def test_ordered_host_selection(self):
         client = self._makeOne(hosts='127.0.0.1:9,127.0.0.2:9/a',
-            randomize_hosts=False)
+                               randomize_hosts=False)
         hosts = [h for h in client.hosts]
         eq_(hosts, [('127.0.0.1', 9), ('127.0.0.2', 9)])
 
@@ -371,29 +372,29 @@ class TestConnection(KazooTestCase):
         client = self.client
         client.stop()
 
-        write_pipe = client._connection._write_pipe
+        write_sock = client._connection._write_sock
 
-        # close the connection to free the pipe
+        # close the connection to free the socket
         client.close()
-        eq_(client._connection._write_pipe, None)
+        eq_(client._connection._write_sock, None)
 
         # sneak in and patch client to simulate race between a thread
         # calling stop(); close() and one running a command
         oldstate = client._state
         client._state = KeeperState.CONNECTED
-        client._connection._write_pipe = write_pipe
+        client._connection._write_sock = write_sock
         try:
-            # simulate call made after write pipe is closed
+            # simulate call made after write socket is closed
             self.assertRaises(ConnectionClosedError, client.exists, '/')
 
-            # simualte call made after write pipe is set to None
-            client._connection._write_pipe = None
+            # simulate call made after write socket is set to None
+            client._connection._write_sock = None
             self.assertRaises(ConnectionClosedError, client.exists, '/')
 
         finally:
             # reset for teardown
             client._state = oldstate
-            client._connection._write_pipe = None
+            client._connection._write_sock = None
 
 
 class TestClient(KazooTestCase):
@@ -544,9 +545,9 @@ class TestClient(KazooTestCase):
         client = self.client
         client.create("/1", b"ephemeral", ephemeral=True)
         self.assertRaises(NoChildrenForEphemeralsError,
-            client.create, "/1/2", b"val1")
+                          client.create, "/1/2", b"val1")
         self.assertRaises(NoChildrenForEphemeralsError,
-            client.create, "/1/2", b"val1", ephemeral=True)
+                          client.create, "/1/2", b"val1", ephemeral=True)
 
     def test_create_sequence(self):
         client = self.client
@@ -560,8 +561,8 @@ class TestClient(KazooTestCase):
 
     def test_create_ephemeral_sequence(self):
         basepath = "/" + uuid.uuid4().hex
-        realpath = self.client.create(basepath, b"sandwich", sequence=True,
-            ephemeral=True)
+        realpath = self.client.create(basepath, b"sandwich",
+                                      sequence=True, ephemeral=True)
         self.assertTrue(basepath != realpath and realpath.startswith(basepath))
         data, stat = self.client.get(realpath)
         eq_(data, b"sandwich")
@@ -575,33 +576,35 @@ class TestClient(KazooTestCase):
         data, stat = self.client.get("/1/2/3/4/5")
         eq_(data, b"val2")
 
-        self.assertRaises(NodeExistsError, self.client.create, "/1/2/3/4/5",
-            b"val2", makepath=True)
+        self.assertRaises(NodeExistsError, self.client.create,
+                          "/1/2/3/4/5", b"val2", makepath=True)
 
     def test_create_makepath_incompatible_acls(self):
         from kazoo.client import KazooClient
         from kazoo.security import make_digest_acl_credential, CREATOR_ALL_ACL
         credential = make_digest_acl_credential("username", "password")
-        alt_client = KazooClient(self.cluster[0].address + self.client.chroot,
+        alt_client = KazooClient(
+            self.cluster[0].address + self.client.chroot,
             max_retries=5, auth_data=[("digest", credential)])
         alt_client.start()
         alt_client.create("/1/2", b"val2", makepath=True, acl=CREATOR_ALL_ACL)
 
         try:
-            self.assertRaises(NoAuthError, self.client.create, "/1/2/3/4/5",
-                b"val2", makepath=True)
+            self.assertRaises(NoAuthError, self.client.create,
+                              "/1/2/3/4/5", b"val2", makepath=True)
         finally:
             alt_client.delete('/', recursive=True)
             alt_client.stop()
 
     def test_create_no_makepath(self):
-        self.assertRaises(NoNodeError, self.client.create, "/1/2", b"val1")
-        self.assertRaises(NoNodeError, self.client.create, "/1/2", b"val1",
-            makepath=False)
+        self.assertRaises(NoNodeError, self.client.create,
+                          "/1/2", b"val1")
+        self.assertRaises(NoNodeError, self.client.create,
+                          "/1/2", b"val1", makepath=False)
 
         self.client.create("/1/2", b"val1", makepath=True)
-        self.assertRaises(NoNodeError, self.client.create, "/1/2/3/4", b"val1",
-            makepath=False)
+        self.assertRaises(NoNodeError, self.client.create,
+                          "/1/2/3/4", b"val1", makepath=False)
 
     def test_create_exists(self):
         from kazoo.exceptions import NodeExistsError
@@ -844,7 +847,7 @@ class TestClient(KazooTestCase):
         client = self.client
         self.assertRaises(NoNodeError, client.get_children, '/none')
         self.assertRaises(NoNodeError, client.get_children,
-            '/none', include_data=True)
+                          '/none', include_data=True)
 
     def test_get_children_invalid_path(self):
         client = self.client
@@ -855,7 +858,7 @@ class TestClient(KazooTestCase):
         self.assertRaises(TypeError, client.get_children, ('a', 'b'))
         self.assertRaises(TypeError, client.get_children, 'a', watch=True)
         self.assertRaises(TypeError, client.get_children,
-            'a', include_data='yes')
+                          'a', include_data='yes')
 
     def test_invalid_auth(self):
         from kazoo.exceptions import AuthFailedError

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/ce647d39/slider-agent/src/main/python/kazoo/tests/test_connection.py
----------------------------------------------------------------------
diff --git a/slider-agent/src/main/python/kazoo/tests/test_connection.py b/slider-agent/src/main/python/kazoo/tests/test_connection.py
index d89a2c6..c8c4581 100644
--- a/slider-agent/src/main/python/kazoo/tests/test_connection.py
+++ b/slider-agent/src/main/python/kazoo/tests/test_connection.py
@@ -43,8 +43,9 @@ class Delete(namedtuple('Delete', 'path version')):
 class TestConnectionHandler(KazooTestCase):
     def test_bad_deserialization(self):
         async_object = self.client.handler.async_result()
-        self.client._queue.append((Delete(self.client.chroot, -1), async_object))
-        os.write(self.client._connection._write_pipe, b'\0')
+        self.client._queue.append(
+            (Delete(self.client.chroot, -1), async_object))
+        self.client._connection._write_sock.send(b'\0')
 
         @raises(ValueError)
         def testit():
@@ -185,75 +186,51 @@ class TestConnectionHandler(KazooTestCase):
         # should be able to restart
         self.client.start()
 
-    def test_connection_pipe(self):
+    def test_connection_sock(self):
         client = self.client
-        read_pipe = client._connection._read_pipe
-        write_pipe = client._connection._write_pipe
+        read_sock = client._connection._read_sock
+        write_sock = client._connection._write_sock
 
-        assert read_pipe is not None
-        assert write_pipe is not None
+        assert read_sock is not None
+        assert write_sock is not None
 
-        # stop client and pipe should not yet be closed
+        # stop client and socket should not yet be closed
         client.stop()
-        assert read_pipe is not None
-        assert write_pipe is not None
-        if sys.platform != "win32"
-            os.fstat(read_pipe)
-            os.fstat(write_pipe)
-        else:
-            read_pipe.getsockname()
-            write_pipe.getsockname()
+        assert read_sock is not None
+        assert write_sock is not None
+        
+        read_sock.getsockname()
+        write_sock.getsockname()
 
-        # close client, and pipes should be
+        # close client, and sockets should be closed
         client.close()
 
-        if sys.platform != "win32"
-            try:
-                os.fstat(read_pipe)
-            except OSError as e:
-                if not e.errno == errno.EBADF:
-                    raise
-            else:
-                self.fail("Expected read_pipe to be closed")
-    
-            try:
-                os.fstat(write_pipe)
-            except OSError as e:
-                if not e.errno == errno.EBADF:
-                    raise
-            else:
-                self.fail("Expected write_pipe to be closed")
-        else:
-            pass # Not sure what to do here
-            
-        # start client back up. should get a new, valid pipe
+        # Todo check socket closing
+                   
+        # start client back up. should get a new, valid socket
         client.start()
-        read_pipe = client._connection._read_pipe
-        write_pipe = client._connection._write_pipe
-
-        assert read_pipe is not None
-        assert write_pipe is not None
-        if sys.platform != "win32"
-            os.fstat(read_pipe)
-            os.fstat(write_pipe)
-        else:
-            read_pipe.getsockname()
-            write_pipe.getsockname()
+        read_sock = client._connection._read_sock
+        write_sock = client._connection._write_sock
+
+        assert read_sock is not None
+        assert write_sock is not None
+        read_sock.getsockname()
+        write_sock.getsockname()
             
 
-    def test_dirty_pipe(self):
+    def test_dirty_sock(self):
         client = self.client
-        read_pipe = client._connection._read_pipe
-        write_pipe = client._connection._write_pipe
+        read_sock = client._connection._read_sock
+        write_sock = client._connection._write_sock
 
-        # add a stray byte to the pipe and ensure that doesn't
+        # add a stray byte to the socket and ensure that doesn't
         # blow up client. simulates case where some error leaves
-        # a byte in the pipe which doesn't correspond to the
+        # a byte in the socket which doesn't correspond to the
         # request queue.
-        pipe_or_sock_write(write_pipe, b'\0')
+        write_sock.send(b'\0')
 
-        # eventually this byte should disappear from pipe
-        wait(lambda: client.handler.select([read_pipe], [], [], 0)[0] == [])
+        # eventually this byte should disappear from socket
+        wait(lambda: client.handler.select([read_sock], [], [], 0)[0] == [])
 
 
 class TestConnectionDrop(KazooTestCase):


[09/12] git commit: SLIDER-180. Add storm package for windows (I)

Posted by st...@apache.org.
SLIDER-180. Add storm package for windows (I)


Project: http://git-wip-us.apache.org/repos/asf/incubator-slider/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-slider/commit/41ec7410
Tree: http://git-wip-us.apache.org/repos/asf/incubator-slider/tree/41ec7410
Diff: http://git-wip-us.apache.org/repos/asf/incubator-slider/diff/41ec7410

Branch: refs/heads/feature/SLIDER-149_Support_a_YARN_service_registry
Commit: 41ec7410f047e28c114a77f67ac6f97763b139ad
Parents: f392d23
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Tue Sep 30 09:05:31 2014 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Tue Sep 30 09:05:31 2014 -0700

----------------------------------------------------------------------
 app-packages/hbase-win/README.txt               |   3 -
 app-packages/storm-win/README.txt               |  36 ++
 app-packages/storm-win/appConfig-default.json   |  41 ++
 .../storm-win/configuration/storm-env.xml       |  65 +++
 .../storm-win/configuration/storm-site.xml      | 580 +++++++++++++++++++
 app-packages/storm-win/metainfo.xml             | 149 +++++
 .../storm-win/package/scripts/drpc_server.py    |  55 ++
 .../storm-win/package/scripts/nimbus.py         |  55 ++
 .../storm-win/package/scripts/params.py         |  56 ++
 .../storm-win/package/scripts/rest_api.py       |  57 ++
 .../storm-win/package/scripts/service.py        |  56 ++
 .../storm-win/package/scripts/status_params.py  |  37 ++
 app-packages/storm-win/package/scripts/storm.py |  53 ++
 .../storm-win/package/scripts/supervisor.py     |  61 ++
 .../storm-win/package/scripts/ui_server.py      |  55 ++
 .../storm-win/package/scripts/yaml_config.py    |  80 +++
 .../storm-win/package/templates/config.yaml.j2  |  28 +
 .../package/templates/storm_jaas.conf.j2        |  44 ++
 app-packages/storm-win/pom.xml                  |  91 +++
 app-packages/storm-win/resources-default.json   |  30 +
 app-packages/storm-win/src/assembly/storm.xml   |  68 +++
 21 files changed, 1697 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/41ec7410/app-packages/hbase-win/README.txt
----------------------------------------------------------------------
diff --git a/app-packages/hbase-win/README.txt b/app-packages/hbase-win/README.txt
index 0e64a84..4e0e30a 100644
--- a/app-packages/hbase-win/README.txt
+++ b/app-packages/hbase-win/README.txt
@@ -22,9 +22,6 @@ These files are included as reference configuration for Slider apps and are suit
 for a one-node cluster.
 
 
-OPTION-I: Use a downloaded hbase tarball fro Windows
-
-****** OPTION - I **
 To create the app package you will need the HBase tarball and invoke mvn command
 with appropriate parameters.
 

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/41ec7410/app-packages/storm-win/README.txt
----------------------------------------------------------------------
diff --git a/app-packages/storm-win/README.txt b/app-packages/storm-win/README.txt
new file mode 100644
index 0000000..8631714
--- /dev/null
+++ b/app-packages/storm-win/README.txt
@@ -0,0 +1,36 @@
+<!---
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+How to create a Slider app package for Storm?
+
+To create the app package you will need the Storm tarball and invoke mvn command
+with appropriate parameters.
+
+Command:
+mvn clean package -Pstorm-app-package-win -Dpkg.version=<version>
+   -Dpkg.name=<file name of app tarball> -Dpkg.src=<folder location where the pkg is available>
+
+Example:
+mvn clean package -Pstorm-app-package-win -Dpkg.version=0.9.3
+   -Dpkg.name=storm-0.9.3.zip -Dpkg.src=/Users/user1/Downloads
+
+App package can be found in
+  app-packages/storm-win/target/slider-storm-app-win-package-${pkg.version}.zip
+
+appConfig-default.json and resources-default.json are not required to be packaged.
+These files are included as reference configuration for Slider apps and are suitable
+for a one-node cluster.

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/41ec7410/app-packages/storm-win/appConfig-default.json
----------------------------------------------------------------------
diff --git a/app-packages/storm-win/appConfig-default.json b/app-packages/storm-win/appConfig-default.json
new file mode 100644
index 0000000..428dea5
--- /dev/null
+++ b/app-packages/storm-win/appConfig-default.json
@@ -0,0 +1,41 @@
+{
+  "schema": "http://example.org/specification/v2.0.0",
+  "metadata": {
+  },
+  "global": {
+    "application.def": ".slider/package/STORM/slider-storm-app-win-package-${pkg.version}.zip",
+    "java_home": "C:\\java",
+    "create.default.zookeeper.node": "true",
+
+    "site.global.app_user": "hadoop",
+    "site.global.app_root": "${AGENT_WORK_ROOT}/app/install/storm-${pkg.version}",
+    "site.global.user_group": "hadoop",
+    "site.global.security_enabled": "false",
+    "site.global.rest_api_port": "${STORM_REST_API.ALLOCATED_PORT}",
+    "site.global.rest_api_admin_port": "${STORM_REST_API.ALLOCATED_PORT}",
+
+    "site.storm-site.storm.log.dir" : "${AGENT_LOG_ROOT}",
+    "site.storm-site.storm.zookeeper.servers": "['${ZK_HOST}']",
+    "site.storm-site.nimbus.thrift.port": "${NIMBUS.ALLOCATED_PORT}",
+    "site.storm-site.storm.local.dir": "${AGENT_WORK_ROOT}/app/tmp/storm",
+    "site.storm-site.transactional.zookeeper.root": "/transactional",
+    "site.storm-site.storm.zookeeper.port": "2181",
+    "site.storm-site.nimbus.childopts": "-Xmx1024m",
+    "site.storm-site.worker.childopts": "-Xmx768m",
+    "site.storm-site.dev.zookeeper.path": "${AGENT_WORK_ROOT}/app/tmp/dev-storm-zookeeper",
+    "site.storm-site.drpc.invocations.port": "0",
+    "site.storm-site.storm.zookeeper.root": "${DEFAULT_ZK_PATH}",
+    "site.storm-site.transactional.zookeeper.port": "null",
+    "site.storm-site.nimbus.host": "${NIMBUS_HOST}",
+    "site.storm-site.ui.port": "${STORM_UI_SERVER.ALLOCATED_PORT}",
+    "site.storm-site.supervisor.slots.ports": "[${SUPERVISOR.ALLOCATED_PORT}{PER_CONTAINER},${SUPERVISOR.ALLOCATED_PORT}{PER_CONTAINER}]",
+    "site.storm-site.supervisor.childopts": "-Xmx256m",
+    "site.storm-site.drpc.port": "0",
+    "site.storm-site.logviewer.port": "${SUPERVISOR.ALLOCATED_PORT}{PER_CONTAINER}"
+  },
+  "components": {
+    "slider-appmaster": {
+      "jvm.heapsize": "256M"
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/41ec7410/app-packages/storm-win/configuration/storm-env.xml
----------------------------------------------------------------------
diff --git a/app-packages/storm-win/configuration/storm-env.xml b/app-packages/storm-win/configuration/storm-env.xml
new file mode 100644
index 0000000..091c08d
--- /dev/null
+++ b/app-packages/storm-win/configuration/storm-env.xml
@@ -0,0 +1,65 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+
+  <property>
+    <name>kerberos_domain</name>
+    <value></value>
+    <description>The kerberos domain to be used for this Storm cluster</description>
+  </property>
+  <property>
+    <name>storm_client_principal_name</name>
+    <value></value>
+    <description>The principal name for the Storm client to be used to communicate with Nimbus and Zookeeper</description>
+  </property>
+  <property>
+    <name>storm_server_principal_name</name>
+    <value></value>
+    <description>The principal name for the Storm server to be used by Nimbus</description>
+  </property>
+  <property>
+    <name>storm_client_keytab</name>
+    <value></value>
+    <description>The keytab file path for Storm client</description>
+  </property>
+  <property>
+    <name>storm_server_keytab</name>
+    <value></value>
+    <description>The keytab file path for Storm server</description>
+  </property>
+  <!-- storm-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for storm-env.sh file</description>
+    <value>
+#!/bin/bash
+
+# Set Storm specific environment variables here.
+
+# The java implementation to use.
+export JAVA_HOME={{java_home}}
+
+# export STORM_CONF_DIR=""
+    </value>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/41ec7410/app-packages/storm-win/configuration/storm-site.xml
----------------------------------------------------------------------
diff --git a/app-packages/storm-win/configuration/storm-site.xml b/app-packages/storm-win/configuration/storm-site.xml
new file mode 100644
index 0000000..b3cce6a
--- /dev/null
+++ b/app-packages/storm-win/configuration/storm-site.xml
@@ -0,0 +1,580 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>java.library.path</name>
+    <value>/usr/local/lib:/opt/local/lib:/usr/lib</value>
+    <description>This value is passed to spawned JVMs (e.g., Nimbus, Supervisor, and Workers)
+       for the java.library.path value. java.library.path tells the JVM where
+       to look for native libraries. It is necessary to set this config correctly since
+       Storm uses the ZeroMQ and JZMQ native libs. </description>
+  </property>
+  <property>
+    <name>storm.local.dir</name>
+    <value>/hadoop/storm</value>
+    <description>A directory on the local filesystem used by Storm for any local
+       filesystem usage it needs. The directory must exist and the Storm daemons must
+       have permission to read/write from this location.</description>
+  </property>
+  <property>
+    <name>storm.zookeeper.servers</name>
+    <value>['localhost']</value>
+    <description>A list of hosts of ZooKeeper servers used to manage the cluster.</description>
+  </property>
+  <property>
+    <name>storm.zookeeper.port</name>
+    <value>2181</value>
+    <description>The port Storm will use to connect to each of the ZooKeeper servers.</description>
+  </property>
+  <property>
+    <name>storm.zookeeper.root</name>
+    <value>/storm</value>
+    <description>The root location at which Storm stores data in ZooKeeper.</description>
+  </property>
+  <property>
+    <name>storm.zookeeper.session.timeout</name>
+    <value>20000</value>
+    <description>The session timeout for clients to ZooKeeper.</description>
+  </property>
+  <property>
+    <name>storm.zookeeper.connection.timeout</name>
+    <value>15000</value>
+    <description>The connection timeout for clients to ZooKeeper.</description>
+  </property>
+  <property>
+    <name>storm.zookeeper.retry.times</name>
+    <value>5</value>
+    <description>The number of times to retry a Zookeeper operation.</description>
+  </property>
+  <property>
+    <name>storm.zookeeper.retry.interval</name>
+    <value>1000</value>
+    <description>The interval between retries of a Zookeeper operation.</description>
+  </property>
+  <property>
+    <name>storm.zookeeper.retry.intervalceiling.millis</name>
+    <value>30000</value>
+    <description>The ceiling of the interval between retries of a Zookeeper operation.</description>
+  </property>
+  <property>
+    <name>storm.cluster.mode</name>
+    <value>distributed</value>
+    <description>The mode this Storm cluster is running in. Either "distributed" or "local".</description>
+  </property>
+  <property>
+    <name>storm.local.mode.zmq</name>
+    <value>false</value>
+    <description>Whether or not to use ZeroMQ for messaging in local mode. If this is set
+       to false, then Storm will use a pure-Java messaging system. The purpose
+       of this flag is to make it easy to run Storm in local mode by eliminating
+       the need for native dependencies, which can be difficult to install.
+    </description>
+  </property>
+  <property>
+    <name>storm.thrift.transport</name>
+    <value>backtype.storm.security.auth.SimpleTransportPlugin</value>
+    <description>The transport plug-in for Thrift client/server communication.</description>
+  </property>
+  <property>
+    <name>storm.messaging.transport</name>
+    <value>backtype.storm.messaging.netty.Context</value>
+    <description>The transporter for communication among Storm tasks.</description>
+  </property>
+  <property>
+    <name>nimbus.host</name>
+    <value>localhost</value>
+    <description>The host that the master server is running on.</description>
+  </property>
+  <property>
+    <name>nimbus.thrift.port</name>
+    <value>6627</value>
+    <description> Which port the Thrift interface of Nimbus should run on. Clients should
+       connect to this port to upload jars and submit topologies.</description>
+  </property>
+  <property>
+    <name>nimbus.thrift.max_buffer_size</name>
+    <value>1048576</value>
+    <description>The maximum buffer size thrift should use when reading messages.</description>
+  </property>
+  <property>
+    <name>nimbus.childopts</name>
+    <value>-Xmx1024m -Djava.security.auth.login.config=/etc/storm/conf/storm_jaas.conf -javaagent:/usr/lib/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8649,wireformat31x=true,mode=multicast,config=/usr/lib/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM</value>
+    <description>This parameter is used by the storm-deploy project to configure the jvm options for the nimbus daemon.</description>
+  </property>
+  <property>
+    <name>nimbus.task.timeout.secs</name>
+    <value>30</value>
+    <description>How long without heartbeating a task can go before nimbus will consider the task dead and reassign it to another location.</description>
+  </property>
+  <property>
+    <name>nimbus.supervisor.timeout.secs</name>
+    <value>60</value>
+    <description>How long before a supervisor can go without heartbeating before nimbus considers it dead and stops assigning new work to it.</description>
+  </property>
+  <property>
+    <name>nimbus.monitor.freq.secs</name>
+    <value>10</value>
+    <description>
+      How often nimbus should wake up to check heartbeats and do reassignments. Note
+       that if a machine ever goes down Nimbus will immediately wake up and take action.
+       This parameter is for checking for failures when there's no explicit event like that occurring.
+    </description>
+  </property>
+  <property>
+    <name>nimbus.cleanup.inbox.freq.secs</name>
+    <value>600</value>
+    <description>How often nimbus should wake the cleanup thread to clean the inbox.</description>
+  </property>
+  <property>
+    <name>nimbus.inbox.jar.expiration.secs</name>
+    <value>3600</value>
+    <description>
+      The length of time a jar file lives in the inbox before being deleted by the cleanup thread.
+
+       Probably keep this value greater than or equal to NIMBUS_CLEANUP_INBOX_JAR_EXPIRATION_SECS.
+       Note that the time it takes to delete an inbox jar file is going to be somewhat more than
+       NIMBUS_CLEANUP_INBOX_JAR_EXPIRATION_SECS (depending on how often NIMBUS_CLEANUP_FREQ_SECS is set to).
+      </description>
+  </property>
+  <property>
+    <name>nimbus.task.launch.secs</name>
+    <value>120</value>
+    <description>A special timeout used when a task is initially launched. During launch, this is the timeout
+       used until the first heartbeat, overriding nimbus.task.timeout.secs.</description>
+  </property>
+  <property>
+    <name>nimbus.reassign</name>
+    <value>true</value>
+    <description>Whether or not nimbus should reassign tasks if it detects that a task goes down.
+       Defaults to true, and it's not recommended to change this value.</description>
+  </property>
+  <property>
+    <name>nimbus.file.copy.expiration.secs</name>
+    <value>600</value>
+    <description>During upload/download with the master, how long an upload or download connection is idle
+       before nimbus considers it dead and drops the connection.</description>
+  </property>
+  <property>
+    <name>nimbus.topology.validator</name>
+    <value>backtype.storm.nimbus.DefaultTopologyValidator</value>
+    <description>A custom class that implements ITopologyValidator that is run whenever a
+       topology is submitted. Can be used to provide business-specific logic for
+       whether topologies are allowed to run or not.</description>
+  </property>
+  <property>
+    <name>ui.port</name>
+    <value>8744</value>
+    <description>Storm UI binds to this port.</description>
+  </property>
+  <property>
+    <name>ui.childopts</name>
+    <value>-Xmx768m -Djava.security.auth.login.config=/etc/storm/conf/storm_jaas.conf</value>
+    <description>Childopts for Storm UI Java process.</description>
+  </property>
+  <property>
+    <name>logviewer.port</name>
+    <value>8000</value>
+    <description>HTTP UI port for log viewer.</description>
+  </property>
+  <property>
+    <name>logviewer.childopts</name>
+    <value>-Xmx128m</value>
+    <description>Childopts for log viewer java process.</description>
+  </property>
+  <property>
+    <name>logviewer.appender.name</name>
+    <value>A1</value>
+    <description>Appender name used by log viewer to determine log directory.</description>
+  </property>
+  <property>
+    <name>drpc.port</name>
+    <value>3772</value>
+    <description>This port is used by Storm DRPC for receiving DPRC requests from clients.</description>
+  </property>
+  <property>
+    <name>drpc.worker.threads</name>
+    <value>64</value>
+    <description>DRPC thrift server worker threads.</description>
+  </property>
+  <property>
+    <name>drpc.queue.size</name>
+    <value>128</value>
+    <description>DRPC thrift server queue size.</description>
+  </property>
+  <property>
+    <name>drpc.invocations.port</name>
+    <value>3773</value>
+    <description>This port on Storm DRPC is used by DRPC topologies to receive function invocations and send results back.</description>
+  </property>
+  <property>
+    <name>drpc.request.timeout.secs</name>
+    <value>600</value>
+    <description>The timeout on DRPC requests within the DRPC server. Defaults to 10 minutes. Note that requests can also
+       timeout based on the socket timeout on the DRPC client, and separately based on the topology message
+       timeout for the topology implementing the DRPC function.</description>
+  </property>
+  <property>
+    <name>drpc.childopts</name>
+    <value>-Xmx768m</value>
+    <description>Childopts for Storm DRPC Java process.</description>
+  </property>
+  <property>
+    <name>transactional.zookeeper.root</name>
+    <value>/transactional</value>
+    <description>The root directory in ZooKeeper for metadata about TransactionalSpouts.</description>
+  </property>
+  <property>
+    <name>transactional.zookeeper.servers</name>
+    <value>null</value>
+    <description>The list of zookeeper servers in which to keep the transactional state. If null (which is default),
+       will use storm.zookeeper.servers</description>
+  </property>
+  <property>
+    <name>transactional.zookeeper.port</name>
+    <value>null</value>
+    <description>The port to use to connect to the transactional zookeeper servers. If null (which is default),
+       will use storm.zookeeper.port</description>
+  </property>
+  <property>
+    <name>supervisor.slots.ports</name>
+    <value>[6700, 6701]</value>
+    <description>A list of ports that can run workers on this supervisor. Each worker uses one port, and
+       the supervisor will only run one worker per port. Use this configuration to tune
+       how many workers run on each machine.</description>
+  </property>
+  <property>
+    <name>supervisor.childopts</name>
+    <value>-Xmx256m -Djava.security.auth.login.config=/etc/storm/conf/storm_jaas.conf -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port=56431 -javaagent:/usr/lib/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/lib/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Supervisor_JVM</value>
+    <description>This parameter is used by the storm-deploy project to configure the jvm options for the supervisor daemon.</description>
+  </property>
+  <property>
+    <name>supervisor.worker.start.timeout.secs</name>
+    <value>120</value>
+    <description>How long a worker can go without heartbeating during the initial launch before
+       the supervisor tries to restart the worker process. This value override
+       supervisor.worker.timeout.secs during launch because there is additional
+       overhead to starting and configuring the JVM on launch.</description>
+  </property>
+  <property>
+    <name>supervisor.worker.timeout.secs</name>
+    <value>30</value>
+    <description>How long a worker can go without heartbeating before the supervisor tries to restart the worker process.</description>
+  </property>
+  <property>
+    <name>supervisor.monitor.frequency.secs</name>
+    <value>3</value>
+    <description>How often the supervisor checks the worker heartbeats to see if any of them need to be restarted.</description>
+  </property>
+  <property>
+    <name>supervisor.heartbeat.frequency.secs</name>
+    <value>5</value>
+    <description>How often the supervisor sends a heartbeat to the master.</description>
+  </property>
+  <property>
+    <name>worker.childopts</name>
+    <value>-Xmx768m -javaagent:/usr/lib/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/lib/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Worker_%ID%_JVM</value>
+    <description>The jvm opts provided to workers launched by this supervisor. All \"%ID%\" substrings are replaced with an identifier for this worker.</description>
+  </property>
+  <property>
+    <name>worker.heartbeat.frequency.secs</name>
+    <value>1</value>
+    <description>How often this worker should heartbeat to the supervisor.</description>
+  </property>
+  <property>
+    <name>task.heartbeat.frequency.secs</name>
+    <value>3</value>
+    <description>How often a task should heartbeat its status to the master.</description>
+  </property>
+  <property>
+    <name>task.refresh.poll.secs</name>
+    <value>10</value>
+    <description>How often a task should sync its connections with other tasks (if a task is
+       reassigned, the other tasks sending messages to it need to refresh their connections).
+       In general though, when a reassignment happens other tasks will be notified
+       almost immediately. This configuration is here just in case that notification doesn't
+       come through.</description>
+  </property>
+  <property>
+    <name>zmq.threads</name>
+    <value>1</value>
+    <description>The number of threads that should be used by the zeromq context in each worker process.</description>
+  </property>
+  <property>
+    <name>zmq.linger.millis</name>
+    <value>5000</value>
+    <description>How long a connection should retry sending messages to a target host when
+       the connection is closed. This is an advanced configuration and can almost
+       certainly be ignored.</description>
+  </property>
+  <property>
+    <name>zmq.hwm</name>
+    <value>0</value>
+    <description>The high water mark for the ZeroMQ push sockets used for networking. Use this config to prevent buffer explosion
+       on the networking layer.</description>
+  </property>
+  <property>
+    <name>storm.messaging.netty.server_worker_threads</name>
+    <value>1</value>
+    <description>Netty based messaging: The # of worker threads for the server.</description>
+  </property>
+  <property>
+    <name>storm.messaging.netty.client_worker_threads</name>
+    <value>1</value>
+    <description>Netty based messaging: The # of worker threads for the client.</description>
+  </property>
+  <property>
+    <name>storm.messaging.netty.buffer_size</name>
+    <value>5242880</value>
+    <description>Netty based messaging: The buffer size for send/recv buffer.</description>
+  </property>
+  <property>
+    <name>storm.messaging.netty.max_retries</name>
+    <value>30</value>
+    <description>Netty based messaging: The max # of retries that a peer will perform when a remote is not accessible.</description>
+  </property>
+  <property>
+    <name>storm.messaging.netty.max_wait_ms</name>
+    <value>1000</value>
+    <description>Netty based messaging: The max # of milliseconds that a peer will wait.</description>
+  </property>
+  <property>
+    <name>storm.messaging.netty.min_wait_ms</name>
+    <value>100</value>
+    <description>Netty based messaging: The min # of milliseconds that a peer will wait.</description>
+  </property>
+  <property>
+    <name>topology.enable.message.timeouts</name>
+    <value>true</value>
+    <description>True if Storm should timeout messages or not. Defaults to true. This is meant to be used
+       in unit tests to prevent tuples from being accidentally timed out during the test.</description>
+  </property>
+  <property>
+    <name>topology.debug</name>
+    <value>false</value>
+    <description>When set to true, Storm will log every message that's emitted.</description>
+  </property>
+  <property>
+    <name>topology.optimize</name>
+    <value>true</value>
+    <description>Whether or not the master should optimize topologies by running multiple tasks in a single thread where appropriate.</description>
+  </property>
+  <property>
+    <name>topology.workers</name>
+    <value>1</value>
+    <description>How many processes should be spawned around the cluster to execute this
+       topology. Each process will execute some number of tasks as threads within
+       them. This parameter should be used in conjunction with the parallelism hints
+       on each component in the topology to tune the performance of a topology.</description>
+  </property>
+  <property>
+    <name>topology.acker.executors</name>
+    <value>null</value>
+    <description>How many executors to spawn for ackers.
+
+      If this is set to 0, then Storm will immediately ack tuples as soon
+       as they come off the spout, effectively disabling reliability.
+    </description>
+  </property>
+  <property>
+    <name>topology.message.timeout.secs</name>
+    <value>30</value>
+    <description>The maximum amount of time given to the topology to fully process a message
+       emitted by a spout. If the message is not acked within this time frame, Storm
+       will fail the message on the spout. Some spouts implementations will then replay
+       the message at a later time.</description>
+  </property>
+  <property>
+    <name>topology.skip.missing.kryo.registrations</name>
+    <value>false</value>
+    <description> Whether or not Storm should skip the loading of kryo registrations for which it
+       does not know the class or have the serializer implementation. Otherwise, the task will
+       fail to load and will throw an error at runtime. The use case of this is if you want to
+       declare your serializations on the storm.yaml files on the cluster rather than every single
+       time you submit a topology. Different applications may use different serializations and so
+       a single application may not have the code for the other serializers used by other apps.
+       By setting this config to true, Storm will ignore that it doesn't have those other serializations
+       rather than throw an error.</description>
+  </property>
+  <property>
+    <name>topology.max.task.parallelism</name>
+    <value>null</value>
+    <description>The maximum parallelism allowed for a component in this topology. This configuration is
+       typically used in testing to limit the number of threads spawned in local mode.</description>
+  </property>
+  <property>
+    <name>topology.max.spout.pending</name>
+    <value>null</value>
+    <description>The maximum number of tuples that can be pending on a spout task at any given time.
+       This config applies to individual tasks, not to spouts or topologies as a whole.
+
+       A pending tuple is one that has been emitted from a spout but has not been acked or failed yet.
+       Note that this config parameter has no effect for unreliable spouts that don't tag
+       their tuples with a message id.</description>
+  </property>
+  <property>
+    <name>topology.state.synchronization.timeout.secs</name>
+    <value>60</value>
+    <description>The maximum amount of time a component gives a source of state to synchronize before it requests
+       synchronization again.</description>
+  </property>
+  <property>
+    <name>topology.stats.sample.rate</name>
+    <value>0.05</value>
+    <description>The percentage of tuples to sample to produce stats for a task.</description>
+  </property>
+  <property>
+    <name>topology.builtin.metrics.bucket.size.secs</name>
+    <value>60</value>
+    <description>The time period that builtin metrics data is bucketed into.</description>
+  </property>
+  <property>
+    <name>topology.fall.back.on.java.serialization</name>
+    <value>true</value>
+    <description>Whether or not to use Java serialization in a topology.</description>
+  </property>
+  <property>
+    <name>topology.worker.childopts</name>
+    <value>null</value>
+    <description>Topology-specific options for the worker child process. This is used in addition to WORKER_CHILDOPTS.</description>
+  </property>
+  <property>
+    <name>topology.executor.receive.buffer.size</name>
+    <value>1024</value>
+    <description>The size of the Disruptor receive queue for each executor. Must be a power of 2.</description>
+  </property>
+  <property>
+    <name>topology.executor.send.buffer.size</name>
+    <value>1024</value>
+    <description>The size of the Disruptor send queue for each executor. Must be a power of 2.</description>
+  </property>
+  <property>
+    <name>topology.receiver.buffer.size</name>
+    <value>8</value>
+    <description>The maximum number of messages to batch from the thread receiving off the network to the
+       executor queues. Must be a power of 2.</description>
+  </property>
+  <property>
+    <name>topology.transfer.buffer.size</name>
+    <value>1024</value>
+    <description>The size of the Disruptor transfer queue for each worker.</description>
+  </property>
+  <property>
+    <name>topology.tick.tuple.freq.secs</name>
+    <value>null</value>
+    <description>How often a tick tuple from the "__system" component and "__tick" stream should be sent
+       to tasks. Meant to be used as a component-specific configuration.</description>
+  </property>
+  <property>
+    <name>topology.worker.shared.thread.pool.size</name>
+    <value>4</value>
+    <description>The size of the shared thread pool for worker tasks to make use of. The thread pool can be accessed
+       via the TopologyContext.</description>
+  </property>
+  <property>
+    <name>topology.disruptor.wait.strategy</name>
+    <value>com.lmax.disruptor.BlockingWaitStrategy</value>
+    <description>Configure the wait strategy used for internal queuing. Can be used to trade off latency
+       vs. throughput.</description>
+  </property>
+  <property>
+    <name>topology.spout.wait.strategy</name>
+    <value>backtype.storm.spout.SleepSpoutWaitStrategy</value>
+    <description>A class that implements a strategy for what to do when a spout needs to wait. Waiting is
+       triggered in one of two conditions:
+
+       1. nextTuple emits no tuples
+       2. The spout has hit maxSpoutPending and can't emit any more tuples</description>
+  </property>
+  <property>
+    <name>topology.sleep.spout.wait.strategy.time.ms</name>
+    <value>1</value>
+    <description>The number of milliseconds the SleepEmptyEmitStrategy should sleep for.</description>
+  </property>
+  <property>
+    <name>topology.error.throttle.interval.secs</name>
+    <value>10</value>
+    <description>The interval in seconds to use for determining whether to throttle error reported to Zookeeper. For example,
+       an interval of 10 seconds with topology.max.error.report.per.interval set to 5 will only allow 5 errors to be
+       reported to Zookeeper per task for every 10 second interval of time.</description>
+  </property>
+  <property>
+    <name>topology.max.error.report.per.interval</name>
+    <value>5</value>
+    <description>The maximum number of errors allowed to be reported to Zookeeper per task per throttle
+       interval. For example, with topology.error.throttle.interval.secs set to 10, a value of 5 will only
+       allow 5 errors to be reported to Zookeeper per task for every 10 second interval of time.</description>
+  </property>
+  <property>
+    <name>topology.kryo.factory</name>
+    <value>backtype.storm.serialization.DefaultKryoFactory</value>
+    <description>Class that specifies how to create a Kryo instance for serialization. Storm will then apply
+       topology.kryo.register and topology.kryo.decorators on top of this. The default implementation
+       implements topology.fall.back.on.java.serialization and turns references off.</description>
+  </property>
+  <property>
+    <name>topology.tuple.serializer</name>
+    <value>backtype.storm.serialization.types.ListDelegateSerializer</value>
+    <description>The serializer class for ListDelegate (tuple payload).
+       The default serializer is ListDelegateSerializer.</description>
+  </property>
+  <property>
+    <name>topology.trident.batch.emit.interval.millis</name>
+    <value>500</value>
+    <description>How often a batch can be emitted in a Trident topology.</description>
+  </property>
+  <property>
+    <name>dev.zookeeper.path</name>
+    <value>/tmp/dev-storm-zookeeper</value>
+    <description>The path to use as the zookeeper dir when running a zookeeper server via
+       "storm dev-zookeeper". This zookeeper instance is only intended for development;
+       it is not a production grade zookeeper setup.</description>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/41ec7410/app-packages/storm-win/metainfo.xml
----------------------------------------------------------------------
diff --git a/app-packages/storm-win/metainfo.xml b/app-packages/storm-win/metainfo.xml
new file mode 100644
index 0000000..070ce35
--- /dev/null
+++ b/app-packages/storm-win/metainfo.xml
@@ -0,0 +1,149 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <application>
+    <name>STORM</name>
+    <comment>Apache Storm stream processing framework</comment>
+    <version>${pkg.version}</version>
+    <exportedConfigs>storm-site</exportedConfigs>
+
+    <exportGroups>
+      <exportGroup>
+        <name>QuickLinks</name>
+        <exports>
+          <export>
+            <name>org.apache.slider.monitor</name>
+            <value>http://${STORM_UI_SERVER_HOST}:${site.storm-site.ui.port}</value>
+          </export>
+          <export>
+            <name>nimbus.host_port</name>
+            <value>http://${NIMBUS_HOST}:${site.storm-site.nimbus.thrift.port}</value>
+          </export>
+        </exports>
+      </exportGroup>
+    </exportGroups>
+
+    <commandOrders>
+      <commandOrder>
+        <command>NIMBUS-START</command>
+        <requires>SUPERVISOR-INSTALLED,STORM_UI_SERVER-INSTALLED,DRPC_SERVER-INSTALLED,STORM_REST_API-INSTALLED
+        </requires>
+      </commandOrder>
+      <commandOrder>
+        <command>SUPERVISOR-START</command>
+        <requires>NIMBUS-STARTED</requires>
+      </commandOrder>
+      <commandOrder>
+        <command>DRPC_SERVER-START</command>
+        <requires>NIMBUS-STARTED</requires>
+      </commandOrder>
+      <commandOrder>
+        <command>STORM_UI_SERVER-START</command>
+        <requires>NIMBUS-STARTED</requires>
+      </commandOrder>
+      <commandOrder>
+        <command>STORM_REST_API-START</command>
+        <requires>NIMBUS-STARTED,DRPC_SERVER-STARTED,STORM_UI_SERVER-STARTED</requires>
+      </commandOrder>
+    </commandOrders>
+
+    <components>
+
+      <component>
+        <name>NIMBUS</name>
+        <category>MASTER</category>
+        <autoStartOnFailure>true</autoStartOnFailure>
+        <appExports>QuickLinks-nimbus.host_port</appExports>
+        <commandScript>
+          <script>scripts/nimbus.py</script>
+          <scriptType>PYTHON</scriptType>
+          <timeout>600</timeout>
+        </commandScript>
+      </component>
+
+      <component>
+        <name>SUPERVISOR</name>
+        <category>SLAVE</category>
+        <autoStartOnFailure>true</autoStartOnFailure>
+        <componentExports>
+          <componentExport>
+            <name>log_viewer_port</name>
+            <value>${THIS_HOST}:${site.storm-site.logviewer.port}</value>
+          </componentExport>
+        </componentExports>
+        <commandScript>
+          <script>scripts/supervisor.py</script>
+          <scriptType>PYTHON</scriptType>
+          <timeout>600</timeout>
+        </commandScript>
+      </component>
+
+      <component>
+        <name>STORM_UI_SERVER</name>
+        <category>MASTER</category>
+        <publishConfig>true</publishConfig>
+        <appExports>QuickLinks-org.apache.slider.monitor</appExports>
+        <autoStartOnFailure>true</autoStartOnFailure>
+        <commandScript>
+          <script>scripts/ui_server.py</script>
+          <scriptType>PYTHON</scriptType>
+          <timeout>600</timeout>
+        </commandScript>
+      </component>
+
+      <component>
+        <name>DRPC_SERVER</name>
+        <category>MASTER</category>
+        <autoStartOnFailure>true</autoStartOnFailure>
+        <commandScript>
+          <script>scripts/drpc_server.py</script>
+          <scriptType>PYTHON</scriptType>
+          <timeout>600</timeout>
+        </commandScript>
+      </component>
+    </components>
+
+    <osSpecifics>
+      <osSpecific>
+        <osType>any</osType>
+        <packages>
+          <package>
+            <type>zip</type>
+            <name>files/${pkg.name}</name>
+          </package>
+        </packages>
+      </osSpecific>
+    </osSpecifics>
+
+    <configFiles>
+      <configFile>
+        <type>yaml</type>
+        <fileName>storm.yaml</fileName>
+        <dictionaryName>storm-site</dictionaryName>
+      </configFile>
+      <configFile>
+        <type>env</type>
+        <fileName>storm-env.sh</fileName>
+        <dictionaryName>storm-env</dictionaryName>
+      </configFile>
+    </configFiles>
+
+  </application>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/41ec7410/app-packages/storm-win/package/scripts/drpc_server.py
----------------------------------------------------------------------
diff --git a/app-packages/storm-win/package/scripts/drpc_server.py b/app-packages/storm-win/package/scripts/drpc_server.py
new file mode 100644
index 0000000..779854a
--- /dev/null
+++ b/app-packages/storm-win/package/scripts/drpc_server.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from storm import storm
+from service import service
+
+class DrpcServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    storm()
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env)
+
+    service("drpc", action="start")
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    service("drpc", action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    #check_process_status(status_params.pid_drpc)
+
+if __name__ == "__main__":
+  DrpcServer().execute()

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/41ec7410/app-packages/storm-win/package/scripts/nimbus.py
----------------------------------------------------------------------
diff --git a/app-packages/storm-win/package/scripts/nimbus.py b/app-packages/storm-win/package/scripts/nimbus.py
new file mode 100644
index 0000000..fafc97a
--- /dev/null
+++ b/app-packages/storm-win/package/scripts/nimbus.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from storm import storm
+from service import service
+
+class Nimbus(Script):
+  def install(self, env):
+    self.install_packages(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    storm()
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env)
+
+    service("nimbus", action="start")
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    service("nimbus", action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    #check_process_status(status_params.pid_nimbus)
+
+if __name__ == "__main__":
+  Nimbus().execute()

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/41ec7410/app-packages/storm-win/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/app-packages/storm-win/package/scripts/params.py b/app-packages/storm-win/package/scripts/params.py
new file mode 100644
index 0000000..c6ea62d
--- /dev/null
+++ b/app-packages/storm-win/package/scripts/params.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+app_root = config['configurations']['global']['app_root']
+conf_dir = format("{app_root}/conf")
+storm_user = config['configurations']['global']['app_user']
+log_dir = config['configurations']['global']['app_log_dir']
+pid_dir = status_params.pid_dir
+local_dir = config['configurations']['storm-site']['storm.local.dir']
+user_group = config['configurations']['global']['user_group']
+java64_home = config['hostLevelParams']['java_home']
+nimbus_host = config['configurations']['storm-site']['nimbus.host']
+nimbus_port = config['configurations']['storm-site']['nimbus.thrift.port']
+rest_api_port = config['configurations']['global']['rest_api_port']
+rest_api_admin_port = config['configurations']['global']['rest_api_admin_port']
+rest_api_conf_file = format("{conf_dir}/config.yaml")
+rest_lib_dir = format("{app_root}/external/storm-rest")
+storm_bin = format("{app_root}/bin/storm.cmd")
+
+security_enabled = config['configurations']['global']['security_enabled']
+
+if security_enabled:
+  _hostname_lowercase = config['hostname'].lower()
+  _kerberos_domain = config['configurations']['storm-env']['kerberos_domain']
+  _storm_client_principal_name = config['configurations']['storm-env']['storm_client_principal_name']
+  _storm_server_principal_name = config['configurations']['storm-env']['storm_server_principal_name']
+
+  storm_jaas_client_principal = _storm_client_principal_name.replace('_HOST', _hostname_lowercase)
+  storm_client_keytab_path = config['configurations']['storm-env']['storm_client_keytab']
+  storm_jaas_server_principal = _storm_server_principal_name.replace('_HOST',nimbus_host.lower())
+  storm_jaas_stormclient_servicename = storm_jaas_server_principal.split("/")[0]
+  storm_server_keytab_path = config['configurations']['storm-env']['storm_server_keytab']
+  kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
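
For a quick illustration of the principal handling above, the standalone sketch below shows how the _HOST token in a configured principal is resolved against the local hostname and how the client service name is derived; the principal and hostname values are made up, not real configuration.

# Standalone sketch of the _HOST substitution done in params.py above;
# the principal and hostname below are illustrative placeholders.
principal_template = "storm/_HOST@EXAMPLE.COM"
hostname = "worker-node-3.example.com"

resolved_principal = principal_template.replace('_HOST', hostname.lower())
stormclient_servicename = resolved_principal.split("/")[0]

print(resolved_principal)        # storm/worker-node-3.example.com@EXAMPLE.COM
print(stormclient_servicename)   # storm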

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/41ec7410/app-packages/storm-win/package/scripts/rest_api.py
----------------------------------------------------------------------
diff --git a/app-packages/storm-win/package/scripts/rest_api.py b/app-packages/storm-win/package/scripts/rest_api.py
new file mode 100644
index 0000000..6a09e4e
--- /dev/null
+++ b/app-packages/storm-win/package/scripts/rest_api.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from storm import storm
+from service import service
+
+
+class StormRestApi(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    storm()
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env)
+
+    service("rest_api", action="start")
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    service("rest_api", action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    #check_process_status(status_params.pid_rest_api)
+
+if __name__ == "__main__":
+  StormRestApi().execute()

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/41ec7410/app-packages/storm-win/package/scripts/service.py
----------------------------------------------------------------------
diff --git a/app-packages/storm-win/package/scripts/service.py b/app-packages/storm-win/package/scripts/service.py
new file mode 100644
index 0000000..dd08858
--- /dev/null
+++ b/app-packages/storm-win/package/scripts/service.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+
+from resource_management import *
+import time
+import os
+import sys
+
+"""
+Slider package uses jps as pgrep does not list the whole process start command
+"""
+def service(
+    name,
+    action='start'):
+  import params
+  import status_params
+
+  pid_file = status_params.pid_files[name]
+  backtype = format("backtype.storm.daemon.{name}")
+
+  if action == "start":
+    cmd = format("{storm_bin} {name} > {log_dir}/{name}.out 2>&1")
+
+    Execute(cmd,
+            user=params.storm_user,
+            logoutput=False,
+            wait_for_finish=False,
+            pid_file = pid_file
+    )
+
+  elif action == "stop":
+    pid = format("`cat {pid_file}` >/dev/null 2>&1")
+    Execute(format("kill {pid}")
+    )
+    Execute(format("kill -9 {pid}"),
+            ignore_failures=True
+    )
+    Execute(format("rm -f {pid_file}"))

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/41ec7410/app-packages/storm-win/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/app-packages/storm-win/package/scripts/status_params.py b/app-packages/storm-win/package/scripts/status_params.py
new file mode 100644
index 0000000..7dda158
--- /dev/null
+++ b/app-packages/storm-win/package/scripts/status_params.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management import *
+
+config = Script.get_config()
+
+container_id = config['configurations']['global']['app_container_id']
+pid_dir = config['configurations']['global']['app_pid_dir']
+pid_nimbus = format("{pid_dir}/nimbus.pid")
+pid_supervisor = format("{pid_dir}/supervisor.pid")
+pid_drpc = format("{pid_dir}/drpc.pid")
+pid_ui = format("{pid_dir}/ui.pid")
+pid_logviewer = format("{pid_dir}/logviewer.pid")
+pid_rest_api = format("{pid_dir}/restapi.pid")
+pid_files = {"logviewer":pid_logviewer,
+             "ui": pid_ui,
+             "nimbus": pid_nimbus,
+             "supervisor": pid_supervisor,
+             "rest_api": pid_rest_api,
+             "drpc": pid_drpc}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/41ec7410/app-packages/storm-win/package/scripts/storm.py
----------------------------------------------------------------------
diff --git a/app-packages/storm-win/package/scripts/storm.py b/app-packages/storm-win/package/scripts/storm.py
new file mode 100644
index 0000000..e109826
--- /dev/null
+++ b/app-packages/storm-win/package/scripts/storm.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from yaml_config import yaml_config
+import sys
+
+def storm():
+  import params
+
+  Directory([params.log_dir, params.pid_dir, params.local_dir, params.conf_dir],
+            owner=params.storm_user,
+            group=params.user_group,
+            recursive=True
+  )
+
+  File(format("{conf_dir}/config.yaml"),
+            content=Template("config.yaml.j2"),
+            owner = params.storm_user,
+            group = params.user_group
+  )
+
+  yaml_config( "storm.yaml",
+               conf_dir = params.conf_dir,
+               configurations = params.config['configurations']['storm-site'],
+               owner = params.storm_user,
+               group = params.user_group
+  )
+
+  if params.security_enabled:
+    File(format("{conf_dir}/storm_jaas.conf"),
+              content=Template("storm_jaas.conf.j2"),
+              owner = params.storm_user,
+              group = params.user_group
+    )
+

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/41ec7410/app-packages/storm-win/package/scripts/supervisor.py
----------------------------------------------------------------------
diff --git a/app-packages/storm-win/package/scripts/supervisor.py b/app-packages/storm-win/package/scripts/supervisor.py
new file mode 100644
index 0000000..eba9fa9
--- /dev/null
+++ b/app-packages/storm-win/package/scripts/supervisor.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from yaml_config import yaml_config
+from storm import storm
+from service import service
+
+
+class Supervisor(Script):
+  def install(self, env):
+    self.install_packages(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    storm()
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env)
+
+    service("supervisor", action="start")
+    service("logviewer", action="start")
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    service("supervisor", action="stop")
+    service("logviewer", action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+
+    #check_process_status(status_params.pid_supervisor)
+
+
+if __name__ == "__main__":
+  Supervisor().execute()
+

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/41ec7410/app-packages/storm-win/package/scripts/ui_server.py
----------------------------------------------------------------------
diff --git a/app-packages/storm-win/package/scripts/ui_server.py b/app-packages/storm-win/package/scripts/ui_server.py
new file mode 100644
index 0000000..0875b76
--- /dev/null
+++ b/app-packages/storm-win/package/scripts/ui_server.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from storm import storm
+from service import service
+
+class UiServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    storm()
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env)
+
+    service("ui", action="start")
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    service("ui", action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    #check_process_status(status_params.pid_ui)
+
+if __name__ == "__main__":
+  UiServer().execute()

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/41ec7410/app-packages/storm-win/package/scripts/yaml_config.py
----------------------------------------------------------------------
diff --git a/app-packages/storm-win/package/scripts/yaml_config.py b/app-packages/storm-win/package/scripts/yaml_config.py
new file mode 100644
index 0000000..5f763cc
--- /dev/null
+++ b/app-packages/storm-win/package/scripts/yaml_config.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import re
+import socket
+from resource_management import *
+
+def escape_yaml_propetry(value):
+  # pre-process value for any "_HOST" tokens
+  value = value.replace('_HOST', socket.getfqdn())
+
+  unquouted = False
+  unquouted_values = ["null","Null","NULL","true","True","TRUE","false","False","FALSE","YES","Yes","yes","NO","No","no","ON","On","on","OFF","Off","off"]
+  
+  if value in unquouted_values:
+    unquouted = True
+
+  # if is list [a,b,c]
+  if re.match('^\w*\[.+\]\w*$', value):
+    unquouted = True
+
+  # if is map {'a':'b'}
+  if re.match('^\w*\{.+\}\w*$', value):
+    unquouted = True
+
+  try:
+    int(value)
+    unquouted = True
+  except ValueError:
+    pass
+  
+  try:
+    float(value)
+    unquouted = True
+  except ValueError:
+    pass
+  
+  if not unquouted:
+    value = value.replace("'","''")
+    value = "'"+value+"'"
+    
+  return value
+
+def yaml_inline_template(configurations):
+  return source.InlineTemplate('''{% for key, value in configurations_dict.items() %}{{ key }}: {{ escape_yaml_propetry(value) }}
+{% endfor %}''', configurations_dict=configurations, extra_imports=[escape_yaml_propetry])
+
+def yaml_config(
+  filename,
+  configurations = None,
+  conf_dir = None,
+  mode = None,
+  owner = None,
+  group = None
+):
+    config_content = yaml_inline_template(configurations)
+
+    File (format("{conf_dir}/{filename}"),
+      content = config_content,
+      owner = owner,
+      group = group,
+      mode = mode
+    )
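
The quoting rules in escape_yaml_propetry are easiest to see against a few sample values. The sketch below re-implements the same decisions in plain Python so it can run without the agent's resource_management module; the sample values are illustrative only.

# Standalone re-implementation of the quoting rules used above, for illustration;
# it mirrors escape_yaml_propetry but skips the _HOST substitution.
import re

YAML_LITERALS = {"null", "true", "false", "yes", "no", "on", "off"}

def quote_for_yaml(value):
    if value.lower() in YAML_LITERALS:
        return value                                  # recognised YAML literal, left unquoted
    if re.match(r'^\w*\[.+\]\w*$', value) or re.match(r'^\w*\{.+\}\w*$', value):
        return value                                  # inline list/map syntax, left unquoted
    try:
        float(value)                                  # covers ints and floats
        return value
    except ValueError:
        pass
    return "'" + value.replace("'", "''") + "'"       # plain string: double embedded quotes, then quote

for sample in ["true", "1024", "0.05", "[a, b, c]", "app/logs", "it's"]:
    print(sample, "->", quote_for_yaml(sample))
# true -> true, 1024 -> 1024, 0.05 -> 0.05, [a, b, c] -> [a, b, c],
# app/logs -> 'app/logs', it's -> 'it''s'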

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/41ec7410/app-packages/storm-win/package/templates/config.yaml.j2
----------------------------------------------------------------------
diff --git a/app-packages/storm-win/package/templates/config.yaml.j2 b/app-packages/storm-win/package/templates/config.yaml.j2
new file mode 100644
index 0000000..58f5c9e
--- /dev/null
+++ b/app-packages/storm-win/package/templates/config.yaml.j2
@@ -0,0 +1,28 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+nimbusHost: {{nimbus_host}}
+nimbusPort: {{nimbus_port}}
+
+# HTTP-specific options.
+http:
+
+  # The port on which the HTTP server listens for service requests.
+  port: {{rest_api_port}}
+
+  # The port on which the HTTP server listens for administrative requests.
+  adminPort: {{rest_api_admin_port}}
+
+enableGanglia: false
\ No newline at end of file
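
For reference, the template above can be rendered outside the agent with plain Jinja2 to see the shape of the generated REST API config; the agent itself goes through the resource_management Template wrapper, and the host and port values below are made up.

# Rendering a trimmed copy of config.yaml.j2 with made-up values, using plain Jinja2.
from jinja2 import Template

template_text = (
    "nimbusHost: {{nimbus_host}}\n"
    "nimbusPort: {{nimbus_port}}\n"
    "http:\n"
    "  port: {{rest_api_port}}\n"
    "  adminPort: {{rest_api_admin_port}}\n"
    "enableGanglia: false\n"
)

print(Template(template_text).render(
    nimbus_host="nimbus.example.com",   # illustrative values only
    nimbus_port=6627,
    rest_api_port=8745,
    rest_api_admin_port=8746))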

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/41ec7410/app-packages/storm-win/package/templates/storm_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/app-packages/storm-win/package/templates/storm_jaas.conf.j2 b/app-packages/storm-win/package/templates/storm_jaas.conf.j2
new file mode 100644
index 0000000..a1ba6ea
--- /dev/null
+++ b/app-packages/storm-win/package/templates/storm_jaas.conf.j2
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+StormServer {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="{{storm_server_keytab_path}}"
+   storeKey=true
+   useTicketCache=false
+   principal="{{storm_jaas_server_principal}}";
+};
+StormClient {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="{{storm_client_keytab_path}}"
+   storeKey=true
+   useTicketCache=false
+   serviceName="{{storm_jaas_stormclient_servicename}}"
+   debug=true
+   principal="{{storm_jaas_client_principal}}";
+};
+Client {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="{{storm_client_keytab_path}}"
+   storeKey=true
+   useTicketCache=false
+   serviceName="zookeeper"
+   principal="{{storm_jaas_client_principal}}";
+};

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/41ec7410/app-packages/storm-win/pom.xml
----------------------------------------------------------------------
diff --git a/app-packages/storm-win/pom.xml b/app-packages/storm-win/pom.xml
new file mode 100644
index 0000000..490ca96
--- /dev/null
+++ b/app-packages/storm-win/pom.xml
@@ -0,0 +1,91 @@
+<?xml version="1.0"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+  <parent>
+    <groupId>org.apache.slider</groupId>
+    <artifactId>slider</artifactId>
+    <version>0.51.0-incubating-SNAPSHOT</version>
+    <relativePath>../../pom.xml</relativePath>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+  <artifactId>slider-storm-app-win-package</artifactId>
+  <packaging>pom</packaging>
+  <name>Slider Storm App Package</name>
+  <description>Slider Storm App Package</description>
+  <version>${pkg.version}</version>
+  <properties>
+    <work.dir>package-tmp</work.dir>
+  </properties>
+
+  <profiles>
+    <profile>
+      <id>storm-app-package-win</id>
+      <build>
+        <plugins>
+
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-antrun-plugin</artifactId>
+            <version>1.7</version>
+            <executions>
+              <execution>
+                <id>copy</id>
+                <phase>validate</phase>
+                <configuration>
+                  <target name="copy and rename file">
+                    <copy file="${pkg.src}/${pkg.name}" tofile="${project.build.directory}/${pkg.name}" />
+                  </target>
+                </configuration>
+                <goals>
+                  <goal>run</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-assembly-plugin</artifactId>
+            <configuration>
+              <tarLongFileMode>gnu</tarLongFileMode>
+              <descriptor>src/assembly/storm.xml</descriptor>
+              <appendAssemblyId>false</appendAssemblyId>
+            </configuration>
+            <executions>
+              <execution>
+                <id>build-tarball</id>
+                <phase>package</phase>
+                <goals>
+                  <goal>single</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
+
+  <build>
+  </build>
+
+  <dependencies>
+  </dependencies>
+
+</project>

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/41ec7410/app-packages/storm-win/resources-default.json
----------------------------------------------------------------------
diff --git a/app-packages/storm-win/resources-default.json b/app-packages/storm-win/resources-default.json
new file mode 100644
index 0000000..31b445e
--- /dev/null
+++ b/app-packages/storm-win/resources-default.json
@@ -0,0 +1,30 @@
+{
+  "schema" : "http://example.org/specification/v2.0.0",
+  "metadata" : {
+  },
+  "global" : {
+    "yarn.log.include.patterns": "",
+    "yarn.log.exclude.patterns": "",
+    "yarn.log.interval": "0"
+  },
+  "components": {
+    "slider-appmaster": {
+    },
+    "NIMBUS": {
+      "yarn.role.priority": "1",
+      "yarn.component.instances": "1"
+    },
+    "STORM_UI_SERVER": {
+      "yarn.role.priority": "2",
+      "yarn.component.instances": "1"
+    },
+    "DRPC_SERVER": {
+      "yarn.role.priority": "3",
+      "yarn.component.instances": "1"
+    },
+    "SUPERVISOR": {
+      "yarn.role.priority": "4",
+      "yarn.component.instances": "1"
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/41ec7410/app-packages/storm-win/src/assembly/storm.xml
----------------------------------------------------------------------
diff --git a/app-packages/storm-win/src/assembly/storm.xml b/app-packages/storm-win/src/assembly/storm.xml
new file mode 100644
index 0000000..2ee7d31
--- /dev/null
+++ b/app-packages/storm-win/src/assembly/storm.xml
@@ -0,0 +1,68 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~  or more contributor license agreements.  See the NOTICE file
+  ~  distributed with this work for additional information
+  ~  regarding copyright ownership.  The ASF licenses this file
+  ~  to you under the Apache License, Version 2.0 (the
+  ~  "License"); you may not use this file except in compliance
+  ~  with the License.  You may obtain a copy of the License at
+  ~
+  ~       http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~  Unless required by applicable law or agreed to in writing, software
+  ~  distributed under the License is distributed on an "AS IS" BASIS,
+  ~  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~  See the License for the specific language governing permissions and
+  ~  limitations under the License.
+  -->
+
+
+<assembly
+  xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
+  <id>storm_v${storm.version}</id>
+  <formats>
+    <format>zip</format>
+    <format>dir</format>
+  </formats>
+  <includeBaseDirectory>false</includeBaseDirectory>
+
+  <files>
+    <file>
+      <source>appConfig-default.json</source>
+      <outputDirectory>/</outputDirectory>
+      <filtered>true</filtered>
+      <fileMode>0755</fileMode>
+    </file>
+    <file>
+      <source>metainfo.xml</source>
+      <outputDirectory>/</outputDirectory>
+      <filtered>true</filtered>
+      <fileMode>0755</fileMode>
+    </file>
+    <file>
+      <source>${pkg.src}/${pkg.name}</source>
+      <outputDirectory>package/files</outputDirectory>
+      <filtered>false</filtered>
+      <fileMode>0755</fileMode>
+    </file>
+  </files>
+
+  <fileSets>
+    <fileSet>
+      <directory>${project.basedir}</directory>
+      <outputDirectory>/</outputDirectory>
+      <excludes>
+        <exclude>pom.xml</exclude>
+        <exclude>src/**</exclude>
+        <exclude>target/**</exclude>
+        <exclude>appConfig-default.json</exclude>
+        <exclude>metainfo.xml</exclude>
+      </excludes>
+      <fileMode>0755</fileMode>
+      <directoryMode>0755</directoryMode>
+    </fileSet>
+
+  </fileSets>
+</assembly>


[06/12] git commit: SLIDER-450 additional fix to use default appConfig for accumulo testing

Posted by st...@apache.org.
SLIDER-450 additional fix to use default appConfig for accumulo testing


Project: http://git-wip-us.apache.org/repos/asf/incubator-slider/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-slider/commit/2a3a4ed6
Tree: http://git-wip-us.apache.org/repos/asf/incubator-slider/tree/2a3a4ed6
Diff: http://git-wip-us.apache.org/repos/asf/incubator-slider/diff/2a3a4ed6

Branch: refs/heads/feature/SLIDER-149_Support_a_YARN_service_registry
Commit: 2a3a4ed67789aa0833fe00ca1bb5bb9bd80816d1
Parents: f8210a7
Author: Billie Rinaldi <bi...@gmail.com>
Authored: Sat Sep 27 13:02:51 2014 -0700
Committer: Billie Rinaldi <bi...@gmail.com>
Committed: Sat Sep 27 13:02:51 2014 -0700

----------------------------------------------------------------------
 app-packages/accumulo/pom.xml                                | 8 ++++++++
 .../apache/slider/funtest/accumulo/AccumuloBasicIT.groovy    | 4 +---
 2 files changed, 9 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/2a3a4ed6/app-packages/accumulo/pom.xml
----------------------------------------------------------------------
diff --git a/app-packages/accumulo/pom.xml b/app-packages/accumulo/pom.xml
index a2014f2..bc4b591 100644
--- a/app-packages/accumulo/pom.xml
+++ b/app-packages/accumulo/pom.xml
@@ -210,6 +210,14 @@
         <filtering>true</filtering>
         <targetPath>${test.app.resources.dir}</targetPath>
       </resource>
+      <resource>
+        <directory>.</directory>
+        <filtering>true</filtering>
+        <targetPath>${test.app.resources.dir}</targetPath>
+        <includes>
+          <include>appConfig-default.json</include>
+        </includes>
+      </resource>
     </resources>
 
     <plugins>

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/2a3a4ed6/app-packages/accumulo/src/test/groovy/org/apache/slider/funtest/accumulo/AccumuloBasicIT.groovy
----------------------------------------------------------------------
diff --git a/app-packages/accumulo/src/test/groovy/org/apache/slider/funtest/accumulo/AccumuloBasicIT.groovy b/app-packages/accumulo/src/test/groovy/org/apache/slider/funtest/accumulo/AccumuloBasicIT.groovy
index dad7601..12c0655 100644
--- a/app-packages/accumulo/src/test/groovy/org/apache/slider/funtest/accumulo/AccumuloBasicIT.groovy
+++ b/app-packages/accumulo/src/test/groovy/org/apache/slider/funtest/accumulo/AccumuloBasicIT.groovy
@@ -56,9 +56,7 @@ class AccumuloBasicIT extends AccumuloAgentCommandTestBase {
     String appTemplateFile = templateName()
     Configuration conf = new Configuration()
     FileSystem fs = FileSystem.getLocal(conf)
-    InputStream stream = SliderUtils.getApplicationResourceInputStream(
-      fs, new Path(TEST_APP_PKG_DIR, TEST_APP_PKG_FILE),
-      "appConfig-default.json");
+    InputStream stream = new FileInputStream(sysprop("test.app.resources.dir") + "/appConfig-default.json")
     assert stream!=null, "Couldn't pull appConfig.json from app pkg"
     ConfTreeSerDeser c = new ConfTreeSerDeser()
     ConfTree t = c.fromStream(stream)


[11/12] git commit: SLIDER149 in sync with YARN-913 changes

Posted by st...@apache.org.
SLIDER149 in sync with YARN-913 changes


Project: http://git-wip-us.apache.org/repos/asf/incubator-slider/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-slider/commit/f058495a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-slider/tree/f058495a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-slider/diff/f058495a

Branch: refs/heads/feature/SLIDER-149_Support_a_YARN_service_registry
Commit: f058495a6c05a385518333d135b84d357780c7f6
Parents: 62335bf
Author: Steve Loughran <st...@apache.org>
Authored: Tue Sep 30 17:42:22 2014 -0700
Committer: Steve Loughran <st...@apache.org>
Committed: Tue Sep 30 17:42:22 2014 -0700

----------------------------------------------------------------------
 .../org/apache/slider/client/SliderClient.java  |  6 +++---
 .../YarnRegistryViewForProviders.java           | 10 ++++-----
 .../TestStandaloneYarnRegistryAM.groovy         | 17 ++++++---------
 .../registry/TestRegistryRestResources.groovy   |  4 ++--
 .../minicluster/live/TestHBaseMaster.groovy     |  8 +++----
 .../minicluster/live/TestTwoLiveClusters.groovy | 22 +++++++++-----------
 6 files changed, 30 insertions(+), 37 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/f058495a/slider-core/src/main/java/org/apache/slider/client/SliderClient.java
----------------------------------------------------------------------
diff --git a/slider-core/src/main/java/org/apache/slider/client/SliderClient.java b/slider-core/src/main/java/org/apache/slider/client/SliderClient.java
index 88d07b5..99896c8 100644
--- a/slider-core/src/main/java/org/apache/slider/client/SliderClient.java
+++ b/slider-core/src/main/java/org/apache/slider/client/SliderClient.java
@@ -41,9 +41,9 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.registry.client.api.RegistryConstants;
 import org.apache.hadoop.yarn.registry.client.api.RegistryOperations;
-import static org.apache.hadoop.yarn.registry.client.binding.RegistryOperationUtils.*;
+import static org.apache.hadoop.yarn.registry.client.binding.RegistryUtils.*;
 
-import org.apache.hadoop.yarn.registry.client.binding.RegistryOperationUtils;
+import org.apache.hadoop.yarn.registry.client.binding.RegistryUtils;
 import org.apache.hadoop.yarn.registry.client.exceptions.NoRecordException;
 import org.apache.hadoop.yarn.registry.client.types.Endpoint;
 import org.apache.hadoop.yarn.registry.client.types.RegistryPathStatus;
@@ -1455,7 +1455,7 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
   }
 
   public String getUsername() throws IOException {
-    return RegistryOperationUtils.currentUser();
+    return RegistryUtils.currentUser();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/f058495a/slider-core/src/main/java/org/apache/slider/server/services/yarnregistry/YarnRegistryViewForProviders.java
----------------------------------------------------------------------
diff --git a/slider-core/src/main/java/org/apache/slider/server/services/yarnregistry/YarnRegistryViewForProviders.java b/slider-core/src/main/java/org/apache/slider/server/services/yarnregistry/YarnRegistryViewForProviders.java
index e5b150a..d0c891d 100644
--- a/slider-core/src/main/java/org/apache/slider/server/services/yarnregistry/YarnRegistryViewForProviders.java
+++ b/slider-core/src/main/java/org/apache/slider/server/services/yarnregistry/YarnRegistryViewForProviders.java
@@ -20,7 +20,7 @@ package org.apache.slider.server.services.yarnregistry;
 
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.registry.client.api.RegistryOperations;
-import org.apache.hadoop.yarn.registry.client.binding.RegistryOperationUtils;
+import org.apache.hadoop.yarn.registry.client.binding.RegistryUtils;
 import org.apache.hadoop.yarn.registry.client.binding.RegistryPathUtils;
 
 import org.apache.hadoop.yarn.registry.client.api.CreateFlags;
@@ -104,7 +104,7 @@ public class YarnRegistryViewForProviders {
       String serviceName,
       String componentName,
       ServiceRecord record) throws IOException {
-    String path = RegistryOperationUtils.componentPath(
+    String path = RegistryUtils.componentPath(
         user, serviceClass, serviceName, componentName);
     registryOperations.mknode(RegistryPathUtils.parentOf(path), true);
     registryOperations.create(path, record, CreateFlags.OVERWRITE);
@@ -122,7 +122,7 @@ public class YarnRegistryViewForProviders {
       String serviceClass,
       String serviceName,
       ServiceRecord record) throws IOException {
-    String path = RegistryOperationUtils.servicePath(
+    String path = RegistryUtils.servicePath(
         username, serviceClass, serviceName);
     registryOperations.mknode(RegistryPathUtils.parentOf(path), true);
     registryOperations.create(path, record, CreateFlags.OVERWRITE);
@@ -139,14 +139,14 @@ public class YarnRegistryViewForProviders {
       String serviceClass,
       String serviceName,
       ServiceRecord record) throws IOException {
-    String path = RegistryOperationUtils.servicePath(
+    String path = RegistryUtils.servicePath(
         user, serviceClass, serviceName);
     registryOperations.mknode(RegistryPathUtils.parentOf(path), true);
     registryOperations.create(path, record, CreateFlags.OVERWRITE);
   }
 
   public void rmComponent(String componentName) throws IOException {
-    String path = RegistryOperationUtils.componentPath(
+    String path = RegistryUtils.componentPath(
         user, sliderServiceclass, instanceName,
         componentName);
     registryOperations.delete(path, false);

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/f058495a/slider-core/src/test/groovy/org/apache/slider/agent/standalone/TestStandaloneYarnRegistryAM.groovy
----------------------------------------------------------------------
diff --git a/slider-core/src/test/groovy/org/apache/slider/agent/standalone/TestStandaloneYarnRegistryAM.groovy b/slider-core/src/test/groovy/org/apache/slider/agent/standalone/TestStandaloneYarnRegistryAM.groovy
index 0155232..b9238f4 100644
--- a/slider-core/src/test/groovy/org/apache/slider/agent/standalone/TestStandaloneYarnRegistryAM.groovy
+++ b/slider-core/src/test/groovy/org/apache/slider/agent/standalone/TestStandaloneYarnRegistryAM.groovy
@@ -24,14 +24,13 @@ import org.apache.hadoop.yarn.api.records.ApplicationReport
 import org.apache.hadoop.yarn.api.records.YarnApplicationState
 import org.apache.hadoop.yarn.conf.YarnConfiguration
 import org.apache.hadoop.yarn.registry.client.api.RegistryConstants
-import org.apache.hadoop.yarn.registry.client.binding.RecordOperations
 import org.apache.hadoop.yarn.registry.client.binding.RegistryTypeUtils
 import org.apache.hadoop.yarn.registry.client.services.RegistryOperationsClient
 import org.apache.hadoop.yarn.registry.client.types.RegistryPathStatus
 import org.apache.hadoop.yarn.registry.client.types.ServiceRecord
 import org.apache.slider.core.exceptions.UnknownApplicationInstanceException
 
-import static org.apache.hadoop.yarn.registry.client.binding.RegistryOperationUtils.*
+import static org.apache.hadoop.yarn.registry.client.binding.RegistryUtils.*
 import org.apache.slider.agent.AgentMiniClusterTestBase
 import org.apache.slider.api.ClusterNode
 import org.apache.slider.client.SliderClient
@@ -49,11 +48,7 @@ import org.apache.slider.server.appmaster.PublishedArtifacts
 import org.apache.slider.server.appmaster.web.rest.RestPaths
 import org.junit.Test
 
-import static org.apache.slider.core.registry.info.CustomRegistryConstants.AGENT_ONEWAY_REST_API
-import static org.apache.slider.core.registry.info.CustomRegistryConstants.AGENT_SECURE_REST_API
-import static org.apache.slider.core.registry.info.CustomRegistryConstants.AM_IPC_PROTOCOL
-import static org.apache.slider.core.registry.info.CustomRegistryConstants.MANAGEMENT_REST_API
-import static org.apache.slider.core.registry.info.CustomRegistryConstants.PUBLISHER_REST_API
+import static org.apache.slider.core.registry.info.CustomRegistryConstants.*
 
 /**
  *  work with a YARN registry
@@ -147,15 +142,15 @@ class TestStandaloneYarnRegistryAM extends AgentMiniClusterTestBase {
         
 
     def self = currentUser()
-    List<RegistryPathStatus> serviceTypes = registryService.listFull(homePathForUser(self))
+    def children = statChildren(registryService, homePathForUser(self));
+    Collection<RegistryPathStatus> serviceTypes = children.values()
     dumpCollection(serviceTypes)
 
     def recordsPath = serviceclassPath(self, SliderKeys.APP_TYPE)
 
-    Map < String, ServiceRecord > recordMap = RecordOperations.extractServiceRecords(
+    Map<String, ServiceRecord> recordMap = extractServiceRecords(
         registryService,
-        recordsPath,
-        registryService.listFull(recordsPath))
+        recordsPath);
     def serviceRecords = recordMap.values();
     dumpCollection(serviceRecords)
     assert serviceRecords.size() == 1

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/f058495a/slider-core/src/test/groovy/org/apache/slider/server/appmaster/web/rest/registry/TestRegistryRestResources.groovy
----------------------------------------------------------------------
diff --git a/slider-core/src/test/groovy/org/apache/slider/server/appmaster/web/rest/registry/TestRegistryRestResources.groovy b/slider-core/src/test/groovy/org/apache/slider/server/appmaster/web/rest/registry/TestRegistryRestResources.groovy
index 9dd3092..e6d31fd 100644
--- a/slider-core/src/test/groovy/org/apache/slider/server/appmaster/web/rest/registry/TestRegistryRestResources.groovy
+++ b/slider-core/src/test/groovy/org/apache/slider/server/appmaster/web/rest/registry/TestRegistryRestResources.groovy
@@ -25,7 +25,7 @@ import com.sun.jersey.api.client.WebResource
 import groovy.transform.CompileStatic
 import groovy.util.logging.Slf4j
 import org.apache.hadoop.yarn.registry.client.api.RegistryConstants
-import org.apache.hadoop.yarn.registry.client.binding.RegistryOperationUtils
+import org.apache.hadoop.yarn.registry.client.binding.RegistryUtils
 import org.apache.slider.api.StatusKeys
 import org.apache.slider.client.SliderClient
 import org.apache.slider.common.SliderKeys
@@ -113,7 +113,7 @@ class TestRegistryRestResources extends AgentTestBase {
     assert entryResource.service == null;
 
     // test the available GET URIs
-    def userhome = RegistryOperationUtils.homePathForCurrentUser()
+    def userhome = RegistryUtils.homePathForCurrentUser()
 
     def userServicesURL = appendToURL(registry_url,
         userhome + RegistryConstants.PATH_USER_SERVICES)

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/f058495a/slider-providers/hbase/slider-hbase-provider/src/test/groovy/org/apache/slider/providers/hbase/minicluster/live/TestHBaseMaster.groovy
----------------------------------------------------------------------
diff --git a/slider-providers/hbase/slider-hbase-provider/src/test/groovy/org/apache/slider/providers/hbase/minicluster/live/TestHBaseMaster.groovy b/slider-providers/hbase/slider-hbase-provider/src/test/groovy/org/apache/slider/providers/hbase/minicluster/live/TestHBaseMaster.groovy
index 422ee8d..84db30f 100644
--- a/slider-providers/hbase/slider-hbase-provider/src/test/groovy/org/apache/slider/providers/hbase/minicluster/live/TestHBaseMaster.groovy
+++ b/slider-providers/hbase/slider-hbase-provider/src/test/groovy/org/apache/slider/providers/hbase/minicluster/live/TestHBaseMaster.groovy
@@ -20,7 +20,7 @@ package org.apache.slider.providers.hbase.minicluster.live
 
 import groovy.transform.CompileStatic
 import groovy.util.logging.Slf4j
-import org.apache.hadoop.yarn.registry.client.binding.RegistryOperationUtils
+import org.apache.hadoop.yarn.registry.client.binding.RegistryUtils
 import org.apache.hadoop.yarn.registry.client.types.ServiceRecord
 import org.apache.slider.common.SliderXmlConfKeys
 import org.apache.slider.api.ClusterDescription
@@ -76,10 +76,10 @@ class TestHBaseMaster extends HBaseMiniClusterTestBase {
     
     // look up the registry entries for HBase 
     describe "service registry names"
-    Map<String, ServiceRecord> records = RegistryOperationUtils.listServiceRecords(
+    Map<String, ServiceRecord> records = RegistryUtils.listServiceRecords(
         client.registryOperations,
-        RegistryOperationUtils.serviceclassPath(
-            RegistryOperationUtils.homePathForCurrentUser(),
+        RegistryUtils.serviceclassPath(
+            RegistryUtils.homePathForCurrentUser(),
             HBaseKeys.HBASE_SERVICE_TYPE
         )
     )

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/f058495a/slider-providers/hbase/slider-hbase-provider/src/test/groovy/org/apache/slider/providers/hbase/minicluster/live/TestTwoLiveClusters.groovy
----------------------------------------------------------------------
diff --git a/slider-providers/hbase/slider-hbase-provider/src/test/groovy/org/apache/slider/providers/hbase/minicluster/live/TestTwoLiveClusters.groovy b/slider-providers/hbase/slider-hbase-provider/src/test/groovy/org/apache/slider/providers/hbase/minicluster/live/TestTwoLiveClusters.groovy
index 01a67c7..bb652b3 100644
--- a/slider-providers/hbase/slider-hbase-provider/src/test/groovy/org/apache/slider/providers/hbase/minicluster/live/TestTwoLiveClusters.groovy
+++ b/slider-providers/hbase/slider-hbase-provider/src/test/groovy/org/apache/slider/providers/hbase/minicluster/live/TestTwoLiveClusters.groovy
@@ -21,7 +21,7 @@ package org.apache.slider.providers.hbase.minicluster.live
 import groovy.transform.CompileStatic
 import groovy.util.logging.Slf4j
 import org.apache.hadoop.yarn.registry.client.api.RegistryOperations
-import org.apache.hadoop.yarn.registry.client.binding.RegistryOperationUtils
+import org.apache.hadoop.yarn.registry.client.binding.RegistryUtils
 import org.apache.hadoop.yarn.registry.client.binding.RegistryPathUtils
 import org.apache.hadoop.yarn.registry.client.types.ServiceRecord
 import org.apache.slider.common.SliderKeys
@@ -86,19 +86,17 @@ class TestTwoLiveClusters extends HBaseMiniClusterTestBase {
     // registry instances    def names = client.listRegistryNames(clustername)
     describe "service registry names"
     RegistryOperations registry = cluster2Client.registryOperations
-    def home = RegistryOperationUtils.homePathForCurrentUser()
+    def home = RegistryUtils.homePathForCurrentUser()
 
-    def userSliderInstancesPath = RegistryOperationUtils.serviceclassPath(
-        RegistryOperationUtils.currentUser(), SliderKeys.APP_TYPE)
+    def userSliderInstancesPath = RegistryUtils.serviceclassPath(
+        RegistryUtils.currentUser(), SliderKeys.APP_TYPE)
     
    
-    def names = RegistryOperationUtils.listServiceRecords(registry,
+    def names = RegistryUtils.listServiceRecords(registry,
         userSliderInstancesPath)
     dumpMap(names)
-    
-    def stats = registry.listFull(userSliderInstancesPath)
-    
-    dumpCollection(stats)
+    def statMap = RegistryUtils.statChildren(registry, userSliderInstancesPath)
+    assert statMap.size() == 2
     List<String> instanceIds = sliderClient.listRegisteredSliderInstances()
 
     dumpRegistryInstanceIDs(instanceIds)
@@ -112,11 +110,11 @@ class TestTwoLiveClusters extends HBaseMiniClusterTestBase {
     assert instances.size() == 2
 
 
-    def hbaseServicePath = RegistryOperationUtils.serviceclassPath(
-        RegistryOperationUtils.currentUser(),
+    def hbaseServicePath = RegistryUtils.serviceclassPath(
+        RegistryUtils.currentUser(),
         HBaseKeys.HBASE_SERVICE_TYPE)
     Map<String, ServiceRecord> hbaseInstances =
-        RegistryOperationUtils.listServiceRecords(registry,
+        RegistryUtils.listServiceRecords(registry,
             hbaseServicePath);
         
     assert hbaseInstances.size() == 2
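
For the read side, the tests above now go through RegistryUtils as well. A minimal Java sketch of listing published instances, assuming only the RegistryUtils methods exercised in these hunks (the sketch class name is hypothetical):

    import java.io.IOException;
    import java.util.Map;
    import org.apache.hadoop.yarn.registry.client.api.RegistryOperations;
    import org.apache.hadoop.yarn.registry.client.binding.RegistryUtils;
    import org.apache.hadoop.yarn.registry.client.types.ServiceRecord;

    class RegistryReadSketch {
      /** List every record published under a service class for the current user. */
      static Map<String, ServiceRecord> listInstances(RegistryOperations registry,
          String serviceClass) throws IOException {
        // serviceClass would be e.g. SliderKeys.APP_TYPE or HBaseKeys.HBASE_SERVICE_TYPE
        String path = RegistryUtils.serviceclassPath(RegistryUtils.currentUser(), serviceClass);
        // map of registry path to deserialized ServiceRecord, as the tests above dump and assert on
        return RegistryUtils.listServiceRecords(registry, path);
      }
    }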


[02/12] git commit: SLIDER-461 Asymmetry in default arguments

Posted by st...@apache.org.
SLIDER-461 Asymmetry in default arguments


Project: http://git-wip-us.apache.org/repos/asf/incubator-slider/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-slider/commit/964e4033
Tree: http://git-wip-us.apache.org/repos/asf/incubator-slider/tree/964e4033
Diff: http://git-wip-us.apache.org/repos/asf/incubator-slider/diff/964e4033

Branch: refs/heads/feature/SLIDER-149_Support_a_YARN_service_registry
Commit: 964e4033d5d0df4c6f15e016417af664f0f8ac31
Parents: bb4e7d3
Author: tedyu <yu...@gmail.com>
Authored: Fri Sep 26 09:14:48 2014 -0700
Committer: tedyu <yu...@gmail.com>
Committed: Fri Sep 26 09:14:48 2014 -0700

----------------------------------------------------------------------
 app-packages/hbase-win/appConfig-default.json                      | 2 +-
 app-packages/hbase/appConfig-default.json                          | 2 +-
 app-packages/storm/appConfig-default.json                          | 2 +-
 .../org/apache/slider/providers/agent/AgentProviderService.java    | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/964e4033/app-packages/hbase-win/appConfig-default.json
----------------------------------------------------------------------
diff --git a/app-packages/hbase-win/appConfig-default.json b/app-packages/hbase-win/appConfig-default.json
index 5b97e69..95e79ab 100644
--- a/app-packages/hbase-win/appConfig-default.json
+++ b/app-packages/hbase-win/appConfig-default.json
@@ -24,7 +24,7 @@
         "site.hbase-site.hbase.tmp.dir": "${AGENT_WORK_ROOT}/work/app/tmp",
         "site.hbase-site.hbase.local.dir": "${hbase.tmp.dir}/local",
         "site.hbase-site.hbase.zookeeper.quorum": "${ZK_HOST}",
-        "site.hbase-site.zookeeper.znode.parent": "${DEF_ZK_PATH}",
+        "site.hbase-site.zookeeper.znode.parent": "${DEFAULT_ZK_PATH}",
         "site.hbase-site.hbase.regionserver.info.port": "0",
         "site.hbase-site.hbase.master.info.port": "${HBASE_MASTER.ALLOCATED_PORT}",
         "site.hbase-site.hbase.regionserver.port": "0",

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/964e4033/app-packages/hbase/appConfig-default.json
----------------------------------------------------------------------
diff --git a/app-packages/hbase/appConfig-default.json b/app-packages/hbase/appConfig-default.json
index 78479db..c7952df 100644
--- a/app-packages/hbase/appConfig-default.json
+++ b/app-packages/hbase/appConfig-default.json
@@ -33,7 +33,7 @@
         "site.hbase-site.hbase.tmp.dir": "${AGENT_WORK_ROOT}/work/app/tmp",
         "site.hbase-site.hbase.local.dir": "${hbase.tmp.dir}/local",
         "site.hbase-site.hbase.zookeeper.quorum": "${ZK_HOST}",
-        "site.hbase-site.zookeeper.znode.parent": "${DEF_ZK_PATH}",
+        "site.hbase-site.zookeeper.znode.parent": "${DEFAULT_ZK_PATH}",
         "site.hbase-site.hbase.regionserver.info.port": "0",
         "site.hbase-site.hbase.master.info.port": "${HBASE_MASTER.ALLOCATED_PORT}",
         "site.hbase-site.hbase.regionserver.port": "0",

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/964e4033/app-packages/storm/appConfig-default.json
----------------------------------------------------------------------
diff --git a/app-packages/storm/appConfig-default.json b/app-packages/storm/appConfig-default.json
index 683b8cc..2b0eea0 100644
--- a/app-packages/storm/appConfig-default.json
+++ b/app-packages/storm/appConfig-default.json
@@ -29,7 +29,7 @@
     "site.storm-site.worker.childopts": "-Xmx768m -javaagent:${AGENT_WORK_ROOT}/app/install/apache-storm-${pkg.version}/external/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=${@//site/global/ganglia_server_host},port=${@//site/global/ganglia_server_port},wireformat31x=true,mode=multicast,config=${AGENT_WORK_ROOT}/app/install/apache-storm-${pkg.version}/external/storm-jmxetric/conf/jmxetric-conf.xml,process=Worker_%ID%_JVM",
     "site.storm-site.dev.zookeeper.path": "${AGENT_WORK_ROOT}/app/tmp/dev-storm-zookeeper",
     "site.storm-site.drpc.invocations.port": "0",
-    "site.storm-site.storm.zookeeper.root": "${DEF_ZK_PATH}",
+    "site.storm-site.storm.zookeeper.root": "${DEFAULT_ZK_PATH}",
     "site.storm-site.transactional.zookeeper.port": "null",
     "site.storm-site.nimbus.host": "${NIMBUS_HOST}",
     "site.storm-site.ui.port": "${STORM_UI_SERVER.ALLOCATED_PORT}",

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/964e4033/slider-core/src/main/java/org/apache/slider/providers/agent/AgentProviderService.java
----------------------------------------------------------------------
diff --git a/slider-core/src/main/java/org/apache/slider/providers/agent/AgentProviderService.java b/slider-core/src/main/java/org/apache/slider/providers/agent/AgentProviderService.java
index 647c6a8..755b9d8 100644
--- a/slider-core/src/main/java/org/apache/slider/providers/agent/AgentProviderService.java
+++ b/slider-core/src/main/java/org/apache/slider/providers/agent/AgentProviderService.java
@@ -1427,7 +1427,7 @@ public class AgentProviderService extends AbstractProviderService implements
     tokens.put("${NN_URI}", nnuri);
     tokens.put("${NN_HOST}", URI.create(nnuri).getHost());
     tokens.put("${ZK_HOST}", appConf.get(OptionKeys.ZOOKEEPER_HOSTS));
-    tokens.put("${DEF_ZK_PATH}", appConf.get(OptionKeys.ZOOKEEPER_PATH));
+    tokens.put("${DEFAULT_ZK_PATH}", appConf.get(OptionKeys.ZOOKEEPER_PATH));
     tokens.put("${DEFAULT_DATA_DIR}", getAmState()
         .getInternalsSnapshot()
         .getGlobalOptions()
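
The fix here is only a token rename: the provider now populates ${DEFAULT_ZK_PATH}, so the sample appConfig files must use the same spelling. Purely as an illustration of what that token map drives (this is not the provider's actual substitution code, just a sketch of the idea):

    import java.util.Map;

    class TokenSketch {
      /** Expand "${TOKEN}" placeholders in a config value; illustrative only. */
      static String expand(String value, Map<String, String> tokens) {
        for (Map.Entry<String, String> e : tokens.entrySet()) {
          if (e.getValue() != null) {
            value = value.replace(e.getKey(), e.getValue()); // keys already include the ${...} wrapper
          }
        }
        return value;
      }
    }

With only the ${DEFAULT_ZK_PATH} key populated, an appConfig still spelling the token ${DEF_ZK_PATH} would be left unexpanded, which is why the three default appConfig files change in the same commit.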


[07/12] git commit: SLIDER-448 AMFailuresIT needs vagrant

Posted by st...@apache.org.
SLIDER-448 AMFailuresIT needs vagrant


Project: http://git-wip-us.apache.org/repos/asf/incubator-slider/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-slider/commit/ba1e79b9
Tree: http://git-wip-us.apache.org/repos/asf/incubator-slider/tree/ba1e79b9
Diff: http://git-wip-us.apache.org/repos/asf/incubator-slider/diff/ba1e79b9

Branch: refs/heads/feature/SLIDER-149_Support_a_YARN_service_registry
Commit: ba1e79b9e0447bdf5b5173b7d4d17477e33fad19
Parents: 2a3a4ed
Author: Gour Saha <go...@apache.org>
Authored: Mon Sep 29 16:55:54 2014 -0700
Committer: Gour Saha <go...@apache.org>
Committed: Mon Sep 29 16:55:54 2014 -0700

----------------------------------------------------------------------
 .../org/apache/slider/common/SliderXMLConfKeysForTesting.java | 1 +
 .../slider/funtest/framework/AgentCommandTestBase.groovy      | 7 +++++++
 .../org/apache/slider/funtest/lifecycle/AMFailuresIT.groovy   | 6 ++++++
 src/test/clusters/remote/slider/slider-client.xml             | 6 ++++++
 4 files changed, 20 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/ba1e79b9/slider-core/src/main/java/org/apache/slider/common/SliderXMLConfKeysForTesting.java
----------------------------------------------------------------------
diff --git a/slider-core/src/main/java/org/apache/slider/common/SliderXMLConfKeysForTesting.java b/slider-core/src/main/java/org/apache/slider/common/SliderXMLConfKeysForTesting.java
index 4c56240..8886cb7 100644
--- a/slider-core/src/main/java/org/apache/slider/common/SliderXMLConfKeysForTesting.java
+++ b/slider-core/src/main/java/org/apache/slider/common/SliderXMLConfKeysForTesting.java
@@ -62,6 +62,7 @@ public interface SliderXMLConfKeysForTesting {
   String KEY_TEST_AGENT_ENABLED = "slider.test.agent.enabled";
   String KEY_AGENTTESTS_QUEUE_LABELED_DEFINED = "slider.test.agent.labeled.queue.enabled";
   String KEY_AGENTTESTS_LABELS_RED_BLUE_DEFINED = "slider.test.agent.labels.defined";
+  String KEY_AGENTTESTS_AM_FAILURES_ENABLED = "slider.test.agent.am.failures.enabled";
 
   int DEFAULT_AGENT_LAUNCH_TIME_SECONDS = 60 * 3;
 

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/ba1e79b9/slider-funtest/src/main/groovy/org/apache/slider/funtest/framework/AgentCommandTestBase.groovy
----------------------------------------------------------------------
diff --git a/slider-funtest/src/main/groovy/org/apache/slider/funtest/framework/AgentCommandTestBase.groovy b/slider-funtest/src/main/groovy/org/apache/slider/funtest/framework/AgentCommandTestBase.groovy
index cf32e94..602fe2c 100644
--- a/slider-funtest/src/main/groovy/org/apache/slider/funtest/framework/AgentCommandTestBase.groovy
+++ b/slider-funtest/src/main/groovy/org/apache/slider/funtest/framework/AgentCommandTestBase.groovy
@@ -39,6 +39,7 @@ implements FuntestProperties, Arguments, SliderExitCodes, SliderActions {
   public static final boolean AGENTTESTS_ENABLED
   public static final boolean AGENTTESTS_QUEUE_LABELED_DEFINED
   public static final boolean AGENTTESTS_LABELS_RED_BLUE_DEFINED
+  public static final boolean AGENTTESTS_AM_FAILURES_ENABLED
   private static String TEST_APP_PKG_DIR_PROP = "test.app.pkg.dir"
   private static String TEST_APP_PKG_FILE_PROP = "test.app.pkg.file"
   private static String TEST_APP_PKG_NAME_PROP = "test.app.pkg.name"
@@ -65,6 +66,8 @@ implements FuntestProperties, Arguments, SliderExitCodes, SliderActions {
         SLIDER_CONFIG.getBoolean(KEY_AGENTTESTS_QUEUE_LABELED_DEFINED, false)
     AGENTTESTS_LABELS_RED_BLUE_DEFINED =
         SLIDER_CONFIG.getBoolean(KEY_AGENTTESTS_LABELS_RED_BLUE_DEFINED, false)
+    AGENTTESTS_AM_FAILURES_ENABLED = 
+        SLIDER_CONFIG.getBoolean(KEY_AGENTTESTS_AM_FAILURES_ENABLED, false)
   }
 
   protected String getAppResource() {
@@ -90,6 +93,10 @@ implements FuntestProperties, Arguments, SliderExitCodes, SliderActions {
     assume(AGENTTESTS_LABELS_RED_BLUE_DEFINED, "Custom node labels not defined")
   }
 
+  public static void assumeAmFailureTestsEnabled() {
+    assume(AGENTTESTS_AM_FAILURES_ENABLED, "AM failure tests are disabled")
+  }
+
   @BeforeClass
   public static void setupAgent() {
     assumeAgentTestsEnabled()

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/ba1e79b9/slider-funtest/src/test/groovy/org/apache/slider/funtest/lifecycle/AMFailuresIT.groovy
----------------------------------------------------------------------
diff --git a/slider-funtest/src/test/groovy/org/apache/slider/funtest/lifecycle/AMFailuresIT.groovy b/slider-funtest/src/test/groovy/org/apache/slider/funtest/lifecycle/AMFailuresIT.groovy
index 852121f..9db0fec 100644
--- a/slider-funtest/src/test/groovy/org/apache/slider/funtest/lifecycle/AMFailuresIT.groovy
+++ b/slider-funtest/src/test/groovy/org/apache/slider/funtest/lifecycle/AMFailuresIT.groovy
@@ -33,6 +33,7 @@ import org.apache.slider.funtest.framework.AgentCommandTestBase
 import org.apache.slider.funtest.framework.FuntestProperties
 import org.apache.slider.funtest.framework.SliderShell
 import org.junit.After
+import org.junit.BeforeClass;
 import org.junit.Test
 
 @CompileStatic
@@ -46,6 +47,11 @@ implements FuntestProperties, Arguments, SliderExitCodes, SliderActions {
   public static final String VAGRANT_CWD = "vagrant.current.working.dir"
   File sshkey
 
+  @BeforeClass
+  public static void setupAMTests() {
+    assumeAmFailureTestsEnabled()
+  }
+
   @After
   public void destroyCluster() {
     cleanup(APPLICATION_NAME)

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/ba1e79b9/src/test/clusters/remote/slider/slider-client.xml
----------------------------------------------------------------------
diff --git a/src/test/clusters/remote/slider/slider-client.xml b/src/test/clusters/remote/slider/slider-client.xml
index ce261e9..f8d88eb 100644
--- a/src/test/clusters/remote/slider/slider-client.xml
+++ b/src/test/clusters/remote/slider/slider-client.xml
@@ -67,6 +67,12 @@
   </property>
 
   <property>
+    <name>slider.test.agent.am.failures.enabled</name>
+    <value>false</value>
+    <description>AM failure test scenarios - if enabled vagrant.current.working.dir needs to be set as well</description>
+  </property>
+
+  <property>
     <name>slider.test.agent.tar</name>
     <value>hdfs://c6403.ambari.apache.org:8020/slider/agent/slider-agent.tar.gz</value>
   </property>
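
In plain JUnit terms the guard added above looks roughly like the following; a minimal sketch assuming a Hadoop Configuration with slider-client.xml on its resource path (class and method names here are hypothetical, the key is the one introduced in this commit):

    import org.apache.hadoop.conf.Configuration;
    import org.junit.Assume;
    import org.junit.BeforeClass;
    import org.junit.Test;

    public class AmFailureGuardSketch {
      static final String KEY = "slider.test.agent.am.failures.enabled";

      @BeforeClass
      public static void checkEnabled() {
        Configuration conf = new Configuration(); // the real tests read SLIDER_CONFIG instead
        // assumeTrue skips (rather than fails) the whole class when the key is false or unset
        Assume.assumeTrue(conf.getBoolean(KEY, false));
      }

      @Test
      public void amFailureScenario() {
        // the real scenario also needs vagrant.current.working.dir pointing at the cluster VMs
      }
    }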


[05/12] git commit: SLIDER-158. Add a slider diagnostics command (Thomas Liu via smohanty)

Posted by st...@apache.org.
SLIDER-158. Add a slider diagnostics command (Thomas Liu via smohanty)


Project: http://git-wip-us.apache.org/repos/asf/incubator-slider/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-slider/commit/f8210a7c
Tree: http://git-wip-us.apache.org/repos/asf/incubator-slider/tree/f8210a7c
Diff: http://git-wip-us.apache.org/repos/asf/incubator-slider/diff/f8210a7c

Branch: refs/heads/feature/SLIDER-149_Support_a_YARN_service_registry
Commit: f8210a7c00c54054dc690abbe5088cd4ddea4e3e
Parents: ce647d3
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Sat Sep 27 00:22:12 2014 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Sat Sep 27 00:22:26 2014 -0700

----------------------------------------------------------------------
 slider-core/pom.xml                             |   6 +
 .../org/apache/slider/client/SliderClient.java  | 280 ++++++++++++++++++-
 .../common/params/ActionDiagnosticArgs.java     |  66 +++++
 .../apache/slider/common/params/Arguments.java  |   7 +
 .../apache/slider/common/params/ClientArgs.java |  11 +-
 .../slider/common/params/SliderActions.java     |   4 +
 .../apache/slider/common/tools/SliderUtils.java | 108 +++++++
 .../slider/providers/agent/AgentKeys.java       |   5 +
 8 files changed, 485 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/f8210a7c/slider-core/pom.xml
----------------------------------------------------------------------
diff --git a/slider-core/pom.xml b/slider-core/pom.xml
index 39c17c6..7af8bd7 100644
--- a/slider-core/pom.xml
+++ b/slider-core/pom.xml
@@ -448,6 +448,12 @@
     </dependency>
 
     <dependency>
+      <groupId>org.codehaus.jettison</groupId>
+      <artifactId>jettison</artifactId>
+      <version>1.1</version>
+    </dependency>
+
+    <dependency>
       <groupId>org.powermock</groupId>
       <artifactId>powermock-core</artifactId>
       <version>1.5</version>

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/f8210a7c/slider-core/src/main/java/org/apache/slider/client/SliderClient.java
----------------------------------------------------------------------
diff --git a/slider-core/src/main/java/org/apache/slider/client/SliderClient.java b/slider-core/src/main/java/org/apache/slider/client/SliderClient.java
index d57f7dc..06c37ba 100644
--- a/slider-core/src/main/java/org/apache/slider/client/SliderClient.java
+++ b/slider-core/src/main/java/org/apache/slider/client/SliderClient.java
@@ -19,6 +19,7 @@
 package org.apache.slider.client;
 
 import com.google.common.annotations.VisibleForTesting;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -33,6 +34,8 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -48,6 +51,7 @@ import org.apache.slider.common.SliderExitCodes;
 import org.apache.slider.common.SliderKeys;
 import org.apache.slider.common.params.AbstractActionArgs;
 import org.apache.slider.common.params.AbstractClusterBuildingActionArgs;
+import org.apache.slider.common.params.ActionDiagnosticArgs;
 import org.apache.slider.common.params.ActionInstallPackageArgs;
 import org.apache.slider.common.params.ActionAMSuicideArgs;
 import org.apache.slider.common.params.ActionCreateArgs;
@@ -91,6 +95,7 @@ import org.apache.slider.core.launch.LaunchedApplication;
 import org.apache.slider.core.launch.RunningApplication;
 import org.apache.slider.core.main.RunService;
 import org.apache.slider.core.persist.ConfPersister;
+import org.apache.slider.core.persist.JsonSerDeser;
 import org.apache.slider.core.persist.LockAcquireFailedException;
 import org.apache.slider.core.registry.YARNRegistryClient;
 import org.apache.slider.core.registry.docstore.ConfigFormat;
@@ -115,9 +120,15 @@ import org.apache.slider.server.services.utility.AbstractSliderLaunchedService;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.ZooDefs;
+import org.codehaus.jackson.map.DeserializationConfig;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.SerializationConfig;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.BufferedWriter;
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.FileWriter;
@@ -125,6 +136,7 @@ import java.io.IOException;
 import java.io.StringWriter;
 import java.io.Writer;
 import java.net.InetSocketAddress;
+import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -338,6 +350,8 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     } else if (ACTION_VERSION.equals(action)) {
 
       exitCode = actionVersion();
+    } else if (ACTION_DIAGNOSTIC.equals(action)) {
+        exitCode = actionDiagnostic(serviceArgs.getActionDiagnosticArgs());
     } else {
       throw new SliderException(EXIT_UNIMPLEMENTED,
           "Unimplemented: " + action);
@@ -346,7 +360,7 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     return exitCode;
   }
 
-  /**
+/**
    * Perform everything needed to init the hadoop binding.
    * This assumes that the service is already  in inited or started state
    * @throws IOException
@@ -2386,6 +2400,268 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     return sids;
   }
 
+	/**
+	 * diagnostic operation
+	 *
+	 * @param clusterName
+	 *            application name
+	 * @param diagnosticArgs
+	 *            diagnostic arguments
+	 * @return 0 for success, -1 for some issues that aren't errors, just
+	 *         failures to retrieve information (e.g. no application name
+	 *         specified)
+	 * @throws YarnException
+	 *             YARN problems
+	 * @throws IOException
+	 *             Network or other problems
+	 */
+	private int actionDiagnostic(ActionDiagnosticArgs diagnosticArgs) {
+		try {
+			if (diagnosticArgs.client) {
+				actionDiagnosticClient();
+			} else if (SliderUtils.isSet(diagnosticArgs.application)) {
+				actionDiagnosticApplication(diagnosticArgs);
+			} else if (SliderUtils.isSet(diagnosticArgs.slider)) {
+				actionDiagnosticSlider(diagnosticArgs);
+			} else if (diagnosticArgs.yarn) {
+				actionDiagnosticYarn(diagnosticArgs);
+			} else if (diagnosticArgs.credentials) {
+				actionDiagnosticCredentials();
+			} else if (SliderUtils.isSet(diagnosticArgs.all)) {
+				actionDiagnosticAll(diagnosticArgs);
+			} else if (SliderUtils.isSet(diagnosticArgs.level)) {
+				actionDiagnosticIntelligent(diagnosticArgs);
+			} else {
+				// it's an unknown command
+		        log.info(ActionDiagnosticArgs.USAGE);
+		        return EXIT_USAGE;
+			}
+		} catch (Exception e) {
+			log.error(e.toString());
+			return EXIT_FALSE;
+		}
+		return EXIT_SUCCESS;
+	}
+
+	private void actionDiagnosticIntelligent(ActionDiagnosticArgs diagnosticArgs)
+			throws YarnException, IOException, URISyntaxException {
+		// not using member variable clustername because we want to place
+		// application name after --application option and member variable
+		// cluster name has to be put behind action
+		String clusterName = diagnosticArgs.level;
+
+		try {
+			SliderUtils.validateClientConfigFile();
+			log.info("Slider-client.xml is accessible");
+		} catch (IOException e) {
+			// we are catching exceptions here because those are indication of
+			// validation result, and we need to print them here
+			log.error("validation of slider-client.xml fails because: "
+					+ e.toString());
+			return;
+		}
+		SliderClusterOperations clusterOperations = createClusterOperations(clusterName);
+		// cluster not found exceptions will be thrown upstream
+		ClusterDescription clusterDescription = clusterOperations
+				.getClusterDescription();
+		log.info("Slider AppMaster is accessible");
+		
+		if (clusterDescription.state == ClusterDescription.STATE_LIVE) {
+			AggregateConf instanceDefinition = clusterOperations
+					.getInstanceDefinition();
+			String imagePath = instanceDefinition.getInternalOperations().get(
+					InternalKeys.INTERNAL_APPLICATION_IMAGE_PATH);
+			//if null, that means slider uploaded the agent tarball for the user
+			//and we need to use the path where slider put it
+			if(imagePath == null){
+				ApplicationReport appReport = YARNRegistryClient.findInstance(clusterName);
+				Path path1 = sliderFileSystem.getTempPathForCluster(clusterName);
+				Path subPath = new Path(path1, appReport.getApplicationId().toString() + "/am");
+				imagePath = subPath.toString();
+			}
+			try {
+				SliderUtils.validateHDFSFile(sliderFileSystem, imagePath);
+				log.info("Slider agent tarball is properly installed");
+			} catch (IOException e) {
+				log.error("can not find or open agent tar ball: " + e.toString());
+				return;
+			}
+			String pkgTarballPath = instanceDefinition.getAppConfOperations()
+					.getGlobalOptions().getMandatoryOption(AgentKeys.APP_DEF);
+			try {
+				SliderUtils.validateHDFSFile(sliderFileSystem, pkgTarballPath);
+				log.info("Application tarball is properly installed");
+			} catch (IOException e) {
+				log.error("can not find or open application tar ball: "
+						+ e.toString());
+				return;
+			}
+		}
+	}
+
+	private void actionDiagnosticAll(ActionDiagnosticArgs diagnosticArgs)
+			throws IOException, YarnException {
+		//assign application name from param to each sub diagnostic function
+		diagnosticArgs.application = diagnosticArgs.all;
+		diagnosticArgs.slider = diagnosticArgs.all;
+		actionDiagnosticClient();
+		actionDiagnosticApplication(diagnosticArgs);
+		actionDiagnosticSlider(diagnosticArgs);
+		actionDiagnosticYarn(diagnosticArgs);
+		actionDiagnosticCredentials();
+	}
+
+	private void actionDiagnosticCredentials() throws BadConfigException, IOException
+			 {
+		if (SliderUtils.isHadoopClusterSecure(SliderUtils
+				.loadClientConfigurationResource())) {
+			String credentialCacheFileDescription = null;
+			try {
+				credentialCacheFileDescription = SliderUtils
+						.checkCredentialCacheFile();
+			} catch (BadConfigException e) {
+				log.error("The credential config is not valid: " + e.toString());
+				throw e;
+			} catch (IOException e) {
+				log.error("Unable to read the credential file: " + e.toString());
+				throw e;
+			}
+			log.info("Credential cache file for the current user: "
+					+ credentialCacheFileDescription);
+		} else {
+			log.info("the cluster is not in secure mode");
+		}
+	}
+
+	private void actionDiagnosticYarn(ActionDiagnosticArgs diagnosticArgs) throws IOException, YarnException {
+		JSONObject converter = null;
+		log.info("the nodes in the YARN cluster have the following states: ");
+		List<NodeReport> yarnClusterInfo;
+		try {
+			yarnClusterInfo = yarnClient.getNodeReports(NodeState.RUNNING);
+		} catch (YarnException e1) {
+			log.error("Exception happened when fetching node report from the YARN cluster: " + e1.toString());
+			throw e1;
+		} catch (IOException e1) {
+			log.error("Network problem happened when fetching node report from the YARN cluster: " + e1.toString());
+			throw e1;
+		}
+		for(NodeReport nodeReport : yarnClusterInfo){
+			log.info(nodeReport.toString());
+		}
+		
+		if (diagnosticArgs.verbose) {
+			Writer configWriter = new StringWriter();
+			try {
+				Configuration.dumpConfiguration(yarnClient.getConfig(), configWriter);
+			} catch (IOException e1) {
+				log.error("Network problem happened when retrieving YARN config from YARN: " + e1.toString());
+				throw e1;
+			}
+			try {
+				converter = new JSONObject(configWriter.toString());
+				log.info("the configuration of the YARN cluster is: "
+						+ converter.toString(2));
+				
+			} catch (JSONException e) {
+				log.error("JSONException happened during parsing response from YARN: " + e.toString());
+			}
+		}
+	}
+
+	private void actionDiagnosticSlider(ActionDiagnosticArgs diagnosticArgs) throws YarnException, IOException
+			{
+		// not using member variable clustername because we want to place
+		// application name after --application option and member variable
+		// cluster name has to be put behind action
+		String clusterName = diagnosticArgs.slider;
+		SliderClusterOperations clusterOperations;
+		AggregateConf instanceDefinition = null;
+		try {
+			clusterOperations = createClusterOperations(clusterName);
+			instanceDefinition = clusterOperations
+					.getInstanceDefinition();
+		} catch (YarnException e) {
+			log.error("Exception happened when retrieving instance definition from YARN: " + e.toString());
+			throw e;
+		} catch (IOException e) {
+			log.error("Network problem happened when retrieving instance definition from YARN: " + e.toString());
+			throw e;
+		}
+		String imagePath = instanceDefinition.getInternalOperations().get(
+				InternalKeys.INTERNAL_APPLICATION_IMAGE_PATH);
+		//if null, it will be uploaded by Slider and thus at slider's path
+		if(imagePath == null){
+			ApplicationReport appReport = YARNRegistryClient.findInstance(clusterName);
+			Path path1 = sliderFileSystem.getTempPathForCluster(clusterName);
+			Path subPath = new Path(path1, appReport.getApplicationId().toString() + "/am");
+			imagePath = subPath.toString();
+		}
+		log.info("The path of slider agent tarball on HDFS is: " + imagePath);
+	}
+
+	private void actionDiagnosticApplication(ActionDiagnosticArgs diagnosticArgs) throws YarnException, IOException
+			{
+		// not using member variable clustername because we want to place
+		// application name after --application option and member variable
+		// cluster name has to be put behind action
+		String clusterName = diagnosticArgs.application;
+		SliderClusterOperations clusterOperations;
+		AggregateConf instanceDefinition = null;
+		try {
+			clusterOperations = createClusterOperations(clusterName);
+			instanceDefinition = clusterOperations
+					.getInstanceDefinition();
+		} catch (YarnException e) {
+			log.error("Exception happened when retrieving instance definition from YARN: " + e.toString());
+			throw e;
+		} catch (IOException e) {
+			log.error("Network problem happened when retrieving instance definition from YARN: " + e.toString());
+			throw e;
+		}
+		String clusterDir = instanceDefinition.getAppConfOperations()
+				.getGlobalOptions().get(AgentKeys.APP_ROOT);
+		String pkgTarball = instanceDefinition.getAppConfOperations()
+				.getGlobalOptions().get(AgentKeys.APP_DEF);
+		String runAsUser = instanceDefinition.getAppConfOperations()
+				.getGlobalOptions().get(AgentKeys.RUNAS_USER);
+
+		log.info("The location of the cluster instance directory in HDFS is: "
+				+ clusterDir);
+		log.info("The name of the application package tarball on HDFS is: "
+				+ pkgTarball);
+		log.info("The runas user of the application in the cluster is: "
+				+ runAsUser);
+
+		if (diagnosticArgs.verbose) {
+			log.info("App config of the application: "
+					+ instanceDefinition.getAppConf().toJson());
+			log.info("Resource config of the application: "
+					+ instanceDefinition.getResources().toJson());
+		}
+	}
+
+	private void actionDiagnosticClient() throws SliderException, IOException {
+		String currentCommandPath = SliderUtils.getCurrentCommandPath();
+		SliderVersionInfo.loadAndPrintVersionInfo(log);
+		String clientConfigPath = SliderUtils.getClientConfigPath();
+		String jdkInfo = SliderUtils.getJDKInfo();
+		log.info("The slider command path: " + currentCommandPath);
+		log.info("The slider-client.xml used by current running command path: "
+				+ clientConfigPath);
+		log.info(jdkInfo);
+
+		try {
+			SliderUtils.validateSliderClientEnvironment(log);
+		} catch (SliderException e) {
+			log.error(e.toString());
+			throw e;
+		} catch (IOException e) {
+			log.error(e.toString());
+			throw e;
+		}
+	}
+
   private void logInstance(ServiceInstanceData instance,
       boolean verbose) {
     if (!verbose) {
@@ -2612,3 +2888,5 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     System.out.append(src);
   }
 }
+
+
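
For orientation, the new action wires up to the CLI as follows. These invocations are illustrative, assuming the usual bin/slider launcher script; the flags themselves come from the Arguments and ActionDiagnosticArgs changes below in this commit:

    slider diagnostic --client                    # slider version, client config path, JDK info
    slider diagnostic --application <appname>     # cluster instance dir, app package, runas user
    slider diagnostic --slider <appname>          # where the agent tarball lives on HDFS
    slider diagnostic --yarn --verbose            # running node reports plus the YARN config dump
    slider diagnostic --credentials               # kerberos credential cache, when security is on
    slider diagnostic --all <appname>             # all of the above in one pass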

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/f8210a7c/slider-core/src/main/java/org/apache/slider/common/params/ActionDiagnosticArgs.java
----------------------------------------------------------------------
diff --git a/slider-core/src/main/java/org/apache/slider/common/params/ActionDiagnosticArgs.java b/slider-core/src/main/java/org/apache/slider/common/params/ActionDiagnosticArgs.java
new file mode 100644
index 0000000..b2fde07
--- /dev/null
+++ b/slider-core/src/main/java/org/apache/slider/common/params/ActionDiagnosticArgs.java
@@ -0,0 +1,66 @@
+package org.apache.slider.common.params;
+
+import com.beust.jcommander.Parameter;
+import com.beust.jcommander.Parameters;
+
+@Parameters(commandNames = {SliderActions.ACTION_DIAGNOSTIC},
+commandDescription = SliderActions.DESCRIBE_ACTION_DIAGNOSTIC)
+public class ActionDiagnosticArgs extends AbstractActionArgs
+{
+	  public static final String USAGE =
+	      "Usage: " + SliderActions.ACTION_DIAGNOSTIC
+	      + Arguments.ARG_CLIENT + "| "
+	      + Arguments.ARG_SLIDER + " <appname> " + "| "
+	      + Arguments.ARG_APPLICATION + " <appname> " + "| "
+	      + Arguments.ARG_YARN + "| "
+	      + Arguments.ARG_CREDENTIALS + "| "
+	      + Arguments.ARG_ALL + " <appname> " + "| "
+	      + Arguments.ARG_LEVEL + " <appname> "
+	      + " [" + Arguments.ARG_VERBOSE + "] ";
+	  
+	@Override
+	public String getActionName() {
+		return SliderActions.ACTION_DIAGNOSTIC;
+	}
+	
+	  @Parameter(names = {ARG_CLIENT}, 
+	      description = "print configuration of the slider client")
+	  public boolean client = false;
+	
+	  @Parameter(names = {ARG_SLIDER}, 
+	      description = "print configuration of the running slider app master")
+	  public String slider;
+	
+	  @Parameter(names = {ARG_APPLICATION}, 
+	      description = "print configuration of the running application")
+	  public String application;
+
+	  @Parameter(names = {ARG_VERBOSE}, 
+	      description = "print out information in details")
+	  public boolean verbose = false;
+
+	  @Parameter(names = {ARG_YARN}, 
+	      description = "print configuration of the YARN cluster")
+	  public boolean yarn = false;
+	
+	  @Parameter(names = {ARG_CREDENTIALS}, 
+	      description = "print credentials of the current user")
+	  public boolean credentials = false;
+	
+	  @Parameter(names = {ARG_ALL}, 
+	      description = "print all of the information above")
+	  public String all;
+	
+	  @Parameter(names = {ARG_LEVEL}, 
+	      description = "diagnose the application intelligently")
+	  public String level;
+
+	  /**
+	   * Get the min #of params expected
+	   * @return the min number of params in the {@link #parameters} field
+	   */
+	  @Override
+	  public int getMinParams() {
+	    return 0;
+	  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/f8210a7c/slider-core/src/main/java/org/apache/slider/common/params/Arguments.java
----------------------------------------------------------------------
diff --git a/slider-core/src/main/java/org/apache/slider/common/params/Arguments.java b/slider-core/src/main/java/org/apache/slider/common/params/Arguments.java
index b119245..2b45ce8 100644
--- a/slider-core/src/main/java/org/apache/slider/common/params/Arguments.java
+++ b/slider-core/src/main/java/org/apache/slider/common/params/Arguments.java
@@ -78,6 +78,13 @@ public interface Arguments {
   String ARG_ZKPORT = "--zkport";
   String ARG_ZKHOSTS = "--zkhosts";
   String ARG_REPLACE_PKG = "--replacepkg";
+  String ARG_CLIENT = "--client";
+  String ARG_SLIDER = "--slider";
+  String ARG_APPLICATION = "--application";
+  String ARG_YARN = "--yarn";
+  String ARG_CREDENTIALS = "--credentials";
+  String ARG_ALL = "--all";
+  String ARG_LEVEL = "--level";
   String ARG_QUEUE = "--queue";
 
 

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/f8210a7c/slider-core/src/main/java/org/apache/slider/common/params/ClientArgs.java
----------------------------------------------------------------------
diff --git a/slider-core/src/main/java/org/apache/slider/common/params/ClientArgs.java b/slider-core/src/main/java/org/apache/slider/common/params/ClientArgs.java
index 7173a85..cd981b1 100644
--- a/slider-core/src/main/java/org/apache/slider/common/params/ClientArgs.java
+++ b/slider-core/src/main/java/org/apache/slider/common/params/ClientArgs.java
@@ -62,6 +62,7 @@ public class ClientArgs extends CommonArgs {
   private final ActionThawArgs actionThawArgs = new ActionThawArgs();
   private final ActionVersionArgs actionVersionArgs = new ActionVersionArgs();
   private final ActionHelpArgs actionHelpArgs = new ActionHelpArgs();
+  private final ActionDiagnosticArgs actionDiagnosticArgs = new ActionDiagnosticArgs();
 
 
   public ClientArgs(String[] args) {
@@ -92,7 +93,8 @@ public class ClientArgs extends CommonArgs {
       actionThawArgs,
       actionHelpArgs,
       actionVersionArgs,
-      actionInstallPackageArgs
+      actionInstallPackageArgs,
+      actionDiagnosticArgs
               );
   }
 
@@ -112,6 +114,10 @@ public class ClientArgs extends CommonArgs {
     }
   }
 
+  public ActionDiagnosticArgs getActionDiagnosticArgs() {
+	return actionDiagnosticArgs;
+  }
+
   public AbstractClusterBuildingActionArgs getBuildingActionArgs() {
     return buildingActionArgs;
   }
@@ -240,6 +246,9 @@ public class ClientArgs extends CommonArgs {
     } else if (SliderActions.ACTION_VERSION.equals(action)) {
       bindCoreAction(actionVersionArgs);
 
+    } else if (SliderActions.ACTION_DIAGNOSTIC.equals(action)) {
+        bindCoreAction(actionDiagnosticArgs);
+
     } else if (action == null || action.isEmpty()) {
       throw new BadCommandArgumentsException(ErrorStrings.ERROR_NO_ACTION);
 

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/f8210a7c/slider-core/src/main/java/org/apache/slider/common/params/SliderActions.java
----------------------------------------------------------------------
diff --git a/slider-core/src/main/java/org/apache/slider/common/params/SliderActions.java b/slider-core/src/main/java/org/apache/slider/common/params/SliderActions.java
index 964f184..29fd098 100644
--- a/slider-core/src/main/java/org/apache/slider/common/params/SliderActions.java
+++ b/slider-core/src/main/java/org/apache/slider/common/params/SliderActions.java
@@ -44,6 +44,7 @@ public interface SliderActions {
   String ACTION_THAW = "start";
   String ACTION_USAGE = "usage";
   String ACTION_VERSION = "version";
+  String ACTION_DIAGNOSTIC = "diagnostic";
   String ACTION_INSTALL_PACKAGE = "install-package";
   String DESCRIBE_ACTION_AM_SUICIDE =
     "Tell the Slider Application Master to simulate a process failure by terminating itself";
@@ -78,4 +79,7 @@ public interface SliderActions {
   String DESCRIBE_ACTION_VERSION =
                         "Print the Slider version information";
   String DESCRIBE_ACTION_INSTALL_PACKAGE = "Install the application package in the home directory under sub-folder packages";
+  String DESCRIBE_ACTION_DIAGNOSTIC = "Diagnose the configuration of the running slider application and slider client";
+  
 }
+

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/f8210a7c/slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
----------------------------------------------------------------------
diff --git a/slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java b/slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
index 263460d..5313ab9 100644
--- a/slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
+++ b/slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
@@ -19,6 +19,7 @@
 package org.apache.slider.common.tools;
 
 import com.google.common.base.Preconditions;
+
 import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
 import org.apache.commons.compress.archivers.zip.ZipArchiveInputStream;
 import org.apache.commons.io.output.ByteArrayOutputStream;
@@ -44,6 +45,7 @@ import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.LocalResource;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.slider.Slider;
 import org.apache.slider.api.InternalKeys;
 import org.apache.slider.api.RoleKeys;
 import org.apache.slider.common.SliderKeys;
@@ -76,6 +78,8 @@ import java.io.StringWriter;
 import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 import java.net.Socket;
+import java.net.URI;
+import java.net.URISyntaxException;
 import java.net.URL;
 import java.net.URLDecoder;
 import java.util.ArrayList;
@@ -1780,4 +1784,108 @@ public final class SliderUtils {
     execCommand(OPENSSL, 0, 5000, logger, "OpenSSL", OPENSSL, "version");
     execCommand(PYTHON, 0, 5000, logger, "Python", PYTHON, "--version");
   }
+
+	/**
+	 * return the path to the currently running slider command
+	 * 
+	 * @throws NullPointerException
+	 *             - If the pathname argument is null
+	 * @throws SecurityException
+	 *             - if a security manager exists and its checkPermission method
+	 *             doesn't allow getting the ProtectionDomain
+	 */
+	public static String getCurrentCommandPath() {
+		File f = new File(Slider.class.getProtectionDomain().getCodeSource()
+				.getLocation().getPath());
+		return f.getAbsolutePath();
+	}
+
+	/**
+	 * return the path to the slider-client.xml used by the current running
+	 * slider command
+	 * 
+	 * @throws SecurityException
+	 *             - if a security manager exists and its checkPermission method
+	 *             denies access to the class loader for the class
+	 */
+	public static String getClientConfigPath() {
+		URL path = ConfigHelper.class.getClassLoader().getResource(
+				SliderKeys.CLIENT_RESOURCE);
+		return path.toString();
+	}
+
+	/**
+	 * validate if slider-client.xml under the path can be opened
+	 * 
+	 * @throws IOException
+	 *             : the file can't be found or open
+	 */
+	public static void validateClientConfigFile() throws IOException {
+		URL resURL = SliderVersionInfo.class.getClassLoader().getResource(
+				SliderKeys.CLIENT_RESOURCE);
+		if (resURL == null) {
+			throw new IOException(
+					"slider-client.xml doesn't exist on the path: "
+							+ getClientConfigPath());
+		}
+
+		try {
+			InputStream inStream = resURL.openStream();
+			if (inStream == null) {
+				throw new IOException("slider-client.xml can't be opened");
+			}
+		} catch (IOException e) {
+			throw new IOException("slider-client.xml can't be opened: "
+					+ e.toString());
+		}
+	}
+
+	/**
+	 * validate if a file on HDFS can be opened
+	 * 
+	 * @throws IOException
+	 *             : the file can't be found or open
+	 * @throws URISyntaxException
+	 */
+	public static void validateHDFSFile(SliderFileSystem sliderFileSystem, String pathStr) throws IOException, URISyntaxException{
+	  URI pathURI = new URI(pathStr);
+	  InputStream inputStream = sliderFileSystem.getFileSystem().open(new Path(pathURI));
+	  if(inputStream == null){
+		  throw new IOException("HDFS file " + pathStr + " can't be opened");
+	  }
+  }
+
+	/**
+	 * return the version and path of the JDK invoking the current running
+	 * slider command
+	 * 
+	 * @throws SecurityException
+	 *             - if a security manager exists and its checkPropertyAccess
+	 *             method doesn't allow access to the specified system property.
+	 */
+	public static String getJDKInfo() {
+		String version = System.getProperty("java.version");
+		String javaHome = System.getProperty("java.home");
+		return "The version of the JDK invoking the current running slider command: "
+				+ version + "; The path to it is: " + javaHome;
+	}
+
+	/**
+	 * return a description of whether the current user has created credential
+	 * cache files from kerberos servers
+	 * 
+	 * @throws IOException
+	 * @throws BadConfigException
+	 * @throws SecurityException
+	 *             - if a security manager exists and its checkPropertyAccess
+	 *             method doesn't allow access to the specified system property.
+	 */
+	public static String checkCredentialCacheFile() throws IOException,
+			BadConfigException {
+		String result = null;
+		if (!Shell.WINDOWS) {
+			result = Shell.execCommand("klist");
+		}
+		return result;
+	}
 }
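
The new SliderUtils helpers are all static, so they can be exercised outside SliderClient too; a minimal sketch, illustrative only (the class name is hypothetical):

    import java.io.IOException;
    import org.apache.slider.common.tools.SliderUtils;

    class ClientDiagnosticsSketch {
      static void printClientEnvironment() throws IOException {
        System.out.println(SliderUtils.getCurrentCommandPath()); // jar the command ran from
        System.out.println(SliderUtils.getClientConfigPath());   // URL of the slider-client.xml in use
        System.out.println(SliderUtils.getJDKInfo());            // java.version and java.home
        SliderUtils.validateClientConfigFile();                  // IOException if missing or unreadable
      }
    }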

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/f8210a7c/slider-core/src/main/java/org/apache/slider/providers/agent/AgentKeys.java
----------------------------------------------------------------------
diff --git a/slider-core/src/main/java/org/apache/slider/providers/agent/AgentKeys.java b/slider-core/src/main/java/org/apache/slider/providers/agent/AgentKeys.java
index c271e8e..b30c18c 100644
--- a/slider-core/src/main/java/org/apache/slider/providers/agent/AgentKeys.java
+++ b/slider-core/src/main/java/org/apache/slider/providers/agent/AgentKeys.java
@@ -60,6 +60,11 @@ public interface AgentKeys {
    * Execution home for the agent.
    */
   String APP_HOME = "app.home";
+  String APP_ROOT = "site.global.app_root";
+  /**
+   * Runas user of the application
+   */
+  String RUNAS_USER = "site.global.app_user";
   /**
    * Name of the service.
    */


[10/12] git commit: SLIDER-149 sync with move to short-named status

Posted by st...@apache.org.
SLIDER-149 sync with move to short-named status


Project: http://git-wip-us.apache.org/repos/asf/incubator-slider/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-slider/commit/62335bfd
Tree: http://git-wip-us.apache.org/repos/asf/incubator-slider/tree/62335bfd
Diff: http://git-wip-us.apache.org/repos/asf/incubator-slider/diff/62335bfd

Branch: refs/heads/feature/SLIDER-149_Support_a_YARN_service_registry
Commit: 62335bfd1a894db41016fb0aa14268c5e21737e2
Parents: 969b734
Author: Steve Loughran <st...@apache.org>
Authored: Tue Sep 30 15:24:18 2014 -0700
Committer: Steve Loughran <st...@apache.org>
Committed: Tue Sep 30 15:24:18 2014 -0700

----------------------------------------------------------------------
 .../TestStandaloneYarnRegistryAM.groovy         |  1 +
 .../apache/slider/test/SliderTestUtils.groovy   |  7 +++++
 .../minicluster/live/TestTwoLiveClusters.groovy | 32 ++++++++++++--------
 3 files changed, 28 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/62335bfd/slider-core/src/test/groovy/org/apache/slider/agent/standalone/TestStandaloneYarnRegistryAM.groovy
----------------------------------------------------------------------
diff --git a/slider-core/src/test/groovy/org/apache/slider/agent/standalone/TestStandaloneYarnRegistryAM.groovy b/slider-core/src/test/groovy/org/apache/slider/agent/standalone/TestStandaloneYarnRegistryAM.groovy
index d081676..0155232 100644
--- a/slider-core/src/test/groovy/org/apache/slider/agent/standalone/TestStandaloneYarnRegistryAM.groovy
+++ b/slider-core/src/test/groovy/org/apache/slider/agent/standalone/TestStandaloneYarnRegistryAM.groovy
@@ -154,6 +154,7 @@ class TestStandaloneYarnRegistryAM extends AgentMiniClusterTestBase {
 
     Map < String, ServiceRecord > recordMap = RecordOperations.extractServiceRecords(
         registryService,
+        recordsPath,
         registryService.listFull(recordsPath))
     def serviceRecords = recordMap.values();
     dumpCollection(serviceRecords)

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/62335bfd/slider-core/src/test/groovy/org/apache/slider/test/SliderTestUtils.groovy
----------------------------------------------------------------------
diff --git a/slider-core/src/test/groovy/org/apache/slider/test/SliderTestUtils.groovy b/slider-core/src/test/groovy/org/apache/slider/test/SliderTestUtils.groovy
index 9f8e850..fe9e587 100644
--- a/slider-core/src/test/groovy/org/apache/slider/test/SliderTestUtils.groovy
+++ b/slider-core/src/test/groovy/org/apache/slider/test/SliderTestUtils.groovy
@@ -713,6 +713,13 @@ class SliderTestUtils extends Assert {
     log.info("number of entries: ${entries.length}")
     entries.each { log.info(it.toString()) }
   }
+
+  public static void dumpMap(Map map) {
+    map.entrySet().each { Map.Entry it ->
+      log.info("\"${it.key.toString()}\": \"${it.value.toString()}\"")
+    }
+  }
+  
   /**
    * Get a time option in seconds if set, otherwise the default value (also in seconds).
    * This operation picks up the time value as a system property if set -that

http://git-wip-us.apache.org/repos/asf/incubator-slider/blob/62335bfd/slider-providers/hbase/slider-hbase-provider/src/test/groovy/org/apache/slider/providers/hbase/minicluster/live/TestTwoLiveClusters.groovy
----------------------------------------------------------------------
diff --git a/slider-providers/hbase/slider-hbase-provider/src/test/groovy/org/apache/slider/providers/hbase/minicluster/live/TestTwoLiveClusters.groovy b/slider-providers/hbase/slider-hbase-provider/src/test/groovy/org/apache/slider/providers/hbase/minicluster/live/TestTwoLiveClusters.groovy
index a415524..01a67c7 100644
--- a/slider-providers/hbase/slider-hbase-provider/src/test/groovy/org/apache/slider/providers/hbase/minicluster/live/TestTwoLiveClusters.groovy
+++ b/slider-providers/hbase/slider-hbase-provider/src/test/groovy/org/apache/slider/providers/hbase/minicluster/live/TestTwoLiveClusters.groovy
@@ -20,11 +20,11 @@ package org.apache.slider.providers.hbase.minicluster.live
 
 import groovy.transform.CompileStatic
 import groovy.util.logging.Slf4j
-import org.apache.hadoop.yarn.registry.client.api.RegistryConstants
 import org.apache.hadoop.yarn.registry.client.api.RegistryOperations
 import org.apache.hadoop.yarn.registry.client.binding.RegistryOperationUtils
 import org.apache.hadoop.yarn.registry.client.binding.RegistryPathUtils
 import org.apache.hadoop.yarn.registry.client.types.ServiceRecord
+import org.apache.slider.common.SliderKeys
 import org.apache.slider.common.params.Arguments
 import org.apache.slider.client.SliderClient
 import org.apache.slider.providers.hbase.HBaseKeys
@@ -87,16 +87,20 @@ class TestTwoLiveClusters extends HBaseMiniClusterTestBase {
     describe "service registry names"
     RegistryOperations registry = cluster2Client.registryOperations
     def home = RegistryOperationUtils.homePathForCurrentUser()
-    def names = RegistryOperationUtils.listServiceRecords(registry,
-        RegistryPathUtils.join(home, RegistryConstants.PATH_USER_SERVICES))
 
-    def stats = registry.listFull(
-        RegistryPathUtils.join(home, RegistryConstants.PATH_USER_SERVICES))
+    def userSliderInstancesPath = RegistryOperationUtils.serviceclassPath(
+        RegistryOperationUtils.currentUser(), SliderKeys.APP_TYPE)
+    
+   
+    def names = RegistryOperationUtils.listServiceRecords(registry,
+        userSliderInstancesPath)
+    dumpMap(names)
+    
+    def stats = registry.listFull(userSliderInstancesPath)
     
     dumpCollection(stats)
     List<String> instanceIds = sliderClient.listRegisteredSliderInstances()
 
-
     dumpRegistryInstanceIDs(instanceIds)
     assert names.size() == 2
     assert instanceIds.size() == 2
@@ -107,16 +111,20 @@ class TestTwoLiveClusters extends HBaseMiniClusterTestBase {
     dumpRegistryInstances(instances)
     assert instances.size() == 2
 
+
+    def hbaseServicePath = RegistryOperationUtils.serviceclassPath(
+        RegistryOperationUtils.currentUser(),
+        HBaseKeys.HBASE_SERVICE_TYPE)
     Map<String, ServiceRecord> hbaseInstances =
         RegistryOperationUtils.listServiceRecords(registry,
-        RegistryOperationUtils.serviceclassPath(
-            RegistryOperationUtils.currentUser(),
-            HBaseKeys.HBASE_SERVICE_TYPE));
+            hbaseServicePath);
         
     assert hbaseInstances.size() == 2
-    def hbase1ServiceData = hbaseInstances[clustername1]
-    def hbase2ServiceData = hbaseInstances[clustername2]
-    assert !(hbase1ServiceData == hbase2ServiceData)
+    String clusterPath1 = RegistryPathUtils.join(hbaseServicePath, clustername1)
+    String clusterPath2 = RegistryPathUtils.join(hbaseServicePath, clustername2)
+    assert hbaseInstances[clusterPath1] != null
+    assert hbaseInstances[clusterPath2] != null
+    assert hbaseInstances[clusterPath1] != hbaseInstances[clusterPath2]
 
     clusterActionFreeze(cluster2Client, clustername2, "stop cluster 2")
     clusterActionFreeze(sliderClient, clustername1, "Stop cluster 1")