Posted to commits@bigtop.apache.org by kw...@apache.org on 2017/03/28 04:19:07 UTC

[2/2] bigtop git commit: BIGTOP-2712: Juju CI driven updates (closes #189)

BIGTOP-2712: Juju CI driven updates (closes #189)

Signed-off-by: Kevin W Monroe <ke...@canonical.com>


Project: http://git-wip-us.apache.org/repos/asf/bigtop/repo
Commit: http://git-wip-us.apache.org/repos/asf/bigtop/commit/4a24c4bd
Tree: http://git-wip-us.apache.org/repos/asf/bigtop/tree/4a24c4bd
Diff: http://git-wip-us.apache.org/repos/asf/bigtop/diff/4a24c4bd

Branch: refs/heads/master
Commit: 4a24c4bd92ce3b917901493b45bb923ef555b2c2
Parents: 895ecd5
Author: Kevin W Monroe <ke...@canonical.com>
Authored: Sun Mar 19 02:03:16 2017 +0000
Committer: Kevin W Monroe <ke...@canonical.com>
Committed: Mon Mar 27 23:11:00 2017 -0500

----------------------------------------------------------------------
 bigtop-deploy/juju/hadoop-hbase/.gitignore      |   2 +
 bigtop-deploy/juju/hadoop-hbase/README.md       | 329 +++++++++++++++++++
 bigtop-deploy/juju/hadoop-hbase/bundle-dev.yaml | 131 ++++++++
 .../juju/hadoop-hbase/bundle-local.yaml         | 131 ++++++++
 bigtop-deploy/juju/hadoop-hbase/bundle.yaml     | 131 ++++++++
 bigtop-deploy/juju/hadoop-hbase/ci-info.yaml    |  34 ++
 bigtop-deploy/juju/hadoop-hbase/copyright       |  16 +
 .../juju/hadoop-hbase/tests/01-bundle.py        | 139 ++++++++
 .../juju/hadoop-hbase/tests/tests.yaml          |  13 +
 bigtop-deploy/juju/hadoop-kafka/bundle-dev.yaml |   2 +-
 .../juju/hadoop-kafka/bundle-local.yaml         |   2 +-
 bigtop-deploy/juju/hadoop-kafka/bundle.yaml     |  14 +-
 bigtop-deploy/juju/hadoop-kafka/ci-info.yaml    |  16 +-
 .../juju/hadoop-kafka/tests/01-bundle.py        |  13 +
 .../juju/hadoop-kafka/tests/tests.yaml          |   8 +-
 .../juju/hadoop-processing/bundle-dev.yaml      |   2 +-
 .../juju/hadoop-processing/bundle-local.yaml    |   2 +-
 .../juju/hadoop-processing/bundle.yaml          |  10 +-
 .../juju/hadoop-processing/ci-info.yaml         |  12 +-
 .../juju/hadoop-processing/tests/01-bundle.py   |  12 +
 .../juju/hadoop-processing/tests/tests.yaml     |   8 +-
 bigtop-deploy/juju/hadoop-spark/bundle-dev.yaml |   2 +-
 .../juju/hadoop-spark/bundle-local.yaml         |   2 +-
 bigtop-deploy/juju/hadoop-spark/bundle.yaml     |  14 +-
 bigtop-deploy/juju/hadoop-spark/ci-info.yaml    |  16 +-
 .../juju/hadoop-spark/tests/01-bundle.py        |  13 +
 .../juju/hadoop-spark/tests/tests.yaml          |   8 +-
 .../juju/spark-processing/bundle-dev.yaml       |   2 +-
 .../juju/spark-processing/bundle-local.yaml     |   2 +-
 bigtop-deploy/juju/spark-processing/bundle.yaml |   6 +-
 .../juju/spark-processing/ci-info.yaml          |   6 +-
 .../juju/spark-processing/tests/01-bundle.py    |  19 +-
 .../juju/spark-processing/tests/tests.yaml      |  14 +-
 .../layer-hadoop-resourcemanager/actions.yaml   |  12 +-
 .../src/charm/hbase/layer-hbase/README.md       |  91 +++--
 .../src/charm/hbase/layer-hbase/actions/restart |   7 +-
 .../src/charm/hbase/layer-hbase/actions/start   |   7 +-
 .../layer-hbase/actions/start-hbase-master      |   2 +-
 .../actions/start-hbase-regionserver            |   2 +-
 .../src/charm/hbase/layer-hbase/actions/stop    |   7 +-
 .../hbase/layer-hbase/actions/stop-hbase-master |   2 +-
 .../layer-hbase/actions/stop-hbase-regionserver |   2 +-
 .../src/charm/hbase/layer-hbase/layer.yaml      |   1 +
 .../src/charm/hbase/layer-hbase/metadata.yaml   |   7 +-
 .../charm/hbase/layer-hbase/reactive/hbase.py   |  12 +-
 .../charm/kafka/layer-kafka/tests/01-deploy.py  |   2 +-
 .../kafka/layer-kafka/tests/02-smoke-test.py    |   2 +-
 .../layer-kafka/tests/10-config-changed.py      |  26 +-
 .../mahout/layer-mahout/actions/smoke-test      |  53 ++-
 .../mahout/layer-mahout/reactive/mahout.py      |   7 +-
 .../src/charm/pig/layer-pig/tests/01-deploy.py  |   2 +-
 .../layer-spark/tests/03-scale-standalone.py    |  15 +-
 .../charm/spark/layer-spark/tests/10-test-ha.py |  17 +-
 .../layer-zookeeper/tests/01-deploy-smoke.py    |  11 +-
 .../layer-zookeeper/tests/10-bind-address.py    |  25 +-
 55 files changed, 1289 insertions(+), 154 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/hadoop-hbase/.gitignore
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/hadoop-hbase/.gitignore b/bigtop-deploy/juju/hadoop-hbase/.gitignore
new file mode 100644
index 0000000..a295864
--- /dev/null
+++ b/bigtop-deploy/juju/hadoop-hbase/.gitignore
@@ -0,0 +1,2 @@
+*.pyc
+__pycache__

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/hadoop-hbase/README.md
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/hadoop-hbase/README.md b/bigtop-deploy/juju/hadoop-hbase/README.md
new file mode 100644
index 0000000..b45bf7b
--- /dev/null
+++ b/bigtop-deploy/juju/hadoop-hbase/README.md
@@ -0,0 +1,329 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+# Overview
+
+The Apache Hadoop software library is a framework that allows for the
+distributed processing of large data sets across clusters of computers
+using a simple programming model.
+
+Hadoop is designed to scale from a few servers to thousands of machines,
+each offering local computation and storage. Rather than rely on hardware
+to deliver high availability, Hadoop can detect and handle failures at the
+application layer. This provides a highly available service on top of a
+cluster of machines, each of which may be prone to failure.
+
+HBase is the Hadoop database. Think of it as a distributed, scalable Big Data
+store.
+
+This bundle provides a complete deployment of Hadoop and HBase components from
+[Apache Bigtop][] that performs distributed data processing at scale. Ganglia
+and rsyslog applications are also provided to monitor cluster health and syslog
+activity.
+
+[Apache Bigtop]: http://bigtop.apache.org/
+
+## Bundle Composition
+
+The applications that make up this bundle are spread across 8 units as
+follows:
+
+  * NameNode (HDFS)
+  * ResourceManager (YARN)
+    * Colocated on the NameNode unit
+  * Zookeeper
+    * 3 separate units
+  * Slave (DataNode and NodeManager)
+    * 3 separate units
+  * HBase
+    * 3 units colocated with the Hadoop Slaves
+  * Client (Hadoop endpoint)
+  * Plugin (Facilitates communication with the Hadoop cluster)
+    * Subordinate to the HBase and Client units
+  * Ganglia (Web interface for monitoring cluster metrics)
+    * Colocated on the Client unit
+  * Rsyslog (Aggregate cluster syslog events in a single location)
+    * Colocated on the Client unit
+
+Deploying this bundle results in a fully configured Apache Bigtop
+cluster on any supported cloud, which can be scaled to meet workload
+demands.
+
+
+# Deploying
+
+A working Juju installation is assumed to be present. If Juju is not yet set
+up, please follow the [getting-started][] instructions prior to deploying this
+bundle.
+
+> **Note**: This bundle requires hardware resources that may exceed the
+limits of free-tier or trial accounts on some clouds. To deploy to these
+environments, modify a local copy of [bundle.yaml][], reducing
+`num_units` and `constraints` for the affected services and machines as
+needed to satisfy account limits, as in the sketch below.
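+
+For example, a trimmed fragment might look like this (the reduced values are
+illustrative, not prescriptive):
+
+    services:
+      slave:
+        num_units: 1
+    machines:
+      "1":
+        constraints: mem=3G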
+
+Deploy this bundle from the Juju charm store with the `juju deploy` command:
+
+    juju deploy hadoop-hbase
+
+> **Note**: The above assumes Juju 2.0 or greater. If using an earlier version
+of Juju, use [juju-quickstart][] with the following syntax: `juju quickstart
+hadoop-hbase`.
+
+Alternatively, deploy a locally modified `bundle.yaml` with:
+
+    juju deploy /path/to/bundle.yaml
+
+> **Note**: The above assumes Juju 2.0 or greater. If using an earlier version
+of Juju, use [juju-quickstart][] with the following syntax: `juju quickstart
+/path/to/bundle.yaml`.
+
+The charms in this bundle can also be built from their source layers in the
+[Bigtop charm repository][].  See the [Bigtop charm README][] for instructions
+on building and deploying these charms locally.
+
+## Network-Restricted Environments
+Charms can be deployed in environments with limited network access. To deploy
+in this environment, configure a Juju model with appropriate proxy and/or
+mirror options. See [Configuring Models][] for more information.
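+
+For example, proxy and mirror options can be set on the current model with
+`juju model-config` (the URLs below are placeholders for site-specific
+values):
+
+    juju model-config http-proxy=http://proxy.example.com:3128 \
+                      https-proxy=http://proxy.example.com:3128 \
+                      apt-mirror=http://mirror.example.com/ubuntu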
+
+[getting-started]: https://jujucharms.com/docs/stable/getting-started
+[bundle.yaml]: https://github.com/apache/bigtop/blob/master/bigtop-deploy/juju/hadoop-hbase/bundle.yaml
+[juju-quickstart]: https://launchpad.net/juju-quickstart
+[Bigtop charm repository]: https://github.com/apache/bigtop/tree/master/bigtop-packages/src/charm
+[Bigtop charm README]: https://github.com/apache/bigtop/blob/master/bigtop-packages/src/charm/README.md
+[Configuring Models]: https://jujucharms.com/docs/stable/models-config
+
+
+# Verifying
+
+## Status
+The applications that make up this bundle provide status messages to indicate
+when they are ready:
+
+    juju status
+
+This is particularly useful when combined with `watch` to track the ongoing
+progress of the deployment:
+
+    watch -n 2 juju status
+
+The message for each unit will provide information about that unit's state.
+Once they all indicate that they are ready, perform application smoke tests
+to verify that the bundle is working as expected.
+
+## Smoke Test
+The charms for each core component (namenode, resourcemanager, slave, hbase,
+and zookeeper) provide a `smoke-test` action that can be used to verify the
+application is functioning as expected. Note that the 'slave' component runs
+extensive tests provided by Apache Bigtop and may take up to 30 minutes to
+complete. Run the smoke-test actions as follows:
+
+    juju run-action namenode/0 smoke-test
+    juju run-action resourcemanager/0 smoke-test
+    juju run-action slave/0 smoke-test
+    juju run-action hbase/0 smoke-test
+    juju run-action zookeeper/0 smoke-test
+
+> **Note**: The above assumes Juju 2.0 or greater. If using an earlier version
+of Juju, the syntax is `juju action do <application>/0 smoke-test`.
+
+Watch the progress of the smoke test actions with:
+
+    watch -n 2 juju show-action-status
+
+> **Note**: The above assumes Juju 2.0 or greater. If using an earlier version
+of Juju, the syntax is `juju action status`.
+
+Eventually, all of the actions should settle to `status: completed`.  If
+any report `status: failed`, that application is not working as expected. Get
+more information about a specific smoke test with:
+
+    juju show-action-output <action-id>
+
+> **Note**: The above assumes Juju 2.0 or greater. If using an earlier version
+of Juju, the syntax is `juju action fetch <action-id>`.
+
+## Utilities
+Applications in this bundle include command-line and web utilities that
+can be used to verify information about the cluster.
+
+From the command line, show the HDFS dfsadmin report and view the current list
+of YARN NodeManager units with the following:
+
+    juju run --application namenode "su hdfs -c 'hdfs dfsadmin -report'"
+    juju run --application resourcemanager "su yarn -c 'yarn node -list'"
+
+Show the list of Zookeeper nodes with the following:
+
+    juju run --unit zookeeper/0 'echo "ls /" | /usr/lib/zookeeper/bin/zkCli.sh'
+
+To access the HDFS web console, find the `PUBLIC-ADDRESS` of the namenode
+application and expose it:
+
+    juju status namenode
+    juju expose namenode
+
+The web interface will be available at the following URL:
+
+    http://NAMENODE_PUBLIC_IP:50070
+
+Similarly, to access the Resource Manager web consoles, find the
+`PUBLIC-ADDRESS` of the resourcemanager application and expose it:
+
+    juju status resourcemanager
+    juju expose resourcemanager
+
+The YARN and Job History web interfaces will be available at the following URLs:
+
+    http://RESOURCEMANAGER_PUBLIC_IP:8088
+    http://RESOURCEMANAGER_PUBLIC_IP:19888
+
+Finally, to access the HBase web console, find the `PUBLIC-ADDRESS` of any
+hbase unit and expose the application:
+
+    juju status hbase
+    juju expose hbase
+
+The web interface will be available at the following URL:
+
+    http://HBASE_PUBLIC_IP:60010
+
+
+# Monitoring
+
+This bundle includes Ganglia for system-level monitoring of the namenode,
+resourcemanager, slave, hbase, and zookeeper units. Metrics are sent to a
+centralized ganglia unit for easy viewing in a browser. To view the ganglia web
+interface, find the `PUBLIC-ADDRESS` of the Ganglia application and expose it:
+
+    juju status ganglia
+    juju expose ganglia
+
+The web interface will be available at:
+
+    http://GANGLIA_PUBLIC_IP/ganglia
+
+
+# Logging
+
+This bundle includes rsyslog to collect syslog data from the namenode,
+resourcemanager, slave, hbase, and zookeeper units. These logs are sent to a
+centralized rsyslog unit for easy syslog analysis. One method of viewing this
+log data is to simply cat syslog from the rsyslog unit:
+
+    juju run --unit rsyslog/0 'sudo cat /var/log/syslog'
+
+Logs may also be forwarded to an external rsyslog processing service. See
+the *Forwarding logs to a system outside of the Juju environment* section of
+the [rsyslog README](https://jujucharms.com/rsyslog/) for more information.
+
+
+# Benchmarking
+
+The `resourcemanager` charm in this bundle provides several benchmarks to gauge
+the performance of the Hadoop cluster. Each benchmark is an action that can be
+run with `juju run-action`:
+
+    $ juju actions resourcemanager
+    ACTION      DESCRIPTION
+    mrbench     Mapreduce benchmark for small jobs
+    nnbench     Load test the NameNode hardware and configuration
+    smoke-test  Run an Apache Bigtop smoke test.
+    teragen     Generate data with teragen
+    terasort    Runs teragen to generate sample data, and then runs terasort to sort that data
+    testdfsio   DFS IO Testing
+
+    $ juju run-action resourcemanager/0 nnbench
+    Action queued with id: 55887b40-116c-4020-8b35-1e28a54cc622
+
+    $ juju show-action-output 55887b40-116c-4020-8b35-1e28a54cc622
+    results:
+      meta:
+        composite:
+          direction: asc
+          units: secs
+          value: "128"
+        start: 2016-02-04T14:55:39Z
+        stop: 2016-02-04T14:57:47Z
+      results:
+        raw: '{"BAD_ID": "0", "FILE: Number of read operations": "0", "Reduce input groups":
+          "8", "Reduce input records": "95", "Map output bytes": "1823", "Map input records":
+          "12", "Combine input records": "0", "HDFS: Number of bytes read": "18635", "FILE:
+          Number of bytes written": "32999982", "HDFS: Number of write operations": "330",
+          "Combine output records": "0", "Total committed heap usage (bytes)": "3144749056",
+          "Bytes Written": "164", "WRONG_LENGTH": "0", "Failed Shuffles": "0", "FILE:
+          Number of bytes read": "27879457", "WRONG_MAP": "0", "Spilled Records": "190",
+          "Merged Map outputs": "72", "HDFS: Number of large read operations": "0", "Reduce
+          shuffle bytes": "2445", "FILE: Number of large read operations": "0", "Map output
+          materialized bytes": "2445", "IO_ERROR": "0", "CONNECTION": "0", "HDFS: Number
+          of read operations": "567", "Map output records": "95", "Reduce output records":
+          "8", "WRONG_REDUCE": "0", "HDFS: Number of bytes written": "27412", "GC time
+          elapsed (ms)": "603", "Input split bytes": "1610", "Shuffled Maps ": "72", "FILE:
+          Number of write operations": "0", "Bytes Read": "1490"}'
+    status: completed
+    timing:
+      completed: 2016-02-04 14:57:48 +0000 UTC
+      enqueued: 2016-02-04 14:55:14 +0000 UTC
+      started: 2016-02-04 14:55:27 +0000 UTC
+
+The `hbase` charm in this bundle also provides a benchmark to gauge
+the performance of the HBase cluster:
+
+    $ juju run-action hbase/0 perf-test
+    Action queued with id: 339cec1f-e903-4ee7-85ca-876fb0c3d28e
+
+    $ juju show-action-output 339cec1f-e903-4ee7-85ca-876fb0c3d28e
+    results:
+      meta:
+        composite:
+          direction: asc
+          units: secs
+          value: "200.754000"
+    status: completed
+    timing:
+      completed: 2016-11-02 03:11:48 +0000 UTC
+      enqueued: 2016-11-02 03:08:21 +0000 UTC
+      started: 2016-11-02 03:08:26 +0000 UTC
+
+
+# Scaling
+
+By default, three units each of the Hadoop slave, HBase, and Zookeeper
+applications are deployed. Scaling these applications is as simple as adding
+more units. To add one unit of each:
+
+    juju add-unit slave
+    juju add-unit hbase
+    juju add-unit zookeeper
+
+Multiple units may be added at once.  For example, add four more slave units:
+
+    juju add-unit -n4 slave
+
+
+# Contact Information
+
+- <bi...@lists.ubuntu.com>
+
+
+# Resources
+
+- [Apache Bigtop home page](http://bigtop.apache.org/)
+- [Apache Bigtop issue tracking](http://bigtop.apache.org/issue-tracking.html)
+- [Apache Bigtop mailing lists](http://bigtop.apache.org/mail-lists.html)
+- [Juju Bigtop charms](https://jujucharms.com/q/apache/bigtop)
+- [Juju mailing list](https://lists.ubuntu.com/mailman/listinfo/juju)
+- [Juju community](https://jujucharms.com/community)

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/hadoop-hbase/bundle-dev.yaml
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/hadoop-hbase/bundle-dev.yaml b/bigtop-deploy/juju/hadoop-hbase/bundle-dev.yaml
new file mode 100644
index 0000000..e4737e1
--- /dev/null
+++ b/bigtop-deploy/juju/hadoop-hbase/bundle-dev.yaml
@@ -0,0 +1,131 @@
+services:
+  namenode:
+    charm: "cs:~bigdata-dev/xenial/hadoop-namenode"
+    constraints: "mem=7G root-disk=32G"
+    num_units: 1
+    annotations:
+      gui-x: "500"
+      gui-y: "800"
+    to:
+      - "0"
+  resourcemanager:
+    charm: "cs:~bigdata-dev/xenial/hadoop-resourcemanager"
+    constraints: "mem=7G root-disk=32G"
+    num_units: 1
+    annotations:
+      gui-x: "500"
+      gui-y: "0"
+    to:
+      - "0"
+  slave:
+    charm: "cs:~bigdata-dev/xenial/hadoop-slave"
+    constraints: "mem=7G root-disk=32G"
+    num_units: 3
+    annotations:
+      gui-x: "0"
+      gui-y: "400"
+    to:
+      - "1"
+      - "2"
+      - "3"
+  plugin:
+    charm: "cs:~bigdata-dev/xenial/hadoop-plugin"
+    annotations:
+      gui-x: "1000"
+      gui-y: "400"
+  client:
+    charm: "cs:xenial/hadoop-client-3"
+    constraints: "mem=3G"
+    num_units: 1
+    annotations:
+      gui-x: "1250"
+      gui-y: "400"
+    to:
+      - "4"
+  hbase:
+    charm: "cs:~bigdata-dev/xenial/hbase"
+    constraints: "mem=7G root-disk=32G"
+    num_units: 3
+    annotations:
+      gui-x: "0"
+      gui-y: "0"
+    to:
+      - "1"
+      - "2"
+      - "3"
+  zookeeper:
+    charm: "cs:~bigdata-dev/xenial/zookeeper"
+    constraints: "mem=3G root-disk=32G"
+    num_units: 3
+    annotations:
+      gui-x: "500"
+      gui-y: "400"
+    to:
+      - "5"
+      - "6"
+      - "7"
+  ganglia:
+    charm: "cs:~bigdata-dev/xenial/ganglia-5"
+    num_units: 1
+    annotations:
+      gui-x: "0"
+      gui-y: "800"
+    to:
+      - "4"
+  ganglia-node:
+    charm: "cs:~bigdata-dev/xenial/ganglia-node-7"
+    annotations:
+      gui-x: "250"
+      gui-y: "400"
+  rsyslog:
+    charm: "cs:~bigdata-dev/xenial/rsyslog-7"
+    num_units: 1
+    annotations:
+      gui-x: "1000"
+      gui-y: "800"
+    to:
+      - "4"
+  rsyslog-forwarder-ha:
+    charm: "cs:~bigdata-dev/xenial/rsyslog-forwarder-ha-7"
+    annotations:
+      gui-x: "750"
+      gui-y: "400"
+series: xenial
+relations:
+  - [resourcemanager, namenode]
+  - [namenode, slave]
+  - [resourcemanager, slave]
+  - [plugin, namenode]
+  - [plugin, resourcemanager]
+  - [client, plugin]
+  - [hbase, plugin]
+  - [hbase, zookeeper]
+  - ["ganglia-node:juju-info", "namenode:juju-info"]
+  - ["ganglia-node:juju-info", "resourcemanager:juju-info"]
+  - ["ganglia-node:juju-info", "slave:juju-info"]
+  - ["ganglia-node:juju-info", "hbase:juju-info"]
+  - ["ganglia-node:juju-info", "zookeeper:juju-info"]
+  - ["ganglia:node", "ganglia-node:node"]
+  - ["rsyslog-forwarder-ha:juju-info", "namenode:juju-info"]
+  - ["rsyslog-forwarder-ha:juju-info", "resourcemanager:juju-info"]
+  - ["rsyslog-forwarder-ha:juju-info", "slave:juju-info"]
+  - ["rsyslog-forwarder-ha:juju-info", "hbase:juju-info"]
+  - ["rsyslog-forwarder-ha:juju-info", "zookeeper:juju-info"]
+  - ["rsyslog:aggregator", "rsyslog-forwarder-ha:syslog"]
+machines:
+  "0":
+    series: "xenial"
+  "1":
+    series: "xenial"
+  "2":
+    series: "xenial"
+  "3":
+    series: "xenial"
+  "4":
+    series: "xenial"
+  "5":
+    series: "xenial"
+  "6":
+    series: "xenial"
+  "7":
+    series: "xenial"

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/hadoop-hbase/bundle-local.yaml
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/hadoop-hbase/bundle-local.yaml b/bigtop-deploy/juju/hadoop-hbase/bundle-local.yaml
new file mode 100644
index 0000000..7ae57f6
--- /dev/null
+++ b/bigtop-deploy/juju/hadoop-hbase/bundle-local.yaml
@@ -0,0 +1,131 @@
+services:
+  namenode:
+    charm: "/home/ubuntu/charms/xenial/hadoop-namenode"
+    constraints: "mem=7G root-disk=32G"
+    num_units: 1
+    annotations:
+      gui-x: "500"
+      gui-y: "800"
+    to:
+      - "0"
+  resourcemanager:
+    charm: "/home/ubuntu/charms/xenial/hadoop-resourcemanager"
+    constraints: "mem=7G root-disk=32G"
+    num_units: 1
+    annotations:
+      gui-x: "500"
+      gui-y: "0"
+    to:
+      - "0"
+  slave:
+    charm: "/home/ubuntu/charms/xenial/hadoop-slave"
+    constraints: "mem=7G root-disk=32G"
+    num_units: 3
+    annotations:
+      gui-x: "0"
+      gui-y: "400"
+    to:
+      - "1"
+      - "2"
+      - "3"
+  plugin:
+    charm: "/home/ubuntu/charms/xenial/hadoop-plugin"
+    annotations:
+      gui-x: "1000"
+      gui-y: "400"
+  client:
+    charm: "cs:xenial/hadoop-client-3"
+    constraints: "mem=3G"
+    num_units: 1
+    annotations:
+      gui-x: "1250"
+      gui-y: "400"
+    to:
+      - "4"
+  hbase:
+    charm: "/home/ubuntu/charms/xenial/hbase"
+    constraints: "mem=7G root-disk=32G"
+    num_units: 3
+    annotations:
+      gui-x: "0"
+      gui-y: "0"
+    to:
+      - "1"
+      - "2"
+      - "3"
+  zookeeper:
+    charm: "/home/ubuntu/charms/xenial/zookeeper"
+    constraints: "mem=3G root-disk=32G"
+    num_units: 3
+    annotations:
+      gui-x: "500"
+      gui-y: "400"
+    to:
+      - "5"
+      - "6"
+      - "7"
+  ganglia:
+    charm: "cs:~bigdata-dev/xenial/ganglia-5"
+    num_units: 1
+    annotations:
+      gui-x: "0"
+      gui-y: "800"
+    to:
+      - "4"
+  ganglia-node:
+    charm: "cs:~bigdata-dev/xenial/ganglia-node-7"
+    annotations:
+      gui-x: "250"
+      gui-y: "400"
+  rsyslog:
+    charm: "cs:~bigdata-dev/xenial/rsyslog-7"
+    num_units: 1
+    annotations:
+      gui-x: "1000"
+      gui-y: "800"
+    to:
+      - "4"
+  rsyslog-forwarder-ha:
+    charm: "cs:~bigdata-dev/xenial/rsyslog-forwarder-ha-7"
+    annotations:
+      gui-x: "750"
+      gui-y: "400"
+series: xenial
+relations:
+  - [resourcemanager, namenode]
+  - [namenode, slave]
+  - [resourcemanager, slave]
+  - [plugin, namenode]
+  - [plugin, resourcemanager]
+  - [client, plugin]
+  - [hbase, plugin]
+  - [hbase, zookeeper]
+  - ["ganglia-node:juju-info", "namenode:juju-info"]
+  - ["ganglia-node:juju-info", "resourcemanager:juju-info"]
+  - ["ganglia-node:juju-info", "slave:juju-info"]
+  - ["ganglia-node:juju-info", "hbase:juju-info"]
+  - ["ganglia-node:juju-info", "zookeeper:juju-info"]
+  - ["ganglia:node", "ganglia-node:node"]
+  - ["rsyslog-forwarder-ha:juju-info", "namenode:juju-info"]
+  - ["rsyslog-forwarder-ha:juju-info", "resourcemanager:juju-info"]
+  - ["rsyslog-forwarder-ha:juju-info", "slave:juju-info"]
+  - ["rsyslog-forwarder-ha:juju-info", "hbase:juju-info"]
+  - ["rsyslog-forwarder-ha:juju-info", "zookeeper:juju-info"]
+  - ["rsyslog:aggregator", "rsyslog-forwarder-ha:syslog"]
+machines:
+  "0":
+    series: "xenial"
+  "1":
+    series: "xenial"
+  "2":
+    series: "xenial"
+  "3":
+    series: "xenial"
+  "4":
+    series: "xenial"
+  "5":
+    series: "xenial"
+  "6":
+    series: "xenial"
+  "7":
+    series: "xenial"

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/hadoop-hbase/bundle.yaml
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/hadoop-hbase/bundle.yaml b/bigtop-deploy/juju/hadoop-hbase/bundle.yaml
new file mode 100644
index 0000000..95b63d7
--- /dev/null
+++ b/bigtop-deploy/juju/hadoop-hbase/bundle.yaml
@@ -0,0 +1,131 @@
+services:
+  namenode:
+    charm: "cs:xenial/hadoop-namenode-12"
+    constraints: "mem=7G root-disk=32G"
+    num_units: 1
+    annotations:
+      gui-x: "500"
+      gui-y: "800"
+    to:
+      - "0"
+  resourcemanager:
+    charm: "cs:xenial/hadoop-resourcemanager-13"
+    constraints: "mem=7G root-disk=32G"
+    num_units: 1
+    annotations:
+      gui-x: "500"
+      gui-y: "0"
+    to:
+      - "0"
+  slave:
+    charm: "cs:xenial/hadoop-slave-12"
+    constraints: "mem=7G root-disk=32G"
+    num_units: 3
+    annotations:
+      gui-x: "0"
+      gui-y: "400"
+    to:
+      - "1"
+      - "2"
+      - "3"
+  plugin:
+    charm: "cs:xenial/hadoop-plugin-12"
+    annotations:
+      gui-x: "1000"
+      gui-y: "400"
+  client:
+    charm: "cs:xenial/hadoop-client-3"
+    constraints: "mem=3G"
+    num_units: 1
+    annotations:
+      gui-x: "1250"
+      gui-y: "400"
+    to:
+      - "4"
+  hbase:
+    charm: "cs:xenial/hbase-10"
+    constraints: "mem=7G root-disk=32G"
+    num_units: 3
+    annotations:
+      gui-x: "0"
+      gui-y: "0"
+    to:
+      - "1"
+      - "2"
+      - "3"
+  zookeeper:
+    charm: "cs:xenial/zookeeper-16"
+    constraints: "mem=3G root-disk=32G"
+    num_units: 3
+    annotations:
+      gui-x: "500"
+      gui-y: "400"
+    to:
+      - "5"
+      - "6"
+      - "7"
+  ganglia:
+    charm: "cs:~bigdata-dev/xenial/ganglia-5"
+    num_units: 1
+    annotations:
+      gui-x: "0"
+      gui-y: "800"
+    to:
+      - "4"
+  ganglia-node:
+    charm: "cs:~bigdata-dev/xenial/ganglia-node-7"
+    annotations:
+      gui-x: "250"
+      gui-y: "400"
+  rsyslog:
+    charm: "cs:~bigdata-dev/xenial/rsyslog-7"
+    num_units: 1
+    annotations:
+      gui-x: "1000"
+      gui-y: "800"
+    to:
+      - "4"
+  rsyslog-forwarder-ha:
+    charm: "cs:~bigdata-dev/xenial/rsyslog-forwarder-ha-7"
+    annotations:
+      gui-x: "750"
+      gui-y: "400"
+series: xenial
+relations:
+  - [resourcemanager, namenode]
+  - [namenode, slave]
+  - [resourcemanager, slave]
+  - [plugin, namenode]
+  - [plugin, resourcemanager]
+  - [client, plugin]
+  - [hbase, plugin]
+  - [hbase, zookeeper]
+  - ["ganglia-node:juju-info", "namenode:juju-info"]
+  - ["ganglia-node:juju-info", "resourcemanager:juju-info"]
+  - ["ganglia-node:juju-info", "slave:juju-info"]
+  - ["ganglia-node:juju-info", "hbase:juju-info"]
+  - ["ganglia-node:juju-info", "zookeeper:juju-info"]
+  - ["ganglia:node", "ganglia-node:node"]
+  - ["rsyslog-forwarder-ha:juju-info", "namenode:juju-info"]
+  - ["rsyslog-forwarder-ha:juju-info", "resourcemanager:juju-info"]
+  - ["rsyslog-forwarder-ha:juju-info", "slave:juju-info"]
+  - ["rsyslog-forwarder-ha:juju-info", "hbase:juju-info"]
+  - ["rsyslog-forwarder-ha:juju-info", "zookeeper:juju-info"]
+  - ["rsyslog:aggregator", "rsyslog-forwarder-ha:syslog"]
+machines:
+  "0":
+    series: "xenial"
+  "1":
+    series: "xenial"
+  "2":
+    series: "xenial"
+  "3":
+    series: "xenial"
+  "4":
+    series: "xenial"
+  "5":
+    series: "xenial"
+  "6":
+    series: "xenial"
+  "7":
+    series: "xenial"

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/hadoop-hbase/ci-info.yaml
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/hadoop-hbase/ci-info.yaml b/bigtop-deploy/juju/hadoop-hbase/ci-info.yaml
new file mode 100644
index 0000000..aa6f230
--- /dev/null
+++ b/bigtop-deploy/juju/hadoop-hbase/ci-info.yaml
@@ -0,0 +1,34 @@
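+# Metadata for the Juju bundle CI: on a passing run, release this bundle to
+# the 'edge' channel; each listed charm is upgraded from its 'from-channel'
+# during testing ('release: false' leaves the charm itself unpromoted).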
+bundle:
+  name: hadoop-hbase
+  namespace: bigdata-charmers
+  release: true
+  to-channel: edge
+charm-upgrade:
+  hadoop-namenode:
+    from-channel: edge
+    release: false
+    to-channel: beta
+  hadoop-resourcemanager:
+    from-channel: edge
+    release: false
+    to-channel: beta
+  hadoop-slave:
+    from-channel: edge
+    release: false
+    to-channel: beta
+  hadoop-client:
+    from-channel: edge
+    release: false
+    to-channel: beta
+  hadoop-plugin:
+    from-channel: edge
+    release: false
+    to-channel: beta
+  hbase:
+    from-channel: edge
+    release: false
+    to-channel: beta
+  zookeeper:
+    from-channel: edge
+    release: false
+    to-channel: beta

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/hadoop-hbase/copyright
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/hadoop-hbase/copyright b/bigtop-deploy/juju/hadoop-hbase/copyright
new file mode 100644
index 0000000..e900b97
--- /dev/null
+++ b/bigtop-deploy/juju/hadoop-hbase/copyright
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2015, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+     http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/hadoop-hbase/tests/01-bundle.py
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/hadoop-hbase/tests/01-bundle.py b/bigtop-deploy/juju/hadoop-hbase/tests/01-bundle.py
new file mode 100755
index 0000000..166ac54
--- /dev/null
+++ b/bigtop-deploy/juju/hadoop-hbase/tests/01-bundle.py
@@ -0,0 +1,139 @@
+#!/usr/bin/env python3
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import amulet
+import os
+import re
+import unittest
+import yaml
+
+
+class TestBundle(unittest.TestCase):
+    bundle_file = os.path.join(os.path.dirname(__file__), '..', 'bundle.yaml')
+
+    @classmethod
+    def setUpClass(cls):
+        # classmethod inheritance doesn't work quite right with
+        # setUpClass / tearDownClass, so subclasses have to manually call this
+        cls.d = amulet.Deployment(series='xenial')
+        with open(cls.bundle_file) as f:
+            bun = f.read()
+        bundle = yaml.safe_load(bun)
+
+        # NB: strip machine ('to') placement. We don't seem to be guaranteed
+        # the same machine numbering after the initial bundletester deployment,
+        # so we might fail when redeploying --to a specific machine to run
+        # these bundle tests. This is ok because all charms in this bundle are
+        # using 'reset: false', so we'll already have our deployment just the
+        # way we want it by the time this test runs. This was originally
+        # raised as:
+        #  https://github.com/juju/amulet/issues/148
+        for service, service_config in bundle['services'].items():
+            if 'to' in service_config:
+                del service_config['to']
+
+        cls.d.load(bundle)
+        cls.d.setup(timeout=3600)
+
+        # we need units reporting ready before we attempt our smoke tests
+        cls.d.sentry.wait_for_messages({'client': re.compile('ready'),
+                                        'namenode': re.compile('ready'),
+                                        'resourcemanager': re.compile('ready'),
+                                        'slave': re.compile('ready'),
+                                        'hbase': re.compile('ready'),
+                                        'zookeeper': re.compile('ready'),
+                                        }, timeout=3600)
+        cls.hdfs = cls.d.sentry['namenode'][0]
+        cls.yarn = cls.d.sentry['resourcemanager'][0]
+        cls.slave = cls.d.sentry['slave'][0]
+        cls.hbase = cls.d.sentry['hbase'][0]
+
+    def test_components(self):
+        """
+        Confirm that all of the required components are up and running.
+        """
+        hdfs, retcode = self.hdfs.run("pgrep -a java")
+        yarn, retcode = self.yarn.run("pgrep -a java")
+        slave, retcode = self.slave.run("pgrep -a java")
+        hbase, retcode = self.hbase.run("pgrep -a java")
+
+        assert 'NameNode' in hdfs, "NameNode not started"
+        assert 'NameNode' not in slave, "NameNode should not be running on slave"
+
+        assert 'ResourceManager' in yarn, "ResourceManager not started"
+        assert 'ResourceManager' not in slave, "ResourceManager should not be running on slave"
+
+        assert 'JobHistoryServer' in yarn, "JobHistoryServer not started"
+        assert 'JobHistoryServer' not in slave, "JobHistoryServer should not be running on slave"
+
+        assert 'NodeManager' in slave, "NodeManager not started"
+        assert 'NodeManager' not in yarn, "NodeManager should not be running on resourcemanager"
+        assert 'NodeManager' not in hdfs, "NodeManager should not be running on namenode"
+
+        assert 'DataNode' in slave, "DataNode not started"
+        assert 'DataNode' not in yarn, "DataNode should not be running on resourcemanager"
+        assert 'DataNode' not in hdfs, "DataNode should not be running on namenode"
+
+        assert 'Master' in hbase, "HBase Master not started"
+
+    def test_hdfs(self):
+        """
+        Validates mkdir, ls, chmod, and rm HDFS operations.
+        """
+        uuid = self.hdfs.run_action('smoke-test')
+        result = self.d.action_fetch(uuid, timeout=600, full_output=True)
+        # action status=completed on success
+        if result['status'] != "completed":
+            self.fail('HDFS smoke-test did not complete: %s' % result)
+
+    def test_yarn(self):
+        """
+        Validates YARN using the Bigtop 'yarn' smoke test.
+        """
+        uuid = self.yarn.run_action('smoke-test')
+        # 'yarn' smoke takes a while (bigtop tests download lots of stuff)
+        result = self.d.action_fetch(uuid, timeout=1800, full_output=True)
+        # action status=completed on success
+        if result['status'] != "completed":
+            self.fail('YARN smoke-test did not complete: %s' % result)
+
+    def test_hbase(self):
+        """
+        Validates HBase with a simple smoke test.
+        """
+        uuid = self.hbase.run_action('smoke-test')
+        result = self.d.action_fetch(uuid, timeout=600, full_output=True)
+        # action status=completed on success
+        if result['status'] != "completed":
+            self.fail('HBase smoke-test did not complete: %s' % result)
+
+    @unittest.skip(
+        'Skipping slave smoke tests; they are too inconsistent and long running for CWR.')
+    def test_slave(self):
+        """
+        Validates slave using the Bigtop 'hdfs' and 'mapred' smoke test.
+        """
+        uuid = self.slave.run_action('smoke-test')
+        # 'hdfs+mapred' smoke takes a long while (bigtop tests are slow)
+        result = self.d.action_fetch(uuid, timeout=3600, full_output=True)
+        # action status=completed on success
+        if result['status'] != "completed":
+            self.fail('Slave smoke-test did not complete: %s' % result)
+
+
+if __name__ == '__main__':
+    unittest.main()

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/hadoop-hbase/tests/tests.yaml
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/hadoop-hbase/tests/tests.yaml b/bigtop-deploy/juju/hadoop-hbase/tests/tests.yaml
new file mode 100644
index 0000000..a3b7803
--- /dev/null
+++ b/bigtop-deploy/juju/hadoop-hbase/tests/tests.yaml
@@ -0,0 +1,13 @@
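+# Don't reset the environment between tests, and don't let bundletester
+# deploy the bundle itself; 01-bundle.py handles deployment (see the
+# placement note in that file).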
+reset: false
+bundle_deploy: false
+sources:
+  - 'ppa:juju/stable'
+packages:
+  - amulet
+  - python3-yaml
+# exclude tests that are unrelated to bigtop.
+excludes:
+  - ganglia
+  - ganglia-node
+  - rsyslog
+  - rsyslog-forwarder-ha

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/hadoop-kafka/bundle-dev.yaml
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/hadoop-kafka/bundle-dev.yaml b/bigtop-deploy/juju/hadoop-kafka/bundle-dev.yaml
index 36053a8..45b821f 100644
--- a/bigtop-deploy/juju/hadoop-kafka/bundle-dev.yaml
+++ b/bigtop-deploy/juju/hadoop-kafka/bundle-dev.yaml
@@ -89,7 +89,7 @@ services:
     to:
       - "4"
   ganglia-node:
-    charm: "cs:~bigdata-dev/xenial/ganglia-node-6"
+    charm: "cs:~bigdata-dev/xenial/ganglia-node-7"
     annotations:
       gui-x: "250"
       gui-y: "400"

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/hadoop-kafka/bundle-local.yaml
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/hadoop-kafka/bundle-local.yaml b/bigtop-deploy/juju/hadoop-kafka/bundle-local.yaml
index 500503c..bd89872 100644
--- a/bigtop-deploy/juju/hadoop-kafka/bundle-local.yaml
+++ b/bigtop-deploy/juju/hadoop-kafka/bundle-local.yaml
@@ -89,7 +89,7 @@ services:
     to:
       - "4"
   ganglia-node:
-    charm: "cs:~bigdata-dev/xenial/ganglia-node-6"
+    charm: "cs:~bigdata-dev/xenial/ganglia-node-7"
     annotations:
       gui-x: "250"
       gui-y: "400"

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/hadoop-kafka/bundle.yaml
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/hadoop-kafka/bundle.yaml b/bigtop-deploy/juju/hadoop-kafka/bundle.yaml
index d4cb91f..80aa895 100644
--- a/bigtop-deploy/juju/hadoop-kafka/bundle.yaml
+++ b/bigtop-deploy/juju/hadoop-kafka/bundle.yaml
@@ -1,6 +1,6 @@
 services:
   namenode:
-    charm: "cs:xenial/hadoop-namenode-11"
+    charm: "cs:xenial/hadoop-namenode-12"
     constraints: "mem=7G root-disk=32G"
     num_units: 1
     annotations:
@@ -9,7 +9,7 @@ services:
     to:
       - "0"
   resourcemanager:
-    charm: "cs:xenial/hadoop-resourcemanager-11"
+    charm: "cs:xenial/hadoop-resourcemanager-13"
     constraints: "mem=7G root-disk=32G"
     num_units: 1
     annotations:
@@ -18,7 +18,7 @@ services:
     to:
       - "0"
   slave:
-    charm: "cs:xenial/hadoop-slave-11"
+    charm: "cs:xenial/hadoop-slave-12"
     constraints: "mem=7G root-disk=32G"
     num_units: 3
     annotations:
@@ -29,7 +29,7 @@ services:
       - "2"
       - "3"
   plugin:
-    charm: "cs:xenial/hadoop-plugin-11"
+    charm: "cs:xenial/hadoop-plugin-12"
     annotations:
       gui-x: "1000"
       gui-y: "400"
@@ -52,7 +52,7 @@ services:
     to:
       - "4"
   zookeeper:
-    charm: "cs:xenial/zookeeper-12"
+    charm: "cs:xenial/zookeeper-16"
     constraints: "mem=3G root-disk=32G"
     num_units: 3
     annotations:
@@ -63,7 +63,7 @@ services:
       - "6"
       - "7"
   kafka:
-    charm: "cs:xenial/kafka-7"
+    charm: "cs:xenial/kafka-11"
     constraints: "mem=3G"
     num_units: 1
     annotations:
@@ -89,7 +89,7 @@ services:
     to:
       - "4"
   ganglia-node:
-    charm: "cs:~bigdata-dev/xenial/ganglia-node-6"
+    charm: "cs:~bigdata-dev/xenial/ganglia-node-7"
     annotations:
       gui-x: "250"
       gui-y: "400"

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/hadoop-kafka/ci-info.yaml
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/hadoop-kafka/ci-info.yaml b/bigtop-deploy/juju/hadoop-kafka/ci-info.yaml
index 56f11bb..fa4df9b 100644
--- a/bigtop-deploy/juju/hadoop-kafka/ci-info.yaml
+++ b/bigtop-deploy/juju/hadoop-kafka/ci-info.yaml
@@ -2,33 +2,33 @@ bundle:
   name: hadoop-kafka
   namespace: bigdata-charmers
   release: true
-  to-channel: beta
+  to-channel: edge
 charm-upgrade:
   hadoop-namenode:
     from-channel: edge
-    release: true
+    release: false
     to-channel: beta
   hadoop-resourcemanager:
     from-channel: edge
-    release: true
+    release: false
     to-channel: beta
   hadoop-slave:
     from-channel: edge
-    release: true
+    release: false
     to-channel: beta
   hadoop-client:
     from-channel: edge
-    release: true
+    release: false
     to-channel: beta
   hadoop-plugin:
     from-channel: edge
-    release: true
+    release: false
     to-channel: beta
   kafka:
     from-channel: edge
-    release: true
+    release: false
     to-channel: beta
   zookeeper:
     from-channel: edge
-    release: true
+    release: false
     to-channel: beta

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/hadoop-kafka/tests/01-bundle.py
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/hadoop-kafka/tests/01-bundle.py b/bigtop-deploy/juju/hadoop-kafka/tests/01-bundle.py
index ee35369..fb113fc 100755
--- a/bigtop-deploy/juju/hadoop-kafka/tests/01-bundle.py
+++ b/bigtop-deploy/juju/hadoop-kafka/tests/01-bundle.py
@@ -34,6 +34,18 @@ class TestBundle(unittest.TestCase):
             bun = f.read()
         bundle = yaml.safe_load(bun)
 
+        # NB: strip machine ('to') placement. We don't seem to be guaranteed
+        # the same machine numbering after the initial bundletester deployment,
+        # so we might fail when redeploying --to a specific machine to run
+        # these bundle tests. This is ok because all charms in this bundle are
+        # using 'reset: false', so we'll already have our deployment just the
+        # way we want it by the time this test runs. This was originally
+        # raised as:
+        #  https://github.com/juju/amulet/issues/148
+        for service, service_config in bundle['services'].items():
+            if 'to' in service_config:
+                del service_config['to']
+
         cls.d.load(bundle)
         cls.d.setup(timeout=3600)
         # we need units reporting ready before we attempt our smoke tests
@@ -41,6 +53,7 @@ class TestBundle(unittest.TestCase):
                                         'namenode': re.compile('ready'),
                                         'resourcemanager': re.compile('ready'),
                                         'slave': re.compile('ready'),
+                                        'zookeeper': re.compile('ready'),
                                         }, timeout=3600)
         cls.hdfs = cls.d.sentry['namenode'][0]
         cls.yarn = cls.d.sentry['resourcemanager'][0]

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/hadoop-kafka/tests/tests.yaml
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/hadoop-kafka/tests/tests.yaml b/bigtop-deploy/juju/hadoop-kafka/tests/tests.yaml
index 84f78d7..a3b7803 100644
--- a/bigtop-deploy/juju/hadoop-kafka/tests/tests.yaml
+++ b/bigtop-deploy/juju/hadoop-kafka/tests/tests.yaml
@@ -1,7 +1,13 @@
 reset: false
-deployment_timeout: 3600
+bundle_deploy: false
 sources:
   - 'ppa:juju/stable'
 packages:
   - amulet
   - python3-yaml
+# exclude tests that are unrelated to bigtop.
+excludes:
+  - ganglia
+  - ganglia-node
+  - rsyslog
+  - rsyslog-forwarder-ha

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/hadoop-processing/bundle-dev.yaml
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/hadoop-processing/bundle-dev.yaml b/bigtop-deploy/juju/hadoop-processing/bundle-dev.yaml
index 00fbdff..20ae8af 100644
--- a/bigtop-deploy/juju/hadoop-processing/bundle-dev.yaml
+++ b/bigtop-deploy/juju/hadoop-processing/bundle-dev.yaml
@@ -51,7 +51,7 @@ services:
     to:
       - "4"
   ganglia-node:
-    charm: "cs:~bigdata-dev/xenial/ganglia-node-6"
+    charm: "cs:~bigdata-dev/xenial/ganglia-node-7"
     annotations:
       gui-x: "250"
       gui-y: "400"

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/hadoop-processing/bundle-local.yaml
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/hadoop-processing/bundle-local.yaml b/bigtop-deploy/juju/hadoop-processing/bundle-local.yaml
index 39e7a2a..b277df4 100644
--- a/bigtop-deploy/juju/hadoop-processing/bundle-local.yaml
+++ b/bigtop-deploy/juju/hadoop-processing/bundle-local.yaml
@@ -51,7 +51,7 @@ services:
     to:
       - "4"
   ganglia-node:
-    charm: "cs:~bigdata-dev/xenial/ganglia-node-6"
+    charm: "cs:~bigdata-dev/xenial/ganglia-node-7"
     annotations:
       gui-x: "250"
       gui-y: "400"

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/hadoop-processing/bundle.yaml
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/hadoop-processing/bundle.yaml b/bigtop-deploy/juju/hadoop-processing/bundle.yaml
index c4c6ad6..fcd1017 100644
--- a/bigtop-deploy/juju/hadoop-processing/bundle.yaml
+++ b/bigtop-deploy/juju/hadoop-processing/bundle.yaml
@@ -1,6 +1,6 @@
 services:
   namenode:
-    charm: "cs:xenial/hadoop-namenode-11"
+    charm: "cs:xenial/hadoop-namenode-12"
     constraints: "mem=7G root-disk=32G"
     num_units: 1
     annotations:
@@ -9,7 +9,7 @@ services:
     to:
       - "0"
   resourcemanager:
-    charm: "cs:xenial/hadoop-resourcemanager-11"
+    charm: "cs:xenial/hadoop-resourcemanager-13"
     constraints: "mem=7G root-disk=32G"
     num_units: 1
     annotations:
@@ -18,7 +18,7 @@ services:
     to:
       - "0"
   slave:
-    charm: "cs:xenial/hadoop-slave-11"
+    charm: "cs:xenial/hadoop-slave-12"
     constraints: "mem=7G root-disk=32G"
     num_units: 3
     annotations:
@@ -29,7 +29,7 @@ services:
       - "2"
       - "3"
   plugin:
-    charm: "cs:xenial/hadoop-plugin-11"
+    charm: "cs:xenial/hadoop-plugin-12"
     annotations:
       gui-x: "1000"
       gui-y: "400"
@@ -51,7 +51,7 @@ services:
     to:
       - "4"
   ganglia-node:
-    charm: "cs:~bigdata-dev/xenial/ganglia-node-6"
+    charm: "cs:~bigdata-dev/xenial/ganglia-node-7"
     annotations:
       gui-x: "250"
       gui-y: "400"

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/hadoop-processing/ci-info.yaml
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/hadoop-processing/ci-info.yaml b/bigtop-deploy/juju/hadoop-processing/ci-info.yaml
index 72e2082..38ec28b 100644
--- a/bigtop-deploy/juju/hadoop-processing/ci-info.yaml
+++ b/bigtop-deploy/juju/hadoop-processing/ci-info.yaml
@@ -2,25 +2,25 @@ bundle:
   name: hadoop-processing
   namespace: bigdata-charmers
   release: true
-  to-channel: beta
+  to-channel: edge
 charm-upgrade:
   hadoop-namenode:
     from-channel: edge
-    release: true
+    release: false
     to-channel: beta
   hadoop-resourcemanager:
     from-channel: edge
-    release: true
+    release: false
     to-channel: beta
   hadoop-slave:
     from-channel: edge
-    release: true
+    release: false
     to-channel: beta
   hadoop-client:
     from-channel: edge
-    release: true
+    release: false
     to-channel: beta
   hadoop-plugin:
     from-channel: edge
-    release: true
+    release: false
     to-channel: beta

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/hadoop-processing/tests/01-bundle.py
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/hadoop-processing/tests/01-bundle.py b/bigtop-deploy/juju/hadoop-processing/tests/01-bundle.py
index b10ed22..51d1c3d 100755
--- a/bigtop-deploy/juju/hadoop-processing/tests/01-bundle.py
+++ b/bigtop-deploy/juju/hadoop-processing/tests/01-bundle.py
@@ -34,6 +34,18 @@ class TestBundle(unittest.TestCase):
             bun = f.read()
         bundle = yaml.safe_load(bun)
 
+        # NB: strip machine ('to') placement. We don't seem to be guaranteed
+        # the same machine numbering after the initial bundletester deployment,
+        # so we might fail when redeploying --to a specific machine to run
+        # these bundle tests. This is ok because all charms in this bundle are
+        # using 'reset: false', so we'll already have our deployment just the
+        # way we want it by the time this test runs. This was originally
+        # raised as:
+        #  https://github.com/juju/amulet/issues/148
+        for service, service_config in bundle['services'].items():
+            if 'to' in service_config:
+                del service_config['to']
+
         cls.d.load(bundle)
         cls.d.setup(timeout=3600)
 

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/hadoop-processing/tests/tests.yaml
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/hadoop-processing/tests/tests.yaml b/bigtop-deploy/juju/hadoop-processing/tests/tests.yaml
index 84f78d7..a3b7803 100644
--- a/bigtop-deploy/juju/hadoop-processing/tests/tests.yaml
+++ b/bigtop-deploy/juju/hadoop-processing/tests/tests.yaml
@@ -1,7 +1,13 @@
 reset: false
-deployment_timeout: 3600
+bundle_deploy: false
 sources:
   - 'ppa:juju/stable'
 packages:
   - amulet
   - python3-yaml
+# exclude tests that are unrelated to bigtop.
+excludes:
+  - ganglia
+  - ganglia-node
+  - rsyslog
+  - rsyslog-forwarder-ha

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/hadoop-spark/bundle-dev.yaml
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/hadoop-spark/bundle-dev.yaml b/bigtop-deploy/juju/hadoop-spark/bundle-dev.yaml
index 0bd529c..a42242e 100644
--- a/bigtop-deploy/juju/hadoop-spark/bundle-dev.yaml
+++ b/bigtop-deploy/juju/hadoop-spark/bundle-dev.yaml
@@ -73,7 +73,7 @@ services:
     to:
       - "4"
   ganglia-node:
-    charm: "cs:~bigdata-dev/xenial/ganglia-node-6"
+    charm: "cs:~bigdata-dev/xenial/ganglia-node-7"
     annotations:
       gui-x: "250"
       gui-y: "400"

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/hadoop-spark/bundle-local.yaml
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/hadoop-spark/bundle-local.yaml b/bigtop-deploy/juju/hadoop-spark/bundle-local.yaml
index 0c172ef..bffc459 100644
--- a/bigtop-deploy/juju/hadoop-spark/bundle-local.yaml
+++ b/bigtop-deploy/juju/hadoop-spark/bundle-local.yaml
@@ -73,7 +73,7 @@ services:
     to:
       - "4"
   ganglia-node:
-    charm: "cs:~bigdata-dev/xenial/ganglia-node-6"
+    charm: "cs:~bigdata-dev/xenial/ganglia-node-7"
     annotations:
       gui-x: "250"
       gui-y: "400"

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/hadoop-spark/bundle.yaml
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/hadoop-spark/bundle.yaml b/bigtop-deploy/juju/hadoop-spark/bundle.yaml
index 6346c01..cfbdd8b 100644
--- a/bigtop-deploy/juju/hadoop-spark/bundle.yaml
+++ b/bigtop-deploy/juju/hadoop-spark/bundle.yaml
@@ -1,6 +1,6 @@
 services:
   namenode:
-    charm: "cs:xenial/hadoop-namenode-11"
+    charm: "cs:xenial/hadoop-namenode-12"
     constraints: "mem=7G root-disk=32G"
     num_units: 1
     annotations:
@@ -9,7 +9,7 @@ services:
     to:
       - "0"
   resourcemanager:
-    charm: "cs:xenial/hadoop-resourcemanager-11"
+    charm: "cs:xenial/hadoop-resourcemanager-13"
     constraints: "mem=7G root-disk=32G"
     num_units: 1
     annotations:
@@ -18,7 +18,7 @@ services:
     to:
       - "0"
   slave:
-    charm: "cs:xenial/hadoop-slave-11"
+    charm: "cs:xenial/hadoop-slave-12"
     constraints: "mem=7G root-disk=32G"
     num_units: 3
     annotations:
@@ -29,7 +29,7 @@ services:
       - "2"
       - "3"
   plugin:
-    charm: "cs:xenial/hadoop-plugin-11"
+    charm: "cs:xenial/hadoop-plugin-12"
     annotations:
       gui-x: "1000"
       gui-y: "400"
@@ -43,7 +43,7 @@ services:
     to:
       - "4"
   spark:
-    charm: "cs:xenial/spark-19"
+    charm: "cs:xenial/spark-24"
     constraints: "mem=7G root-disk=32G"
     num_units: 1
     options:
@@ -54,7 +54,7 @@ services:
     to:
       - "5"
   zookeeper:
-    charm: "cs:xenial/zookeeper-12"
+    charm: "cs:xenial/zookeeper-16"
     constraints: "mem=3G root-disk=32G"
     num_units: 3
     annotations:
@@ -73,7 +73,7 @@ services:
     to:
       - "4"
   ganglia-node:
-    charm: "cs:~bigdata-dev/xenial/ganglia-node-6"
+    charm: "cs:~bigdata-dev/xenial/ganglia-node-7"
     annotations:
       gui-x: "250"
       gui-y: "400"

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/hadoop-spark/ci-info.yaml
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/hadoop-spark/ci-info.yaml b/bigtop-deploy/juju/hadoop-spark/ci-info.yaml
index ae79aee..17aff8b 100644
--- a/bigtop-deploy/juju/hadoop-spark/ci-info.yaml
+++ b/bigtop-deploy/juju/hadoop-spark/ci-info.yaml
@@ -2,33 +2,33 @@ bundle:
   name: hadoop-spark
   namespace: bigdata-charmers
   release: true
-  to-channel: beta
+  to-channel: edge
 charm-upgrade:
   hadoop-namenode:
     from-channel: edge
-    release: true
+    release: false
     to-channel: beta
   hadoop-resourcemanager:
     from-channel: edge
-    release: true
+    release: false
     to-channel: beta
   hadoop-slave:
     from-channel: edge
-    release: true
+    release: false
     to-channel: beta
   hadoop-client:
     from-channel: edge
-    release: true
+    release: false
     to-channel: beta
   hadoop-plugin:
     from-channel: edge
-    release: true
+    release: false
     to-channel: beta
   spark:
     from-channel: edge
-    release: true
+    release: false
     to-channel: beta
   zookeeper:
     from-channel: edge
-    release: true
+    release: false
     to-channel: beta

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/hadoop-spark/tests/01-bundle.py
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/hadoop-spark/tests/01-bundle.py b/bigtop-deploy/juju/hadoop-spark/tests/01-bundle.py
index e8a0766..1dc4147 100755
--- a/bigtop-deploy/juju/hadoop-spark/tests/01-bundle.py
+++ b/bigtop-deploy/juju/hadoop-spark/tests/01-bundle.py
@@ -34,6 +34,18 @@ class TestBundle(unittest.TestCase):
             bun = f.read()
         bundle = yaml.safe_load(bun)
 
+        # NB: strip machine ('to') placement. We don't seem to be guaranteed
+        # the same machine numbering after the initial bundletester deployment,
+        # so we might fail when redeploying --to a specific machine to run
+        # these bundle tests. This is ok because all charms in this bundle are
+        # using 'reset: false', so we'll already have our deployment just the
+        # way we want it by the time this test runs. This was originally
+        # raised as:
+        #  https://github.com/juju/amulet/issues/148
+        for service, service_config in bundle['services'].items():
+            if 'to' in service_config:
+                del service_config['to']
+
         cls.d.load(bundle)
         cls.d.setup(timeout=3600)
 
@@ -43,6 +55,7 @@ class TestBundle(unittest.TestCase):
                                         'resourcemanager': re.compile('ready'),
                                         'slave': re.compile('ready'),
                                         'spark': re.compile('ready'),
+                                        'zookeeper': re.compile('ready'),
                                         }, timeout=3600)
         cls.hdfs = cls.d.sentry['namenode'][0]
         cls.yarn = cls.d.sentry['resourcemanager'][0]
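
The placement-stripping loop above is easy to verify in isolation. A minimal
sketch with a made-up two-service bundle (service names and revisions are
illustrative):

    # Strip 'to' placement exactly as the test's setUpClass does.
    bundle = {
        'services': {
            'namenode': {'charm': 'cs:xenial/hadoop-namenode-12',
                         'num_units': 1, 'to': ['0']},
            'slave': {'charm': 'cs:xenial/hadoop-slave-12',
                      'num_units': 3},
        }
    }
    for service, service_config in bundle['services'].items():
        if 'to' in service_config:
            del service_config['to']

    assert 'to' not in bundle['services']['namenode']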

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/hadoop-spark/tests/tests.yaml
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/hadoop-spark/tests/tests.yaml b/bigtop-deploy/juju/hadoop-spark/tests/tests.yaml
index 84f78d7..a3b7803 100644
--- a/bigtop-deploy/juju/hadoop-spark/tests/tests.yaml
+++ b/bigtop-deploy/juju/hadoop-spark/tests/tests.yaml
@@ -1,7 +1,13 @@
 reset: false
-deployment_timeout: 3600
+bundle_deploy: false
 sources:
   - 'ppa:juju/stable'
 packages:
   - amulet
   - python3-yaml
+# exclude tests that are unrelated to bigtop.
+excludes:
+  - ganglia
+  - ganglia-node
+  - rsyslog
+  - rsyslog-forwarder-ha
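
How bundletester consumes `excludes` is not shown in this commit, so as a
rough illustration of the intent only (skip charm tests that are unrelated to
Bigtop), assume the runner filters test targets by name:

    # Illustration of the intent of 'excludes'; bundletester's actual
    # matching logic may differ.
    import yaml

    with open('tests.yaml') as f:
        cfg = yaml.safe_load(f)

    targets = ['namenode', 'ganglia', 'ganglia-node', 'rsyslog', 'spark']
    excluded = set(cfg.get('excludes', []))
    print([t for t in targets if t not in excluded])
    # -> ['namenode', 'spark']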

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/spark-processing/bundle-dev.yaml
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/spark-processing/bundle-dev.yaml b/bigtop-deploy/juju/spark-processing/bundle-dev.yaml
index df8306f..af76214 100644
--- a/bigtop-deploy/juju/spark-processing/bundle-dev.yaml
+++ b/bigtop-deploy/juju/spark-processing/bundle-dev.yaml
@@ -29,7 +29,7 @@ services:
     to:
       - "5"
   ganglia-node:
-    charm: "cs:~bigdata-dev/xenial/ganglia-node-6"
+    charm: "cs:~bigdata-dev/xenial/ganglia-node-7"
     annotations:
       gui-x: "250"
       gui-y: "400"

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/spark-processing/bundle-local.yaml
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/spark-processing/bundle-local.yaml b/bigtop-deploy/juju/spark-processing/bundle-local.yaml
index 063d5e7..63cdc6f 100644
--- a/bigtop-deploy/juju/spark-processing/bundle-local.yaml
+++ b/bigtop-deploy/juju/spark-processing/bundle-local.yaml
@@ -29,7 +29,7 @@ services:
     to:
       - "5"
   ganglia-node:
-    charm: "cs:~bigdata-dev/xenial/ganglia-node-6"
+    charm: "cs:~bigdata-dev/xenial/ganglia-node-7"
     annotations:
       gui-x: "250"
       gui-y: "400"

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/spark-processing/bundle.yaml
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/spark-processing/bundle.yaml b/bigtop-deploy/juju/spark-processing/bundle.yaml
index 0a37882..70ef4cf 100644
--- a/bigtop-deploy/juju/spark-processing/bundle.yaml
+++ b/bigtop-deploy/juju/spark-processing/bundle.yaml
@@ -1,6 +1,6 @@
 services:
   spark:
-    charm: "cs:xenial/spark-19"
+    charm: "cs:xenial/spark-24"
     constraints: "mem=7G root-disk=32G"
     num_units: 2
     annotations:
@@ -10,7 +10,7 @@ services:
       - "0"
       - "1"
   zookeeper:
-    charm: "cs:xenial/zookeeper-12"
+    charm: "cs:xenial/zookeeper-16"
     constraints: "mem=3G root-disk=32G"
     num_units: 3
     annotations:
@@ -29,7 +29,7 @@ services:
     to:
       - "5"
   ganglia-node:
-    charm: "cs:~bigdata-dev/xenial/ganglia-node-6"
+    charm: "cs:~bigdata-dev/xenial/ganglia-node-7"
     annotations:
       gui-x: "250"
       gui-y: "400"

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/spark-processing/ci-info.yaml
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/spark-processing/ci-info.yaml b/bigtop-deploy/juju/spark-processing/ci-info.yaml
index 4402a9a..ab67fd5 100644
--- a/bigtop-deploy/juju/spark-processing/ci-info.yaml
+++ b/bigtop-deploy/juju/spark-processing/ci-info.yaml
@@ -2,13 +2,13 @@ bundle:
   name: spark-processing
   namespace: bigdata-charmers
   release: true
-  to-channel: beta
+  to-channel: edge
 charm-upgrade:
   spark:
     from-channel: edge
-    release: true
+    release: false
     to-channel: beta
   zookeeper:
     from-channel: edge
-    release: true
+    release: false
     to-channel: beta

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/spark-processing/tests/01-bundle.py
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/spark-processing/tests/01-bundle.py b/bigtop-deploy/juju/spark-processing/tests/01-bundle.py
index 7782136..6275d43 100755
--- a/bigtop-deploy/juju/spark-processing/tests/01-bundle.py
+++ b/bigtop-deploy/juju/spark-processing/tests/01-bundle.py
@@ -17,6 +17,7 @@
 
 import amulet
 import os
+import re
 import unittest
 import yaml
 
@@ -31,9 +32,25 @@ class TestBundle(unittest.TestCase):
             bun = f.read()
         bundle = yaml.safe_load(bun)
 
+        # NB: strip machine ('to') placement. We don't seem to be guaranteed
+        # the same machine numbering after the initial bundletester deployment,
+        # so we might fail when redeploying --to a specific machine to run
+        # these bundle tests. This is ok because all charms in this bundle are
+        # using 'reset: false', so we'll already have our deployment just the
+        # way we want it by the time this test runs. This was originally
+        # raised as:
+        #  https://github.com/juju/amulet/issues/148
+        for service, service_config in bundle['services'].items():
+            if 'to' in service_config:
+                del service_config['to']
+
         cls.d.load(bundle)
         cls.d.setup(timeout=3600)
-        cls.d.sentry.wait_for_messages({'spark': 'ready (standalone - HA)'}, timeout=3600)
+
+        # we need units reporting ready before we attempt our smoke tests
+        cls.d.sentry.wait_for_messages({'spark': 'ready (standalone - HA)',
+                                        'zookeeper': re.compile('ready'),
+                                        }, timeout=3600)
         cls.spark = cls.d.sentry['spark'][0]
         cls.zookeeper = cls.d.sentry['zookeeper'][0]
 

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-deploy/juju/spark-processing/tests/tests.yaml
----------------------------------------------------------------------
diff --git a/bigtop-deploy/juju/spark-processing/tests/tests.yaml b/bigtop-deploy/juju/spark-processing/tests/tests.yaml
index 84f78d7..8a28f2b 100644
--- a/bigtop-deploy/juju/spark-processing/tests/tests.yaml
+++ b/bigtop-deploy/juju/spark-processing/tests/tests.yaml
@@ -1,7 +1,19 @@
 reset: false
-deployment_timeout: 3600
+bundle_deploy: false
 sources:
   - 'ppa:juju/stable'
 packages:
   - amulet
   - python3-yaml
+# exclude tests that are unrelated to bigtop. the exclusion of spark might
+# look weird here, but for this bundle, we only care that spark is good in
+# HA mode (covered by this bundle when we invoke the spark smoke-test). the
+# typical spark tests will test spark once in standalone and twice more in
+# various HA modes. that takes forever, so leave those heavy tests for the
+# hadoop-spark bundle. let's go fast on this one.
+excludes:
+  - ganglia
+  - ganglia-node
+  - rsyslog
+  - rsyslog-forwarder-ha
+  - spark

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-packages/src/charm/hadoop/layer-hadoop-resourcemanager/actions.yaml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/charm/hadoop/layer-hadoop-resourcemanager/actions.yaml b/bigtop-packages/src/charm/hadoop/layer-hadoop-resourcemanager/actions.yaml
index bdd28be..ebeaa4b 100644
--- a/bigtop-packages/src/charm/hadoop/layer-hadoop-resourcemanager/actions.yaml
+++ b/bigtop-packages/src/charm/hadoop/layer-hadoop-resourcemanager/actions.yaml
@@ -77,15 +77,15 @@ testdfsio:
                 default: 1000
         buffersize:
                 description: Buffer size in bytes
-                type: integer
-                default: 1000000
+                type: string
+                default: "1000000"
 teragen:
     description: Generate data with teragen
     params:
         size:
             description: The number of 100 byte rows, defaults to 1GB of data to generate
-            type: integer
-            default: 10000000
+            type: string
+            default: "10000000"
         indir:
             description: HDFS directory where generated data is stored
             type: string
@@ -103,8 +103,8 @@ terasort:
             default: '/benchmarks/TeraSort'
         size:
             description: The number of 100 byte rows, defaults to 1GB of data to generate and sort
-            type: integer
-            default: 10000000
+            type: string
+            default: "10000000"
         maps:
             description: The default number of map tasks per job. 1-20
             type: integer
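
Switching these defaults from integer to string presumably sidesteps
type-handling problems with large numeric action parameters; the action
script can still convert explicitly. A minimal sketch of the consuming side,
where `hookenv.action_get` is the real charmhelpers API and the rest is
illustrative:

    # Illustrative action handler; params now arrive as strings per
    # the actions.yaml change above, so convert before use.
    import sys
    sys.path.append('lib')

    from charmhelpers.core import hookenv  # noqa: E402

    size = int(hookenv.action_get('size') or '10000000')
    print('generating {} rows ({} bytes)'.format(size, size * 100))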

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-packages/src/charm/hbase/layer-hbase/README.md
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/charm/hbase/layer-hbase/README.md b/bigtop-packages/src/charm/hbase/layer-hbase/README.md
index a63ddf8..d5b714a 100644
--- a/bigtop-packages/src/charm/hbase/layer-hbase/README.md
+++ b/bigtop-packages/src/charm/hbase/layer-hbase/README.md
@@ -16,7 +16,7 @@
 -->
 # Overview
 
-HBase is the Hadoop database. Think of it as a distributed scalable Big Data
+HBase is the Hadoop database. Think of it as a distributed, scalable Big Data
 store.
 
 Use HBase when you need random, realtime read/write access to your Big Data.
@@ -64,30 +64,21 @@ In a distributed HBase environment, one master and one regionserver are
 deployed on each unit. HBase makes sure that only one master is active and
 the rest are in standby mode in case the active one fails.
 
-HBase operates over HDFS, so we first need to deploy an HDFS cluster:
+Because HBase requires HDFS, we recommend deploying this charm as part of
+the `hadoop-hbase` bundle:
 
-    juju deploy hadoop-namenode namenode
-    juju deploy hadoop-slave slave
-    juju deploy hadoop-plugin plugin
+    juju deploy hadoop-hbase
 
-    juju add-relation namenode slave
-    juju add-relation plugin namenode
-
-In order to function correctly, the HBase master and regionserver applications
-have a mandatory relationship with Zookeeper. Use the zookeeper charm to
-create a functional zookeeper quorum. Remember that quorums come in odd numbers
-starting with 3 (one will work, but will offer no resilience):
-
-    juju deploy zookeeper -n 3
-
-Now add HBase scaled to 3 units and add the required relations:
-
-    juju deploy hbase -n 3
+> **Note**: The above assumes Juju 2.0 or greater. If using an earlier version
+of Juju, use [juju-quickstart][] with the following syntax: `juju quickstart
+hadoop-hbase`.
 
-    juju add-relation plugin hbase
-    juju add-relation zookeeper hbase
+This will deploy an Apache Bigtop Hadoop cluster with 3 HBase units. More
+information about this deployment can be found in the
+[bundle readme](https://jujucharms.com/hadoop-hbase/).
 
-The charm also supports use of the thrift gateway.
+This charm also supports the Thrift client API for HBase. Thrift is both
+cross-platform and more lightweight than REST for many operations.
 
 ## Network-Restricted Environments
 Charms can be deployed in environments with limited network access. To deploy
@@ -95,6 +86,7 @@ in this environment, configure a Juju model with appropriate proxy and/or
 mirror options. See [Configuring Models][] for more information.
 
 [getting-started]: https://jujucharms.com/docs/stable/getting-started
+[juju-quickstart]: https://launchpad.net/juju-quickstart
 [Configuring Models]: https://jujucharms.com/docs/stable/models-config
 
 
@@ -140,14 +132,61 @@ more information about a specific smoke test with:
 > **Note**: The above assumes Juju 2.0 or greater. If using an earlier version
 of Juju, the syntax is `juju action fetch <action-id>`.
 
+## HBase web UI
+HBase provides a web console that can be used to verify information about
+the cluster. To access it, find the `PUBLIC-ADDRESS` of any hbase unit and
+expose the application:
+
+    juju status hbase
+    juju expose hbase
+
+The web interface will be available at the following URL:
+
+    http://HBASE_PUBLIC_IP:60010
+
+
+# Using
+
+Once the deployment has been verified, there are a number of actions available
+in this charm.
+> **Note**: Actions described below assume Juju 2.0 or greater. If using an
+earlier version of Juju, the action syntax is:
+`juju action do hbase/0 <action_name> <action_args>; juju action fetch <id>`.
+
+Run a performance test:
+
+    juju run-action hbase/0 perf-test
+    juju show-action-output <id>  # <-- id from above command
+
+Run a smoke test (as described in the above **Verifying** section):
+
+    juju run-action hbase/0 smoke-test
+    juju show-action-output <id>  # <-- id from above command
+
+Start/Stop/Restart all HBase services on a unit:
+
+    juju run-action hbase/0 [start|stop|restart]
+    juju show-action-output <id>  # <-- id from above command
+
+
+Start/Stop the HBase Master service on a unit:
+
+    juju run-action hbase/0 [start|stop]-hbase-master
+    juju show-action-output <id>  # <-- id from above command
+
+Start/Stop the HBase RegionServer and Thrift services on a unit:
+
+    juju run-action hbase/0 [start|stop]-hbase-regionserver
+    juju show-action-output <id>  # <-- id from above command
+
 
 # Limitations
 
 Restarting an HBase deployment is potentially disruptive. Be aware that the
 following events will cause a restart:
 
-- Zookeeper service units joining or departing relations.
-- Upgrading the charm or changing the configuration.
+- Zookeeper units joining or departing the quorum.
+- Upgrading the hbase charm.
 
 
 # Contact Information
@@ -157,10 +196,10 @@ following events will cause a restart:
 
 # Resources
 
-- [Apache Bigtop](http://bigtop.apache.org/) home page
-- [Apache Bigtop mailing lists](http://bigtop.apache.org/mail-lists.html)
 - [Apache HBase home page](https://hbase.apache.org/)
-- [Apache Zookeeper issue tracker](https://issues.apache.org/jira/browse/HBASE)
+- [Apache HBase issue tracker](https://issues.apache.org/jira/browse/HBASE)
+- [Apache Bigtop home page](http://bigtop.apache.org/)
+- [Apache Bigtop mailing lists](http://bigtop.apache.org/mail-lists.html)
 - [Juju Bigtop charms](https://jujucharms.com/q/apache/bigtop)
 - [Juju mailing list](https://lists.ubuntu.com/mailman/listinfo/juju)
 - [Juju community](https://jujucharms.com/community)
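
The Thrift support mentioned in this README can be exercised from Python; a
minimal client sketch, assuming the Thrift service listens on HBase's default
port 9090 and using the third-party happybase library (the address, table,
and row values are made up):

    # Hypothetical Thrift session against an hbase unit; requires
    # 'pip install happybase' and a reachable Thrift service.
    import happybase

    conn = happybase.Connection('HBASE_PUBLIC_IP', port=9090)
    conn.create_table('smoke', {'cf': dict()})
    table = conn.table('smoke')
    table.put(b'row-1', {b'cf:greeting': b'hello from thrift'})
    print(table.row(b'row-1'))
    conn.close()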

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-packages/src/charm/hbase/layer-hbase/actions/restart
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/charm/hbase/layer-hbase/actions/restart b/bigtop-packages/src/charm/hbase/layer-hbase/actions/restart
index c798db7..9ce0a62 100755
--- a/bigtop-packages/src/charm/hbase/layer-hbase/actions/restart
+++ b/bigtop-packages/src/charm/hbase/layer-hbase/actions/restart
@@ -18,15 +18,16 @@
 import sys
 sys.path.append('lib')
 
-from charmhelpers.core import hookenv
-from charms.reactive import is_state
-from charms.layer.bigtop_hbase import HBase
+from charmhelpers.core import hookenv  # noqa: E402
+from charms.reactive import is_state  # noqa: E402
+from charms.layer.bigtop_hbase import HBase  # noqa: E402
 
 
 def fail(msg):
     hookenv.action_fail(msg)
     sys.exit()
 
+
 if not is_state('hbase.installed'):
     fail('HBase is not yet ready')
 

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-packages/src/charm/hbase/layer-hbase/actions/start
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/charm/hbase/layer-hbase/actions/start b/bigtop-packages/src/charm/hbase/layer-hbase/actions/start
index 9a6c91b..9a5a473 100755
--- a/bigtop-packages/src/charm/hbase/layer-hbase/actions/start
+++ b/bigtop-packages/src/charm/hbase/layer-hbase/actions/start
@@ -18,15 +18,16 @@
 import sys
 sys.path.append('lib')
 
-from charmhelpers.core import hookenv
-from charms.reactive import is_state
-from charms.layer.bigtop_hbase import HBase
+from charmhelpers.core import hookenv  # noqa: E402
+from charms.reactive import is_state  # noqa: E402
+from charms.layer.bigtop_hbase import HBase  # noqa: E402
 
 
 def fail(msg):
     hookenv.action_fail(msg)
     sys.exit()
 
+
 if not is_state('hbase.installed'):
     fail('HBase is not yet ready')
 

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-packages/src/charm/hbase/layer-hbase/actions/start-hbase-master
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/charm/hbase/layer-hbase/actions/start-hbase-master b/bigtop-packages/src/charm/hbase/layer-hbase/actions/start-hbase-master
index 85aa926..13bc1bc 100755
--- a/bigtop-packages/src/charm/hbase/layer-hbase/actions/start-hbase-master
+++ b/bigtop-packages/src/charm/hbase/layer-hbase/actions/start-hbase-master
@@ -16,7 +16,6 @@
 # limitations under the License.
 
 import sys
-sys.path.append('lib')
 
 from charmhelpers.core import host, hookenv
 from charms.reactive import is_state
@@ -26,6 +25,7 @@ def fail(msg):
     hookenv.action_fail(msg)
     sys.exit()
 
+
 if not is_state('hbase.installed'):
     fail('HBase is not yet ready')
 

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-packages/src/charm/hbase/layer-hbase/actions/start-hbase-regionserver
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/charm/hbase/layer-hbase/actions/start-hbase-regionserver b/bigtop-packages/src/charm/hbase/layer-hbase/actions/start-hbase-regionserver
index aaa2ad7..b7c01a7 100755
--- a/bigtop-packages/src/charm/hbase/layer-hbase/actions/start-hbase-regionserver
+++ b/bigtop-packages/src/charm/hbase/layer-hbase/actions/start-hbase-regionserver
@@ -16,7 +16,6 @@
 # limitations under the License.
 
 import sys
-sys.path.append('lib')
 
 from charmhelpers.core import host, hookenv
 from charms.reactive import is_state
@@ -26,6 +25,7 @@ def fail(msg):
     hookenv.action_fail(msg)
     sys.exit()
 
+
 if not is_state('hbase.installed'):
     fail('HBase is not yet ready')
 

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-packages/src/charm/hbase/layer-hbase/actions/stop
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/charm/hbase/layer-hbase/actions/stop b/bigtop-packages/src/charm/hbase/layer-hbase/actions/stop
index 7cc16ff..8bdbad6 100755
--- a/bigtop-packages/src/charm/hbase/layer-hbase/actions/stop
+++ b/bigtop-packages/src/charm/hbase/layer-hbase/actions/stop
@@ -18,15 +18,16 @@
 import sys
 sys.path.append('lib')
 
-from charmhelpers.core import hookenv
-from charms.reactive import is_state
-from charms.layer.bigtop_hbase import HBase
+from charmhelpers.core import hookenv  # noqa: E402
+from charms.reactive import is_state  # noqa: E402
+from charms.layer.bigtop_hbase import HBase  # noqa: E402
 
 
 def fail(msg):
     hookenv.action_fail(msg)
     sys.exit()
 
+
 if not is_state('hbase.installed'):
     fail('HBase is not yet ready')
 

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-packages/src/charm/hbase/layer-hbase/actions/stop-hbase-master
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/charm/hbase/layer-hbase/actions/stop-hbase-master b/bigtop-packages/src/charm/hbase/layer-hbase/actions/stop-hbase-master
index 2936118..0279089 100755
--- a/bigtop-packages/src/charm/hbase/layer-hbase/actions/stop-hbase-master
+++ b/bigtop-packages/src/charm/hbase/layer-hbase/actions/stop-hbase-master
@@ -16,7 +16,6 @@
 # limitations under the License.
 
 import sys
-sys.path.append('lib')
 
 from charmhelpers.core import host, hookenv
 from charms.reactive import is_state
@@ -26,6 +25,7 @@ def fail(msg):
     hookenv.action_fail(msg)
     sys.exit()
 
+
 if not is_state('hbase.installed'):
     fail('HBase is not yet ready')
 

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-packages/src/charm/hbase/layer-hbase/actions/stop-hbase-regionserver
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/charm/hbase/layer-hbase/actions/stop-hbase-regionserver b/bigtop-packages/src/charm/hbase/layer-hbase/actions/stop-hbase-regionserver
index a203dbe..862770f 100755
--- a/bigtop-packages/src/charm/hbase/layer-hbase/actions/stop-hbase-regionserver
+++ b/bigtop-packages/src/charm/hbase/layer-hbase/actions/stop-hbase-regionserver
@@ -16,7 +16,6 @@
 # limitations under the License.
 
 import sys
-sys.path.append('lib')
 
 from charmhelpers.core import host, hookenv
 from charms.reactive import is_state
@@ -26,6 +25,7 @@ def fail(msg):
     hookenv.action_fail(msg)
     sys.exit()
 
+
 if not is_state('hbase.installed'):
     fail('HBase is not yet ready')
 

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-packages/src/charm/hbase/layer-hbase/layer.yaml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/charm/hbase/layer-hbase/layer.yaml b/bigtop-packages/src/charm/hbase/layer-hbase/layer.yaml
index ebebc56..a35e252 100644
--- a/bigtop-packages/src/charm/hbase/layer-hbase/layer.yaml
+++ b/bigtop-packages/src/charm/hbase/layer-hbase/layer.yaml
@@ -5,6 +5,7 @@ includes:
   - 'interface:zookeeper'
   - 'interface:benchmark'
   - 'interface:hbase'
+  - 'interface:hbase-quorum'
 options:
   apache-bigtop-base:
     ports:

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-packages/src/charm/hbase/layer-hbase/metadata.yaml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/charm/hbase/layer-hbase/metadata.yaml b/bigtop-packages/src/charm/hbase/layer-hbase/metadata.yaml
index 9157ad1..821f3fa 100644
--- a/bigtop-packages/src/charm/hbase/layer-hbase/metadata.yaml
+++ b/bigtop-packages/src/charm/hbase/layer-hbase/metadata.yaml
@@ -2,8 +2,8 @@ name: hbase
 summary: Apache Bigtop HBase
 maintainer: Juju Big Data <bi...@lists.ubuntu.com>
 description: >
-  HBase is the Hadoop database. This charm provides a Apache HBase from
-  Apache Bigtop.
+  HBase is the Hadoop database. This charm provides HBase from the
+  Apache Bigtop project.
 tags: []
 requires:
   zookeeper:
@@ -13,3 +13,6 @@ provides:
     interface: hbase
   benchmark:
     interface: benchmark
+peers:
+  hbpeer:
+    interface: hbase-quorum
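
The new `hbpeer` peer relation gives each hbase unit a view of its siblings.
The states and methods an interface layer exposes are defined by that layer
and are not part of this commit, so the following reactive sketch is only an
assumption about how `interface:hbase-quorum` might surface peers:

    # Speculative sketch; 'hbpeer.joined' and peers.get_nodes() are
    # assumed conveniences of the hbase-quorum interface layer.
    from charms.reactive import when
    from charmhelpers.core import hookenv


    @when('hbase.installed', 'hbpeer.joined')
    def log_peers(peers):
        nodes = peers.get_nodes()  # assumed: [(unit_name, ip), ...]
        hookenv.log('hbase peers: {}'.format(nodes))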

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-packages/src/charm/hbase/layer-hbase/reactive/hbase.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/charm/hbase/layer-hbase/reactive/hbase.py b/bigtop-packages/src/charm/hbase/layer-hbase/reactive/hbase.py
index b11cac2..26751b5 100644
--- a/bigtop-packages/src/charm/hbase/layer-hbase/reactive/hbase.py
+++ b/bigtop-packages/src/charm/hbase/layer-hbase/reactive/hbase.py
@@ -17,7 +17,7 @@ from charms.reactive import when, when_not_all, is_state, set_state, remove_stat
 from charms.layer.bigtop_hbase import HBase
 from charmhelpers.core import hookenv
 from charms.reactive.helpers import data_changed
-from charms.layer.apache_bigtop_base import get_layer_opts
+from charms.layer.apache_bigtop_base import get_layer_opts, get_package_version
 
 
 @when('bigtop.available')
@@ -45,9 +45,10 @@ def report_status():
 
 
 @when('bigtop.available', 'zookeeper.ready', 'hadoop.hdfs.ready')
-def installing_hbase(zk, hdfs):
+def install_hbase(zk, hdfs):
     zks = zk.zookeepers()
-    if is_state('hbase.installed') and (not data_changed('zks', zks)):
+    if (is_state('hbase.installed') and
+            (not data_changed('zks', zks))):
         return
 
     msg = "configuring hbase" if is_state('hbase.installed') else "installing hbase"
@@ -60,7 +61,10 @@ def installing_hbase(zk, hdfs):
     hbase.configure(hosts, zks)
     hbase.open_ports()
     set_state('hbase.installed')
-    hookenv.status_set('active', 'ready')
+    report_status()
+    # set app version string for juju status output
+    hbase_version = get_package_version('hbase-master') or 'unknown'
+    hookenv.application_version_set(hbase_version)
 
 
 @when('hbase.installed')

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-packages/src/charm/kafka/layer-kafka/tests/01-deploy.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/charm/kafka/layer-kafka/tests/01-deploy.py b/bigtop-packages/src/charm/kafka/layer-kafka/tests/01-deploy.py
index 62d6a55..f8f0679 100755
--- a/bigtop-packages/src/charm/kafka/layer-kafka/tests/01-deploy.py
+++ b/bigtop-packages/src/charm/kafka/layer-kafka/tests/01-deploy.py
@@ -26,7 +26,7 @@ class TestDeploy(unittest.TestCase):
     @classmethod
     def setUpClass(cls):
         cls.d = amulet.Deployment(series='xenial')
-        cls.d.add('kafka', charm='kafka')
+        cls.d.add('kafka', charm='cs:xenial/kafka')
         cls.d.add('zookeeper', charm='cs:xenial/zookeeper')
 
         cls.d.relate('kafka:zookeeper', 'zookeeper:zookeeper')

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-packages/src/charm/kafka/layer-kafka/tests/02-smoke-test.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/charm/kafka/layer-kafka/tests/02-smoke-test.py b/bigtop-packages/src/charm/kafka/layer-kafka/tests/02-smoke-test.py
index f396bdb..c688c69 100755
--- a/bigtop-packages/src/charm/kafka/layer-kafka/tests/02-smoke-test.py
+++ b/bigtop-packages/src/charm/kafka/layer-kafka/tests/02-smoke-test.py
@@ -26,7 +26,7 @@ class TestDeploy(unittest.TestCase):
     @classmethod
     def setUpClass(cls):
         cls.d = amulet.Deployment(series='xenial')
-        cls.d.add('kafka', charm='kafka')
+        cls.d.add('kafka', charm='cs:xenial/kafka')
         cls.d.add('zookeeper', charm='cs:xenial/zookeeper')
 
         cls.d.relate('kafka:zookeeper', 'zookeeper:zookeeper')

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-packages/src/charm/kafka/layer-kafka/tests/10-config-changed.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/charm/kafka/layer-kafka/tests/10-config-changed.py b/bigtop-packages/src/charm/kafka/layer-kafka/tests/10-config-changed.py
index 4fd44ce..a27f783 100755
--- a/bigtop-packages/src/charm/kafka/layer-kafka/tests/10-config-changed.py
+++ b/bigtop-packages/src/charm/kafka/layer-kafka/tests/10-config-changed.py
@@ -29,14 +29,26 @@ class TestConfigChanged(unittest.TestCase):
     @classmethod
     def setUpClass(cls):
         cls.d = amulet.Deployment(series='xenial')
-        cls.d.add('kafka', charm='kafka')
-        cls.d.add('zookeeper', charm='cs:xenial/zookeeper')
+        cls.d.add('kafka-test', charm='cs:xenial/kafka')
+        cls.d.add('zk-test', charm='cs:xenial/zookeeper')
 
-        cls.d.relate('kafka:zookeeper', 'zookeeper:zookeeper')
+        cls.d.relate('kafka-test:zookeeper', 'zk-test:zookeeper')
 
         cls.d.setup(timeout=1800)
-        cls.d.sentry.wait_for_messages({'kafka': 'ready'}, timeout=1800)
-        cls.unit = cls.d.sentry['kafka'][0]
+        cls.d.sentry.wait_for_messages({'kafka-test': 'ready'}, timeout=1800)
+        cls.unit = cls.d.sentry['kafka-test'][0]
+
+    @classmethod
+    def tearDownClass(cls):
+        # NB: seems to be a remove_service issue with amulet. However, the
+        # unit does still get removed. Pass OSError for now:
+        #  OSError: juju command failed ['remove-application', 'zk-test']:
+        #  ERROR allocation for service ...zk-test... owned by ... not found
+        try:
+            cls.d.remove_service('zk-test', 'kafka-test')
+        except OSError as e:
+            print("IGNORE: Amulet remove_service failed: {}".format(e))
+            pass
 
     def test_bind_network_interface(self):
         """
@@ -59,7 +71,7 @@ class TestConfigChanged(unittest.TestCase):
             raise Exception(
                 "Could not find any interface on the unit that matched my "
                 "criteria.")
-        self.d.configure('kafka', {'network_interface': network_interface})
+        self.d.configure('kafka-test', {'network_interface': network_interface})
 
         # NB: we used to watch for a maintenance status message, but every now
         # and then, we'd miss it. Wait 2m to let the config-changed hook settle.
@@ -85,7 +97,7 @@ class TestConfigChanged(unittest.TestCase):
         """
         Verify that we can reset the client port bindings to 0.0.0.0
         """
-        self.d.configure('kafka', {'network_interface': '0.0.0.0'})
+        self.d.configure('kafka-test', {'network_interface': '0.0.0.0'})
 
         # NB: we used to watch for a maintenance status message, but every now
         # and then, we'd miss it. Wait 2m to let the config-changed hook settle.

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4a24c4bd/bigtop-packages/src/charm/mahout/layer-mahout/actions/smoke-test
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/charm/mahout/layer-mahout/actions/smoke-test b/bigtop-packages/src/charm/mahout/layer-mahout/actions/smoke-test
index 22cf7c7..cc98b46 100755
--- a/bigtop-packages/src/charm/mahout/layer-mahout/actions/smoke-test
+++ b/bigtop-packages/src/charm/mahout/layer-mahout/actions/smoke-test
@@ -17,19 +17,48 @@
 
 set -ex
 
-if hdfs dfs -stat /tmp/input/ &> /dev/null; then
-    hdfs dfs -rm -r -skipTrash /tmp/input/ || true
+if ! charms.reactive is_state 'mahout.installed'; then
+    action-fail 'Mahout is not yet ready'
+    exit
 fi
 
-hdfs dfs -mkdir /tmp/input/
-hdfs dfs -put resources/links-converted.txt /tmp/input/
-hdfs dfs -put resources/users.txt /tmp/input/
+# create dir to store results
+RUN=`date +%s`
+RESULT_DIR=/opt/mahout-smoke-results
+RESULT_LOG=${RESULT_DIR}/${RUN}.log
+mkdir -p ${RESULT_DIR}
+chown -R ubuntu:ubuntu ${RESULT_DIR}
 
-if hdfs dfs -stat temp &> /dev/null; then
-    hdfs dfs -rm -r -skipTrash temp || true
-fi
-if hdfs dfs -stat output &> /dev/null; then
-    hdfs dfs -rm -r -skipTrash output || true
-fi
+# hdfs dirs
+MAHOUT_SMOKE="/tmp/mahout-smoke"
+MAHOUT_INPUT="${MAHOUT_SMOKE}/input"
+MAHOUT_OUTPUT="${MAHOUT_SMOKE}/output"
+
+# remove any previous smoke test run. must be run as ubuntu, since that user
+# owns the hdfs space
+su - ubuntu -c "hadoop fs -rm -f -r -skipTrash ${MAHOUT_SMOKE}"
+su - ubuntu -c "hadoop fs -rm -f -r -skipTrash temp"
+
+echo 'running mahout smoke-test as the ubuntu user'
+# NB: Escaped envars in the block below (e.g., \${CHARM_DIR}) come from
+# the environment while non-escaped vars (e.g., ${MAHOUT_INPUT}) come from
+# this outer scope.
+su ubuntu << EOF
+set -x
+. /etc/default/hadoop
+. /etc/environment
+
+# setup our smoke test input
+hdfs dfs -mkdir -p ${MAHOUT_INPUT}
+hdfs dfs -put \${CHARM_DIR}/resources/links-converted.txt ${MAHOUT_INPUT}
+hdfs dfs -put \${CHARM_DIR}/resources/users.txt ${MAHOUT_INPUT}
 
-hadoop jar /usr/lib/mahout/mahout-mr-*-job.jar org.apache.mahout.cf.taste.hadoop.item.RecommenderJob -Dmapred.input.dir=/tmp/input/links-converted.txt -Dmapred.output.dir=output --usersFile /tmp/input/users.txt --booleanData -s SIMILARITY_LOGLIKELIHOOD
+hadoop jar /usr/lib/mahout/mahout-mr-*-job.jar \
+  org.apache.mahout.cf.taste.hadoop.item.RecommenderJob \
+  -Dmapreduce.input.fileinputformat.inputdir=${MAHOUT_INPUT}/links-converted.txt \
+  -Dmapred.output.dir=${MAHOUT_OUTPUT} \
+  --usersFile ${MAHOUT_INPUT}/users.txt \
+  --booleanData \
+  -s SIMILARITY_LOGLIKELIHOOD 2>&1 | tee -a ${RESULT_LOG}
+EOF
+echo 'mahout smoke-test complete'